Dataset schema (column name, dtype, and observed range; ⌀ marks nullable columns):

hexsha (stringlengths 40–40) | size (int64 6–14.9M) | ext (stringclasses 1 value) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 6–260) | max_stars_repo_name (stringlengths 6–119) | max_stars_repo_head_hexsha (stringlengths 40–41) | max_stars_repo_licenses (sequence) | max_stars_count (int64 1–191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24–24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24–24 ⌀) | max_issues_repo_path (stringlengths 6–260) | max_issues_repo_name (stringlengths 6–119) | max_issues_repo_head_hexsha (stringlengths 40–41) | max_issues_repo_licenses (sequence) | max_issues_count (int64 1–67k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24–24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24–24 ⌀) | max_forks_repo_path (stringlengths 6–260) | max_forks_repo_name (stringlengths 6–119) | max_forks_repo_head_hexsha (stringlengths 40–41) | max_forks_repo_licenses (sequence) | max_forks_count (int64 1–105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24–24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24–24 ⌀) | avg_line_length (float64 2–1.04M) | max_line_length (int64 2–11.2M) | alphanum_fraction (float64 0–1) | cells (sequence) | cell_types (sequence) | cell_type_groups (sequence) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
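The rows below are sample records from the corpus; each record embeds a full notebook's cell sources and outputs, so a single row spans many lines. As a minimal sketch, a dataset with this schema could be loaded with the Hugging Face `datasets` library; the dataset identifier below is a placeholder, since this listing does not name the dataset:

```python
from datasets import load_dataset  # pip install datasets

# "org/jupyter-notebooks" is a hypothetical identifier; substitute the real one
ds = load_dataset("org/jupyter-notebooks", split="train", streaming=True)

row = next(iter(ds))
print(row["hexsha"], row["size"], row["max_stars_count"])
print(row["cell_types"][:5])  # e.g. ['code', 'code', 'markdown', ...]
```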
d0e9c60c6441c0d3b0a95023cf77197fc20dfddf | 104,509 | ipynb | Jupyter Notebook | DNN.ipynb | josephseverino/Stock_classifier | b7befa43099792a0e2acf8d440bbe6e47ff0d799 | [
"Apache-2.0"
] | 2 | 2018-10-22T17:52:31.000Z | 2021-11-20T07:37:18.000Z | DNN.ipynb | josephseverino/Stock_classifier | b7befa43099792a0e2acf8d440bbe6e47ff0d799 | [
"Apache-2.0"
] | null | null | null | DNN.ipynb | josephseverino/Stock_classifier | b7befa43099792a0e2acf8d440bbe6e47ff0d799 | [
"Apache-2.0"
] | null | null | null | 50.98 | 22,124 | 0.507765 | [
[
[
"import numpy\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten, Activation\nfrom keras.layers.convolutional import Conv2D, MaxPooling2D\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.utils import to_categorical\nfrom keras import backend as K\nfrom keras.wrappers.scikit_learn import KerasClassifier\nfrom keras.utils import np_utils\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.model_selection import cross_val_score\nimport numpy as np\nimport pandas as pd\nfrom sklearn import preprocessing\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn import linear_model\nfrom statistics import mean, stdev\nfrom sklearn.preprocessing import scale \nfrom keras.utils import to_categorical\nfrom sklearn import metrics",
"_____no_output_____"
],
[
"SOXL = pd.read_csv('soxl_new.csv') #ETF growth cycle\nTQQQ = pd.read_csv('tqqq_new.csv') #3X Index\nMU = pd.read_csv('mu_new.csv') #high Beta\nAMD = pd.read_csv('amd_new.csv') # high beta\nNFLX = pd.read_csv('nflx_new.csv') #High growth\nAMZN = pd.read_csv('amzn_new.csv') #High growth\nV = pd.read_csv('visa_new.csv') #low volalitity\nNVDA = pd.read_csv('nvda_new.csv') #high growth\n\n\n\n",
"_____no_output_____"
],
[
"NFLX['tar_3best_class'].unique()",
"_____no_output_____"
],
[
"features = ['Day_previous_roi','ma10','rsi10','ma20','rsi20','ma_chg20',\n 'ma60','rsi60','ma200','rsi200','obv','macd_diff','ma_chg10',\n 'macd_diff_hist','aroon_diff','slope60','r_sqr_60','ma_chg60',\n 'slope10','r_sqr_10','slope5','r_sqr_5','stDev20','ma_chg200',\n 'rsi_chg10','rsi_chg20','rsi_chg60','rsi_chg200',\n 'percent_down','sine','leadsine','tsf10','tsf20','tsf60','tsf200',\n 'up_dwn_prev','shawman','hammer','semi_pk_pr']\ntop_feats = ['ma200', \n 'macd_diff_hist', \n 'tsf200', \n 'r_sqr_60', \n 'slope60']#, \n# 'macd_diff', \n# 'tsf60', \n# 'slope10', \n# 'r_sqr_10', \n# 'percent_down', \n# 'rsi60', \n# 'obv']\n#Set stock or dataframe\ndf_cln = NFLX\ntarget_name = 'tar_3best_class'\n#.75 make a 25/75 split\nstop = round(.80*len(df_cln))\n\n#set features\n\nfeatures = df_cln[features].values\ntop_features = df_cln[top_feats].values\ntargets = df_cln[target_name].values\narr = []\nend = targets.shape[0]\nfor i in range(end):\n #print(targets[i])\n if targets[i] == 'bel_1.02':\n arr.append([1,0,0,0])\n elif targets[i] == 'abv_1.02':\n arr.append([0,1,0,0])\n elif targets[i] == 'abv_1.04':\n arr.append([0,0,1,0])\n elif targets[i] == 'abv_1.07':\n arr.append([0,0,0,1])\ntarget_int = arr\n\nfeature_train = features[:stop]\nfeature_test = features[stop:]\n\ntop_feat_train = top_features[:stop]\ntop_feat_test = top_features[stop:]\n\ntarget_test_int = target_int[stop:]\ntarget_train_int = target_int[:stop]\n\n\n#set my targets\n\n\n# feature_train = np.array(feature_train)\n# target_train = np.array(target_train)\n# feature_test = np.array(feature_test)\n# target_test = np.array(target_test)\n\n\nfeature_train.reshape(feature_train.shape[0], \n feature_train.shape[1],\n 1).astype( 'float32' )\n\nfeature_test.reshape(feature_test.shape[0], \n feature_test.shape[1],\n 1).astype( 'float32' )\n#print(feature_train.shape,target_train.shape)\n\n",
"_____no_output_____"
],
[
"# from keras.utils.np_utils import to_categorical\n\n# categorical_labels = to_categorical(target_int, num_classes=4)\n# categorical_labels\n#target_train_int",
"_____no_output_____"
],
[
"# # Standardize the train and test features\n# scaled_train_features = scale(feature_train)\n# scaled_test_features = scale(feature_test)\n# # Create the model\n# def baseline_model():\n# model_1 = Sequential()\n# model_1.add(Dense(200, input_dim=scaled_train_features.shape[1], activation='relu'))\n# #model_1.add(Dropout(0.25))\n# model_1.add(Dense(200, activation='relu'))\n# #model_1.add(Dropout(0.25))\n# model_1.add(Dense(100, activation='relu'))\n# model_1.add(Dense(10, activation='relu'))\n# model_1.add(Dense(4, activation='softmax'))\n \n# model_1.compile(loss= 'categorical_crossentropy' ,\n# optimizer= 'adam' ,\n# metrics=[ 'accuracy' ])\n# return model_1\n\n# # Fit the model\n\n# history = KerasClassifier(build_fn=baseline_model, epochs=40, batch_size=200, verbose=True) \n",
"_____no_output_____"
],
[
"# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=42)\n# results = cross_val_score(history,scaled_train_features , target_train, cv=kfold)\n# print(results.mean())\n\n# %matplotlib inline\n# plt.plot(history.history['acc'],'b') \n# plt.plot(history.history['val_acc'],'r')\n# plt.show()",
"_____no_output_____"
],
[
"# model = baseline_model()\n# model.fit(feature_train,target_train, epochs=40, verbose=2)",
"_____no_output_____"
],
[
"# target_test.unique()\n# Standardize the train and test features\nmin_max_scaler = preprocessing.MinMaxScaler()\nX_train = min_max_scaler.fit_transform(feature_train)\nX_test = min_max_scaler.fit_transform(feature_test)\n",
"_____no_output_____"
],
[
"min_max_scaler = preprocessing.MinMaxScaler()\ntop_X_train = min_max_scaler.fit_transform(top_feat_train)\ntop_X_test = min_max_scaler.fit_transform(top_feat_test)\n",
"_____no_output_____"
],
[
"from keras.layers import Input, Dense, Dropout, BatchNormalization\nfrom keras.models import Model, load_model\nimport keras.backend as K\nfrom keras.callbacks import ModelCheckpoint",
"_____no_output_____"
],
[
"# Standardize the train and test features\nK.clear_session()\ntarget_test = np.array(target_test_int)\ntarget_train = np.array(target_train_int)\n# scaled_train_features = scale(feature_train)\n# scaled_test_features = scale(feature_test)\n# Create the model\nK.clear_session()\n\ninputs = Input(shape=(top_X_train.shape[1], ))\nx1 = Dense(128)(inputs)\nx1 = BatchNormalization()(x1)\nx1 = Activation('relu')(x1)\nx1 = Dropout(0.5)(x1)\n\n# x2 = Dense(16, activation='relu')(x1)\n# x2 = BatchNormalization()(x2)\n# x2 = Activation('relu')(x2)\n\n# x3 = Dense(16, activation='relu')(x2)\n# x3 = BatchNormalization()(x3)\n# x3 = Activation('relu')(x3)\n\n# x4 = Dense(8, activation='relu')(x3)\n# x4 = BatchNormalization()(x4)\n# x4 = Activation('relu')(x4)\n\n# x5 = Dense(16, activation='relu')(x4)\n# x5 = BatchNormalization()(x5)\n# x5 = Activation('relu')(x5)\n\n\n# x6 = Dense(32, activation='relu')(x5)\n# x6 = BatchNormalization()(x6)\n# x6 = Activation('relu')(x6)\n\n\n# x7 = Dense(64, activation='relu')(x6)\n# x7 = BatchNormalization()(x7)\n# x7 = Activation('relu')(x7)\n\nx = Dense(4, activation='softmax')(x1)\n\ncheckpoint = ModelCheckpoint('3_layer_dense.h5',\n monitor='val_loss',\n save_best_only=True)\ncb = [checkpoint]\n\n# this compiles our model so it is ready to fit\nmodel = Model(inputs, x)\nmodel.compile(loss= 'categorical_crossentropy' ,\n optimizer= 'adam' ,\n metrics=[ 'accuracy' ])\n\nmodel.summary()\n",
"_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ninput_1 (InputLayer) (None, 5) 0 \n_________________________________________________________________\ndense_1 (Dense) (None, 128) 768 \n_________________________________________________________________\nbatch_normalization_1 (Batch (None, 128) 512 \n_________________________________________________________________\nactivation_1 (Activation) (None, 128) 0 \n_________________________________________________________________\ndropout_1 (Dropout) (None, 128) 0 \n_________________________________________________________________\ndense_2 (Dense) (None, 4) 516 \n=================================================================\nTotal params: 1,796\nTrainable params: 1,540\nNon-trainable params: 256\n_________________________________________________________________\n"
],
[
"2**7",
"_____no_output_____"
],
[
"#target_test = categorical_labels[stop:]\n#target_train = categorical_labels[:stop]\n# we actually fit the model here\nhistory = model.fit(top_X_train,\n target_train,\n epochs=100,\n validation_split=0.15,\n callbacks=cb,\n batch_size=200)",
"Train on 2645 samples, validate on 467 samples\nEpoch 1/100\n2645/2645 [==============================] - 1s 494us/step - loss: 1.6835 - acc: 0.2469 - val_loss: 1.4954 - val_acc: 0.1349\nEpoch 2/100\n2645/2645 [==============================] - 0s 50us/step - loss: 1.5413 - acc: 0.2563 - val_loss: 1.4504 - val_acc: 0.2441\nEpoch 3/100\n2645/2645 [==============================] - 0s 30us/step - loss: 1.5222 - acc: 0.2699 - val_loss: 1.4023 - val_acc: 0.2719\nEpoch 4/100\n2645/2645 [==============================] - 0s 51us/step - loss: 1.5103 - acc: 0.2665 - val_loss: 1.4080 - val_acc: 0.2463\nEpoch 5/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.5254 - acc: 0.2480 - val_loss: 1.4041 - val_acc: 0.2355\nEpoch 6/100\n2645/2645 [==============================] - 0s 77us/step - loss: 1.4828 - acc: 0.2677 - val_loss: 1.3979 - val_acc: 0.2334\nEpoch 7/100\n2645/2645 [==============================] - 0s 53us/step - loss: 1.4722 - acc: 0.2817 - val_loss: 1.3723 - val_acc: 0.2655\nEpoch 8/100\n2645/2645 [==============================] - 0s 40us/step - loss: 1.4706 - acc: 0.2771 - val_loss: 1.3670 - val_acc: 0.2784\nEpoch 9/100\n2645/2645 [==============================] - 0s 46us/step - loss: 1.4615 - acc: 0.2703 - val_loss: 1.3669 - val_acc: 0.2677\nEpoch 10/100\n2645/2645 [==============================] - 0s 45us/step - loss: 1.4448 - acc: 0.2745 - val_loss: 1.3642 - val_acc: 0.3019\nEpoch 11/100\n2645/2645 [==============================] - 0s 53us/step - loss: 1.4509 - acc: 0.2669 - val_loss: 1.3574 - val_acc: 0.3062\nEpoch 12/100\n2645/2645 [==============================] - 0s 74us/step - loss: 1.4367 - acc: 0.2783 - val_loss: 1.3524 - val_acc: 0.2869\nEpoch 13/100\n2645/2645 [==============================] - 0s 40us/step - loss: 1.4330 - acc: 0.2745 - val_loss: 1.3518 - val_acc: 0.2677\nEpoch 14/100\n2645/2645 [==============================] - 0s 56us/step - loss: 1.4352 - acc: 0.2767 - val_loss: 1.3501 - val_acc: 0.2570\nEpoch 15/100\n2645/2645 [==============================] - 0s 31us/step - loss: 1.4193 - acc: 0.2802 - val_loss: 1.3691 - val_acc: 0.2719\nEpoch 16/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.4117 - acc: 0.2870 - val_loss: 1.3651 - val_acc: 0.2634\nEpoch 17/100\n2645/2645 [==============================] - 0s 106us/step - loss: 1.4149 - acc: 0.2824 - val_loss: 1.3527 - val_acc: 0.2634\nEpoch 18/100\n2645/2645 [==============================] - 0s 80us/step - loss: 1.4044 - acc: 0.2949 - val_loss: 1.3646 - val_acc: 0.2591\nEpoch 19/100\n2645/2645 [==============================] - 0s 75us/step - loss: 1.4064 - acc: 0.2900 - val_loss: 1.3666 - val_acc: 0.2698\nEpoch 20/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3931 - acc: 0.2919 - val_loss: 1.3587 - val_acc: 0.2655\nEpoch 21/100\n2645/2645 [==============================] - 0s 77us/step - loss: 1.3942 - acc: 0.2873 - val_loss: 1.3469 - val_acc: 0.2655\nEpoch 22/100\n2645/2645 [==============================] - 0s 54us/step - loss: 1.4031 - acc: 0.2888 - val_loss: 1.3503 - val_acc: 0.2655\nEpoch 23/100\n2645/2645 [==============================] - 0s 77us/step - loss: 1.3988 - acc: 0.2896 - val_loss: 1.3567 - val_acc: 0.2805\nEpoch 24/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3852 - acc: 0.2820 - val_loss: 1.3583 - val_acc: 0.2891\nEpoch 25/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3887 - acc: 0.2979 - val_loss: 1.3440 - val_acc: 0.2719\nEpoch 26/100\n2645/2645 
[==============================] - 0s 53us/step - loss: 1.3911 - acc: 0.2945 - val_loss: 1.3411 - val_acc: 0.2891\nEpoch 27/100\n2645/2645 [==============================] - 0s 40us/step - loss: 1.3874 - acc: 0.2900 - val_loss: 1.3481 - val_acc: 0.2784\nEpoch 28/100\n2645/2645 [==============================] - 0s 106us/step - loss: 1.3890 - acc: 0.2998 - val_loss: 1.3498 - val_acc: 0.2827\nEpoch 29/100\n2645/2645 [==============================] - 0s 45us/step - loss: 1.3847 - acc: 0.2896 - val_loss: 1.3657 - val_acc: 0.2655\nEpoch 30/100\n2645/2645 [==============================] - 0s 74us/step - loss: 1.3818 - acc: 0.3006 - val_loss: 1.3496 - val_acc: 0.3062\nEpoch 31/100\n2645/2645 [==============================] - 0s 45us/step - loss: 1.3740 - acc: 0.3025 - val_loss: 1.3520 - val_acc: 0.3084\nEpoch 32/100\n2645/2645 [==============================] - 0s 75us/step - loss: 1.3768 - acc: 0.3081 - val_loss: 1.3584 - val_acc: 0.2719\nEpoch 33/100\n2645/2645 [==============================] - 0s 75us/step - loss: 1.3814 - acc: 0.3081 - val_loss: 1.3660 - val_acc: 0.2741\nEpoch 34/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3820 - acc: 0.2953 - val_loss: 1.3594 - val_acc: 0.2848\nEpoch 35/100\n2645/2645 [==============================] - 0s 75us/step - loss: 1.3799 - acc: 0.2957 - val_loss: 1.3643 - val_acc: 0.2805\nEpoch 36/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3805 - acc: 0.3002 - val_loss: 1.3672 - val_acc: 0.2891\nEpoch 37/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3782 - acc: 0.3051 - val_loss: 1.3572 - val_acc: 0.3126\nEpoch 38/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3708 - acc: 0.3138 - val_loss: 1.3597 - val_acc: 0.2998\nEpoch 39/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3659 - acc: 0.3214 - val_loss: 1.3638 - val_acc: 0.2998\nEpoch 40/100\n2645/2645 [==============================] - 0s 77us/step - loss: 1.3717 - acc: 0.3051 - val_loss: 1.3559 - val_acc: 0.3169\nEpoch 41/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3702 - acc: 0.3168 - val_loss: 1.3562 - val_acc: 0.3084\nEpoch 42/100\n2645/2645 [==============================] - 0s 79us/step - loss: 1.3699 - acc: 0.3093 - val_loss: 1.3560 - val_acc: 0.3084\nEpoch 43/100\n2645/2645 [==============================] - 0s 72us/step - loss: 1.3730 - acc: 0.3100 - val_loss: 1.3594 - val_acc: 0.3169\nEpoch 44/100\n2645/2645 [==============================] - 0s 106us/step - loss: 1.3696 - acc: 0.3217 - val_loss: 1.3547 - val_acc: 0.2955\nEpoch 45/100\n2645/2645 [==============================] - 0s 75us/step - loss: 1.3736 - acc: 0.3066 - val_loss: 1.3629 - val_acc: 0.2805\nEpoch 46/100\n2645/2645 [==============================] - 0s 77us/step - loss: 1.3730 - acc: 0.3161 - val_loss: 1.3682 - val_acc: 0.2976\nEpoch 47/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3687 - acc: 0.3130 - val_loss: 1.3562 - val_acc: 0.2976\nEpoch 48/100\n2645/2645 [==============================] - 0s 77us/step - loss: 1.3758 - acc: 0.3100 - val_loss: 1.3520 - val_acc: 0.2762\nEpoch 49/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3683 - acc: 0.3142 - val_loss: 1.3517 - val_acc: 0.3062\nEpoch 50/100\n2645/2645 [==============================] - 0s 75us/step - loss: 1.3696 - acc: 0.3104 - val_loss: 1.3508 - val_acc: 0.3148\nEpoch 51/100\n2645/2645 [==============================] - 0s 106us/step - loss: 1.3743 - acc: 0.3108 
- val_loss: 1.3470 - val_acc: 0.2848\nEpoch 52/100\n2645/2645 [==============================] - 0s 75us/step - loss: 1.3757 - acc: 0.2922 - val_loss: 1.3470 - val_acc: 0.3062\nEpoch 53/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3739 - acc: 0.3108 - val_loss: 1.3370 - val_acc: 0.3148\nEpoch 54/100\n2645/2645 [==============================] - 0s 43us/step - loss: 1.3647 - acc: 0.3180 - val_loss: 1.3422 - val_acc: 0.3041\nEpoch 55/100\n2645/2645 [==============================] - 0s 75us/step - loss: 1.3686 - acc: 0.3161 - val_loss: 1.3468 - val_acc: 0.3084\nEpoch 56/100\n2645/2645 [==============================] - 0s 106us/step - loss: 1.3666 - acc: 0.3248 - val_loss: 1.3533 - val_acc: 0.3084\nEpoch 57/100\n2645/2645 [==============================] - 0s 75us/step - loss: 1.3661 - acc: 0.3157 - val_loss: 1.3548 - val_acc: 0.3084\nEpoch 58/100\n2645/2645 [==============================] - 0s 75us/step - loss: 1.3646 - acc: 0.3217 - val_loss: 1.3543 - val_acc: 0.3105\nEpoch 59/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3679 - acc: 0.3164 - val_loss: 1.3624 - val_acc: 0.3105\nEpoch 60/100\n2645/2645 [==============================] - 0s 77us/step - loss: 1.3655 - acc: 0.3274 - val_loss: 1.3542 - val_acc: 0.3062\nEpoch 61/100\n2645/2645 [==============================] - 0s 77us/step - loss: 1.3648 - acc: 0.3308 - val_loss: 1.3562 - val_acc: 0.2848\nEpoch 62/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3644 - acc: 0.3206 - val_loss: 1.3676 - val_acc: 0.2677\nEpoch 63/100\n2645/2645 [==============================] - 0s 75us/step - loss: 1.3682 - acc: 0.3028 - val_loss: 1.3732 - val_acc: 0.2805\nEpoch 64/100\n2645/2645 [==============================] - 0s 45us/step - loss: 1.3620 - acc: 0.3217 - val_loss: 1.3688 - val_acc: 0.2934\nEpoch 65/100\n2645/2645 [==============================] - 0s 106us/step - loss: 1.3632 - acc: 0.3229 - val_loss: 1.3682 - val_acc: 0.2955\nEpoch 66/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3653 - acc: 0.3323 - val_loss: 1.3671 - val_acc: 0.3019\nEpoch 67/100\n2645/2645 [==============================] - 0s 77us/step - loss: 1.3638 - acc: 0.3233 - val_loss: 1.3698 - val_acc: 0.3019\nEpoch 68/100\n2645/2645 [==============================] - 0s 77us/step - loss: 1.3661 - acc: 0.3217 - val_loss: 1.3665 - val_acc: 0.3169\nEpoch 69/100\n2645/2645 [==============================] - 0s 75us/step - loss: 1.3640 - acc: 0.3108 - val_loss: 1.3611 - val_acc: 0.3105\nEpoch 70/100\n2645/2645 [==============================] - 0s 77us/step - loss: 1.3650 - acc: 0.3198 - val_loss: 1.3608 - val_acc: 0.3105\nEpoch 71/100\n2645/2645 [==============================] - 0s 106us/step - loss: 1.3660 - acc: 0.3293 - val_loss: 1.3651 - val_acc: 0.3105\nEpoch 72/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3684 - acc: 0.3130 - val_loss: 1.3626 - val_acc: 0.3126\nEpoch 73/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3630 - acc: 0.3251 - val_loss: 1.3540 - val_acc: 0.3019\nEpoch 74/100\n2645/2645 [==============================] - 0s 77us/step - loss: 1.3656 - acc: 0.3229 - val_loss: 1.3585 - val_acc: 0.3148\nEpoch 75/100\n2645/2645 [==============================] - 0s 78us/step - loss: 1.3645 - acc: 0.3282 - val_loss: 1.3647 - val_acc: 0.3105\nEpoch 76/100\n2645/2645 [==============================] - 0s 107us/step - loss: 1.3635 - acc: 0.3172 - val_loss: 1.3613 - val_acc: 0.3019\nEpoch 77/100\n2645/2645 
[==============================] - 0s 75us/step - loss: 1.3633 - acc: 0.3236 - val_loss: 1.3549 - val_acc: 0.3062\nEpoch 78/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3628 - acc: 0.3130 - val_loss: 1.3528 - val_acc: 0.2998\nEpoch 79/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3602 - acc: 0.3316 - val_loss: 1.3597 - val_acc: 0.3084\nEpoch 80/100\n2645/2645 [==============================] - 0s 78us/step - loss: 1.3582 - acc: 0.3289 - val_loss: 1.3543 - val_acc: 0.3062\nEpoch 81/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3676 - acc: 0.3221 - val_loss: 1.3552 - val_acc: 0.3084\nEpoch 82/100\n2645/2645 [==============================] - 0s 77us/step - loss: 1.3605 - acc: 0.3327 - val_loss: 1.3523 - val_acc: 0.3126\nEpoch 83/100\n2645/2645 [==============================] - 0s 75us/step - loss: 1.3646 - acc: 0.3168 - val_loss: 1.3546 - val_acc: 0.2912\nEpoch 84/100\n2645/2645 [==============================] - 0s 106us/step - loss: 1.3585 - acc: 0.3323 - val_loss: 1.3528 - val_acc: 0.2998\nEpoch 85/100\n2645/2645 [==============================] - 0s 75us/step - loss: 1.3584 - acc: 0.3282 - val_loss: 1.3604 - val_acc: 0.2976\nEpoch 86/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3627 - acc: 0.3191 - val_loss: 1.3492 - val_acc: 0.2955\nEpoch 87/100\n2645/2645 [==============================] - 0s 75us/step - loss: 1.3624 - acc: 0.3251 - val_loss: 1.3520 - val_acc: 0.2805\nEpoch 88/100\n2645/2645 [==============================] - 0s 77us/step - loss: 1.3653 - acc: 0.3255 - val_loss: 1.3507 - val_acc: 0.3041\nEpoch 89/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3641 - acc: 0.3259 - val_loss: 1.3557 - val_acc: 0.2976\nEpoch 90/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3627 - acc: 0.3274 - val_loss: 1.3607 - val_acc: 0.2762\nEpoch 91/100\n2645/2645 [==============================] - 0s 77us/step - loss: 1.3606 - acc: 0.3214 - val_loss: 1.3635 - val_acc: 0.2762\nEpoch 92/100\n2645/2645 [==============================] - 0s 106us/step - loss: 1.3637 - acc: 0.3236 - val_loss: 1.3554 - val_acc: 0.2805\nEpoch 93/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3621 - acc: 0.3331 - val_loss: 1.3588 - val_acc: 0.2869\nEpoch 94/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3603 - acc: 0.3130 - val_loss: 1.3512 - val_acc: 0.2976\nEpoch 95/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3663 - acc: 0.3176 - val_loss: 1.3527 - val_acc: 0.3019\nEpoch 96/100\n2645/2645 [==============================] - 0s 77us/step - loss: 1.3601 - acc: 0.3221 - val_loss: 1.3615 - val_acc: 0.3019\nEpoch 97/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3601 - acc: 0.3248 - val_loss: 1.3641 - val_acc: 0.3084\nEpoch 98/100\n2645/2645 [==============================] - 0s 75us/step - loss: 1.3576 - acc: 0.3342 - val_loss: 1.3642 - val_acc: 0.3105\nEpoch 99/100\n2645/2645 [==============================] - 0s 107us/step - loss: 1.3623 - acc: 0.3327 - val_loss: 1.3669 - val_acc: 0.2976\nEpoch 100/100\n2645/2645 [==============================] - 0s 77us/step - loss: 1.3609 - acc: 0.3331 - val_loss: 1.3766 - val_acc: 0.3041\n"
],
[
"plt.plot(history.history['loss'], label = 'training loss')\nplt.plot(history.history['val_loss'], label = 'validation loss')\nplt.legend()\nplt.show()",
"_____no_output_____"
],
[
"history = model.fit(top_X_train,\n target_train,\n epochs=100,\n validation_split=0.15,\n callbacks=cb,\n batch_size=200)\ntarget_pred = model.predict(top_X_test)\n\nprint(\"Accuracy:\",metrics.accuracy_score(target_test_int, target_pred),'\\n'\n 'Cohans Kappa:', metrics.cohen_kappa_score(target_test_int, target_pred),'\\n'\n 'Train ACC:', metrics.accuracy_score(target_train_int, target_pred), '\\n'\n \"Confusion Matrix:\",'\\n',\n metrics.confusion_matrix(target_test, target_pred))\n\n\n",
"Train on 2645 samples, validate on 467 samples\nEpoch 1/100\n2645/2645 [==============================] - 0s 100us/step - loss: 1.3597 - acc: 0.3267 - val_loss: 1.3933 - val_acc: 0.2848\nEpoch 2/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3601 - acc: 0.3301 - val_loss: 1.4073 - val_acc: 0.2698\nEpoch 3/100\n2645/2645 [==============================] - 0s 75us/step - loss: 1.3571 - acc: 0.3391 - val_loss: 1.4067 - val_acc: 0.2441\nEpoch 4/100\n2645/2645 [==============================] - 0s 75us/step - loss: 1.3622 - acc: 0.3353 - val_loss: 1.4056 - val_acc: 0.2698\nEpoch 5/100\n2645/2645 [==============================] - 0s 77us/step - loss: 1.3548 - acc: 0.3323 - val_loss: 1.4057 - val_acc: 0.2612\nEpoch 6/100\n2645/2645 [==============================] - 0s 77us/step - loss: 1.3600 - acc: 0.3327 - val_loss: 1.4061 - val_acc: 0.2741\nEpoch 7/100\n2645/2645 [==============================] - 0s 77us/step - loss: 1.3611 - acc: 0.3263 - val_loss: 1.3981 - val_acc: 0.2762\nEpoch 8/100\n2645/2645 [==============================] - 0s 107us/step - loss: 1.3611 - acc: 0.3251 - val_loss: 1.3883 - val_acc: 0.2827\nEpoch 9/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3585 - acc: 0.3301 - val_loss: 1.3903 - val_acc: 0.2655\nEpoch 10/100\n2645/2645 [==============================] - 0s 77us/step - loss: 1.3602 - acc: 0.3214 - val_loss: 1.3872 - val_acc: 0.2591\nEpoch 11/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3580 - acc: 0.3384 - val_loss: 1.3947 - val_acc: 0.2612\nEpoch 12/100\n2645/2645 [==============================] - 0s 77us/step - loss: 1.3643 - acc: 0.3270 - val_loss: 1.4091 - val_acc: 0.2698\nEpoch 13/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3583 - acc: 0.3338 - val_loss: 1.4052 - val_acc: 0.2805\nEpoch 14/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3616 - acc: 0.3301 - val_loss: 1.3986 - val_acc: 0.2848\nEpoch 15/100\n2645/2645 [==============================] - 0s 75us/step - loss: 1.3623 - acc: 0.3259 - val_loss: 1.3918 - val_acc: 0.2827\nEpoch 16/100\n2645/2645 [==============================] - 0s 106us/step - loss: 1.3591 - acc: 0.3316 - val_loss: 1.4034 - val_acc: 0.2677\nEpoch 17/100\n2645/2645 [==============================] - 0s 75us/step - loss: 1.3563 - acc: 0.3342 - val_loss: 1.4009 - val_acc: 0.2634\nEpoch 18/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3562 - acc: 0.3350 - val_loss: 1.3889 - val_acc: 0.2848\nEpoch 19/100\n2645/2645 [==============================] - 0s 78us/step - loss: 1.3587 - acc: 0.3316 - val_loss: 1.3983 - val_acc: 0.2827\nEpoch 20/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3610 - acc: 0.3308 - val_loss: 1.3903 - val_acc: 0.2805\nEpoch 21/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3569 - acc: 0.3274 - val_loss: 1.3859 - val_acc: 0.2677\nEpoch 22/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3580 - acc: 0.3240 - val_loss: 1.3875 - val_acc: 0.2719\nEpoch 23/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3568 - acc: 0.3350 - val_loss: 1.3954 - val_acc: 0.2612\nEpoch 24/100\n2645/2645 [==============================] - 0s 75us/step - loss: 1.3559 - acc: 0.3285 - val_loss: 1.3967 - val_acc: 0.2698\nEpoch 25/100\n2645/2645 [==============================] - 0s 77us/step - loss: 1.3542 - acc: 0.3357 - val_loss: 1.4086 - val_acc: 0.2463\nEpoch 26/100\n2645/2645 
[==============================] - 0s 77us/step - loss: 1.3578 - acc: 0.3388 - val_loss: 1.4270 - val_acc: 0.2570\nEpoch 27/100\n2645/2645 [==============================] - 0s 75us/step - loss: 1.3596 - acc: 0.3376 - val_loss: 1.4184 - val_acc: 0.2505\nEpoch 28/100\n2645/2645 [==============================] - 0s 77us/step - loss: 1.3568 - acc: 0.3293 - val_loss: 1.4012 - val_acc: 0.2612\nEpoch 29/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3591 - acc: 0.3240 - val_loss: 1.3995 - val_acc: 0.2677\nEpoch 30/100\n2645/2645 [==============================] - 0s 106us/step - loss: 1.3579 - acc: 0.3357 - val_loss: 1.4214 - val_acc: 0.2570\nEpoch 31/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3589 - acc: 0.3323 - val_loss: 1.4125 - val_acc: 0.2698\nEpoch 32/100\n2645/2645 [==============================] - 0s 75us/step - loss: 1.3572 - acc: 0.3229 - val_loss: 1.3994 - val_acc: 0.2719\nEpoch 33/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3573 - acc: 0.3346 - val_loss: 1.4010 - val_acc: 0.2634\nEpoch 34/100\n2645/2645 [==============================] - 0s 77us/step - loss: 1.3603 - acc: 0.3353 - val_loss: 1.3901 - val_acc: 0.2548\nEpoch 35/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3614 - acc: 0.3369 - val_loss: 1.4003 - val_acc: 0.2570\nEpoch 36/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3602 - acc: 0.3225 - val_loss: 1.4003 - val_acc: 0.2591\nEpoch 37/100\n2645/2645 [==============================] - 0s 107us/step - loss: 1.3587 - acc: 0.3233 - val_loss: 1.3895 - val_acc: 0.2934\nEpoch 38/100\n2645/2645 [==============================] - 0s 75us/step - loss: 1.3537 - acc: 0.3251 - val_loss: 1.3891 - val_acc: 0.2634\nEpoch 39/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3594 - acc: 0.3331 - val_loss: 1.3966 - val_acc: 0.2698\nEpoch 40/100\n2645/2645 [==============================] - 0s 77us/step - loss: 1.3583 - acc: 0.3172 - val_loss: 1.3940 - val_acc: 0.2612\nEpoch 41/100\n2645/2645 [==============================] - 0s 77us/step - loss: 1.3554 - acc: 0.3353 - val_loss: 1.3933 - val_acc: 0.2591\nEpoch 42/100\n2645/2645 [==============================] - 0s 77us/step - loss: 1.3561 - acc: 0.3372 - val_loss: 1.4042 - val_acc: 0.2591\nEpoch 43/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3579 - acc: 0.3395 - val_loss: 1.4010 - val_acc: 0.2719\nEpoch 44/100\n2645/2645 [==============================] - 0s 75us/step - loss: 1.3562 - acc: 0.3278 - val_loss: 1.4014 - val_acc: 0.2762\nEpoch 45/100\n2645/2645 [==============================] - 0s 77us/step - loss: 1.3563 - acc: 0.3384 - val_loss: 1.4020 - val_acc: 0.2655\nEpoch 46/100\n2645/2645 [==============================] - 0s 77us/step - loss: 1.3589 - acc: 0.3263 - val_loss: 1.3945 - val_acc: 0.2677\nEpoch 47/100\n2645/2645 [==============================] - 0s 77us/step - loss: 1.3535 - acc: 0.3456 - val_loss: 1.3996 - val_acc: 0.2484\nEpoch 48/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3609 - acc: 0.3304 - val_loss: 1.3955 - val_acc: 0.2591\nEpoch 49/100\n2645/2645 [==============================] - 0s 78us/step - loss: 1.3570 - acc: 0.3448 - val_loss: 1.3978 - val_acc: 0.2719\nEpoch 50/100\n2645/2645 [==============================] - 0s 73us/step - loss: 1.3607 - acc: 0.3357 - val_loss: 1.3854 - val_acc: 0.2741\nEpoch 51/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3547 - acc: 0.3452 
- val_loss: 1.3909 - val_acc: 0.2570\nEpoch 52/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3575 - acc: 0.3270 - val_loss: 1.3896 - val_acc: 0.2484\nEpoch 53/100\n2645/2645 [==============================] - 0s 77us/step - loss: 1.3564 - acc: 0.3388 - val_loss: 1.3917 - val_acc: 0.2505\nEpoch 54/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3552 - acc: 0.3316 - val_loss: 1.4060 - val_acc: 0.2527\nEpoch 55/100\n2645/2645 [==============================] - 0s 77us/step - loss: 1.3552 - acc: 0.3263 - val_loss: 1.4014 - val_acc: 0.2484\nEpoch 56/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3584 - acc: 0.3293 - val_loss: 1.3891 - val_acc: 0.2527\nEpoch 57/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3552 - acc: 0.3425 - val_loss: 1.3983 - val_acc: 0.2655\nEpoch 58/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3585 - acc: 0.3353 - val_loss: 1.3914 - val_acc: 0.2612\nEpoch 59/100\n2645/2645 [==============================] - 0s 106us/step - loss: 1.3513 - acc: 0.3388 - val_loss: 1.3918 - val_acc: 0.2634\nEpoch 60/100\n2645/2645 [==============================] - 0s 75us/step - loss: 1.3591 - acc: 0.3312 - val_loss: 1.3879 - val_acc: 0.2762\nEpoch 61/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3569 - acc: 0.3395 - val_loss: 1.3899 - val_acc: 0.2784\nEpoch 62/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3520 - acc: 0.3357 - val_loss: 1.3943 - val_acc: 0.2612\nEpoch 63/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3594 - acc: 0.3406 - val_loss: 1.4027 - val_acc: 0.2612\nEpoch 64/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3561 - acc: 0.3289 - val_loss: 1.4108 - val_acc: 0.2548\nEpoch 65/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3555 - acc: 0.3353 - val_loss: 1.4093 - val_acc: 0.2677\nEpoch 66/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3534 - acc: 0.3403 - val_loss: 1.4106 - val_acc: 0.2655\nEpoch 67/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3556 - acc: 0.3312 - val_loss: 1.4006 - val_acc: 0.2698\nEpoch 68/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3568 - acc: 0.3293 - val_loss: 1.3983 - val_acc: 0.2741\nEpoch 69/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3591 - acc: 0.3384 - val_loss: 1.4016 - val_acc: 0.2784\nEpoch 70/100\n2645/2645 [==============================] - 0s 75us/step - loss: 1.3575 - acc: 0.3293 - val_loss: 1.4085 - val_acc: 0.2698\nEpoch 71/100\n2645/2645 [==============================] - 0s 77us/step - loss: 1.3569 - acc: 0.3285 - val_loss: 1.4060 - val_acc: 0.2570\nEpoch 72/100\n2645/2645 [==============================] - 0s 77us/step - loss: 1.3551 - acc: 0.3342 - val_loss: 1.4110 - val_acc: 0.2570\nEpoch 73/100\n2645/2645 [==============================] - 0s 77us/step - loss: 1.3548 - acc: 0.3418 - val_loss: 1.4127 - val_acc: 0.2655\nEpoch 74/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3584 - acc: 0.3395 - val_loss: 1.4079 - val_acc: 0.2612\nEpoch 75/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3543 - acc: 0.3319 - val_loss: 1.4081 - val_acc: 0.2655\nEpoch 76/100\n2645/2645 [==============================] - 0s 106us/step - loss: 1.3537 - acc: 0.3316 - val_loss: 1.4114 - val_acc: 0.2655\nEpoch 77/100\n2645/2645 
[==============================] - 0s 76us/step - loss: 1.3551 - acc: 0.3437 - val_loss: 1.4123 - val_acc: 0.2591\nEpoch 78/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3559 - acc: 0.3316 - val_loss: 1.4159 - val_acc: 0.2655\nEpoch 79/100\n2645/2645 [==============================] - 0s 77us/step - loss: 1.3555 - acc: 0.3312 - val_loss: 1.4192 - val_acc: 0.2698\nEpoch 80/100\n2645/2645 [==============================] - 0s 77us/step - loss: 1.3584 - acc: 0.3372 - val_loss: 1.4174 - val_acc: 0.2741\nEpoch 81/100\n2645/2645 [==============================] - 0s 77us/step - loss: 1.3522 - acc: 0.3410 - val_loss: 1.4152 - val_acc: 0.2719\nEpoch 82/100\n2645/2645 [==============================] - 0s 107us/step - loss: 1.3578 - acc: 0.3342 - val_loss: 1.4170 - val_acc: 0.2634\nEpoch 83/100\n2645/2645 [==============================] - 0s 75us/step - loss: 1.3538 - acc: 0.3440 - val_loss: 1.4213 - val_acc: 0.2719\nEpoch 84/100\n2645/2645 [==============================] - 0s 77us/step - loss: 1.3573 - acc: 0.3406 - val_loss: 1.4144 - val_acc: 0.2634\nEpoch 85/100\n2645/2645 [==============================] - 0s 77us/step - loss: 1.3529 - acc: 0.3365 - val_loss: 1.4225 - val_acc: 0.2505\nEpoch 86/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3510 - acc: 0.3474 - val_loss: 1.4211 - val_acc: 0.2612\nEpoch 87/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3571 - acc: 0.3384 - val_loss: 1.4266 - val_acc: 0.2677\nEpoch 88/100\n2645/2645 [==============================] - 0s 75us/step - loss: 1.3543 - acc: 0.3388 - val_loss: 1.4323 - val_acc: 0.2634\nEpoch 89/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3555 - acc: 0.3380 - val_loss: 1.4110 - val_acc: 0.2762\nEpoch 90/100\n2645/2645 [==============================] - 0s 106us/step - loss: 1.3555 - acc: 0.3384 - val_loss: 1.4057 - val_acc: 0.2527\nEpoch 91/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3595 - acc: 0.3372 - val_loss: 1.4058 - val_acc: 0.2591\nEpoch 92/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3580 - acc: 0.3338 - val_loss: 1.4146 - val_acc: 0.2677\nEpoch 93/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3544 - acc: 0.3440 - val_loss: 1.4292 - val_acc: 0.2484\nEpoch 94/100\n2645/2645 [==============================] - 0s 77us/step - loss: 1.3603 - acc: 0.3293 - val_loss: 1.4212 - val_acc: 0.2698\nEpoch 95/100\n2645/2645 [==============================] - 0s 75us/step - loss: 1.3533 - acc: 0.3384 - val_loss: 1.4079 - val_acc: 0.2719\nEpoch 96/100\n2645/2645 [==============================] - 0s 77us/step - loss: 1.3551 - acc: 0.3365 - val_loss: 1.4101 - val_acc: 0.2677\nEpoch 97/100\n2645/2645 [==============================] - 0s 105us/step - loss: 1.3537 - acc: 0.3463 - val_loss: 1.4100 - val_acc: 0.2698\nEpoch 98/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3580 - acc: 0.3418 - val_loss: 1.4063 - val_acc: 0.2698\nEpoch 99/100\n2645/2645 [==============================] - 0s 75us/step - loss: 1.3568 - acc: 0.3406 - val_loss: 1.4128 - val_acc: 0.2591\nEpoch 100/100\n2645/2645 [==============================] - 0s 76us/step - loss: 1.3583 - acc: 0.3376 - val_loss: 1.4072 - val_acc: 0.2762\n"
],
[
"target_pred",
"_____no_output_____"
],
[
"target_train_int",
"_____no_output_____"
],
[
"targets = df_cln[target_name].values\ntarg_test = targets[stop:]\ntarg_train = targets[:stop]\ntarg_train[0:10]",
"_____no_output_____"
],
[
"history.predict_classes(feature_test)",
"_____no_output_____"
],
[
"top_X_train,\n target_train,\n epochs=100,\n validation_split=0.15,\n callbacks=cb,\n batch_size=200",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d0e9c6993152e6ed491bf477e009749b3c2e62a9 | 32,506 | ipynb | Jupyter Notebook | ?_Mapping_Cities.ipynb | Stalkcomrade/kickstarter_topic_modeling | 0d17357d6ba5e5c06d5f1b7bbb149dbc5fedee5a | [
"MIT"
] | null | null | null | ?_Mapping_Cities.ipynb | Stalkcomrade/kickstarter_topic_modeling | 0d17357d6ba5e5c06d5f1b7bbb149dbc5fedee5a | [
"MIT"
] | null | null | null | ?_Mapping_Cities.ipynb | Stalkcomrade/kickstarter_topic_modeling | 0d17357d6ba5e5c06d5f1b7bbb149dbc5fedee5a | [
"MIT"
] | null | null | null | 36.441704 | 195 | 0.346582 | [
[
[
"import pandas as pd\nimport numpy as np\n\n\n# with open(\"/home/stlk/Desktop/DigEc_data_additional/worldcitiespop.txt\", encoding='utf-8') as f:\n \n\nd = pd.read_csv(\"/home/stlk/Desktop/DigEc_data_additional/worldcitiespop.txt\", encoding='latin1', sep=',')",
"/home/stlk/.local/lib/python3.5/site-packages/IPython/core/interactiveshell.py:2698: DtypeWarning: Columns (3) have mixed types. Specify dtype option on import or set low_memory=False.\n interactivity=interactivity, compiler=compiler, result=result)\n"
],
[
"# d = d.dropna(axis=\"1\")\nd.dropna?",
"_____no_output_____"
],
[
"# d.City.unique\n\n# d[d.City == \"moscow\"]\n\n#g = d.groupby([\"AccentCity\", \"Population\"])\n\nd.sort_values([\"AccentCity\", \"Population\"], ascending=False).groupby('AccentCity').head(3)\n\n# g = d['AccentCity'].groupby(level=0, group_keys=False)\n# res = g.apply(lambda x: x.order(ascending=False).head(3))",
"_____no_output_____"
]
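,
[
"# A hedged alternative to the cell above (a sketch, not from the original\n# notebook): nlargest() keeps the three most populous rows per city name and\n# reads a little more directly than sort_values + groupby + head\nd.groupby('AccentCity', group_keys=False).apply(lambda g: g.nlargest(3, 'Population'))",
"_____no_output_____"
]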
]
] | [
"code"
] | [
[
"code",
"code",
"code"
]
] |
d0e9ddfb385ee3b0f6d3cacc56016748af2e864e | 1,806 | ipynb | Jupyter Notebook | Data_Analyst_3_work.ipynb | karpokate/data_analys | 6102059e824f5d8fa4f81cf90195859d582e1534 | [
"MIT"
] | null | null | null | Data_Analyst_3_work.ipynb | karpokate/data_analys | 6102059e824f5d8fa4f81cf90195859d582e1534 | [
"MIT"
] | null | null | null | Data_Analyst_3_work.ipynb | karpokate/data_analys | 6102059e824f5d8fa4f81cf90195859d582e1534 | [
"MIT"
] | null | null | null | 34.730769 | 364 | 0.686047 | [
[
[
"# НЕЙРО-НЕЧІТКІ СИСТЕМИ",
"_____no_output_____"
],
[
"1. Ознайомитися з конспектом лекцій та рекомендованою літературою, а також додатками В та Г, що містять коротку характеристику нейро-нечітких мереж та опис програмного забезпечення для їх синтезу, відповідно.\n2. Обґрунтовано сформувати набір даних для обробки та аналізу.\n3. Використовуючи рекомендоване програмне забезпечення здійснити обробку набору даних з метою побудови нейро- нечітких мереж.\n4. Спробувати використати різні алгоритми кластеризації, різні кількості функцій приналежності для входів, різні кількості циклів навчання та різні алгоритми навчання.\n5. Використати побудовані нейро-нечіткі мережі для прийняття рішень на конкретному прикладі.\n6. Проаналізувати отримані результати та відповісти на питання: який алгоритм кластер-аналізу призводить до отримання мережі меньшої складності; як впливає задана кількість циклів навчання на точність навчання; як вливає задана точність навчання на тривалість навчання; які вимоги мають пред’являтися до навчальної вибірки та як це вплине на процес навчання?",
"_____no_output_____"
]
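,
[
"# A minimal, hedged sketch of steps 3-4 above: fuzzy c-means clustering on toy\n# data, one of the clustering algorithms the assignment asks to compare.\n# Assumes the scikit-fuzzy package (skfuzzy) is available; the synthetic data,\n# the number of clusters c=3 and the fuzzifier m=2.0 are illustrative choices,\n# not part of the original assignment.\nimport numpy as np\nimport skfuzzy as fuzz\n\nrng = np.random.default_rng(0)\n# three well-separated 2-D blobs; cmeans expects shape (features, samples)\nX = np.vstack([rng.normal(loc, 0.3, size=(50, 2)) for loc in (0, 2, 4)]).T\n\ncntr, u, _, _, _, _, fpc = fuzz.cluster.cmeans(\n X, c=3, m=2.0, error=1e-5, maxiter=1000, seed=0)\nprint('cluster centres:\\n', cntr)\nprint('fuzzy partition coefficient:', fpc)",
"_____no_output_____"
]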
]
] | [
"markdown"
] | [
[
"markdown",
"markdown"
]
] |
d0e9e89b4200e31546d26bf27e6cd1bac94f4956 | 736,708 | ipynb | Jupyter Notebook | practica2/accidentes2-1.ipynb | dcabezas98/IN | f61a751a2965f31b85249a95f970bd1dbcae01d0 | [
"MIT"
] | null | null | null | practica2/accidentes2-1.ipynb | dcabezas98/IN | f61a751a2965f31b85249a95f970bd1dbcae01d0 | [
"MIT"
] | null | null | null | practica2/accidentes2-1.ipynb | dcabezas98/IN | f61a751a2965f31b85249a95f970bd1dbcae01d0 | [
"MIT"
] | null | null | null | 279.056061 | 163,412 | 0.89808 | [
[
[
"import pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.stats import zscore\nimport sklearn.preprocessing as preproc\nfrom sklearn.cluster import KMeans, DBSCAN\nfrom collections import Counter\nfrom pract2_utils import *",
"_____no_output_____"
],
[
"RESULTS='results/accidentes/'\nDATA='data/accidentes_2013.csv'\n\ndef readData(results_file):\n return pd.read_csv(results_file,header=0,engine='python')",
"_____no_output_____"
],
[
"dataTot=readData(DATA)\ndataTot",
"_____no_output_____"
],
[
"dataTot.columns",
"_____no_output_____"
],
[
"# Atributos numéricos que reflejan la gravedad del accidente, sobre los que haré clustering\natributos=['TOT_VICTIMAS','TOT_MUERTOS','TOT_HERIDOS_GRAVES','TOT_HERIDOS_LEVES','TOT_VEHICULOS_IMPLICADOS']",
"_____no_output_____"
]
],
[
[
"## Caso de uso 2: Estudio de los accidentes a altas horas de la madrugada",
"_____no_output_____"
],
[
"Filtramos los datos para quedarnos con accidentes que ocurren a altas horas de la madrugada.",
"_____no_output_____"
]
],
[
[
"data2=dataTot[dataTot.HORA<=6] \ndata=data2[atributos] # En data2 quedan el resto de variables\ndata",
"_____no_output_____"
],
[
"for a in atributos:\n print(a)\n print(Counter(data[a]))",
"TOT_VICTIMAS\nCounter({1: 4181, 2: 1110, 3: 355, 4: 193, 5: 88, 6: 17, 7: 12, 8: 9, 9: 3, 10: 1})\nTOT_MUERTOS\nCounter({0: 5801, 1: 157, 2: 10, 3: 1})\nTOT_HERIDOS_GRAVES\nCounter({0: 5201, 1: 676, 2: 74, 3: 14, 4: 4})\nTOT_HERIDOS_LEVES\nCounter({1: 3731, 2: 979, 0: 665, 3: 323, 4: 167, 5: 71, 7: 13, 6: 12, 8: 6, 9: 1, 10: 1})\nTOT_VEHICULOS_IMPLICADOS\nCounter({1: 3387, 2: 2247, 3: 227, 4: 60, 5: 31, 6: 12, 7: 2, 0: 1, 9: 1, 8: 1})\n"
],
[
"# Box Plot\nn_var = len(atributos)\nfig, axes = plt.subplots(1, n_var, sharey=True, figsize=(15,5))\nfig.subplots_adjust(wspace=0, hspace=0)\n\ncolors = sns.color_palette(palette=None, n_colors=1, desat=None)\n\nrango = []\nfor j in range(n_var):\n d=data[atributos[j]]\n rango.append([d.min(), d.max()])\n\nfor i in range(1):\n dat_filt = data\n for j in range(n_var):\n ax = sns.boxplot(x=dat_filt[atributos[j]],\n color=colors[i],\n flierprops={\n 'marker': 'o',\n 'markersize': 4\n },\n ax=axes[j], whis=3,showfliers=True)\n\n if (i == 0):\n axes[j].set_xlabel(atributos[j])\n else:\n axes[j].set_xlabel(\"\")\n\n if (j == 0):\n axes[j].set_ylabel(\"\")\n else:\n axes[j].set_ylabel(\"\")\n\n axes[j].set_yticks([])\n axes[j].grid(axis='x',\n linestyle='-',\n linewidth='0.2',\n color='gray')\n axes[j].grid(axis='y', b=False)\n\n ax.set_xlim(rango[j][0] - 0.05 * (rango[j][1] - rango[j][0]),\n rango[j][1] + 0.05 * (rango[j][1] - rango[j][0]))",
"_____no_output_____"
],
[
"# Normalizar los datos\nnormalizer=preproc.MinMaxScaler()\ndata_norm=normalizer.fit_transform(data)\ndata_norm",
"_____no_output_____"
]
],
[
[
"### K-Means",
"_____no_output_____"
]
],
[
[
"# Elección de un número adecuado de Clústers atendiendo a las métricas\nK=list(range(2,10))\nsilhouette=[]\ncalinski=[]\nfor k in K:\n results = KMeans(n_clusters=k, random_state=0).fit(data_norm)\n sil, cal = measures_silhoutte_calinski(data_norm, results.labels_)\n silhouette.append(sil)\n calinski.append(cal)",
"_____no_output_____"
],
[
"print(silhouette)\nprint(calinski)",
"[0.5976557737387063, 0.6051934122627416, 0.5599799072596432, 0.5980975278272924, 0.7136414866797324, 0.7198945488757651, 0.7375867003363279, 0.7777131896978559]\n[3023.2290840361243, 3620.773777961237, 3403.0074721083447, 3516.7066546279298, 3796.6293512906664, 3884.408527765032, 3916.431543161539, 4347.455957160576]\n"
],
[
"plt.plot(K,calinski, 'bo-')\nplt.xlabel('Nº de clústers. K')\nplt.ylabel('Calinski')\nplt.show()\nplt.plot(K,silhouette,'bo-')\nplt.ylabel('Silhouette')\nplt.xlabel('Nº de clústers. K')\nplt.show()",
"_____no_output_____"
]
],
[
[
"Nos quedaremos con K=3. Introducir más clústers no gana tanto score y hace la segmentación muy difícil de interpretar.",
"_____no_output_____"
]
],
[
[
"K=3\nresults = KMeans(n_clusters=K, random_state=0).fit(data_norm)\nlabels=results.labels_\ncentroids=results.cluster_centers_",
"_____no_output_____"
]
],
[
[
"Análisis.",
"_____no_output_____"
]
],
[
[
"Counter(labels)",
"_____no_output_____"
],
[
"visualize_centroids(centroids, np.array(data), atributos, denormCentroids=True)",
"_____no_output_____"
],
[
"pairplot(data,atributos,labels)",
"/home/dcabezas/.local/lib/python3.6/site-packages/seaborn/distributions.py:305: UserWarning: Dataset has 0 variance; skipping density estimate.\n warnings.warn(msg, UserWarning)\n"
],
[
"dataC=data.copy()\ndataC['cluster']=labels\ndataC",
"_____no_output_____"
],
[
"# Box Plot\nn_var = len(atributos)\nfig, axes = plt.subplots(K, n_var, sharey=True, figsize=(15, 15))\nfig.subplots_adjust(wspace=0, hspace=0)\n\ncolors = sns.color_palette(palette=None, n_colors=K, desat=None)\n\nrango = []\nfor j in range(n_var):\n d=dataC[atributos[j]]\n rango.append([d.min(), d.max()])\n\nfor i in range(K):\n dat_filt = dataC.loc[dataC['cluster'] == i]\n for j in range(n_var):\n ax = sns.boxplot(x=dat_filt[atributos[j]],\n color=colors[i],\n flierprops={\n 'marker': 'o',\n 'markersize': 4\n },\n ax=axes[i, j], whis=3,showfliers=True)\n\n if (i == K - 1):\n axes[i, j].set_xlabel(atributos[j])\n else:\n axes[i, j].set_xlabel(\"\")\n\n if (j == 0):\n axes[i, j].set_ylabel(\"Cluster \" + str(i))\n else:\n axes[i, j].set_ylabel(\"\")\n\n axes[i, j].set_yticks([])\n axes[i, j].grid(axis='x',\n linestyle='-',\n linewidth='0.2',\n color='gray')\n axes[i, j].grid(axis='y', b=False)\n\n ax.set_xlim(rango[j][0] - 0.05 * (rango[j][1] - rango[j][0]),\n rango[j][1] + 0.05 * (rango[j][1] - rango[j][0]))",
"_____no_output_____"
]
],
[
[
"Con K=6 también obtenemos una ganancia considerable en las métricas, pero el análisis es más complejo debido al mayor número de clústers.",
"_____no_output_____"
]
],
[
[
"K=6\nresults = KMeans(n_clusters=K, random_state=0).fit(data_norm)\nlabels=results.labels_\ncentroids=results.cluster_centers_\n\ndata2['cluster']=labels # Para después observar el resto de variables\n\nprint(Counter(labels))\n\n# Introduce un clúster con muertos, antes los despreciaba\nvisualize_centroids(centroids, np.array(data), atributos, denormCentroids=True)",
"Counter({1: 2213, 5: 1399, 0: 1213, 4: 688, 2: 301, 3: 155})\n"
],
[
"pairplot(data,atributos,labels)",
"/home/dcabezas/.local/lib/python3.6/site-packages/seaborn/distributions.py:305: UserWarning: Dataset has 0 variance; skipping density estimate.\n warnings.warn(msg, UserWarning)\n/home/dcabezas/.local/lib/python3.6/site-packages/seaborn/distributions.py:305: UserWarning: Dataset has 0 variance; skipping density estimate.\n warnings.warn(msg, UserWarning)\n/home/dcabezas/.local/lib/python3.6/site-packages/seaborn/distributions.py:305: UserWarning: Dataset has 0 variance; skipping density estimate.\n warnings.warn(msg, UserWarning)\n/home/dcabezas/.local/lib/python3.6/site-packages/seaborn/distributions.py:305: UserWarning: Dataset has 0 variance; skipping density estimate.\n warnings.warn(msg, UserWarning)\n/home/dcabezas/.local/lib/python3.6/site-packages/seaborn/distributions.py:305: UserWarning: Dataset has 0 variance; skipping density estimate.\n warnings.warn(msg, UserWarning)\n/home/dcabezas/.local/lib/python3.6/site-packages/seaborn/distributions.py:305: UserWarning: Dataset has 0 variance; skipping density estimate.\n warnings.warn(msg, UserWarning)\n/home/dcabezas/.local/lib/python3.6/site-packages/seaborn/distributions.py:305: UserWarning: Dataset has 0 variance; skipping density estimate.\n warnings.warn(msg, UserWarning)\n"
],
[
"dataC=data.copy()\ndataC['cluster']=labels\ndataC",
"_____no_output_____"
],
[
"# Box Plot\nn_var = len(atributos)\nfig, axes = plt.subplots(K, n_var, sharey=True, figsize=(15, 15))\nfig.subplots_adjust(wspace=0, hspace=0)\n\ncolors = sns.color_palette(palette=None, n_colors=K, desat=None)\n\nrango = []\nfor j in range(n_var):\n d=dataC[atributos[j]]\n rango.append([d.min(), d.max()])\n\nfor i in range(K):\n dat_filt = dataC.loc[dataC['cluster'] == i]\n for j in range(n_var):\n ax = sns.boxplot(x=dat_filt[atributos[j]],\n color=colors[i],\n flierprops={\n 'marker': 'o',\n 'markersize': 4\n },\n ax=axes[i, j], whis=3,showfliers=True)\n\n if (i == K - 1):\n axes[i, j].set_xlabel(atributos[j])\n else:\n axes[i, j].set_xlabel(\"\")\n\n if (j == 0):\n axes[i, j].set_ylabel(\"Cluster \" + str(i))\n else:\n axes[i, j].set_ylabel(\"\")\n\n axes[i, j].set_yticks([])\n axes[i, j].grid(axis='x',\n linestyle='-',\n linewidth='0.2',\n color='gray')\n axes[i, j].grid(axis='y', b=False)\n\n ax.set_xlim(rango[j][0] - 0.05 * (rango[j][1] - rango[j][0]),\n rango[j][1] + 0.05 * (rango[j][1] - rango[j][0]))",
"_____no_output_____"
]
],
[
[
"### DBSCAN\n\nAhora utilizaremos el algoritmo DBSCAN para formar los clústers",
"_____no_output_____"
]
],
[
[
"# Elección de un umbral de distancia por encima del cual no se mezclarán más clústers\n#E=[0.1,0.15,0.2,0.25,0.3,0.35]\nE=[0.1,0.11,0.12,0.13,0.14,0.15]\n# Con 0.35 en adelante ya hace un sólo clúster\nK=[]\nsilhouette=[]\ncalinski=[]\nfor e in E:\n results = DBSCAN(eps=e, min_samples=50, n_jobs=4).fit(data_norm)\n sil, cal = measures_silhoutte_calinski(data_norm, results.labels_)\n silhouette.append(sil)\n calinski.append(cal)\n K.append(max(results.labels_)+1) # La etiqueta -1 corresponde a un clúster de muestras que el algoritmo considera ruidosas, luego lo desecharemos",
"_____no_output_____"
],
[
"print(silhouette)\nprint(calinski)\nprint(K)",
"[0.8710220293321265, 0.8710220293321265, 0.5900064904722773, 0.5900064904722773, 0.5900064904722773, 0.5349637984880388]\n[888.2122785899895, 888.2122785899895, 1540.8634818093465, 1540.8634818093465, 1540.8634818093465, 1053.2968281286423]\n[14, 14, 8, 8, 8, 3]\n"
],
[
"plt.plot(E,calinski, 'bo-')\nplt.ylabel('Calinski')\nplt.xlabel('Radio')\nplt.show()\nplt.plot(E,silhouette,'bo-')\nplt.ylabel('Silhouette')\nplt.xlabel('Radio')\nplt.show()\nplt.plot(E,K,'bo-')\nplt.ylabel('Nº de clústers. K')\nplt.xlabel('Radio')\nplt.show()",
"_____no_output_____"
]
],
[
[
"Elegimos epsilon=0.15",
"_____no_output_____"
]
],
[
[
"E=0.15 #0.12 produce 8 clústers y considera demasiados elementos (266) como ruidosos\nresults = DBSCAN(eps=E, min_samples=50, n_jobs=4).fit(data_norm)\nlabels=results.labels_",
"_____no_output_____"
],
[
"Counter(labels)",
"_____no_output_____"
]
],
[
[
"Calculamos los centroides a mano",
"_____no_output_____"
]
],
[
[
"dataC=data.copy()\ndataC['cluster']=labels\ndataC",
"_____no_output_____"
],
[
"# Eliminamos los 183 ejemplos que el algoritmo considera ruidosos\ndataC.drop(dataC[dataC['cluster']==-1].index,inplace=True)\ndataC",
"_____no_output_____"
],
[
"centroids = dataC.groupby('cluster').mean()\ncentroids",
"_____no_output_____"
],
[
"centroids=centroids.values\ncentroids",
"_____no_output_____"
],
[
"visualize_centroids(centroids, np.array(data), atributos, denormCentroids=False)",
"_____no_output_____"
],
[
"labels=[l for l in labels if l != -1]\npairplot(dataC,atributos,labels)",
"/home/dcabezas/.local/lib/python3.6/site-packages/seaborn/distributions.py:305: UserWarning: Dataset has 0 variance; skipping density estimate.\n warnings.warn(msg, UserWarning)\n/home/dcabezas/.local/lib/python3.6/site-packages/seaborn/distributions.py:305: UserWarning: Dataset has 0 variance; skipping density estimate.\n warnings.warn(msg, UserWarning)\n/home/dcabezas/.local/lib/python3.6/site-packages/seaborn/distributions.py:305: UserWarning: Dataset has 0 variance; skipping density estimate.\n warnings.warn(msg, UserWarning)\n/home/dcabezas/.local/lib/python3.6/site-packages/seaborn/distributions.py:305: UserWarning: Dataset has 0 variance; skipping density estimate.\n warnings.warn(msg, UserWarning)\n/home/dcabezas/.local/lib/python3.6/site-packages/seaborn/distributions.py:305: UserWarning: Dataset has 0 variance; skipping density estimate.\n warnings.warn(msg, UserWarning)\n/home/dcabezas/.local/lib/python3.6/site-packages/seaborn/distributions.py:305: UserWarning: Dataset has 0 variance; skipping density estimate.\n warnings.warn(msg, UserWarning)\n"
],
[
"K=max(labels)+1",
"_____no_output_____"
],
[
"# Box Plot\nn_var = len(atributos)\nfig, axes = plt.subplots(K, n_var, sharey=True, figsize=(15, 15))\nfig.subplots_adjust(wspace=0, hspace=0)\n\ncolors = sns.color_palette(palette=None, n_colors=K, desat=None)\n\nrango = []\nfor j in range(n_var):\n d=dataC[atributos[j]]\n rango.append([d.min(), d.max()])\n\nfor i in range(K):\n dat_filt = dataC.loc[dataC['cluster'] == i]\n for j in range(n_var):\n ax = sns.boxplot(x=dat_filt[atributos[j]],\n color=colors[i],\n flierprops={\n 'marker': 'o',\n 'markersize': 4\n },\n ax=axes[i, j], whis=3,showfliers=True)\n\n if (i == K - 1):\n axes[i, j].set_xlabel(atributos[j])\n else:\n axes[i, j].set_xlabel(\"\")\n\n if (j == 0):\n axes[i, j].set_ylabel(\"Cluster \" + str(i))\n else:\n axes[i, j].set_ylabel(\"\")\n\n axes[i, j].set_yticks([])\n axes[i, j].grid(axis='x',\n linestyle='-',\n linewidth='0.2',\n color='gray')\n axes[i, j].grid(axis='y', b=False)\n\n ax.set_xlim(rango[j][0] - 0.05 * (rango[j][1] - rango[j][0]),\n rango[j][1] + 0.05 * (rango[j][1] - rango[j][0]))",
"_____no_output_____"
],
[
"print(sum(data.TOT_MUERTOS))\nprint(sum(data.TOT_HERIDOS_GRAVES))",
"180\n882\n"
]
],
[
[
"### Estudio de variables circunstanciales y de tipo dentro en los clusters",
"_____no_output_____"
]
],
[
[
"Counter(data2.TIPO_ACCIDENTE)",
"_____no_output_____"
],
[
"cluster_3=data2[data2.cluster==3]\ncluster_4=data2[data2.cluster==4]\ndata2.shape[0], cluster_3.shape[0], cluster_4.shape[0]",
"_____no_output_____"
],
[
"conjuntos=[data2,cluster_3,cluster_4]",
"_____no_output_____"
],
[
"def prop(condicion,data):\n n=0\n for i, row in data.iterrows():\n if condicion(row):\n n+=1\n return n/data.shape[0]\n\ndef propChoques_front_lat(data):\n condicion=(lambda x: '(Front' in x.TIPO_ACCIDENTE or '(Lateral)' in x.TIPO_ACCIDENTE)\n return prop(condicion,data)\n\ndef propAlcances(data):\n condicion=(lambda x: '(Alcance)' in x.TIPO_ACCIDENTE)\n return prop(condicion,data)\n\ndef propInterurbanas(data):\n condicion=(lambda x: x.ZONA_AGRUPADA=='VÍAS INTERURBANAS')\n return prop(condicion,data)\n\ndef propUrbanas(data):\n condicion=(lambda x: x.ZONA_AGRUPADA=='VÍAS URBANAS')\n return prop(condicion,data)\n\ndef propAtropellos(data):\n condicion=(lambda x: 'peatón' in x.TIPO_ACCIDENTE)\n return prop(condicion,data)",
"_____no_output_____"
],
[
"for c in conjuntos: # Proporción de choques frontales y laterales\n print(propChoques_front_lat(c))",
"0.21728932819567767\n0.12258064516129032\n0.15988372093023256\n"
],
[
"for c in conjuntos: # Proporción de choques por alcance\n print(propAlcances(c))",
"0.11844530072038867\n0.06451612903225806\n0.06831395348837209\n"
],
[
"for c in conjuntos:\n print(propUrbanas(c))",
"0.5015915563746021\n0.2709677419354839\n0.45348837209302323\n"
],
[
"for c in conjuntos:\n print(propInterurbanas(c))",
"0.4984084436253979\n0.7290322580645161\n0.5465116279069767\n"
],
[
"for c in conjuntos:\n print(propAtropellos(c))",
"0.05913888423521528\n0.17419354838709677\n0.09593023255813954\n"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d0e9e929e2f4cf25ea9fb6e25b34b48db7cd7088 | 11,748 | ipynb | Jupyter Notebook | tests/test.ipynb | anirudhbhashyam/QR-Algorithm | 13058b9374f837b20650963645eb5c36ff42482d | [
"MIT"
] | null | null | null | tests/test.ipynb | anirudhbhashyam/QR-Algorithm | 13058b9374f837b20650963645eb5c36ff42482d | [
"MIT"
] | null | null | null | tests/test.ipynb | anirudhbhashyam/QR-Algorithm | 13058b9374f837b20650963645eb5c36ff42482d | [
"MIT"
] | null | null | null | 25.876652 | 314 | 0.474123 | [
[
[
"\nimport os\nimport sys\nimport numpy as np\nimport pandas as pd\nfrom scipy.io import mmread\nfrom scipy.linalg import hessenberg\nimport scipy.linalg as sl\nsys.path.append(\"../qr\")\n\nfrom qr import *\nimport sympy as sp",
"_____no_output_____"
],
[
"a = np.random.default_rng().random(size = (10, 10))",
"_____no_output_____"
],
[
"%%timeit\nmax(a[0])",
"1.19 µs ± 22.7 ns per loop (mean ± std. dev. of 7 runs, 1000000 loops each)\n"
],
[
"%%timeit\nnp.max(a[0])",
"3.42 µs ± 97.9 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)\n"
],
[
"path = \"../test_matrices\"\nmat_1_file = \"west0381\"\next = \".mtx.gz\"\n\nmat = mmread(os.path.join(path, \"\".join((mat_1_file, ext))))",
"_____no_output_____"
],
[
"m = mat.toarray()\nm",
"_____no_output_____"
],
[
"import mpmath as mpm\n\nmpm.dps = 15\n\ndef complex_matrix(n: int, a: float, b: float) -> np.ndarray:\n\tif a >= b:\n\t\traise ValueError(\"Required: b > a\")\n\t\n\tr = (b - a) * np.random.default_rng().random(size = (n, n)) + a\n\tc = (b - a) * np.random.default_rng().random(size = (n, n)) + a\n\tm = r + 1j * c\n\t\n\treturn m.astype(np.complex128)\n\ndef householder_reflector(x: np.array):\n\t\"\"\"\n\tProduces the Householder\n\tvector based on the input \n\tvector x. The householder \n \tvector acts as:\n \n\t|a_1|\t\t|alpha|\t\n\t|a_2|\t->\t|0|\n\t|a_3|\t\t|0|\n\n\tParameters\n\t----------\n\tx:\t\n\t\tA numpy array who's entries\n\t\tafter the 1st element needs to \n\t\tbe 0ed. \n \n\tReturns\n\t-------\n\tA numpy array that acts as the \n\tHouseholder vector. \n\t\"\"\"\n\tu = x.copy()\n\t\n\trho = -np.exp(1j * np.angle(u[0]), dtype = np.complex128)\n\n\t# Set the Householder vector\n\t# to u = u \\pm alpha e_1 to \n\t# avoid cancellation.\n\tu[0] -= rho * mpm.norm(u)\n \n\t# Vector needs to have 1 \n\t# in the 2nd dimension.\n\t# print(u)\n\treturn u.reshape(-1, 1)\n\ndef hessenberg_transform_1(M: np.ndarray) -> np.ndarray:\n\t\"\"\"\n\tConverts a given matrix to \n\tHessenberg form using\n\tHoueholder transformations.\n\n\tParameters\n\t----------\n\tM:\t\n \t\tA complex square \n\t\tnumpy 2darray.\n\n\tReturns\n\t-------\n\tA tuple consisting of numpy\n \t2-D arrays which are the \n\thessenberg form and the \n\tpermutation matrix.\n\t\"\"\"\n\th = M.copy()\n\tn = np.array(h.tolist()).shape[0]\n\tu = np.eye(n, dtype = np.complex128)\n\thouseholder_vectors = list()\n \n\t# MAIN LOOP.\n\tfor l in range(n - 2):\n\t\t# Get the Householder vector for h.\n\t\tt = householder_reflector(h[l + 1 :, l])\n\n\t\t# Norm**2 of the Householder vector.\n\t\tt_norm_squared = t.conj().T @ t\n \n\t\t# p = np.eye(h[l + 1:, l].shape[0]) - 2 * (np.outer(t, t)) / t_norm_squared\n\n\t\t# # Resize and refactor the Householder matrix.\n\t\t# p = np.pad(p, ((l + 1, 0), (l + 1, 0)), mode = \"constant\", constant_values = ((0, 0), (0, 0)))\n\t\t# for k in range(l + 1):\n\t\t# \tp[k, k] = 1\n\n\t\t# Perform a similarity transformation on h\n\t\t# using the Householder matrix.\n\t\t# h = p @ h @ p.\n \n\t\t# --- REAL --- #\n\t\t# Left multiplication by I - 2uu^{*}.\n\t\t# h_real[l + 1 :, l :] -= 2 * (t @ (t.conj().T @ h_real[l + 1 :, l :])) / t_norm_squared\n\t\t# Right multiplication by I - 2uu^{*}.\n\t\t# h_real[ :, l + 1 :] -= 2 * ((h[ :, l + 1 :] @ t) @ t.conj().T) / t_norm_squared\n\t\t# print(f\"{np.array(h[l + 1 :, l :].tolist()).shape = }\")\n\t\t# print(f\"{np.array(t.transpose_conj().tolist()).shape = }\")\n\t\t# print(f\"{np.array((t.transpose_conj() * h[l + 1 :, l :]).tolist()).shape = }\")\n\t\tfactor = 2 / t_norm_squared\n \n\t\th[l + 1 :, l :] -= factor * (t @ (t.conj().T @ h[l + 1 :, l :]))\n\n\t\t# --- IMAGINARY --- #\n\t\t# Left multiplication by I - 2uu^{*}.\n\t\t# h_imag[l + 1 :, l :] -= 2 * (t @ (t.conj().T @ h_imag[l + 1 :, l :])) / t_norm_squared\n\t\t# Right multiplication by I - 2uu^{*}.\n\t\t# h_imag[ :, l + 1 :] -= 2 * ((h[ :, l + 1 :] @ t) @ t.conj().T) / t_norm_squared\n\t\th[ :, l + 1 :] -= factor * ((h[ :, l + 1 :] @ t) @ t.conj().T)\n\t\t\n\t\t# Force elements below main\n\t\t# subdiagonal to be 0.\n\t\th[l + 2 :, l] = 0.0\n\n\t\t# Store the transformations \n\t\t# to compute u.\n\t\thouseholder_vectors.append(t)\n\t\t\t\n\t# Store the transformations.\n\tfor k in reversed(range(n - 2)):\n\t\tt = householder_vectors[k]\n\t\tt_norm_squared = np.dot(t.conj().T, t)\n\t\tu[k + 1 :, 
k + 1 :] -= 2 * t * (t.conj().T @ u[k + 1 :, k + 1 :]) / t_norm_squared\n\n\t# h = h_real + 1j * h_imag\n\treturn h, u",
"_____no_output_____"
],
[
"n = 1000\na = 10.0\nb = 20.0\n# m = complex_matrix(n, a, b)\n# M = mpm.matrix(m.tolist())\nhess_from_alg, _ = hessenberg_transform_1(m)\nhess_from_scipy = hessenberg(m) ",
"../qr/hessenberg.py:51: ComplexWarning: Casting complex values to real discards the imaginary part\n \n"
],
[
"%%capture cap --no-stderr\n\npd.options.display.max_columns = 200\npd.set_option(\"display.width\", 1000)\npd.set_option(\"display.max_columns\", 200)\npd.set_option(\"display.max_rows\", 1000)\n\n# print(f\" Hessenberged:\\n {pd.DataFrame(hess_alg)}\")\n# print(f\"Hessenberged (scipy):\\n {pd.DataFrame(hess_from_scipy)}\")\neigs = np.sort(np.linalg.eig(hess_from_alg)[0])\neigs_scipy = np.sort(np.linalg.eig(hess_from_scipy)[0])\nprint(f\"Eigs:\\n {pd.DataFrame(np.vstack([eigs, eigs_scipy]).T)}\")\nprint(f\"Equality of eigs: {np.testing.assert_allclose(eigs_scipy, eigs, rtol = 1e-6)}\")",
"_____no_output_____"
],
[
"with open(\"test_ipynb_output.txt\", \"w\") as f:\n f.write(cap.stdout)",
"_____no_output_____"
],
[
"sl.blas.daxpy([1, 2, 3], [1, 2, 3], a = 0.5)",
"_____no_output_____"
],
[
"def sign(z: complex) -> complex:\n\t\tif z == 0:\n\t\t\treturn 1\n\t\treturn z / abs(z)\n\nsign(-2.0 + 2.j)",
"_____no_output_____"
],
[
"a = np.array([1.00345345, 2, 1, -1, 2])\nb = np.array([1.00354, 2, 1, -1, 2])\n[i for i, _ in enumerate(a) if np.isclose(_, 1.00354, 1e-3)]",
"_____no_output_____"
],
[
"np.round(1.011234, 1) == 1.0",
"_____no_output_____"
],
[
"dec = 6\na = f\"{0:.{dec}f}\"\nprint(float(a))",
"_____no_output_____"
],
[
"np.prod([1, 2, 3, 4])",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d0e9f9fc70d3549e4b25a4197ef6c4b3f19939c0 | 249,405 | ipynb | Jupyter Notebook | notebooks/curve.ipynb | allenai/learning-curve | 6a56f9ea6adb398b342fe188e40cf53484e82c59 | [
"Apache-2.0"
] | 6 | 2021-06-07T06:16:44.000Z | 2022-03-18T18:00:03.000Z | notebooks/curve.ipynb | allenai/learning-curve | 6a56f9ea6adb398b342fe188e40cf53484e82c59 | [
"Apache-2.0"
] | null | null | null | notebooks/curve.ipynb | allenai/learning-curve | 6a56f9ea6adb398b342fe188e40cf53484e82c59 | [
"Apache-2.0"
] | null | null | null | 549.35022 | 41,768 | 0.943878 | [
[
[
"import os\nimport sys\nmodule_path = os.path.abspath('..')\nsys.path.append(module_path)\nfrom lc.measurements import CurveMeasurements\nfrom lc.curve import LearningCurveEstimator\nfrom omegaconf import OmegaConf",
"_____no_output_____"
]
],
[
[
"Load error measurements using `CurveMeasurements`. See `notebooks/measurements.ipynb` for more about reading error measurements. ",
"_____no_output_____"
]
],
[
[
"curvems = CurveMeasurements()\ncurvems.load_from_json('../data/no_pretr_ft.json')\nprint(curvems)",
"--\nnum_train_samples: 25\ntest_errors: [74.04, 74.48, 74.07, 74.4, 75.95, 74.97, 74.02, 75.8, 74.6, 74.27]\nnum_ms: 10\n--\nnum_train_samples: 50\ntest_errors: [57.59, 58.45, 58.13, 58.44, 59.46, 58.14, 58.71, 58.22]\nnum_ms: 8\n--\nnum_train_samples: 100\ntest_errors: [45.01, 43.64, 45.06, 44.63]\nnum_ms: 4\n--\nnum_train_samples: 200\ntest_errors: [35.36, 35.2]\nnum_ms: 2\n--\nnum_train_samples: 400\ntest_errors: [27.92]\nnum_ms: 1\n--\n\n"
]
],
[
[
"Load default config. Modify `config.yaml` directly or update parameters once loaded. ",
"_____no_output_____"
]
],
[
[
"cfg = OmegaConf.load('../lc/config.yaml')\nprint('-'*20)\nprint('Default config')\nprint('-'*20)\nprint(OmegaConf.to_yaml(cfg))\n\ncfg.gamma_search = False\nprint('-'*20)\nprint('Modified config')\nprint('-'*20)\nprint(OmegaConf.to_yaml(cfg))",
"--------------------\nDefault config\n--------------------\ngamma: -0.5\ngamma_search: true\ngamma_range:\n- -1.0\n- 0\nsearch_reg_coeff: 5\nnormalize_objective: false\nvariance_type: smooth\nuse_weights: true\nddof: 1\nv_0: 0.02\nv_1: null\nmin_n: 20\n'N': 400\nnum_interp_pts: 100\nmarker_size: 20\nshade_gt_4N: true\nshade_opacity: 0.8\n\n--------------------\nModified config\n--------------------\ngamma: -0.5\ngamma_search: false\ngamma_range:\n- -1.0\n- 0\nsearch_reg_coeff: 5\nnormalize_objective: false\nvariance_type: smooth\nuse_weights: true\nddof: 1\nv_0: 0.02\nv_1: null\nmin_n: 20\n'N': 400\nnum_interp_pts: 100\nmarker_size: 20\nshade_gt_4N: true\nshade_opacity: 0.8\n\n"
],
[
"curve_estimator = LearningCurveEstimator(cfg)\ncurve, objective = curve_estimator.estimate(curvems)\nprint('Quality of the fit:',objective)\ncurve.print_summary(cfg.N)",
"Quality of the fit: 16.561\n------------------------------\nLearning curve summary\n------------------------------\nerror_400: 28.2575\nbeta_400: 16.0257\ngamma: -0.5\nalpha: 12.2318\neta: 320.5148\n"
]
],
[
[
"Searching for gamma leads to better fit. To enable gamma search set `gamma_search` to `True` (default). When gamma search is disabled, `curve_estimator.estimate()` uses `cfg.gamma` to estimate the curve. ",
"_____no_output_____"
]
],
[
[
"cfg.gamma_search = True\ncurve, objective = curve_estimator.estimate(curvems)\nprint('Quality of the fit:',objective)\ncurve.print_summary(cfg.N)",
"Quality of the fit: 6.2808\n------------------------------\nLearning curve summary\n------------------------------\nerror_400: 27.9133\nbeta_400: 18.2284\ngamma: -0.41\nalpha: 5.6835\neta: 259.2864\n"
]
],
[
[
"Use `curve_estimator.plot()` to visualizes the learning curve and the error measurements. ",
"_____no_output_____"
]
],
[
[
"curve_estimator.plot(curve,curvems,label='No Pretr; Ft')",
"_____no_output_____"
]
],
[
[
"You may also want to visualize the variance estimates. We recommend using the smoothed variance estimate but you can switch to using sample variance for curve estimation by setting `cfg.variance_type='sample'`. See `notebooks/variance.ipynb` for details on smooth variance estimation. ",
"_____no_output_____"
]
],
[
[
"curve_estimator.err_mean_var_estimator.visualize(curvems)",
"_____no_output_____"
]
],
[
[
"Plot multiple curves for easy comparison.",
"_____no_output_____"
]
],
[
[
"plot_metadata = [\n ['../data/no_pretr_linear.json','No Pretr; Lin','r','--'],\n ['../data/no_pretr_ft.json','No Pretr; Ft','g','-'],\n ['../data/pretr_linear.json','Pretr; Lin','b','--'],\n ['../data/pretr_ft.json','Pretr; Ft','m','-']\n]\nfor (json_path,label,color,linestyle) in plot_metadata:\n curvems.load_from_json(json_path)\n curve, _ = curve_estimator.estimate(curvems)\n curve_estimator.plot(curve,curvems,label,color,linestyle)",
"_____no_output_____"
]
],
[
[
"## What if you don't have all recommended error measurements?\nTechnically, it is possible to use just 2 training set sizes to estimate the learning curve but the results may be susceptible to high variance. For instance, here we estimate the learning curve using only measurements on training set sizes of 400 and 200. Note that below, we plot all error measurements and not just the ones used to estimate the curve. ",
"_____no_output_____"
]
],
[
[
"import copy\nfor (json_path,label,color,linestyle) in plot_metadata:\n curvems.load_from_json(json_path)\n curvems_filtered = copy.deepcopy(curvems)\n curvems_filtered.curvems = [errms for errms in curvems if errms.num_train_samples in [400,200]]\n curve, _ = curve_estimator.estimate(curvems_filtered)\n curve_estimator.plot(curve,curvems,label,color,linestyle)",
"_____no_output_____"
]
],
[
[
"However, results improve considerably when using 3 training set sizes. Below, we estimate learning curve using measurements on training sets of sizes 400, 200, and 100. ",
"_____no_output_____"
]
],
[
[
"for (json_path,label,color,linestyle) in plot_metadata:\n curvems.load_from_json(json_path)\n curvems_filtered = copy.deepcopy(curvems)\n curvems_filtered.curvems = [errms for errms in curvems if errms.num_train_samples in [400,200,100]]\n curve, _ = curve_estimator.estimate(curvems_filtered)\n curve_estimator.plot(curve,curvems,label,color,linestyle)",
"_____no_output_____"
]
],
[
[
"It may be possible to use much smaller training set sizes to compute learning curves as shown below. Note that the errors predicted by the curve at 200 and 400 training set sizes, which were not used to estimate the curve, are reasonably accurate. ",
"_____no_output_____"
]
],
[
[
"for (json_path,label,color,linestyle) in plot_metadata:\n curvems.load_from_json(json_path)\n curvems_filtered = copy.deepcopy(curvems)\n curvems_filtered.curvems = [errms for errms in curvems if errms.num_train_samples in [100,50,25]]\n curve, _ = curve_estimator.estimate(curvems_filtered)\n curve_estimator.plot(curve,curvems,label,color,linestyle)",
"_____no_output_____"
]
],
[
[
"## Quick and Lazy Approach\nIt is possible to compute a decent learning curve with only 3 error measurments - 1 for each of full, half, and quarter dataset sizes. In this case, simply set `cfg.v_1` to a reasonable value and proceed as before. This is our recommended approach if you are in a rush. See `notebooks/basic_lazy_usage.ipynb` for more details on this quick and lazy approach. ",
"_____no_output_____"
]
],
[
[
"cfg.v_1 = 10\nfor (json_path,label,color,linestyle) in plot_metadata:\n curvems.load_from_json(json_path)\n curvems_filtered = copy.deepcopy(curvems)\n curvems_filtered.curvems = [errms for errms in curvems if errms.num_train_samples in [400,200,100]]\n for errms in curvems_filtered:\n # Through away all but 1 error measurment per train set size\n errms.test_errors = [errms.test_errors[0]]\n errms.num_ms = 1\n curve, _ = curve_estimator.estimate(curvems_filtered)\n curve_estimator.plot(curve,curvems,label,color,linestyle)",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
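"code",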
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
d0ea1c156e34eafd2a9fdb88d7ec05dcfb1a1e9f | 6,305 | ipynb | Jupyter Notebook | Program's_Contributed_By_Contributors/AI-Summer-Course/py-master/DeepLearningML/10_gpu_benchmarking/Exercise/exercise_fashion_mnist_gpu_benchmarking.ipynb | siddharthdeo99/Hacktoberfest2k21 | 95666a2e704b0ce43c2ce3f3d521ff5bd843b17e | [
"MIT"
] | null | null | null | Program's_Contributed_By_Contributors/AI-Summer-Course/py-master/DeepLearningML/10_gpu_benchmarking/Exercise/exercise_fashion_mnist_gpu_benchmarking.ipynb | siddharthdeo99/Hacktoberfest2k21 | 95666a2e704b0ce43c2ce3f3d521ff5bd843b17e | [
"MIT"
] | null | null | null | Program's_Contributed_By_Contributors/AI-Summer-Course/py-master/DeepLearningML/10_gpu_benchmarking/Exercise/exercise_fashion_mnist_gpu_benchmarking.ipynb | siddharthdeo99/Hacktoberfest2k21 | 95666a2e704b0ce43c2ce3f3d521ff5bd843b17e | [
"MIT"
] | null | null | null | 22.437722 | 333 | 0.533703 | [
[
[
"<h3 style='color:blue'>Exercise: GPU performance for fashion mnist dataset</h3>",
"_____no_output_____"
],
[
"This notebook is derived from a tensorflow tutorial here: https://www.tensorflow.org/tutorials/keras/classification\nSo please refer to it before starting work on this exercise",
"_____no_output_____"
],
[
"You need to write code wherever you see `your code goes here` comment. You are going to do image classification for fashion mnist dataset and then you will benchmark the performance of GPU vs CPU for 1 hidden layer and then for 5 hidden layers. You will eventually fill out this table with your performance benchmark numbers\n\n\n| Hidden Layer | CPU | GPU |\n|:------|:------|:------|\n| 1 | ? | ? |\n| 5 | ? | ? |",
"_____no_output_____"
]
],
[
[
"# TensorFlow and tf.keras\nimport tensorflow as tf\nfrom tensorflow import keras\n\n# Helper libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nprint(tf.__version__)",
"_____no_output_____"
],
[
"fashion_mnist = keras.datasets.fashion_mnist\n\n(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()",
"_____no_output_____"
],
[
"class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',\n 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']",
"_____no_output_____"
],
[
"train_images.shape",
"_____no_output_____"
],
[
"plt.imshow(train_images[0])",
"_____no_output_____"
],
[
"train_labels[0]",
"_____no_output_____"
],
[
"class_names[train_labels[0]]",
"_____no_output_____"
],
[
"plt.figure(figsize=(3,3))\nfor i in range(5):\n plt.imshow(train_images[i])\n plt.xlabel(class_names[train_labels[i]])\n plt.show()",
"_____no_output_____"
],
[
"train_images_scaled = train_images / 255.0\ntest_images_scaled = test_images / 255.0",
"_____no_output_____"
],
[
"def get_model(hidden_layers=1):\n layers = []\n # Your code goes here-----------START\n # Create Flatten input layers\n # Create hidden layers that are equal to hidden_layers argument in this function\n # Create output \n # Your code goes here-----------END\n model = keras.Sequential(layers)\n \n model.compile(optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n \n return model",
"_____no_output_____"
],
[
"model = get_model(1)\nmodel.fit(train_images_scaled, train_labels, epochs=5)",
"_____no_output_____"
],
[
"model.predict(test_images_scaled)[2]",
"_____no_output_____"
],
[
"test_labels[2]",
"_____no_output_____"
],
[
"tf.config.experimental.list_physical_devices() ",
"_____no_output_____"
]
],
[
[
"<h4 style=\"color:purple\">5 Epochs performance comparison for 1 hidden layer</h4>",
"_____no_output_____"
]
],
[
[
"%%timeit -n1 -r1\nwith tf.device('/CPU:0'):\n # your code goes here",
"_____no_output_____"
],
[
"%%timeit -n1 -r1\nwith tf.device('/GPU:0'):\n # your code goes here",
"_____no_output_____"
]
],
[
[
"<h4 style=\"color:purple\">5 Epocs performance comparison with 5 hidden layers</h4>",
"_____no_output_____"
]
],
[
[
"%%timeit -n1 -r1\nwith tf.device('/CPU:0'):\n # your code here",
"_____no_output_____"
],
[
"%%timeit -n1 -r1\nwith tf.device('/GPU:0'):\n # your code here",
"_____no_output_____"
]
],
[
[
"[Click me to check solution for this exercise](https://github.com/codebasics/py/blob/master/DeepLearningML/10_gpu_benchmarking/Exercise/exercise_solution.ipynb)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
d0ea349405e4a96a3b5ce1fe18b9b38436a4681c | 779,143 | ipynb | Jupyter Notebook | 07_plot.ipynb | abostroem/AstronomicalData | 8228f6b77e5723e4ddb6d5b98dbd744cbab996bc | [
"MIT"
] | null | null | null | 07_plot.ipynb | abostroem/AstronomicalData | 8228f6b77e5723e4ddb6d5b98dbd744cbab996bc | [
"MIT"
] | null | null | null | 07_plot.ipynb | abostroem/AstronomicalData | 8228f6b77e5723e4ddb6d5b98dbd744cbab996bc | [
"MIT"
] | null | null | null | 653.643456 | 204,684 | 0.94852 | [
[
[
"# Chapter 7\n\nThis is the seventh in a series of notebooks related to astronomy data.\n\nAs a continuing example, we will replicate part of the analysis in a recent paper, \"[Off the beaten path: Gaia reveals GD-1 stars outside of the main stream](https://arxiv.org/abs/1805.00425)\" by Adrian M. Price-Whelan and Ana Bonaca.\n\nIn the previous notebook we selected photometry data from Pan-STARRS and used it to identify stars we think are likely to be in GD-1\n\nIn this notebook, we'll take the results from previous lessons and use them to make a figure that tells a compelling scientific story.",
"_____no_output_____"
],
[
"## Outline\n\nHere are the steps in this notebook:\n\n1. Starting with the figure from the previous notebook, we'll add annotations to present the results more clearly.\n\n2. The we'll see several ways to customize figures to make them more appealing and effective.\n\n3. Finally, we'll see how to make a figure with multiple panels or subplots.\n\nAfter completing this lesson, you should be able to\n\n* Design a figure that tells a compelling story.\n\n* Use Matplotlib features to customize the appearance of figures.\n\n* Generate a figure with multiple subplots.",
"_____no_output_____"
],
[
"## Installing libraries\n\nIf you are running this notebook on Colab, you can run the following cell to install Astroquery and the other libraries we'll use.\n\nIf you are running this notebook on your own computer, you might have to install these libraries yourself. See the instructions in the preface.",
"_____no_output_____"
]
],
[
[
"# If we're running on Colab, install libraries\n\nimport sys\nIN_COLAB = 'google.colab' in sys.modules\n\nif IN_COLAB:\n !pip install astroquery astro-gala pyia python-wget",
"_____no_output_____"
]
],
[
[
"## Making Figures That Tell a Story\n\nSo far the figure we've made have been \"quick and dirty\". Mostly we have used Matplotlib's default style, although we have adjusted a few parameters, like `markersize` and `alpha`, to improve legibility.\n\nNow that the analysis is done, it's time to think more about:\n\n1. Making professional-looking figures that are ready for publication, and\n\n2. Making figures that communicate a scientific result clearly and compellingly.\n\nNot necessarily in that order.",
"_____no_output_____"
],
[
"Let's start by reviewing Figure 1 from the original paper. We've seen the individual panels, but now let's look at the whole thing, along with the caption:\n\n<img width=\"500\" src=\"https://github.com/datacarpentry/astronomy-python/raw/gh-pages/fig/gd1-5.png\">",
"_____no_output_____"
],
[
"**Exercise:** Think about the following questions:\n\n1. What is the primary scientific result of this work?\n\n2. What story is this figure telling?\n\n3. In the design of this figure, can you identify 1-2 choices the authors made that you think are effective? Think about big-picture elements, like the number of panels and how they are arranged, as well as details like the choice of typeface.\n\n4. Can you identify 1-2 elements that could be improved, or that you might have done differently?",
"_____no_output_____"
],
[
"Some topics that might come up in this discussion:\n\n1. The primary result is that the multiple stages of selection make it possible to separate likely candidates from the background more effectively than in previous work, which makes it possible to see the structure of GD-1 in \"unprecedented detail\".\n\n2. The figure documents the selection process as a sequence of steps. Reading right-to-left, top-to-bottom, we see selection based on proper motion, the results of the first selection, selection based on color and magnitude, and the results of the second selection. So this figure documents the methodology and presents the primary result.\n\n3. It's mostly black and white, with minimal use of color, so it will work well in print. The annotations in the bottom left panel guide the reader to the most important results. It contains enough technical detail for a professional audience, but most of it is also comprehensible to a more general audience. The two left panels have the same dimensions and their axes are aligned.\n\n4. Since the panels represent a sequence, it might be better to arrange them left-to-right. The placement and size of the axis labels could be tweaked. The entire figure could be a little bigger to match the width and proportion of the caption. The top left panel has unnused white space (but that leaves space for the annotations in the bottom left).",
"_____no_output_____"
],
[
"## Plotting GD-1\n\nLet's start with the panel in the lower left. The following cell reloads the data.",
"_____no_output_____"
]
],
[
[
"import os\nfrom wget import download\n\nfilename = 'gd1_merged.hdf5'\npath = 'https://github.com/AllenDowney/AstronomicalData/raw/main/data/'\n\nif not os.path.exists(filename):\n print(download(path+filename))",
"_____no_output_____"
],
[
"import pandas as pd\n\nselected = pd.read_hdf(filename, 'selected')",
"_____no_output_____"
],
[
"import matplotlib.pyplot as plt\n\ndef plot_second_selection(df):\n x = df['phi1']\n y = df['phi2']\n\n plt.plot(x, y, 'ko', markersize=0.7, alpha=0.9)\n\n plt.xlabel('$\\phi_1$ [deg]')\n plt.ylabel('$\\phi_2$ [deg]')\n plt.title('Proper motion + photometry selection', fontsize='medium')\n\n plt.axis('equal')",
"_____no_output_____"
]
],
[
[
"And here's what it looks like.",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(10,2.5))\nplot_second_selection(selected)",
"_____no_output_____"
]
],
[
[
"## Annotations\n\nThe figure in the paper uses three other features to present the results more clearly and compellingly:\n\n* A vertical dashed line to distinguish the previously undetected region of GD-1,\n\n* A label that identifies the new region, and\n\n* Several annotations that combine text and arrows to identify features of GD-1.\n\nAs an exercise, choose any or all of these features and add them to the figure:\n\n* To draw vertical lines, see [`plt.vlines`](https://matplotlib.org/3.3.1/api/_as_gen/matplotlib.pyplot.vlines.html) and [`plt.axvline`](https://matplotlib.org/3.3.1/api/_as_gen/matplotlib.pyplot.axvline.html#matplotlib.pyplot.axvline).\n\n* To add text, see [`plt.text`](https://matplotlib.org/3.3.1/api/_as_gen/matplotlib.pyplot.text.html).\n\n* To add an annotation with text and an arrow, see [plt.annotate]().\n\nAnd here is some [additional information about text and arrows](https://matplotlib.org/3.3.1/tutorials/text/annotations.html#plotting-guide-annotation).",
"_____no_output_____"
]
],
[
[
"# Solution\n\n# plt.axvline(-55, ls='--', color='gray', \n# alpha=0.4, dashes=(6,4), lw=2)\n# plt.text(-60, 5.5, 'Previously\\nundetected', \n# fontsize='small', ha='right', va='top');\n\n# arrowprops=dict(color='gray', shrink=0.05, width=1.5, \n# headwidth=6, headlength=8, alpha=0.4)\n\n# plt.annotate('Spur', xy=(-33, 2), xytext=(-35, 5.5),\n# arrowprops=arrowprops,\n# fontsize='small')\n\n# plt.annotate('Gap', xy=(-22, -1), xytext=(-25, -5.5),\n# arrowprops=arrowprops,\n# fontsize='small')",
"_____no_output_____"
]
],
[
[
"## Customization\n\nMatplotlib provides a default style that determines things like the colors of lines, the placement of labels and ticks on the axes, and many other properties.\n\nThere are several ways to override these defaults and customize your figures:\n\n* To customize only the current figure, you can call functions like `tick_params`, which we'll demonstrate below.\n\n* To customize all figures in a notebook, you use `rcParams`.\n\n* To override more than a few defaults at the same time, you can use a style sheet.",
"_____no_output_____"
],
[
"As a simple example, notice that Matplotlib puts ticks on the outside of the figures by default, and only on the left and bottom sides of the axes.\n\nTo change this behavior, you can use `gca()` to get the current axes and `tick_params` to change the settings.\n\nHere's how you can put the ticks on the inside of the figure:\n\n```\nplt.gca().tick_params(direction='in')\n```\n\n**Exercise:** Read the documentation of [`tick_params`](https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.axes.Axes.tick_params.html) and use it to put ticks on the top and right sides of the axes.",
"_____no_output_____"
]
],
[
[
"# Solution\n\n# plt.gca().tick_params(top=True, right=True)",
"_____no_output_____"
]
],
[
[
"## rcParams\n\nIf you want to make a customization that applies to all figures in a notebook, you can use `rcParams`.\n\nHere's an example that reads the current font size from `rcParams`:",
"_____no_output_____"
]
],
[
[
"plt.rcParams['font.size']",
"_____no_output_____"
]
],
[
[
"And sets it to a new value:",
"_____no_output_____"
]
],
[
[
"plt.rcParams['font.size'] = 14",
"_____no_output_____"
]
],
[
[
"**Exercise:** Plot the previous figure again, and see what font sizes have changed. Look up any other element of `rcParams`, change its value, and check the effect on the figure.",
"_____no_output_____"
],
[
"If you find yourself making the same customizations in several notebooks, you can put changes to `rcParams` in a `matplotlibrc` file, [which you can read about here](https://matplotlib.org/3.3.1/tutorials/introductory/customizing.html#customizing-with-matplotlibrc-files).",
"_____no_output_____"
],
[
"## Style sheets\n\nThe `matplotlibrc` file is read when you import Matplotlib, so it is not easy to switch from one set of options to another.\n\nThe solution to this problem is style sheets, [which you can read about here](https://matplotlib.org/3.1.1/tutorials/introductory/customizing.html).\n\nMatplotlib provides a set of predefined style sheets, or you can make your own.\n\nThe following cell displays a list of style sheets installed on your system.",
"_____no_output_____"
]
],
[
[
"plt.style.available",
"_____no_output_____"
]
],
[
[
"Note that `seaborn-paper`, `seaborn-talk` and `seaborn-poster` are particularly intended to prepare versions of a figure with text sizes and other features that work well in papers, talks, and posters.\n\nTo use any of these style sheets, run `plt.style.use` like this:\n\n```\nplt.style.use('fivethirtyeight')\n```",
"_____no_output_____"
],
[
"The style sheet you choose will affect the appearance of all figures you plot after calling `use`, unless you override any of the options or call `use` again.\n\n**Exercise:** Choose one of the styles on the list and select it by calling `use`. Then go back and plot one of the figures above and see what effect it has.",
"_____no_output_____"
],
[
"If you can't find a style sheet that's exactly what you want, you can make your own. This repository includes a style sheet called `az-paper-twocol.mplstyle`, with customizations chosen by Azalee Bostroem for publication in astronomy journals.\n\nThe following cell downloads the style sheet.",
"_____no_output_____"
]
],
[
[
"import os\n\nfilename = 'az-paper-twocol.mplstyle'\npath = 'https://github.com/AllenDowney/AstronomicalData/raw/main/data/'\n\nif not os.path.exists(filename):\n print(download(path+filename))",
"_____no_output_____"
]
],
[
[
"You can use it like this:\n\n```\nplt.style.use('./az-paper-twocol.mplstyle')\n```\n\nThe prefix `./` tells Matplotlib to look for the file in the current directory.",
"_____no_output_____"
],
[
"As an alternative, you can install a style sheet for your own use by putting it in your configuration directory. To find out where that is, you can run the following command:\n\n```\nimport matplotlib as mpl\n\nmpl.get_configdir()\n```",
"_____no_output_____"
],
[
"## LaTeX fonts\n\nWhen you include mathematical expressions in titles, labels, and annotations, Matplotlib uses [`mathtext`](https://matplotlib.org/3.1.0/tutorials/text/mathtext.html) to typeset them. `mathtext` uses the same syntax as LaTeX, but it provides only a subset of its features.\n\nIf you need features that are not provided by `mathtext`, or you prefer the way LaTeX typesets mathematical expressions, you can customize Matplotlib to use LaTeX.\n\nIn `matplotlibrc` or in a style sheet, you can add the following line:\n\n```\ntext.usetex : true\n```\n\nOr in a notebook you can run the following code.\n\n```\nplt.rcParams['text.usetex'] = True\n```",
"_____no_output_____"
]
],
[
[
"plt.rcParams['text.usetex'] = True",
"_____no_output_____"
]
],
[
[
"If you go back and draw the figure again, you should see the difference.\n\nIf you get an error message like\n\n```\nLaTeX Error: File `type1cm.sty' not found.\n```\n\nYou might have to install a package that contains the fonts LaTeX needs. On some systems, the packages `texlive-latex-extra` or `cm-super` might be what you need. [See here for more help with this](https://stackoverflow.com/questions/11354149/python-unable-to-render-tex-in-matplotlib).\n\nIn case you are curious, `cm` stands for [Computer Modern](https://en.wikipedia.org/wiki/Computer_Modern), the font LaTeX uses to typeset math.",
"_____no_output_____"
],
[
"## Multiple panels\n\nSo far we've been working with one figure at a time, but the figure we are replicating contains multiple panels, also known as \"subplots\".\n\nConfusingly, Matplotlib provides *three* functions for making figures like this: `subplot`, `subplots`, and `subplot2grid`.\n\n* [`subplot`](https://matplotlib.org/3.3.1/api/_as_gen/matplotlib.pyplot.subplot.html) is simple and similar to MATLAB, so if you are familiar with that interface, you might like `subplot`\n\n* [`subplots`](https://matplotlib.org/3.3.1/api/_as_gen/matplotlib.pyplot.subplots.html) is more object-oriented, which some people prefer.\n\n* [`subplot2grid`](https://matplotlib.org/3.3.1/api/_as_gen/matplotlib.pyplot.subplot2grid.html) is most convenient if you want to control the relative sizes of the subplots. \n\nSo we'll use `subplot2grid`.\n\nAll of these functions are easier to use if we put the code that generates each panel in a function.",
"_____no_output_____"
],
[
"## Upper right\n\nTo make the panel in the upper right, we have to reload `centerline`.",
"_____no_output_____"
]
],
[
[
"import os\n\nfilename = 'gd1_dataframe.hdf5'\npath = 'https://github.com/AllenDowney/AstronomicalData/raw/main/data/'\n\nif not os.path.exists(filename):\n print(download(path+filename))",
"_____no_output_____"
],
[
"import pandas as pd\n\ncenterline = pd.read_hdf(filename, 'centerline')",
"_____no_output_____"
]
],
[
[
"And define the coordinates of the rectangle we selected.",
"_____no_output_____"
]
],
[
[
"pm1_min = -8.9\npm1_max = -6.9\npm2_min = -2.2\npm2_max = 1.0\n\npm1_rect = [pm1_min, pm1_min, pm1_max, pm1_max]\npm2_rect = [pm2_min, pm2_max, pm2_max, pm2_min]",
"_____no_output_____"
]
],
[
[
"To plot this rectangle, we'll use a feature we have not seen before: `Polygon`, which is provided by Matplotlib.\n\nTo create a `Polygon`, we have to put the coordinates in an array with `x` values in the first column and `y` values in the second column. ",
"_____no_output_____"
]
],
[
[
"import numpy as np\n\nvertices = np.transpose([pm1_rect, pm2_rect])\nvertices",
"_____no_output_____"
]
],
[
[
"The following function takes a `DataFrame` as a parameter, plots the proper motion for each star, and adds a shaded `Polygon` to show the region we selected.",
"_____no_output_____"
]
],
[
[
"from matplotlib.patches import Polygon\n\ndef plot_proper_motion(df):\n pm1 = df['pm_phi1']\n pm2 = df['pm_phi2']\n\n plt.plot(pm1, pm2, 'ko', markersize=0.3, alpha=0.3)\n \n poly = Polygon(vertices, closed=True, \n facecolor='C1', alpha=0.4)\n plt.gca().add_patch(poly)\n \n plt.xlabel('$\\mu_{\\phi_1} [\\mathrm{mas~yr}^{-1}]$')\n plt.ylabel('$\\mu_{\\phi_2} [\\mathrm{mas~yr}^{-1}]$')\n\n plt.xlim(-12, 8)\n plt.ylim(-10, 10)",
"_____no_output_____"
]
],
[
[
"Notice that `add_patch` is like `invert_yaxis`; in order to call it, we have to use `gca` to get the current axes.\n\nHere's what the new version of the figure looks like. We've changed the labels on the axes to be consistent with the paper.",
"_____no_output_____"
]
],
[
[
"plt.rcParams['text.usetex'] = False\nplt.style.use('default')\n\nplot_proper_motion(centerline)",
"_____no_output_____"
]
],
[
[
"## Upper left\n\nNow let's work on the panel in the upper left. We have to reload `candidates`.",
"_____no_output_____"
]
],
[
[
"import os\n\nfilename = 'gd1_candidates.hdf5'\npath = 'https://github.com/AllenDowney/AstronomicalData/raw/main/data/'\n\nif not os.path.exists(filename):\n print(download(path+filename))",
"_____no_output_____"
],
[
"import pandas as pd\n\nfilename = 'gd1_candidates.hdf5'\n\ncandidate_df = pd.read_hdf(filename, 'candidate_df')",
"_____no_output_____"
]
],
[
[
"Here's a function that takes a `DataFrame` of candidate stars and plots their positions in GD-1 coordindates. ",
"_____no_output_____"
]
],
[
[
"def plot_first_selection(df):\n x = df['phi1']\n y = df['phi2']\n\n plt.plot(x, y, 'ko', markersize=0.3, alpha=0.3)\n\n plt.xlabel('$\\phi_1$ [deg]')\n plt.ylabel('$\\phi_2$ [deg]')\n plt.title('Proper motion selection', fontsize='medium')\n\n plt.axis('equal')",
"_____no_output_____"
]
],
[
[
"And here's what it looks like.",
"_____no_output_____"
]
],
[
[
"plot_first_selection(candidate_df)",
"_____no_output_____"
]
],
[
[
"## Lower right\n\nFor the figure in the lower right, we need to reload the merged `DataFrame`, which contains data from Gaia and photometry data from Pan-STARRS.",
"_____no_output_____"
]
],
[
[
"import pandas as pd\n\nfilename = 'gd1_merged.hdf5'\n\nmerged = pd.read_hdf(filename, 'merged')",
"_____no_output_____"
]
],
[
[
"From the previous notebook, here's the function that plots the color-magnitude diagram.",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\n\ndef plot_cmd(table):\n \"\"\"Plot a color magnitude diagram.\n \n table: Table or DataFrame with photometry data\n \"\"\"\n y = table['g_mean_psf_mag']\n x = table['g_mean_psf_mag'] - table['i_mean_psf_mag']\n\n plt.plot(x, y, 'ko', markersize=0.3, alpha=0.3)\n\n plt.xlim([0, 1.5])\n plt.ylim([14, 22])\n plt.gca().invert_yaxis()\n\n plt.ylabel('$g_0$')\n plt.xlabel('$(g-i)_0$')",
"_____no_output_____"
]
],
[
[
"And here's what it looks like.",
"_____no_output_____"
]
],
[
[
"plot_cmd(merged)",
"_____no_output_____"
]
],
[
[
"**Exercise:** Add a few lines to `plot_cmd` to show the Polygon we selected as a shaded area. \n\nRun these cells to get the polygon coordinates we saved in the previous notebook.",
"_____no_output_____"
]
],
[
[
"import os\n\nfilename = 'gd1_polygon.hdf5'\npath = 'https://github.com/AllenDowney/AstronomicalData/raw/main/data/'\n\nif not os.path.exists(filename):\n print(download(path+filename))",
"_____no_output_____"
],
[
"coords_df = pd.read_hdf(filename, 'coords_df')\ncoords = coords_df.to_numpy()\ncoords",
"_____no_output_____"
],
[
"# Solution\n\n#poly = Polygon(coords, closed=True, \n# facecolor='C1', alpha=0.4)\n#plt.gca().add_patch(poly)",
"_____no_output_____"
]
],
[
[
"## Subplots\n\nNow we're ready to put it all together. To make a figure with four subplots, we'll use `subplot2grid`, [which requires two arguments](https://matplotlib.org/3.3.1/api/_as_gen/matplotlib.pyplot.subplot2grid.html):\n\n* `shape`, which is a tuple with the number of rows and columns in the grid, and\n\n* `loc`, which is a tuple identifying the location in the grid we're about to fill.\n\nIn this example, `shape` is `(2, 2)` to create two rows and two columns.\n\nFor the first panel, `loc` is `(0, 0)`, which indicates row 0 and column 0, which is the upper-left panel.\n\nHere's how we use it to draw the four panels.",
"_____no_output_____"
]
],
[
[
"shape = (2, 2)\nplt.subplot2grid(shape, (0, 0))\nplot_first_selection(candidate_df)\n\nplt.subplot2grid(shape, (0, 1))\nplot_proper_motion(centerline)\n\nplt.subplot2grid(shape, (1, 0))\nplot_second_selection(selected)\n\nplt.subplot2grid(shape, (1, 1))\nplot_cmd(merged)\npoly = Polygon(coords, closed=True, \n facecolor='C1', alpha=0.4)\nplt.gca().add_patch(poly)\n\nplt.tight_layout()",
"_____no_output_____"
]
],
[
[
"We use [`plt.tight_layout`](https://matplotlib.org/3.3.1/tutorials/intermediate/tight_layout_guide.html) at the end, which adjusts the sizes of the panels to make sure the titles and axis labels don't overlap.\n\n**Exercise:** See what happens if you leave out `tight_layout`.",
"_____no_output_____"
],
[
"## Adjusting proportions\n\nIn the previous figure, the panels are all the same size. To get a better view of GD-1, we'd like to stretch the panels on the left and compress the ones on the right.\n\nTo do that, we'll use the `colspan` argument to make a panel that spans multiple columns in the grid.\n\nIn the following example, `shape` is `(2, 4)`, which means 2 rows and 4 columns.\n\nThe panels on the left span three columns, so they are three times wider than the panels on the right.\n\nAt the same time, we use `figsize` to adjust the aspect ratio of the whole figure.",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(9, 4.5))\n\nshape = (2, 4)\nplt.subplot2grid(shape, (0, 0), colspan=3)\nplot_first_selection(candidate_df)\n\nplt.subplot2grid(shape, (0, 3))\nplot_proper_motion(centerline)\n\nplt.subplot2grid(shape, (1, 0), colspan=3)\nplot_second_selection(selected)\n\nplt.subplot2grid(shape, (1, 3))\nplot_cmd(merged)\npoly = Polygon(coords, closed=True, \n facecolor='C1', alpha=0.4)\nplt.gca().add_patch(poly)\n\nplt.tight_layout()",
"_____no_output_____"
]
],
[
[
"This is looking more and more like the figure in the paper.\n\n**Exercise:** In this example, the ratio of the widths of the panels is 3:1. How would you adjust it if you wanted the ratio to be 3:2?",
"_____no_output_____"
],
[
"## Summary\n\nIn this notebook, we reverse-engineered the figure we've been replicating, identifying elements that seem effective and others that could be improved.\n\nWe explored features Matplotlib provides for adding annotations to figures -- including text, lines, arrows, and polygons -- and several ways to customize the appearance of figures. And we learned how to create figures that contain multiple panels.",
"_____no_output_____"
],
[
"## Best practices\n\n* The most effective figures focus on telling a single story clearly and compellingly.\n\n* Consider using annotations to guide the readers attention to the most important elements of a figure.\n\n* The default Matplotlib style generates good quality figures, but there are several ways you can override the defaults.\n\n* If you find yourself making the same customizations on several projects, you might want to create your own style sheet.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
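"code",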
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
]
] |
d0ea3f39c5f33e554604c9aa4aa5c141941e2f2a | 8,783 | ipynb | Jupyter Notebook | notebooks/exercises/.ipynb_checkpoints/2 - Data types and expressions-checkpoint.ipynb | smasoka/python-introduction | 923d7e634e6db1016c783b999477cd86188d99fb | [
"MIT"
] | 10 | 2017-02-09T12:43:32.000Z | 2022-01-02T14:40:34.000Z | notebooks/exercises/.ipynb_checkpoints/2 - Data types and expressions-checkpoint.ipynb | smasoka/python-introduction | 923d7e634e6db1016c783b999477cd86188d99fb | [
"MIT"
] | 1 | 2018-10-16T12:13:17.000Z | 2018-10-16T12:13:17.000Z | notebooks/exercises/.ipynb_checkpoints/2 - Data types and expressions-checkpoint.ipynb | smasoka/python-introduction | 923d7e634e6db1016c783b999477cd86188d99fb | [
"MIT"
] | 12 | 2017-06-22T23:46:41.000Z | 2021-12-26T15:22:24.000Z | 23.546917 | 286 | 0.543664 | [
[
[
"# Exercises\n## Playing with the interpreter\n\nTry to execute some simple statements and expressions (one at a time) e.g\n```\nprint(\"Hello!\")\n1j**2\n1 / 2\n1 // 2\n5 + 5\n10 / 2 + 5\nmy_tuple = (1, 2, 3)\nmy_tuple[0] = 1\n2.3**4.5\n```\nDo you understand what is going on in all cases?",
"_____no_output_____"
],
[
"Most Python functions and objects can provide documentation via **help** function. Look the documentation of e.g open function with ```help(open)```\n",
"_____no_output_____"
],
[
"Play with tabulator completion, by typing just ```pr``` and pressing then tabulator key. Pressing Shift-tab (after finalising completion) one sees also short documentation about the function or object. This works also on variable names, try e.g.\n```\nmy_extremely_long_variable_name = 5\nmy <TAB>\n```",
"_____no_output_____"
],
[
"## Basic syntax\nTry to assign the value 6 to the following variable names\n````\nfirst-name\nfamily_name\n3PO\n____variable\ninb4tool8\nprint\nin\n```\n\nWhich of them are valid to assign to? \n\nExtra: why do you think the ones that cause an error are not valid? What's the reason?",
"_____no_output_____"
],
[
"You probably noticed that even though ``print`` is a method in the namespace it was still valid to create a variable called ``print``. If you now try to actually print something, you will get an error. For built-in functions (such as print) one can recover with the following code",
"_____no_output_____"
]
],
[
[
"print = __builtin__.print\nprint(\"hello\")",
"_____no_output_____"
]
],
[
[
"Are the following pieces valid Python code?\n\n** Case 1 **\n```\nnumbers = [4, 5, 6, 9, 11]\nsum = 0\nfor n in numbers:\n sum += n\n print(\"Sum is now\"), sum\n```\n\n** Case 2 **\n```\nx = 11\ntest(x)\n\ndef test(a):\n if a < 0:\n print(\"negative number\")\n```",
"_____no_output_____"
],
[
"## Tuples and lists\n1. Create a tuple called ``mytuple``, with the following strings: \"sausage\", \"eggs\" and \"bacon\"\n2. check it's type using ``type()``\n3. Create than a list called ``mylist`` with the same contents. You use can the normal list definition syntax (``[]``) or coerce it from the tuple with the ``list()`` function.",
"_____no_output_____"
],
[
"Attempt to append the string \"spam\" \nto ``mylist`` and ``mytuple`` using ``append``.",
"_____no_output_____"
],
[
"List objects have a sort()\nfunction, use that for sorting the list alphabetically (e.g.\nmylist.sort() ). What is now the first item of the list?\n\nNext, remove the first item from the list, investigate the contents and remove then last item from the list.",
"_____no_output_____"
],
[
"### Slicing\n\nUsing ``range()`` create a list that has the numbers from 50 to 0 with a step of -2. Note that in Python 3 ``range()`` returns an *iterator* (we'll discuss iterators more later on), ``list(range(args))`` returns an actual list.",
"_____no_output_____"
],
[
"Using slicing syntax, select\n* the last 4 items from the list\n* the items from index 10 to index 13\n* the first 5 items from the list",
"_____no_output_____"
],
[
"Read up on the [stride syntax](https://en.wikipedia.org/wiki/Array_slicing#1991:_Python) . Then using it select \n* every third value in the list\n* the values with an odd-numbered index in the list",
"_____no_output_____"
],
[
"### Multidimensional lists\nCreate a two dimensional list of (x,y) value pairs, i.e.\narbitrary long list whose elements are two element lists.\n\nAre you able to use slicing for extracting only the y values? (Answer is no, but try it in any case)",
"_____no_output_____"
],
[
"## Dictionaries\nCreate a dictionary whose keys are the fruits “pineapple”, “strawberry”, and “banana”. As values use numbers\nrepresenting e.g. prices. \n\nAdd “orange” to the dictionary and then remove “banana” from the dictionary. Investigate the contents of dictionary and pay attention to the order of key-value pairs.",
"_____no_output_____"
],
[
"# Bonus exercises\nCreate a new “fruits” dictionary where the values are also\ndictionaries containing key-value pairs for color and weight,\ne.g. \n```\nfruits['apple'] = {'color':'green', 'weight': 120}\n```\nChange the color of *apple* from green to red",
"_____no_output_____"
],
[
"It is often useful idiom to create empty lists or dictionaries\nand add contents little by little.\n\nCreate first an empty dictionary for a mid-term grades of\nstudents. Then, add a key-value pairs where the keys are\nstudent names and the values are empty lists. \n\nFinally, add values to the lists and investigate the contents of the\ndictionary.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
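"code",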
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
d0ea443cbd87f5f6da1cb89b8a9313e6e0b4ddee | 83,953 | ipynb | Jupyter Notebook | classification/notebooks/.ipynb_checkpoints/05 - Validation Measures-checkpoint.ipynb | pshn111/Machine-Learning-Package | fbbaa44daf5f0701ea77e5b62eb57ef822e40ab2 | [
"MIT"
] | null | null | null | classification/notebooks/.ipynb_checkpoints/05 - Validation Measures-checkpoint.ipynb | pshn111/Machine-Learning-Package | fbbaa44daf5f0701ea77e5b62eb57ef822e40ab2 | [
"MIT"
] | null | null | null | classification/notebooks/.ipynb_checkpoints/05 - Validation Measures-checkpoint.ipynb | pshn111/Machine-Learning-Package | fbbaa44daf5f0701ea77e5b62eb57ef822e40ab2 | [
"MIT"
] | null | null | null | 57.73934 | 20,620 | 0.706967 | [
[
[
"<h1> Data Transformation </h1>",
"_____no_output_____"
],
[
"## Logistic Regression - on [Titanic Dataset](https://www.kaggle.com/c/titanic)\n\n- Models the probability an object belongs to a class\n- Values ranges from 0 to 1\n- Can use threshold to classify into which classes a class belongs\n- An S-shaped curve\n\n$\n\\begin{align}\n\\sigma(t) = \\frac{1}{1 + e^{-t}}\n\\end{align}\n$\n",
"_____no_output_____"
],
[
"#### Read the data",
"_____no_output_____"
]
],
[
[
"import pandas as pd\ndf_train = pd.read_csv('../data/titanic_train.csv')",
"_____no_output_____"
],
[
"df_train.head(8)",
"_____no_output_____"
]
],
[
[
"## Data Statistics",
"_____no_output_____"
],
[
"#### Describing the statistics for numerical features",
"_____no_output_____"
]
],
[
[
"df_train.describe()",
"_____no_output_____"
]
],
[
[
"#### Find the count of the non-NaN values per feature",
"_____no_output_____"
]
],
[
[
"df_train.count()",
"_____no_output_____"
]
],
[
[
"## What features can be removed?",
"_____no_output_____"
],
[
"### Remove features that are not related to your outcome",
"_____no_output_____"
]
],
[
[
"df_train.drop(['Name', 'Ticket'], axis=1, inplace=True)",
"_____no_output_____"
]
],
[
[
"### Remove column with missing data",
"_____no_output_____"
]
],
[
[
"df_train.drop(['Cabin'], axis=1, inplace=True)",
"_____no_output_____"
]
],
[
[
"## Data Imputation - Filling in missing values\n- Select a percentage threshold that you would want to accomodate\n- Around 1/5th to 1/3rd of the data (20% to 33.3%)\n- if more than 50% of the data is missing, you will be generating data for the majority of your dataset - Not a good thing to do",
"_____no_output_____"
]
],
[
[
"from matplotlib import pyplot as plt\nimport seaborn as sns\nplt.figure(figsize=(7,5))\nsns.boxplot(x='Pclass',y='Age',data=df_train)",
"_____no_output_____"
],
[
"from matplotlib import pyplot as plt\nimport seaborn as sns\nplt.figure(figsize=(7,5))\nsns.boxplot(x='Sex',y='Age',data=df_train)",
"_____no_output_____"
],
[
"from matplotlib import pyplot as plt\nimport seaborn as sns\nplt.figure(figsize=(7,5))\nsns.boxplot(x='Embarked',y='Age',data=df_train)",
"_____no_output_____"
],
[
"def add_age(cols):\n Age = cols[0]\n Pclass = cols[1]\n if pd.isnull(Age):\n return int(df_train[df_train[\"Pclass\"] == Pclass][\"Age\"].mean())\n else:\n return Age",
"_____no_output_____"
],
[
"df_train['Age'] = df_train[['Age', 'Pclass']].apply(add_age,axis=1)",
"_____no_output_____"
],
[
"df_train.count()",
"_____no_output_____"
]
],
[
[
"### Drop Rows",
"_____no_output_____"
]
],
[
[
"df_train.dropna(inplace=True)",
"_____no_output_____"
],
[
"df_train.count()",
"_____no_output_____"
]
],
[
[
"## Data Transformation",
"_____no_output_____"
],
[
"#### Convert the categorical values to numeric\n- Find the columns that are explicitly categorical - like male, female\n- Find the columns that are although numerical, represent categorical features",
"_____no_output_____"
],
[
"### One-Hot Encoding\n- A technique to create multiple feature for each corrsponding value",
"_____no_output_____"
],
[
"<img src='img/one_hot_encoding.png'>",
"_____no_output_____"
]
],
[
[
"import numpy as np\ncol = 'Sex'\nprint(np.unique(df_train[col]))",
"['female' 'male']\n"
],
[
"import numpy as np\ncol = 'Embarked'\nprint(np.unique(df_train[col]))",
"['C' 'Q' 'S']\n"
],
[
"import numpy as np\ncol = 'Pclass'\nprint(np.unique(df_train[col]))",
"[1 2 3]\n"
],
[
"sex = pd.get_dummies(df_train[\"Sex\"],drop_first=True)\nembarked = pd.get_dummies(df_train[\"Embarked\"],drop_first=True)\npclass = pd.get_dummies(df_train[\"Pclass\"],drop_first=True)",
"_____no_output_____"
]
],
[
[
"### Drop the columns that were used for transformation",
"_____no_output_____"
]
],
[
[
"df_train.drop(['Sex', 'Embarked', 'Pclass', 'PassengerId'], axis=1, inplace=True)",
"_____no_output_____"
],
[
"df_train.head()",
"_____no_output_____"
]
],
[
[
"### Add encoded columns to the training dataset",
"_____no_output_____"
]
],
[
[
"df_train = pd.concat([df_train,pclass,sex,embarked],axis=1)",
"_____no_output_____"
],
[
"df_train.head()",
"_____no_output_____"
]
],
[
[
"# Save the transformed file as a pickle file",
"_____no_output_____"
]
],
[
[
"df_train.shape",
"_____no_output_____"
],
[
"import pickle as pkl\ndf_train.to_pickle('../data/titanic_tansformed.pkl')",
"_____no_output_____"
]
],
[
[
"## Logistic Regression",
"_____no_output_____"
]
],
[
[
"data = df_train.drop(\"Survived\",axis=1)\nlabel = df_train[\"Survived\"]",
"_____no_output_____"
],
[
"data.head()",
"_____no_output_____"
],
[
"from sklearn.cross_validation import train_test_split\ndata_train, data_test, label_train, label_test = train_test_split(data, label, test_size = 0.3, random_state = 101)",
"_____no_output_____"
],
[
"from sklearn.linear_model import LogisticRegression\n\n# Run Logistic Regression\nlog_regr = LogisticRegression()\nlog_regr.fit(data_train, label_train)\npredictions = log_regr.predict(data_test)",
"_____no_output_____"
]
],
[
[
"### Accuracy",
"_____no_output_____"
]
],
[
[
"print('Accuracy', log_regr.score(data_test, label_test))\nprint('Coefficients', log_regr.coef_)\nprint('Intercept', log_regr.intercept_)",
"Accuracy 0.8127340823970037\nCoefficients [[-0.02813121 -0.22110356 -0.0893393 0.00480662 -0.49460192 -1.58514132\n -2.33322621 -0.02832966 -0.26523619]]\nIntercept [2.81524481]\n"
]
],
[
[
"### Precision Recall",
"_____no_output_____"
]
],
[
[
"from sklearn.metrics import classification_report\nprint(classification_report(label_test, predictions))",
" precision recall f1-score support\n\n 0 0.81 0.91 0.86 163\n 1 0.83 0.65 0.73 104\n\navg / total 0.81 0.81 0.81 267\n\n"
]
],
[
[
"## Cross Validation",
"_____no_output_____"
]
],
[
[
"from sklearn.model_selection import StratifiedKFold\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import cross_val_score\n# skf = StratifiedKFold(n_splits=5)\n\nlog_regr = LogisticRegression()\nlog_regr.fit(data_train, label_train)\nscore = log_regr.score(data_train, label_train)\nprint('Train accuracy score', score)\n\n\nscore_cv = cross_val_score(log_regr, data_train, label_train, cv=10, scoring='accuracy')\nprint('Cross Val Accuracy for each run', score_cv)\nprint('CrossVal Accuracy', score_cv.mean())\n",
"Train accuracy score 0.8102893890675241\nCross Val Accuracy for each run [0.74603175 0.6984127 0.77777778 0.88888889 0.79365079 0.76190476\n 0.81967213 0.81967213 0.90163934 0.7704918 ]\nCrossVal Accuracy 0.7978142076502732\n"
]
],
[
[
"## AUC - Receiver Operating Characteristics\n- How much a model is capable of distinguishing between classes\n- Higher the AUC, better the model is\n\n$\n\\begin{align}\nTrue Positive Rate = \\frac{TP}{TP + FN}\n\\end{align}\n$\n\n<br>\n$\n\\begin{align}\n\\ False Positive Rate = 1 - \\frac{TN}{TN + FP} = \\frac{FP}{TN + FP}\n\\end{align}\n$",
"_____no_output_____"
]
],
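[
[
"As a quick illustration of the formulas above, TPR and FPR can also be computed directly from the confusion matrix (a minimal sketch, assuming `label_test` and `predictions` from the earlier cells):",
"_____no_output_____"
]
],
[
[
"from sklearn.metrics import confusion_matrix\n\n# unpack the binary confusion matrix: rows = actual, columns = predicted\ntn, fp, fn, tp = confusion_matrix(label_test, predictions).ravel()\nprint('TPR =', tp / (tp + fn))\nprint('FPR =', fp / (fp + tn))",
"_____no_output_____"
]
],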
[
[
"from sklearn import metrics\n\nfpr, tpr, threshold = metrics.roc_curve(label_test, log_regr.predict(data_test))\nroc_auc = metrics.auc(fpr, tpr)\nprint('AUCROC Stage1 vs Healthy: ' , roc_auc)",
"AUCROC Stage1 vs Healthy: 0.7839782916470033\n"
],
[
"import matplotlib.pyplot as plt\n\nplt.title('Receiver Operating Characteristic')\nplt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % roc_auc)\nplt.legend(loc = 'lower right')\nplt.plot([0, 1], [0, 1],'r--')\nplt.xlim([0, 1])\nplt.ylim([0, 1])\nplt.ylabel('True Positive Rate')\nplt.xlabel('False Positive Rate')\nplt.show()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
d0ea533eb5da2266755b4e7109ddac45520ee5c4 | 16,684 | ipynb | Jupyter Notebook | notebooks_business_vitality/week_2/9_1_Disclosure_Review.ipynb | Coleridge-Initiative/ada-2018-uchicago | 8284c893063bcf44d59ce0308557865c962e2738 | [
"CC0-1.0"
] | 5 | 2019-01-23T17:55:07.000Z | 2021-11-30T17:24:03.000Z | notebooks_business_vitality/week_2/9_1_Disclosure_Review.ipynb | withdata/ada-2018-uchicago | 8284c893063bcf44d59ce0308557865c962e2738 | [
"CC0-1.0"
] | null | null | null | notebooks_business_vitality/week_2/9_1_Disclosure_Review.ipynb | withdata/ada-2018-uchicago | 8284c893063bcf44d59ce0308557865c962e2738 | [
"CC0-1.0"
] | 4 | 2018-11-19T22:58:12.000Z | 2020-12-27T23:55:52.000Z | 35.497872 | 574 | 0.628626 | [
[
[
"**_Privacy and Confidentiality Exercises_**",
"_____no_output_____"
],
[
"This notebook shows you how to prepare your results for export and what you have to keep in mind in general when you want to export output. You will learn how to prepare files for export so they meet our export requirements.",
"_____no_output_____"
]
],
[
[
"# Load packages\n%pylab inline\nfrom __future__ import print_function\nimport os\nimport pandas as pd\nimport numpy as np\nimport psycopg2\n\nimport matplotlib.pyplot as plt\n%matplotlib inline\nmatplotlib.style.use('ggplot')",
"_____no_output_____"
]
],
[
[
"# General Remarks on Disclosure Review\nThis notebook provides you with information on how to prepare research output for disclosure control. It outlines how to prepare different kind of outputs before submitting an export request and gives you an overview of the information needed for disclosure review. \n\n## Files you can export\nIn general you can export any kind of file format. However, most research results that researchers typically export are tables, graphs, regression output and aggregated data. Thus, we ask you to export one of these types which implies that every result you would like to export needs to be saved in either .csv, .txt or graph format.\n\n## Jupyter notebooks are only exported to retrieve code\nUnfortunately, you can't export results in a jupyter notebook. Doing disclosure reviews on output in jupyter notebooks is too burdensome for us. Jupyter notebooks will only be exported when the output is deleted for the purpose of exporting code. This does not mean that you won't need your jupyter notebooks during the export process. \n\n## Documentation of code is important\nDuring the export process we ask you to provide the code for every output you are asking to export. It is important for ADRF staff to have the code to better understand what you exactly did. Understanding how research results are created is important to understand your research output. Thus, it is important to document every single step of your analysis in your jupyter notebook. \n\n## General rules to keep in mind\nA more detailed description of the rules for exporting results can be found on the class website. This is just a quick overview. We recommend that you to go to the class website and read the entire guidelines before you prepare your files for export. \n- The disclosure review is based on the underlying observations of your study. Every statistic you want to export should be based on at least 10 individual data points\n- Document your code so the reviewer can follow your data work. Assessing re-identification risks highly depends on the context. Thus it is important that you provide context info with your anlysis for the reviewer\n- Save the requested output with the corresponding code in you input and output folder. Make sure the code is executable. The code should exactly produce the output you requested\n- In case you are exporting powerpoint slides that show project results you have to provide the code which produces the output in the slide\n- Please export results only when there are final and you need them for your presentation or final projcet report",
"_____no_output_____"
],
[
"# Disclosure Review Walkthrough\n\nWe will IL DES data and MO DES to construct our statistics we are interested in, and prepare it in a way so we can submit the output for disclosure review. ",
"_____no_output_____"
]
],
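[
[
"Before requesting an export, the 10-observation rule is easy to check programmatically (a minimal sketch with illustrative names, assuming a dataframe `df` and a list of grouping columns `group_cols`; this is not part of the official class materials):",
"_____no_output_____"
]
],
[
[
"def check_min_cell_size(df, group_cols, min_n=10):\n    # count observations per cell and flag cells below the threshold\n    counts = df.groupby(group_cols).size()\n    too_small = counts[counts < min_n]\n    if len(too_small) > 0:\n        print('Cells below the threshold (would be suppressed):')\n        print(too_small)\n    else:\n        print('All cells have at least', min_n, 'observations')",
"_____no_output_____"
]
],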
[
[
"# get working directory\nmypath = (os.getcwd())\nprint(mypath)",
"_____no_output_____"
],
[
"# connect to database\ndb_name = \"appliedda\"\nhostname = \"10.10.2.10\"\nconn = psycopg2.connect(database=db_name, host = hostname) ",
"_____no_output_____"
]
],
[
[
"## pull data\n\nIn this example we will use the workers who had a job in both MO and IL at some point over the course of our datasets (2005-2016)",
"_____no_output_____"
]
],
[
[
"# Get data\nquery = \"\"\"\nSELECT *, il_wage + mo_wage AS earnings\nFROM ada_18_uchi.il_mo_overlap_by_qtr\nWHERE year = 2011 \nAND quarter IN (2,3)\"\"\"",
"_____no_output_____"
],
[
"# Save query in dataframe\ndf = pd.read_sql( query, con = conn )",
"_____no_output_____"
],
[
"# Check dataframe\ndf.head()",
"_____no_output_____"
],
[
"# another way to check dataframe\ndf.info()",
"_____no_output_____"
],
[
"# basic stats of\ndf.describe()",
"_____no_output_____"
],
[
"# let's add an earnings categorization for \"low\", \"mid\" and \"high\" using a simple function\ndef earn_calc(earn):\n if earn < 16500:\n return('low')\n elif earn < 45000:\n return('mid')\n else:\n return('high')\n ",
"_____no_output_____"
],
[
"earn_calc(24000)",
"_____no_output_____"
],
[
"df['earn_cat'] = df['earnings'].apply(lambda x: earn_calc(x))",
"_____no_output_____"
]
],
[
[
"We now have loaded the data that we need to generate some basic statistics about our populations we want to compare",
"_____no_output_____"
]
],
[
[
"# Let's look at some first desccriptives by group\ngrouped = df.groupby('earn_cat')\ngrouped.describe()",
"_____no_output_____"
],
[
"grouped.describe().T",
"_____no_output_____"
]
],
[
[
"Statistics in this table will be released if the statistic is based on at least 10 entities (in this example individuals). We can see that the total number of individuals we observe in each group completely satisfies this (see cell count). However, we also report percentiles, and we report the minimum and maximum value. Especially the minimum and maximum value are most likely representing one individual person. \n\nThus, during disclosure review these values will be supressed. ",
"_____no_output_____"
]
],
[
[
"# Now let's export the statistics. Ideally we want to have a csv file\n# We can safe the statistics in a dataframe\nexport1 = grouped.describe()\n# and then print to csv\nexport1.to_csv('descriptives_by_group.csv')",
"_____no_output_____"
]
],
[
[
"### Reminder: Export of Statistics\nYou can save any dataframe as a csv file and export this csv file. The only thing you have to keep in mind is that besides the statistic X you are interested in you have to include a variable count of X so we can see on how many observations the statistic is based on. This also applies if you aggregate data. For example if you agregate by benefit type, we need to know how many observations are in each benefit program (because after the aggregation each benefit type will be only one data point). ",
"_____no_output_____"
],
[
"### Problematic Output\nSome subgroups (eg for some of the Illinois datasets dealing with race and gender) will result in cell sizes representing less than 10 people. \n\nTables with cells representing less than 10 individuals won't be released. In this case, disclosure review would mean to delete all cells with counts of less than 10. In addition, secondary suppression has to take place. The disclosure reviewer has to delete as many cells as needed to make it impossible to recalculate the suppressed values. \n\n### How to do it better\nInstead of asking for export of a tables like this, you should prepare your tables in advance that all cell sizes are at least represented by a minimum of 10 observations. ",
"_____no_output_____"
],
[
"### Reminder: Export of Tables\nFor tables of any kind you need to provide the underlying counts of the statistics presented in the table. Make sure you provide all counts. If you calculate ratios, for example employment rates you need to provide the count of individuals who are employed and the count of the ones who are not. If you are interested in percentages we still need the underlying counts for disclosure review. Please label the table in a way that we can easily understand what you are plotting. ",
"_____no_output_____"
]
],
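[
[
"For example, if you export a rate you should include both the numerator and denominator counts, not just the ratio. The sketch below is purely illustrative (it assumes a dataframe `df` with a boolean column `employed` and a grouping column `group`, which do not exist in this notebook, so it is left commented out):",
"_____no_output_____"
]
],
[
[
"# Sketch: export a rate together with the counts it is based on\n# summary = df.groupby('group').agg(\n#     n_employed=('employed', 'sum'),\n#     n_total=('employed', 'size'),\n# )\n# summary['employment_rate'] = summary['n_employed'] / summary['n_total']\n# summary.to_csv('employment_rate_with_counts.csv')",
"_____no_output_____"
]
],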
[
[
"df[['il_flag', 'mo_flag']].describe(percentiles = [.5, .9, .99, .999])",
"_____no_output_____"
],
[
"# for this example let's cap the job counts to 5\ndf['il_flag'] = df['il_flag'].apply(lambda x: x if x < 5 else 5)\ndf['mo_flag'] = df['mo_flag'].apply(lambda x: x if x < 5 else 5)",
"_____no_output_____"
],
[
"# Let's say we are interested in plotting parts of the crosstabulation as a graph, for example benefit type and race\n# First we need to calulate the counts\ngraph = df.groupby(['earn_cat', 'il_flag'])['ssn'].count()",
"_____no_output_____"
],
[
"# Note: we need to add the unstack command here because our dataframe has nested indices. \n# We need to flatten out the data before plotting the graph\nprint(graph)\nprint(graph.unstack())",
"_____no_output_____"
],
[
"# Now we can generate the graph\nmygraph = graph.unstack().plot(kind='bar')",
"_____no_output_____"
]
],
[
[
"In this graph it is not clearly visible how many observations are in each bar. Thus we either have to provide a corresponding table (as we generated earlier), or we can use the table=True option to add a table of counts to the graph. In addition, we wnat to make sure that all our axes and legend are labeled properly.",
"_____no_output_____"
]
],
[
[
"# Graphical representation including underlying values: the option table=True displays the underlying counts\nmygraph = graph.unstack().plot(kind='bar', table=True, figsize=(7,5), fontsize=7)\n# Adjust legend and axes\nmygraph.legend([\"Unknown\",\"1\", \"2\", \"3\", \"4\", '5'], loc = 1, ncol= 3, fontsize=9)\nmygraph.set_ylabel(\"Number of Observations\", fontsize=9)\n# Add table with counts\n# We don't need an x axis if we display table\nmygraph.axes.get_xaxis().set_visible(False)\n# Grab table info\ntable = mygraph.tables[0]\n# Format table and figure\ntable.set_fontsize(9)",
"_____no_output_____"
]
],
[
[
"> in this example there is a problematic value, we will instead cap to 4 maximum jobs to ensure all cells are more than 10",
"_____no_output_____"
]
],
[
[
"# for this example let's cap the job counts to 5\ndf['il_flag'] = df['il_flag'].apply(lambda x: x if x < 4 else 4)\ndf['mo_flag'] = df['mo_flag'].apply(lambda x: x if x < 4 else 4)",
"_____no_output_____"
],
[
"# create our new \"graph\" dataframe to plot with\ngraph = df.groupby(['earn_cat', 'il_flag'])['ssn'].count()",
"_____no_output_____"
],
[
"# confirm we solved the issue\n\nmygraph = graph.unstack().plot(kind='bar', table=True, figsize=(7,5), fontsize=7)\n# Adjust legend and axes\nmygraph.legend([\"Unknown\",\"1\", \"2\", \"3\", \"4\", '5'], loc = 1, ncol= 3, fontsize=9)\nmygraph.set_ylabel(\"Number of Observations\", fontsize=9)\n# Add table with counts\n# We don't need an x axis if we display table\nmygraph.axes.get_xaxis().set_visible(False)\n# Grab table info\ntable = mygraph.tables[0]\n# Format table and figure\ntable.set_fontsize(9)",
"_____no_output_____"
],
[
"# We want to export the graph without the table though\n# Because we already generated the crosstab earlier which shows the counts\nmygraph = graph.unstack().plot(kind='bar', figsize=(7,5), fontsize=7, rot=0)\n# Adjust legend and axes\nmygraph.legend([\"Unknown\",\"1\", \"2\", \"3\", \"4\", '5'], loc = 1, ncol= 3, fontsize=9)\nmygraph.set_ylabel(\"Number of Observations\", fontsize=9)\nmygraph.set_xlabel(\"Income category\", fontsize=9)\nmygraph.annotate('Source: IL & MO DES', xy=(0.7,-0.2), xycoords=\"axes fraction\");",
"_____no_output_____"
],
[
"# Now we can export the graph as pdf\n# Save plot to file\nexport2 = mygraph.get_figure()\nexport2.set_size_inches(15,10, forward=True)\nexport2.savefig('barchart_jobs_income_category.pdf', bbox_inches='tight', dpi=300)",
"_____no_output_____"
]
],
[
[
"### Reminder: Export of Graphs\nIt is important that every point which is plotted in a graph is based on at least 10 observations. Thus scatterplots for example cannot be released. In case you are interested in a histogram you have to change the bin size to make sure that every bin contains at least 10 people. In addition to the graph you have to provide the ADRF with the underlying table in a .csv or .txt file. This file should have the same name as the graph so ADRF can directly see which files go together. Alternatively you can include the counts in the graph as shown in the example above. ",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
d0ea53bc250cb6c927150546e931025a9648311d | 75,238 | ipynb | Jupyter Notebook | t2dv2-candidate-feature-model_training/table-linker-t2dv2-candidate-feature-model_training.ipynb | nicklein/table-linker-pipelines | 3362ef348c0e156a6082c357b95951cd4b293ade | [
"MIT"
] | null | null | null | t2dv2-candidate-feature-model_training/table-linker-t2dv2-candidate-feature-model_training.ipynb | nicklein/table-linker-pipelines | 3362ef348c0e156a6082c357b95951cd4b293ade | [
"MIT"
] | 1 | 2021-07-16T22:45:20.000Z | 2021-07-16T22:45:20.000Z | t2dv2-candidate-feature-model_training/table-linker-t2dv2-candidate-feature-model_training.ipynb | nicklein/table-linker-pipelines | 3362ef348c0e156a6082c357b95951cd4b293ade | [
"MIT"
] | 6 | 2021-04-05T10:59:55.000Z | 2021-08-17T20:17:02.000Z | 40.020213 | 479 | 0.479359 | [
[
[
"import pandas as pd\nimport os\nimport glob\nimport shutil\nimport random\nimport time\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.metrics import mean_squared_error\nimport pickle\nimport numpy as np",
"_____no_output_____"
],
[
"es_url = 'http://ckg07:9200'\nes_index = 'wikidatadwd-augmented'\n\n# GDrive Path: /table-linker-dataset/2019-iswc_challenge_data/t2dv2/canonical-with-context/t2dv2-train-canonical/\ntrain_path = '/Users/rijulvohra/Documents/work/Novartis-ISI/novartis-isi-git/entity_linking/t2dv2-raw/t2dv2/canonical-with-context/t2dv2-train-canonical/'\n# GDrive Path: /table-linker-dataset/2019-iswc_challenge_data/t2dv2/canonical-with-context/t2dv2-dev-canonical/\ndev_path = '/Users/rijulvohra/Documents/work/Novartis-ISI/novartis-isi-git/entity_linking/t2dv2-raw/t2dv2/canonical-with-context/t2dv2-dev-canonical/'\n\n# GDrive Path: /table-linker-dataset/2019-iswc_challenge_data/t2dv2/canonical-with-context/t2dv2-train-candidates-dwd-v2/\ntrain_candidate_path = '/Users/rijulvohra/Documents/work/Novartis-ISI/novartis-isi-git/entity_linking/t2dv2-raw/t2dv2/canonical-with-context/t2dv2-train-candidates-dwd-v2/'\n# GDrive Path: /table-linker-dataset/2019-iswc_challenge_data/t2dv2/canonical-with-context/t2dv2-dev-candidates-dwd-v2/\ndev_candidate_path = '/Users/rijulvohra/Documents/work/Novartis-ISI/novartis-isi-git/entity_linking/t2dv2-raw/t2dv2/canonical-with-context/t2dv2-dev-candidates-dwd-v2/'\n\n# GDrive Path: /table-linker-dataset/2019-iswc_challenge_data/t2dv2/ground_truth/Xinting_GT_csv\nground_truth_files = '/Users/rijulvohra/Documents/work/Novartis-ISI/novartis-isi-git/entity_linking/Xinting_GT_csv/round_1/'",
"_____no_output_____"
],
[
"aux_field = 'graph_embedding_complex,class_count,property_count'\ntemp_dir = './temp' #temp directory to store intermediate files\n\n#directory to store the property count file for each table. Can be directly used for computing the tf-idf features \n#without running the candidate generation process again which is expensive\n\n#GDrive Path: /table-linker-dataset/2019-iswc_challenge_data/t2dv2/canonical-with-context/train_prop_count/\ntrain_prop_count = './train_prop_count/' \n#GDrive Path: /table-linker-dataset/2019-iswc_challenge_data/t2dv2/canonical-with-context/dev_prop_count/\ndev_prop_count = './dev_prop_count/'\n\n#GDrive Path: /table-linker-dataset/2019-iswc_challenge_data/t2dv2/canonical-with-context/train_class_count/\ntrain_class_count = './train_class_count/'\n#GDrive Path: /table-linker-dataset/2019-iswc_challenge_data/t2dv2/canonical-with-context/dev_class_count/\ndev_class_count = './dev_class_count/'\n\n\n!mkdir -p $temp_dir\n!mkdir -p $train_prop_count\n!mkdir -p $dev_prop_count\n!mkdir -p $train_class_count\n!mkdir -p $dev_class_count\ncandidates = os.path.join(temp_dir,'candidates.csv')\nembedding_file = os.path.join(temp_dir, 'graph_embedding_complex.tsv')\nprint(candidates)",
"./temp/candidates.csv\n"
],
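[
"# Sketch (illustrative, hypothetical file name): the cached *_prop_count.tsv files\n# written below can later be re-read without rerunning the expensive candidate\n# generation step, e.g.:\n# prop_df = pd.read_csv(os.path.join(train_prop_count, 'some_table_prop_count.tsv'), sep='\\t')",
"_____no_output_____"
],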
[
"def cand_feat_generation(path, gt_path, output_path, class_count, prop_count):\n for file in glob.glob(path + '*.csv')[-1:]:\n st = time.time()\n filename = file.split('/')[-1]\n print(filename)\n gt_file = os.path.join(ground_truth_files, filename)\n output_file = os.path.join(output_path, filename)\n \n !tl clean -c label -o label_clean $file / \\\n --url $es_url --index $es_index \\\n get-fuzzy-augmented-matches -c label_clean \\\n --auxiliary-fields {aux_field} \\\n --auxiliary-folder $temp_dir / \\\n --url $es_url --index $es_index \\\n get-exact-matches -c label_clean \\\n --auxiliary-fields {aux_field} \\\n --auxiliary-folder {temp_dir} / \\\n ground-truth-labeler --gt-file $gt_file > $candidates\n \n for field in aux_field.split(','):\n aux_list = []\n for f in glob.glob(f'{temp_dir}/*{field}.tsv'):\n aux_list.append(pd.read_csv(f, sep='\\t', dtype=object))\n aux_df = pd.concat(aux_list).drop_duplicates(subset=['qnode'])\n if field == 'class_count':\n class_count_file = os.path.join(class_count, filename.strip('.csv') + '_class_count.tsv')\n aux_df.to_csv(class_count_file, sep='\\t', index=False)\n elif field == 'property_count':\n prop_count_file = os.path.join(prop_count, filename.strip('.csv') + '_prop_count.tsv')\n aux_df.to_csv(prop_count_file, sep='\\t', index=False)\n else:\n aux_df.to_csv(f'{temp_dir}/{field}.tsv', sep='\\t', index=False)\n \n !tl string-similarity -i --method symmetric_monge_elkan:tokenizer=word -o monge_elkan $candidates \\\n / string-similarity -i --method jaccard:tokenizer=word -c kg_descriptions context -o des_cont_jaccard \\\n / string-similarity -i --method jaro_winkler -o jaro_winkler \\\n / score-using-embedding --column-vector-strategy centroid-of-singletons -o graph-embedding-score \\\n --embedding-file $embedding_file \\\n / create-singleton-feature -o singleton\\\n / generate-reciprocal-rank -c graph-embedding-score -o reciprocal_rank\\\n / mosaic-features -c kg_labels --num-char --num-tokens > $output_file\n \n print(time.time() - st)\n\n",
"_____no_output_____"
],
[
"cand_feat_generation(train_path, ground_truth_files, train_candidate_path, train_class_count, train_prop_count)",
"_____no_output_____"
],
[
"candidate_generation(dev_path, ground_truth_files, dev_candidate_path, dev_class_count, dev_prop_count)",
"14067031_0_559833072073397908.csv\nQnodes to lookup: 7693\nQnodes from file: 7438\n412.62110209465027\n"
]
],
[
[
"### Generate Balanced Training Data",
"_____no_output_____"
]
],
[
[
"training_datapath = '../random_forest_ranking/training_data_dwd.csv'",
"_____no_output_____"
],
[
"final_list = []\nfor i,file in enumerate(glob.glob(train_candidate_path + '*.csv')):\n file_name = file.split('/')[-1]\n print(file_name)\n \n try:\n d_sample = pd.read_csv(file)\n grouped_obj = d_sample.groupby(['row', 'column'])\n for cell in grouped_obj:\n num_rows = random.randint(2,5)\n sorted_df = cell[1].sort_values('graph-embedding-score',ascending=False)\n if 0 in sorted_df['evaluation_label'].tolist():\n continue\n if sorted_df.empty:\n continue\n if num_rows < len(sorted_df):\n top_sample_df = sorted_df[sorted_df['evaluation_label'] == -1][:10].sample(n=num_rows)\n bottom_sample_df = sorted_df[sorted_df['evaluation_label'] == -1][-10:].sample(n=num_rows)\n final_list.extend(top_sample_df.to_dict(orient='records'))\n final_list.extend(bottom_sample_df.to_dict(orient='records'))\n else:\n sample_df = sorted_df[sorted_df['evaluation_label'] == -1]\n final_list.extend(sample_df.to_dict(orient='records'))\n a = cell[1][cell[1]['evaluation_label'] == 1]\n if a.empty:\n continue\n final_list.extend(a.to_dict(orient='records'))\n except: \n pass\n\ntrain_df = pd.DataFrame(final_list)",
"58891288_0_1117541047012405958.csv\n39173938_0_7916056990138658530.csv\n10579449_0_1681126353774891032.csv\n33401079_0_9127583903019856402.csv\n21362676_0_6854186738074119688.csv\n38428277_0_1311643810102462607.csv\n91959037_0_7907661684242014480.csv\n20135078_0_7570343137119682530.csv\n35188621_0_6058553107571275232.csv\n54719588_0_8417197176086756912.csv\n21245481_0_8730460088443117515.csv\n71840765_0_6664391841933033844.csv\n8468806_0_4382447409703007384.csv\n88523363_0_8180214313099580515.csv\n29414811_13_8724394428539174350.csv\n99070098_0_2074872741302696997.csv\n43237185_1_3636357855502246981.csv\n46671561_0_6122315295162029872.csv\n53989675_0_8697482470743954630.csv\n25404227_0_2240631045609013057.csv\n9834884_0_3871985887467090123.csv\n63450419_0_8012592961815711786.csv\n1438042986423_95_20150728002306-00125-ip-10-236-191-2_88435628_5.csv\n22864497_0_8632623712684511496.csv\n53822652_0_5767892317858575530.csv\n37856682_0_6818907050314633217.csv\n26310680_0_5150772059999313798.csv\n29414811_12_251152470253168163.csv\n69537082_0_7789694313271016902.csv\n1438042989018_40_20150728002309-00067-ip-10-236-191-2_57714692_2.csv\n60319454_0_3938426910282115527.csv\n16767252_0_2409448375013995751.csv\n84548468_0_5955155464119382182.csv\n80588006_0_6965325215443683359.csv\n39650055_5_7135804139753401681.csv\n40534006_0_4617468856744635526.csv\n90196673_0_5458330029110291950.csv\n24036779_0_5608105867560183058.csv\n9567241_0_5666388268510912770.csv\n41480166_0_6681239260286218499.csv\n77694908_0_6083291340991074532.csv\n1438042989043_35_20150728002309-00287-ip-10-236-191-2_875026214_2.csv\n39107734_2_2329160387535788734.csv\n50245608_0_871275842592178099.csv\n"
],
[
"train_df.to_csv(training_datapath, index=False)",
"_____no_output_____"
]
],
[
[
"### Data Exploration",
"_____no_output_____"
]
],
[
[
"train_datapath = '../random_forest_ranking/training_data_dwd.csv'",
"_____no_output_____"
],
[
"df = pd.read_csv(train_datapath)\ndf.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 48365 entries, 0 to 48364\nData columns (total 23 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 column 48365 non-null int64 \n 1 row 48365 non-null int64 \n 2 label 48363 non-null object \n 3 context 48353 non-null object \n 4 label_clean 48363 non-null object \n 5 kg_id 47784 non-null object \n 6 kg_labels 46828 non-null object \n 7 kg_aliases 14414 non-null object \n 8 method 48365 non-null object \n 9 kg_descriptions 39166 non-null object \n 10 pagerank 48365 non-null float64\n 11 retrieval_score 48365 non-null float64\n 12 GT_kg_id 48365 non-null object \n 13 GT_kg_label 48365 non-null object \n 14 evaluation_label 48365 non-null int64 \n 15 monge_elkan 48365 non-null float64\n 16 des_cont_jaccard 48365 non-null float64\n 17 jaro_winkler 48365 non-null float64\n 18 graph-embedding-score 48365 non-null float64\n 19 singleton 48365 non-null int64 \n 20 reciprocal_rank 48365 non-null float64\n 21 num_char 48365 non-null int64 \n 22 num_tokens 48365 non-null int64 \ndtypes: float64(7), int64(6), object(10)\nmemory usage: 8.5+ MB\n"
],
[
"# Features we need to include in training\nfeatures = ['pagerank','retrieval_score','monge_elkan',\n 'des_cont_jaccard','jaro_winkler','graph-embedding-score',\n 'singleton','num_char','num_tokens','reciprocal_rank']\nevaluation_label = ['evaluation_label']\n\ndf[features].info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 48365 entries, 0 to 48364\nData columns (total 10 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 pagerank 48365 non-null float64\n 1 retrieval_score 48365 non-null float64\n 2 monge_elkan 48365 non-null float64\n 3 des_cont_jaccard 48365 non-null float64\n 4 jaro_winkler 48365 non-null float64\n 5 graph-embedding-score 48365 non-null float64\n 6 singleton 48365 non-null int64 \n 7 num_char 48365 non-null int64 \n 8 num_tokens 48365 non-null int64 \n 9 reciprocal_rank 48365 non-null float64\ndtypes: float64(7), int64(3)\nmemory usage: 3.7 MB\n"
],
[
"df['graph-embedding-score'] = df['graph-embedding-score'].fillna(0.0)\ndf['reciprocal_rank'] = df['reciprocal_rank'].fillna(0.0)\ndf[features].info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 48365 entries, 0 to 48364\nData columns (total 10 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 pagerank 48365 non-null float64\n 1 retrieval_score 48365 non-null float64\n 2 monge_elkan 48365 non-null float64\n 3 des_cont_jaccard 48365 non-null float64\n 4 jaro_winkler 48365 non-null float64\n 5 graph-embedding-score 48365 non-null float64\n 6 singleton 48365 non-null int64 \n 7 num_char 48365 non-null int64 \n 8 num_tokens 48365 non-null int64 \n 9 reciprocal_rank 48365 non-null float64\ndtypes: float64(7), int64(3)\nmemory usage: 3.7 MB\n"
]
],
[
[
"### Train a Random Forest Regressor",
"_____no_output_____"
]
],
[
[
"train_data = df[features]\ny_label = df[evaluation_label]",
"_____no_output_____"
],
[
"model = RandomForestRegressor(n_estimators=100, max_features=\"log2\",min_samples_leaf=3)\nmodel.fit(train_data,y_label)\ny_pred = model.predict(train_data)",
"/Users/rijulvohra/opt/anaconda3/envs/table_linker_dev/lib/python3.7/site-packages/ipykernel_launcher.py:2: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n \n"
],
[
"mean_squared_error(y_label, y_pred)",
"_____no_output_____"
],
[
"model_save_path = '../random_forest_ranking/rf_tuned_dwd_ranking.pkl'\n",
"_____no_output_____"
],
[
"pickle.dump(model,open(model_save_path,'wb'))",
"_____no_output_____"
],
[
"saved_model = pickle.load(open(model_save_path, 'rb'))",
"_____no_output_____"
]
],
[
[
"### Predicting Scores for Train set",
"_____no_output_____"
]
],
[
[
"train_candidate_path = '/Users/rijulvohra/Documents/work/Novartis-ISI/novartis-isi-git/entity_linking/t2dv2-raw/t2dv2/canonical-with-context/t2dv2-train-candidates-dwd-v2/'\ntrain_pred_output = '/Users/rijulvohra/Documents/work/Novartis-ISI/novartis-isi-git/entity_linking/t2dv2-raw/t2dv2/canonical-with-context/t2dv2-train-rf-pred-dwd-2/'",
"_____no_output_____"
],
[
"train_mse = []\nfor file in glob.glob(train_candidate_path + '*.csv'):\n try:\n file_name = file.split('/')[-1]\n print(file_name)\n df_file = pd.read_csv(file)\n data = df_file[features]\n y_file_label = df_file[evaluation_label]\n y_file_pred = saved_model.predict(data)\n df_file['rf_model_pred'] = y_file_pred\n file_mse = mean_squared_error(y_file_label,y_file_pred)\n train_mse.append(file_mse)\n df_file.to_csv(os.path.join(train_pred_output,file_name),index=False)\n except:\n pass",
"58891288_0_1117541047012405958.csv\n39173938_0_7916056990138658530.csv\n10579449_0_1681126353774891032.csv\n33401079_0_9127583903019856402.csv\n21362676_0_6854186738074119688.csv\n38428277_0_1311643810102462607.csv\n91959037_0_7907661684242014480.csv\n20135078_0_7570343137119682530.csv\n35188621_0_6058553107571275232.csv\n54719588_0_8417197176086756912.csv\n21245481_0_8730460088443117515.csv\n71840765_0_6664391841933033844.csv\n8468806_0_4382447409703007384.csv\n88523363_0_8180214313099580515.csv\n29414811_13_8724394428539174350.csv\n99070098_0_2074872741302696997.csv\n43237185_1_3636357855502246981.csv\n46671561_0_6122315295162029872.csv\n53989675_0_8697482470743954630.csv\n25404227_0_2240631045609013057.csv\n9834884_0_3871985887467090123.csv\n63450419_0_8012592961815711786.csv\n1438042986423_95_20150728002306-00125-ip-10-236-191-2_88435628_5.csv\n22864497_0_8632623712684511496.csv\n53822652_0_5767892317858575530.csv\n37856682_0_6818907050314633217.csv\n26310680_0_5150772059999313798.csv\n29414811_12_251152470253168163.csv\n69537082_0_7789694313271016902.csv\n1438042989018_40_20150728002309-00067-ip-10-236-191-2_57714692_2.csv\n60319454_0_3938426910282115527.csv\n16767252_0_2409448375013995751.csv\n84548468_0_5955155464119382182.csv\n80588006_0_6965325215443683359.csv\n39650055_5_7135804139753401681.csv\n40534006_0_4617468856744635526.csv\n90196673_0_5458330029110291950.csv\n24036779_0_5608105867560183058.csv\n9567241_0_5666388268510912770.csv\n41480166_0_6681239260286218499.csv\n77694908_0_6083291340991074532.csv\n1438042989043_35_20150728002309-00287-ip-10-236-191-2_875026214_2.csv\n39107734_2_2329160387535788734.csv\n50245608_0_871275842592178099.csv\n"
],
[
"print(\"Train MSE is: \", sum(train_mse)/len(train_mse))",
"Train MSE is: 0.4519201624197591\n"
]
],
[
[
"### Predicting Scores for dev set",
"_____no_output_____"
]
],
[
[
"dev_candidate_path = '/Users/rijulvohra/Documents/work/Novartis-ISI/novartis-isi-git/entity_linking/t2dv2-raw/t2dv2/canonical-with-context/t2dv2-dev-candidates-dwd-v2/'\ndev_pred_output = '/Users/rijulvohra/Documents/work/Novartis-ISI/novartis-isi-git/entity_linking/t2dv2-raw/t2dv2/canonical-with-context/t2dv2-dev-rf-pred-dwd-2/'",
"_____no_output_____"
],
[
"dev_mse = []\nfor file in glob.glob(dev_candidate_path + '*.csv'):\n\n file_name = file.split('/')[-1]\n print(file_name)\n df_file = pd.read_csv(file)\n data = df_file[features]\n y_file_label = df_file[evaluation_label]\n y_file_pred = saved_model.predict(data)\n df_file['rf_model_pred'] = y_file_pred\n file_mse = mean_squared_error(y_file_label,y_file_pred)\n dev_mse.append(file_mse)\n df_file.to_csv(os.path.join(dev_pred_output,file_name),index=False)\n ",
"39759273_0_1427898308030295194.csv\n45073662_0_3179937335063201739.csv\n29414811_2_4773219892816395776.csv\n84575189_0_6365692015941409487.csv\n14380604_4_3329235705746762392.csv\n50270082_0_444360818941411589.csv\n28086084_0_3127660530989916727.csv\n14067031_0_559833072073397908.csv\n"
],
[
"print(\"Dev MSE is: \", sum(dev_mse)/len(dev_mse))",
"Dev MSE is: 0.40129949369858403\n"
]
],
[
[
"### Evaluation",
"_____no_output_____"
]
],
[
[
"final_score_path = dev_pred_output",
"_____no_output_____"
],
[
"import os\neval_file_names = []\nfor (dirpath, dirnames, filenames) in os.walk(final_score_path):\n for fn in filenames:\n if \"csv\" not in fn:\n continue\n abs_fn = dirpath + fn\n assert os.path.isfile(abs_fn)\n if os.path.getsize(abs_fn) == 0:\n continue\n eval_file_names.append(abs_fn)\nlen(eval_file_names)",
"_____no_output_____"
],
[
"# merge all eval files in one df\ndef merge_df(file_names: list):\n df_list = []\n for fn in file_names:\n fid = fn.split('/')[-1].split('.csv')[0]\n df = pd.read_csv(fn)\n df['table_id'] = fid\n # df = df.fillna('')\n df_list.append(df)\n return pd.concat(df_list)\nall_data = merge_df(eval_file_names)\nall_data",
"_____no_output_____"
],
[
"# parse eval file\nfrom pandas.core.common import SettingWithCopyError\npd.options.mode.chained_assignment = 'raise'\n\ndef parse_eval_files_stats(eval_data,method):\n res = {}\n candidate_eval_data = eval_data.groupby(['table_id', 'row', 'column'])['table_id'].count().reset_index(name=\"count\")\n res['num_tasks'] = len(eval_data.groupby(['table_id', 'row', 'column']))\n res['num_tasks_with_gt'] = len(eval_data[pd.notna(eval_data['GT_kg_id'])].groupby(['table_id', 'row', 'column']))\n res['num_tasks_with_gt_in_candidate'] = len(eval_data[eval_data['evaluation_label'] == 1].groupby(['table_id', 'row', 'column']))\n res['num_tasks_with_singleton_candidate'] = len(candidate_eval_data[candidate_eval_data['count'] == 1].groupby(['table_id', 'row', 'column']))\n singleton_eval_data = candidate_eval_data[candidate_eval_data['count'] == 1]\n num_tasks_with_singleton_candidate_with_gt = 0\n for i, row in singleton_eval_data.iterrows():\n table_id, row_idx, col_idx = row['table_id'], row['row'], row['column']\n c_e_data = eval_data[(eval_data['table_id'] == table_id) & (eval_data['row'] == row_idx) & (eval_data['column'] == col_idx)]\n assert len(c_e_data) == 1\n if c_e_data.iloc[0]['evaluation_label'] == 1:\n num_tasks_with_singleton_candidate_with_gt += 1\n res['num_tasks_with_singleton_candidate_with_gt'] = num_tasks_with_singleton_candidate_with_gt\n num_tasks_with_graph_top_one_accurate = []\n num_tasks_with_graph_top_five_accurate = []\n num_tasks_with_graph_top_ten_accurate = []\n num_tasks_with_final_score_top_one_accurate = []\n num_tasks_with_final_score_top_five_accurate = []\n num_tasks_with_final_score_top_ten_accurate = []\n num_tasks_with_model_score_top_one_accurate = []\n num_tasks_with_model_score_top_five_accurate = []\n num_tasks_with_model_score_top_ten_accurate = []\n ndcg_score_g_list = []\n ndcg_model_score_list = []\n has_gt_list = []\n has_gt_in_candidate = []\n # candidate_eval_data = candidate_eval_data[:1]\n for i, row in candidate_eval_data.iterrows():\n #print(i)\n table_id, row_idx, col_idx = row['table_id'], row['row'], row['column']\n c_e_data = eval_data[(eval_data['table_id'] == table_id) & (eval_data['row'] == row_idx) & (eval_data['column'] == col_idx)]\n assert len(c_e_data) > 0\n if np.nan not in set(c_e_data['GT_kg_id']):\n has_gt_list.append(1)\n else:\n has_gt_list.append(0)\n if 1 in set(c_e_data['evaluation_label']):\n has_gt_in_candidate.append(1)\n else:\n has_gt_in_candidate.append(0)\n \n # handle graph-embedding-score\n s_data = c_e_data.sort_values(by=['graph-embedding-score'], ascending=False)\n if s_data.iloc[0]['evaluation_label'] == 1:\n num_tasks_with_graph_top_one_accurate.append(1)\n else:\n num_tasks_with_graph_top_one_accurate.append(0)\n if 1 in set(s_data.iloc[0:5]['evaluation_label']):\n num_tasks_with_graph_top_five_accurate.append(1)\n else:\n num_tasks_with_graph_top_five_accurate.append(0)\n if 1 in set(s_data.iloc[0:10]['evaluation_label']):\n num_tasks_with_graph_top_ten_accurate.append(1)\n else:\n num_tasks_with_graph_top_ten_accurate.append(0)\n \n #rank on model score\n s_data = c_e_data.sort_values(by=[method], ascending=False)\n if s_data.iloc[0]['evaluation_label'] == 1:\n num_tasks_with_model_score_top_one_accurate.append(1)\n else:\n num_tasks_with_model_score_top_one_accurate.append(0)\n if 1 in set(s_data.iloc[0:5]['evaluation_label']):\n num_tasks_with_model_score_top_five_accurate.append(1)\n else:\n num_tasks_with_model_score_top_five_accurate.append(0)\n if 1 in set(s_data.iloc[0:10]['evaluation_label']):\n 
num_tasks_with_model_score_top_ten_accurate.append(1)\n else:\n num_tasks_with_model_score_top_ten_accurate.append(0)\n \n cf_e_data = c_e_data.copy()\n #cf_e_data['evaluation_label'] = cf_e_data['evaluation_label'].replace(-1, 0)\n# cf_e_data['text-embedding-score'] = cf_e_data['text-embedding-score'].replace(np.nan, 0)\n cf_e_data['graph-embedding-score'] = cf_e_data['graph-embedding-score'].replace(np.nan, 0)\n cf_e_data[method] = cf_e_data[method].replace(np.nan, 0)\n\n candidate_eval_data['graph_top_one_accurate'] = num_tasks_with_graph_top_one_accurate\n candidate_eval_data['graph_top_five_accurate'] = num_tasks_with_graph_top_five_accurate\n candidate_eval_data['graph_top_ten_accurate'] = num_tasks_with_graph_top_five_accurate\n candidate_eval_data['model_top_one_accurate'] = num_tasks_with_model_score_top_one_accurate\n candidate_eval_data['model_top_five_accurate'] = num_tasks_with_model_score_top_five_accurate\n candidate_eval_data['model_top_ten_accurate'] = num_tasks_with_model_score_top_ten_accurate\n candidate_eval_data['has_gt'] = has_gt_list\n candidate_eval_data['has_gt_in_candidate'] = has_gt_in_candidate\n res['num_tasks_with_graph_top_one_accurate'] = sum(num_tasks_with_graph_top_one_accurate)\n res['num_tasks_with_graph_top_five_accurate'] = sum(num_tasks_with_graph_top_five_accurate)\n res['num_tasks_with_graph_top_ten_accurate'] = sum(num_tasks_with_graph_top_ten_accurate)\n res['num_tasks_with_model_score_top_one_accurate'] = sum(num_tasks_with_model_score_top_one_accurate)\n res['num_tasks_with_model_score_top_five_accurate'] = sum(num_tasks_with_model_score_top_five_accurate)\n res['num_tasks_with_model_score_top_ten_accurate'] = sum(num_tasks_with_model_score_top_ten_accurate)\n return res, candidate_eval_data",
"_____no_output_____"
],
[
"res, candidate_eval_data = parse_eval_files_stats(all_data,'rf_model_pred')\nprint(res)\ndisplay(candidate_eval_data)",
"{'num_tasks': 714, 'num_tasks_with_gt': 708, 'num_tasks_with_gt_in_candidate': 695, 'num_tasks_with_singleton_candidate': 0, 'num_tasks_with_singleton_candidate_with_gt': 0, 'num_tasks_with_graph_top_one_accurate': 289, 'num_tasks_with_graph_top_five_accurate': 491, 'num_tasks_with_graph_top_ten_accurate': 552, 'num_tasks_with_model_score_top_one_accurate': 513, 'num_tasks_with_model_score_top_five_accurate': 595, 'num_tasks_with_model_score_top_ten_accurate': 616}\n"
],
[
"# Conclusion of exact-match on all tasks with ground truth (no filtering)\nprint(f\"number of tasks: {res['num_tasks']}\")\nprint(f\"number of tasks with ground truth: {res['num_tasks_with_gt']}\")\nprint(f\"number of tasks with ground truth in candidate set: {res['num_tasks_with_gt_in_candidate']}, which is {res['num_tasks_with_gt_in_candidate']/res['num_tasks_with_gt'] * 100}%\")\nprint(f\"number of tasks has singleton candidate set: {res['num_tasks_with_singleton_candidate']}, which is {res['num_tasks_with_singleton_candidate']/res['num_tasks_with_gt'] * 100}%\")\nprint(f\"number of tasks has singleton candidate set which is ground truth: {res['num_tasks_with_singleton_candidate_with_gt']}, which is {res['num_tasks_with_singleton_candidate_with_gt']/res['num_tasks_with_gt'] * 100}%\")\nprint()\nprint(f\"number of tasks with top-1 accuracy in terms of graph embedding score: {res['num_tasks_with_graph_top_one_accurate']}, which is {res['num_tasks_with_graph_top_one_accurate']/res['num_tasks_with_gt'] * 100}%\")\nprint(f\"number of tasks with top-5 accuracy in terms of graph embedding score: {res['num_tasks_with_graph_top_five_accurate']}, which is {res['num_tasks_with_graph_top_five_accurate']/res['num_tasks_with_gt'] * 100}%\")\nprint(f\"number of tasks with top-10 accuracy in terms of graph embedding score: {res['num_tasks_with_graph_top_ten_accurate']}, which is {res['num_tasks_with_graph_top_ten_accurate']/res['num_tasks_with_gt'] * 100}%\")\nprint()\nprint(f\"number of tasks with top-1 accuracy in terms of model score: {res['num_tasks_with_model_score_top_one_accurate']}, which is {res['num_tasks_with_model_score_top_one_accurate']/res['num_tasks_with_gt'] * 100}%\")\nprint(f\"number of tasks with top-5 accuracy in terms of model score: {res['num_tasks_with_model_score_top_five_accurate']}, which is {res['num_tasks_with_model_score_top_five_accurate']/res['num_tasks_with_gt'] * 100}%\")\nprint(f\"number of tasks with top-10 accuracy in terms of model score: {res['num_tasks_with_model_score_top_ten_accurate']}, which is {res['num_tasks_with_model_score_top_ten_accurate']/res['num_tasks_with_gt'] * 100}%\")\nprint()\ncandidate_eval_data_with_gt = candidate_eval_data[candidate_eval_data['has_gt'] == 1]",
"number of tasks: 714\nnumber of tasks with ground truth: 708\nnumber of tasks with ground truth in candidate set: 695, which is 98.1638418079096%\nnumber of tasks has singleton candidate set: 0, which is 0.0%\nnumber of tasks has singleton candidate set which is ground truth: 0, which is 0.0%\n\nnumber of tasks with top-1 accuracy in terms of graph embedding score: 289, which is 40.81920903954802%\nnumber of tasks with top-5 accuracy in terms of graph embedding score: 491, which is 69.35028248587571%\nnumber of tasks with top-10 accuracy in terms of graph embedding score: 552, which is 77.96610169491525%\n\nnumber of tasks with top-1 accuracy in terms of model score: 513, which is 72.45762711864407%\nnumber of tasks with top-5 accuracy in terms of model score: 595, which is 84.03954802259888%\nnumber of tasks with top-10 accuracy in terms of model score: 616, which is 87.00564971751412%\n\n"
],
[
"c = candidate_eval_data.groupby(['table_id']).agg({\n 'graph_top_one_accurate':lambda x: sum(x)/len(x),\n 'model_top_one_accurate':lambda x: sum(x)/len(x),\n 'graph_top_five_accurate':lambda x: sum(x)/len(x),\n 'model_top_five_accurate':lambda x: sum(x)/len(x)\n})\nc['table type'] = [\n 'country II',\n 'companies',\n 'pope',\n 'video games',\n 'movies',\n 'players I',\n 'players II',\n 'magazines'\n]\nc",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d0ea59075a40d33c570070e7eb9b82e18deb8b70 | 5,429 | ipynb | Jupyter Notebook | client/workflows/examples-without-verta/notebooks/sklearn-census.ipynb | conradoverta/modeldb | adbf5ce44604e0d692fffe90b0f47e2368c178e3 | [
"Apache-2.0"
] | 624 | 2020-01-18T21:10:12.000Z | 2022-03-23T12:11:06.000Z | client/workflows/examples-without-verta/notebooks/sklearn-census.ipynb | conradoverta/modeldb | adbf5ce44604e0d692fffe90b0f47e2368c178e3 | [
"Apache-2.0"
] | 651 | 2019-04-18T12:55:07.000Z | 2022-03-31T23:45:09.000Z | client/workflows/examples-without-verta/notebooks/sklearn-census.ipynb | conradoverta/modeldb | adbf5ce44604e0d692fffe90b0f47e2368c178e3 | [
"Apache-2.0"
] | 118 | 2019-04-12T16:01:21.000Z | 2022-03-05T16:29:41.000Z | 24.022124 | 274 | 0.535826 | [
[
[
"# Logistic Regression with Hyperparameter Optimization (scikit-learn)",
"_____no_output_____"
],
[
"<a href=\"https://colab.research.google.com/github/VertaAI/modeldb/blob/master/client/workflows/examples-without-verta/notebooks/sklearn-census.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"## Imports",
"_____no_output_____"
]
],
[
[
"import warnings\nfrom sklearn.exceptions import ConvergenceWarning\nwarnings.filterwarnings(\"ignore\", category=ConvergenceWarning)\n\nimport itertools\nimport time\n\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn import model_selection\nfrom sklearn import linear_model\nfrom sklearn import metrics",
"_____no_output_____"
]
],
[
[
"---",
"_____no_output_____"
],
[
"## Prepare Data",
"_____no_output_____"
]
],
[
[
"try:\n import wget\nexcept ImportError:\n !pip install wget # you may need pip3\n import wget",
"_____no_output_____"
],
[
"train_data_url = \"http://s3.amazonaws.com/verta-starter/census-train.csv\"\ntrain_data_filename = wget.download(train_data_url)\ntest_data_url = \"http://s3.amazonaws.com/verta-starter/census-test.csv\"\ntest_data_filename = wget.download(test_data_url)",
"_____no_output_____"
],
[
"df_train = pd.read_csv(\"census-train.csv\")\nX_train = df_train.iloc[:,:-1].values\ny_train = df_train.iloc[:, -1]\n\ndf_train.head()",
"_____no_output_____"
]
],
[
[
"## Prepare Hyperparameters",
"_____no_output_____"
]
],
[
[
"hyperparam_candidates = {\n 'C': [1e-4, 1e-1, 1, 10, 1e3],\n 'solver': ['liblinear', 'lbfgs'],\n 'max_iter': [15, 28],\n}\n\n# total models 20\n\n# create hyperparam combinations\nhyperparam_sets = [dict(zip(hyperparam_candidates.keys(), values))\n for values\n in itertools.product(*hyperparam_candidates.values())]",
"_____no_output_____"
]
],
[
[
"## Run Validation",
"_____no_output_____"
]
],
[
[
"# create validation split\n(X_val_train, X_val_test,\n y_val_train, y_val_test) = model_selection.train_test_split(X_train, y_train,\n test_size=0.2,\n shuffle=True) \n\ndef run_experiment(hyperparams):\n \n # create and train model\n model = linear_model.LogisticRegression(**hyperparams)\n model.fit(X_train, y_train)\n \n # calculate and log validation accuracy\n val_acc = model.score(X_val_test, y_val_test)\n print(hyperparams, end=' ')\n print(\"Validation accuracy: {:.4f}\".format(val_acc))\n \n# NOTE: run_experiment() could also be defined in a module, and executed in parallel\nfor hyperparams in hyperparam_sets:\n run_experiment(hyperparams)",
"_____no_output_____"
]
],
[
[
"## Pick the best hyperparameters and train the full data",
"_____no_output_____"
]
],
[
[
"best_hyperparams = {}\nmodel = linear_model.LogisticRegression(multi_class='auto', **best_hyperparams)\nmodel.fit(X_train, y_train)",
"_____no_output_____"
]
],
[
[
"## Calculate Accuracy on Full Training Set",
"_____no_output_____"
]
],
[
[
"train_acc = model.score(X_train, y_train)\nprint(\"Training accuracy: {:.4f}\".format(train_acc))",
"_____no_output_____"
]
],
[
[
"---",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
d0ea5c05964a3a7fc576340d4343fee2dcf8f933 | 655,075 | ipynb | Jupyter Notebook | Chapter10/prepare_data.ipynb | kwhkim/PyTorch-Computer-Vision-Cookbook | 2e203c4380a7e854c24bff35a2fc240e65b7ca68 | [
"MIT"
] | null | null | null | Chapter10/prepare_data.ipynb | kwhkim/PyTorch-Computer-Vision-Cookbook | 2e203c4380a7e854c24bff35a2fc240e65b7ca68 | [
"MIT"
] | null | null | null | Chapter10/prepare_data.ipynb | kwhkim/PyTorch-Computer-Vision-Cookbook | 2e203c4380a7e854c24bff35a2fc240e65b7ca68 | [
"MIT"
] | null | null | null | 86.707478 | 164 | 0.737899 | [
[
[
"After downloading, install unrar on your computer\n$ sudo apt-get install unrar\nUnrar data\n",
"_____no_output_____"
]
],
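[
[
"A minimal sketch of the extraction step (the archive name and paths are assumptions; adjust them to your setup, so the lines are left commented out):",
"_____no_output_____"
]
],
[
[
"# Sketch: extract the main archive, then each per-class .rar inside it\n# ('unrar x' extracts with full paths preserved)\n# !unrar x hmdb51_org.rar ../../../data/hmdb51_org/\n# !for f in ../../../data/hmdb51_org/*.rar; do unrar x \"$f\" ../../../data/hmdb51_org/; done",
"_____no_output_____"
]
],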
[
[
"import os\n\npath2data = \"../../../data\"\nsub_folder = \"hmdb51_org\"\nsub_folder_jpg = \"hmdb51_jpg\"\npath2aCatgs = os.path.join(path2data, sub_folder)\n\nlistOfCategories = os.listdir(path2aCatgs)\nlistOfCategories, len(listOfCategories)",
"_____no_output_____"
],
[
"for cat in listOfCategories:\n print(\"category:\", cat)\n path2acat = os.path.join(path2aCatgs, cat)\n listOfSubs = os.listdir(path2acat)\n print(\"number of sub-folders:\", len(listOfSubs))\n print(\"-\"*50)",
"category: kick\nnumber of sub-folders: 130\n--------------------------------------------------\ncategory: brush_hair\nnumber of sub-folders: 107\n--------------------------------------------------\ncategory: cartwheel\nnumber of sub-folders: 107\n--------------------------------------------------\ncategory: catch\nnumber of sub-folders: 102\n--------------------------------------------------\ncategory: chew\nnumber of sub-folders: 109\n--------------------------------------------------\ncategory: clap\nnumber of sub-folders: 130\n--------------------------------------------------\ncategory: climb\nnumber of sub-folders: 108\n--------------------------------------------------\ncategory: climb_stairs\nnumber of sub-folders: 112\n--------------------------------------------------\ncategory: dive\nnumber of sub-folders: 127\n--------------------------------------------------\ncategory: draw_sword\nnumber of sub-folders: 103\n--------------------------------------------------\ncategory: dribble\nnumber of sub-folders: 145\n--------------------------------------------------\ncategory: drink\nnumber of sub-folders: 164\n--------------------------------------------------\ncategory: eat\nnumber of sub-folders: 108\n--------------------------------------------------\ncategory: fall_floor\nnumber of sub-folders: 136\n--------------------------------------------------\ncategory: fencing\nnumber of sub-folders: 116\n--------------------------------------------------\ncategory: flic_flac\nnumber of sub-folders: 107\n--------------------------------------------------\ncategory: golf\nnumber of sub-folders: 105\n--------------------------------------------------\ncategory: handstand\nnumber of sub-folders: 113\n--------------------------------------------------\ncategory: hit\nnumber of sub-folders: 127\n--------------------------------------------------\ncategory: hug\nnumber of sub-folders: 118\n--------------------------------------------------\ncategory: jump\nnumber of sub-folders: 151\n--------------------------------------------------\ncategory: kick_ball\nnumber of sub-folders: 128\n--------------------------------------------------\ncategory: kiss\nnumber of sub-folders: 102\n--------------------------------------------------\ncategory: laugh\nnumber of sub-folders: 128\n--------------------------------------------------\ncategory: pick\nnumber of sub-folders: 106\n--------------------------------------------------\ncategory: pour\nnumber of sub-folders: 106\n--------------------------------------------------\ncategory: pullup\nnumber of sub-folders: 104\n--------------------------------------------------\ncategory: punch\nnumber of sub-folders: 126\n--------------------------------------------------\ncategory: push\nnumber of sub-folders: 116\n--------------------------------------------------\ncategory: pushup\nnumber of sub-folders: 103\n--------------------------------------------------\ncategory: ride_bike\nnumber of sub-folders: 103\n--------------------------------------------------\ncategory: ride_horse\nnumber of sub-folders: 116\n--------------------------------------------------\ncategory: run\nnumber of sub-folders: 232\n--------------------------------------------------\ncategory: shake_hands\nnumber of sub-folders: 162\n--------------------------------------------------\ncategory: shoot_ball\nnumber of sub-folders: 131\n--------------------------------------------------\ncategory: shoot_bow\nnumber of sub-folders: 112\n--------------------------------------------------\ncategory: 
shoot_gun\nnumber of sub-folders: 103\n--------------------------------------------------\ncategory: sit\nnumber of sub-folders: 142\n--------------------------------------------------\ncategory: situp\nnumber of sub-folders: 105\n--------------------------------------------------\ncategory: smile\nnumber of sub-folders: 102\n--------------------------------------------------\ncategory: smoke\nnumber of sub-folders: 109\n--------------------------------------------------\ncategory: somersault\nnumber of sub-folders: 140\n--------------------------------------------------\ncategory: stand\nnumber of sub-folders: 154\n--------------------------------------------------\ncategory: swing_baseball\nnumber of sub-folders: 143\n--------------------------------------------------\ncategory: sword_exercise\nnumber of sub-folders: 127\n--------------------------------------------------\ncategory: sword\nnumber of sub-folders: 127\n--------------------------------------------------\ncategory: talk\nnumber of sub-folders: 120\n--------------------------------------------------\ncategory: throw\nnumber of sub-folders: 102\n--------------------------------------------------\ncategory: turn\nnumber of sub-folders: 240\n--------------------------------------------------\ncategory: walk\nnumber of sub-folders: 548\n--------------------------------------------------\ncategory: wave\nnumber of sub-folders: 104\n--------------------------------------------------\n"
],
[
"import myutils",
"_____no_output_____"
],
[
"extension = \".avi\"\nn_frames = 16\nfor root, dirs, files in os.walk(path2aCatgs, topdown=False):\n for name in files:\n if extension not in name:\n continue\n path2vid = os.path.join(root, name)\n frames, vlen = myutils.get_frames(path2vid, n_frames= n_frames)\n path2store = path2vid.replace(sub_folder, sub_folder_jpg)\n path2store = path2store.replace(extension, \"\")\n print(path2store)\n os.makedirs(path2store, exist_ok= True)\n myutils.store_frames(frames, path2store)\n print(\"-\"*50) ",
"../../../data/hmdb51_jpg/kick/50_FIRST_DATES_kick_f_cm_np1_ba_med_19\n../../../data/hmdb51_jpg/kick/American_History_X_kick_f_nm_np1_le_bad_37\n../../../data/hmdb51_jpg/kick/American_History_X_kick_l_cm_np1_le_med_33\n../../../data/hmdb51_jpg/kick/Best_fight_scene_in_history_of_movie_kick_f_cm_np2_ba_bad_2\n../../../data/hmdb51_jpg/kick/Best_fight_scene_in_history_of_movie_kick_f_cm_np2_ri_bad_0\n../../../data/hmdb51_jpg/kick/CharlieAndTheChocolateFactory_kick_f_nm_np1_ri_med_19\n../../../data/hmdb51_jpg/kick/FOOT_2009_(REMI_GAILLARD)_kick_f_cm_np1_ba_med_5\n../../../data/hmdb51_jpg/kick/jonhs_netfreemovies_holygrail_kick_f_nm_np1_fr_med_4\n../../../data/hmdb51_jpg/kick/jonhs_netfreemovies_holygrail_kick_f_nm_np1_ri_med_10\n../../../data/hmdb51_jpg/kick/jonhs_netfreemovies_holygrail_kick_f_nm_np1_ri_med_11\n../../../data/hmdb51_jpg/kick/kick__Baddest_Fight_Scenes_EVER!_-_Kickboxer_-_Part_1_of_2_kick_f_cm_np1_ba_med_4\n../../../data/hmdb51_jpg/kick/kick__Baddest_Fight_Scenes_EVER!_-_Kickboxer_-_Part_1_of_2_kick_f_cm_np1_fr_med_0\n../../../data/hmdb51_jpg/kick/kick__Baddest_Fight_Scenes_EVER!_-_Kickboxer_-_Part_1_of_2_kick_f_cm_np1_fr_med_11\n../../../data/hmdb51_jpg/kick/kick__Baddest_Fight_Scenes_EVER!_-_Kickboxer_-_Part_1_of_2_kick_f_nm_np1_ba_med_7\n../../../data/hmdb51_jpg/kick/kick__Baddest_Fight_Scenes_EVER!_-_Kickboxer_-_Part_1_of_2_kick_f_nm_np1_ri_bad_2\n../../../data/hmdb51_jpg/kick/kick__Baddest_Fight_Scenes_EVER!_-_Kickboxer_-_Part_1_of_2_kick_u_cm_np1_ba_med_1\n../../../data/hmdb51_jpg/kick/kick__Baddest_Fight_Scenes_EVER!_-_Kickboxer_-_Part_1_of_2_kick_u_cm_np1_ba_med_10\n../../../data/hmdb51_jpg/kick/kick__Baddest_Fight_Scenes_EVER!_-_Kickboxer_-_Part_1_of_2_kick_u_cm_np1_ba_med_3\n../../../data/hmdb51_jpg/kick/kick__Baddest_Fight_Scenes_EVER!_-_Kickboxer_-_Part_1_of_2_kick_u_cm_np1_ba_med_5\n../../../data/hmdb51_jpg/kick/kick__Baddest_Fight_Scenes_EVER!_-_Kickboxer_-_Part_1_of_2_kick_u_cm_np1_ba_med_6\n../../../data/hmdb51_jpg/kick/kick__Baddest_Fight_Scenes_EVER!_-_Kickboxer_-_Part_1_of_2_kick_u_cm_np1_ba_med_8\n../../../data/hmdb51_jpg/kick/kick__Baddest_Fight_Scenes_EVER!_-_Kickboxer_-_Part_1_of_2_kick_u_cm_np1_ri_med_12\n../../../data/hmdb51_jpg/kick/kick__Baddest_Fight_Scenes_EVER!_-_Kickboxer_-_Part_1_of_2_kick_u_cm_np1_ri_med_9\n../../../data/hmdb51_jpg/kick/kick__Best_CHUCK_NORRIS_kick_to_the_nuts!_kick_f_cm_np1_ba_goo_0\n../../../data/hmdb51_jpg/kick/kick__Best_CHUCK_NORRIS_kick_to_the_nuts!_kick_f_cm_np1_ba_med_2\n../../../data/hmdb51_jpg/kick/kick__Best_CHUCK_NORRIS_kick_to_the_nuts!_kick_f_cm_np1_ba_med_3\n../../../data/hmdb51_jpg/kick/kick__Best_CHUCK_NORRIS_kick_to_the_nuts!_kick_f_cm_np1_fr_med_1\n../../../data/hmdb51_jpg/kick/kick__Best_fight_scene_of_all_time_kick_f_cm_np1_ba_bad_11\n../../../data/hmdb51_jpg/kick/kick__Best_fight_scene_of_all_time_kick_f_cm_np1_ba_bad_7\n../../../data/hmdb51_jpg/kick/kick__Best_fight_scene_of_all_time_kick_f_cm_np1_fr_bad_10\n../../../data/hmdb51_jpg/kick/kick__Best_fight_scene_of_all_time_kick_f_cm_np1_fr_bad_14\n../../../data/hmdb51_jpg/kick/kick__Best_fight_scene_of_all_time_kick_f_cm_np1_fr_bad_5\n../../../data/hmdb51_jpg/kick/kick__Best_fight_scene_of_all_time_kick_f_cm_np1_le_bad_0\n../../../data/hmdb51_jpg/kick/kick__Best_fight_scene_of_all_time_kick_f_cm_np1_le_bad_6\n../../../data/hmdb51_jpg/kick/kick__Best_fight_scene_of_all_time_kick_f_cm_np1_le_bad_8\n../../../data/hmdb51_jpg/kick/kick__Best_fight_scene_of_all_time_kick_f_cm_np1_le_bad_9\n../../../data/hmdb51_jpg/kick/kick__Best_fight_scene_of_all_time_kick_f_cm_
np1_ri_bad_1\n../../../data/hmdb51_jpg/kick/kick__Best_fight_scene_of_all_time_kick_f_cm_np1_ri_bad_15\n../../../data/hmdb51_jpg/kick/kick__Best_fight_scene_of_all_time_kick_f_cm_np1_ri_bad_2\n../../../data/hmdb51_jpg/kick/kick__Best_fight_scene_of_all_time_kick_f_cm_np1_ri_bad_3\n../../../data/hmdb51_jpg/kick/kick__Best_fight_scene_of_all_time_kick_f_cm_np1_ri_bad_4\n../../../data/hmdb51_jpg/kick/kick__Best_fight_scene_of_all_time_kick_u_cm_np1_fr_bad_12\n../../../data/hmdb51_jpg/kick/kick__Best_fight_scene_of_all_time_kick_u_cm_np1_fr_bad_13\n../../../data/hmdb51_jpg/kick/kick__Bruce_Lee_Kick_kick_u_cm_np1_fr_med_0\n../../../data/hmdb51_jpg/kick/kick__Bruce_Lee_sidekick_kick_f_cm_np1_ri_med_1\n../../../data/hmdb51_jpg/kick/kick__Bruce_Lee_sidekick_kick_f_nm_np1_ri_med_0\n../../../data/hmdb51_jpg/kick/kick__bruce_lee_v_s_japanese_kick_f_cm_np1_ba_bad_10\n../../../data/hmdb51_jpg/kick/kick__bruce_lee_v_s_japanese_kick_f_cm_np1_ba_bad_14\n../../../data/hmdb51_jpg/kick/kick__bruce_lee_v_s_japanese_kick_f_cm_np1_ba_bad_15\n../../../data/hmdb51_jpg/kick/kick__bruce_lee_v_s_japanese_kick_f_cm_np1_le_bad_11\n../../../data/hmdb51_jpg/kick/kick__bruce_lee_v_s_japanese_kick_f_cm_np1_le_bad_13\n../../../data/hmdb51_jpg/kick/kick__bruce_lee_v_s_japanese_kick_f_cm_np1_le_bad_17\n../../../data/hmdb51_jpg/kick/kick__bruce_lee_v_s_japanese_kick_f_cm_np1_le_bad_19\n../../../data/hmdb51_jpg/kick/kick__bruce_lee_v_s_japanese_kick_f_cm_np1_ri_bad_12\n../../../data/hmdb51_jpg/kick/kick__bruce_lee_v_s_japanese_kick_f_cm_np1_ri_bad_18\n../../../data/hmdb51_jpg/kick/kick__bruce_lee_v_s_japanese_kick_f_cm_np1_ri_bad_6\n../../../data/hmdb51_jpg/kick/kick__bruce_lee_v_s_japanese_kick_f_cm_np1_ri_bad_7\n../../../data/hmdb51_jpg/kick/kick__bruce_lee_v_s_japanese_kick_u_cm_np1_fr_bad_5\n../../../data/hmdb51_jpg/kick/kick__bruce_lee_v_s_japanese_kick_u_cm_np1_le_bad_16\n../../../data/hmdb51_jpg/kick/kick__bruce_lee_v_s_japanese_kick_u_cm_np1_ri_bad_0\n../../../data/hmdb51_jpg/kick/kick__bruce_lee_v_s_japanese_kick_u_cm_np1_ri_bad_1\n../../../data/hmdb51_jpg/kick/kick__bruce_lee_v_s_japanese_kick_u_cm_np1_ri_bad_8\n../../../data/hmdb51_jpg/kick/kick__bruce_lee_v_s_japanese_kick_u_cm_np1_ri_bad_9\n../../../data/hmdb51_jpg/kick/KUNG_FU_HUSTLE_kick_f_cm_np1_fr_bad_9\n../../../data/hmdb51_jpg/kick/KUNG_FU_HUSTLE_kick_f_cm_np1_fr_med_15\n../../../data/hmdb51_jpg/kick/KUNG_FU_HUSTLE_kick_f_cm_np1_fr_med_44\n../../../data/hmdb51_jpg/kick/KUNG_FU_HUSTLE_kick_f_cm_np1_fr_med_45\n../../../data/hmdb51_jpg/kick/KUNG_FU_HUSTLE_kick_f_cm_np1_fr_med_58\n../../../data/hmdb51_jpg/kick/KUNG_FU_HUSTLE_kick_f_cm_np1_le_med_10\n../../../data/hmdb51_jpg/kick/KUNG_FU_HUSTLE_kick_f_nm_np1_fr_bad_37\n../../../data/hmdb51_jpg/kick/KUNG_FU_HUSTLE_kick_u_cm_np1_fr_med_11\n../../../data/hmdb51_jpg/kick/KUNG_FU_HUSTLE_kick_u_cm_np1_le_med_12\n../../../data/hmdb51_jpg/kick/LONGESTYARD_kick_f_cm_np1_ri_med_16\n../../../data/hmdb51_jpg/kick/Pirates_6_kick_f_nm_np1_fr_bad_7\n../../../data/hmdb51_jpg/kick/Prelinger_ActYourA1949_kick_f_nm_np1_ri_bad_10\n../../../data/hmdb51_jpg/kick/RushHour2_kick_l_cm_np1_le_med_2\n../../../data/hmdb51_jpg/kick/TheBoondockSaints_kick_f_cm_np1_fr_med_123\n../../../data/hmdb51_jpg/kick/TheBoondockSaints_kick_f_cm_np1_fr_med_88\n../../../data/hmdb51_jpg/kick/TheBoondockSaints_kick_u_nm_np1_fr_bad_122\n../../../data/hmdb51_jpg/kick/The_Matrix_2_kick_f_cm_np1_le_med_6\n../../../data/hmdb51_jpg/kick/The_Matrix_2_kick_f_cm_np1_ri_med_5\n../../../data/hmdb51_jpg/kick/The_Matrix_3_kick_f_cm_np1_fr_med_5\n../../../data/hmdb51_jpg/k
ick/The_Matrix_3_kick_f_cm_np1_le_med_2\n../../../data/hmdb51_jpg/kick/The_Matrix_3_kick_f_nm_np1_fr_med_0\n../../../data/hmdb51_jpg/kick/The_Matrix_3_kick_f_nm_np1_le_med_1\n../../../data/hmdb51_jpg/kick/The_Matrix_5_kick_f_cm_np1_fr_bad_16\n../../../data/hmdb51_jpg/kick/The_Matrix_5_kick_f_cm_np1_fr_med_19\n../../../data/hmdb51_jpg/kick/The_Matrix_5_kick_f_cm_np1_fr_med_6\n../../../data/hmdb51_jpg/kick/The_Matrix_5_kick_f_cm_np1_le_med_13\n../../../data/hmdb51_jpg/kick/The_Matrix_5_kick_u_nm_np1_fr_bad_14\n../../../data/hmdb51_jpg/kick/The_Matrix_6_kick_f_nm_np1_fr_med_6\n../../../data/hmdb51_jpg/kick/The_Matrix_Revolutions_2_kick_f_cm_np1_ba_bad_2\n../../../data/hmdb51_jpg/kick/The_Matrix_Revolutions_6_kick_f_cm_np1_ba_med_7\n../../../data/hmdb51_jpg/kick/The_Matrix_Revolutions_6_kick_f_cm_np1_ri_med_0\n../../../data/hmdb51_jpg/kick/The_Matrix_Revolutions_6_kick_f_nm_np1_ri_med_4\n../../../data/hmdb51_jpg/kick/The_Matrix_Revolutions_6_kick_u_nm_np1_fr_med_6\n../../../data/hmdb51_jpg/kick/THE_PROTECTOR_kick_f_cm_np1_ba_bad_37\n../../../data/hmdb51_jpg/kick/THE_PROTECTOR_kick_f_cm_np1_ba_med_63\n../../../data/hmdb51_jpg/kick/THE_PROTECTOR_kick_f_cm_np1_ba_med_69\n../../../data/hmdb51_jpg/kick/THE_PROTECTOR_kick_f_cm_np1_ba_med_72\n../../../data/hmdb51_jpg/kick/THE_PROTECTOR_kick_f_cm_np1_fr_bad_53\n../../../data/hmdb51_jpg/kick/THE_PROTECTOR_kick_f_cm_np1_fr_med_4\n"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
d0ea5e507670abc60e967d37222084a11d756f6d | 29,199 | ipynb | Jupyter Notebook | notebooks/guionpracticas.ipynb | Mellandd/umucv | bf2eda0fc9147f652be6cbcb3bfced2bed29faea | [
"BSD-3-Clause"
] | null | null | null | notebooks/guionpracticas.ipynb | Mellandd/umucv | bf2eda0fc9147f652be6cbcb3bfced2bed29faea | [
"BSD-3-Clause"
] | null | null | null | notebooks/guionpracticas.ipynb | Mellandd/umucv | bf2eda0fc9147f652be6cbcb3bfced2bed29faea | [
"BSD-3-Clause"
] | null | null | null | 42.37881 | 866 | 0.666564 | [
[
[
"# Sesiones prácticas",
"_____no_output_____"
],
[
"## 0",
"_____no_output_____"
],
[
"Instalación de Python + ecosistema científico + opencv + opengl\n\n- aula virtual -> página web -> install\n- git o unzip master\n- anaconda completo o miniconda\n- linux, windows, mac\n- probar scripts con webcam y verificar opengl, dlib etc.\n- manejo básico de jupyter\n- repaso Python\n- Ejercicio: recortar y unir imágenes para conseguir [algo como esto](../images/demos/ej-c0.png).\n\nOpcional:\n\n- compilación opencv\n- docker",
"_____no_output_____"
],
[
"## 1",
"_____no_output_____"
],
[
"Ejercicio de comprobación de FOV/tamaños/distancias.\n\nDispositivos de captura\n\n- umucv (install con --upgrade) (update_umucv.sh)\n- webcam.py con opencv crudo\n- stream.py, opciones de autostream, efecto de teclas, --help, --dev=help\n\n - webcams\n - videos\n - carpeta de imágenes\n - teléfono\n - youtube\n - urls de tv\n \n- ejemplo de recorte invertido",
"_____no_output_____"
],
[
"## 2",
"_____no_output_____"
],
[
"Más utilidades\n\n- spyder\n\n- PYTHONPATH\n\n- control de webcam v4l2-ctl, vlc, gucview\n\n- wzoom.py (para las ventanas de Windows que no tienen zoom)\n\n- help_window.py\n\n- save_video.py\n\n- ampliar mouse.py:\n\n - círculos en las posiciones marcadas (cv.circle)\n \n - coordenadas textuales (cv.putText (ej. en hello.py) o umucv.util.putText)\n\n - marcar solo los dos últimos (pista: collections.deque)\n\n - reproducir code/medidor.py indicando la distancia en pixels\n\n - Dado el FOV: indicar el ángulo de las direcciones marcadas.",
"_____no_output_____"
],
[
"## 3",
"_____no_output_____"
],
[
"deque.py\n\nroi.py:\n\n- añadir la media del nivel de gris del recorte\n\n- guardar el recorte y mostrar cv.absdiff respecto al frame actual, mostrando su media o su máximo.\n\n(Sirve de punto de partida para el ejercicio ACTIVIDAD)",
"_____no_output_____"
],
[
"## 4",
"_____no_output_____"
],
[
"- aclaraciones ejercicio COLOR\n\n- demo spectral\n\n- trackbar.py\n\n- demo filtros\n\nEjercicio: \n\n- implementación de filtro gaussiano con tracker sigma en toda la imagen, monocromo ([ejemplo](../images/demos/ej-c4-0.png)).\n- add box y median\n- medir y mostrar tiempos de cómputo en diferentes casos\n\n(Sirve de punto de partida para el ejercicio opcional FILTROS)",
"_____no_output_____"
],
[
"## 5",
"_____no_output_____"
],
[
"HOG\n\n- (captura asíncrona)\n\n- (teoría de HOG, implementación sencilla)\n\n- hog0.py en detalle\n\n- pedestrian.py, detección multiescala\n\n- DLIB facelandmarks.py: HOG face detector con landmarks\n\nEjercicio: blink detection, inpainting eyes, etc.",
"_____no_output_____"
],
[
"## 6",
"_____no_output_____"
],
[
"Detección de corners y Flujo óptico de Lucas-Kanade\n\n- LK/*.py\n",
"_____no_output_____"
],
[
"Vamos a construir un \"tracker\" de puntos de interés basado en el método de Lucas-Kanade.",
"_____no_output_____"
],
[
"El primer paso es construir un detector de corners partiendo de cero, calculando la imagen de respuesta correspondiente al menor valor propio de la matriz de covarianza de la distribución local del gradiente en cada pixel (`corners0.py`). En realidad esta operación está directamente disponible en opencv mediante cv.goodFeaturesToTrack (`corners1.py`, `corners2.py ).",
"_____no_output_____"
],
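[
"A minimal sketch (an illustrative addition, not one of the course scripts; the filename is made up) of the two approaches just described: the min-eigenvalue response image and the opencv shortcut.\n\n```python\nimport cv2 as cv\n\ngray = cv.cvtColor(cv.imread('frame.png'), cv.COLOR_BGR2GRAY)\n# response image: smallest eigenvalue of the local gradient covariance matrix\nresp = cv.cornerMinEigenVal(gray, blockSize=5)\n# the shortcut: strong, well separated corners directly\ncorners = cv.goodFeaturesToTrack(gray, maxCorners=200, qualityLevel=0.1, minDistance=10)\n```",
"_____no_output_____"
],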
[
"El siguiente ejemplo muestra cómo encontrar directamente con `cv.calcOpticalFlowPyrLK` la posición de los puntos detectados en el fotograma siguiente, sin necesidad de recalcular puntos nuevos y asociarlos con los anteriores (`lk_track0.py`).",
"_____no_output_____"
],
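[
"An illustrative fragment of the core call used by `lk_track0.py` (assumptions: `prev` and `frame` are consecutive grayscale frames, and `p0` is an (N,1,2) float32 array coming from cv.goodFeaturesToTrack):\n\n```python\nimport cv2 as cv\n\n# track the points p0 from prev into frame\np1, st, err = cv.calcOpticalFlowPyrLK(prev, frame, p0, None, winSize=(15, 15), maxLevel=2)\ngood_new = p1[st == 1]  # positions found in the new frame\ngood_old = p0[st == 1]  # the corresponding starting points\n```",
"_____no_output_____"
],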
[
"A continuación ampliamos el código para generar puntos nuevos periódicamente y crear una lista de trayectorias \"tracks\" que se mantiene actualizada en cada fotograma (`lk_track1.py`).",
"_____no_output_____"
],
[
"Finalmente, ampliamos el código anterior para que solo se generen puntos nuevos en zonas de la imagen donde no los haya, y mejoramos la detección de las posiciones siguientes con un criterio de calidad muy robusto que exige que la predicción hacia el pasado de los puntos nuevos coincida con el punto inicial. Si no hay una asociación mutua el punto y su trayectoria se descartan (`lk_tracks.py`).",
"_____no_output_____"
],
[
"Ejercicios:\n\n- Analizar los tracks para determinar en qué dirección se mueve la cámara (UP,DOWN,LEFT,RIGHT, [FORWARD, BACKWARD])\n\n- Estudiar la posibilidad de hacer tracking de un ROI.",
"_____no_output_____"
],
[
"## 7",
"_____no_output_____"
],
[
"Experimentamos con el detector de puntos de interés SIFT.\n\nNuestro objetivo es obtener un conjunto de \"keypoints\", cada uno con su descriptor (vector de características que describe el entorno del punto), que permita encontrarlo en imágenes futuras. Esto tiene una aplicación inmediata para reconocer objetos y más adelante en geometría visual.\n\nEmpezamos con el ejemplo de código code/SIFT/sift0.py, que simplemente calcula y muestra los puntos de interés. Es interesante observar el efecto de los parámetros del método y el tiempo de cómputo en función del tamaño de la imagen (que puedes cambiar con --size o --resize).\n\nEl siguiente ejemplo code/SIFT/sift1.py muestra un primer ataque para establecer correspondencias. Los resultados son bastante pobres porque se aceptan todas las posibles coincidencias.\n\nFinalmente, en code/SIFT/sift.py aplicamos un criterio de selección para eliminar muchas correspondencias erróneas (aunque no todas). Esto es en principio suficiente para el reconocimiento de objetos. (Más adelante veremos una forma mucho mejor de eliminar correspondencias erróneas, necesaria para aplicaciones de geometría.)\n\nEl ejercicio obligatorio **SIFT** es una ampliación sencilla de este código. Se trata de almacenar un conjunto de modelos (¡con textura! para que tengan suficientes keypoints) como portadas de libros, discos, videojuegos, etc. y reconocerlos en base a la proporción de coincidencias detectadas.",
"_____no_output_____"
],
[
"En la segunda parte de la clase experimentamos con un servidor mjpg y creamos bots de telegram (explicados al final de este documento) para comunicarnos fácilmente con las aplicaciones de visión artificial desde el móvil.",
"_____no_output_____"
],
[
"## 8",
"_____no_output_____"
],
[
"Reconocimiento de formas mediante descriptores frecuenciales.\n\nNuestro objetivo es hacer un programa que reconozca la forma de trébol, como se muestra [en este pantallazo](../images/demos/shapedetect.png). Si no tenéis a mano un juego de cartas podéis usar --dev=dir:../images/card*.png para hacer las pruebas, aunque lo ideal es hacerlo funcionar con una cámara en vivo.\n\nTrabajaremos con los ejemplos de código de la carpeta `code/shapes` y, como es habitual, iremos añadiendo poco a poco funcionalidad. En cada nuevo paso los comentarios explican los cambios respecto al paso anterior.\n\nEmpezamos con el ejemplo shapes/trebol1.py, que simplemente prepara un bucle de captura básico, binariza la imagen y muestra los contornos encontrados. Se muestran varias formas de realizar la binarización y se puede experimentar con ellas, pero en principio el método automático propuesto suele funcionar bien en muchos casos.\n\nEl segundo paso en shapes/trebol2.py junta la visualización en una ventana y selecciona los contornos oscuros de tamaño razonable. Esto no es imprescincible para nuestra aplicación, pero es conveniente familiarizarse con el concepto de orientación de un contorno.\n\nEn shapes/trebol3.py leemos un modelo de la silueta trébol de una imagen que tenemos en el repositorio y la mostramos en una ventana.\n\nEn shapes/trebol3b.py hacemos una utilidad para ver gráficamente las componentes frecuenciales como elipses que componen la figura. Podemos ver las componentes en su tamaño natural, incluyendo la frecuencia principal, [como aquí](../images/demos/full-components.png), o quitando la frecuencia principal y ampliando el tamaño de las siguientes, que son la base del descriptor de forma, [como se ve aquí](../images/demos/shape-components.png). Observa que las configuraciones de elipses son parecidas cuando corresponden a la misma silueta.\n\nEn shapes/trebol4.py definimos la función que calcula el descriptor invariante. Se basa esencialmente en calcular los tamaños relativos de estas elipses. En el código se explica cómo se consigue la invarianza a las transformaciones deseadas: posición, tamaño, giros, punto de partida del contorno y ruido de medida.\n\nFinalmente, en shapes/trebol5.py calculamos el descriptor del modelo y en el bucle de captura calculamos los descriptores de los contornos oscuros detectados para marcar las siluetas que tienen un descriptor muy parecido al del trébol.",
"_____no_output_____"
],
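[
"A compact sketch of the invariant descriptor idea (illustrative; the exact components and normalization used in trebol4.py may differ), assuming `contour` is an (N,2) array of contour points:\n\n```python\nimport numpy as np\n\ndef frequency_descriptor(contour, n=10):\n    z = contour[:, 0] + 1j * contour[:, 1]  # the contour as a complex signal\n    f = np.fft.fft(z)\n    f[0] = 0                 # remove position\n    f = f / abs(f[1])        # normalize size using the main frequency\n    return abs(f[2:2 + n])   # magnitudes are invariant to rotation and starting point\n```",
"_____no_output_____"
],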
[
"## 8b",
"_____no_output_____"
],
[
"En esta subsesión vamos a hacer varias actividades. Necesitamos algunos paquetes. En Linux son:\n\n sudo apt install tesseract-ocr tesseract-ocr-spa libtesseract-dev\n pip install tesserocr\n\n sudo apt install libzbar-dev\n pip install pyzbar\n\n[zbar Windows instaler](http://zbar.sourceforge.net) -> download\n\nUsuarios de Mac y Windows: investigad la forma de instalar tesseract.\n\n\n1) En primer lugar nos fijamos en el script [code/ocr.py](../code/ocr.py), cuya misión es poner en marcha el OCR con la cámara en vivo. Usamos el paquete de python `tesserocr`. Vamos a verificar el funcionamiento con una imagen estática, pero lo ideal es probarlo con la cámara en vivo.\n\n ./ocr.py --dev=dir:../images/texto/bo0.png \n\nEstá pensado para marcar una sola línea de texto, [como se muestra aquí](../images/demos/ocr.png). Este pantallazo se ha hecho con la imagen bo1.png disponible en la misma carpeta, que está desenfocada, pero aún así el OCR funciona bien.\n\n(En windows parece que hay que usar pytesseract en lugar de tesserocr, lo que requiere adaptar del script.)\n\nPara mostrar la complejidad de un ocr mostramos el resultado del script `crosscorr.py` sobre images/texto.png, para observar que la comparación pixel a pixel no es suficiente para obtener resultados satisfactorios. En esa misma imagen la binarización y extracción de componentes conexas no consigue separar letras individuales.\n\nFinalmente demostramos mediante `spectral.py` que la transormada de Fourier 2D permite detectar el ángulo y la separación entre renglones.",
"_____no_output_____"
],
[
"2) El segundo ejemplo es `code/zbardemo.png` que muestra el uso del paquete pyzbar para leer códigos de barras ([ejemplo](../images/demos/barcode.png)) y códigos QR ([ejemplo](../images/demos/qr.png)) con la cámara. En los códigos de barras se detectan puntos de referencia, y en los QR se detectan las 4 esquinas del cuadrado, que pueden ser útiles como referencia en algunas aplicaciones de geometría.",
"_____no_output_____"
],
[
"4) demo de `grabcut.py` para segmentar interactivamente una imagen. Lo probamos con images/puzzle3.png.",
"_____no_output_____"
],
[
"5) Ponemos en marcha el detector de caras de opencv con la webcam en vivo y comparamos con el detector de DLIB.",
"_____no_output_____"
],
[
"## 9",
"_____no_output_____"
],
[
"Hoy vamos a rectificar el plano de la mesa apoyándonos en marcadores artificiales.\n\nEn primer lugar trabajaremos con marcadores poligonales. Nuestro objetivo es detectar un marcador como el que aparece en el vídeo `images/rot4.mjpg`. Nos vamos a la carpeta `code/polygon`.\n\nEl primer paso (`polygon0.py`) es detectar figuras poligonales con el número de lados correcto a partir de los contornos detectados.\n\nA continuación (`polygon1.py`) nos quedamos con los polígonos que realmente pueden corresponder al marcador. Esto se hace viendo si existe una homografía que relaciona con precisión suficiente el marcador real y su posible imagen.\n\nFinalmente (`polygon2.py`) obtiene el plano rectificado\n\nTambién se puede añadir información \"virtual\" a la imagen original, como por ejemplo los ejes de coordenadas definidos por el marcador (`polygon3.py`).\n\n\nComo segunda actividad, en la carpeta `code/elipses` se muestra la forma de detectar un marcador basado en 4 círculos.",
"_____no_output_____"
],
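[
"The core of the rectification step, as an illustrative fragment (assumptions: `view` holds the detected marker corners in the image and `real` their known coordinates in the plane, both as float32 arrays; the output size is arbitrary):\n\n```python\nimport cv2 as cv\n\nH, _ = cv.findHomography(view, real)                # image points -> plane coordinates\nrectified = cv.warpPerspective(img, H, (800, 800))  # rectified view of the table\n```",
"_____no_output_____"
],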
[
"## 10",
"_____no_output_____"
],
[
"En esta sesión vamos a extraer la matriz de cámara a partir del marcador utilizado en la sesión anterior, lo que nos permitirá añadir objetos virtuales tridimensionales a la escena y determinar la posición de la cámara en el espacio.\n\nNos vamos a la carpeta `code/pose`, donde encontraremos los siguientes ejemplos de código:\n\n`pose0.py` incluye el código completo para extraer contornos, detectar el marcador poligonal, extraer la matriz de cámara y dibujar un cubo encima del marcador.\n\n`pose1.py` hace lo mismo con funciones de umucv.\n\n`pose2.py` trata de ocultar el marcador y dibuja un objeto que cambia de tamaño.\n\n`pose3.py` explica la forma de proyectar una imagen en la escena escapando del plano del marcador.\n\n`pose3D.py` es un ejemplo un poco más avanzado que utiliza el paquete pyqtgraph para mostrar en 3D la posición de la cámara en el espacio.\n\nEn el ejercicio **RA** puedes intentar que el comportamiento del objeto virtual dependa de acciones del usuario (p. ej. señalando con el ratón un punto del plano) o de objetos que se encuentran en la escena.",
"_____no_output_____"
],
[
"## 11",
"_____no_output_____"
],
[
"Breve introducción a scikit-learn y keras.\n\nEn primer lugar repasaremos algunos conceptos básicos en el notebook [machine learning](machine-learning.ipynb).\n\nEsta sesión está dedicada a poner en marcha una red convolucional sencilla. La tarea que vamos a resolver es el reconocimiento de dígitos manuscritos. Por eso, en primer lugar es conveniente escribir unos cuantos números en una hoja de papel, con un bolígrafo que tenga un trazo no demasiado fino, y sin preocuparnos mucho de que estén bien escritos. Pueden tener distintos tamaños, pero no deben estar muy girados. Para desarrollar el programa y hacer pruebas cómodamente se puede trabajar con una imagen fija, pero la idea es que nuestro programa funcione con la cámara en vivo.\n\nTrabajaremos en la carpeta [code/DL/CNN](../code/DL/CNN), donde tenemos las diferentes etapas de ejercicio y una imagen de prueba.\n\nEl primer paso es `digitslive-1.py` que simplemente encuentra las manchas de tinta que pueden ser posibles números.\n\nEn `digitslive-2.py` normalizamos el tamaño de las detecciones para poder utilizar la base de datos MNIST.\n\nEn `digitslive-3.py` implementamos un clasificador gaussiano con reducción de dimensión mediante PCA y lo ponemos en marcha con la imagen en vivo. (Funciona bastante bien pero, p.ej., en la imagen de prueba comete un error).\n\nFinalmente, en `digitslive-4.py` implementamos la clasificación mediante una red convolucional mediante el paquete **keras**. Usamos unos pesos precalculados. (Esta máquina ya no comete el error anterior.)\n\nComo siempre, en cada fase del ejercicio los comentarios explican el código que se va añadiendo.\n\nUna vez conseguido esto, la sesión práctica tiene una segunda actividad que consiste en **entrenar los pesos** de (por ejemplo) esta misma red convolucional. Para hacerlo en nuestro ordenador sin perder la paciencia necesitamos una GPU con CUDA y libCUDNN. La instalación de todo lo necesario puede no ser trivial. \n\nUna alternativa muy práctica es usar [google colab](https://colab.research.google.com/), que proporciona gratuitamente máquinas virtuales con GPU y un entorno de notebooks jupyter (un poco modificados pero compatibles). Para probarlo, entrad con vuestra cuenta de google y abrid un nuevo notebook. En la opción de menú **Runtime** hay que seleccionar **Change runtime type** y en hardware accelerator ponéis GPU. En una celda del notebook copiáis directamente el contenido del archivo `cnntest.py` que hay en este mismo directorio donde estamos trabajando hoy. Al evaluar la celda se descargará la base de datos y se lanzará un proceso de entrenamiento. Cada epoch tarda unos 4s. Podéis comparar con lo que se consigue con la CPU en vuestro propio ordenador. Se puede lanzar un entrenamiento más completo, guardar los pesos y descargarlos a vuestra máquina.\n\nComo curiosidad, podéis comparar con lo que conseguiría el OCR tesseract, y guardar algunos casos de dígitos que estén bien dibujados pero que la red clasifique mal.\n\nFinalmente, entrenamos un autoencoder (notebook [bottleneck](bottleneck.ipynb)) y comparamos el resultado con la reducción de dimensión PCA explicada al principio.",
"_____no_output_____"
],
[
"## 12",
"_____no_output_____"
],
[
"En esta sesión vamos a poner en marcha algunos modelos más avanzados de deep learning.\n\nLos ejemplos de código se han probado sobre LINUX. En Windows o Mac puede ser necesario hacer modificaciones; para no perder mucho tiempo mi recomendación es probarlo primero en una máquina virtual.\n\nSi tenéis una GPU nvidia reciente lo ideal es instalar CUDA y libCUDNN para conseguir una mayor velocidad de proceso. Si no tenéis GPU no hay ningún problema, todos los modelos funcionan con CPU. (Los ejercicios de deep learning que requieren entrenamiento son opcionales y se pueden entrenar en COLAB.)",
"_____no_output_____"
],
[
"1) Para probar el **reconocimiento de caras** nos vamos a la carpeta code/DL/facerec. Debe estar correctamente instalado DLIB. \n\nEn el directorio `gente` se guardan los modelos. Como ejemplo tenemos a los miembros de Monty Python:\n\n ./facerec.py --dev=dir:../../../images/monty-python*\n \n(Recuerda que las imágenes seleccionadas con --dev=dir: se avanzan pinchando con el ratón en la ventana pequeña de muestra).\n\nPuedes meter fotos tuyas y de tu familia en la carpeta `gente` para probar con la webcam o con otras fotos.\n\nEsta versión del reconocimiento de caras no tiene aceleración con GPU (tal vez se puede configurar). Si reducimos un poco el tamaño de la imagen funciona con bastante fluidez.\n\nEjercicio: selecciona una cara en la imagen en vivo pinchando con el ratón para ocultarla (emborronándola o pixelizándola) cuando se reconozca en las imágenes siguientes.",
"_____no_output_____"
],
[
"2) Para probar la máquina **inception** nos movemos a la carpeta code/DL/inception.\n\n ./inception0.py\n \n(Se descargará el modelo del la red). Se puede probar con las fotos incluidas en la carpeta con `--dev=dir:*.png`. La versión `inception1.py` captura en hilo aparte y muestra en consola las 5 categorías más probables.\n\nAunque se supone que consigue buenos resultados en las competiciones, sobre imágenes naturales comete bastante errores.",
"_____no_output_____"
],
[
"3) El funcionamiento de **YOLO** es mucho mejor. Puede ponerse en marcha fácilmente siguiendo las instrucciones en \nhttps://github.com/zzh8829/yolov3-tf2\n\nEl artículo de [YOLO V3](https://pjreddie.com/media/files/papers/YOLOv3.pdf) es interesante. En la sección 5 el autor explica que abandonó esta línea de investigación por razones éticas. Os recomiendo que la leáis. Posteriormente apareció [YOLO V4](https://arxiv.org/abs/2004.10934).",
"_____no_output_____"
],
[
"4) mediapipe proporciona unos detectorses de \"human pose\" and \"hand pose\" muy fáciles de usar.",
"_____no_output_____"
],
[
"En la carpeta `docker` hay un script para ejecutar una imagen docker que tiene instalados todos los paquetes que hemos estamos usando en la asignatura. Es experimental. No perdaís ahora tiempo con esto si no estáis familiarizados con docker.\n\nEl tema de deep learning en visión artificial es amplísimo. Para estudiarlo en detalle hace falta (como mínimo) una asignatura avanzada (master). Nuestro objetivo es familizarizarnos un poco con algunas de las máquinas preentrenadas disponibles para hacernos una idea de sus ventajas y limitaciones.\n\nSi estáis interesados en estos temas el paso siguiente es adaptar alguno de estos modelos a un problema propio mediante \"transfer learning\", que consiste en utilizar las primeras etapas de una red preentrenada para transformar nuestros datos y ajustar un clasificador sencillo. Alternativamente, se puede reajustar los pesos de un modelo preentrenado, fijando las capas iniciales al principio. Para remediar la posible falta de ejemplos se utilizan técnicas de \"data augmentation\", que generan variantes de los ejemplos de entrenamiento con múltiples transformaciones.",
"_____no_output_____"
],
[
"## 13",
"_____no_output_____"
],
[
"Repaso y dudas.",
"_____no_output_____"
],
[
"## otros modelos",
"_____no_output_____"
],
[
"UNET\n\nVariational autoencoder\n\nTransformers",
"_____no_output_____"
],
[
"## entrenar dlib",
"_____no_output_____"
],
[
"- (opcional) DLIB herramienta de etiquetado imglab. Entrenamiento de detector HOG SVM con herramientas de DLIB:\n\n - descargar y descomprimir dlib source\n - ir a los ejemplos/faces\n - meter dentro imglab (que hay que compilar pero tenemos versión precompilada en robot/material/va)\n - mostrar los training.xml y testing.xml (se pueden crear otros)\n - meter dentro train_detector.py y run_detector.py de code/hog\n - ./train_detector training.xml testing.xml (crea detector.svm)\n - ./run_detector detector.svm --dev=dir:\\*.jpg (o también --dev=dir:/path/to/umucv/images/monty\\*)\n ",
"_____no_output_____"
],
[
"## correlation filter",
"_____no_output_____"
],
[
"Comentamos el método de detección de objetos por correlación cruzada, que es el mismo criterio que se usa para buscar la posición de *corners* en imágenes sucesivas, y luego vemos la demostración del discriminative correlation filter.",
"_____no_output_____"
],
[
"- crosscorr.py\n\n- dcf.py",
"_____no_output_____"
],
[
"## flask server",
"_____no_output_____"
],
[
"El ejemplo `server.py` explica cómo hacer un servidor web sencillo con *flask* para enviar un pantallazo de la imagen actual de la webcam, y `mjpegserver.py` explica cómo hacer un servidor de streaming en formato mjpeg.",
"_____no_output_____"
],
[
"## telegram bot",
"_____no_output_____"
],
[
"Vamos a jugar con un bot de telegram que nos permite comunicarnos cómodamente con nuestro ordenador desde el teléfono móvil, sin necesidad de tener una dirección pública de internet.\n\nSimplemente necesitamos:\n\n pip install python-telegram-bot\n\nEl ejemplo `bot/bot0.py` nos envía al teléfono la IP del ordenador (es útil si necesitamos conectarnos por ssh con una máquina que tiene IP dinámica).\n\nEl ejemplo `bot/bot1.py` explica la forma de enviar una imagen nuestro teléfono cuando ocurre algo. En este caso se envía cuando se pulsa una tecla, pero lo normal es detectar automáticamente algún evento con las técnicas de visión artificial que estamos estudiando.\n\nEl ejemplo `bot/bot2.py` explica la forma de hacer que el bot responda a comandos. El comando /hello nos devuelve el saludo, el comando /stop detiene el programa y el comando /image nos devuelve una captura de nuestra webcam. (Se ha usado la captura en un hilo). \n\nEl ejemplo `bot/bot3.py` explica la forma de capturar comandos con argumentos y el procesamiento de una imagen enviada por el usuario.\n\nEsta práctica es muy útil para enviar cómodamente a nuestros programas de visión artificial una imagen tomada con la cámara sin necesidad de escribir una aplicación específica para el móvil. Algunos ejercicios que estamos haciendo se pueden adaptar fácilmente para probarlos a través de un bot de este tipo.\n\nPara crearos vuestro propio bot tenéis que contactar con el bot de telegram \"BotFather\", que os guiará paso a paso y os dará el token de acceso. Y luego el \"IDBot\" os dirá el id numérico de vuestro usuario.",
"_____no_output_____"
]
]
] | [
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
d0ea6a8c5e3b1e70affd7f62416542c7a297a164 | 13,439 | ipynb | Jupyter Notebook | notebooks/07.More-libraries/07.05-other-widget-libraries.ipynb | datalayer-contrib/ipywidgets-tutorial | 4d3ee7af6058d2c0c5e55e9060f9a330829dc2fc | [
"BSD-3-Clause"
] | 342 | 2017-08-23T18:36:58.000Z | 2022-03-11T18:47:31.000Z | notebooks/07.More-libraries/07.05-other-widget-libraries.ipynb | datalayer-contrib/ipywidgets-tutorial | 4d3ee7af6058d2c0c5e55e9060f9a330829dc2fc | [
"BSD-3-Clause"
] | 118 | 2017-08-23T01:42:45.000Z | 2022-02-14T18:11:47.000Z | notebooks/07.More-libraries/07.05-other-widget-libraries.ipynb | datalayer-contrib/ipywidgets-tutorial | 4d3ee7af6058d2c0c5e55e9060f9a330829dc2fc | [
"BSD-3-Clause"
] | 152 | 2017-08-22T22:24:28.000Z | 2022-03-31T12:45:37.000Z | 29.278867 | 254 | 0.55711 | [
[
[
"# Other widget libraries\n\nWe would have loved to show you everything the Jupyter Widgets ecosystem has to offer today, but we are blessed to have such an active community of widget creators and unfortunately can't fit all widgets in a single session, no matter how long. \n\nThis notebook lists some of the widget libraries we wanted to demo but did not have enough time to include in the session. Enjoy!",
"_____no_output_____"
],
[
"# ipyleaflet: Interactive maps\n\n## A Jupyter - LeafletJS bridge\n\n## https://github.com/jupyter-widgets/ipyleaflet\n\n\nipyleaflet is a jupyter interactive widget library which provides interactive maps to the Jupyter notebook.\n\n- MIT Licensed\n\n**Installation:**\n\n```bash\nconda install -c conda-forge ipyleaflet\n```",
"_____no_output_____"
]
],
[
[
"from ipywidgets import Text, HTML, HBox\nfrom ipyleaflet import GeoJSON, WidgetControl, Map \nimport json",
"_____no_output_____"
],
[
"m = Map(center = (43,-100), zoom = 4)\n\ngeo_json_data = json.load(open('us-states-density-colored.json'))\ngeojson = GeoJSON(data=geo_json_data, hover_style={'color': 'black', 'dashArray': '5, 5', 'weight': 2})\nm.add_layer(geojson)\n\nhtml = HTML('''\n <h4>US population density</h4>\n Hover over a state\n''')\nhtml.layout.margin = '0px 20px 20px 20px'\ncontrol = WidgetControl(widget=html, position='topright')\nm.add_control(control)\n\ndef update_html(properties, **kwargs):\n html.value = '''\n <h4>US population density</h4>\n <h2><b>{}</b></h2>\n {} people / mi^2\n '''.format(properties['name'], properties['density'])\n\ngeojson.on_hover(update_html)\n\nm",
"_____no_output_____"
]
],
[
[
"# pythreejs: 3D rendering in the browser \n\n## A Jupyter - threejs bridge\n\n## https://github.com/jupyter-widgets/pythreejs\n\n\nPythreejs is a jupyter interactive widget bringing fast WebGL 3d visualization to the Jupyter notebook.\n\n- Originally authored by Jason Grout, currently maintained by Vidar Tonaas Fauske\n- BSD Licensed\n\nPythreejs is *not* a 3d plotting library, it only exposes the threejs scene objects to the Jupyter kernel.\n\n**Installation:**\n\n```bash\nconda install -c conda-forge pythreejs\n```",
"_____no_output_____"
]
],
[
[
"from pythreejs import *\nimport numpy as np\nfrom IPython.display import display\nfrom ipywidgets import HTML, Text, Output, VBox\nfrom traitlets import link, dlink",
"_____no_output_____"
],
[
"# Generate surface data:\nview_width = 600\nview_height = 400\nnx, ny = (20, 20)\nxmax=1\nx = np.linspace(-xmax, xmax, nx)\ny = np.linspace(-xmax, xmax, ny)\nxx, yy = np.meshgrid(x, y)\nz = xx ** 2 - yy ** 2\n#z[6,1] = float('nan')\n\n\n# Generate scene objects from data:\nsurf_g = SurfaceGeometry(z=list(z[::-1].flat), \n width=2 * xmax,\n height=2 * xmax,\n width_segments=nx - 1,\n height_segments=ny - 1)\n\nsurf = Mesh(geometry=surf_g,\n material=MeshLambertMaterial(map=height_texture(z[::-1], 'YlGnBu_r')))\n\nsurfgrid = SurfaceGrid(geometry=surf_g, material=LineBasicMaterial(color='black'),\n position=[0, 0, 1e-2]) # Avoid overlap by lifting grid slightly\n\n# Set up picking bojects:\nhover_point = Mesh(geometry=SphereGeometry(radius=0.05),\n material=MeshLambertMaterial(color='green'))\n\nclick_picker = Picker(controlling=surf, event='dblclick')\nhover_picker = Picker(controlling=surf, event='mousemove')\n\n# Set up scene:\nkey_light = DirectionalLight(color='white', position=[3, 5, 1], intensity=0.4)\nc = PerspectiveCamera(position=[0, 3, 3], up=[0, 0, 1], aspect=view_width / view_height,\n children=[key_light])\n\nscene = Scene(children=[surf, c, surfgrid, hover_point, AmbientLight(intensity=0.8)])\n\nrenderer = Renderer(camera=c, scene=scene,\n width=view_width, height=view_height,\n controls=[OrbitControls(controlling=c), click_picker, hover_picker])\n\n\n# Set up picking responses:\n# Add a new marker when double-clicking:\nout = Output()\ndef f(change):\n value = change['new']\n with out:\n print('Clicked on %s' % (value,))\n point = Mesh(geometry=SphereGeometry(radius=0.05), \n material=MeshLambertMaterial(color='hotpink'),\n position=value)\n scene.add(point)\n\nclick_picker.observe(f, names=['point'])\n\n# Have marker follow picker point:\nlink((hover_point, 'position'), (hover_picker, 'point'))\n\n# Show picker point coordinates as a label:\nh = HTML()\ndef g(change):\n h.value = 'Green point at (%.3f, %.3f, %.3f)' % tuple(change['new'])\n h.value += ' Double-click to add marker'\ng({'new': hover_point.position})\nhover_picker.observe(g, names=['point'])\n\ndisplay(VBox([h, renderer, out]))",
"_____no_output_____"
]
],
[
[
"# bqplot: complex interactive visualizations\n\n## https://github.com/bloomberg/bqplot\n\n## A Jupyter - d3.js bridge\n\nbqplot is a jupyter interactive widget library bringing d3.js visualization to the Jupyter notebook.\n\n- Apache Licensed\n\nbqplot implements the abstractions of Wilkinson’s “The Grammar of Graphics” as interactive Jupyter widgets.\n\nbqplot provides both\n-\thigh-level plotting procedures with relevant defaults for common chart types,\n-\tlower-level descriptions of data visualizations meant for complex interactive visualization dashboards and applications involving mouse interactions and user-provided Python callbacks.\n\n**Installation:**\n\n```bash\nconda install -c conda-forge bqplot\n```",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport bqplot as bq",
"_____no_output_____"
],
[
"xs = bq.LinearScale()\nys = bq.LinearScale()\nx = np.arange(100)\ny = np.cumsum(np.random.randn(2, 100), axis=1) #two random walks\n\nline = bq.Lines(x=x, y=y, scales={'x': xs, 'y': ys}, colors=['red', 'green'])\nxax = bq.Axis(scale=xs, label='x', grid_lines='solid')\nyax = bq.Axis(scale=ys, orientation='vertical', tick_format='0.2f', label='y', grid_lines='solid')\n\nfig = bq.Figure(marks=[line], axes=[xax, yax], animation_duration=1000)\ndisplay(fig)",
"_____no_output_____"
],
[
"# update data of the line mark\nline.y = np.cumsum(np.random.randn(2, 100), axis=1)",
"_____no_output_____"
]
],
[
[
"# ipympl: The Matplotlib Jupyter Widget Backend\n\n## https://github.com/matplotlib/ipympl\n\n\nEnabling interaction with matplotlib charts in the Jupyter notebook and JupyterLab\n\n- BSD-3-Clause\n\n**Installation:**\n\n```bash\nconda install -c conda-forge ipympl\n```",
"_____no_output_____"
],
[
"Enabling the `widget` backend. This requires ipympl. ipympl can be install via pip or conda.",
"_____no_output_____"
]
],
[
[
"%matplotlib widget",
"_____no_output_____"
],
[
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom ipywidgets import VBox, FloatSlider",
"_____no_output_____"
]
],
[
[
"When using the `widget` backend from ipympl, fig.canvas is a proper Jupyter interactive widget, which can be embedded in Layout classes like HBox and Vbox.\n\nOne can bound figure attributes to other widget values.",
"_____no_output_____"
]
],
[
[
"plt.ioff()\nplt.clf()\n\nslider = FloatSlider(\n value=1.0,\n min=0.02,\n max=2.0\n)\n\nfig1 = plt.figure(1)\n\nx1 = np.linspace(0, 20, 500)\n\nlines = plt.plot(x1, np.sin(slider.value * x1))\n\ndef update_lines(change):\n lines[0].set_data(x1, np.sin(change.new * x1))\n fig1.canvas.draw()\n fig1.canvas.flush_events()\n\nslider.observe(update_lines, names='value')\n\nVBox([slider, fig1.canvas])",
"_____no_output_____"
]
],
[
[
"# ipytree: Interactive tree view based on ipywidgets\n\n## https://github.com/QuantStack/ipytree/\n\n\nipytree is a jupyter interactive widget library which provides a tree widget to the Jupyter notebook.\n\n- MIT Licensed\n\n**Installation:**\n\n```bash\nconda install -c conda-forge ipytree\n```",
"_____no_output_____"
],
[
"## Create a tree",
"_____no_output_____"
]
],
[
[
"from ipywidgets import Text, link\nfrom ipytree import Tree, Node",
"_____no_output_____"
],
[
"tree = Tree()\ntree.add_node(Node('node1'))\n\nnode2 = Node('node2')\ntree.add_node(node2)\n\ntree",
"_____no_output_____"
],
[
"node3 = Node('node3', disabled=True)\nnode4 = Node('node4')\nnode5 = Node('node5', [Node('1'), Node('2')])\nnode2.add_node(node3)\nnode2.add_node(node4)\nnode2.add_node(node5)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
]
] |
d0ea793e7d579b9bffbc938c59c92fc658a0725e | 41,032 | ipynb | Jupyter Notebook | 07_factors.ipynb | JackRab/Julia-DataFrames-Tutorial | ece5349c92dda84b844041b8066284b7df144772 | [
"MIT"
] | null | null | null | 07_factors.ipynb | JackRab/Julia-DataFrames-Tutorial | ece5349c92dda84b844041b8066284b7df144772 | [
"MIT"
] | null | null | null | 07_factors.ipynb | JackRab/Julia-DataFrames-Tutorial | ece5349c92dda84b844041b8066284b7df144772 | [
"MIT"
] | 1 | 2021-04-05T09:44:24.000Z | 2021-04-05T09:44:24.000Z | 27.065963 | 856 | 0.47363 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
d0ea7e29755f5e2c48e988a40ea02e2a677cb921 | 8,831 | ipynb | Jupyter Notebook | json-schema.ipynb | eelcodijkstra/mongodb-binder | 4347dbc9c129229f673551c81baf516ddd25ef54 | [
"MIT"
] | null | null | null | json-schema.ipynb | eelcodijkstra/mongodb-binder | 4347dbc9c129229f673551c81baf516ddd25ef54 | [
"MIT"
] | null | null | null | json-schema.ipynb | eelcodijkstra/mongodb-binder | 4347dbc9c129229f673551c81baf516ddd25ef54 | [
"MIT"
] | 1 | 2019-08-27T16:59:11.000Z | 2019-08-27T16:59:11.000Z | 28.672078 | 163 | 0.51557 | [
[
[
"[Schema](Schema.ipynb) <- vorige - [Inhoudsopgave](Inhoud.ipynb) - volgende -> [JSON-LD en linked data](json-ld-linked-data.ipynb)",
"_____no_output_____"
],
[
"# JSON-schema\n\nMet JSON-schema kun je schema's voor JSON-objecten maken.\nDeze kun je gebruiken als documentatie van JSON-objecten die bijvoorbeeld in een web-API gebruikt worden.\nVervolgens kun je JSON-objecten valideren tegen een schema.\nDit kan handig zijn voor JSON-objecten in web-API's,\nzowel bij het genereren als bij het accepteren van JSON objecten.\n",
"_____no_output_____"
],
[
"## JSON-schema in MongoDB\n\nIn MongoDB kun je schema's in de JSON-schema notatie gebruiken voor het valideren van documenten in een collection.",
"_____no_output_____"
],
[
"## Definiëren van een schema",
"_____no_output_____"
],
[
"### Annotaties\n\nDeze annotaties zijn niet verplicht, maar wel \"good practice\".\n\n* `$schema` - welke (standaard)notatie gebruiken we hier? (\"schema van het schema\")\n* `title` - de naam van het schema\n* `description` - een beschrijving van het schema",
"_____no_output_____"
],
[
"### Type, object, properties\n\nEen document heeft als type: `object`.\n\nPer veld (*property*) van dit object geef je de naam en het type.\nJe kunt ook aangeven of welke velden verplicht (*required*) zijn.",
"_____no_output_____"
]
],
[
[
"contact_schema = {\n \"$schema\": \"http://json-schema.org/draft-07/schema#\",\n \"title\": \"Contact\",\n \"description\": \"schema for documents in the contacts collection\",\n \"type\": \"object\",\n \"properties\": {\n \"name\": {\"type\": \"string\"},\n \"email\": {\"type\": \"string\"},\n \"telephone\": {\"type\": \"string\"},\n },\n \"required\": [\"name\", \"email\", \"telephone\"]\n}",
"_____no_output_____"
]
],
[
[
"## JSON-schema in Python\n\nJe kunt in Python een schema definiëren als een Python dictionary.\n(Dit is vrijwel dezelfde notatie als JSON.)\n\nDe libraries `jsonschema` (https://python-jsonschema.readthedocs.io) en `fastjsonschema` geven je functies om JSON-objecten te valideren tegen een schema.\n\n",
"_____no_output_____"
]
],
[
[
"from jsonschema import validate",
"_____no_output_____"
],
[
"mycontact = {\n \"name\": \"Harry van Doorn\",\n \"email\": \"[email protected]\",\n \"tel\": \"06-1357 8642\"\n}\n\nvalidate(instance=mycontact, schema=contact_schema)",
"_____no_output_____"
]
],
[
[
"* Verbeter `mycontact` en valideer opnieuw.",
"_____no_output_____"
],
[
"### Arrays\n\nAls we meerdere e-mailadressen toestaan dan kunnen we daarvan een `array` maken:",
"_____no_output_____"
]
],
[
[
"contact_schema_1 = {\n \"$schema\": \"http://json-schema.org/draft-07/schema#\",\n \"title\": \"Contact\",\n \"description\": \"schema for documents in the contacts collection\",\n \"type\": \"object\",\n \"properties\": {\n \"name\": {\"type\": \"string\"},\n \"email\": {\"type\": \"array\", \n \"items\": {\"type\": \"string\"}},\n \"telephone\": {\"type\": \"string\"},\n },\n \"required\": [\"name\", \"email\", \"telephone\"]\n}",
"_____no_output_____"
],
[
"mycontact_1 = {\n \"name\": \"Harry van Doorn\",\n \"email\": \"[email protected]\",\n \"telephone\": \"06-1357 8642\"\n}\n\nvalidate(instance=mycontact_1, schema=contact_schema_1)",
"_____no_output_____"
]
],
[
[
"* Verbeter het veld `email` door daar een array van te maken, en valideer opnieuw.",
"_____no_output_____"
],
[
"## Alternatieven\n\nIn een contact moeten we tenminste een e-mailadres opnemen of een telefoonnummer, beide is niet verplicht.\n\nWe gebruiken hiervoor het keyword `anyOf`, met een lijst van alternatieven.",
"_____no_output_____"
]
],
[
[
"contact_schema_2 = {\n \"$schema\": \"http://json-schema.org/draft-07/schema#\",\n \"title\": \"Contact\",\n \"description\": \"schema for documents in the contacts collection\",\n \"type\": \"object\",\n \"required\": [\"name\"],\n \"properties\": {\n \"name\": {\"type\": \"string\"}\n },\n \"anyOf\": [\n {\"properties\": {\"email\": {\"type\": \"array\",\n \"items\": {\"type\": \"string\"}\n }\n \n },\n \"required\": [\"email\"]},\n \n {\"properties\": {\"telephone\": {\"type\": \"string\"}},\n \"required\": [\"telephone\"]}\n ]\n }",
"_____no_output_____"
],
[
"mycontact_2 = {\n \"name\": \"Harry van Doorn\"\n}\n\nvalidate(instance=mycontact_2, schema=contact_schema_2)",
"_____no_output_____"
]
],
[
[
"* verbeter `mycontact2` door een e-mailadres of een telefoonnummer toe te voegen, en valideer opnieuw.\n\n(Merk op dat het eerste missende alternatief als ontbrekend gemeld wordt,\nterwijl je natuurlijk ook de andere alternatieven kunt opgeven.",
"_____no_output_____"
],
[
"## Patronen (reguliere expressies)\n\nZoals je ziet is een waarde van een veld vaak een string.\nIn veel gevallen moet die string aan een bepaald patroon (reguliere expressie) voldoen.\nDit patronen kun je ook beschrijven in JSON-schema.\n\nZie: https://json-schema.org/understanding-json-schema/reference/regular_expressions.html\n",
"_____no_output_____"
],
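[
"A small illustrative example of the `pattern` keyword (the regular expression is an assumption, matching mobile numbers written like 06-1357 8642):\n\n```python\nphone_schema = {\n    'type': 'string',\n    'pattern': '^06[- ]?[0-9]{4} ?[0-9]{4}$'\n}\n\nvalidate(instance='06-1357 8642', schema=phone_schema)\n```",
"_____no_output_____"
],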
[
"## Gestandaardiseerde schema's\n\nVoor veel voorkomende domeinen zijn standaard-schema's gemaakt.\nJe vindt deze bijvoorbeeld via (in json-schema formaat).\nSchema.org bevat een *ontologie* van veel-voorkomende begrippen.\nDeze begrippen zijn in samenhang met elkaar gedefinieerd.\n\nZie bijvoorbeeld:\n\n* https://schema.org/ContactPoint\n* https://json.schemastore.org/schema-org-contact-point (hetzelfde, in json-ld)\n* en de lijst: \n* https://schema.org/Person\n** met bijvoorbeeld: `givenName` en `familyName`.\n\n(Dit vormt een opstapje naar Linked Open Data.)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
]
] |
d0eaa91638f2abdfb3ea8f1f0aacc0cb94ede688 | 8,942 | ipynb | Jupyter Notebook | 1. Vanilla GAN TensorFlow.ipynb | HarshRangwala/ML-Competitions | 567c0923b4bab755fd5b645f697d1a2161767558 | [
"MIT"
] | 1 | 2021-01-01T08:19:11.000Z | 2021-01-01T08:19:11.000Z | 1. Vanilla GAN TensorFlow.ipynb | HarshRangwala/ML-Competitions | 567c0923b4bab755fd5b645f697d1a2161767558 | [
"MIT"
] | null | null | null | 1. Vanilla GAN TensorFlow.ipynb | HarshRangwala/ML-Competitions | 567c0923b4bab755fd5b645f697d1a2161767558 | [
"MIT"
] | null | null | null | 27.598765 | 123 | 0.535003 | [
[
[
"from IPython import display\n\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms, datasets\n\nfrom utils import Logger\n\nimport tensorflow as tf\n\nimport numpy as np",
"_____no_output_____"
],
[
"DATA_FOLDER = './tf_data/VGAN/MNIST'\nIMAGE_PIXELS = 28*28\nNOISE_SIZE = 100\nBATCH_SIZE = 100",
"_____no_output_____"
],
[
"def noise(n_rows, n_cols):\n return np.random.normal(size=(n_rows, n_cols))\n\ndef xavier_init(size):\n in_dim = size[0] if len(size) == 1 else size[1]\n stddev = 1. / np.sqrt(float(in_dim))\n return tf.random_uniform(shape=size, minval=-stddev, maxval=stddev)\n\ndef images_to_vectors(images):\n return images.reshape(images.shape[0], 784)\n\ndef vectors_to_images(vectors):\n return vectors.reshape(vectors.shape[0], 28, 28, 1)",
"_____no_output_____"
]
],
[
[
"## Load Data",
"_____no_output_____"
]
],
[
[
"def mnist_data():\n compose = transforms.Compose(\n [transforms.ToTensor(),\n transforms.Normalize((.5, .5, .5), (.5, .5, .5))\n ])\n out_dir = '{}/dataset'.format(DATA_FOLDER)\n return datasets.MNIST(root=out_dir, train=True, transform=compose, download=True)",
"_____no_output_____"
],
[
"# Load data\ndata = mnist_data()\n# Create loader with data, so that we can iterate over it\ndata_loader = DataLoader(data, batch_size=BATCH_SIZE, shuffle=True)\n# Num batches\nnum_batches = len(data_loader)",
"_____no_output_____"
]
],
[
[
"## Initialize Graph",
"_____no_output_____"
]
],
[
[
"## Discriminator\n\n# Input\nX = tf.placeholder(tf.float32, shape=(None, IMAGE_PIXELS))\n\n# Layer 1 Variables\nD_W1 = tf.Variable(xavier_init([784, 1024]))\nD_B1 = tf.Variable(xavier_init([1024]))\n\n# Layer 2 Variables\nD_W2 = tf.Variable(xavier_init([1024, 512]))\nD_B2 = tf.Variable(xavier_init([512]))\n\n# Layer 3 Variables\nD_W3 = tf.Variable(xavier_init([512, 256]))\nD_B3 = tf.Variable(xavier_init([256]))\n\n# Out Layer Variables\nD_W4 = tf.Variable(xavier_init([256, 1]))\nD_B4 = tf.Variable(xavier_init([1]))\n\n# Store Variables in list\nD_var_list = [D_W1, D_B1, D_W2, D_B2, D_W3, D_B3, D_W4, D_B4]",
"_____no_output_____"
],
[
"## Generator\n\n# Input\nZ = tf.placeholder(tf.float32, shape=(None, NOISE_SIZE))\n\n# Layer 1 Variables\nG_W1 = tf.Variable(xavier_init([100, 256]))\nG_B1 = tf.Variable(xavier_init([256]))\n\n# Layer 2 Variables\nG_W2 = tf.Variable(xavier_init([256, 512]))\nG_B2 = tf.Variable(xavier_init([512]))\n\n# Layer 3 Variables\nG_W3 = tf.Variable(xavier_init([512, 1024]))\nG_B3 = tf.Variable(xavier_init([1024]))\n\n# Out Layer Variables\nG_W4 = tf.Variable(xavier_init([1024, 784]))\nG_B4 = tf.Variable(xavier_init([784]))\n\n# Store Variables in list\nG_var_list = [G_W1, G_B1, G_W2, G_B2, G_W3, G_B3, G_W4, G_B4]",
"_____no_output_____"
],
[
"def discriminator(x):\n l1 = tf.nn.dropout(tf.nn.leaky_relu(tf.matmul(x, D_W1) + D_B1, .2), .3)\n l2 = tf.nn.dropout(tf.nn.leaky_relu(tf.matmul(l1, D_W2) + D_B2, .2), .3)\n l3 = tf.nn.dropout(tf.nn.leaky_relu(tf.matmul(l2, D_W3) + D_B3, .2), .3)\n out = tf.matmul(l3, D_W4) + D_B4\n return out\n\ndef generator(z):\n l1 = tf.nn.leaky_relu(tf.matmul(z, G_W1) + G_B1, .2)\n l2 = tf.nn.leaky_relu(tf.matmul(l1, G_W2) + G_B2, .2)\n l3 = tf.nn.leaky_relu(tf.matmul(l2, G_W3) + G_B3, .2)\n out = tf.nn.tanh(tf.matmul(l3, G_W4) + G_B4)\n return out",
"_____no_output_____"
],
[
"G_sample = generator(Z)\nD_real = discriminator(X)\nD_fake = discriminator(G_sample)\n\n# Losses\nD_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_real, labels=tf.ones_like(D_real)))\nD_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_fake, labels=tf.zeros_like(D_fake)))\nD_loss = D_loss_real + D_loss_fake\nG_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_fake, labels=tf.ones_like(D_fake)))\n\n# Optimizers\nD_opt = tf.train.AdamOptimizer(2e-4).minimize(D_loss, var_list=D_var_list)\nG_opt = tf.train.AdamOptimizer(2e-4).minimize(G_loss, var_list=G_var_list)",
"_____no_output_____"
]
],
[
[
"## Train",
"_____no_output_____"
],
[
"#### Testing",
"_____no_output_____"
]
],
[
[
"num_test_samples = 16\ntest_noise = noise(num_test_samples, NOISE_SIZE)",
"_____no_output_____"
]
],
[
[
"#### Inits",
"_____no_output_____"
]
],
[
[
"num_epochs = 200\n\n# Start interactive session\nsession = tf.InteractiveSession()\n# Init Variables\ntf.global_variables_initializer().run()\n# Init Logger\nlogger = Logger(model_name='DCGAN1', data_name='CIFAR10')",
"_____no_output_____"
]
],
[
[
"#### Train",
"_____no_output_____"
]
],
[
[
"# Iterate through epochs\nfor epoch in range(num_epochs):\n for n_batch, (batch,_) in enumerate(data_loader):\n \n # 1. Train Discriminator\n X_batch = images_to_vectors(batch.permute(0, 2, 3, 1).numpy())\n feed_dict = {X: X_batch, Z: noise(BATCH_SIZE, NOISE_SIZE)}\n _, d_error, d_pred_real, d_pred_fake = session.run(\n [D_opt, D_loss, D_real, D_fake], feed_dict=feed_dict\n )\n\n # 2. Train Generator\n feed_dict = {Z: noise(BATCH_SIZE, NOISE_SIZE)}\n _, g_error = session.run(\n [G_opt, G_loss], feed_dict=feed_dict\n )\n\n if n_batch % 100 == 0:\n display.clear_output(True)\n # Generate images from test noise\n test_images = session.run(\n G_sample, feed_dict={Z: test_noise}\n )\n test_images = vectors_to_images(test_images)\n # Log Images\n logger.log_images(test_images, num_test_samples, epoch, n_batch, num_batches, format='NHWC');\n # Log Status\n logger.display_status(\n epoch, num_epochs, n_batch, num_batches,\n d_error, g_error, d_pred_real, d_pred_fake\n )",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
d0eab28f815f2172c62a8a92d23cf1a30ea413d6 | 10,967 | ipynb | Jupyter Notebook | python/practice/1-2_basic_operations.ipynb | daludaluking/LG_AI_all_in_one- | e0855af811deb1e5cf1695430bd52a8eb3d48827 | [
"Apache-2.0"
] | null | null | null | python/practice/1-2_basic_operations.ipynb | daludaluking/LG_AI_all_in_one- | e0855af811deb1e5cf1695430bd52a8eb3d48827 | [
"Apache-2.0"
] | null | null | null | python/practice/1-2_basic_operations.ipynb | daludaluking/LG_AI_all_in_one- | e0855af811deb1e5cf1695430bd52a8eb3d48827 | [
"Apache-2.0"
] | null | null | null | 10,967 | 10,967 | 0.65615 | [
[
[
"# 1. 숫자 자료형\n### 정수",
"_____no_output_____"
]
],
[
[
"print(123) #양의 정수\nprint(-321) #음의 정수\nprint(0) #0",
"123\n-321\n0\n"
]
],
[
[
"### 실수",
"_____no_output_____"
]
],
[
[
"print(3.14)\nprint(314e-2) # 314 * 10의 -2승",
"3.14\n3.14\n"
],
[
"print(0.1*0.1)",
"0.010000000000000002\n"
]
],
[
[
"### 복소수",
"_____no_output_____"
]
],
[
[
"print(1+2j)\nprint((1+2j).real)\nprint((1+2j).imag)\nprint((1+2j).conjugate())",
"(1+2j)\n1.0\n2.0\n(1-2j)\n"
],
[
"print(type(3))\nprint(type(3.14))\nprint(type(1+2j))",
"<class 'int'>\n<class 'float'>\n<class 'complex'>\n"
]
],
[
[
"# 2. 사칙연산",
"_____no_output_____"
]
],
[
[
"print(12+3)\nprint(12-3)\nprint(12*3)\nprint(12/3)\nprint(12**3)\nprint(12%3)\nprint(12%3)",
"15\n9\n36\n4.0\n1728\n0\n0\n"
]
],
[
[
"# 3. 연산자의 우선순위\n사칙연산과 똑같음",
"_____no_output_____"
]
],
[
[
"print(12+3*4)\nprint((12+3)*4)",
"24\n60\n"
]
],
[
[
"# 연습문제 1\n",
"_____no_output_____"
],
[
"## 1번\n10의 제곱을 출력해보자",
"_____no_output_____"
]
],
[
[
"# 정답을 적어주세요\nprint(10**2)",
"100\n"
],
[
"# 정답을 적어주세요",
"_____no_output_____"
]
],
[
[
"## 2번\n2 x 5 + 3 과 2 x (5 + 3)을 각각 화면에 출력해보자",
"_____no_output_____"
]
],
[
[
"# 정답을 적어주세요\nprint(2*5+3)",
"13\n"
],
[
"# 정답을 적어주세요\nprint(2*(5+3))",
"16\n"
]
],
[
[
"# 3. Assignment : =\n변수를 할당해주는 연산자 \n계산을 할때 equal sign으로도 쓰임\n",
"_____no_output_____"
]
],
[
[
"a=123\nprint(a)\nprint(id(a))\n\nstock_price = 1550\nprint(stock_price)\nprint(id(stock_price))",
"123\n94306837285952\n1550\n139713344318736\n"
],
[
"a=123\nprint(a)\nprint(id(a))",
"123\n94306837285952\n"
],
[
"a=b=12345678\nprint(a)\nprint(id(a))\nprint(b)\nprint(id(b))",
"12345678\n139713344319056\n12345678\n139713344319056\n"
],
[
"c=12345678\nd=12345678\nprint(c)\nprint(id(c))\nprint(d)\nprint(id(d))",
"12345678\n139713344318928\n12345678\n139713344318032\n"
]
],
[
[
"### 더 알아보기",
"_____no_output_____"
]
],
[
[
"variable = 2\nvariable",
"_____no_output_____"
],
[
"variable += 1\nvariable",
"_____no_output_____"
],
[
"variable = variable + 1\nvariable",
"_____no_output_____"
],
[
"variable -= 5\nvariable",
"_____no_output_____"
],
[
"variable *= -1\nvariable",
"_____no_output_____"
],
[
"variable /= 5\nvariable",
"_____no_output_____"
],
[
"a = 5\nb = 7\nc = a + b\nprint(c)",
"12\n"
],
[
"c = A + B",
"_____no_output_____"
]
],
[
[
"# 연습문제 2",
"_____no_output_____"
],
[
"## 1번\n사과가 5개 오렌지가 3개 있을 때 총 과일의 갯수를 구해보자 \n사과는 apple이라는 변수, 오렌지는 orange라는 변수에 할당 한 후, 총 과일의 갯수를 total이라는 변수에 저장해보자",
"_____no_output_____"
]
],
[
[
"# 정답을 적어주세요\napple = 5\norange = 3\ntotal = apple + orange\n\nprint(\"total = \", total)",
"total = 8\n"
],
[
"# 아래 주석을 풀고 실행시켜서 원하는 값이 나왔는지 확인하세요\ntotal",
"_____no_output_____"
]
],
[
[
"## 2번\n국어는 100점, 영어는 88점, 수학은 94점 일 때, 평균을 구하려고 한다. \n각각의 점수를 kor, eng, math라는 변수에 저장한 후 평균을 구해 avg라는 변수에 할당해보자.",
"_____no_output_____"
]
],
[
[
"# 정답을 적어주세요\nkor = 100\neng = 88\nmath = 94\navg = (kor + eng + math) / 3",
"_____no_output_____"
],
[
"# 아래 주석을 풀고 실행시켜서 원하는 값이 나왔는지 확인하세요\navg",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
d0eabe5ed9b0027f4eae0037c6ed171602b96ab5 | 10,167 | ipynb | Jupyter Notebook | 9.10.ipynb | ningjing957/zuoye | a2e35baf365b95da44eee6360fa6d03c2b0378df | [
"Apache-2.0"
] | null | null | null | 9.10.ipynb | ningjing957/zuoye | a2e35baf365b95da44eee6360fa6d03c2b0378df | [
"Apache-2.0"
] | null | null | null | 9.10.ipynb | ningjing957/zuoye | a2e35baf365b95da44eee6360fa6d03c2b0378df | [
"Apache-2.0"
] | null | null | null | 17.931217 | 74 | 0.451461 | [
[
[
"# 基本程序设计\n- 一切代码输入,请使用英文输入法",
"_____no_output_____"
]
],
[
[
"print('hello world')",
"hello world\n"
]
],
[
[
"## 编写一个简单的程序\n- 圆公式面积: area = radius \\* radius \\* 3.1415",
"_____no_output_____"
]
],
[
[
"radius = int(input('请输入一个半径:'))\narea = radius * radius * 3.1433223\nprint(area)",
"请输入一个半径:2\n12.5732892\n"
]
],
[
[
"### 在Python里面不需要定义数据的类型",
"_____no_output_____"
],
[
"## 控制台的读取与输入\n- input 输入进去的是字符串\n- eval",
"_____no_output_____"
],
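[
"# Added illustration (not part of the original lesson): input() always yields a str,\n# while eval() evaluates a string as a Python expression\ns = '3 + 4'\nprint(type(s))\nprint(eval(s))",
"_____no_output_____"
],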
[
"- 在jupyter用shift + tab 键可以跳出解释文档",
"_____no_output_____"
],
[
"## 变量命名的规范\n- 由字母、数字、下划线构成\n- 不能以数字开头 \\*\n- 标识符不能是关键词(实际上是可以强制改变的,但是对于代码规范而言是极其不适合)\n- 可以是任意长度\n- 驼峰式命名",
"_____no_output_____"
],
[
"## 变量、赋值语句和赋值表达式\n- 变量: 通俗理解为可以变化的量\n- x = 2 \\* x + 1 在数学中是一个方程,而在语言中它是一个表达式\n- test = test + 1 \\* 变量在赋值之前必须有值",
"_____no_output_____"
],
[
"## 同时赋值\nvar1, var2,var3... = exp1,exp2,exp3...",
"_____no_output_____"
],
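[
"# Added example: simultaneous assignment, including the classic swap idiom\na, b, c = 1, 2, 3\nprint(a, b, c)\na, b = b, a\nprint(a, b)",
"_____no_output_____"
],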
[
"## 定义常量\n- 常量:表示一种定值标识符,适合于多次使用的场景。比如PI\n- 注意:在其他低级语言中如果定义了常量,那么,该常量是不可以被改变的,但是在Python中一切皆对象,常量也是可以被改变的",
"_____no_output_____"
],
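[
"# Added example: in Python a 'constant' is only a naming convention\nPI = 3.1415\nPI = 3.0   # allowed -- nothing prevents reassignment\nprint(PI)",
"_____no_output_____"
],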
[
"## 数值数据类型和运算符\n- 在Python中有两种数值类型(int 和 float)适用于加减乘除、模、幂次\n<img src = \"../Photo/01.jpg\"></img>",
"_____no_output_____"
],
[
"## 运算符 /、//、**",
"_____no_output_____"
],
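[
"# Added example: the three operators named above\nprint(7 / 2)    # true division -> 3.5\nprint(7 // 2)   # floor division -> 3\nprint(7 ** 2)   # exponentiation -> 49",
"_____no_output_____"
],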
[
"## 运算符 %",
"_____no_output_____"
],
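[
"# Added example: % yields the remainder, which is handy for even/odd tests\nprint(7 % 2)\nprint(8 % 2)",
"_____no_output_____"
],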
[
"## EP:\n- 25/4 多少,如果要将其转变为整数该怎么改写\n- 输入一个数字判断是奇数还是偶数\n- 进阶: 输入一个秒数,写一个程序将其转换成分和秒:例如500秒等于8分20秒\n- 进阶: 如果今天是星期六,那么10天以后是星期几? 提示:每个星期的第0天是星期天",
"_____no_output_____"
]
],
[
[
"25/4",
"_____no_output_____"
],
[
"25//4",
"_____no_output_____"
],
[
"num = int(input('请输入一个整数:'))\nif num%2==0:\n print('该数为偶数')\nelse:\n print('该数为奇数')",
"请输入一个整数:6\n该数为偶数\n"
],
[
"s = eval(input('请输入一个秒数:'))\nmin = s//60\ns1 = s%60\nprint (str(min)+'分'+str(s1)+'秒')",
"请输入一个秒数:500\n8分20秒\n"
]
],
[
[
"## 科学计数法\n- 1.234e+2\n- 1.234e-2",
"_____no_output_____"
],
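[
"# Added example: scientific-notation literals are ordinary floats\nprint(1.234e+2)   # 123.4\nprint(1.234e-2)   # 0.01234",
"_____no_output_____"
],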
[
"## 计算表达式和运算优先级\n<img src = \"../Photo/02.png\"></img>\n<img src = \"../Photo/03.png\"></img>",
"_____no_output_____"
],
[
"## 增强型赋值运算\n<img src = \"../Photo/04.png\"></img>",
"_____no_output_____"
],
[
"## 类型转换\n- float -> int\n- 四舍五入 round",
"_____no_output_____"
],
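[
"# Added example: int() truncates toward zero, round() rounds (ties go to the even digit)\nprint(int(4.7))\nprint(round(4.5))\nprint(round(5.5))",
"_____no_output_____"
],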
[
"## EP:\n- 如果一个年营业税为0.06%,那么对于197.55e+2的年收入,需要交税为多少?(结果保留2为小数)\n- 必须使用科学计数法",
"_____no_output_____"
],
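[
"# One possible solution sketch (assumption: a 0.06% rate means multiplying by 6e-4)\nincome = 197.55e+2\ntax = income * 6e-4\nprint(round(tax, 2))",
"_____no_output_____"
],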
[
"# Project\n- 用Python写一个贷款计算器程序:输入的是月供(monthlyPayment) 输出的是总还款数(totalpayment)\n",
"_____no_output_____"
],
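[
"# A sketch of the loan calculator (assumption: total repayment = monthly payment x number of months)\nmonthlyPayment = eval(input('Enter the monthly payment: '))\nnumberOfYears = eval(input('Enter the number of years: '))\ntotalpayment = monthlyPayment * numberOfYears * 12\nprint('totalpayment =', round(totalpayment, 2))",
"_____no_output_____"
],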
[
"# Homework\n- 1\n<img src=\"../Photo/06.png\"></img>",
"_____no_output_____"
]
],
[
[
"celsius = eval(input('请输入一个温度:'))\nfahrenheit = (9/5)*celsius+32\nprint (fahrenheit)",
"请输入一个温度:43\n109.4\n"
]
],
[
[
"- 2\n<img src=\"../Photo/07.png\"></img>",
"_____no_output_____"
]
],
[
[
"import math\nradius = eval(input('请输入一个半径:'))\nlength = eval(input('请输入一个高:'))\narea = radius*radius*math.pi\nvolume = area*length\nprint (round(area,1))\nprint (round(volume,1))",
"请输入一个半径:5.5\n请输入一个高:12\n95.0\n1140.4\n"
]
],
[
[
"- 3\n<img src=\"../Photo/08.png\"></img>",
"_____no_output_____"
]
],
[
[
"feet = eval(input('请输入一个英尺数:'))\nmeters = feet*0.305\nprint (meters)",
"请输入一个英尺数:16.5\n5.0325\n"
]
],
[
[
"- 4\n<img src=\"../Photo/10.png\"></img>",
"_____no_output_____"
]
],
[
[
"M = eval(input('请输入以kg为单位的水的质量:'))\ninitialTemperature = eval(input('请输入水的初始温度:'))\nfinalTemperature = eval(input('请输入水的最终温度:'))\nQ = M*(finalTemperature-initialTemperature)*4184\nprint (Q)",
"请输入以kg为单位的水的质量:55.5\n请输入水的初始温度:3.5\n请输入水的最终温度:10.5\n1625484.0\n"
]
],
[
[
"- 5\n<img src=\"../Photo/11.png\"></img>",
"_____no_output_____"
]
],
[
[
"balance = eval(input('请输入差额:'))\nrate = eval(input('请输入年利率:'))\ninterest = (balance*(rate/1200))\nprint (interest)",
"请输入差额:1000\n请输入年利率:3.5\n2.916666666666667\n"
]
],
[
[
"- 6\n<img src=\"../Photo/12.png\"></img>",
"_____no_output_____"
]
],
[
[
"v0 = eval(input('请输入初速度:'))\nv1 = eval(input('请输入末速度:'))\nt = eval(input('以秒为单位速度变化所占用的时间:'))\na = (v1-v0)/t\nprint (a)",
"请输入初速度:5.5\n请输入末速度:50.9\n以秒为单位速度变化所占用的时间:4.5\n10.088888888888889\n"
]
],
[
[
"- 7 进阶\n<img src=\"../Photo/13.png\"></img>",
"_____no_output_____"
]
],
[
[
"money = eval(input('请输入每月的存钱数:'))\none = money*(1+0.00417)\ntwo = (money+one)*(1+0.00417)\nthree = (money+two)*(1+0.00417)\nfour = (money+three)*(1+0.00417)\nfive = (money+four)*(1+0.00417)\nsix = (money+five)*(1+0.00417)\nprint (round(six,2))",
"请输入每月的存钱数:100\n608.82\n"
]
],
[
[
"- 8 进阶\n<img src=\"../Photo/14.png\"></img>",
"_____no_output_____"
]
],
[
[
"num = eval(input('请输入一个0到1000的整数:'))\na = num%10\nb = num//10\nc = b%10\nd = b//10\nprint (a+c+d)",
"请输入一个0到1000的整数:152\n8\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
d0eabe7439634c384f3cc825e82613078117b749 | 24,116 | ipynb | Jupyter Notebook | base/auxils/plotting_tool/CurtailmentHourly.ipynb | juanjerezm/Balmorel_code | d4efc1f4932f54e88af4b2337c9cda69c2bbad76 | [
"ISC"
] | null | null | null | base/auxils/plotting_tool/CurtailmentHourly.ipynb | juanjerezm/Balmorel_code | d4efc1f4932f54e88af4b2337c9cda69c2bbad76 | [
"ISC"
] | null | null | null | base/auxils/plotting_tool/CurtailmentHourly.ipynb | juanjerezm/Balmorel_code | d4efc1f4932f54e88af4b2337c9cda69c2bbad76 | [
"ISC"
] | null | null | null | 38.340223 | 6,796 | 0.610425 | [
[
[
"# Import Required Packages",
"_____no_output_____"
]
],
[
[
"# Imports\nimport os\nimport datetime\nimport glob\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\n\n",
"_____no_output_____"
]
],
[
[
"# Input data from User",
"_____no_output_____"
]
],
[
[
"#Market analysed: 'Investment','FullYear','DayAhead','Balancing' (choose one or several)\nmarket_analysed=['DayAhead','Balancing'] \noutput='CurtailmentHourly'\nfirst_timestep=\"2012-01-02\"\n#Number of timesteps (total number of combination of SSS and TTT)\nnumber_periods=8736*12 \n#Time size of each time step for creating timestamp\nsize_timestep=\"300s\"\n#Time size of each TTT calculating energy values\nsize_t=1/12;\n#Countries in focus\nccc_in_focus = ['DENMARK', 'GERMANY', 'NORWAY', 'GREAT_BRITAIN','BELGIUM','HOLLAND']\n\n\n",
"_____no_output_____"
]
],
[
[
"# Plot Settings",
"_____no_output_____"
]
],
[
[
"# Set plotting specifications\n% matplotlib inline\nplt.rcParams.update({'font.size': 21})\nplt.rcParams['xtick.major.pad']='12'\nplt.rc('legend', fontsize=16)\ny_limit = 1.1\nlw = 3",
"_____no_output_____"
]
],
[
[
"# Read Input Files",
"_____no_output_____"
]
],
[
[
"data=pd.DataFrame()\nfor market in market_analysed:\n csvfiles = []\n for file in glob.glob(\"./input/results/\" + market + \"/*.csv\"):\n csvfiles.append(file)\n\n csvfiles=[file.replace('./input\\\\','') for file in csvfiles] \n csvfiles=[file.replace('.csv','') for file in csvfiles] \n csvfiles=[file.split('_') for file in csvfiles] \n csvfiles = np.asarray(csvfiles) \n csvfiles=pd.DataFrame.from_records(csvfiles)\n \n csvfiles.rename(columns={0: 'Output', 1: 'Scenario',2: 'Year',3:'Subset'}, inplace=True)\n scenarios=csvfiles.Scenario.unique().tolist()\n years=csvfiles.Year.unique().tolist()\n subsets=csvfiles.Subset.unique().tolist()\n\n for scenario in scenarios:\n for year in years:\n for subset in subsets:\n file = \"./input/results/\"+ market + \"/\"+ output + \"_\" + scenario + \"_\" + year + \"_\" + subset + \".csv\"\n if os.path.isfile(file):\n df=pd.read_csv(file,encoding='utf8')\n df['Scenario'] = scenario\n df['Market'] = market\n #Renaming columns just in case timeconversion was required\n df.rename(columns = {'G':'GGG', 'C':'CCC', 'Y':'YYY','TTT_NEW':'TTT','SSS_NEW':'SSS'}, inplace = True) \n data=data.append(df) \n\n \n ",
"_____no_output_____"
],
[
"#Timestamp addition\nfull_timesteps = pd.read_csv('./input/full_timesteps.csv')\nfull_timesteps.Key=full_timesteps['SSS']+full_timesteps['TTT']\nfull_timesteps['timestamp']= pd.date_range(first_timestep, periods = number_periods, freq =size_timestep)\ndict_timestamp=dict(zip(full_timesteps.Key, full_timesteps.timestamp))\ndata['timestamp']=data['SSS']+data['TTT']\ndata['timestamp']=data['timestamp'].map(dict_timestamp)\n\n",
"C:\\Anaconda3\\lib\\site-packages\\ipykernel_launcher.py:3: UserWarning: Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access\n This is separate from the ipykernel package so we can avoid doing imports until\n"
],
[
"data.to_csv(r'./output/test.csv')",
"_____no_output_____"
]
],
[
[
"# Additional set declaration",
"_____no_output_____"
]
],
[
[
"ccc = list(data.CCC.unique())\nrrr = list(data.RRR.unique())\ntech_type = list(data.TECH_TYPE.unique())\ncommodity = list(data.COMMODITY.unique())\nfff = list(data.FFF.unique())\nsss = list(full_timesteps.SSS.unique())\nttt = list(full_timesteps.TTT.unique())\n",
"_____no_output_____"
]
],
[
[
"# Time step selection",
"_____no_output_____"
]
],
[
[
"# Seasons to investigate\n# season_names = ['S01', 'S07', 'S20', 'S24', 'S28', 'S38', 'S42', 'S43']\n# Make a list of every nth element of sss (1 <= nth <= number of elements in sss)\nnth = 1\ns = sss[0::nth]\n # Or select seasons by names\n# s = season_names\n",
"_____no_output_____"
],
[
"# Terms to investigate\n# term_names = ['T005', 'T019', 'T033', 'T047', 'T061', 'T075', 'T089', 'T103', 'T117', 'T131', 'T145', 'T159']\n# Make a list of every nth element of ttt (1 <= nth <= number of elements in ttt)\nnth = 1\nt = ttt[0::nth]\n# Or select terms by name\n# t = term_names",
"_____no_output_____"
]
],
[
[
"# Make Directories\n",
"_____no_output_____"
]
],
[
[
"# Make output folder\nif not os.path.isdir('output'):\n os.makedirs('output')",
"_____no_output_____"
],
[
"# Make CurtailmentHourly folder\nif not os.path.isdir('output/' + output):\n os.makedirs('output/' + output)",
"_____no_output_____"
],
[
"# Make market folder\nfor market in market_analysed:\n if not os.path.isdir('output/' + output + '/'+ market +'/Country_wise'):\n os.makedirs('output/' + output + '/'+ market +'/Country_wise')\n# Make country folder\n if not os.path.isdir('output/' + output + '/'+ market +'/Country_wise'):\n os.makedirs('output/' + output + '/'+ market +'/Country_wise')\n # Make country wise folders\n for c in ccc:\n if not os.path.isdir('output/' + output + '/'+ market +'/Country_wise/' + c):\n os.makedirs('output/' + output + '/'+ market +'/Country_wise/' + c)",
"_____no_output_____"
]
],
[
[
"# Plotting",
"_____no_output_____"
]
],
[
[
"# Make data frames to plot\ndata_plot = data[(data.SSS.isin(s)) & (data.TTT.isin(t))]\ndata_plot = data[data.CCC.isin(ccc_in_focus)]",
"_____no_output_____"
]
],
[
[
"## Plot per year, scenario, market ",
"_____no_output_____"
]
],
[
[
"df_plot=(pd.DataFrame(data_plot.groupby(['YYY', 'Scenario', 'Market'])['Val'].agg('sum')/1000000*size_t))",
"_____no_output_____"
],
[
"df_plot",
"_____no_output_____"
],
[
"df_plot.reset_index(inplace=True)",
"_____no_output_____"
],
[
"df_plot",
"_____no_output_____"
],
[
"for scenario in Scenarios:\n df_plot\nplt.bar(df_plot.YYY, df_plot.Val)\n",
"_____no_output_____"
],
[
"for i in years:\n spp_plot[data.SSS.isin([i])][ccc[:2]].plot(figsize=(16,9), lw=lw)\n plt.ylim([0, y_limit])\n plt.legend(loc=1)\n plt.title('Curtailment in ' + i)\n plt.xlabel('Terms')\n plt.xticks(t_marker, t_selected, rotation=45)\n for x_pos in t_marker:\n plt.axvline(x=x_pos, c='black', lw=6, alpha=0.3)\n plt.tight_layout()\n plt.savefig('output/pv_production/spp_' + i + '.png', compression=None)\n # plt.show()\n plt.close()\nplt.close()",
"_____no_output_____"
],
[
"#Plot example with several x axis\n\nfig = plt.figure()\nax1 = fig.add_subplot(111)\nax2 = ax1.twiny()\n\n# Add some extra space for the second axis at the bottom\nfig.subplots_adjust(bottom=0.2)\nax1.set_xticks([1,2,4,5,7,8])\nax1.set_xlim(0,9)\nax1.set_xticklabels(('2015','2016','2015','2016','2015','2016'))\nax2.spines[\"bottom\"].set_position((\"axes\", -0.15))\nax2.xaxis.set_ticks_position(\"bottom\")\nax2.spines[\"bottom\"].set_visible(True)\nax2.set_xticks([1.5,4.5,7.5])\nax2.set_xticklabels(('1','2','3'))\nax2.set_xlim(0,9)\n\nb1 = np.random.randint(0,100,6)\nb2 = np.random.randint(0,100,6)\nb3 = np.random.randint(0,100,6)\nplt.bar(np.array([1,2,4,5,7,8])-0.4,b1,color='blue')\nplt.bar(np.array([1,2,4,5,7,8])-0.4,b2,color='orange',bottom=b1)\nplt.bar(np.array([1,2,4,5,7,8])-0.4,b3,color='yellow',bottom=b1+b2)\n\nplt.show()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d0eac8139cbc0ed755199e527a5656a6e405aa57 | 21,134 | ipynb | Jupyter Notebook | example/STS/STS_Example2/STS_Example2.ipynb | volpatto/UQpy | acbe1d6e655e98917f56b324f019881ea9ccca82 | [
"MIT"
] | null | null | null | example/STS/STS_Example2/STS_Example2.ipynb | volpatto/UQpy | acbe1d6e655e98917f56b324f019881ea9ccca82 | [
"MIT"
] | null | null | null | example/STS/STS_Example2/STS_Example2.ipynb | volpatto/UQpy | acbe1d6e655e98917f56b324f019881ea9ccca82 | [
"MIT"
] | null | null | null | 106.201005 | 8,730 | 0.854216 | [
[
[
"# Stratified Sampling - Example 2\n\n- Author: Michael D. Shields\n- Date: June 05, 2018",
"_____no_output_____"
],
[
"In this example, the stratified sampling method is employed to generate samples from an exponential distribution with strata defined by a text file. The method illustrates stratified sampling for cases where the space is not divided equally in all dimensions. ",
"_____no_output_____"
],
[
"Import the necessary libraries. Here we import standard libraries such as numpy and matplotlib, but also need to import the STS class from UQpy.SampleMethods.",
"_____no_output_____"
]
],
[
[
"from UQpy.SampleMethods import STS\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\nfrom scipy.stats import expon",
"_____no_output_____"
]
],
[
[
"Run STS for 6 samples.\n - 2 dimensions\n - Strata are defined by the file 'strata.txt'\n - The strata are unequally sized and are not equal in all dimensions.",
"_____no_output_____"
],
[
"Run stratified sampling",
"_____no_output_____"
]
],
[
[
"x_sts = STS(dimension=2, dist_name='Exponential', dist_params=np.ones(2), input_file='strata.txt')",
"UQpy: Successful execution of STS design..\n"
],
[
"fig, ax = plt.subplots()\nplt.title('Stratified Sample - Exponential')\nplt.scatter(x_sts.samples[:, 0], x_sts.samples[:, 1])\nplt.plot(expon.ppf([0.0, 0.5, 0.5, 0.0],1,1),expon.ppf([0.0, 0.0, 0.333, 0.333],1,1),'b')\nplt.plot(expon.ppf([0.0, 0.5, 0.5, 0.0],1,1),expon.ppf([0.333, 0.333, 0.667, 0.667],1,1),'b')\nplt.plot(expon.ppf([0.0, 0.5, 0.5, 0.0],1,1),expon.ppf([0.667, 0.667, 0.99, 1.0],1,1),'b')\nplt.plot(expon.ppf([0.5, 1.0, 0.99, 0.5],1,1),expon.ppf([0.0, 0.0, 0.5, 0.5],1,1),'b')\nplt.plot(expon.ppf([0.5, 0.75, 0.75, 0.5],1,1),expon.ppf([0.5, 0.5, 0.99, 0.99],1,1),'b')\nax.yaxis.grid(True)\nax.xaxis.grid(True)\nplt.ylim(1, expon.ppf(0.99,1,1))\nplt.xlim(1, expon.ppf(0.99,1,1))\nplt.show()",
"_____no_output_____"
],
[
"fig, ax = plt.subplots()\nplt.title('Stratified Sample - U(0,1)')\nplt.scatter(x_sts.samplesU01[:, 0], x_sts.samplesU01[:, 1])\nplt.plot([0.0, 0.5, 0.5, 0.0],[0.0, 0.0, 0.333, 0.333],'b')\nplt.plot([0.0, 0.5, 0.5, 0.0],[0.333, 0.333, 0.667, 0.667],'b')\nplt.plot([0.0, 0.5, 0.5, 0.0],[0.667, 0.667, 1.0, 1.0],'b')\nplt.plot([0.5, 1.0, 1.0, 0.5],[0.0, 0.0, 0.5, 0.5],'b')\nplt.plot([0.5, 0.75, 0.75, 0.5],[0.5, 0.5, 1.0, 1.0],'b')\nax.set_yticks([0.0, 0.2, 0.4, 0.6, 0.8, 1.0])\nax.set_xticks([0.0, 0.2, 0.4, 0.6, 0.8, 1.0])\nax.yaxis.grid(True)\nax.xaxis.grid(True)\nplt.ylim(0, 1)\nplt.xlim(0, 1)\nplt.show()",
"_____no_output_____"
]
],
[
[
"Return the sample weights",
"_____no_output_____"
]
],
[
[
"print(x_sts.strata.weights)",
"_____no_output_____"
],
[
"print(x_sts.samples)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
d0eac98bb539dbe3854f46213f33bc39e9608fe3 | 19,900 | ipynb | Jupyter Notebook | notebooks/_Appendix B - OAuth Primer.ipynb | alexistrator/Mining-the-Social-Web-3rd-Edition | 06c1d6f20eec98ed2cb8b3cca3835ed51eea3056 | [
"BSD-2-Clause"
] | 818 | 2018-12-01T11:15:57.000Z | 2022-03-30T21:56:32.000Z | notebooks/_Appendix B - OAuth Primer.ipynb | enixam/Mining-the-Social-Web-3rd-Edition | f628233cd8b50cec9e6cc5958da98a8c9a651bbc | [
"BSD-2-Clause"
] | 60 | 2018-11-30T23:34:16.000Z | 2022-03-11T23:51:21.000Z | notebooks/_Appendix B - OAuth Primer.ipynb | seiftry/test | ec50c57d1614ffcc91b98a508dc2b8d6c1e3e4fc | [
"BSD-2-Clause"
] | 475 | 2018-11-29T23:00:57.000Z | 2022-03-30T21:48:06.000Z | 50.636132 | 1,185 | 0.657186 | [
[
[
"# Mining the Social Web, 2nd Edition\n\n## Appendix B: OAuth Primer\n\nThis IPython Notebook provides an interactive way to follow along with and explore the numbered examples from [_Mining the Social Web (3rd Edition)_](http://bit.ly/Mining-the-Social-Web-3E). The intent behind this notebook is to reinforce the concepts from the sample code in a fun, convenient, and effective way. This notebook assumes that you are reading along with the book and have the context of the discussion as you work through these exercises.\n\nIn the somewhat unlikely event that you've somehow stumbled across this notebook outside of its context on GitHub, [you can find the full source code repository here](http://bit.ly/Mining-the-Social-Web-3E).\n\n## Copyright and Licensing\n\nYou are free to use or adapt this notebook for any purpose you'd like. However, please respect the [Simplified BSD License](https://github.com/mikhailklassen/Mining-the-Social-Web-3rd-Edition/blob/master/LICENSE) that governs its use.\n\n## Notes\n\nWhile the chapters in the book opt to simplify the discussion by avoiding a discussion of OAuth and instead opting to use application credentials provided by social web properties for API access, this notebook demonstrates how to implement some OAuth flows for several of the more prominent social web properties. While IPython Notebook is used for consistency and ease of learning, and in some cases, this actually adds a little bit of extra complexity in some cases given the nature of embedding a web server and handling asynchronous callbacks. (Still, the overall code should be straightforward to adapt as needed.)",
"_____no_output_____"
],
[
"# Twitter OAuth 1.0a Flow with IPython Notebook\n\nTwitter implements OAuth 1.0A as its standard authentication mechanism, and in order to use it to make requests to Twitter's API, you'll need to go to https://dev.twitter.com/apps and create a sample application. There are three items you'll need to note for an OAuth 1.0A workflow, a consumer key and consumer secret that identify the application as well as the oauth_callback URL that tells Twitter where redirect back to after the user has authorized the application. Note that you will need an ordinary Twitter account in order to login, create an app, and get these credentials. Keep in mind that for development purposes or for accessing your own account's data, you can simply use the oauth token and oauth token secret that are provided in your appliation settings to authenticate as opposed to going through the steps here. The process of obtaining an the oauth token and oauth token secret is fairly straight forward (especially with the help of a good library), but an implementation in IPython Notebook is a bit tricker due to the nature of embedding a web server, capturing information within web server contexts, and handling the various redirects along the way.\n\nYou must ensure that your browser is not blocking popups in order for this script to work.\n\n<img src=\"files/resources/ch01-twitter/images/Twitter-AppCredentials-oauth_callback.png\" width=\"600px\">",
"_____no_output_____"
],
[
"## Example 1. Twitter OAuth 1.0a Flow",
"_____no_output_____"
]
],
[
[
"import json\nfrom flask import Flask, request\nfrom threading import Timer\nfrom IPython.display import IFrame\nfrom IPython.display import display\nfrom IPython.display import Javascript as JS\n\nimport twitter\nfrom twitter.oauth_dance import parse_oauth_tokens\nfrom twitter.oauth import read_token_file, write_token_file\n\nOAUTH_FILE = \"/tmp/twitter_oauth\"\n\n# XXX: Go to http://twitter.com/apps/new to create an app and get values\n# for these credentials that you'll need to provide in place of these\n# empty string values that are defined as placeholders.\n# See https://dev.twitter.com/docs/auth/oauth for more information \n# on Twitter's OAuth implementation and ensure that *oauth_callback*\n# is defined in your application settings as shown below if you are \n# using Flask in this IPython Notebook\n\n# Define a few variables that will bleed into the lexical scope of a couple of \n# functions below\nCONSUMER_KEY = ''\nCONSUMER_SECRET = ''\noauth_callback = 'http://127.0.0.1:5000/oauth_helper'\n \n# Setup a callback handler for when Twitter redirects back to us after the user authorizes the app\n\nwebserver = Flask(\"TwitterOAuth\")\[email protected](\"/oauth_helper\")\ndef oauth_helper():\n \n oauth_verifier = request.args.get('oauth_verifier')\n\n # Pick back up credentials from ipynb_oauth_dance\n oauth_token, oauth_token_secret = read_token_file(OAUTH_FILE)\n \n _twitter = twitter.Twitter(\n auth=twitter.OAuth(\n oauth_token, oauth_token_secret, CONSUMER_KEY, CONSUMER_SECRET),\n format='', api_version=None)\n\n oauth_token, oauth_token_secret = parse_oauth_tokens(\n _twitter.oauth.access_token(oauth_verifier=oauth_verifier))\n\n # This web server only needs to service one request, so shut it down\n shutdown_after_request = request.environ.get('werkzeug.server.shutdown')\n shutdown_after_request()\n\n # Write out the final credentials that can be picked up after the blocking\n # call to webserver.run() below.\n write_token_file(OAUTH_FILE, oauth_token, oauth_token_secret)\n return \"%s %s written to %s\" % (oauth_token, oauth_token_secret, OAUTH_FILE)\n\n\n# To handle Twitter's OAuth 1.0a implementation, we'll just need to implement a custom\n# \"oauth dance\" and will closely follower the pattern defined in twitter.oauth_dance.\n\ndef ipynb_oauth_dance():\n \n _twitter = twitter.Twitter(\n auth=twitter.OAuth('', '', CONSUMER_KEY, CONSUMER_SECRET),\n format='', api_version=None)\n\n oauth_token, oauth_token_secret = parse_oauth_tokens(\n _twitter.oauth.request_token(oauth_callback=oauth_callback))\n\n # Need to write these interim values out to a file to pick up on the callback from Twitter\n # that is handled by the web server in /oauth_helper\n write_token_file(OAUTH_FILE, oauth_token, oauth_token_secret)\n \n oauth_url = ('http://api.twitter.com/oauth/authorize?oauth_token=' + oauth_token)\n \n # Tap the browser's native capabilities to access the web server through a new window to get\n # user authorization\n display(JS(\"window.open('%s')\" % oauth_url))\n\n\n# After the webserver.run() blocking call, start the oauth dance that will ultimately\n# cause Twitter to redirect a request back to it. 
Once that request is serviced, the web\n# server will shutdown, and program flow will resume with the OAUTH_FILE containing the\n# necessary credentials\nTimer(1, lambda: ipynb_oauth_dance()).start()\n\nwebserver.run(host='0.0.0.0')\n\n# The values that are read from this file are written out at\n# the end of /oauth_helper\noauth_token, oauth_token_secret = read_token_file(OAUTH_FILE)\n\n# These 4 credentials are what is needed to authorize the application\nauth = twitter.oauth.OAuth(oauth_token, oauth_token_secret,\n CONSUMER_KEY, CONSUMER_SECRET)\n \ntwitter_api = twitter.Twitter(auth=auth)\n\nprint(twitter_api)",
"_____no_output_____"
]
],
[
[
"# Facebook OAuth 2.0 Flow with IPython Notebook\n\nFacebook implements OAuth 2.0 as its standard authentication mechanism, and this example demonstrates how get an access token for making API requests once you've created an app and gotten a \"client id\" value that can be used to initiate an OAuth flow. Note that you will need an ordinary Facebook account in order to login, create an app, and get these credentials. You can create an app through the \"Developer\" section of your account settings as shown below or by navigating directly to https://developers.facebook.com/apps/. During development or debugging cycles, or to just access data in your own account, you may sometimes find it convenient to also reference the access token that's available to you through the Graph API Explorer tool at https://developers.facebook.com/tools/explorer as opposed to using the flow described here. The process of obtaining an access token is fairly straight forward, but an implementation in IPython Notebook is a bit tricker due to the nature of embedding a web server, capturing information within web server contexts, and handling the various redirects along the way.\n\nYou must ensure that your browser is not blocking popups in order for this script to work.\n<br />\n<br />\n<img src=\"files/resources/ch02-facebook/images/fb_create_app.png\" width=\"400px\"><br />\nCreate apps at https://developers.facebook.com/apps/<br />\n<br />\n<img src=\"files/resources/ch02-facebook/images/fb_edit_app.png\" width=\"400px\"><br />\nClicking on the app in your list to see the app dashboard and access app settings.",
"_____no_output_____"
],
[
"## Example 2. Facebook OAuth 2.0 Flow",
"_____no_output_____"
]
],
[
[
"import urllib\nfrom flask import Flask, request\nfrom threading import Timer\nfrom IPython.display import display\nfrom IPython.display import Javascript as JS\n\n# XXX: Get this value from your Facebook application's settings for the OAuth flow\n# at https://developers.facebook.com/apps\n\nAPP_ID = '' \n\n# This value is where Facebook will redirect. We'll configure an embedded\n# web server to be serving requests here\n\nREDIRECT_URI = 'http://localhost:5000/oauth_helper'\n\n# You could customize which extended permissions are being requested for your app\n# by adding additional items to the list below. See\n# https://developers.facebook.com/docs/reference/login/extended-permissions/\n\nEXTENDED_PERMS = ['user_likes']\n\n# A temporary file to store a code from the web server\n\nOAUTH_FILE = 'resources/ch02-facebook/access_token.txt'\n\n# Configure an emedded web server that accepts one request, parses\n# the fragment identifier out of the browser window redirects to another\n# handler with the parsed out value in the query string where it can be captured\n# and stored to disk. (A webserver cannot capture information in the fragment \n# identifier or that work would simply be done in here.)\n\nwebserver = Flask(\"FacebookOAuth\")\[email protected](\"/oauth_helper\")\ndef oauth_helper():\n return '''<script type=\"text/javascript\">\n var at = window.location.hash.substring(\"access_token=\".length+1).split(\"&\")[0]; \n setTimeout(function() { window.location = \"/access_token_capture?access_token=\" + at }, 1000 /*ms*/);\n </script>'''\n\n# Parses out a query string parameter and stores it to disk. This is required because\n# the address space that Flask uses is not shared with IPython Notebook, so there is really\n# no other way to share the information than to store it to a file and access it afterward\[email protected](\"/access_token_capture\")\ndef access_token_capture():\n access_token = request.args.get('access_token')\n f = open(OAUTH_FILE, 'w') # Store the code as a file\n f.write(access_token)\n f.close()\n \n # It is safe (and convenient) to shut down the web server after this request\n shutdown_after_request = request.environ.get('werkzeug.server.shutdown')\n shutdown_after_request()\n return access_token\n\n\n# Send an OAuth request to Facebook, handle the redirect, and display the access\n# token that's included in the redirect for the user to copy and paste\n \nargs = dict(client_id=APP_ID, redirect_uri=REDIRECT_URI,\n scope=','.join(EXTENDED_PERMS), type='user_agent', display='popup'\n )\n\noauth_url = 'https://facebook.com/dialog/oauth?' + urllib.parse.urlencode(args)\n\nTimer(1, lambda: display(JS(\"window.open('%s')\" % oauth_url))).start()\n\n\nwebserver.run(host='0.0.0.0')\n\naccess_token = open(OAUTH_FILE).read()\n\nprint(access_token)",
"_____no_output_____"
]
],
[
[
"# LinkedIn OAuth 2.0 Flow with IPython Notebook\nLinkedIn implements OAuth 2.0 as one of its standard authentication mechanism, and \"Example 3\" demonstrates how to use it to get an access token for making API requests once you've created an app and gotten the \"API Key\" and \"Secret Key\" values that are part of the OAuth flow. Note that you will need an ordinary LinkedIn account in order to login, create an app, and get these credentials. You can create an app through the \"Developer\" section of your account settings as shown below or by navigating directly to https://www.linkedin.com/secure/developer.\n\nYou must ensure that your browser is not blocking popups in order for this script to work.\n\n<img src=\"files/resources/ch04-linkedin/images/LinkedIn-app.png\" width=\"600px\">",
"_____no_output_____"
],
[
"## Example 3. Using LinkedIn OAuth credentials to receive an access token an authorize an application",
"_____no_output_____"
],
[
"Note: You must ensure that your browser is not blocking popups in order for this script to work. LinkedIn's OAuth flow appears to expressly involve opening a new window, and it does not appear that an inline frame can be used as is the case with some other social web properties. You may also find it very convenient to ensure that you are logged into LinkedIn at http://www.linkedin.com/ with this browser before executing this script, because the OAuth flow will prompt you every time you run it if you are not already logged in. If for some reason you cause IPython Notebook to hang, just select \"Kernel => Interrupt\" from its menu.",
"_____no_output_____"
]
],
[
[
"import os\nfrom threading import Timer\nfrom flask import Flask, request\nfrom linkedin import linkedin # pip install python3-linkedin\nfrom IPython.display import display\nfrom IPython.display import Javascript as JS\n\n# XXX: Get these values from your application's settings for the OAuth flow\n\nCONSUMER_KEY = ''\nCONSUMER_SECRET = ''\n\n# This value is where LinkedIn will redirect. We'll configure an embedded\n# web server to be serving requests here. Make sure to add this to your\n# app settings\nREDIRECT_URL = 'http://localhost:5000/oauth_helper'\n\n# A temporary file to store a code from the web server\nOAUTH_FILE = 'resources/ch04-linkedin/linkedin.authorization_code'\n\n# These should match those in your app settings\npermissions = {'BASIC_PROFILE': 'r_basicprofile',\n 'EMAIL_ADDRESS': 'r_emailaddress',\n 'SHARE': 'w_share',\n 'COMPANY_ADMIN': 'rw_company_admin'}\n\n# Configure an emedded web server that accepts one request, stores a file\n# that will need to be accessed outside of the request context, and \n# immediately shuts itself down\n\nwebserver = Flask(\"OAuthHelper\")\[email protected](\"/oauth_helper\")\ndef oauth_helper():\n code = request.args.get('code')\n f = open(OAUTH_FILE, 'w') # Store the code as a file\n f.write(code)\n f.close()\n shutdown_after_request = request.environ.get('werkzeug.server.shutdown')\n shutdown_after_request()\n return \"\"\"<p>Handled redirect and extracted code <strong>%s</strong> \n for authorization</p>\"\"\" % (code,)\n\n# Send an OAuth request to LinkedIn, handle the redirect, and display the access\n# token that's included in the redirect for the user to copy and paste\n\nauth = linkedin.LinkedInAuthentication(CONSUMER_KEY, CONSUMER_SECRET, REDIRECT_URL, \n permissions.values())\n\n# Display popup after a brief delay to ensure that the web server is running and \n# can handle the redirect back from LinkedIn\n\nTimer(1, lambda: display(JS(\"window.open('%s')\" % auth.authorization_url))).start()\n\n# Run the server to accept the redirect back from LinkedIn and capture the access\n# token. This command blocks, but the web server is configured to shut itself down\n# after it serves a request, so after the redirect occurs, program flow will continue\n\nwebserver.run(host='0.0.0.0')\n\n# As soon as the request finishes, the web server shuts down and these remaining commands\n# are executed, which exchange an authorization code for an access token. This process\n# seems to need full automation because the authorization code expires very quickly.\n\nauth.authorization_code = open(OAUTH_FILE).read()\nauth.get_access_token()\n\n# Prevent stale tokens from sticking around, which could complicate debugging\nos.remove(OAUTH_FILE)\n\n\n# How you can use the application to access the LinkedIn API...\napp = linkedin.LinkedInApplication(auth)\nprint(app.get_profile())",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
]
] |
d0ead71e092cbb264d031cc7b1e13b0edc0832cc | 4,547 | ipynb | Jupyter Notebook | Estudos/pythonIntroduction.ipynb | bitwoman/formacao-cientista-dados-python-r | d087cd1d40caf5a78908a47f13672fcd6aaec218 | [
"MIT"
] | null | null | null | Estudos/pythonIntroduction.ipynb | bitwoman/formacao-cientista-dados-python-r | d087cd1d40caf5a78908a47f13672fcd6aaec218 | [
"MIT"
] | null | null | null | Estudos/pythonIntroduction.ipynb | bitwoman/formacao-cientista-dados-python-r | d087cd1d40caf5a78908a47f13672fcd6aaec218 | [
"MIT"
] | null | null | null | 22.509901 | 128 | 0.515285 | [
[
[
"#Importação de lib's: exemplo\nimport pandas as pd\nfrom apyori import apriori",
"_____no_output_____"
],
[
"#Formação Cientista de Dados",
"_____no_output_____"
],
[
"#Funções matemáticas, números aleatórios, criptografia, leitura de arquivos, protocolos de comunicação.",
"_____no_output_____"
],
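[
"# A small illustration (added): a few of the standard-library areas listed above\nimport math, random, hashlib\nprint(math.sqrt(16))                      # math functions\nprint(random.randint(1, 6))               # random numbers\nprint(hashlib.md5(b'data').hexdigest())   # cryptographic hashing",
"_____no_output_____"
],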
[
"import statistics as st\nx = [10,20,30,40]\ny = st.mean(x) \nz = st.median(x)\n\nprint(y)\nprint(z)",
"25\n25.0\n"
],
[
"#Blocos de código reutilizáveis, podem ser chamados de qualquer parte do programa, podem ser chamados de outros programas.",
"_____no_output_____"
],
[
"#Função sem parâmetros (procedimento é sem retorno, função possui retorno):\ndef printScreen():\n print('This is a function!')\n \nprintScreen()",
"This is a function!\n"
],
[
"#Função com parâmetros:\ndef p(var):\n return var*var\n \nx = p(10)\nprint(x)",
"100\n"
],
[
"#Função: Exemplo com valor default.\ndef interval(beginning=1, end=10):\n for beginning in range(1, end+1):\n print(beginning, end=' ')\n\ninterval()",
"1 2 3 4 5 6 7 8 9 10 "
],
[
"#Funções padrões (denominadas funções internas).\nfrom statistics import *\nfrom numpy import * \nlst = [1,2,30,45]\na = random.random((8,8))\n\nprint('Mean: ', mean(lst))\nprint('Median: ', median(lst))\nprint('Mode: ', mode(lst))\nprint('Variance: ', variance(lst))\nprint('\\n')\nprint(a, end=' ')",
"Mean: 19.5\nMedian: 16.0\nMode: 1\nVariance: 469.6666666666667\n\n\n[[0.78107304 0.58556846 0.66393132 0.78917056 0.02899914 0.76024744\n 0.36835949 0.88389702]\n [0.94670637 0.31675199 0.64972635 0.80971208 0.89827705 0.37168518\n 0.53906549 0.12726267]\n [0.53329323 0.52614304 0.57949261 0.03488654 0.008831 0.00405279\n 0.55218266 0.00482724]\n [0.57775909 0.37780351 0.63449448 0.82814062 0.92971531 0.07993492\n 0.81400813 0.52151917]\n [0.74908193 0.44892598 0.46237993 0.72891499 0.72501929 0.07342445\n 0.33638875 0.81738937]\n [0.08385632 0.70673614 0.89137315 0.70742288 0.79918375 0.60230457\n 0.79295452 0.96775149]\n [0.59402879 0.202072 0.01094463 0.092823 0.72959689 0.29929399\n 0.05069849 0.45004356]\n [0.0414582 0.02598722 0.49326611 0.22675292 0.68225281 0.35970365\n 0.19424237 0.10534098]] "
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d0eafb4a92da7a839fbaa514fb2f73efe29c0bbc | 6,880 | ipynb | Jupyter Notebook | homeworks/homework9.ipynb | yang-chenyu104/berkeley-stat-157 | 327f77db7ecdc02001f8b7be8c1fcaf0607694c0 | [
"Apache-2.0"
] | 2,709 | 2018-12-29T18:15:20.000Z | 2022-03-31T13:24:29.000Z | homeworks/homework9.ipynb | yang-chenyu104/berkeley-stat-157 | 327f77db7ecdc02001f8b7be8c1fcaf0607694c0 | [
"Apache-2.0"
] | 7 | 2018-12-27T04:56:20.000Z | 2021-02-18T04:43:11.000Z | homeworks/homework9.ipynb | yang-chenyu104/berkeley-stat-157 | 327f77db7ecdc02001f8b7be8c1fcaf0607694c0 | [
"Apache-2.0"
] | 1,250 | 2019-01-07T05:51:39.000Z | 2022-03-31T13:24:18.000Z | 66.153846 | 596 | 0.678634 | [
[
[
"# Homework 9 - Berkeley STAT 157\n\n**Your name: XX, SID YY, teammates A,B,C** (Please add your name, SID and teammates to ease Ryan and Rachel to grade.)\n\n**Please submit your homework through [gradescope](http://gradescope.com/)**\n\nHandout 4/18/2019, due 4/25/2019 by 4pm.\n\nThis homework deals with sequence models for numbers. It builds on Homework 8 in terms of modeling. The main difference to last week is that we're modeling *real valued numbers* of stocks rather than characters. \n\n**This is teamwork.**\n\n## 1. Time Series Model\n\nThe goal is to develop multivariate regression models where the numbers are *nonnegative* and where changes are *relative*. That is, a stock price can never assume negative values and for convenience we assume that the companies listed do not go bankrupt, i.e. their stock price will never be zero. Moreover, we assume that we can ignore quantization of prices, i.e. the fact that stocks aren't traded at arbitrary prices in $\\mathbb{R}$ but only at fractions of a cent (see [this link for a backstory](https://www.investopedia.com/ask/answers/why-nyse-switch-fractions-to-decimals/)). \n\nThe prices $x_{st}$ for a security $s$ at time $t$ typically reported at a given date are `(open, high, low, close, volume)`. Here `open` denotes the price when the market opens, `high` the highest price that it was traded for during that day, `low` the lowest, and `close` is the price of the security at closing. Lastly `volume` is an indicator for how many units were sold at that day. We index the respective values with $x_{st} = (o, h, l, c, v) \\in \\mathbb{R}^{5}$. To process them we transform $x_{st}$ as follows:\n\n$$z_{st} := \\left(\\log o, 10 \\cdot (\\log h - \\log o), 100 \\cdot (\\log l - \\log o), \\log c, \\log v\\right)$$\n\nMoreover, we assume that $z_{st}$ is obtained as part of a regression problem with squared loss, i.e.\\ for an estimate $\\hat{z}_{st}$ we compute the loss as \n\n$$l(z_{st}, \\hat{z}_{st}) = \\frac{1}{2} \\|z_{st} - \\hat{z}_{st}\\|^2$$\n\n1. Why is converting values into logarithms (and logarithms of ratios) a good idea? Explain this for each variable.\n1. Why would we want to rescale the ratios by 10?\n1. Explain why this model assumes a *lognormal* distribution of prediction errors between the values of the securities ${z}_{st}$ and their estimates $\\hat{z}_{st}$. That is, rather than being drawn from a Gaussian, they're drawn from another distribution. Characterize it (hint - exploit the connection between squared loss and the normal distribution).\n1. Now assume that we have not just one security but the top 500 stocks over some period of time. Why might it make \n sense to estimate the share prices jointly? \n\n## 2. Load Data\n\n1. Obtain data from the S&P500 for the past 5 years and convert it into a time series. You can get the data either from Kaggle [www.kaggle.com/camnugent/sandp500](https://www.kaggle.com/camnugent/sandp500) or crawl it directly using the Python script given here: [github.com/CNuge/kaggle-code/blob/master/stock_data/getSandP.py](https://github.com/CNuge/kaggle-code/blob/master/stock_data/getSandP.py). Your dataset will contain tuples of the form \n`(date, open, high, low, close, volume, Name)`. \n1. Import this data into an NDArray dataset where you have a vector containing `(open, high, low, close, volume)` for each security. That is, this is a 2,500 dimensional vector and you have 5 years' worth of data. \n1. Preprocess the data into logarithmic representation as outlined in problem 1.\n1. 
Split the data into observations for the first 4 years and a dataset for the last year. \n1. Load data into an MXNet dataset iterator.\n1. Why do you need to do this as opposed to splitting into random segments?\n\n## 3. Time Series Implementation\n\n1. Implement a model similar to `RNNModel` of section [d2l.ai/chapter_recurrent-neural-networks/rnn-concise.html](http://en.d2l.ai/chapter_recurrent-neural-networks/rnn-concise.html) suitable for regression. It should take as input vector-valued data, such as the time series mentioned above and it should output vector-valued data (of some other dimensionality).\n1. Train the model on the first 4 years of data using plain RNN, GRU and LSTM cells (for a single layer). How well can the model \n * Predict the stock value the next day on the last 1 year of data (price at opening).\n * Plot how the quality of the model degrades as we apply it throughout the year (i.e. we ingest all the data up to day $t$ and predict forward at day $t+1$). \n * Predict the stock value the next week on the last 1 year of data (price at opening).\n1. Train the model on each stock separately (with much lower dimensionality) and compare the performance of the above model with the one you get by using each stock separately. \n1. Improve the model using better features, e.g. the fact that time is not uniformly spaced (Saturday, Sunday and holidays do not see any trades). For that use the day of the week as an additional input feature. \n1. Improve the model further by using a deeper RNN, e.g. with 2 layers. \n\nNote, there are many cases where we might want to know the *sequence* of stock prices over a period of time rather than just knowing the value, say one month from now. This is relevant e.g. for options pricing where investors can bet on or bet against volatility of a stock price. For a detailed description of this see e.g. [en.wikipedia.org/wiki/Options_strategy](https://en.wikipedia.org/wiki/Options_strategy).",
"_____no_output_____"
]
]
] | [
"markdown"
] | [
[
"markdown"
]
] |
d0eb056d4bb152f33344abc6252c7205b2044879 | 2,640 | ipynb | Jupyter Notebook | leetcode11.ipynb | younthu/LeetCode | 1a94e76cece623b05e7cda87819b0844ff5cf5be | [
"MIT"
] | null | null | null | leetcode11.ipynb | younthu/LeetCode | 1a94e76cece623b05e7cda87819b0844ff5cf5be | [
"MIT"
] | null | null | null | leetcode11.ipynb | younthu/LeetCode | 1a94e76cece623b05e7cda87819b0844ff5cf5be | [
"MIT"
] | null | null | null | 23.362832 | 106 | 0.473864 | [
[
[
"# https://leetcode.com/problems/container-with-most-water/\n# 思路:从数组两边往中间收敛\nfrom typing import List\nclass Solution:\n def maxArea(self, height: List[int]) -> int:\n max_value = 0\n left = 0\n right = len(height) - 1\n \n while left < right:\n width = right - left\n left_height = height[left]\n right_height = height[right]\n current_height = left_height if left_height < right_height else right_height\n temp_max = width * current_height\n if temp_max > max_value:\n max_value = temp_max\n \n if left_height < right_height:\n left += 1\n elif left_height == right_height:\n left += 1\n right -= 1\n else:\n right -= 1\n return max_value",
"_____no_output_____"
],
[
"s = Solution()\ns.maxArea([1,8,6,2,5,4,8,3,7])\n",
"_____no_output_____"
],
[
"s.maxArea([1,2])",
"_____no_output_____"
],
[
"Runtime: 48 ms, faster than 99.05% of Python3 online submissions for Container With Most Water.\nMemory Usage: 14.1 MB, less than 99.49% of Python3 online submissions for Container With Most Water.",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code"
]
] |
d0eb0e540430d94e09ade9df0a044621d3762c93 | 190,650 | ipynb | Jupyter Notebook | _build/html/_sources/2_Data_Acquisition/2_Data_Acquisition.ipynb | niekveldhuis/compass | 720d46f33f2b08ddf6304674e324e37c5693c09a | [
"CC0-1.0"
] | 8 | 2019-08-13T08:49:05.000Z | 2021-08-24T08:14:51.000Z | 2_Data_Acquisition/2_Data_Acquisition.ipynb | niekveldhuis/compass | 720d46f33f2b08ddf6304674e324e37c5693c09a | [
"CC0-1.0"
] | 1 | 2020-06-20T12:18:13.000Z | 2020-06-22T11:25:05.000Z | 2_Data_Acquisition/2_Data_Acquisition.ipynb | niekveldhuis/compass | 720d46f33f2b08ddf6304674e324e37c5693c09a | [
"CC0-1.0"
] | 3 | 2019-06-06T14:00:47.000Z | 2020-08-06T22:38:59.000Z | 325.897436 | 146,585 | 0.373632 | [
[
[
"# 2 Data Acquisition\n\nIn this chapter we will discuss data acquisition and data formatting for four online Assyriological projects: [ORACC](http://oracc.org) (2.1), [ETCSL](https://etcsl.orinst.ox.ac.uk/), (2.2) [CDLI](http://cdli.ucla.edu) (2.3) and [BDTNS](http://bdtns.filol.csic.es/) (2.4). \n\nThe data in [CDLI](http://cdli.ucla.edu) and [BDTNS](http://bdtns.filol.csic.es/) are made available in raw-text format, with transliteration only. For instance (atf text format as used by [CDLI](http://cdli.ucla.edu)):\n\n```{admonition} ATF\n:class: tip, dropdown\nATF is short for ASCII Text Format. [ORACC](http://oracc.org) and [CDLI](http://cdli.ucla.edu) use different versions of the ATF format. The various symbols and conventions are explained [here](http://oracc.org/doc/help/editinginatf/cdliatf/).\n```",
"_____no_output_____"
],
[
"&P100001 = AAS 013 \n#atf: lang sux \n@tablet \n@obverse \n@column 1 \n$ beginning broken \n1'. a2-bi u4 [...] 5(u) 4(disz) 2/3(disz)-kam \n2'. 8(gesz2) 3(u) 5(disz) gurusz u4 1(disz)-sze3 \n3'. si-i3-tum nig2-ka9-ak mu en-mah-gal-an-na ba-hun \n4'. 2(asz) 2(barig) sze gur",
"_____no_output_____"
],
[
"This data format is easy to read for humans (those humans who know Sumerian), but less so for computers. It is necessary to tell the software which data elements belong to the text and which do not (for instance, line numbers and surface labels) and what the various non-textual elements mean. We will see examples of how such data sets may be used in the sections 2.3 ([CDLI](http://cdli.ucla.edu)) and 2.4 ([BDTNS](http://bdtns.filol.csic.es/)). Section 2.4 will also demonstrate code for constructing a search engine for [BDTNS](http://bdtns.filol.csic.es/) that ignores sign values - that is, searching for `luh` will also find `sukkal`, etc. The code uses both [BDTNS](http://bdtns.filol.csic.es/) data and the [ORACC Global Sign List](http://orac.org/ogsl), showing how data from different projects can be mashed into a single tool.\n\nThe data in [ORACC](http://oracc.org) and [ETCSL](https://etcsl.orinst.ox.ac.uk/) are made available in [JSON](http://json.org) and [XML](http://xml.org), respectively. Those formats are very explicit and atomistic. They less easy to read for humans, but are very flexible for computational usage and allow for multiple levels of annotation (with e.g. lexical, morphological, and graphemic information) at the same time. The data in [ORACC](http://oracc.org) and [ETCSL](https://etcsl.orinst.ox.ac.uk/) includes lemmatization, linking each word to an entry in a glossary. The following is an example of a JSON file, one may click on any of the lines with an arrow to expose more or less of the hierarchical structure. The usage of JSON and XML files will be discussed in sections 2.1 and 2.2.",
"_____no_output_____"
]
],
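[
[
"# A minimal parsing sketch (illustration only, not from the original project): split CDLI-style\n# ATF lines into their line-number label and transliteration content\natf_lines = [\"1'. a2-bi u4 [...] 5(u) 4(disz) 2/3(disz)-kam\",\n             \"2'. 8(gesz2) 3(u) 5(disz) gurusz u4 1(disz)-sze3\"]\nfor line in atf_lines:\n    label, text = line.split(' ', 1)\n    print(label, '->', text)",
"_____no_output_____"
]
],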
[
[
"import json\nimport panel as pn\npn.extension()\nwith open('P100001.json', 'r', encoding='utf8') as p:\n P100001 = json.load(p)\njson_object = pn.pane.JSON(P100001, name='P100001', depth=1, height=300, width=500, theme = 'light')\njson_object",
"_____no_output_____"
]
],
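[
[
"# A sketch of pulling word forms out of the JSON loaded above (assumption: the standard ORACC\n# 'cdl' tree, where word nodes carry an 'f' dictionary with a 'form' entry)\ndef collect_forms(node, out):\n    if isinstance(node, dict):\n        if 'f' in node:\n            out.append(node['f'].get('form'))\n        for child in node.get('cdl', []):\n            collect_forms(child, out)\n    return out\n\nprint(collect_forms(P100001, [])[:10])",
"_____no_output_____"
]
],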
[
[
"This represents the same text as the one shown in raw text format above ([P100001 = AAS 13](http://oracc.org/epsd2/P100001)), but in this case provided with lemmatization and explicit information on the various data types.\n\n```{admonition} Full JSON file\n:class: tip, dropdown\nTo see the full JSON file of P100001 click [here](https://github.com/niekveldhuis/compass/blob/master/2_Data_Acquisition/P100001.json)\n\n```\n\nThe Compass project mostly deals with [ORACC](http://oracc.org) data, and much of this chapter will provide code and explanations for how to extract the various types of information that are included in the JSON files. The parsing of the [ETCSL](https://etcsl.orinst.ox.ac.uk/) XML files (section [2.2](2.2) is, to some extent, redundant, because all of the [ETCSL](https://etcsl.orinst.ox.ac.uk/) data have been incorporated into [epsd2/literary](http://oracc.org/epsd2/literary) and can be parsed with the tools for regular [ORACC](http://oracc.org) projects. \n\nThe Chapters 3-6 of Compass will work with [ORACC](http://oracc.org) data and will parse that data with the tools demonstrated and explained in section [2.1](2.1). Chapter 2 is not needed to follow along in those chapters. The present chapter is primarily meant for researchers who wish to pursue their own computational projects and need a deeper understanding of how the data is acquired and formatted.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
d0eb11bfd7c379d5a998eb22c26e557b8c009427 | 19,803 | ipynb | Jupyter Notebook | notebooks/4.0-gr-Build_Machine_Learning_API.ipynb | graemerenfrew/titanic | 2b1c5e52fe66ad923956f5de824422ff01ce7f36 | [
"MIT"
] | null | null | null | notebooks/4.0-gr-Build_Machine_Learning_API.ipynb | graemerenfrew/titanic | 2b1c5e52fe66ad923956f5de824422ff01ce7f36 | [
"MIT"
] | null | null | null | notebooks/4.0-gr-Build_Machine_Learning_API.ipynb | graemerenfrew/titanic | 2b1c5e52fe66ad923956f5de824422ff01ce7f36 | [
"MIT"
] | null | null | null | 30.233588 | 122 | 0.415745 | [
[
[
"# Hello World API with Flask",
"_____no_output_____"
]
],
[
[
"# We'll create the script - we created our folder structure with cookiecutter \nimport os\nhello_world_script_file = os.path.join(os.path.pardir,'src','models','hello_world_api2.py')",
"_____no_output_____"
],
[
"%%writefile $hello_world_script_file\n\nfrom flask import Flask, request\napp = Flask(__name__)\n\[email protected]('/api', methods=['POST']) #api will take an input, process it, and return it\n\ndef say_hello():\n data = request.get_json(force=True) #we will pass json, so use get_json to get extract the data\n name = data['name']\n return \"hello {0}\".format(name)\n\nif __name__ == '__main__': # script entry point, the flask app will run on port 10001 - can be any available port\n app.run(port=10001, debug=True,use_reloader=False) # debug = true for troubleshooting in dev",
"Overwriting ../src/models/hello_world_api2.py\n"
],
[
"# We have started the process via the command line with python3 hellow_world_api2.py\nimport json",
"_____no_output_____"
],
[
"import requests",
"_____no_output_____"
],
[
"# create a call to the API endpoint\nurl = 'http://127.0.0.1:10001/api'\n#create the data we are sending\ndata = json.dumps({'name':'graeme'}) #dumps creates the data in a json object\nr = requests.post(url, data) #call the API and store response in r.",
"_____no_output_____"
],
[
"print(r.text)",
"hello graeme\n"
],
[
"# This is calling the API and returning correctly :D",
"_____no_output_____"
]
],
[
[
"# API for Machine Learning with Flask",
"_____no_output_____"
]
],
[
[
"# We'll create the script - we created our folder structure with cookiecutter \nimport os\nmachine_learning_api_script_file = os.path.join(os.path.pardir,'src','models','machine_learning_api.py')",
"_____no_output_____"
],
[
"%%writefile $machine_learning_api_script_file\n# Now this is the code we used from all the previous steps we performed.\n\n\nfrom flask import Flask, request\nimport pandas as pd\nimport numpy as np\nimport json\nimport pickle\nimport os\n\napp = Flask(__name__)\n\n#load the model and scaler files\nmodel_path = os.path.join(os.path.pardir, os.path.pardir,'models')\nmodel_filepath = os.path.join(model_path, 'lr_model.pkl')\nscaler_filepath = os.path.join(model_path, 'lr_scaler.pkl')\n\n#load them in\nscaler = pickle.load(open(scaler_filepath,'rb')) #remember to set read more binary\nmodel = pickle.load(open(model_filepath,'rb'))\n\n# columns put in order that the ML model will expect\ncolumns = [ u'Age', u'Fare', u'FamilySize', \\\n u'IsMother', u'IsMale', u'Deck_A', u'Deck_B', u'Deck_C', u'Deck_D', \\\n u'Deck_E', u'Deck_F', u'Deck_G', u'Deck_Z', u'Pclass_1', u'Pclass_2', \\\n u'Pclass_3', u'Title_Lady', u'Title_Master', u'Title_Miss', u'Title_Mr', \\\n u'Title_Mrs', u'Title_Officer', u'Title_Sir', u'Fare_Bin_very_low', \\\n u'Fare_Bin_low', u'Fare_Bin_high', u'Fare_Bin_very_high', u'Embarked_C', \\\n u'Embarked_Q', u'Embarked_S', u'AgeState_Adult', u'AgeState_Child'] \n\[email protected]('/api', methods=['POST'])\ndef make_predicitions():\n #This will be executed with the API is called\n #Read the json object and convert it to a json string\n data = json.dumps(request.get_json(force='TRUE'))\n #create a data frame from the json string\n df = pd.read_json(data)\n #extract the index passenger id\n passenger_ids = df['PassengerId'].ravel()\n # capture the actual survived values -- we do not have all the actuals, those are on Kaggle, but this is how\n # this API process would work, if we did have a store of all the actual survival data\n actuals = df['Survived'].ravel()\n # extract all the columns from the data and convert into a matrix\n X = df[columns].as_matrix().astype('float')\n # transform the data into the scaled object\n X_scaled = scaler.transform(X)\n # make the predicitions\n predictions = model.predict(X_scaled)\n # create response object dataframe\n df_response = pd.DataFrame({'PassengerId': passenger_ids, 'Predicted': predictions, 'Actual': actuals})\n # return our JSON object\n return df_response.to_json()\n\nif __name__ == '__main__':\n #host the flask app\n app.run(port=10001, debug=True,use_reloader=False) # debug = true for troubleshooting in dev\n ",
"Overwriting ../src/models/machine_learning_api.py\n"
],
[
"# now we run the flask server from the command line\n# $ python3 machine_learning_api.py\n",
"_____no_output_____"
]
],
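[
[
"# A quick smoke test from another terminal (assumption: the server above is listening on port 10001);\n# illustrative only -- the JSON payload must contain the columns the model expects:\n# $ curl -X POST -H 'Content-Type: application/json' -d '[{...}]' http://127.0.0.1:10001/api",
"_____no_output_____"
]
],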
[
[
"## Invoke API using the Requests feature",
"_____no_output_____"
]
],
[
[
"import os\nimport numpy as np\nimport pandas as pd\nprocessed_data_path = os.path.join(os.path.pardir,'data','processed')\ntrain_file_path = os.path.join(processed_data_path, 'train.csv')\ntrain_df = pd.read_csv(train_file_path)",
"_____no_output_____"
],
[
"# the processed training data will be used to check the API is working\n# let's use 5 passengers to check to see if they Survived\nsurvived_passengers = train_df[train_df['Survived'] == 1][:5]\nsurvived_passengers\n",
"_____no_output_____"
],
[
"# We should get same response from API. Let's create a helper\nimport requests\ndef make_api_request(data):\n # url where the API is exposed\n url = \"http://127.0.0.1:10001/api\"\n #request\n r = requests.post(url, data)\n #return\n # return r.text - check we get something\n return r.json()",
"_____no_output_____"
],
[
"# This should retrn the same output of Survived, as a check that hte API is working\nmake_api_request(survived_passengers.to_json())",
"_____no_output_____"
],
[
"# As we can see, all Survived",
"_____no_output_____"
],
[
"# Now pass the entire Training df to the api function\n# then convert the result to JSON and put it into a result df\n# Have a look at the top 5, then check accuracy by compare Actual to Predicted\n# Then we will convert that into a Mean value, to get the accuracy.\nresult = make_api_request(train_df.to_json())\ndf_result = pd.read_json(json.dumps(result))\ndf_result.head()",
"_____no_output_____"
],
[
"# what is the oveall accuracy?\nnp.mean(df_result.Actual == df_result.Predicted)",
"_____no_output_____"
],
[
"# This is as expected from our previous modeling persistence demo\n# So now we have a machine learning API\n#\n# How could we improve the API?\n# We should be able to tinker to allow the raw data to be feed, to be processed then passing that dat to the model",
"_____no_output_____"
],
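[
"# A minimal, hypothetical sketch of the improvement described above: an\n# endpoint that accepts raw passenger records and runs the same feature\n# processing used in training before predicting. The helper name\n# process_raw_df is an assumption -- it stands in for whatever pipeline\n# produced the processed training file and is not defined in this project,\n# so the route is left commented out.\n#\n# @app.route('/api/raw', methods=['POST'])\n# def make_predictions_from_raw():\n#     df_raw = pd.read_json(json.dumps(request.get_json(force=True)))\n#     df_processed = process_raw_df(df_raw)  # hypothetical preprocessing step\n#     X = df_processed[columns].values.astype('float')\n#     predictions = model.predict(scaler.transform(X))\n#     return pd.DataFrame({'PassengerId': df_raw['PassengerId'].ravel(),\n#                          'Predicted': predictions}).to_json()",
"_____no_output_____"
]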
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
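"code",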
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d0eb1e45c6428179d7f9fb377fbbfb0ae3214412 | 892,196 | ipynb | Jupyter Notebook | notebooks/modulated.ipynb | emmetthough/gravity_sim | bda22113462bebd1e8e6c03ea775237ee755d255 | [
"MIT"
] | null | null | null | notebooks/modulated.ipynb | emmetthough/gravity_sim | bda22113462bebd1e8e6c03ea775237ee755d255 | [
"MIT"
] | null | null | null | notebooks/modulated.ipynb | emmetthough/gravity_sim | bda22113462bebd1e8e6c03ea775237ee755d255 | [
"MIT"
] | null | null | null | 1,712.46833 | 129,072 | 0.958064 | [
[
[
"%matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport dill as pickle\nimport os, sys\nimport scipy.interpolate as intp\nimport bead_util as bu\nplt.rcParams['figure.figsize'] = (12,8)\nplt.rcParams['xtick.labelsize'] = 15\nplt.rcParams['ytick.labelsize'] = 15\n%matplotlib inline\n\nfrom symmetric_attractor_profile import attractor_profile\nfrom holes_analysis import holes_data, holes_analysis",
"_____no_output_____"
],
[
"parent = '/home/emmetth/gravity_sim'\nos.chdir(parent)\nfull_path = parent+'/sim_data/modulated/'\n\nPS = holes_data(data_dir=full_path)\ndata = PS.data",
"_____no_output_____"
],
[
"hrs = sorted(PS.hrs)\nseparations = sorted(PS.from_edges)",
"_____no_output_____"
],
[
"hrs",
"_____no_output_____"
],
[
"separations",
"_____no_output_____"
],
[
"p0 = 7,separations[-1],hrs[0],10.0,5.0\nFM0 = holes_analysis(data, p0)\n\nFM0.sum_harmonics(w=1, fsamp=5e3, num_harmonics=10, verbose=True)",
"First 10 harmonics:\n\n Radial Angular Axial\nNewtonian: 7.072e-24 3.747e-24 1.585e-24\nYukawa:\nl=1.00um: 1.638e-29 4.899e-31 7.566e-31\nl=10.00um: 2.669e-24 6.018e-25 5.337e-25\n\n"
],
[
"times, newt, (yuka, lambdas) = FM0.sample_Gdata(w=1, tint=1)",
"_____no_output_____"
],
[
"yuka.shape",
"_____no_output_____"
],
[
"plt.plot(times, yuka[1,0,:]-np.mean(yuka[1,0,:]))\nplt.plot(times, newt[0,:]-np.mean(newt[0,:]))\nplt.rcParams['figure.figsize'] = 12,8\nplt.xlim(0,0.5)",
"_____no_output_____"
],
[
"plt.close('all')\nfig, ax = FM0.plot_signals(log=False, fsamp=5e3, f0=7, num_harmonics=15)\nfig",
"_____no_output_____"
],
[
"%matplotlib inline\nfig,ax = FM0.plot_asd()",
"_____no_output_____"
],
[
"harms_rad = np.zeros((len(separations), len(hrs), 3))\nsep, height = 5.0,5.0\naxes_ind = {'radial': 0, 'angular': 1, 'axial': 2}\n\naxis = axes_ind['radial']\n\nfor i,edge in enumerate(separations):\n for j,hr in enumerate(hrs):\n p = 7,edge,hr,sep,height\n FM = holes_analysis(data, p)\n harms = FM.sum_harmonics(w=1, fsamp=5e3, num_harmonics=10)\n harms_rad[i,j,:] = harms[:,axis]\n \nnp.save('holes_harm_rad_new.npy', harms_rad)",
"_____no_output_____"
],
[
"%matplotlib inline\nplt.rcParams['figure.figsize'] = (12,8)\nplt.contourf(separations, hrs, harms_rad[:,:,0].T, levels=25)\nplt.colorbar()\nplt.xlabel('Modulation Distance [frac. of hr]', fontsize=18)\nplt.ylabel('Hole Radius [$\\mu m$]', fontsize=18)\nplt.title('Hole Harmonic Content\\nRadial Newtonian', fontsize=20, y=1.02)\nplt.tick_params('both', length=10, width=2.5, which='major', labelsize=15)\nplt.tick_params('both', length=10, width=2.5, which='minor')\n# plt.savefig('dist_radius.png', dpi=150)\nplt.show()",
"_____no_output_____"
]
],
[
[
"This is not at all what I expected, but it makes sense as the absolute magnitude increases with both hole size and distance from edge, so to see the feature matching in a colorbar one would need to normalize the peaks to each other or something similar.",
"_____no_output_____"
]
],
[
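[
"# A minimal sketch of the normalization idea mentioned above: divide each\n# hole-radius curve by its own peak before plotting, so the contour shows the\n# shape of the response rather than its absolute magnitude. This assumes\n# harms_rad is populated as in the earlier cell; per-column max normalization\n# is just one choice among several.\nharms_norm = harms_rad[:,:,0] / harms_rad[:,:,0].max(axis=0, keepdims=True)\nplt.contourf(separations, hrs, harms_norm.T, levels=25)\nplt.colorbar()\nplt.xlabel('Modulation Distance [frac. of hr]', fontsize=18)\nplt.ylabel('Hole Radius [$\\\\mu m$]', fontsize=18)\nplt.title('Peak-normalized Radial Newtonian Harmonics', fontsize=20, y=1.02)\nplt.show()",
"_____no_output_____"
],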
[
"for i,hr in enumerate(hrs):\n plt.plot(separations, harms_rad[:,i,:], 'o-')\n plt.legend(['newtonian', '$\\lambda=1\\mu m$', '$\\lambda=10\\mu m$'])\n plt.xlabel('modulation separation [$\\mu m$]', fontsize=18)\n plt.ylabel('harmonic strength [N/$\\sqrt{Hz}$]', fontsize=18)\n # plt.axvline(hr, ls='--', alpha=0.7)\n plt.title(f'{hr} $\\mu m$ hole radius harmonics vs distance', fontsize=18, y=1)\n # plt.savefig(f'new_feature_matching_plots/edge/{hr}.png', dpi=150)\n plt.show()",
"_____no_output_____"
],
[
"for i,edge in enumerate(separations):\n plt.plot(hrs, harms_rad[i,:,:], 'o-')\n plt.legend(['newtonian', '$\\lambda=1\\mu m$', '$\\lambda=10\\mu m$'])\n plt.xlabel('hole radius [$\\mu m$]', fontsize=18)\n plt.ylabel('harmonic strength [N/$\\sqrt{Hz}$]', fontsize=18)\n # plt.axvline(edge, ls='--', alpha=0.7)\n plt.title(f'{edge}x modulation distance harmonics vs radius', fontsize=18)\n # plt.savefig(f'new_feature_matching_plots/radius/{edge}.png', dpi=150)\n plt.show()",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
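"code",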
"code",
"code"
]
] |
d0eb1e65647f2034c9fe13c0aadca53333af1251 | 59,840 | ipynb | Jupyter Notebook | dev/02_data_transforms.ipynb | tianjianjiang/fastai_dev | cc8e2d64c330c1a93dd84c854b12e700c7d68a8b | [
"Apache-2.0"
] | null | null | null | dev/02_data_transforms.ipynb | tianjianjiang/fastai_dev | cc8e2d64c330c1a93dd84c854b12e700c7d68a8b | [
"Apache-2.0"
] | null | null | null | dev/02_data_transforms.ipynb | tianjianjiang/fastai_dev | cc8e2d64c330c1a93dd84c854b12e700c7d68a8b | [
"Apache-2.0"
] | 1 | 2019-08-30T14:34:07.000Z | 2019-08-30T14:34:07.000Z | 56.293509 | 31,108 | 0.754796 | [
[
[
"#default_exp data.transform",
"_____no_output_____"
],
[
"#export\nfrom local.torch_basics import *\nfrom local.test import *\nfrom local.notebook.showdoc import show_doc",
"_____no_output_____"
],
[
"from PIL import Image",
"_____no_output_____"
]
],
[
[
"# Transforms",
"_____no_output_____"
],
[
"## Helpers",
"_____no_output_____"
]
],
[
[
"#exports\ndef type_hints(f):\n \"Same as `typing.get_type_hints` but returns `{}` if not allowed type\"\n return typing.get_type_hints(f) if isinstance(f, typing._allowed_types) else {}",
"_____no_output_____"
],
[
"#export\ndef anno_ret(func):\n \"Get the return annotation of `func`\"\n if not func: return None\n ann = type_hints(func)\n if not ann: return None\n return ann.get('return')",
"_____no_output_____"
],
[
"#hide\ndef f(x) -> float: return x\ntest_eq(anno_ret(f), float)\ndef f(x) -> Tuple[float,float]: return x\ntest_eq(anno_ret(f), Tuple[float,float])\ndef f(x) -> None: return x\ntest_eq(anno_ret(f), NoneType)\ndef f(x): return x\ntest_eq(anno_ret(f), None)\ntest_eq(anno_ret(None), None)",
"_____no_output_____"
],
[
"#export\ncmp_instance = functools.cmp_to_key(lambda a,b: 0 if a==b else 1 if issubclass(a,b) else -1)",
"_____no_output_____"
],
[
"td = {int:1, numbers.Number:2, numbers.Integral:3}\ntest_eq(sorted(td, key=cmp_instance), [numbers.Number, numbers.Integral, int])",
"_____no_output_____"
],
[
"#export\ndef _p1_anno(f):\n \"Get the annotation of first param of `f`\"\n hints = type_hints(f)\n ann = [o for n,o in hints.items() if n!='return']\n return ann[0] if ann else object",
"_____no_output_____"
],
[
"def _f(a, b): pass\ntest_eq(_p1_anno(_f), object)\ndef _f(a, b)->str: pass\ntest_eq(_p1_anno(_f), object)\ndef _f(a, b:str)->float: pass\ntest_eq(_p1_anno(_f), str)\ndef _f(a:int, b:int)->float: pass\ntest_eq(_p1_anno(_f), int)\ntest_eq(_p1_anno(attrgetter('foo')), object)",
"_____no_output_____"
]
],
[
[
"## Types",
"_____no_output_____"
]
],
[
[
"#export\n@delegates(plt.subplots, keep=True)\ndef subplots(nrows=1, ncols=1, **kwargs):\n fig,ax = plt.subplots(nrows,ncols,**kwargs)\n if nrows*ncols==1: ax = array([ax])\n return fig,ax",
"_____no_output_____"
],
[
"#export\nclass TensorImageBase(TensorBase):\n _show_args = {'cmap':'viridis'}\n def show(self, ctx=None, **kwargs):\n return show_image(self, ctx=ctx, **{**self._show_args, **kwargs})\n\n def get_ctxs(self, max_n=10, rows=None, cols=None, figsize=None, **kwargs):\n n_samples = min(self.shape[0], max_n)\n rows = rows or int(np.ceil(math.sqrt(n_samples)))\n cols = cols or int(np.ceil(math.sqrt(n_samples)))\n figsize = (cols*3, rows*3) if figsize is None else figsize\n _,axs = subplots(rows, cols, figsize=figsize)\n return axs.flatten()",
"_____no_output_____"
],
[
"#export\nclass TensorImage(TensorImageBase): pass",
"_____no_output_____"
],
[
"#export\nclass TensorImageBW(TensorImage): _show_args = {'cmap':'Greys'}",
"_____no_output_____"
],
[
"#export\nclass TensorMask(TensorImageBase): _show_args = {'alpha':0.5, 'cmap':'tab20'}",
"_____no_output_____"
],
[
"im = Image.open(TEST_IMAGE)",
"_____no_output_____"
],
[
"im_t = TensorImage(array(im))\ntest_eq(type(im_t), TensorImage)",
"_____no_output_____"
],
[
"im_t2 = TensorMask(tensor(1))\ntest_eq(type(im_t2), TensorMask)\ntest_eq(im_t2, tensor(1))",
"_____no_output_____"
],
[
"ax = im_t.show(figsize=(2,2))",
"_____no_output_____"
],
[
"test_fig_exists(ax)",
"_____no_output_____"
],
[
"#hide\naxes = im_t.get_ctxs(1)\ntest_eq(axes.shape,[1])\nplt.close()\naxes = im_t.get_ctxs(4)\ntest_eq(axes.shape,[4])\nplt.close()",
"_____no_output_____"
]
],
[
[
"## TypeDispatch -",
"_____no_output_____"
]
],
[
[
"#export\nclass TypeDispatch:\n \"Dictionary-like object; `__getitem__` matches keys of types using `issubclass`\"\n def __init__(self, *funcs):\n self.funcs,self.cache = {},{}\n for f in funcs: self.add(f)\n self.inst = None\n\n def _reset(self):\n self.funcs = {k:self.funcs[k] for k in sorted(self.funcs, key=cmp_instance, reverse=True)}\n self.cache = {**self.funcs}\n\n def add(self, f):\n \"Add type `t` and function `f`\"\n self.funcs[_p1_anno(f) or object] = f\n self._reset()\n\n def returns(self, x): return anno_ret(self[type(x)])\n def returns_none(self, x):\n r = anno_ret(self[type(x)])\n return r if r == NoneType else None\n\n def __repr__(self): return str({getattr(k,'__name__',str(k)):v.__name__ for k,v in self.funcs.items()})\n\n def __call__(self, x, *args, **kwargs):\n f = self[type(x)]\n if not f: return x\n if self.inst: f = types.MethodType(f, self.inst)\n return f(x, *args, **kwargs)\n\n def __get__(self, inst, owner):\n self.inst = inst\n return self\n\n def __getitem__(self, k):\n \"Find first matching type that is a super-class of `k`\"\n if k in self.cache: return self.cache[k]\n types = [f for f in self.funcs if issubclass(k,f)]\n res = self.funcs[types[0]] if types else None\n self.cache[k] = res\n return res",
"_____no_output_____"
],
[
"def f_col(x:typing.Collection): return x\ndef f_nin(x:numbers.Integral)->int: return x+1\ndef f_bti(x:TensorMask): return x\ndef f_fti(x:TensorImage): return x\ndef f_bll(x:bool): return x\ndef f_num(x:numbers.Number): return x\nt = TypeDispatch(f_nin,f_fti,f_num,f_bti,f_bll)\n\ntest_eq(t[int], f_nin)\ntest_eq(t[str], None)\ntest_eq(t[TensorImage], f_fti)\ntest_eq(t[float], f_num)\nt.add(f_col)\ntest_eq(t[str], f_col)\ntest_eq(t[int], f_nin)\ntest_eq(t(1), 2)\ntest_eq(t.returns(1), int)\nt",
"_____no_output_____"
],
[
"def m_nin(self, x:numbers.Integral): return x+1\ndef m_bll(self, x:bool): return x\ndef m_num(self, x:numbers.Number): return x\n\nt = TypeDispatch(m_nin,m_num,m_bll)\nclass A: f = t\na = A()\ntest_eq(a.f(1), 2)\ntest_eq(a.f(1.), 1.)",
"_____no_output_____"
]
],
[
[
"## Transform -",
"_____no_output_____"
]
],
[
[
"#export\nclass _TfmDict(dict):\n def __setitem__(self,k,v):\n if k=='_': k='encodes'\n if k not in ('encodes','decodes') or not isinstance(v,Callable): return super().__setitem__(k,v)\n if k not in self: super().__setitem__(k,TypeDispatch())\n res = self[k]\n res.add(v)",
"_____no_output_____"
],
[
"#export\nclass _TfmMeta(type):\n def __new__(cls, name, bases, dict):\n res = super().__new__(cls, name, bases, dict)\n res.__signature__ = inspect.signature(res.__init__)\n return res\n\n def __call__(cls, *args, **kwargs):\n f = args[0] if args else None\n n = getattr(f,'__name__',None)\n if not hasattr(cls,'encodes'): cls.encodes=TypeDispatch()\n if not hasattr(cls,'decodes'): cls.decodes=TypeDispatch()\n if isinstance(f,Callable) and n in ('decodes','encodes','_'):\n getattr(cls,'encodes' if n=='_' else n).add(f)\n return f\n return super().__call__(*args, **kwargs)\n\n @classmethod\n def __prepare__(cls, name, bases): return _TfmDict()",
"_____no_output_____"
],
[
"#export\nclass Transform(metaclass=_TfmMeta):\n \"Delegates (`__call__`,`decode`) to (`encodes`,`decodes`) if `filt` matches\"\n filt,init_enc,as_item_force,as_item,order = None,False,None,True,0\n def __init__(self, enc=None, dec=None, filt=None, as_item=False):\n self.filt,self.as_item = ifnone(filt, self.filt),as_item\n self.init_enc = enc or dec\n if not self.init_enc: return\n\n # Passing enc/dec, so need to remove (base) class level enc/dec\n del(self.__class__.encodes,self.__class__.decodes)\n self.encodes,self.decodes = (TypeDispatch(),TypeDispatch())\n if enc:\n self.encodes.add(enc)\n self.order = getattr(self.encodes,'order',self.order)\n if dec: self.decodes.add(dec)\n\n @property\n def use_as_item(self): return ifnone(self.as_item_force, self.as_item)\n def __call__(self, x, **kwargs): return self._call('encodes', x, **kwargs)\n def decode (self, x, **kwargs): return self._call('decodes', x, **kwargs)\n def __repr__(self): return f'{self.__class__.__name__}: {self.use_as_item} {self.encodes} {self.decodes}'\n\n def _call(self, fn, x, filt=None, **kwargs):\n if filt!=self.filt and self.filt is not None: return x\n f = getattr(self, fn)\n if self.use_as_item or not is_listy(x): return self._do_call(f, x, **kwargs)\n res = tuple(self._do_call(f, x_, **kwargs) for x_ in x)\n return retain_type(res, x)\n\n def _do_call(self, f, x, **kwargs):\n return x if f is None else retain_type(f(x, **kwargs), x, f.returns_none(x))\n\nadd_docs(Transform, decode=\"Delegate to `decodes` to undo transform\")",
"_____no_output_____"
],
[
"show_doc(Transform)",
"_____no_output_____"
]
],
[
[
"Base class that delegates `__call__` and `decode` to `encodes` and `decodes`, doing nothing if param annotation doesn't match type. If called with listy `x` then it calls function with each item (unless `whole_typle`, in which case it's passed directly as a whole). The function (if matching 1st param type) will cast the result to the same as the input type, unless there's a return annotation (in which case it's cast to that), or the return annotation is `None` (in which case no casting is done).\n\nDetails: `Transform` is a base class where you override encodes and/or decodes. e.g. `__call__` uses `call` which looks up what to call using `func`. If `whole_tuple` is set, that just returns `encodes` (or `decodes` if not `is_enc`). Otherwise we find the first annotated param with `_p1_anno` and check if `x` is an instance of that (if not `is_listy(x)`). If it is, we return the function (encodes/decodes), otherwise None. `call` then passes on to `_do_call` which does nothing if function is `None`. If `x` is listy, then we return a *list* of {functions or `None`}, and a list of results from `_do_call` for each function is returned.",
"_____no_output_____"
]
],
[
[
"class A(Transform): pass\n@A\ndef encodes(self, x): return x+1\nf1 = A()\ntest_eq(f1(1), 2)\n\nclass B(A): pass\nf2 = B()\ntest_eq(f2(1), 2)\n\nclass A(Transform): pass\nf3 = A()\ntest_eq_type(f3(2), 2)\ntest_eq_type(f3.decode(2.0), 2.0)",
"_____no_output_____"
]
],
[
[
"`Transform` can be used as a decorator, to turn a function into a `Transform`.",
"_____no_output_____"
]
],
[
[
"@Transform\ndef f(x): return x//2\ntest_eq_type(f(2), 1)\ntest_eq_type(f.decode(2.0), 2.0)",
"_____no_output_____"
]
],
[
[
"You can derive from `Transform` and use either `_` or `encodes` for your encoding function.",
"_____no_output_____"
]
],
[
[
"class A(Transform):\n def _(self, x:TensorImage): return -x\nf = A()\nt = f(im_t)\ntest_eq(t, -im_t)\ntest_eq(f(1), 1)\ntest_eq(type(t), TensorImage)\nf",
"_____no_output_____"
]
],
[
[
"Without return annotation we get an `Int` back since that's what was passed.",
"_____no_output_____"
]
],
[
[
"class A(Transform): pass\n@A\ndef _(self, x:Int): return x//2 # `_` is an abbreviation for `encodes`\n@A\ndef encodes(self, x:float): return x+1\n\nf = A()\ntest_eq_type(f(Int(2)), Int(1))\ntest_eq_type(f(2), 2)\ntest_eq_type(f(2.), 3.)",
"_____no_output_____"
]
],
[
[
"Without return annotation we don't cast if we're not a subclass of the input type.",
"_____no_output_____"
]
],
[
[
"class A(Transform):\n def encodes(self, x:Int): return x/2\n def _(self, x:float): return x+1\n\nf = A()\ntest_eq_type(f(Int(2)), 1.)\ntest_eq_type(f(2), 2)\ntest_eq_type(f(Float(2.)), Float(3.))",
"_____no_output_____"
]
],
[
[
"With return annotation `None` we get back whatever Python creates usually.",
"_____no_output_____"
]
],
[
[
"def func(x)->None: return x/2\nf = Transform(func)\ntest_eq_type(f(2), 1.)\ntest_eq_type(f(2.), 1.)",
"_____no_output_____"
]
],
[
[
"Since `decodes` has no return annotation, but `encodes` created an `Int` and we pass that result here to `decode`, we end up with an `Int`.",
"_____no_output_____"
]
],
[
[
"def func(x): return Int(x+1)\ndef dec (x): return x-1\nf = Transform(func,dec)\nt = f(1)\ntest_eq_type(t, Int(2))\ntest_eq_type(f.decode(t), Int(1))",
"_____no_output_____"
]
],
[
[
"If the transform has `filt` then it's only applied if `filt` param matches.",
"_____no_output_____"
]
],
[
[
"f.filt = 1\ntest_eq(f(1, filt=1),2)\ntest_eq_type(f(1, filt=0), 1)",
"_____no_output_____"
],
[
"class A(Transform): \n def encodes(self, xy): x,y=xy; return (x+y,y)\n def decodes(self, xy): x,y=xy; return (x-y,y)\n\nf = A(as_item=True)\nt = f((1,2))\ntest_eq(t, (3,2))\ntest_eq(f.decode(t), (1,2))\nf.filt = 1\ntest_eq(f((1,2), filt=1), (3,2))\ntest_eq(f((1,2), filt=0), (1,2))",
"_____no_output_____"
],
[
"class AL(Transform): pass\n@AL\ndef encodes(self, x): return L(x_+1 for x_ in x)\n@AL\ndef decodes(self, x): return L(x_-1 for x_ in x)\n\nf = AL(as_item=True)\nt = f([1,2])\ntest_eq(t, [2,3])\ntest_eq(f.decode(t), [1,2])",
"_____no_output_____"
],
[
"def neg_int(x:numbers.Integral): return -x\n\nf = Transform(neg_int, as_item=False)\ntest_eq(f([1]), (-1,))\ntest_eq(f([1.]), (1.,))\ntest_eq(f([1.,2,3.]), (1.,-2,3.))\ntest_eq(f.decode([1,2]), (1,2))",
"_____no_output_____"
],
[
"#export\nclass InplaceTransform(Transform):\n \"A `Transform` that modifies in-place and just returns whatever it's passed\"\n def _call(self, fn, x, filt=None, **kwargs):\n super()._call(fn,x,filt,**kwargs)\n return x",
"_____no_output_____"
]
],
[
[
"## TupleTransform",
"_____no_output_____"
]
],
[
[
"#export\nclass TupleTransform(Transform):\n \"`Transform` that always treats `as_item` as `False`\"\n as_item_force=False",
"_____no_output_____"
],
[
"#export\nclass ItemTransform (Transform):\n \"`Transform` that always treats `as_item` as `True`\"\n as_item_force=True",
"_____no_output_____"
],
[
"def float_to_int(x:(float,int)): return Int(x)\n\nf = TupleTransform(float_to_int)\ntest_eq_type(f([1.]), (Int(1),))\ntest_eq_type(f([1]), (Int(1),))\ntest_eq_type(f(['1']), ('1',))\ntest_eq_type(f([1,'1']), (Int(1),'1'))\ntest_eq(f.decode([1]), [1])\n\ntest_eq_type(f(TupleBase((1.,))), TupleBase((Int(1),)))",
"_____no_output_____"
],
[
"class B(TupleTransform): pass\nclass C(TupleTransform): pass\nf = B()\ntest_eq(f([1]), [1])",
"_____no_output_____"
],
[
"@B\ndef _(self, x:int): return x+1\n@B\ndef _(self, x:str): return x+'1'\n@B\ndef _(self, x)->None: return str(x)+'!'\n\nb,c = B(),C()\ntest_eq(b([1]), [2])\ntest_eq(b(['1']), ('11',))\ntest_eq(b([1.0]), ('1.0!',))\ntest_eq(c([1]), [1])\ntest_eq(b([1,2]), (2,3))\ntest_eq(b.decode([2]), [2])\nassert pickle.loads(pickle.dumps(b))",
"_____no_output_____"
],
[
"@B\ndef decodes(self, x:int): return x-1\ntest_eq(b.decode([2]), [1])\ntest_eq(b.decode(('2',)), ('2',))",
"_____no_output_____"
]
],
[
[
"Non-type-constrained functions are applied to all elements of a tuple.",
"_____no_output_____"
]
],
[
[
"class A(TupleTransform): pass\n@A\ndef _(self, x): return x+1\n@A\ndef decodes(self, x): return x-1\n\nf = A()\nt = f((1,2.0))\ntest_eq_type(t, (2,3.0))\ntest_eq_type(f.decode(t), (1,2.0))",
"_____no_output_____"
]
],
[
[
"Type-constrained functions are applied to only matching elements of a tuple, and return annotations are only applied where matching.",
"_____no_output_____"
]
],
[
[
"class B(TupleTransform):\n def encodes(self, x:int): return Int(x+1)\n def encodes(self, x:str): return x+'1'\n def decodes(self, x:Int): return x//2\n\nf = B()\nstart = (1.,2,'3')\nt = f(start)\ntest_eq_type(t, (1.,Int(3),'31'))\ntest_eq(f.decode(t), (1.,Int(1),'31'))",
"_____no_output_____"
]
],
[
[
"The same behavior also works with `typing` module type classes.",
"_____no_output_____"
]
],
[
[
"class A(Transform): pass\n@A\ndef _(self, x:numbers.Integral): return x+1\n@A\ndef _(self, x:float): return x*3\n@A\ndef decodes(self, x:int): return x-1\n\nf = A()\nstart = 1.0\nt = f(start)\ntest_eq(t, 3.)\ntest_eq(f.decode(t), 3)\n\nf = A(as_item=False)\nstart = (1.,2,3.)\nt = f(start)\ntest_eq(t, (3.,3,9.))\ntest_eq(f.decode(t), (3.,2,9.))",
"_____no_output_____"
]
],
[
[
"Transform accepts lists",
"_____no_output_____"
]
],
[
[
"def a(x): return L(x_+1 for x_ in x)\ndef b(x): return L(x_-1 for x_ in x)\nf = TupleTransform(a,b)\n\nt = f((L(1,2),))\ntest_eq(t, (L(2,3),))\ntest_eq(f.decode(t), (L(1,2),))",
"_____no_output_____"
]
],
[
[
"## Export -",
"_____no_output_____"
]
],
[
[
"#hide\nfrom local.notebook.export import notebook2script\nnotebook2script(all_fs=True)",
"Converted 00_test.ipynb.\nConverted 01_core.ipynb.\nConverted 01a_torch_core.ipynb.\nConverted 01b_script.ipynb.\nConverted 01c_dataloader.ipynb.\nConverted 02_data_transforms.ipynb.\nConverted 03_data_pipeline.ipynb.\nConverted 05_data_core.ipynb.\nConverted 06_data_source.ipynb.\nConverted 07_vision_core.ipynb.\nConverted 08_pets_tutorial.ipynb.\nConverted 09_vision_augment.ipynb.\nConverted 11_layers.ipynb.\nConverted 11a_vision_models_xresnet.ipynb.\nConverted 12_optimizer.ipynb.\nConverted 13_learner.ipynb.\nConverted 14_callback_schedule.ipynb.\nConverted 15_callback_hook.ipynb.\nConverted 16_callback_progress.ipynb.\nConverted 17_callback_tracker.ipynb.\nConverted 18_callback_fp16.ipynb.\nConverted 19_callback_mixup.ipynb.\nConverted 20_metrics.ipynb.\nConverted 21_tutorial_imagenette.ipynb.\nConverted 30_text_core.ipynb.\nConverted 31_text_data.ipynb.\nConverted 32_text_models_awdlstm.ipynb.\nConverted 33_test_models_core.ipynb.\nConverted 34_callback_rnn.ipynb.\nConverted 35_tutorial_wikitext.ipynb.\nConverted 36_text_models_qrnn.ipynb.\nConverted 40_tabular_core.ipynb.\nConverted 41_tabular_model.ipynb.\nConverted 50_data_block.ipynb.\nConverted 90_notebook_core.ipynb.\nConverted 91_notebook_export.ipynb.\nConverted 92_notebook_showdoc.ipynb.\nConverted 93_notebook_export2html.ipynb.\nConverted 94_index.ipynb.\nConverted 95_utils_test.ipynb.\nConverted 96_data_external.ipynb.\nConverted notebook2jekyll.ipynb.\nConverted tmp.ipynb.\n"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
d0eb3444ff393d62b65a13216bb59329d3a8b8f9 | 13,549 | ipynb | Jupyter Notebook | src/Textgen_Notebook.ipynb | spinfo/LSS_DLwithText | 5eeeeda12c3e938894b9d43793a7f7f938244a36 | [
"MIT"
] | 4 | 2018-10-11T13:48:54.000Z | 2019-07-01T15:52:36.000Z | src/Textgen_Notebook.ipynb | spinfo/LSS_DLwithText | 5eeeeda12c3e938894b9d43793a7f7f938244a36 | [
"MIT"
] | null | null | null | src/Textgen_Notebook.ipynb | spinfo/LSS_DLwithText | 5eeeeda12c3e938894b9d43793a7f7f938244a36 | [
"MIT"
] | 4 | 2018-09-20T14:12:24.000Z | 2018-09-28T09:46:43.000Z | 30.378924 | 461 | 0.541147 | [
[
[
"# Text Generation with Neural Networks\n\nImport necessary packages for preprocessing, model building, etc. We follow the steps described in the theoretical part of this summer school as follows:\n\n0. Define Reseach Goal (already done)\n2. Retrieve Data\n3. Prepare Data\n4. Explore Data\n5. Model Data\n6. Present and automate Model",
"_____no_output_____"
]
],
[
[
"from keras.callbacks import LambdaCallback\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation\nfrom keras.layers import LSTM\nfrom keras.optimizers import RMSprop\nfrom keras.utils.data_utils import get_file\nfrom keras.models import load_model\nfrom keras import backend as K\nimport numpy as np\nimport random\nimport sys\nimport io",
"_____no_output_____"
]
],
[
[
"# 1. Retrieve Data\n\nLoad your data! You can pick up data from everywhere, such as plain text, HTML, source code, etc.\nYou can either automatically download with Keras get_file function or download it manually and import it in this notebook.\n\n## Example Data Set\n[trump.txt](https://raw.githubusercontent.com/harshilkamdar/trump-tweets/master/trump.txt)",
"_____no_output_____"
]
],
[
[
"#path = get_file('trump.txt', origin='https://raw.githubusercontent.com/harshilkamdar/trump-tweets/master/trump.txt')\ntext = io.open('resources/shakespeare.txt', encoding='utf-8').read().lower()\n\nprint('corpus length:', len(text))",
"_____no_output_____"
]
],
[
[
"# 2. Prepare Data\n\nAs described in the theoretical part of this workshop we need to convert our text into a word embedding that can be processed by a (later) defined Neural Network. \n",
"_____no_output_____"
],
[
"## 2.1. Create Classes \nThe goal after this step is to have a variable which contains the distinct characters of the text. Characters can be letters, digits, punctions, new lines, spaces, etc.\n\n### Example:\nLet's assume we have the following text as input: \"hallo. \"\n\nAfter the following step, we want to have all distinct characters, i.e.:\n\n``[ \"h\", \"a\", \"l\", \"o\", \".\", \" \" ] ``\n",
"_____no_output_____"
]
],
[
[
"chars = sorted(list(set(text)))\nprint('total chars:', len(chars))",
"_____no_output_____"
]
],
[
[
"## 2.2. Create Training Set\n\nIn the following section we need to create our test set based on our text. The idea is to map a sequence of characters to a class. In this case, a class is one of our distinct characters defined in the previous task. This means that a sequence of characters predicts the next character. This is important for the later model to know which characters come after specific sequences. The sequence length can be chosen. So try out different squence length.\n\n### Example:\nOur text is still: \"hallo. \"\nSequence length: 2 (i.e. 2 characters predict the next character)\n\nThe result (training set) should be defined as follows:\n\n``\nSeuences --> Class\n \"ha\" --> \"l\"\n \"al\" --> \"l\"\n \"ll\" --> \"o\"\n \"lo\" --> \".\"\n \"o.\" --> \" \"\n``\n\nYou can read the previous example like this: Squence \"ha\" predicts the next character \" l \", sequence \"al\" predicts next character \" l \" and so on.",
"_____no_output_____"
]
],
[
[
"seqlen = 40 # Sequence length parameter\nstep = 5 # Determines the how many characters the window should be shifted in the text \nsequences = [] # List of sequences\nchar_class = [] # Corresponding class of each sequence\n\nfor i in range(0, len(text) - seqlen, step):\n sequences.append(text[i: i + seqlen])\n char_class.append(text[i + seqlen])\nprint('#no sequences:', len(sequences))",
"_____no_output_____"
]
],
[
[
"## 2.3. Check your Data\n\nNow that we processed our data, it's time to understand what we have built so far.",
"_____no_output_____"
]
],
[
[
"for idx in range(len(sequences[:10])):\n print(sequences[idx], \":\" , char_class[idx])",
"_____no_output_____"
],
[
"# Print from 1st to 10th character \nchars[:10]",
"_____no_output_____"
],
[
"# Print from 150th to 160th character :-)\nchars[150:160]",
"_____no_output_____"
]
],
[
[
"## 2.4. Vectorization of Training Sequences\n\nThe following section describes the desired form of our final training set. \n\ntext: \"hallo. \".\nAs defined above we have a couple of sequences mapping to the next appearing character in the text (e.g. \"ha\" mapping to \"l\"). But first of all, we transform each sequence to the following one-hot-encoded matrix.\n\n**Example:** \nsequence \"ha\" maps to the following matrix\n\n| | h | a | l | o | . | ' ' |\n|-----|-----|-----|-----|-----|-----|-----|\n| h | 1 | 0 | 0 | 0 | 0 | 0 |\n| a | 0 | 1 | 0 | 0 | 0 | 0 |\n\nnext sequence \"al\" maps to the following matrix\n\n| | h | a | l | o | . | ' ' |\n|-----|-----|-----|-----|-----|-----|-----|\n| a | 0 | 1 | 0 | 0 | 0 | 0 |\n| l | 0 | 0 | 1 | 0 | 0 | 0 |\n\n... And so on\n\n## 2.5. Vectorization of Target Classes\n\nWe build our target classes similar to the training set. We need a one hot-encoded vector for each target (which is a character).\n\n**Example:** for target char \"l\" the vector looks like this\n\n| | h | a | l | o | . | ' ' |\n|-----|-----|-----|-----|-----|-----|-----|\n| l | 0 | 0 | 1 | 0 | 0 | 0 |",
"_____no_output_____"
]
],
[
[
"# Indexed characters as dictionary\nchar_indices = dict((c, i) for i, c in enumerate(chars))\n\n# Both matrices will initialized with zeros\ntraining_set = np.zeros((len(sequences), seqlen, len(chars)), dtype=np.bool)\ntarget_char = np.zeros((len(sequences), len(chars)), dtype=np.bool)\nfor i, sequence in enumerate(sequences):\n for t, char in enumerate(sequence):\n training_set[i, t, char_indices[char]] = 1\n target_char[i, char_indices[char_class[i]]] = 1",
"_____no_output_____"
],
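[
"# A quick sanity check of the encoding above (illustrative; it only uses the\n# variables built in the previous cells). Each time step of a training\n# sequence should contain exactly one True, and the target row should point\n# at the character that follows the sequence in the text.\nprint('trues per time step:', training_set[0].sum(axis=1))  # expect all ones\nprint('target matches next char:', np.argmax(target_char[0]) == char_indices[char_class[0]])",
"_____no_output_____"
]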
],
[
[
"# 3. Explore Data",
"_____no_output_____"
]
],
[
[
"# Let's check the shape of the training_set\n\ntraining_set.shape",
"_____no_output_____"
]
],
[
[
"Output: (x, y, z)\n\n x = number of all sequences to test\n y = window size to predict the next character\n z = number of all appearing characters in text (for one-hot-enconding) ",
"_____no_output_____"
]
],
[
[
"# Let's check the shape of the target_char (act as our target classes)\n\ntarget_char.shape",
"_____no_output_____"
]
],
[
[
"Output: (x, y)\n\n x = number of all sequences to test\n y = the mapping of each sequence to the next character\n",
"_____no_output_____"
],
[
"# 4. Model data\n\nLet's get down to business! Create your model.\n\nTry different model configuration (see [keras doc](https://keras.io/models/about-keras-models/#about-keras-models)) ",
"_____no_output_____"
]
],
[
[
"model = Sequential()\n\n# build the model: a LSTM\nmodel = Sequential()\nmodel.add(LSTM(128, input_shape=(seqlen, len(chars))))\nmodel.add(Dense(len(chars)))\nmodel.add(Activation('softmax'))\noptimizer = RMSprop(lr=0.01)\n\nmodel.compile(loss='categorical_crossentropy', optimizer=optimizer)\n\nmodel.summary()",
"_____no_output_____"
],
[
"def getNextCharIdx(preds, temperature=1.0):\n # helper function to sample an index from a probability array\n preds = np.asarray(preds).astype('float64')\n preds = np.log(preds) / temperature\n exp_preds = np.exp(preds)\n preds = exp_preds / np.sum(exp_preds)\n probas = np.random.multinomial(1, preds, 1)\n return np.argmax(probas)",
"_____no_output_____"
],
[
"# Creation of reverse char index, to get the char for the predicted class\nindices_char = dict((i, c) for i, c in enumerate(chars))\n\ndef on_epoch_end(epoch, logs):\n # Function invoked at end of each epoch. Prints generated text.\n print()\n print('----- Generating text after Epoch: %d' % epoch)\n start_index = random.randint(0, len(text) - seqlen - 1)\n for diversity in [1, 0.1, 0.5]:\n print('----- diversity:', diversity)\n\n generated = ''\n sentence = text[start_index: start_index + seqlen]\n generated += sentence\n print('----- Generating with seed: \"' + sentence + '\"')\n sys.stdout.write(generated)\n\n for i in range(1000):\n x_pred = np.zeros((1, seqlen, len(chars)))\n for t, char in enumerate(sentence):\n x_pred[0, t, char_indices[char]] = 1.\n\n preds = model.predict(x_pred, verbose=0)[0]\n next_index = getNextCharIdx(preds, diversity)\n next_char = indices_char[next_index]\n\n generated += next_char\n sentence = sentence[1:] + next_char\n\n sys.stdout.write(next_char)\n sys.stdout.flush()\n print()",
"_____no_output_____"
],
[
"print_callback = LambdaCallback(on_epoch_end=on_epoch_end)",
"_____no_output_____"
]
],
[
[
"# 5. Evaluate Model\n\nWe are not at the sweet part of the model. Let's fit our model and see what it prints!",
"_____no_output_____"
]
],
[
[
"model.fit(training_set, target_char,\n batch_size=128,\n epochs=150,\n callbacks=[print_callback])",
"_____no_output_____"
]
],
[
[
"# Present and Automate\n\nHaving a model trained for hours is a valuable asset! We need now to store the model and use it to solve the problem we wanted to solve with Machine Learning. Keras has a simple function to save a model to the local file system and also a function to load the model again and have it ready for our task!",
"_____no_output_____"
]
],
[
[
"model.save('shakespeareModel.h5')\nmodel = load_model('shakespeareModel.h5')",
"_____no_output_____"
],
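[
"# A short illustrative example of using the reloaded model for generation.\n# It reuses the helpers and variables defined earlier in this notebook\n# (getNextCharIdx, char_indices, indices_char, seqlen, chars, text).\nseed_start = random.randint(0, len(text) - seqlen - 1)\nsentence = text[seed_start: seed_start + seqlen]\ngenerated = sentence\nfor i in range(200):\n    x_pred = np.zeros((1, seqlen, len(chars)))\n    for t, char in enumerate(sentence):\n        x_pred[0, t, char_indices[char]] = 1.\n    preds = model.predict(x_pred, verbose=0)[0]\n    next_char = indices_char[getNextCharIdx(preds, 0.5)]\n    generated += next_char\n    sentence = sentence[1:] + next_char\nprint(generated)",
"_____no_output_____"
]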
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
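"code",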
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
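"code",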
"code"
]
] |
d0eb4ee17913e1a22abfe22145b24bc7774a7036 | 56,118 | ipynb | Jupyter Notebook | String cosine similarity score-1750newsheadlines.ipynb | zarmeen92/ReadiTopics-Topic-Labeling | 0bc8923bbff076236ca7f6410ab6e7d59419f7d5 | [
"MIT"
] | null | null | null | String cosine similarity score-1750newsheadlines.ipynb | zarmeen92/ReadiTopics-Topic-Labeling | 0bc8923bbff076236ca7f6410ab6e7d59419f7d5 | [
"MIT"
] | null | null | null | String cosine similarity score-1750newsheadlines.ipynb | zarmeen92/ReadiTopics-Topic-Labeling | 0bc8923bbff076236ca7f6410ab6e7d59419f7d5 | [
"MIT"
] | null | null | null | 28.808008 | 119 | 0.451887 | [
[
[
"##take Max cosine/jaccard similarity from 5 annotators",
"_____no_output_____"
],
[
"from sklearn.metrics import cohen_kappa_score\nimport pandas as pd\nimport sklearn\nfrom rouge_score import rouge_scorer\nfrom similarity.cosine import Cosine\nfrom similarity.jaccard import Jaccard\nimport numpy as np",
"_____no_output_____"
],
[
"def compute_similarity_cosine(text1,text2):\n cosine = Cosine(2)\n p0 = cosine.get_profile(text1)\n p1 = cosine.get_profile(text2)\n score = cosine.similarity_profiles(p0, p1)\n return score\ndef compute_similarity_jaccard(text1,text2):\n jaccard = Jaccard(2)\n score = jaccard.similarity(text1, text2)\n return score\n",
"_____no_output_____"
],
[
"new_labels = pd.read_excel('proposed_gs_otherapproaches_1750news.xlsx',sheet_name=1)",
"_____no_output_____"
],
[
"new_labels.columns",
"_____no_output_____"
],
[
"#annotators = ['Zainab GS','Solat GS','Noor GS','Dr. Sajjad GS','Sumaira GS']\nannotators = ['Majority Voting']\ncomparison_methods = new_labels.columns[7:len(new_labels.columns)]",
"_____no_output_____"
],
[
"comparison_methods",
"_____no_output_____"
],
[
"#cosine similarity\ncosine_scores_method=[] #average cosine score for each method\nfor x in comparison_methods:\n cosine_score = []\n print(\"Method: %s\"%x)\n for index,row in new_labels.iterrows():\n cs_for_each_annotator=[]\n for a in annotators:\n print(a)\n cs_for_each_annotator.append(compute_similarity_cosine(row[x],row[a]))\n print(cs_for_each_annotator)\n cosine_score.append(np.max(cs_for_each_annotator))\n cosine_scores_method.append(np.average(cosine_score))",
"Method: Zero-Order\nMajority Voting\n[1.0000000000000002]\nMajority Voting\n[0.806225774829855]\nMajority Voting\n[0.492365963917331]\nMajority Voting\n[0.5477225575051661]\nMajority Voting\n[0.5698028822981898]\nMajority Voting\n[0.7569781192451159]\nMajority Voting\n[0.0]\nMajority Voting\n[0.5196152422706632]\nMajority Voting\n[0.9999999999999999]\nMajority Voting\n[0.7745966692414833]\nMajority Voting\n[0.08362420100070908]\nMajority Voting\n[0.6666666666666666]\nMajority Voting\n[0.5547001962252291]\nMethod: M-Order\nMajority Voting\n[0.7385489458759964]\nMajority Voting\n[0.806225774829855]\nMajority Voting\n[0.492365963917331]\nMajority Voting\n[0.5477225575051661]\nMajority Voting\n[0.5698028822981898]\nMajority Voting\n[0.7569781192451159]\nMajority Voting\n[0.0]\nMajority Voting\n[0.5196152422706632]\nMajority Voting\n[0.9999999999999999]\nMajority Voting\n[0.7745966692414833]\nMajority Voting\n[0.08362420100070908]\nMajority Voting\n[0.3849001794597505]\nMajority Voting\n[0.5547001962252291]\nMethod: T-Order\nMajority Voting\n[0.7385489458759964]\nMajority Voting\n[0.806225774829855]\nMajority Voting\n[0.492365963917331]\nMajority Voting\n[0.5477225575051661]\nMajority Voting\n[0.4652421051992354]\nMajority Voting\n[0.9999999999999999]\nMajority Voting\n[0.0]\nMajority Voting\n[0.5196152422706632]\nMajority Voting\n[0.9999999999999999]\nMajority Voting\n[0.7745966692414833]\nMajority Voting\n[0.08362420100070908]\nMajority Voting\n[0.3849001794597505]\nMajority Voting\n[0.5547001962252291]\nMethod: ProposedMethod\nMajority Voting\n[0.3888888888888889]\nMajority Voting\n[1.0000000000000002]\nMajority Voting\n[0.9198662110078]\nMajority Voting\n[0.30618621784789724]\nMajority Voting\n[0.41502867831964485]\nMajority Voting\n[0.4767312946227961]\nMajority Voting\n[1.0]\nMajority Voting\n[0.3872983346207417]\nMajority Voting\n[0.5291502622129182]\nMajority Voting\n[0.75]\nMajority Voting\n[0.7817359599705717]\nMajority Voting\n[0.23570226039551587]\nMajority Voting\n[1.0]\nMethod: Proposed Method with Position Info(Stanza)\nMajority Voting\n[0.7092081432669752]\nMajority Voting\n[1.0000000000000002]\nMajority Voting\n[0.9198662110078]\nMajority Voting\n[1.0000000000000002]\nMajority Voting\n[0.5698028822981898]\nMajority Voting\n[0.9999999999999999]\nMajority Voting\n[1.0]\nMajority Voting\n[0.7637626158259734]\nMajority Voting\n[0.9999999999999999]\nMajority Voting\n[0.7745966692414833]\nMajority Voting\n[0.5244044240850758]\nMajority Voting\n[0.23570226039551587]\nMajority Voting\n[1.0]\nMethod: Proposed Method with Position Info(Used in Tallip version 1)\nMajority Voting\n[1.0000000000000002]\nMajority Voting\n[1.0000000000000002]\nMajority Voting\n[0.9198662110078]\nMajority Voting\n[1.0000000000000002]\nMajority Voting\n[0.4103049699311091]\nMajority Voting\n[0.9999999999999999]\nMajority Voting\n[1.0]\nMajority Voting\n[0.7637626158259734]\nMajority Voting\n[0.9999999999999999]\nMajority Voting\n[0.75]\nMajority Voting\n[0.46442036401282394]\nMajority Voting\n[0.2626128657194451]\nMajority Voting\n[1.0]\nMethod: Proposed Method with PositionInfo(using Spacy patterns for rules, CRF tags)\nMajority Voting\n[1.0000000000000002]\nMajority Voting\n[0.7687061147858074]\nMajority Voting\n[0.9198662110078]\nMajority Voting\n[1.0000000000000002]\nMajority Voting\n[0.4652421051992354]\nMajority Voting\n[0.9999999999999999]\nMajority Voting\n[1.0]\nMajority Voting\n[0.7637626158259734]\nMajority Voting\n[0.9999999999999999]\nMajority Voting\n[0.75]\nMajority 
Voting\n[0.46442036401282394]\nMajority Voting\n[0.3922322702763681]\nMajority Voting\n[1.0]\nMethod: 3keywords Combined with Any(any keyword found in ngram, crf pos tag spacy pattern for validation)\nMajority Voting\n[0.5222329678670935]\nMajority Voting\n[0.8498365855987976]\nMajority Voting\n[1.0]\nMajority Voting\n[0.43301270189221935]\nMajority Voting\n[0.5698028822981898]\nMajority Voting\n[0.8164965809277259]\nMajority Voting\n[0.6030226891555273]\nMajority Voting\n[0.7500000000000001]\nMajority Voting\n[0.3273268353539886]\nMajority Voting\n[0.40201512610368484]\nMajority Voting\n[0.0]\nMajority Voting\n[0.6030226891555273]\nMajority Voting\n[0.6324555320336759]\nMethod: Proposed Method with PositionInfo(using Spacy patterns for rules,Stanza POS tags)\nMajority Voting\n[1.0000000000000002]\nMajority Voting\n[0.7687061147858074]\nMajority Voting\n[0.9198662110078]\nMajority Voting\n[1.0000000000000002]\nMajority Voting\n[0.4103049699311091]\nMajority Voting\n[0.9999999999999999]\nMajority Voting\n[1.0]\nMajority Voting\n[0.7637626158259734]\nMajority Voting\n[0.9999999999999999]\nMajority Voting\n[0.75]\nMajority Voting\n[0.5154060995580371]\nMajority Voting\n[0.5345224838248488]\nMajority Voting\n[1.0]\nMethod: Centroid\nMajority Voting\n[0.2148344622118299]\nMajority Voting\n[0.06880209161537815]\nMajority Voting\n[0.23596995186213474]\nMajority Voting\n[0.0]\nMajority Voting\n[0.21155435413917803]\nMajority Voting\n[0.3806934938134405]\nMajority Voting\n[0.12403473458920847]\nMajority Voting\n[0.10741723110591495]\nMajority Voting\n[0.32566947363946475]\nMajority Voting\n[0.041344911529736156]\nMajority Voting\n[0.07479575920067658]\nMajority Voting\n[0.062017367294604234]\nMajority Voting\n[0.31008683647302115]\nMethod: TextRankwindow3\nMajority Voting\n[0.7947194142390263]\nMajority Voting\n[0.609449400220044]\nMajority Voting\n[0.3553345272593508]\nMajority Voting\n[0.5477225575051661]\nMajority Voting\n[0.48900964692182575]\nMajority Voting\n[0.7569781192451159]\nMajority Voting\n[0.3086066999241838]\nMajority Voting\n[0.5196152422706632]\nMajority Voting\n[0.7337993857053429]\nMajority Voting\n[0.7745966692414833]\nMajority Voting\n[0.66332495807108]\nMajority Voting\n[0.31622776601683794]\nMajority Voting\n[0.48507125007266594]\nMethod: TextRankWindow24\nMajority Voting\n[0.7947194142390263]\nMajority Voting\n[0.806225774829855]\nMajority Voting\n[0.492365963917331]\nMajority Voting\n[0.5477225575051661]\nMajority Voting\n[0.5449492609130661]\nMajority Voting\n[0.7569781192451159]\nMajority Voting\n[0.29488391230979427]\nMajority Voting\n[0.5196152422706632]\nMajority Voting\n[0.6236095644623235]\nMajority Voting\n[0.7745966692414833]\nMajority Voting\n[0.66332495807108]\nMajority Voting\n[0.26490647141300877]\nMajority Voting\n[0.48507125007266594]\nMethod: Topic Rank\nMajority Voting\n[0.7745966692414834]\nMajority Voting\n[0.0]\nMajority Voting\n[0.6030226891555273]\nMajority Voting\n[1.0000000000000002]\nMajority Voting\n[0.5321811563901744]\nMajority Voting\n[0.9999999999999999]\nMajority Voting\n[0.7559289460184544]\nMajority Voting\n[0.7637626158259734]\nMajority Voting\n[0.9999999999999999]\nMajority Voting\n[1.0]\nMajority Voting\n[1.0]\nMajority Voting\n[0.6324555320336759]\nMajority Voting\n[1.0]\nMethod: PositionRank\nMajority Voting\n[0.7947194142390263]\nMajority Voting\n[0.7089175569585667]\nMajority Voting\n[0.492365963917331]\nMajority Voting\n[0.46291004988627577]\nMajority Voting\n[0.48900964692182575]\nMajority Voting\n[0.7569781192451159]\nMajority 
Voting\n[0.6030226891555273]\nMajority Voting\n[0.5196152422706632]\nMajority Voting\n[0.7337993857053429]\nMajority Voting\n[0.8320502943378437]\nMajority Voting\n[0.6276459144608478]\nMajority Voting\n[0.30499714066520933]\nMajority Voting\n[0.3779644730092272]\nMethod: Multipartite Rank(Adding ADP)\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.6030226891555273]\nMajority Voting\n[1.0000000000000002]\nMajority Voting\n[0.5863019699779287]\nMajority Voting\n[0.8660254037844386]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[1.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMethod: Multipartite Rank\nMajority Voting\n[0.7559289460184545]\nMajority Voting\n[0.806225774829855]\nMajority Voting\n[0.6030226891555273]\nMajority Voting\n[1.0000000000000002]\nMajority Voting\n[0.5698028822981898]\nMajority Voting\n[0.9999999999999999]\nMajority Voting\n[0.7559289460184544]\nMajority Voting\n[0.7637626158259734]\nMajority Voting\n[0.9999999999999999]\nMajority Voting\n[1.0]\nMajority Voting\n[1.0]\nMajority Voting\n[0.6324555320336759]\nMajority Voting\n[1.0]\nMethod: Yake\nMajority Voting\n[0.7222222222222222]\nMajority Voting\n[0.7210366836744467]\nMajority Voting\n[0.8300573566392896]\nMajority Voting\n[0.447213595499958]\nMajority Voting\n[0.8528028654224417]\nMajority Voting\n[0.9309493362512627]\nMajority Voting\n[0.48507125007266594]\nMajority Voting\n[0.4815434123430769]\nMajority Voting\n[0.6069769786668838]\nMajority Voting\n[0.6882472016116852]\nMajority Voting\n[0.07312724241271307]\nMajority Voting\n[0.5345224838248488]\nMajority Voting\n[0.4588314677411235]\nMethod: JensenShannon\nMajority Voting\n[0.0]\nMajority Voting\n[0.09805806756909201]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.25712973861328997]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.2182178902359924]\nMajority Voting\n[0.0]\nMajority Voting\n[0.4714045207910316]\nMajority Voting\n[0.21938172723813917]\nMajority Voting\n[0.7559289460184544]\nMajority Voting\n[0.0]\n"
],
[
"arr = np.array(cosine_scores_method)\ntop_3_methods_index = arr.argsort()[-4:][::-1]\nfor x in top_3_methods_index:\n print(cosine_scores_method[x])\n print(comparison_methods[x])",
"0.8374713373984716\nMultipartite Rank\n0.8201975765333521\nProposed Method with PositionInfo(using Spacy patterns for rules,Stanza POS tags)\n0.8131513097305502\nProposed Method with Position Info(Used in Tallip version 1)\n0.8095561293160007\nProposed Method with PositionInfo(using Spacy patterns for rules, CRF tags)\n"
],
[
"comparison_results = pd.DataFrame({'Method':comparison_methods,'Cosine_Similarity': cosine_scores_method})",
"_____no_output_____"
],
[
"comparison_results",
"_____no_output_____"
],
[
"#jaccard similarity\njaccard_scores_method=[] #average cosine score for each method\nfor x in comparison_methods:\n jaccard_score = []\n print(\"Method: %s\"%x)\n for index,row in new_labels.iterrows():\n js_for_each_annotator=[]\n for a in annotators:\n print(a)\n js_for_each_annotator.append(compute_similarity_jaccard(row[x],row[a]))\n print(cs_for_each_annotator)\n jaccard_score.append(np.max(js_for_each_annotator))\n jaccard_scores_method.append(np.average(jaccard_score))",
"Method: Zero-Order\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMethod: M-Order\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMethod: T-Order\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMethod: ProposedMethod\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMethod: Proposed Method with Position Info(Stanza)\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMethod: Proposed Method with Position Info(Used in Tallip version 1)\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMethod: Proposed Method with PositionInfo(using Spacy patterns for rules, CRF tags)\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMethod: 3keywords Combined with Any(any keyword found in ngram, crf pos tag spacy pattern for validation)\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMethod: Proposed Method with PositionInfo(using Spacy patterns for rules,Stanza POS tags)\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMethod: Centroid\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority 
Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMethod: TextRankwindow3\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMethod: TextRankWindow24\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMethod: Topic Rank\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMethod: PositionRank\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMethod: Multipartite Rank(Adding ADP)\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMethod: Multipartite Rank\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMethod: Yake\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMethod: JensenShannon\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\nMajority Voting\n[0.0]\n"
],
[
"arr = np.array(jaccard_scores_method)\ntop_3_methods_index = arr.argsort()[-4:][::-1]\nfor x in top_3_methods_index:\n print(jaccard_scores_method[x])\n print(comparison_methods[x])",
"0.7354851938643147\nProposed Method with Position Info(Used in Tallip version 1)\n0.7264152514152514\nMultipartite Rank\n0.7185400358477282\nProposed Method with PositionInfo(using Spacy patterns for rules,Stanza POS tags)\n0.7167433304219905\nProposed Method with PositionInfo(using Spacy patterns for rules, CRF tags)\n"
],
[
"comparison_results['Jaccard Similarity'] = jaccard_scores_method",
"_____no_output_____"
],
[
"comparison_results",
"_____no_output_____"
],
[
"fname=\"automatic evaluation 1750newsheadlines_UsingMaxofAnnotators\"+\".csv\"\ncomparison_results.to_csv(fname,sep='\\t',index=False)",
"_____no_output_____"
]
],
[
[
"## Gold Standard Cosine Similarity",
"_____no_output_____"
]
],
[
[
"gold_standard = new_labels.columns[1:7]",
"_____no_output_____"
],
[
"gold_standard",
"_____no_output_____"
],
[
"cs_annotators = pd.DataFrame(columns=gold_standard)\n",
"_____no_output_____"
],
[
"\nfor x in gold_standard:\n cosine_scores_method_x=[] #average cosine score for each method\n for y in gold_standard:\n cosine_score = []\n for index,row in new_labels.iterrows():\n cosine_score.append(compute_similarity_cosine(row[x],row[y]))\n cosine_scores_method_x.append(np.average(cosine_score))\n cs_annotators[x] = cosine_scores_method_x",
"_____no_output_____"
],
[
"cs_annotators",
"_____no_output_____"
],
[
"cs_annotators.to_csv('cosine_similarity_annotators.csv',sep='\\t',index=False)",
"_____no_output_____"
],
[
"js_annotators = pd.DataFrame(columns=gold_standard)\n\nfor x in gold_standard:\n jaccard_scores_method_x=[] #average jaccard score for each method\n for y in gold_standard:\n jaccard_score = []\n for index,row in new_labels.iterrows():\n jaccard_score.append(compute_similarity_jaccard(row[x],row[y]))\n jaccard_scores_method_x.append(np.average(jaccard_score))\n js_annotators[x] = jaccard_scores_method_x",
"_____no_output_____"
],
[
"js_annotators",
"_____no_output_____"
],
[
"js_annotators.to_csv('jaccard_similarity_annotators.csv',sep='\\t',index=False)",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d0eb5426ed107d6cdbdaa297149eac9c6920023c | 35,058 | ipynb | Jupyter Notebook | model_blocked_input.ipynb | levolz/ContextDependence | b5f9a4cc47bdd564d14ad3ea34d38dc9e5a05734 | [
"MIT"
] | 1 | 2021-04-27T21:22:15.000Z | 2021-04-27T21:22:15.000Z | model_blocked_input.ipynb | levolz/ContextDependence | b5f9a4cc47bdd564d14ad3ea34d38dc9e5a05734 | [
"MIT"
] | null | null | null | model_blocked_input.ipynb | levolz/ContextDependence | b5f9a4cc47bdd564d14ad3ea34d38dc9e5a05734 | [
"MIT"
] | 1 | 2021-06-14T20:42:11.000Z | 2021-06-14T20:42:11.000Z | 203.825581 | 29,708 | 0.89543 | [
[
[
"## Variant of the Blocked Input Model in which the stop process decelerates the go process by a rate that varies across trials",
"_____no_output_____"
]
],
[
[
"import numpy\nimport random\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport seaborn\nimport pandas\nimport matplotlib.patches as patches\nfrom matplotlib.ticker import FormatStrFormatter\n\n%matplotlib inline",
"_____no_output_____"
],
[
"params={'mugo':.2,\n 'mustop':.8, \n 'threshold':60,\n 'nondecisiongo':50,\n 'nondecisionstop':50,\n 'inhibitionParam':1, \n 'ssds':[1,50,100,150, 200,250, 300, 350, 400, 450, 500,3000],\n 'nreps':1000,\n 'maxtime':1000}\n\ndef interactiverace(params):\n stopaccumsave = []\n mustopsave = []\n stopsave = []\n meanrtgo = numpy.zeros(len(params['ssds']))\n presp = numpy.zeros(len(params['ssds']));\n \n for irep in range(params['nreps']):\n for j,ssd in enumerate(params['ssds']):\n stopsignaldelay = ssd\n goaccumulator = 0\n stopaccumulator = 0\n rtgo = 0\n itime = 0\n mustop = params['mustop']+numpy.random.normal(loc=0, scale=.7) \n if mustop < 0:\n mustop = 0\n mustopsave.append(mustop)\n while itime < params['maxtime'] and rtgo == 0: # single trial\n itime = itime + 1\n if itime < stopsignaldelay + params['nondecisionstop']:\n inhibition = 0\n else:\n inhibition = params['inhibitionParam']\n stopaccumulator = mustop + numpy.random.normal(loc=0, scale=.008)\n if stopaccumulator <= 0:\n stopaccumulator = 0;\n stopaccumsave.append(stopaccumulator)\n if itime >= params['nondecisiongo']:\n goaccumulator = goaccumulator + params['mugo'] - inhibition*stopaccumulator + numpy.random.normal(loc=0, scale=1)\n if goaccumulator <= 0:\n goaccumulator = 0;\n if goaccumulator > params['threshold']:\n if rtgo == 0:\n rtgo = itime;\n meanrtgo[j] += rtgo;\n if rtgo > 0:\n presp[j] += 1;\n\n for ssd in range(len(params['ssds'])):\n if presp[ssd] > 0:\n meanrtgo[ssd] = meanrtgo[ssd]/presp[ssd];\n presp[ssd] = presp[ssd]/params['nreps'];\n return(meanrtgo,presp,mustopsave,stopaccumsave)\n\nmeanrtgo,presp,mustopsave,stopaccumsave=interactiverace(params)\nprint(meanrtgo)\nprint(presp)\n#print(stopaccumsave)\n#print(mustopsave)",
"[407.08695652 394.6 383.40697674 363.03553299 321.43478261\n 302.06813187 307.7562777 318.64656212 329.71583514 334.87525988\n 339.04766734 342.256 ]\n[0.184 0.19 0.172 0.197 0.299 0.455 0.677 0.829 0.922 0.962 0.986 1. ]\n"
],
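[
"# Added illustration (not part of the original notebook): the model draws a\n# new stop drift rate on every trial, so inspect the sampled values returned\n# by interactiverace above (negative draws were clipped to zero).\nprint('mean mustop: %.3f, sd: %.3f' % (numpy.mean(mustopsave), numpy.std(mustopsave)))\nplt.hist(mustopsave, bins=50)\nplt.xlabel('per-trial stop drift rate (mustop)')\nplt.ylabel('count')\nplt.show()",
"_____no_output_____"
],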
[
"plt.figure(figsize=(10,5))\nplt.subplot(1,2,1)\nplt.plot(params['ssds'][:11],meanrtgo[:11] - meanrtgo[11])\nplt.plot([params['ssds'][0],params['ssds'][10]],[0,0],'k:')\nplt.xlabel('Stop signal delay')\nplt.ylabel('Violation (Stop Failure RT - No-Stop RT)')\nplt.subplot(1,2,2)\nplt.plot(params['ssds'][:11],presp[:11])\nplt.xlabel('Stop signal delay')\nplt.ylabel('Probability of responding')\nplt.axis([params['ssds'][0],params['ssds'][10],0,1])\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
d0eb55c8478a18d7293dfd03b31848f922961f2a | 71,957 | ipynb | Jupyter Notebook | notebooks/T9 - 2 - K Nearest Neighbors Implementation.ipynb | ledvir26/Machine-Learning-with-Python | dc26ee5f0793f32c87073d3249772ae2aa0a08ec | [
"MIT"
] | null | null | null | notebooks/T9 - 2 - K Nearest Neighbors Implementation.ipynb | ledvir26/Machine-Learning-with-Python | dc26ee5f0793f32c87073d3249772ae2aa0a08ec | [
"MIT"
] | null | null | null | notebooks/T9 - 2 - K Nearest Neighbors Implementation.ipynb | ledvir26/Machine-Learning-with-Python | dc26ee5f0793f32c87073d3249772ae2aa0a08ec | [
"MIT"
] | null | null | null | 59.715353 | 6,224 | 0.465306 | [
[
[
"# Creando nuestro propio KNN",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import style\nimport warnings\nfrom math import sqrt\nfrom collections import Counter",
"_____no_output_____"
],
[
"dataset = {\n 'k' : [[1,2],[2,3], [3,1]],\n 'r' : [[6,5],[7,7],[8,6]]\n}\nnew_point = [5,7]",
"_____no_output_____"
],
[
"[[plt.scatter(ii[0], ii[1], s=50, color=i) for ii in dataset[i]] for i in dataset]\nplt.scatter(new_point[0], new_point[1], s=100)",
"_____no_output_____"
],
[
"def k_nearest_neighbors(data, predict, k=3, verbose=False):\n \n if len(data) >= k:\n warnings.warn(\"K es un valor menor que el número total de elementos a votar!!\")\n \n distances = []\n for group in data:\n for feature in data[group]:\n # d = sqrt((feature[0]-predict[0])**2 + (feature[1]-predict[1])**2)\n # d = np.sqrt(np.sum((np.array(feature) - np.array(predict))**2))\n d = np.linalg.norm(np.array(feature) - np.array(predict))\n distances.append([d, group])\n if verbose:\n print(distances)\n \n votes = [i[1] for i in sorted(distances)[:k]] # sorted ordena por la primera columna\n if verbose:\n print(votes)\n \n vote_result = Counter(votes).most_common(1)\n if verbose:\n print(vote_result)\n \n return vote_result[0][0] #[('r',2), ('k', 1)]",
"_____no_output_____"
],
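[
"# Added illustration: the three distance formulas inside k_nearest_neighbors\n# (two of them commented out) are equivalent ways of computing the Euclidean\n# distance between two points.\np, q = [1, 2], [5, 7]\nprint(sqrt((p[0]-q[0])**2 + (p[1]-q[1])**2))\nprint(np.sqrt(np.sum((np.array(p) - np.array(q))**2)))\nprint(np.linalg.norm(np.array(p) - np.array(q)))",
"_____no_output_____"
],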
[
"new_point = [3,4]\nresult = k_nearest_neighbors(dataset, new_point)\nresult",
"_____no_output_____"
],
[
"[[plt.scatter(ii[0], ii[1], s=50, color=i) for ii in dataset[i]] for i in dataset]\nplt.scatter(new_point[0], new_point[1], s=100, color=result)",
"_____no_output_____"
]
],
[
[
"# Aplicando nueswtro KNN al Dataset del Cancer",
"_____no_output_____"
]
],
[
[
"import pandas as pd",
"_____no_output_____"
],
[
"df = pd.read_csv(\"../datasets/cancer/breast-cancer-wisconsin.data.txt\")",
"_____no_output_____"
],
[
"df.replace(\"?\", -999999, inplace=True)",
"_____no_output_____"
],
[
"df.columns = [\"name\", \"V1\", \"V2\", \"V3\", \"V4\", \"V5\", \"V6\", \"V7\", \"V8\", \"V9\", \"class\"]",
"_____no_output_____"
],
[
"df.drop([\"name\"], 1, inplace=True)",
"_____no_output_____"
],
[
"df.head()",
"_____no_output_____"
],
[
"full_data = df.astype(float).values.tolist()",
"_____no_output_____"
],
[
"full_data",
"_____no_output_____"
],
[
"import random",
"_____no_output_____"
],
[
"random.shuffle(full_data)",
"_____no_output_____"
],
[
"test_size = 0.2",
"_____no_output_____"
],
[
"train_set = {2:[], 4:[]}\ntest_set = {2:[], 4:[]}",
"_____no_output_____"
],
[
"train_data = full_data[:-int(test_size**len(full_data))]\ntest_data = full_data[-int(test_size*len(full_data)):]",
"_____no_output_____"
],
[
"for i in train_data:\n train_set[i[-1]].append(i[:-1])\n\nfor i in test_data:\n test_set[i[-1]].append(i[:-1])",
"_____no_output_____"
],
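[
"# Added check (illustrative): confirm that the two dictionaries together hold\n# the expected number of samples after the shuffle-and-slice split above.\nprint('train samples:', sum(len(v) for v in train_set.values()))\nprint('test samples:', sum(len(v) for v in test_set.values()))\nprint('total samples:', len(full_data))",
"_____no_output_____"
],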
[
"coorect = 0\ntotal = 0 \nfor group in test_set:\n for data in test_set[group]:\n vote = k_nearest_neighbors(train_set, data, k = 5)\n if group == vote:\n correct += 1\n total +=1\nprint(\"Eficacia del KNN = \", correct/total)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d0eb59347568c39e438f4cf0ed65666d0e4f289c | 521,850 | ipynb | Jupyter Notebook | Assignment2/Assignment2B.ipynb | avzero07/machine-learning-course | 5e132f740ba36c594066601d99c1ba14cc23c95b | [
"Apache-2.0"
] | null | null | null | Assignment2/Assignment2B.ipynb | avzero07/machine-learning-course | 5e132f740ba36c594066601d99c1ba14cc23c95b | [
"Apache-2.0"
] | null | null | null | Assignment2/Assignment2B.ipynb | avzero07/machine-learning-course | 5e132f740ba36c594066601d99c1ba14cc23c95b | [
"Apache-2.0"
] | 1 | 2021-02-05T06:39:30.000Z | 2021-02-05T06:39:30.000Z | 451.427336 | 173,544 | 0.93625 | [
[
[
"# ELEC 400M / EECE 571M Assignment 2: Neural networks\n(This assignment is a modified version of an assignment used in ECE 421 at the University of Toronto and kindly made available to us by the instructor.)\n\nIn this assignment, you will implement a neural network model for multi-class classification. The purpose is to demonstrate an understanding of the basic elements including training of neural network models. Hence, your implementation will be from scratch only using functions from the NumPy library.\n\nThe neural network you will be implementing has the following structure:\n* 3 layers: 1 input layer, 1 hidden layer with ReLU activation and 1 output layer with Softmax function \n* The loss function is the Cross Entropy Loss.\n* Training will be done using Gradient Descent with Momentum. ",
"_____no_output_____"
],
[
"## Data Set\nWe again consider the dataset of images of letters in different fonts contained in file notMNIST.npz (which btw is from http://yaroslavvb.blogspot.com/2011/09/notmnist-dataset.html). This time we consider 10 letters (\"A\" to \"J\"), which are all the letters contained in this data set, and we want to classfiy the images according to the letter they display. The figure below shows 30 randomly selected image samples for the letters.\n\n\n\n\nYou will apply the function `loadData` given below to load the data set, which includes 18720 images and their labels, which we also refer to as targets. This script organizes the data set into training, validation and test sets. ",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nimport matplotlib.pyplot as plt\nimport numpy as np",
"_____no_output_____"
],
[
"def loadData():\n with np.load('notMNIST.npz') as data:\n Data, Target = data['images'], data['labels']\n np.random.seed(521)\n randIndx = np.arange(len(Data))\n np.random.shuffle(randIndx)\n Data = Data[randIndx]/255.0\n Target = Target[randIndx]\n trainData, trainTarget = Data[:15000], Target[:15000]\n validData, validTarget = Data[15000:16000], Target[15000:16000]\n testData, testTarget = Data[16000:], Target[16000:]\n \n return trainData, validData, testData, trainTarget, validTarget, testTarget",
"_____no_output_____"
]
],
[
[
"## Data preprocessing [5 points]\n\nInput data: The classification should be based on the $d=28\\times 28=784$ intensity values in an image (as for Assignment 1).\n\nOutput data: Since you will be performing multi-class classification, the labels will be converted into a one-hot encoding format. \n\nPlease first briefly explain the meaning of one-hot encoding and why it is used (instead of keeping the numerical label values provided by the data set). State an example for a one-hot encoded label for the data set considered in this assignment.",
"_____no_output_____"
],
[
"**Solution:**\n\nOne-hot encoding is a way of encoding or representing labels or other categorical data. For instance, if there are 'n' classes that have unique labels, they may be numerically labeled using numbers from 0 to (n-1). Alternatively, they may also be one-hot encoded using a label that is n-bits wide and has the nth bit (coresponding to the nth numerical label) set to 1.\n\nFor most classification problems, machine learning algorithms may misinterpret numerical data and infer some sort of hierarchy or relationship amongst the data that is not necessarily relevant to the problem at hand. This can ultimately lead the algorithm to learn a false hypothesis. One-hot encoding is a way of pre-processing the labels to remove any such hierarchical relationship so that the machine learning algorithm does not infer anything that is otherwise not useful to the task it is trying to accomplish.\n\n__Example of One-Hot-Encoding__\n\nConsider a few numerical labels from the training data-set (numbered from 0-9), their corresponding one-hot encoded values are represented alongside.\n\n| Categorical Data (From Dataset) | Numerical Label | One-Hot Encoded Label |\n|:-------------------------------:|:---------------:|:---------------------:|\n| A | 0 | [1;0;0;0;0;0;0;0;0;0] |\n| B | 1 | [0;1;0;0;0;0;0;0;0;0] |\n| H | 8 | [0;0;0;0;0;0;0;0;1;0] |\n| E | 5 | [0;0;0;0;0;1;0;0;0;0] |",
"_____no_output_____"
],
[
"Now implement a function that one-hot encodes the labels (or targets) for the training, validation and test sets. ",
"_____no_output_____"
]
],
[
[
"def convertOneHot(trainTarget, validTarget, testTarget):\n trainTargetOneHot = np.zeros([trainTarget.shape[0],10])\n trainTargetOneHot[np.arange(trainTarget.size),trainTarget]=1\n \n validTargetOneHot = np.zeros([validTarget.shape[0],10])\n validTargetOneHot[np.arange(validTarget.size),validTarget] = 1\n \n testTargetOneHot = np.zeros([testTarget.shape[0],10])\n testTargetOneHot[np.arange(testTarget.size),testTarget]=1\n \n return trainTargetOneHot, validTargetOneHot, testTargetOneHot",
"_____no_output_____"
]
],
[
[
"## Structure of the network [2 points]\n\nSketch the structure of the network to classify the letters from the data set. Identify the dimensions of the network layers, include the activation functions, and do not forget the bias nodes. (You may sketch this by hand and upload a photo of your sketch.)",
"_____no_output_____"
],
[
"**Solution:**\n\n*Note: A scaled down version of the actual network has been sketched for the sake of space and clarity.*\n\n\n\nThe activation function in the hidden layer is the ReLU : $\\mathrm{ReLU}(x)=\\max(0,x).$\n\nThe activation function in the output layer is the Softmax : $ P_j = [\\sigma(\\mathbf{z})]_j = \\frac{\\mathrm{e}^{z_j}}{\\sum\\limits_{k=1}^{10}\\mathrm{e}^{z_{10}}}$ where $j=1,2,\\ldots, 10$, for $10$ classes.",
"_____no_output_____"
],
[
"## Helper functions [6 points]\nTo give the implementation of the network some structure, you will first implement five helper functions. \n\nUse Numpy arrays for your implementations, and organize data in vectors and matrices as appropriate for compact programming.\n\n1. `relu`: This function will accept one argument and return the ReLU activation: \n $$\\mathrm{ReLU}(x)=\\max(0,x).$$\n \n2. `softmax`: This function will accept one argument and return the softmax activations:\n $$ [\\sigma(\\mathbf{z})]_j = \\frac{\\mathrm{e}^{z_j}}{\\sum\\limits_{k=1}^K\\mathrm{e}^{z_k}},$$ $j=1,2,\\ldots, K$, for $K$ classes.\n\n3. `computeLayer`: This function will accept two arguments, the input vector $\\mathbf{x}$ for a layer and the weight matrix $\\mathbf{W}$, and return a vector $\\mathbf{s}=\\mathbf{W}^T\\mathbf{x}$, i.e., the input to the activation function of the layer (the notation for variables from the textbook is used). Don't forget to account for the bias term (which can be included in an augmented vector $\\mathbf{x}$ as in the textbook).\n\n4. `CE`: This function will accept two arguments, the one-hot encoded labels $\\mathbf{y}_n$ and the inputs $\\mathbf{s}_n$ to the softmax function, $n=1,2,\\ldots N$. It will return the cross entropy loss\n$$\\mathrm{E}_{\\mathrm{in}}=-\\frac{1}{N}\\sum\\limits_{n=1}^N\\sum\\limits_{k=1}^Ky_{n,k}\\log([\\sigma(\\mathbf{s}_n)]_k)$$\n\n5. `gradCE`: This function will accept two arguments, the labels and the inputs to the softmax function. It will return the gradient of the cross entropy loss with respect to the inputs (i.e., it returns the sensivity vector for the output layer as introduced in the textbook). \n\nFirst state the analytical expression for the gradient used in `gradCE` and then implement the five helper functions.",
"_____no_output_____"
],
[
"**Solution:**\n\nThe analytical expression for the gradient used in gradCE is given by,\n\n$$\\delta^{\\left(L\\right)}_{n}=\\frac{\\partial e_{n}}{\\partial S^{L}_{n}}=\\sigma\\left(S^{L}_{n}\\right)-y_{n}$$\n\nWhere $\\sigma$ refers to the softmax function and $n$ is associated with the $n^{th}$ input. That is, $n \\in [0,N]$ ",
"_____no_output_____"
]
],
[
[
"def relu(x):\n return np.maximum(0,x)",
"_____no_output_____"
],
[
"def softmax(x):\n maxes = np.amax(x,axis=0)\n maxes = maxes.reshape(1,maxes.shape[0])\n x = x - maxes\n op = np.exp(x)/sum(np.exp(x))\n return op",
"_____no_output_____"
],
[
"def computeLayer(x,W):\n return np.matmul(W.T,x)",
"_____no_output_____"
],
[
"def CE(target, prediction):\n return (-1/target.shape[1])*(np.sum(np.multiply(target,np.log(1E-15+softmax(prediction)))))",
"_____no_output_____"
],
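[
"# Added sanity check (illustrative, not part of the assignment): with classes\n# along axis 0 and samples along axis 1, the softmax columns should sum to one\n# and a confident correct prediction should give a small cross entropy loss.\ny = np.array([[1.0, 0.0], [0.0, 1.0], [0.0, 0.0]])  # one-hot, 3 classes x 2 samples\ns = np.array([[5.0, 0.0], [0.0, 5.0], [0.0, 0.0]])  # inputs to the softmax\nprint(softmax(s).sum(axis=0))  # expect [1. 1.]\nprint(CE(y, s))                # expect a small loss",
"_____no_output_____"
],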
[
"def gradCE(target, prediction):\n return softmax(prediction)-target",
"_____no_output_____"
]
],
[
[
"## Backpropagation [2 points]\n\nThe training of the network will be done via backpropagation. First derive the following gradients:\n1. $\\frac{\\partial E_{\\mathrm{in}}}{\\partial \\mathbf{W}^{\\mathrm{o}}}$, where $\\mathbf{W}^{\\mathrm{o}}$ is the weight matrix of the output layer.\n\n2. $\\frac{\\partial E_{\\mathrm{in}}}{\\partial \\mathbf{W}^{\\mathrm{h}}}$, where $\\mathbf{W}^{\\mathrm{h}}$ is the weight matrix of the hidden layer.\n\nWrite the results using the steps and notation used in the textbook.",
"_____no_output_____"
],
[
"**Solution:**\n\nWe know that,\n\n$$E_{in}=\\frac{1}{N}\\sum_{n=1}^{N}e_{n}$$\n\n$$\\frac{\\partial E_{in}}{\\partial W^{\\left(l\\right)}}=\\frac{1}{N}\\sum_{n=1}^{N}\\frac{\\partial e_{n}}{\\partial W^{\\left(l\\right)}}$$\n\nwhere,\n\n$$\\frac{\\partial e_{n}}{\\partial W^{\\left(l\\right)}}=\\frac{\\partial S_n^{\\left(l\\right)}}{\\partial W^{\\left(l\\right)}} . \\frac{\\partial e_n}{\\partial S_n^{\\left(l\\right)}}=\\frac{\\partial \\left( \\left(W^{\\left(l\\right)}\\right)^T.x_n^{l-1}\\right)}{\\partial W^{\\left(l\\right)}}.\\frac{\\partial e_n}{\\partial S_n^{\\left(l\\right)}}$$\n\nthus,\n\n$$\\frac{\\partial e_{n}}{\\partial W^{\\left(l\\right)}}=x_n^{l-1}.\\frac{\\partial e_n}{\\partial S_n^{\\left(l\\right)}}=x_n^{l-1}\\left(\\delta_n^{l}\\right)^T\\ \\ -----\\ Equation\\ 1$$\n\nWhere $\\frac{\\partial e_n}{\\partial S_n^{\\left(l\\right)}}=\\delta_n^{l}$ is the sensitivity vector at layer $l$. The gradients for each layer is given by,\n\n**1. $\\frac{\\partial E_{in}}{\\partial W^{O}}$ where $W^{O}$ is the weight matrix of the output layer.**\n\n$$\\frac{\\partial E_{in}}{\\partial W^{O}}=\\frac{1}{N}\\sum_{n=1}^{N}\\frac{\\partial e_{n}}{\\partial W^{O}}=\\frac{1}{N}\\sum_{n=1}^{N}\\left(x_n^{O-1}\\left(\\delta_n^{O}\\right)^T\\right)$$\n\nThe sensitivity at the output layer, $\\delta_n^{O}$ is given by,\n\n$$\\delta_n^{O}=\\frac{\\partial e_n}{\\partial S_n^{\\left(O\\right)}}$$\n\nwhere, $e_n=-\\sum_jy_{n,j}\\log{[\\sigma(\\mathbf{S}_n)]_j}$ and $\\sigma(\\mathbf{S}_n)=\\frac{e^{S_n}}{\\sum_{k=1}^Ne^{\\left(S_n\\right)_k}}$ where $\\sigma$ is the activation (softmax) at the output layer. \n\nApplying the analytical expression for gradCE from the previous section,\n\n$$\\delta_n^{O}=\\frac{\\partial e_n}{\\partial S_n^{\\left(O\\right)}}=\\frac{\\partial e_{n}}{\\partial S^{O}_{n}}=\\sigma\\left(S^{O}_{n}\\right)-y_{n}\\ \\ -----\\ Equation\\ 1.A$$\n\nApplying $Equation\\ 1.A$ in $Equation\\ 1$\n\n$$\\frac{\\partial E_{in}}{\\partial W^{O}}=\\frac{1}{N}\\sum_{n=1}^{N}\\left(x_n^{O-1}\\left(\\sigma\\left(S^{O}_{n}\\right)-y_{n}\\right)^T\\right)$$\n\n**2. $\\frac{\\partial E_{in}}{\\partial W^{h}}$ where $W^{h}$ is the weight matrix of the hidden layer.**\n\n$$\\frac{\\partial E_{in}}{\\partial W^{h}}=\\frac{1}{N}\\sum_{n=1}^{N}\\frac{\\partial e_{n}}{\\partial W^{h}}=\\frac{1}{N}\\sum_{n=1}^{N}\\left(x_n^{h-1}\\left(\\delta_n^{h}\\right)^T\\right)\\ \\ -----\\ Equation\\ 2.A$$\n\nThe sensitivity at the hidden layer, $\\delta_n^{h}$ is given by,\n\n$$\\delta_n^{h}=\\frac{\\partial e_n}{\\partial S_n^{\\left(h\\right)}}=\\frac{\\partial e_n}{\\partial x_n^{h}}.\\frac{\\partial x_n^{h}}{\\partial S_n^{h}}=\\frac{\\partial e_n}{\\partial x_n^{h}}.\\theta'\\left(S_n^{h}\\right)\\ \\ -----\\ Equation\\ 2.B$$\n\nWhere $x_n^{h}=\\theta\\left(S_n^{h}\\right)$ and $\\theta$ is the activation function (ReLU) at the hidden layer. \n\nFinally, \n\n$$\\frac{\\partial e_n}{\\partial x_n^{h}}=\\left[W^O\\delta^O\\right]_1^{d^{\\left(h\\right)}}$$\n\nApplying this to $Equation\\ 2.B$,\n\n$$\\delta_n^{h}=\\theta'\\left(S_n^{h}\\right)\\otimes\\left[W^O\\delta_n^O\\right]_1^{d^{\\left(h\\right)}}\\ \\ -----\\ Equation\\ 2.C$$\n\nWhere $\\otimes$ denotes element-wise multiplication.\n\nApplying $Equation\\ 2.C$ in $Equation\\ 2.A$, we finally get\n\n$$\\frac{\\partial E_{in}}{\\partial W^{h}}=\\frac{1}{N}\\sum_{n=1}^{N}x_n^{h-1}\\left[\\theta'\\left(S_n^{h}\\right)\\otimes\\left[W^O\\delta_n^O\\right]_1^{d^{\\left(h\\right)}}\\right]^T$$",
"_____no_output_____"
],
[
"## Network training [8 points]\n\nImplement a function to train the network. The function uses the helper functions from above. The optimization technique for backpropagation will be Gradient Descent with Momentum:\n$$\\mathbf{V}(t)=\\alpha \\mathbf{V}(t-1)-\\eta\\frac{\\partial E_{\\mathrm{in}}}{\\partial \\mathbf{W}(t)}$$\nand \n$$\\mathbf{W}(t+1)=\\mathbf{W}(t)+\\mathbf{V}(t),$$\nwhere $\\eta$ is the learning rate and $\\alpha$ is the momentum hyperparameter.\n\nThe training function accepts the following inputs: training data (features), training labels, weight matrix of the hidden layer, weight matrix of the output layer, number of iterations, parameters $\\eta$ and $\\alpha$, validation data, validation labels, test data, test labels. The validation and test inputs are initialized to \"None\" and need not be passed on. You will also need to initialize the velocity matrices $\\mathbf{V}$ for both hidden layer and output layer weights to small values, e.g. $10^{-5}%$.\n\nThe function outputs the updated weight matrices, the losses and classification accuracies for the training data, and if validation and test inputs were provided, then it also outputs the classification accuracies for the validation and test data.",
"_____no_output_____"
]
],
[
[
"# Utility Functions\n \n# A. Easy ForwardProp\ndef forwardProp(inputData,targetLabel,weightHidd,weightOp):\n # 1. Hidden Layer\n # Add Bias and Multiply with Weights to get S(1)\n sToHidd = computeLayer((np.append(np.ones((inputData.shape[0],1)),inputData,axis=1)).T,weightHidd)\n # Calculate Activation to get X(1)\n xToOp = relu(sToHidd)\n \n # 2. Output Layer\n # Add Bias and Multiply with Weights to get S(L)\n sToOp = computeLayer((np.append(np.ones((1,xToOp.shape[1])),xToOp,axis=0)),weightOp)\n # Calculate Activation to get h(x)\n fpassResult = softmax(sToOp)\n # Calculate Loss\n fpassLoss = CE(targetLabel.T,sToOp)\n \n return fpassResult, fpassLoss\n \n# B. Easy Classification Accuracies\ndef classAccuracy(fpassResult,targetLabel):\n # Fpass Classification\n fpassClass = np.argmax(fpassResult,axis=0)\n # True Classification\n trueClass = np.argmax(targetLabel,axis=0)\n \n return np.sum(fpassClass==trueClass)/targetLabel.shape[1]\n\ndef trainNN(trainingData, trainingTarget, weightHidd, weightOp, numIter, eta, alpha, validationData, validationTarget, testData, testTarget):\n # eta --> Learning Rate\n # alpha --> Momentum\n \n # Grab NN Dimensions\n numHiddenUnits = weightHidd.shape[1]\n numOpUnits = weightOp.shape[1]\n numIpVectors = trainingData.shape[0]\n numInputs = trainingData.shape[1]\n \n # Initialize Matrices\n velocityHidd = 1E-5 * (np.ones([weightHidd.shape[0],weightHidd.shape[1]]))\n velocityOp = 1E-5 * (np.ones([weightOp.shape[0],weightOp.shape[1]]))\n sToHidd = np.zeros([trainingData.shape[0],numHiddenUnits])\n sToOp = np.zeros([trainingData.shape[0],numOpUnits])\n \n lossesTrain = np.zeros([numIter,1])\n lossesValid = np.zeros([numIter,1])\n lossesTest = np.zeros([numIter,1])\n \n accuracyTrain = np.zeros([numIter,1])\n accuracyValid = np.zeros([numIter,1])\n accuracyTest = np.zeros([numIter,1])\n\n i = 1\n while (i != numIter+1):\n \n # 1. Hidden Layer\n # Add Bias and Multiply with Weights to get S(1)\n sToHidd = computeLayer((np.append(np.ones((trainingData.shape[0],1)),trainingData,axis=1)).T,weightHidd)\n # Calculate Activation to get X(1)\n xToOp = relu(sToHidd)\n \n # 2. Output Layer\n # Add Bias and Multiply with Weights to get S(L)\n sToOp = computeLayer((np.append(np.ones((1,xToOp.shape[1])),xToOp,axis=0)),weightOp)\n # Calculate Activation to get h(x)\n hx = softmax(sToOp)\n # Calculate Loss\n lossesTrain[i-1,0] = CE(trainingTarget.T,sToOp)\n \n # Back Propagation\n \n # Part 1 : At OP\n # 1. Grad w.r.t weightOp\n dEdWL = (1/1)*(np.matmul((np.append(np.ones((1,xToOp.shape[1])),xToOp,axis=0)),gradCE(trainingTarget.T,sToOp).T))\n # 2. Velocity OP\n velocityOp = (alpha*velocityOp)-(eta*dEdWL)\n # 3. weightOp Update\n weightOp = weightOp + velocityOp\n \n # Part 2 : At Hidden\n # 1. Grad w.r.t weightHidd\n dedxl = (np.matmul(weightOp[1:,:],gradCE(trainingTarget.T,sToOp)))\n derRelu = (sToHidd>0).astype(int) # Derivative of ReLU\n temp = np.multiply((derRelu),(dedxl)) # [n x numberHiddenNeuron]\n dEdWl = (1/1)*(np.matmul((np.append(np.ones((trainingData.shape[0],1)),trainingData,axis=1)).T,temp.T))\n # 2. Velocity Hidden\n velocityHidd = (alpha*velocityHidd)-(eta*dEdWl)\n # 3. weightHidd Update\n weightHidd = weightHidd + velocityHidd\n \n # Report Accuracies and Losses\n \n # 1. Training Accuracy\n accuracyTrain[i-1,0] = classAccuracy(hx,trainingTarget.T)\n \n # 2. 
Validation Accuracy\n fpassResValid, lossesValid[i-1,0] = forwardProp(validationData,validationTarget,weightHidd,weightOp)\n accuracyValid[i-1,0] = classAccuracy(fpassResValid,validationTarget.T)\n \n # 3. Testing Accuracy\n fpassResTest, lossesTest[i-1,0] = forwardProp(testData,testTarget,weightHidd,weightOp)\n accuracyTest[i-1,0] = classAccuracy(fpassResTest,testTarget.T)\n \n # Increment Index\n i = i + 1\n return weightHidd, weightOp, lossesTrain, lossesValid, lossesTest, accuracyTrain, accuracyValid, accuracyTest\n",
"_____no_output_____"
]
],
[
[
"## Network test [4 points]\n\nWrite a script that constructs the neural network.\n\nInitialize your weight matrices by drawing the elements i.i.d. at random from a zero-mean Gaussian distribution with variance equal to $$\\sigma_w^2=\\frac{2}{\\mbox{# of input nodes + # of output nodes}}$$ (Xavier normalization http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf) \n\nBuild a network with 1000 hidden units and train it for 200 epochs using $\\alpha=0.9$ and $\\eta=10^{-5}$. Plot the training, validation and testing accuracy curves. State the training, validation and testing accuracies after training. Show the plot and the accuracies in the next markdown cell.",
"_____no_output_____"
]
],
[
[
"# Load Data\ntrainData, validData, testData, trainTarget, validTarget, testTarget = loadData()\n\ntrainData = trainData.reshape(trainData.shape[0],-1)\nvalidData = validData.reshape(validData.shape[0],-1)\ntestData = testData.reshape(testData.shape[0],-1)\n\ntrainTargetOneHot, validTargetOneHot, testTargetOneHot = convertOneHot(trainTarget, validTarget, testTarget)\n\nalpha = 0.9 # Momentum\neta = 0.6*(1e-05) # Learning Rate\nnumIter = 200 # Epochs\nnumHiddenNeurons = 1000 # Number of Hidden Layer Neurons\nnumInputNodes = 784 # Excluding Bias\nnumOpNodes = 10 # 10 Classes\ncentre = 0 # Mean of Distribution to Draw Weights\n\n# Weight Matrix Initialization\n\n# Function To Generate Standard Deviation for Xavier Init\ndef standDevDistr(ipNodes,opNodes):\n variance = 2/(ipNodes+opNodes)\n return np.sqrt(variance)\n\ndef constructAndTrainNN(alpha,eta,numIter,numHiddenNeurons,numInputNodes,numOpNodes,centre,trainData,trainTargetOneHot,validData,validTargetOneHot,testData,testTargetOneHot):\n standDevHidd = standDevDistr(numInputNodes,numHiddenNeurons)\n standDevOp = standDevDistr(numHiddenNeurons,numOpNodes)\n\n weightHiddenLayer = np.random.normal(loc=centre,scale=standDevHidd,size=(numInputNodes+1,numHiddenNeurons))\n #weightHiddenLayer = np.zeros([numInputNodes+1,numHiddenNeurons])\n weightOpLayer = np.random.normal(loc=centre,scale=standDevOp,size=(numHiddenNeurons+1,numOpNodes))\n #weightOpLayer = np.zeros([numHiddenNeurons+1,numOpNodes])\n\n wHid, wOp, ltrain, lvalid, ltest, atrain, avalid, atest = trainNN(trainData, trainTargetOneHot, weightHiddenLayer, weightOpLayer,numIter,eta,alpha,validData,validTargetOneHot,testData,testTargetOneHot)\n \n plt.plot(atrain,\"-r\",label=\"Training Set\")\n plt.plot(avalid,\"-b\",label=\"Validation Set\")\n plt.plot(atest,\"-g\",label=\"Test Set\")\n plt.xlabel('Epochs')\n plt.ylabel('Classification Accuracy')\n plt.legend(loc=\"lower right\")\n plt.title(\"Classification Accuracy vs Number of Epochs\")\n plt.show()\n\n plt.plot(ltrain,\"-r\",label=\"Training Set\")\n plt.plot(lvalid,\"-b\",label=\"Validation Set\")\n plt.plot(ltest,\"-g\",label=\"Testing Set\")\n plt.xlabel('Epochs')\n plt.ylabel('Cross Entropy Loss')\n plt.legend(loc=\"upper right\")\n plt.title(\"Cross Entropy Loss vs Epochs\")\n plt.show()\n \n return wHid, wOp, ltrain, lvalid, ltest, atrain, avalid, atest\n\nnp.random.seed(7)\n# Construct Network With 1000 Hidden Nodes and Run Test\nwHid, wOp, ltrain, lvalid, ltest, atrain, avalid, atest = constructAndTrainNN(alpha,eta,numIter,numHiddenNeurons,numInputNodes,numOpNodes,centre,trainData,trainTargetOneHot,validData,validTargetOneHot,testData,testTargetOneHot)",
"_____no_output_____"
]
],
[
[
"**Solution:**\n\n**Accuracy Curves**\n\n\n\nFor this network with 1000 hidden nodes, the accuracies at the end of 200 Epochs are,\n\n| Dataset | Accuracy |\n|:----------:|:--------:|\n| Training | 95.45% |\n| Validation | 92.90% |\n| Testing | 90.89% |\n\n**Cross Entropy Loss**\n\n\n\n*Note: I noticed that at the learning rate of 1E-5, the solution often failed to converge. That is, after a few initial iterations, the loss would shoot up and never come down. As a result, the learning rate was lowered to 60% of 1E-5 to capture the outputs. No other aspect of the network was modified.*",
"_____no_output_____"
],
[
"## Hyperparameter investigation [3 points]\n\nContinue to use $\\alpha=0.9$ and $\\eta=10^{-5}$.\n\nTest your network with 500, 1500, 2500 hidden nodes and train for 200 epochs. Comment based on the validation accuracy after how many epochs training could be terminated early. \n\nPlot the training and validation accuracy curves for all three network sizes and 200 training epochs, and report the test accuracy for your selected network size and training length. Show the plot and the accuracies in the next markdown cell.\n\n(Training of the large network for 200 epochs should take about 30-60 mins.)",
"_____no_output_____"
]
],
[
[
"print('Number of Hidden Units = 500')\nwHid500, wOp500, ltrain500, lvalid500, ltest500, atrain500, avalid500, atest500 = constructAndTrainNN(alpha,eta,numIter,500,numInputNodes,numOpNodes,centre,trainData,trainTargetOneHot,validData,validTargetOneHot,testData,testTargetOneHot)\n\nprint('Number of Hidden Units = 1500')\nwHid1500, wOp1500, ltrain1500, lvalid1500, ltest1500, atrain1500, avalid1500, atest1500 = constructAndTrainNN(alpha,eta,numIter,1500,numInputNodes,numOpNodes,centre,trainData,trainTargetOneHot,validData,validTargetOneHot,testData,testTargetOneHot)\n\nprint('Number of Hidden Units = 2500')\nwHid2500, wOp2500, ltrain2500, lvalid2500, ltest2500, atrain2500, avalid2500, atest2500 = constructAndTrainNN(alpha,eta,numIter,2500,numInputNodes,numOpNodes,centre,trainData,trainTargetOneHot,validData,validTargetOneHot,testData,testTargetOneHot)",
"Number of Hidden Units = 500\n"
]
],
[
[
"**Solution:**\n\n**Number of Hidden Units = 500**\n\nAccuracy Curves\n\n\n\n**Number of Hidden Units = 1500**\n\nAccuracy Curves\n\n\n\n**Number of Hidden Units = 2500**\n\nAccuracy Curves\n\n\n\n**Summary of Accuracies**\n\n| | | | Accuracy | |\n|:------------:|:-----:|:------------:|:--------------:|:-----------:|\n| Hidden Units | Epoch | Training Set | Validation Set | Testing Set |\n| 500 | 200 | 95.11% | 92.40% | 90.93% |\n| 1500 | 200 | 95.65% | 92.10% | 91.26% |\n| 2500 | 200 | 95.93% | 92.60% | 91.18% |\n\nWith respect to the validation accuracies, the highest validation accuracies were achieved at the following epochs for the each network.\n\n| Hidden Units | Epoch Count at Highest Validation Accuracy |\n|:------------:|:------------------------------------------:|\n| 500 | 193 |\n| 1500 | 183 |\n| 2500 | 189 |\n\nAccuracies at the corresponding epochs have been tabulated below.\n\n| | | | Accuracy | |\n|:------------:|:-----:|:------------:|:--------------:|:-----------:|\n| Hidden Units | Epoch | Training Set | Validation Set | Testing Set |\n| 500 | 193 | 94.95% | 92.40% | 90.97% |\n| 1500 | 183 | 95.27% | 92.10% | 91.19% |\n| 2500 | 189 | 95.66% | 92.70% | 91.12% |",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
d0eb689d5fefcb20038a9551c2aecb7dfa3f94bd | 96,560 | ipynb | Jupyter Notebook | examples/notebook/examples/bus_driver_scheduling_flow_sat.ipynb | remiomosowon/or-tools | f15537de74088b60dfa325c3b2b5eab365333d03 | [
"Apache-2.0"
] | 8,273 | 2015-02-24T22:10:50.000Z | 2022-03-31T21:19:27.000Z | examples/notebook/examples/bus_driver_scheduling_flow_sat.ipynb | remiomosowon/or-tools | f15537de74088b60dfa325c3b2b5eab365333d03 | [
"Apache-2.0"
] | 2,530 | 2015-03-05T04:27:21.000Z | 2022-03-31T06:13:02.000Z | examples/notebook/examples/bus_driver_scheduling_flow_sat.ipynb | remiomosowon/or-tools | f15537de74088b60dfa325c3b2b5eab365333d03 | [
"Apache-2.0"
] | 2,057 | 2015-03-04T15:02:02.000Z | 2022-03-30T02:29:27.000Z | 50.687664 | 269 | 0.414095 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
d0eb835818116d97ef964b4f7d45a211785ecf41 | 117,584 | ipynb | Jupyter Notebook | 4_VALERIO_PT1_FEDERATED_SHARED_CLASSIFIER_MNIST.ipynb | CasellaJr/Federated-Transfer-Learning-using-Network-Composition | 7dc3ce821f6c938fff780eb9f3567a8a0f1e6163 | [
"MIT"
] | null | null | null | 4_VALERIO_PT1_FEDERATED_SHARED_CLASSIFIER_MNIST.ipynb | CasellaJr/Federated-Transfer-Learning-using-Network-Composition | 7dc3ce821f6c938fff780eb9f3567a8a0f1e6163 | [
"MIT"
] | null | null | null | 4_VALERIO_PT1_FEDERATED_SHARED_CLASSIFIER_MNIST.ipynb | CasellaJr/Federated-Transfer-Learning-using-Network-Composition | 7dc3ce821f6c938fff780eb9f3567a8a0f1e6163 | [
"MIT"
] | null | null | null | 54.161216 | 22,438 | 0.621403 | [
[
[
"!pip install --upgrade progressbar2",
"Requirement already satisfied: progressbar2 in /usr/local/lib/python3.7/dist-packages (3.38.0)\nCollecting progressbar2\n Downloading progressbar2-3.53.1-py2.py3-none-any.whl (25 kB)\nRequirement already satisfied: six in /usr/local/lib/python3.7/dist-packages (from progressbar2) (1.15.0)\nRequirement already satisfied: python-utils>=2.3.0 in /usr/local/lib/python3.7/dist-packages (from progressbar2) (2.5.6)\nInstalling collected packages: progressbar2\n Attempting uninstall: progressbar2\n Found existing installation: progressbar2 3.38.0\n Uninstalling progressbar2-3.38.0:\n Successfully uninstalled progressbar2-3.38.0\nSuccessfully installed progressbar2-3.53.1\n"
],
[
"from torch import nn\nfrom collections import OrderedDict\nimport torch.nn.functional as F\nimport torch\nfrom torch.utils.data import DataLoader\nimport torchvision\nimport random\nfrom torch.utils.data import Subset\nfrom matplotlib import pyplot as plt\nfrom torchsummary import summary\nfrom torchvision import transforms\nimport progressbar as pb\nimport numpy as np",
"_____no_output_____"
],
[
"SUM = lambda x,y : x+y",
"_____no_output_____"
],
[
"def check_equity(property,a,b):\n pa = getattr(a,property)\n pb = getattr(b,property)\n assert pa==pb, \"Different {}: {}!={}\".format(property,pa,pb)\n\n return pa",
"_____no_output_____"
],
[
"def module_unwrap(mod:nn.Module,recursive=False):\n children = OrderedDict()\n try:\n for name, module in mod.named_children():\n if (recursive):\n recursive_call = module_unwrap(module,recursive=True)\n if (len(recursive_call)>0):\n for k,v in recursive_call.items():\n children[name+\"_\"+k] = v\n else:\n children[name] = module\n else:\n children[name] = module\n except AttributeError:\n pass\n\n return children",
"_____no_output_____"
],
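[
"# Added illustration: module_unwrap flattens a module into an OrderedDict of\n# its leaf submodules, keyed by the child names (nested names get joined with\n# underscores).\ntiny = nn.Sequential(nn.Linear(4, 8), nn.ReLU())\nprint(list(module_unwrap(tiny, recursive=True).keys()))",
"_____no_output_____"
],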
[
"class VGGBlock(nn.Module):\n def __init__(self, in_channels, out_channels,batch_norm=False):\n\n super().__init__()\n\n conv2_params = {'kernel_size': (3, 3),\n 'stride' : (1, 1),\n 'padding' : 1\n }\n\n noop = lambda x : x\n\n self._batch_norm = batch_norm\n\n self.conv1 = nn.Conv2d(in_channels=in_channels,out_channels=out_channels , **conv2_params)\n #self.bn1 = nn.BatchNorm2d(out_channels) if batch_norm else noop\n self.bn1 = nn.GroupNorm(32, out_channels) if batch_norm else noop\n\n self.conv2 = nn.Conv2d(in_channels=out_channels,out_channels=out_channels, **conv2_params)\n #self.bn2 = nn.BatchNorm2d(out_channels) if batch_norm else noop\n self.bn2 = nn.GroupNorm(32, out_channels) if batch_norm else noop\n\n self.max_pooling = nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2))\n\n @property\n def batch_norm(self):\n return self._batch_norm\n\n def forward(self,x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = F.relu(x)\n\n x = self.conv2(x)\n x = self.bn2(x)\n x = F.relu(x)\n\n x = self.max_pooling(x)\n\n return x",
"_____no_output_____"
],
[
"class Classifier(nn.Module):\n\n def __init__(self,num_classes=10):\n super().__init__()\n\n self.classifier = nn.Sequential(\n nn.Linear(2048, 2048),\n nn.ReLU(True),\n nn.Dropout(p=0.5),\n nn.Linear(2048, 512),\n nn.ReLU(True),\n nn.Dropout(p=0.5),\n nn.Linear(512, num_classes)\n )\n\n def forward(self,x):\n\n return self.classifier(x)",
"_____no_output_____"
],
[
"class VGG16(nn.Module):\n\n def __init__(self, input_size, batch_norm=False):\n super(VGG16, self).__init__()\n\n self.in_channels,self.in_width,self.in_height = input_size\n\n self.block_1 = VGGBlock(self.in_channels,64,batch_norm=batch_norm)\n self.block_2 = VGGBlock(64, 128,batch_norm=batch_norm)\n self.block_3 = VGGBlock(128, 256,batch_norm=batch_norm)\n self.block_4 = VGGBlock(256,512,batch_norm=batch_norm)\n\n\n @property\n def input_size(self):\n return self.in_channels,self.in_width,self.in_height\n\n def forward(self, x):\n\n x = self.block_1(x)\n x = self.block_2(x)\n x = self.block_3(x)\n x = self.block_4(x)\n # x = self.avgpool(x)\n x = torch.flatten(x,1)\n\n return x",
"_____no_output_____"
],
[
"class CombinedLoss(nn.Module):\n def __init__(self, loss_a, loss_b, loss_combo, _lambda=1.0):\n super().__init__()\n self.loss_a = loss_a\n self.loss_b = loss_b\n self.loss_combo = loss_combo\n\n self.register_buffer('_lambda',torch.tensor(float(_lambda),dtype=torch.float32))\n\n\n def forward(self,y_hat,y):\n\n return self.loss_a(y_hat[0],y[0]) + self.loss_b(y_hat[1],y[1]) + self._lambda * self.loss_combo(y_hat[2],torch.cat(y,0))\n",
"_____no_output_____"
],
[
"DO='TRAIN'\n\nrandom.seed(47)\n\ncombo_fn = SUM\n\nlambda_reg = 1",
"_____no_output_____"
],
[
"def test(net,classifier, loader):\n\n net.to(dev)\n classifier.to(dev)\n\n net.eval()\n\n sum_accuracy = 0\n\n # Process each batch\n for j, (input, labels) in enumerate(loader):\n\n input = input.to(dev)\n labels = labels.float().to(dev)\n\n features = net(input)\n\n pred = torch.squeeze(classifier(features))\n\n # https://discuss.pytorch.org/t/bcewithlogitsloss-and-model-accuracy-calculation/59293/ 2\n #pred_labels = (pred >= 0.0).long() # Binarize predictions to 0 and 1\n _,pred_label = torch.max(pred, dim = 1)\n pred_labels = (pred_label == labels).float()\n\n batch_accuracy = pred_labels.sum().item() / len(labels)\n\n # Update accuracy\n sum_accuracy += batch_accuracy\n\n epoch_accuracy = sum_accuracy / len(loader)\n return epoch_accuracy\n #print(f\"Accuracy test: {epoch_accuracy:0.5}\")",
"_____no_output_____"
],
[
"def train(nets, loaders, optimizer, criterion, epochs=20, dev=None, save_param=False, model_name=\"federated_mnist\"):\n # try:\n nets = [n.to(dev) for n in nets]\n\n model_a = module_unwrap(nets[0], True)\n model_b = module_unwrap(nets[1], True)\n model_c = module_unwrap(nets[2], True)\n\n reg_loss = nn.MSELoss()\n\n criterion.to(dev)\n reg_loss.to(dev)\n\n # Initialize history\n history_loss = {\"train\": [], \"val\": [], \"test\": []}\n history_accuracy = {\"train\": [], \"val\": [], \"test\": []}\n history_test = []\n # Store the best val accuracy\n best_val_accuracy = 0\n # Store best accuracy to save the model\n best_accuracy = 0\n\n # Process each epoch\n for epoch in range(epochs):\n # Initialize epoch variables\n sum_loss = {\"train\": 0, \"val\": 0, \"test\": 0}\n sum_accuracy = {\"train\": [0,0,0], \"val\": [0,0,0], \"test\": [0,0,0]}\n\n progbar = None\n # Process each split\n for split in [\"train\", \"val\", \"test\"]:\n if split == \"train\":\n for n in nets:\n n.train()\n widgets = [\n ' [', pb.Timer(), '] ',\n pb.Bar(),\n ' [', pb.ETA(), '] ', pb.Variable('ta','[Train Acc: {formatted_value}]')\n ]\n\n progbar = pb.ProgressBar(max_value=len(loaders[split][0]),widgets=widgets,redirect_stdout=True)\n\n else:\n for n in nets:\n n.eval()\n # Process each batch\n for j,((input_a, labels_a),(input_b, labels_b)) in enumerate(zip(loaders[split][0],loaders[split][1])):\n\n input_a = input_a.to(dev)\n input_b = input_b.to(dev)\n\n labels_a = labels_a.long().to(dev)\n labels_b = labels_b.long().to(dev)\n\n inputs = torch.cat([input_a,input_b],axis=0)\n labels = torch.cat([labels_a, labels_b])\n\n\n # Reset gradients\n optimizer.zero_grad()\n # Compute output\n features_a = nets[0](input_a)\n features_b = nets[1](input_b)\n features_c = nets[2](inputs)\n\n pred_a = torch.squeeze(nets[3](features_a))\n pred_b = torch.squeeze(nets[3](features_b))\n pred_c = torch.squeeze(nets[3](features_c))\n\n loss = criterion(pred_a, labels_a) + criterion(pred_b, labels_b) + criterion(pred_c, labels)\n\n for n in model_a:\n layer_a = model_a[n]\n layer_b = model_b[n]\n layer_c = model_c[n]\n if (isinstance(layer_a,nn.Conv2d)):\n loss += lambda_reg * reg_loss(combo_fn(layer_a.weight,layer_b.weight),layer_c.weight)\n if (layer_a.bias is not None):\n loss += lambda_reg * reg_loss(combo_fn(layer_a.bias, layer_b.bias), layer_c.bias)\n\n # Update loss\n sum_loss[split] += loss.item()\n # Check parameter update\n if split == \"train\":\n # Compute gradients\n loss.backward()\n # Optimize\n optimizer.step()\n\n # Compute accuracy\n\n #https://discuss.pytorch.org/t/bcewithlogitsloss-and-model-accuracy-calculation/59293/ 2\n #pred_labels_a = (pred_a >= 0.0).long() # Binarize predictions to 0 and 1\n #pred_labels_b = (pred_b >= 0.0).long() # Binarize predictions to 0 and 1\n #pred_labels_c = (pred_c >= 0.0).long() # Binarize predictions to 0 and 1\n\n #print(pred_a.shape)\n\n _,pred_label_a = torch.max(pred_a, dim = 1)\n pred_labels_a = (pred_label_a == labels_a).float()\n\n _,pred_label_b = torch.max(pred_b, dim = 1)\n pred_labels_b = (pred_label_b == labels_b).float()\n\n _,pred_label_c = torch.max(pred_c, dim = 1)\n pred_labels_c = (pred_label_c == labels).float()\n\n batch_accuracy_a = pred_labels_a.sum().item() / len(labels_a)\n batch_accuracy_b = pred_labels_b.sum().item() / len(labels_b)\n batch_accuracy_c = pred_labels_c.sum().item() / len(labels)\n\n # Update accuracy\n sum_accuracy[split][0] += batch_accuracy_a\n sum_accuracy[split][1] += batch_accuracy_b\n sum_accuracy[split][2] += 
batch_accuracy_c\n\n\n if (split=='train'):\n progbar.update(j, ta=batch_accuracy_c)\n\n if (progbar is not None):\n progbar.finish()\n # Compute epoch loss/accuracy\n epoch_loss = {split: sum_loss[split] / len(loaders[split][0]) for split in [\"train\", \"val\", \"test\"]}\n epoch_accuracy = {split: [sum_accuracy[split][i] / len(loaders[split][0]) for i in range(len(sum_accuracy[split])) ] for split in [\"train\", \"val\", \"test\"]}\n\n\n print(f\"Epoch {epoch + 1}:\")\n # Update history\n for split in [\"train\", \"val\", \"test\"]:\n history_loss[split].append(epoch_loss[split])\n history_accuracy[split].append(epoch_accuracy[split])\n # Print info\n print(f\"\\t{split}\\tLoss: {epoch_loss[split]:0.5}\\tVGG 1:{epoch_accuracy[split][0]:0.5}\"\n f\"\\tVGG 2:{epoch_accuracy[split][1]:0.5}\\tVGG *:{epoch_accuracy[split][2]:0.5}\")\n\n if save_param:\n torch.save({'vgg_a':nets[0].state_dict(),'vgg_b':nets[1].state_dict(),'vgg_star':nets[2].state_dict(),'classifier':nets[3].state_dict()},f'{model_name}.pth')\n\n \n print(f\"Accuracy test VGGA: {test(nets[0], nets[3], test_loader_all):0.5}\")\n print(f\"Accuracy test VGGB: {test(nets[1], nets[3], test_loader_all):0.5}\")\n print(f\"Accuracy test VGG*: {test(nets[2], nets[3], test_loader_all):0.5}\")\n \n summed_state_dict = OrderedDict()\n \n for key in nets[2].state_dict():\n if key.find('conv') >=0:\n #print(key)\n summed_state_dict[key] = combo_fn(nets[0].state_dict()[key],nets[1].state_dict()[key])\n else:\n summed_state_dict[key] = nets[2].state_dict()[key]\n \n nets[2].load_state_dict(summed_state_dict)\n accuracy_star = test(nets[2], nets[3], test_loader_all)\n print(f\"Accuracy test VGGSTAR: {accuracy_star:0.5}\")\n history_test.append(accuracy_star)\n\n\n # Store params at the best validation accuracy\n if save_param and accuracy_star > best_accuracy:\n # torch.save(net.state_dict(), f\"{net.__class__.__name__}_best_val.pth\")\n torch.save({'vgg_a':nets[0].state_dict(),'classifier':nets[3].state_dict()}, f\"{model_name}_best_test.pth\")\n best_accuracy = accuracy_star\n print(f\"Best accuracy test is: {best_accuracy:0.5}\")\n\n\n # Plot accuracy\n plt.title(\"Accuracy VGGSTAR over epochs\")\n plt.plot(history_test)\n #plt.legend()\n plt.show()",
"_____no_output_____"
]
],
[
[
"MNIST",
"_____no_output_____"
]
],
[
[
"root_dir = './'\n\nrescale_data = transforms.Lambda(lambda x : x/255)",
"_____no_output_____"
],
[
"# Compose transformations\ndata_transform = transforms.Compose([\n transforms.Resize(32),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n rescale_data,\n #transforms.Normalize((-0.7376), (0.5795))\n])\n\ntest_transform = transforms.Compose([\n transforms.Resize(32),\n transforms.ToTensor(),\n rescale_data,\n #transforms.Normalize((0.1327), (0.2919))\n])\n\n# Load MNIST dataset with transforms\ntrain_set = torchvision.datasets.MNIST(root=root_dir, train=True, download=True, transform=data_transform)\ntest_set = torchvision.datasets.MNIST(root=root_dir, train=False, download=True, transform=test_transform)",
"/usr/local/lib/python3.7/dist-packages/torchvision/datasets/mnist.py:498: UserWarning: The given NumPy array is not writeable, and PyTorch does not support non-writeable tensors. This means you can write to the underlying (supposedly non-writeable) NumPy array using the tensor. You may want to copy the array to protect its data or make it writeable before converting it to a tensor. This type of warning will be suppressed for the rest of this program. (Triggered internally at /pytorch/torch/csrc/utils/tensor_numpy.cpp:180.)\n return torch.from_numpy(parsed.astype(m[2], copy=False)).view(*s)\n"
],
[
"# Dataset len\nnum_train = len(train_set)\nnum_test = len(test_set)\nprint(f\"Num. training samples: {num_train}\")\nprint(f\"Num. test samples: {num_test}\")\n\ntrain_idx = np.random.permutation(np.arange(len(train_set)))\ntest_idx = np.arange(len(test_set))\n\n# Fraction of the original train set that we want to use as validation set\nval_frac = 0.1\n# Number of samples of the validation set\nnum_val = int(num_train * val_frac) \nnum_train = num_train - num_val\n\n# Split training set\nval_idx = train_idx[num_train:]\ntrain_idx = train_idx[:num_train]\n\nprint(f\"{num_train} samples used as train set\")\nprint(f\"{num_val} samples used as val set\")\nprint(f\"{len(test_set)} samples used as test set\")\n\nval_set_a = Subset(train_set, val_idx)\ntrain_set_a = Subset(train_set, train_idx)\ntest_set_a = test_set",
"Num. training samples: 60000\nNum. test samples: 10000\n54000 samples used as train set\n6000 samples used as val set\n10000 samples used as test set\n"
]
],
[
[
"MNIST PERTURBATO",
"_____no_output_____"
]
],
[
[
"root_dir = './'\n\nrescale_data = transforms.Lambda(lambda x : x/255)",
"_____no_output_____"
],
[
"class AddGaussianNoise(object):\n def __init__(self, mean=0., std=1.):\n self.std = std\n self.mean = mean\n \n def __call__(self, tensor):\n return tensor + torch.randn(tensor.size()) * self.std + self.mean\n \n def __repr__(self):\n return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std)",
"_____no_output_____"
],
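[
"# Added illustration: AddGaussianNoise simply adds N(mean, std) samples to a\n# tensor; applying it to a zero tensor makes the noise itself visible.\ntorch.manual_seed(0)\nprint(AddGaussianNoise(0., 0.2)(torch.zeros(5)))",
"_____no_output_____"
],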
[
"# Compose transformations\ndata_transform = transforms.Compose([\n transforms.Resize(32),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n AddGaussianNoise(0., 0.2),\n rescale_data,\n])\n\ntest_transform = transforms.Compose([\n transforms.Resize(32),\n transforms.ToTensor(),\n AddGaussianNoise(0., 0.2),\n rescale_data,\n])\n\n# Load MNIST dataset with transforms\ntrain_set = torchvision.datasets.MNIST(root=root_dir, train=True, download=True, transform=data_transform)\ntest_set = torchvision.datasets.MNIST(root=root_dir, train=False, download=True, transform=test_transform)",
"_____no_output_____"
],
[
"# Dataset len\nnum_train = len(train_set)\nnum_test = len(test_set)\nprint(f\"Num. training samples: {num_train}\")\nprint(f\"Num. test samples: {num_test}\")\n\ntrain_idx = np.random.permutation(np.arange(len(train_set)))\ntest_idx = np.arange(len(test_set))\n\n# Fraction of the original train set that we want to use as validation set\nval_frac = 0.1\n# Number of samples of the validation set\nnum_val = int(num_train * val_frac) \nnum_train = num_train - num_val\n\n# Split training set\nval_idx = train_idx[num_train:]\ntrain_idx = train_idx[:num_train]\n\nprint(f\"{num_train} samples used as train set\")\nprint(f\"{num_val} samples used as val set\")\nprint(f\"{len(test_set)} samples used as test set\")\n\nval_set_b = Subset(train_set, val_idx)\ntrain_set_b = Subset(train_set, train_idx)\ntest_set_b = test_set",
"Num. training samples: 60000\nNum. test samples: 10000\n54000 samples used as train set\n6000 samples used as val set\n10000 samples used as test set\n"
],
[
"test_set = torch.utils.data.ConcatDataset([test_set_a, test_set_b])\n\n# Define loaders\n\ntrain_loader_a = DataLoader(train_set_a, batch_size=128, num_workers=0, shuffle=True, drop_last=True)\nval_loader_a = DataLoader(val_set_a, batch_size=128, num_workers=0, shuffle=False, drop_last=False)\ntest_loader_a = DataLoader(test_set_a, batch_size=128, num_workers=0, shuffle=False, drop_last=False)\n\ntrain_loader_b = DataLoader(train_set_b, batch_size=128, num_workers=0, shuffle=True, drop_last=True)\nval_loader_b = DataLoader(val_set_b, batch_size=128, num_workers=0, shuffle=False, drop_last=False)\ntest_loader_b = DataLoader(test_set_b, batch_size=128, num_workers=0, shuffle=False, drop_last=False)\n\ntest_loader_all = DataLoader(test_set,batch_size=128, num_workers=0,shuffle=False,drop_last=False)\n\n\n# Define dictionary of loaders\nloaders = {\"train\": [train_loader_a,train_loader_b],\n \"val\": [val_loader_a,val_loader_b],\n \"test\": [test_loader_a,test_loader_b]}",
"_____no_output_____"
],
[
"image, label = train_set_a[1]\nplt.imshow(image.squeeze(), cmap='gray')\nprint('Label:', label)",
"Label: 5\n"
],
[
"image, label = train_set_b[7]\nplt.imshow(image.squeeze(), cmap='gray')\nprint('Label:', label)",
"Label: 7\n"
],
[
"model1 = VGG16((1,32,32),batch_norm=True)\nmodel2 = VGG16((1,32,32),batch_norm=True)\nmodel3 = VGG16((1,32,32),batch_norm=True)\nclassifier = Classifier(num_classes=10)",
"_____no_output_____"
],
[
"nets = [model1,model2,model3,classifier]\n\ndev = torch.device('cuda')\n\nparameters = set()\n\nfor n in nets:\n parameters |= set(n.parameters())\n\noptimizer = torch.optim.SGD(parameters, lr = 0.01)\n# Define a loss\n#criterion = nn.BCEWithLogitsLoss()#,nn.BCEWithLogitsLoss(),nn.BCEWithLogitsLoss(),_lambda = 1)\ncriterion = nn.CrossEntropyLoss()\nn_params = 0",
"_____no_output_____"
],
[
"DO = 'TRAIN'\nif (DO=='TRAIN'):\n train(nets, loaders, optimizer, criterion, epochs=50, dev=dev,save_param=True)\nelse:\n state_dicts = torch.load('model.pth')\n model1.load_state_dict(state_dicts['vgg_a']) #questi state_dict vengono dalla funzione di training\n model2.load_state_dict(state_dicts['vgg_b'])\n model3.load_state_dict(state_dicts['vgg_star'])\n classifier.load_state_dict(state_dicts['classifier'])\n\n test(model1,classifier,test_loader_all)\n test(model2, classifier, test_loader_all)\n test(model3, classifier, test_loader_all)\n\n summed_state_dict = OrderedDict()\n\n for key in state_dicts['vgg_star']:\n if key.find('conv') >=0:\n print(key)\n summed_state_dict[key] = combo_fn(state_dicts['vgg_a'][key],state_dicts['vgg_b'][key])\n else:\n summed_state_dict[key] = state_dicts['vgg_star'][key]\n\n model3.load_state_dict(summed_state_dict)\n test(model3, classifier, test_loader_all)",
"/usr/local/lib/python3.7/dist-packages/torch/nn/functional.py:718: UserWarning: Named tensors and all their associated APIs are an experimental feature and subject to change. Please do not use them for anything important until they are released as stable. (Triggered internally at /pytorch/c10/core/TensorImpl.h:1156.)\n return torch.max_pool2d(input, kernel_size, stride, padding, dilation, ceil_mode)\n [Elapsed Time: 0:01:53] |###############| [Time: 0:01:53] [Train Acc: 0.18]\n"
]
],
[
[
"Now you can download federated_mnist_best_test.pth",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
d0eb888922334f27b137b4567281baa3db4327c6 | 5,150 | ipynb | Jupyter Notebook | object_detection.ipynb | sysad-aldama/opencv-object-detection | e11b8319a0d098482fa25648a17b34b4620c1fc6 | [
"MIT"
] | null | null | null | object_detection.ipynb | sysad-aldama/opencv-object-detection | e11b8319a0d098482fa25648a17b34b4620c1fc6 | [
"MIT"
] | null | null | null | object_detection.ipynb | sysad-aldama/opencv-object-detection | e11b8319a0d098482fa25648a17b34b4620c1fc6 | [
"MIT"
] | 1 | 2020-03-13T23:52:03.000Z | 2020-03-13T23:52:03.000Z | 27.248677 | 241 | 0.563689 | [
[
[
"## Quaxis Corporation for Research & Innovation 2020\n### Written by: JP Aldama\n#### MIT License, feel free to do whatever you want with this code.",
"_____no_output_____"
],
[
"### Goal: Object detection using opencv-python and numpy.\n***Requirements: opencv-python, numpy. It is highly recommended to install Anaconda for python 3.x. Using cv2 we will detect objects. For best results use still video (no camera movement).***",
"_____no_output_____"
],
[
"#### Step 1: Import required libraries. Install opencv-python and numpy.\n***pip install opencv-python numpy***",
"_____no_output_____"
]
],
[
[
"import cv2\nimport numpy as np",
"_____no_output_____"
]
],
[
[
"#### Step 2: Path to video source and invoke VideoCapture using source video.",
"_____no_output_____"
]
],
[
[
"SOURCE = 'data/qxdatasets/test_videos/walk_australia_372020.mp4'\ncapture = cv2.VideoCapture(SOURCE)",
"_____no_output_____"
]
],
[
[
"#### Step 3: Read first and second frame.",
"_____no_output_____"
]
],
[
[
"ret, frame1 = capture.read()\nret, frame2 = capture.read()",
"_____no_output_____"
]
],
[
[
"#### Step 4: The main loop\n***4a: Calculate the absolute difference between frame1 and frame2. \n4b: Convert frames to grayscale. \n4c: Apply Gaussian blur to the grayscale frames.\n4d: Set a threshold.\n4e: Apply dilation, find contours.\n4f: For each contour found in frames apply bounding rectangles to each contour found in total contours.\n4g: If movement is detected, display text on screen. If no movement is detected remove the text\n4h: Show the video \n4i: If 'q' is pressed, terminate the main loop and exit program.***",
"_____no_output_____"
]
],
[
[
"# KNOWN BUG: Program freezes if you try to exit the program. \n# Do not worry, this only applies if you are using Jupyter Notebook. \n# Just code in your IDE and everything will be fine.",
"_____no_output_____"
],
[
"while capture.isOpened():\n difference = cv2.absdiff(frame1, frame2)\n grayscale = cv2.cvtColor(difference, cv2.COLOR_BGR2GRAY)\n blur = cv2.GaussianBlur(grayscale, (5,5), 0)\n _, threshold = cv2.threshold(blur, 30, 255, cv2.THRESH_BINARY)\n dilated = cv2.dilate(threshold, None, iterations=1)\n contours, _ = cv2.findContours(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n for contour in contours:\n (x,y,w,h) = cv2.boundingRect(contour)\n if cv2.contourArea(contour) < 1300:\n continue\n cv2.rectangle(frame1, (x,y), (x+w,y+h),(255,0,255), 2) \n cv2.putText(frame1, 'STATUS: {}'.format('MOVEMENT!'),(10,50),cv2.FONT_HERSHEY_SIMPLEX,\n 1,(0,255,0), 2)\n cv2.imshow('video', frame1)\n frame1 = frame2\n ret, frame2 = capture.read()\n key = cv2.waitKey(1) & 0xFF\n if key == ord('q'):\n break",
"_____no_output_____"
]
],
[
[
"#### Step 5: Close the video and exit the program.",
"_____no_output_____"
]
],
[
[
"capture.release()\ncv2.destroyAllWindows()",
"_____no_output_____"
]
],
[
[
"### Conclusion: \n***We have implemented very basic object detection using opencv-python. You can further adjust thresholds so you may 'tune' your threshold. Try extreme values so you can see the difference and learn how to enhance its functionality.***",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
d0eb97ca696e8171727ee75ca28e9b4c8c49f36c | 8,703 | ipynb | Jupyter Notebook | .ipynb_checkpoints/Dynamics_lab05_Ynm-checkpoint.ipynb | georgkaufmann/Lecture_Dynamics | bcc1cb9a926645c094c114010e3463e56d530cc4 | [
"MIT"
] | null | null | null | .ipynb_checkpoints/Dynamics_lab05_Ynm-checkpoint.ipynb | georgkaufmann/Lecture_Dynamics | bcc1cb9a926645c094c114010e3463e56d530cc4 | [
"MIT"
] | null | null | null | .ipynb_checkpoints/Dynamics_lab05_Ynm-checkpoint.ipynb | georgkaufmann/Lecture_Dynamics | bcc1cb9a926645c094c114010e3463e56d530cc4 | [
"MIT"
] | null | null | null | 27.454259 | 122 | 0.484431 | [
[
[
"<table>\n<tr><td><img style=\"height: 150px;\" src=\"images/geo_hydro1.jpg\"></td>\n<td bgcolor=\"#FFFFFF\">\n <p style=\"font-size: xx-large; font-weight: 900; line-height: 100%\">AG Dynamics of the Earth</p>\n <p style=\"font-size: large; color: rgba(0,0,0,0.5);\">Jupyter notebooks</p>\n <p style=\"font-size: large; color: rgba(0,0,0,0.5);\">Georg Kaufmann</p>\n </td>\n</tr>\n</table>",
"_____no_output_____"
],
[
"# Dynamic systems: 5. Gravity\n## Spherical harmonics\n---\n*Georg Kaufmann,\nGeophysics Section,\nInstitute of Geological Sciences,\nFreie Universität Berlin,\nGermany*\n\nIn this notebook, we introduce **spherical harmonics** as function space.\n\nWe discuss:\n\n- **Legendre** polynomials $P_n$\n- **Associated Legendre** polynomials $P_{nm}$\n- **Spherical harmonics** $Y_{nm}$\n\nWe start importing libraries first:",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\nimport numpy as np\nimport math\nfrom scipy.special import lpn,lpmn\nfrom scipy.special import sph_harm",
"_____no_output_____"
]
],
[
[
"----\n## Legendre polynomials\n\nWe refer to the recursive equation for Legendre polynomials, defined on $x \\in [-1,1]$:\n$$\n P_n(x) = -\\frac{n-1}{n} P_{n-2}(x)\n +\\frac{2n-1}{n} x P_{n-1}(x),\n$$\nwhere we have to know the first two polynomials:\n$$\n\\begin{array}{rcl}\nP_0(x) &=& 1 \\\\\nP_1(x) &=& x\n\\end{array}\n$$\n\nWe start defining the array for the argument $x$, then we call the function `lpn` from\nthe `scipy.special` package for degree $n \\in [0,n_{max}]$.",
"_____no_output_____"
]
],
[
[
"nmax = 5\nx = np.linspace(-1,1,51)\nPn = np.zeros(len(x)*(nmax+1)).reshape(nmax+1,len(x))\nprint(Pn.shape)\n\nfor i in range(len(x)):\n tmp = lpn(nmax,x[i])\n for n in range(nmax+1):\n Pn[n,i] = tmp[0][n]",
"_____no_output_____"
],
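[
"# Illustrative sketch (not part of the original notebook): the three-term\n# recursion quoted above, implemented directly as a cross-check against\n# scipy.special.lpn (reuses x, Pn and nmax from the previous cell).\ndef legendre_recursive(n, x):\n    if n == 0:\n        return np.ones_like(x)\n    if n == 1:\n        return x\n    return -(n-1)/n*legendre_recursive(n-2, x) + (2*n-1)/n*x*legendre_recursive(n-1, x)\n\nfor n in range(nmax+1):\n    assert np.allclose(legendre_recursive(n, x), Pn[n,:])\nprint('recursion and scipy.special.lpn agree')",
"_____no_output_____"
],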
[
"plt.figure(figsize=(10,6))\nplt.xlim([-1.1,1.1])\nplt.ylim([-1.1,1.1])\nplt.xlabel('x')\nplt.ylabel('P$_{n}$(x)')\nplt.grid(linestyle='--')\nfor n in range(nmax+1):\n plt.plot(x,Pn[n,:],linewidth=3,label='P$_{'+str(n)+'}$')\nplt.legend()",
"_____no_output_____"
]
],
[
[
"----\n## Associated Legendre polynomials\n\nNext, we refer to the **associated Legendre Polynomials**, $P_n^m(x)$ with the\nrecursive equation:\n$$\n(n-m+1) P_{n+1}^{m}(x) = (2n+1) x P_{n}^{m}(x) - (l+m) P_{n-1}^{m}(x)\n$$\nWe normalize the calculated associated Legendre polynomials:\n$$\nP_{nm} = \\sqrt{2n+1} \\frac{(n-m)!}{(n+m)!} P_n^m\n$$",
"_____no_output_____"
]
],
[
[
"nmax = 5\nmmax = nmax\nx = np.linspace(-1,1,101)\nPnm = np.zeros(len(x)*(nmax+1)*(mmax+1)).reshape(nmax+1,mmax+1,len(x))\nprint(Pnm.shape)\n\nfor i in range(len(x)):\n tmp = lpmn(mmax,nmax,x[i])\n for n in range(nmax+1):\n for m in range(nmax+1):\n #print(n,m)\n if (m == 0):\n norm = np.sqrt(2*(2*n+1))\n elif (m > n):\n norm = 0.\n else:\n norm = np.sqrt((2*n+1)*math.factorial(n-m)/math.factorial(n+m))\n Pnm[n,m,i] = norm*tmp[0][m,n]",
"_____no_output_____"
],
[
"plt.figure(figsize=(10,6))\nplt.xlim([-1.1,1.1])\nplt.ylim([-3,3])\nplt.xlabel('x')\nplt.ylabel('P$_{nm}$(x)')\nplt.grid(linestyle='--')\n\nn = nmax\nfor m in range(n+1):\n plt.plot(x,Pnm[n,m,:],linewidth=3,label='P$_{'+str(n)+str(m)+'}$')\nplt.legend()",
"_____no_output_____"
]
],
[
[
"----\n## Spherical harmonics\n\n$$\nY_{nm}(\\vartheta,\\Phi) = \\sqrt{\\frac{2n+1}{4\\pi} \\frac{(n-m)!}{(n+m)!}} P_n^m(\\cos\\vartheta) e^{im \\Phi}\n$$\nwith\n- $\\Theta \\in [90,-90]$ latitude\n- $\\vartheta \\in [0,180]$ co-latitude\n- $\\Phi \\in [0,360]$ longitude\n\nNote: $x=\\cos(\\vartheta)$.\n\nNote: **Latitude** $\\Theta$ and **Co-latitude** $\\vartheta$ are (roughly) related through:\n$$\n\\Theta = 90 - \\vartheta\n$$",
"_____no_output_____"
]
],
[
[
"# define coordinates\nd2r = np.pi/180.\ndlong = 101 #21\ndcolat = 51 #11\ncolat = np.linspace(0, np.pi, dcolat)\nlong = np.linspace(0, 2*np.pi, dlong)\ncolat, long = np.meshgrid(colat, long)\nprint(colat.shape)",
"_____no_output_____"
],
[
"n=2;m=0\nYnm = sph_harm(m, n, long, colat)\n\nfig,axs = plt.subplots(2,1,figsize=(10,10))\naxs[0].set_title('P$_{'+str(n)+str(m)+'}(\\\\theta) cos(\\\\phi)$')\naxs[0].set_ylabel('Latitude [$^{\\circ}$]')\naxs[0].contourf(long/d2r,90-colat/d2r,Ynm.real)\naxs[1].set_title('P$_{'+str(n)+str(m)+'}(\\\\theta) sin(\\\\phi)$')\naxs[1].contourf(long/d2r,90-colat/d2r,Ynm.imag)\naxs[1].set_xlabel('Longitude [$^{\\circ}$]')\naxs[1].set_ylabel('Latitude [$^{\\circ}$]')",
"_____no_output_____"
],
[
"n=2;m=1\nYnm = sph_harm(m, n, long, colat)\n\nfig,axs = plt.subplots(2,1,figsize=(10,10))\naxs[0].set_title('P$_{'+str(n)+str(m)+'}(\\\\theta) cos(\\\\phi)$')\naxs[0].set_ylabel('Latitude [$^{\\circ}$]')\naxs[0].contourf(long/d2r,90-colat/d2r,Ynm.real)\naxs[1].set_title('P$_{'+str(n)+str(m)+'}(\\\\theta) sin(\\\\phi)$')\naxs[1].contourf(long/d2r,90-colat/d2r,Ynm.imag)\naxs[1].set_xlabel('Longitude [$^{\\circ}$]')\naxs[1].set_ylabel('Latitude [$^{\\circ}$]')",
"_____no_output_____"
],
[
"n=2;m=2\nYnm = sph_harm(m, n, long, colat)\n\nfig,axs = plt.subplots(2,1,figsize=(10,10))\naxs[0].set_title('P$_{'+str(n)+str(m)+'}(\\\\theta) cos(\\\\phi)$')\naxs[0].set_ylabel('Latitude [$^{\\circ}$]')\naxs[0].contourf(long/d2r,90-colat/d2r,Ynm.real)\naxs[1].set_title('P$_{'+str(n)+str(m)+'}(\\\\theta) sin(\\\\phi)$')\naxs[1].contourf(long/d2r,90-colat/d2r,Ynm.imag)\naxs[1].set_xlabel('Longitude [$^{\\circ}$]')\naxs[1].set_ylabel('Latitude [$^{\\circ}$]')",
"_____no_output_____"
]
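,
[
"# Illustrative sketch (not part of the original notebook): numerically check\n# that scipy's sph_harm values are orthonormal, i.e. the surface integral of\n# |Ynm|^2 over the unit sphere equals one (uses the last Ynm computed above).\nintegrand = np.abs(Ynm)**2 * np.sin(colat)\nintegral = np.trapz(np.trapz(integrand, colat[0,:], axis=1), long[:,0])\nprint('integral of |Ynm|^2 over the sphere:', integral)",
"_____no_output_____"
]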
],
[
[
"... done",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
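"code",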
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
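"code",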
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
d0eb9bcfded9b2e2201d8eb4d46b5c6e88f44bc1 | 946 | ipynb | Jupyter Notebook | TSX_preprocessing.ipynb | BYZ007/helpseeker_tsx_nbdev | de6f69891b76093a6ed74249f50ab4d461b598fc | [
"Apache-2.0"
] | null | null | null | TSX_preprocessing.ipynb | BYZ007/helpseeker_tsx_nbdev | de6f69891b76093a6ed74249f50ab4d461b598fc | [
"Apache-2.0"
] | null | null | null | TSX_preprocessing.ipynb | BYZ007/helpseeker_tsx_nbdev | de6f69891b76093a6ed74249f50ab4d461b598fc | [
"Apache-2.0"
] | null | null | null | 17.518519 | 37 | 0.521142 | [
[
[
"# default_exp TSX_preprocessing",
"_____no_output_____"
]
],
[
[
"# TSX preprocessing\n\n> script to preprocess TSX data",
"_____no_output_____"
]
]
] | [
"code",
"markdown"
] | [
[
"code"
],
[
"markdown"
]
] |
d0eb9d12fec3107fae71f1966de07d9b23c31877 | 34,193 | ipynb | Jupyter Notebook | notebooks/lecture_5/DTE-LECTURE-5-LIGHTGBM.ipynb | dsMOOC/MLA-DTE | 6088967d4795d6b1ab91773ba45be866e83d4799 | [
"MIT-0",
"MIT"
] | null | null | null | notebooks/lecture_5/DTE-LECTURE-5-LIGHTGBM.ipynb | dsMOOC/MLA-DTE | 6088967d4795d6b1ab91773ba45be866e83d4799 | [
"MIT-0",
"MIT"
] | null | null | null | notebooks/lecture_5/DTE-LECTURE-5-LIGHTGBM.ipynb | dsMOOC/MLA-DTE | 6088967d4795d6b1ab91773ba45be866e83d4799 | [
"MIT-0",
"MIT"
] | null | null | null | 43.837179 | 1,506 | 0.543883 | [
[
[
"",
"_____no_output_____"
],
[
"## Amazon Access Samples Data Set\n \n Let's apply our boosting algorithm to a real dataset! We are going to use the __Amazon Access Samples dataset__. \n \n We download this dataset from UCI ML repository from this [link](https://archive.ics.uci.edu/ml/datasets/Amazon+Access+Samples). Dua, D. and Graff, C. (2019). [UCI Machine Learning Repository](http://archive.ics.uci.edu/ml). Irvine, CA: University of California, School of Information and Computer Science.\n\n \n__Dataset description:__\n\nEmployees need to request certain resources to fulfill their daily duties. This data consists of anonymized historical data of employee IT access requests. Data fields look like this:\n #### Column Descriptions\n\n* __ACTION__: 1 if the resource was approved, 0 if not.\n* __RESOURCE__: An ID for each resource\n* __PERSON_MGR_ID__: ID of the user's manager\n* __PERSON_ROLLUP_1__: User grouping ID\n* __PERSON_ROLLUP_2__: User grouping ID\n* __PERSON_BUSINESS_TITLE__: Title ID \n* __PERSON_JOB_FAMILY__: Job family ID \n* __PERSON_JOB_CODE__: Job code ID \n\nOur task is to build a machine learning model that can automatically provision an employee's access to company resources given employee profile information and the resource requested.",
"_____no_output_____"
],
[
"### 1. Download and process the dataset\n\nIn this section, we will download our dataset and process it. It consists of two files, we will run the following code cells to get our dataset as a single file at the end. One of the files is large (4.8GB), so make sure you have enough storage.",
"_____no_output_____"
]
],
[
[
"! wget https://archive.ics.uci.edu/ml/machine-learning-databases/00216/amzn-anon-access-samples.tgz",
"zsh:1: command not found: wget\n"
],
[
"! tar -zxvf amzn-anon-access-samples.tgz",
"_____no_output_____"
]
],
[
[
"We have the following files:\n* __amzn-anon-access-samples-2.0.csv__: Employee profile data.\n* __amzn-anon-access-samples-history-2.0.csv__: Resource provision history\n\nBelow, we first read the amzn-anon-access-samples-2.0.csv file (it is a large file) and use some employee fields.",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport random \n\nperson_fields = [\"PERSON_ID\", \"PERSON_MGR_ID\",\n \"PERSON_ROLLUP_1\", \"PERSON_ROLLUP_2\",\n \"PERSON_DEPTNAME\", \"PERSON_BUSINESS_TITLE\",\n \"PERSON_JOB_FAMILY\", \"PERSON_JOB_CODE\"]\n\npeople = {}\nfor chunk in pd.read_csv('amzn-anon-access-samples-2.0.csv', usecols = person_fields, chunksize=5000): \n for index, row in chunk.iterrows():\n people[row[\"PERSON_ID\"]] = [row[\"PERSON_MGR_ID\"], row[\"PERSON_ROLLUP_1\"],\n row[\"PERSON_ROLLUP_2\"], row[\"PERSON_DEPTNAME\"],\n row[\"PERSON_BUSINESS_TITLE\"], row[\"PERSON_JOB_FAMILY\"],\n row[\"PERSON_JOB_CODE\"]]",
"_____no_output_____"
]
],
[
[
"Now, let's read the resource provision history file. Here, we will create our dataset. We will read the add access and remove access actions and save them.",
"_____no_output_____"
]
],
[
[
"add_access_data = []\nremove_access_data = []\n\ndf = pd.read_csv('amzn-anon-access-samples-history-2.0.csv')\n\n# Loop through unique logins (employee ids)\nfor login in df[\"LOGIN\"].unique():\n login_df = df[df[\"LOGIN\"]==login].copy()\n # Save actions\n for target in login_df[\"TARGET_NAME\"].unique():\n login_target_df = login_df[login_df[\"TARGET_NAME\"]==target]\n unique_actions = login_target_df[\"ACTION\"].unique()\n if((len(unique_actions)==1) and (unique_actions[0]==\"remove_access\")):\n remove_access_data.append([0, target] + people[login])\n elif((len(unique_actions)==1) and (unique_actions[0]==\"add_access\")):\n add_access_data.append([1, target] + people[login])\n\n# Create random seed\nrandom.seed(30)\n\n# We will use only 8000 random add_access data\nadd_access_data = random.sample(add_access_data, 8000)\n\n# Add them together\ndata = add_access_data + remove_access_data\n\n# Let's shuffle it\nrandom.shuffle(data)",
"_____no_output_____"
]
],
[
[
"Let's save this data so that we can use it later",
"_____no_output_____"
]
],
[
[
"df = pd.DataFrame(data, columns=[\"ACTION\", \"RESOURCE\",\n \"MGR_ID\", \"ROLLUP_1\",\n \"ROLLUP_2\", \"DEPTNAME\",\n \"BUSINESS_TITLE\", \"JOB_FAMILY\",\n \"JOB_CODE\"])\n\ndf.to_csv(\"data.csv\", index=False)",
"_____no_output_____"
]
],
[
[
"Here is how our data look like:",
"_____no_output_____"
]
],
[
[
"df.head()",
"_____no_output_____"
],
[
"# Delete the downloaded files\n! rm amzn-anon-access-samples-2.0.csv amzn-anon-access-samples-history-2.0.csv amzn-anon-access-samples.tgz",
"_____no_output_____"
]
],
[
[
"### 2. LightGBM\n\nLet's use LightGBM on this dataset. ",
"_____no_output_____"
]
],
[
[
"! pip install -q lightgbm",
"\u001b[31mscikit-learn 0.20.4 has requirement scipy>=0.13.3, but you'll have scipy 0.13.0b1 which is incompatible.\u001b[0m\n\u001b[31mCould not install packages due to an EnvironmentError: [Errno 13] Permission denied: '/Library/Python/2.7/site-packages/scikit_learn-0.20.4.dist-info'\nConsider using the `--user` option or check the permissions.\n\u001b[0m\n"
]
],
[
[
"Let's read the dataset",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\n\ndata = pd.read_csv(\"data.csv\")",
"_____no_output_____"
],
[
"data.head()",
"_____no_output_____"
],
[
"data.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 8152 entries, 0 to 8151\nData columns (total 9 columns):\n # Column Non-Null Count Dtype\n--- ------ -------------- -----\n 0 ACTION 8152 non-null int64\n 1 RESOURCE 8152 non-null int64\n 2 MGR_ID 8152 non-null int64\n 3 ROLLUP_1 8152 non-null int64\n 4 ROLLUP_2 8152 non-null int64\n 5 DEPTNAME 8152 non-null int64\n 6 BUSINESS_TITLE 8152 non-null int64\n 7 JOB_FAMILY 8152 non-null int64\n 8 JOB_CODE 8152 non-null int64\ndtypes: int64(9)\nmemory usage: 573.3 KB\n"
],
[
"data[\"ACTION\"].value_counts()",
"_____no_output_____"
]
],
[
[
"We will fix the column types below to make sure they are handled as categorical variables.",
"_____no_output_____"
]
],
[
[
"from sklearn.model_selection import train_test_split\n\ny = data[\"ACTION\"].values\nX = data.drop(columns='ACTION')\n\nfor c in X.columns:\n X[c] = X[c].astype('category')\n \nX_train, X_valid, y_train, y_valid = train_test_split(X,\n y,\n test_size=0.15,\n random_state=136,\n stratify=y\n )",
"_____no_output_____"
]
],
[
[
"Let's fit the lightGBM model below.",
"_____no_output_____"
]
],
[
[
"import lightgbm as lgb\n\n# Create dataset for lightgbm\nlgb_train = lgb.Dataset(X_train, y_train)\nlgb_eval = lgb.Dataset(X_valid, y_valid, reference=lgb_train)\n\n# Let's see our parameters\n\n# boosting_type (string, optional (default='gbdt'))\n# ‘gbdt’, traditional Gradient Boosting Decision Tree.\n# ‘dart’, Dropouts meet Multiple Additive Regression Trees.\n# ‘goss’, Gradient-based One-Side Sampling.\n# ‘rf’, Random Forest.\n\nparams = {\n 'boosting_type': 'gbdt',\n 'objective': 'binary', # ‘regression’ for LGBMRegressor, ‘binary’ or ‘multiclass’ for LGBMClassifier\n 'metric': ['auc'],\n 'n_estimators': 50, # We can change it, by default 100\n 'learning_rate': 0.1, # Default 0.1\n 'num_iterations': 1000, # Default 100\n 'is_unbalance': True, # Used to fix the class imbalance in the dataset\n 'verbose': 1\n}\n\n#Train\ngbm = lgb.train(params,\n lgb_train,\n valid_sets=lgb_eval,\n early_stopping_rounds=20\n )",
"[LightGBM] [Info] Number of positive: 6800, number of negative: 129\n[LightGBM] [Warning] Auto-choosing col-wise multi-threading, the overhead of testing was 0.000374 seconds.\nYou can set `force_col_wise=true` to remove the overhead.\n[LightGBM] [Info] Total Bins 2538\n[LightGBM] [Info] Number of data points in the train set: 6929, number of used features: 8\n[LightGBM] [Info] [binary:BoostFromScore]: pavg=0.981383 -> initscore=3.964865\n[LightGBM] [Info] Start training from score 3.964865\n[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n[1]\tvalid_0's auc: 0.766304\nTraining until validation scores don't improve for 20 rounds\n[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n[2]\tvalid_0's auc: 0.850851\n[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n[3]\tvalid_0's auc: 0.849565\n[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n[4]\tvalid_0's auc: 0.856069\n[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n[5]\tvalid_0's auc: 0.855181\n[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n[6]\tvalid_0's auc: 0.853859\n[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n[7]\tvalid_0's auc: 0.854366\n[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n[8]\tvalid_0's auc: 0.853714\n[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n[9]\tvalid_0's auc: 0.851775\n[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n[10]\tvalid_0's auc: 0.850362\n[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n[11]\tvalid_0's auc: 0.847409\n[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n[12]\tvalid_0's auc: 0.846739\n[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n[13]\tvalid_0's auc: 0.846105\n[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n[14]\tvalid_0's auc: 0.845399\n[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n[15]\tvalid_0's auc: 0.846214\n[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n[16]\tvalid_0's auc: 0.845489\n[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n[17]\tvalid_0's auc: 0.844547\n[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n[18]\tvalid_0's auc: 0.842808\n[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n[19]\tvalid_0's auc: 0.843678\n[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n[20]\tvalid_0's auc: 0.842609\n[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n[21]\tvalid_0's auc: 0.842627\n[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n[22]\tvalid_0's auc: 0.840181\n[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n[23]\tvalid_0's auc: 0.840743\n[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n[24]\tvalid_0's auc: 0.840308\nEarly stopping, best iteration is:\n[4]\tvalid_0's auc: 0.856069\n"
]
],
[
[
"Let's see the overall performance on validation set.",
"_____no_output_____"
]
],
[
[
"from sklearn.metrics import classification_report\n\ny_pred = gbm.predict(X_valid, num_iteration=gbm.best_iteration)\n\nprint(classification_report(y_valid, np.round(y_pred)))",
" precision recall f1-score support\n\n 0 0.26 0.61 0.37 23\n 1 0.99 0.97 0.98 1200\n\n accuracy 0.96 1223\n macro avg 0.63 0.79 0.67 1223\nweighted avg 0.98 0.96 0.97 1223\n\n"
]
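,
[
"# Illustrative sketch (not part of the original notebook): inspect which\n# features the booster relied on. feature_importance() is a standard\n# lightgbm Booster method; 'split' counts how often each feature is used.\nimportances = gbm.feature_importance(importance_type='split')\nfor name, score in sorted(zip(X.columns, importances), key=lambda t: -t[1]):\n    print(name, score)",
"_____no_output_____"
]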
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
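"code",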
"code"
]
] |
d0ebb5df2718bb94dceb150067fccfc845344650 | 22,119 | ipynb | Jupyter Notebook | AlphabetSoupCharity_Optimization.ipynb | melcardenas28/Deep_Learning- | 1dfad75ef44c0c4e5e7d4c7f3af13153347cab70 | [
"ADSL"
] | null | null | null | AlphabetSoupCharity_Optimization.ipynb | melcardenas28/Deep_Learning- | 1dfad75ef44c0c4e5e7d4c7f3af13153347cab70 | [
"ADSL"
] | null | null | null | AlphabetSoupCharity_Optimization.ipynb | melcardenas28/Deep_Learning- | 1dfad75ef44c0c4e5e7d4c7f3af13153347cab70 | [
"ADSL"
] | null | null | null | 45.985447 | 2,466 | 0.515665 | [
[
[
"## Preprocessing",
"_____no_output_____"
]
],
[
[
"# Import our dependencies\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nimport pandas as pd\nimport tensorflow as tf\n\n# Import and read the charity_data.csv.\nimport pandas as pd \napplication_df = pd.read_csv(\"/Users/melissa/Downloads/Deep Learning Hw/Resources /charity_data.csv\")\napplication_df.head()",
"_____no_output_____"
],
[
"# Drop the non-beneficial ID columns, 'EIN' and 'NAME'.\napplication_df = application_df.drop(['EIN', 'NAME', 'STATUS'], axis=1)\n",
"_____no_output_____"
],
[
"# Determine the number of unique values in each column.\nprint(application_df.nunique())\n",
"APPLICATION_TYPE 17\nAFFILIATION 6\nCLASSIFICATION 71\nUSE_CASE 5\nORGANIZATION 4\nINCOME_AMT 9\nSPECIAL_CONSIDERATIONS 2\nASK_AMT 8747\nIS_SUCCESSFUL 2\ndtype: int64\n"
],
[
"# Look at APPLICATION_TYPE value counts for binning\napp_vc = application_df['APPLICATION_TYPE'].value_counts()\napp_vc",
"_____no_output_____"
],
[
"# Choose a cutoff value and create a list of application types to be replaced\n# use the variable name `application_types_to_replace`\napplication_types_to_replace = app_vc[app_vc < 50].index\n\n# Replace in dataframe\nfor app in application_types_to_replace:\n application_df['APPLICATION_TYPE'] = application_df['APPLICATION_TYPE'].replace(app,\"Other\")\n\n# Check to make sure binning was successful\napplication_df['APPLICATION_TYPE'].value_counts()",
"_____no_output_____"
],
[
"# Look at CLASSIFICATION value counts for binning\nclass_vc = application_df['CLASSIFICATION'].value_counts()\nclass_vc",
"_____no_output_____"
],
[
"# You may find it helpful to look at CLASSIFICATION value counts >1\nclass_vc_1 = class_vc[class_vc > 1]\nclass_vc_1",
"_____no_output_____"
],
[
"# Choose a cutoff value and create a list of classifications to be replaced\n# use the variable name `classifications_to_replace`\nclassifications_to_replace = class_vc[class_vc < 500].index\n\n# Replace in dataframe\nfor cls in classifications_to_replace:\n application_df['CLASSIFICATION'] = application_df['CLASSIFICATION'].replace(cls,\"Other\")\n \n# Check to make sure binning was successful\napplication_df['CLASSIFICATION'].value_counts()",
"_____no_output_____"
],
[
"# Convert categorical data to numeric with `pd.get_dummies`\ndf = pd.get_dummies(application_df)\nprint(df.columns)",
"Index(['ASK_AMT', 'IS_SUCCESSFUL', 'APPLICATION_TYPE_Other',\n 'APPLICATION_TYPE_T10', 'APPLICATION_TYPE_T13', 'APPLICATION_TYPE_T19',\n 'APPLICATION_TYPE_T3', 'APPLICATION_TYPE_T4', 'APPLICATION_TYPE_T5',\n 'APPLICATION_TYPE_T6', 'APPLICATION_TYPE_T7', 'APPLICATION_TYPE_T8',\n 'APPLICATION_TYPE_T9', 'AFFILIATION_CompanySponsored',\n 'AFFILIATION_Family/Parent', 'AFFILIATION_Independent',\n 'AFFILIATION_National', 'AFFILIATION_Other', 'AFFILIATION_Regional',\n 'CLASSIFICATION_C1000', 'CLASSIFICATION_C1200', 'CLASSIFICATION_C2000',\n 'CLASSIFICATION_C2100', 'CLASSIFICATION_C3000', 'CLASSIFICATION_C7000',\n 'CLASSIFICATION_Other', 'USE_CASE_CommunityServ', 'USE_CASE_Heathcare',\n 'USE_CASE_Other', 'USE_CASE_Preservation', 'USE_CASE_ProductDev',\n 'ORGANIZATION_Association', 'ORGANIZATION_Co-operative',\n 'ORGANIZATION_Corporation', 'ORGANIZATION_Trust', 'INCOME_AMT_0',\n 'INCOME_AMT_1-9999', 'INCOME_AMT_10000-24999',\n 'INCOME_AMT_100000-499999', 'INCOME_AMT_10M-50M', 'INCOME_AMT_1M-5M',\n 'INCOME_AMT_25000-99999', 'INCOME_AMT_50M+', 'INCOME_AMT_5M-10M',\n 'SPECIAL_CONSIDERATIONS_N', 'SPECIAL_CONSIDERATIONS_Y'],\n dtype='object')\n"
],
[
"# Split our preprocessed data into our features and target arrays\ny = df[\"IS_SUCCESSFUL\"].values\nX = df.drop(columns=['IS_SUCCESSFUL']).values\n\n# Split the preprocessed data into a training and testing dataset\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42, stratify=y)\n",
"_____no_output_____"
],
[
"# Create a StandardScaler instances\nscaler = StandardScaler()\n\n# Fit the StandardScaler\nX_scaler = scaler.fit(X_train)\n\n# Scale the data\nX_train_scaled = X_scaler.transform(X_train)\nX_test_scaled = X_scaler.transform(X_test)",
"_____no_output_____"
]
],
[
[
"## Compile, Train and Evaluate the Model",
"_____no_output_____"
]
],
[
[
"# Define the model - deep neural net, i.e., the number of input features and hidden nodes for each layer.\nnumber_input_features = len(X_train[0])\nhidden_nodes_layer1 = 100\nhidden_nodes_layer2 = 70\nhidden_nodes_layer2 = 40\n\nnn = tf.keras.models.Sequential()\n\n# First hidden layer\nnn.add(\n tf.keras.layers.Dense(units=hidden_nodes_layer1, input_dim=number_input_features, activation=\"relu\")\n)\n# Second hidden layer\nnn.add(tf.keras.layers.Dense(units=hidden_nodes_layer2, activation=\"relu\"))\n\n# third layer\nnn.add(tf.keras.layers.Dense(units=hidden_nodes_layer2, activation=\"softplus\"))\n\n# output layer\nnn.add(tf.keras.layers.Dense(units=1, activation=\"tanh\"))\n\n# Check the structure of the model\nnn.summary()",
"Model: \"sequential\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense (Dense) (None, 100) 4600 \n_________________________________________________________________\ndense_1 (Dense) (None, 40) 4040 \n_________________________________________________________________\ndense_2 (Dense) (None, 40) 1640 \n_________________________________________________________________\ndense_3 (Dense) (None, 1) 41 \n=================================================================\nTotal params: 10,321\nTrainable params: 10,321\nNon-trainable params: 0\n_________________________________________________________________\n"
],
[
"# Compile the model\nnn.compile(loss=\"binary_crossentropy\", optimizer=\"adam\", metrics=[\"accuracy\"])\n",
"_____no_output_____"
],
[
"# Train the model\nfit_model = nn.fit(X_train_scaled,y_train,epochs=40)\n",
"Train on 25724 samples\nEpoch 1/40\n25724/25724 [==============================] - 3s 135us/sample - loss: 8.2126 - accuracy: 0.4676\nEpoch 2/40\n25724/25724 [==============================] - 3s 102us/sample - loss: 8.2126 - accuracy: 0.4676\nEpoch 3/40\n25724/25724 [==============================] - 3s 102us/sample - loss: 8.2126 - accuracy: 0.4676\nEpoch 4/40\n25724/25724 [==============================] - 3s 108us/sample - loss: 8.2126 - accuracy: 0.4676\nEpoch 5/40\n25724/25724 [==============================] - 3s 105us/sample - loss: 8.2126 - accuracy: 0.4676\nEpoch 6/40\n25724/25724 [==============================] - 3s 104us/sample - loss: 8.2126 - accuracy: 0.4676\nEpoch 7/40\n25724/25724 [==============================] - 3s 102us/sample - loss: 8.2126 - accuracy: 0.4676\nEpoch 8/40\n25724/25724 [==============================] - 3s 103us/sample - loss: 8.2126 - accuracy: 0.4676\nEpoch 9/40\n25724/25724 [==============================] - 3s 103us/sample - loss: 8.2126 - accuracy: 0.4676\nEpoch 10/40\n25724/25724 [==============================] - 3s 103us/sample - loss: 8.2126 - accuracy: 0.4676\nEpoch 11/40\n25724/25724 [==============================] - 3s 103us/sample - loss: 8.2126 - accuracy: 0.4676\nEpoch 12/40\n25724/25724 [==============================] - 3s 104us/sample - loss: 8.2126 - accuracy: 0.4676\nEpoch 13/40\n25724/25724 [==============================] - 3s 103us/sample - loss: 8.2126 - accuracy: 0.4676\nEpoch 14/40\n25724/25724 [==============================] - 3s 102us/sample - loss: 8.2126 - accuracy: 0.4676\nEpoch 15/40\n25724/25724 [==============================] - 3s 104us/sample - loss: 8.2126 - accuracy: 0.4676\nEpoch 16/40\n25724/25724 [==============================] - 3s 103us/sample - loss: 8.2126 - accuracy: 0.4676\nEpoch 17/40\n25724/25724 [==============================] - 3s 103us/sample - loss: 8.2126 - accuracy: 0.4676\nEpoch 18/40\n25724/25724 [==============================] - 3s 103us/sample - loss: 8.2126 - accuracy: 0.4676\nEpoch 19/40\n25724/25724 [==============================] - 3s 102us/sample - loss: 8.2126 - accuracy: 0.4676\nEpoch 20/40\n25724/25724 [==============================] - 3s 111us/sample - loss: 8.2126 - accuracy: 0.4676\nEpoch 21/40\n25724/25724 [==============================] - 3s 117us/sample - loss: 8.2126 - accuracy: 0.4676\nEpoch 22/40\n25724/25724 [==============================] - 3s 110us/sample - loss: 8.2126 - accuracy: 0.4676\nEpoch 23/40\n25724/25724 [==============================] - 3s 101us/sample - loss: 8.2126 - accuracy: 0.4676\nEpoch 24/40\n25724/25724 [==============================] - 3s 101us/sample - loss: 8.2126 - accuracy: 0.4676\nEpoch 25/40\n25724/25724 [==============================] - 3s 102us/sample - loss: 8.2126 - accuracy: 0.4676\nEpoch 26/40\n25724/25724 [==============================] - 3s 103us/sample - loss: 8.2126 - accuracy: 0.4676\nEpoch 27/40\n25724/25724 [==============================] - 3s 102us/sample - loss: 8.2126 - accuracy: 0.4676\nEpoch 28/40\n25724/25724 [==============================] - 3s 102us/sample - loss: 8.2126 - accuracy: 0.4676\nEpoch 29/40\n25724/25724 [==============================] - 3s 102us/sample - loss: 8.2126 - accuracy: 0.4676\nEpoch 30/40\n25724/25724 [==============================] - 3s 101us/sample - loss: 8.2126 - accuracy: 0.4676\nEpoch 31/40\n25724/25724 [==============================] - 3s 103us/sample - loss: 8.2126 - accuracy: 0.4676\nEpoch 32/40\n25724/25724 [==============================] - 3s 105us/sample - loss: 8.2126 - 
accuracy: 0.4676\nEpoch 33/40\n25724/25724 [==============================] - 3s 104us/sample - loss: 8.2126 - accuracy: 0.4676\nEpoch 34/40\n25724/25724 [==============================] - 3s 102us/sample - loss: 8.2126 - accuracy: 0.4676\nEpoch 35/40\n25724/25724 [==============================] - 3s 101us/sample - loss: 8.2126 - accuracy: 0.4676\nEpoch 36/40\n25724/25724 [==============================] - 3s 102us/sample - loss: 8.2126 - accuracy: 0.4676\nEpoch 37/40\n25724/25724 [==============================] - 3s 102us/sample - loss: 8.2126 - accuracy: 0.4676\nEpoch 38/40\n25724/25724 [==============================] - 3s 102us/sample - loss: 8.2126 - accuracy: 0.4676\nEpoch 39/40\n25724/25724 [==============================] - 3s 108us/sample - loss: 8.2126 - accuracy: 0.4676\nEpoch 40/40\n25724/25724 [==============================] - 3s 106us/sample - loss: 8.2126 - accuracy: 0.4676\n"
],
[
"# Evaluate the model using the test data\nmodel_loss, model_accuracy = nn.evaluate(X_test_scaled,y_test,verbose=2)\nprint(f\"Loss: {model_loss}, Accuracy: {model_accuracy}\")",
"8575/1 - 1s - loss: 8.8328 - accuracy: 0.4676\nLoss: 8.211648968176661, Accuracy: 0.4676384925842285\n"
],
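[
"# Illustrative sketch (not part of the original notebook): peek at the raw\n# network outputs. A 'tanh' output unit with binary_crossentropy can\n# saturate, which is consistent with the stalled ~47% accuracy above;\n# 'sigmoid' is the conventional activation for a binary output layer.\nprint(nn.predict(X_test_scaled[:10]).ravel())",
"_____no_output_____"
],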
[
"# Export our model to HDF5 file\nnn.save('AlphabetSoupCharity_Optimization.h5')",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
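"code",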
"code",
"code",
"code",
"code",
"code"
]
] |
d0ebb72363d326ecdfdf0f622c8972a467ab21de | 104,836 | ipynb | Jupyter Notebook | notebooks/03_APIs/02_ML_Scripts/02_PT_ML_Scripts.ipynb | AlekseiMikhalev/lessons | 67b218e80191e49e37a093fc2dd2a4a3d80a387a | [
"MIT"
] | 1 | 2020-06-18T13:03:14.000Z | 2020-06-18T13:03:14.000Z | notebooks/03_APIs/02_ML_Scripts/02_PT_ML_Scripts.ipynb | AlekseiMikhalev/lessons | 67b218e80191e49e37a093fc2dd2a4a3d80a387a | [
"MIT"
] | null | null | null | notebooks/03_APIs/02_ML_Scripts/02_PT_ML_Scripts.ipynb | AlekseiMikhalev/lessons | 67b218e80191e49e37a093fc2dd2a4a3d80a387a | [
"MIT"
] | null | null | null | 104,836 | 104,836 | 0.765147 | [
[
[
"# ML Scripts\n\nSo far, we've done everything inside the Jupyter notebooks but we're going to now move our code into individual python scripts. We will lay out the code that needs to be inside each script but checkout the `API` lesson to see how it all comes together.",
"_____no_output_____"
],
[
"<div align=\"left\">\n<a href=\"https://github.com/madewithml/lessons/blob/master/notebooks/03_APIs/02_ML_Scripts/02_PT_ML_Scripts.ipynb\" role=\"button\"><img class=\"notebook-badge-image\" src=\"https://img.shields.io/static/v1?label=&message=View%20On%20GitHub&color=586069&logo=github&labelColor=2f363d\"></a> \n<a href=\"https://colab.research.google.com/github/madewithml/lessons/blob/master/notebooks/03_APIs/02_ML_Scripts/02_PT_ML_Scripts.ipynb\"><img class=\"notebook-badge-image\" src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"></a>\n</div>",
"_____no_output_____"
],
[
"# data.py",
"_____no_output_____"
],
[
"## Load data",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\nimport random\nimport urllib",
"_____no_output_____"
],
[
"SEED = 1234\nDATA_FILE = 'news.csv'\nINPUT_FEATURE = 'title'\nOUTPUT_FEATURE = 'category'",
"_____no_output_____"
],
[
"# Set seed for reproducibility\nnp.random.seed(SEED)\nrandom.seed(SEED)",
"_____no_output_____"
],
[
"# Load data from GitHub to notebook's local drive\nurl = \"https://raw.githubusercontent.com/madewithml/lessons/master/data/news.csv\"\nresponse = urllib.request.urlopen(url)\nhtml = response.read()\nwith open(DATA_FILE, 'wb') as fp:\n fp.write(html)",
"_____no_output_____"
],
[
"# Load data\ndf = pd.read_csv(DATA_FILE, header=0)\nX = df[INPUT_FEATURE].values\ny = df[OUTPUT_FEATURE].values\ndf.head(5)",
"_____no_output_____"
]
],
[
[
"## Preprocessing",
"_____no_output_____"
]
],
[
[
"import re",
"_____no_output_____"
],
[
"LOWER = True\nFILTERS = r\"[!\\\"'#$%&()*\\+,-./:;<=>?@\\\\\\[\\]^_`{|}~]\"",
"_____no_output_____"
],
[
"def preprocess_texts(texts, lower, filters):\n preprocessed_texts = []\n for text in texts: \n if lower:\n text = ' '.join(word.lower() for word in text.split(\" \"))\n text = re.sub(r\"([.,!?])\", r\" \\1 \", text)\n text = re.sub(filters, r\"\", text)\n text = re.sub(' +', ' ', text) # remove multiple spaces\n text = text.strip()\n preprocessed_texts.append(text)\n return preprocessed_texts",
"_____no_output_____"
],
[
"original_text = X[0]\nX = np.array(preprocess_texts(X, lower=LOWER, filters=FILTERS))\nprint (f\"{original_text} → {X[0]}\")",
"Wall St. Bears Claw Back Into the Black (Reuters) → wall st bears claw back into the black reuters\n"
]
],
[
[
"## Split data",
"_____no_output_____"
]
],
[
[
"import collections\nfrom sklearn.model_selection import train_test_split",
"_____no_output_____"
],
[
"TRAIN_SIZE = 0.7\nVAL_SIZE = 0.15\nTEST_SIZE = 0.15\nSHUFFLE = True",
"_____no_output_____"
],
[
"def train_val_test_split(X, y, val_size, test_size, shuffle):\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=test_size, stratify=y, shuffle=shuffle)\n X_train, X_val, y_train, y_val = train_test_split(\n X_train, y_train, test_size=val_size, stratify=y_train, shuffle=shuffle)\n return X_train, X_val, X_test, y_train, y_val, y_test",
"_____no_output_____"
],
[
"# Create data splits\nX_train, X_val, X_test, y_train, y_val, y_test = train_val_test_split(\n X=X, y=y, val_size=VAL_SIZE, test_size=TEST_SIZE, shuffle=SHUFFLE)\nclass_counts = dict(collections.Counter(y))\nprint (f\"X_train: {X_train.shape}, y_train: {y_train.shape}\")\nprint (f\"X_val: {X_val.shape}, y_val: {y_val.shape}\")\nprint (f\"X_test: {X_test.shape}, y_test: {y_test.shape}\")\nprint (f\"{X_train[0]} → {y_train[0]}\")\nprint (f\"Classes: {class_counts}\")",
"X_train: (86700,), y_train: (86700,)\nX_val: (15300,), y_val: (15300,)\nX_test: (18000,), y_test: (18000,)\npga overhauls system for ryder cup points → Sports\nClasses: {'Business': 30000, 'Sci/Tech': 30000, 'Sports': 30000, 'World': 30000}\n"
]
],
[
[
"# tokenizers.py",
"_____no_output_____"
],
[
"## Tokenizer",
"_____no_output_____"
]
],
[
[
"import json\nimport re",
"_____no_output_____"
],
[
"SEPARATOR = ' ' # word level",
"_____no_output_____"
],
[
"class Tokenizer(object):\n def __init__(self, separator, pad_token='<PAD>', oov_token='<UNK>',\n token_to_index={'<PAD>': 0, '<UNK>': 1}):\n self.separator = separator\n self.oov_token = oov_token\n self.token_to_index = token_to_index\n self.index_to_token = {v: k for k, v in self.token_to_index.items()}\n\n def __len__(self):\n return len(self.token_to_index)\n \n def __str__(self):\n return f\"<Tokenizer(num_tokens={len(self)})>\"\n\n def fit_on_texts(self, texts):\n for text in texts:\n for token in text.split(self.separator):\n if token not in self.token_to_index:\n index = len(self)\n self.token_to_index[token] = index\n self.index_to_token[index] = token\n return self\n\n def texts_to_sequences(self, texts):\n sequences = []\n for text in texts:\n sequence = []\n for token in text.split(self.separator):\n sequence.append(self.token_to_index.get(\n token, self.token_to_index[self.oov_token]))\n sequences.append(sequence)\n return sequences\n \n def sequences_to_texts(self, sequences):\n texts = []\n for sequence in sequences:\n text = []\n for index in sequence:\n text.append(self.index_to_token.get(index, self.oov_token))\n texts.append(self.separator.join([token for token in text]))\n return texts\n\n def save(self, fp):\n with open(fp, 'w') as fp:\n contents = {\n 'separator': self.separator,\n 'oov_token': self.oov_token,\n 'token_to_index': self.token_to_index\n }\n json.dump(contents, fp, indent=4, sort_keys=False)\n\n @classmethod\n def load(cls, fp):\n with open(fp, 'r') as fp:\n kwargs = json.load(fp=fp)\n return cls(**kwargs)",
"_____no_output_____"
],
[
"# Input vectorizer\nX_tokenizer = Tokenizer(separator=SEPARATOR)\nX_tokenizer.fit_on_texts(texts=X_train)\nvocab_size = len(X_tokenizer)\nprint (X_tokenizer)",
"<Tokenizer(num_tokens=35635)>\n"
],
[
"# Convert text to sequence of tokens\noriginal_text = X_train[0]\nX_train = np.array(X_tokenizer.texts_to_sequences(X_train))\nX_val = np.array(X_tokenizer.texts_to_sequences(X_val))\nX_test = np.array(X_tokenizer.texts_to_sequences(X_test))\npreprocessed_text = X_tokenizer.sequences_to_texts([X_train[0]])\nprint (f\"{original_text} \\n\\t→ {preprocessed_text} \\n\\t→ {X_train[0]}\")",
"pga overhauls system for ryder cup points \n\t→ ['pga overhauls system for ryder cup points'] \n\t→ [2, 3, 4, 5, 6, 7, 8]\n"
],
[
"# Save tokenizer\nX_tokenizer.save(fp='X_tokenizer.json')",
"_____no_output_____"
],
[
"# Load tokenizer\nX_tokenizer = Tokenizer.load(fp='X_tokenizer.json')\nprint (X_tokenizer)",
"<Tokenizer(num_tokens=35635)>\n"
]
],
[
[
"## Label Encoder",
"_____no_output_____"
]
],
[
[
"class LabelEncoder(object):\n def __init__(self, class_to_index={}):\n self.class_to_index = class_to_index\n self.index_to_class = {v: k for k, v in self.class_to_index.items()}\n self.classes = list(self.class_to_index.keys())\n\n def __len__(self):\n return len(self.class_to_index)\n\n def __str__(self):\n return f\"<LabelEncoder(num_classes={len(self)})>\"\n\n def fit(self, y_train):\n for i, class_ in enumerate(np.unique(y_train)):\n self.class_to_index[class_] = i\n self.index_to_class = {v: k for k, v in self.class_to_index.items()}\n self.classes = list(self.class_to_index.keys())\n return self\n \n def transform(self, y):\n return np.array([self.class_to_index[class_] for class_ in y])\n\n def decode(self, index):\n return self.index_to_class.get(index, None)\n \n def save(self, fp):\n with open(fp, 'w') as fp:\n contents = {\n 'class_to_index': self.class_to_index\n }\n json.dump(contents, fp, indent=4, sort_keys=False)\n\n @classmethod\n def load(cls, fp):\n with open(fp, 'r') as fp:\n kwargs = json.load(fp=fp)\n return cls(**kwargs)",
"_____no_output_____"
],
[
"# Output vectorizer\ny_tokenizer = LabelEncoder()",
"_____no_output_____"
],
[
"# Fit on train data\ny_tokenizer = y_tokenizer.fit(y_train)\nprint (y_tokenizer)\nclasses = y_tokenizer.classes\nprint (f\"classes: {classes}\")",
"<LabelEncoder(num_classes=4)>\nclasses: ['Business', 'Sci/Tech', 'Sports', 'World']\n"
],
[
"# Convert labels to tokens\nclass_ = y_train[0]\ny_train = y_tokenizer.transform(y_train)\ny_val = y_tokenizer.transform(y_val)\ny_test = y_tokenizer.transform(y_test)\nprint (f\"{class_} → {y_train[0]}\")",
"Sports → 2\n"
],
[
"# Class weights\ncounts = np.bincount(y_train)\nclass_weights = {i: 1.0/count for i, count in enumerate(counts)}\nprint (f\"class counts: {counts},\\nclass weights: {class_weights}\")",
"class counts: [21675 21675 21675 21675],\nclass weights: {0: 4.61361014994233e-05, 1: 4.61361014994233e-05, 2: 4.61361014994233e-05, 3: 4.61361014994233e-05}\n"
],
[
"# Save label encoder\ny_tokenizer.save(fp='y_tokenizer.json')",
"_____no_output_____"
],
[
"# Load label encoder\ny_tokenizer = LabelEncoder.load(fp='y_tokenizer.json')\nprint (y_tokenizer)",
"<LabelEncoder(num_classes=4)>\n"
]
],
[
[
"# datasets.py",
"_____no_output_____"
]
],
[
[
"import math\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import Dataset\nfrom torch.utils.data import DataLoader",
"_____no_output_____"
],
[
"BATCH_SIZE = 128\nFILTER_SIZES = [2, 3, 4]",
"_____no_output_____"
],
[
"# Set seed for reproducibility\ntorch.manual_seed(SEED)\ntorch.cuda.manual_seed(SEED)\ntorch.cuda.manual_seed_all(SEED) # multi-GPU.\ntorch.backends.cudnn.benchmark = False\ntorch.backends.cudnn.deterministic = True",
"_____no_output_____"
],
[
"USE_CUDA = True\nDEVICE = torch.device('cuda' if (torch.cuda.is_available() and USE_CUDA) else 'cpu')\nprint (DEVICE)",
"cuda\n"
]
],
[
[
"## Pad",
"_____no_output_____"
]
],
[
[
"def pad_sequences(X, max_seq_len):\n sequences = np.zeros((len(X), max_seq_len))\n for i, sequence in enumerate(X):\n sequences[i][:len(sequence)] = sequence\n return sequences ",
"_____no_output_____"
],
[
"# Pad sequences\ninputs = [[1,2,3], [1,2,3,4], [1,2]]\nmax_seq_len = max(len(x) for x in inputs)\npadded_inputs = pad_sequences(X=inputs, max_seq_len=max_seq_len)\nprint (padded_inputs.shape)\nprint (padded_inputs)",
"(3, 4)\n[[1. 2. 3. 0.]\n [1. 2. 3. 4.]\n [1. 2. 0. 0.]]\n"
]
],
[
[
"## Dataset",
"_____no_output_____"
]
],
[
[
"class TextDataset(Dataset):\n def __init__(self, X, y, batch_size, max_filter_size):\n self.X = X\n self.y = y\n self.batch_size = batch_size\n self.max_filter_size = max_filter_size\n\n def __len__(self):\n return len(self.y)\n\n def __str__(self):\n return f\"<Dataset(N={len(self)}, batch_size={self.batch_size}, num_batches={self.get_num_batches()})>\"\n\n def __getitem__(self, index):\n X = self.X[index]\n y = self.y[index]\n return X, y\n\n def get_num_batches(self):\n return math.ceil(len(self)/self.batch_size)\n\n def collate_fn(self, batch):\n \"\"\"Processing on a batch.\"\"\"\n # Get inputs\n X = np.array(batch)[:, 0]\n y = np.array(batch)[:, 1]\n\n # Pad inputs\n max_seq_len = max(self.max_filter_size, max([len(x) for x in X]))\n X = pad_sequences(X=X, max_seq_len=max_seq_len)\n\n return X, y\n\n def generate_batches(self, shuffle=False, drop_last=False):\n dataloader = DataLoader(dataset=self, batch_size=self.batch_size, \n collate_fn=self.collate_fn, shuffle=shuffle, \n drop_last=drop_last, pin_memory=True)\n for (X, y) in dataloader:\n X = torch.LongTensor(X.astype(np.int32))\n y = torch.LongTensor(y.astype(np.int32))\n yield X, y",
"_____no_output_____"
],
[
"# Create datasets\ntrain_set = TextDataset(X=X_train, y=y_train, batch_size=BATCH_SIZE, max_filter_size=max(FILTER_SIZES))\nval_set = TextDataset(X=X_val, y=y_val, batch_size=BATCH_SIZE, max_filter_size=max(FILTER_SIZES))\ntest_set = TextDataset(X=X_test, y=y_test, batch_size=BATCH_SIZE, max_filter_size=max(FILTER_SIZES))\nprint (train_set)\nprint (train_set[0])",
"<Dataset(N=86700, batch_size=128, num_batches=678)>\n([2, 3, 4, 5, 6, 7, 8], 2)\n"
],
[
"# Generate batch\nbatch_X, batch_y = next(iter(test_set.generate_batches()))\nprint (batch_X.shape)\nprint (batch_y.shape)",
"torch.Size([128, 13])\ntorch.Size([128])\n"
]
],
[
[
"# utils.py",
"_____no_output_____"
],
[
"## Embeddings",
"_____no_output_____"
]
],
[
[
"from io import BytesIO\nfrom urllib.request import urlopen\nfrom zipfile import ZipFile",
"_____no_output_____"
],
[
"EMBEDDING_DIM = 100",
"_____no_output_____"
],
[
"def load_glove_embeddings(embeddings_file):\n \"\"\"Load embeddings from a file.\"\"\"\n embeddings = {}\n with open(embeddings_file, \"r\") as fp:\n for index, line in enumerate(fp):\n values = line.split()\n word = values[0]\n embedding = np.asarray(values[1:], dtype='float32')\n embeddings[word] = embedding\n return embeddings",
"_____no_output_____"
],
[
"def make_embeddings_matrix(embeddings, token_to_index, embedding_dim):\n \"\"\"Create embeddings matrix to use in Embedding layer.\"\"\"\n embedding_matrix = np.zeros((len(token_to_index), embedding_dim))\n for word, i in token_to_index.items():\n embedding_vector = embeddings.get(word)\n if embedding_vector is not None:\n embedding_matrix[i] = embedding_vector\n return embedding_matrix",
"_____no_output_____"
],
[
"# Unzip the file (may take ~3-5 minutes)\nresp = urlopen('http://nlp.stanford.edu/data/glove.6B.zip')\nzipfile = ZipFile(BytesIO(resp.read()))\nzipfile.namelist()",
"_____no_output_____"
],
[
"# Write embeddings to file\nembeddings_file = 'glove.6B.{0}d.txt'.format(EMBEDDING_DIM)\nzipfile.extract(embeddings_file)\n!ls",
"glove.6B.100d.txt news.csv sample_data X_tokenizer.json y_tokenizer.json\n"
],
[
"# Create embeddings\nembeddings_file = 'glove.6B.{0}d.txt'.format(EMBEDDING_DIM)\nglove_embeddings = load_glove_embeddings(embeddings_file=embeddings_file)\nembedding_matrix = make_embeddings_matrix(\n embeddings=glove_embeddings, token_to_index=X_tokenizer.token_to_index, \n embedding_dim=EMBEDDING_DIM)\nprint (embedding_matrix.shape)",
"(35635, 100)\n"
]
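,
[
"# Illustrative sketch (not part of the original notebook): check how much of\n# our vocabulary is covered by the pretrained GloVe vectors; tokens without a\n# pretrained vector keep the zero rows initialized above.\ncovered = sum(1 for token in X_tokenizer.token_to_index if token in glove_embeddings)\nprint(f'{covered}/{len(X_tokenizer)} tokens ({100.*covered/len(X_tokenizer):.1f}%) have GloVe vectors')",
"_____no_output_____"
]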
],
[
[
"# model.py",
"_____no_output_____"
],
[
"## Model",
"_____no_output_____"
]
],
[
[
"import torch.nn.functional as F",
"_____no_output_____"
],
[
"NUM_FILTERS = 50\nHIDDEN_DIM = 128\nDROPOUT_P = 0.1",
"_____no_output_____"
],
[
"class TextCNN(nn.Module):\n def __init__(self, embedding_dim, vocab_size, num_filters, filter_sizes, \n hidden_dim, dropout_p, num_classes, pretrained_embeddings=None, \n freeze_embeddings=False, padding_idx=0):\n super(TextCNN, self).__init__()\n\n # Initialize embeddings\n if pretrained_embeddings is None:\n self.embeddings = nn.Embedding(\n embedding_dim=embedding_dim, num_embeddings=vocab_size, \n padding_idx=padding_idx)\n else:\n pretrained_embeddings = torch.from_numpy(pretrained_embeddings).float()\n self.embeddings = nn.Embedding(\n embedding_dim=embedding_dim, num_embeddings=vocab_size, \n padding_idx=padding_idx, _weight=pretrained_embeddings)\n \n # Freeze embeddings or not\n if freeze_embeddings:\n self.embeddings.weight.requires_grad = False\n \n # Conv weights\n self.filter_sizes = filter_sizes\n self.conv = nn.ModuleList(\n [nn.Conv1d(in_channels=embedding_dim, \n out_channels=num_filters, \n kernel_size=f) for f in filter_sizes])\n \n # FC weights\n self.dropout = nn.Dropout(dropout_p)\n self.fc1 = nn.Linear(num_filters*len(filter_sizes), hidden_dim)\n self.fc2 = nn.Linear(hidden_dim, num_classes)\n\n def forward(self, x_in, channel_first=False):\n \n # Embed\n x_in = self.embeddings(x_in)\n if not channel_first:\n x_in = x_in.transpose(1, 2) # (N, channels, sequence length)\n \n # Conv + pool\n z = []\n conv_outputs = [] # for interpretability\n max_seq_len = x_in.shape[2]\n for i, f in enumerate(self.filter_sizes):\n # `SAME` padding\n padding_left = int((self.conv[i].stride[0]*(max_seq_len-1) - max_seq_len + self.filter_sizes[i])/2)\n padding_right = int(math.ceil((self.conv[i].stride[0]*(max_seq_len-1) - max_seq_len + self.filter_sizes[i])/2))\n\n # Conv + pool\n _z = self.conv[i](F.pad(x_in, (padding_left, padding_right)))\n conv_outputs.append(_z)\n _z = F.max_pool1d(_z, _z.size(2)).squeeze(2)\n z.append(_z)\n \n # Concat conv outputs\n z = torch.cat(z, 1)\n\n # FC layers\n z = self.fc1(z)\n z = self.dropout(z)\n logits = self.fc2(z)\n\n return conv_outputs, logits",
"_____no_output_____"
],
[
"# Initialize model\nmodel = TextCNN(embedding_dim=EMBEDDING_DIM,\n vocab_size=vocab_size,\n num_filters=NUM_FILTERS,\n filter_sizes=FILTER_SIZES,\n hidden_dim=HIDDEN_DIM,\n dropout_p=DROPOUT_P,\n num_classes=len(classes),\n pretrained_embeddings=embedding_matrix,\n freeze_embeddings=False).to(DEVICE)\nprint (model.named_parameters)",
"<bound method Module.named_parameters of TextCNN(\n (embeddings): Embedding(35635, 100, padding_idx=0)\n (conv): ModuleList(\n (0): Conv1d(100, 50, kernel_size=(2,), stride=(1,))\n (1): Conv1d(100, 50, kernel_size=(3,), stride=(1,))\n (2): Conv1d(100, 50, kernel_size=(4,), stride=(1,))\n )\n (dropout): Dropout(p=0.1, inplace=False)\n (fc1): Linear(in_features=150, out_features=128, bias=True)\n (fc2): Linear(in_features=128, out_features=4, bias=True)\n)>\n"
]
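,
[
"# Illustrative sketch (not part of the original notebook): count trainable\n# parameters as a quick sanity check on model size.\nnum_params = sum(p.numel() for p in model.parameters() if p.requires_grad)\nprint(f'{num_params:,} trainable parameters')",
"_____no_output_____"
]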
],
[
[
"# train.py",
"_____no_output_____"
],
[
"## Training",
"_____no_output_____"
]
],
[
[
"from pathlib import Path\nfrom torch.optim import Adam\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\nfrom torch.utils.tensorboard import SummaryWriter\n%load_ext tensorboard",
"The tensorboard extension is already loaded. To reload it, use:\n %reload_ext tensorboard\n"
],
[
"LEARNING_RATE = 1e-4\nPATIENCE = 3\nNUM_EPOCHS = 100",
"_____no_output_____"
],
[
"def train_step(model, device, dataset, optimizer):\n \"\"\"Train step.\"\"\"\n # Set model to train mode\n model.train()\n train_loss = 0.\n correct = 0\n\n # Iterate over train batches\n for i, (X, y) in enumerate(dataset.generate_batches()):\n\n # Set device\n X, y = X.to(device), y.to(device)\n\n # Reset gradients\n optimizer.zero_grad()\n\n # Forward pass\n _, logits = model(X)\n\n # Define loss\n loss = F.cross_entropy(logits, y)\n\n # Backward pass\n loss.backward()\n\n # Update weights\n optimizer.step()\n\n # Metrics\n y_pred = logits.max(dim=1)[1] \n correct += torch.eq(y_pred, y).sum().item()\n train_loss += (loss.item() - train_loss) / (i + 1)\n\n train_acc = 100. * correct / len(dataset)\n return train_loss, train_acc",
"_____no_output_____"
],
[
"def test_step(model, device, dataset):\n \"\"\"Validation or test step.\"\"\"\n # Set model to eval mode\n model.eval()\n loss = 0.\n correct = 0\n y_preds = []\n y_targets = []\n\n # Iterate over val batches\n with torch.no_grad():\n for i, (X, y) in enumerate(dataset.generate_batches()):\n\n # Set device\n X, y = X.to(device), y.to(device)\n\n # Forward pass\n _, logits = model(X)\n \n # Metrics\n loss += F.cross_entropy(logits, y, reduction='sum').item()\n y_pred = logits.max(dim=1)[1] \n correct += torch.eq(y_pred, y).sum().item()\n\n # Outputs\n y_preds.extend(y_pred.cpu().numpy())\n y_targets.extend(y.cpu().numpy())\n\n loss /= len(dataset)\n accuracy = 100. * correct / len(dataset)\n return y_preds, y_targets, loss, accuracy",
"_____no_output_____"
],
[
"def train(model, optimizer, scheduler, \n train_set, val_set, test_set, writer):\n # Epochs\n best_val_loss = np.inf\n for epoch in range(NUM_EPOCHS):\n # Steps\n train_loss, train_acc = train_step(model, DEVICE, train_set, optimizer)\n _, _, val_loss, val_acc = test_step(model, DEVICE, val_set)\n\n # Metrics\n print (f\"Epoch: {epoch} | train_loss: {train_loss:.2f}, train_acc: {train_acc:.1f}, val_loss: {val_loss:.2f}, val_acc: {val_acc:.1f}\")\n writer.add_scalar(tag='training loss', scalar_value=train_loss, global_step=epoch)\n writer.add_scalar(tag='training accuracy', scalar_value=train_acc, global_step=epoch)\n writer.add_scalar(tag='validation loss', scalar_value=val_loss, global_step=epoch)\n writer.add_scalar(tag='validation accuracy', scalar_value=val_acc, global_step=epoch)\n\n # Adjust learning rate\n scheduler.step(val_loss)\n\n # Early stopping\n if val_loss < best_val_loss:\n best_val_loss = val_loss\n patience = PATIENCE # reset patience\n torch.save(model.state_dict(), MODEL_PATH)\n else:\n patience -= 1\n if not patience: # 0\n print (\"Stopping early!\")\n break",
"_____no_output_____"
],
[
"# Optimizer\noptimizer = Adam(model.parameters(), lr=LEARNING_RATE) \nscheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.1, patience=3)",
"_____no_output_____"
],
[
"# Path to save model\nMODEL_NAME = 'TextCNN'\nMODEL_PATH = Path(f'models/{MODEL_NAME}.h5')\nPath(MODEL_PATH.parent).mkdir(parents=True, exist_ok=True)",
"_____no_output_____"
],
[
"# TensorBoard writer\nlog_dir = f'tensorboard/{MODEL_NAME}'\n!rm -rf {log_dir} # remove if it already exists\nwriter = SummaryWriter(log_dir=log_dir)",
"_____no_output_____"
],
[
"# Training\ntrain(model, optimizer, scheduler, \n train_set, val_set, test_set, writer)",
"Epoch: 0 | train_loss: 0.68, train_acc: 78.2, val_loss: 0.49, val_acc: 82.7\nEpoch: 1 | train_loss: 0.44, train_acc: 84.6, val_loss: 0.44, val_acc: 84.6\nEpoch: 2 | train_loss: 0.40, train_acc: 86.3, val_loss: 0.42, val_acc: 85.5\nEpoch: 3 | train_loss: 0.36, train_acc: 87.4, val_loss: 0.40, val_acc: 86.1\nEpoch: 4 | train_loss: 0.34, train_acc: 88.4, val_loss: 0.39, val_acc: 86.4\nEpoch: 5 | train_loss: 0.31, train_acc: 89.2, val_loss: 0.39, val_acc: 86.6\nEpoch: 6 | train_loss: 0.29, train_acc: 90.0, val_loss: 0.38, val_acc: 86.7\nEpoch: 7 | train_loss: 0.27, train_acc: 90.8, val_loss: 0.38, val_acc: 86.8\nEpoch: 8 | train_loss: 0.25, train_acc: 91.6, val_loss: 0.38, val_acc: 86.9\nEpoch: 9 | train_loss: 0.23, train_acc: 92.3, val_loss: 0.38, val_acc: 86.9\nEpoch: 10 | train_loss: 0.21, train_acc: 93.1, val_loss: 0.39, val_acc: 86.8\nStopping early!\n"
],
[
"%tensorboard --logdir {log_dir}",
"_____no_output_____"
]
],
[
[
"## Evaluation",
"_____no_output_____"
]
],
[
[
"import io\nimport itertools\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import precision_recall_fscore_support",
"_____no_output_____"
],
[
"def plot_confusion_matrix(y_pred, y_target, classes, cmap=plt.cm.Blues):\n \"\"\"Plot a confusion matrix using ground truth and predictions.\"\"\"\n # Confusion matrix\n cm = confusion_matrix(y_target, y_pred)\n cm_norm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n\n # Figure\n fig = plt.figure()\n ax = fig.add_subplot(111)\n cax = ax.matshow(cm, cmap=plt.cm.Blues)\n fig.colorbar(cax)\n\n # Axis\n plt.title(\"Confusion matrix\")\n plt.ylabel(\"True label\")\n plt.xlabel(\"Predicted label\")\n ax.set_xticklabels([''] + classes)\n ax.set_yticklabels([''] + classes)\n ax.xaxis.set_label_position('bottom') \n ax.xaxis.tick_bottom()\n\n # Values\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, f\"{cm[i, j]:d} ({cm_norm[i, j]*100:.1f}%)\",\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n # Display\n plt.show()",
"_____no_output_____"
],
[
"def get_performance(y_pred, y_target, classes):\n \"\"\"Per-class performance metrics. \"\"\"\n performance = {'overall': {}, 'class': {}}\n metrics = precision_recall_fscore_support(y_target, y_pred)\n\n # Overall performance\n performance['overall']['precision'] = np.mean(metrics[0])\n performance['overall']['recall'] = np.mean(metrics[1])\n performance['overall']['f1'] = np.mean(metrics[2])\n performance['overall']['num_samples'] = np.float64(np.sum(metrics[3]))\n\n # Per-class performance\n for i in range(len(classes)):\n performance['class'][classes[i]] = {\n \"precision\": metrics[0][i],\n \"recall\": metrics[1][i],\n \"f1\": metrics[2][i],\n \"num_samples\": np.float64(metrics[3][i])\n }\n\n return performance",
"_____no_output_____"
],
[
"# Test\ny_preds, y_targets, test_loss, test_acc = test_step(model, DEVICE, test_set)\nprint (f\"test_loss: {test_loss:.2f}, test_acc: {test_acc:.1f}\")",
"test_loss: 0.56, test_acc: 85.8\n"
],
[
"# Class performance\nperformance = get_performance(y_preds, y_targets, classes)\nprint (json.dumps(performance, indent=4))",
"{\n \"overall\": {\n \"precision\": 0.8588907674416577,\n \"recall\": 0.8583333333333333,\n \"f1\": 0.8584737440288595,\n \"num_samples\": 18000.0\n },\n \"class\": {\n \"Business\": {\n \"precision\": 0.8334845735027223,\n \"recall\": 0.8164444444444444,\n \"f1\": 0.8248765154916928,\n \"num_samples\": 4500.0\n },\n \"Sci/Tech\": {\n \"precision\": 0.8220540540540541,\n \"recall\": 0.8448888888888889,\n \"f1\": 0.8333150684931507,\n \"num_samples\": 4500.0\n },\n \"Sports\": {\n \"precision\": 0.9189374856881154,\n \"recall\": 0.8917777777777778,\n \"f1\": 0.9051539415811436,\n \"num_samples\": 4500.0\n },\n \"World\": {\n \"precision\": 0.8610869565217392,\n \"recall\": 0.8802222222222222,\n \"f1\": 0.8705494505494505,\n \"num_samples\": 4500.0\n }\n }\n}\n"
],
[
"# Confusion matrix\nplt.rcParams[\"figure.figsize\"] = (7,7)\nplot_confusion_matrix(y_preds, y_targets, classes)\nprint (classification_report(y_targets, y_preds))",
"_____no_output_____"
]
],
[
[
"# inference.py",
"_____no_output_____"
],
[
"## Load model",
"_____no_output_____"
]
],
[
[
"# Load model\nmodel = TextCNN(embedding_dim=EMBEDDING_DIM,\n vocab_size=vocab_size,\n num_filters=NUM_FILTERS,\n filter_sizes=FILTER_SIZES,\n hidden_dim=HIDDEN_DIM,\n dropout_p=DROPOUT_P,\n num_classes=len(classes),\n pretrained_embeddings=embedding_matrix,\n freeze_embeddings=False).to(DEVICE)\nmodel.load_state_dict(torch.load(MODEL_PATH))\nmodel.eval()",
"_____no_output_____"
]
],
[
[
"## Inference",
"_____no_output_____"
]
],
[
[
"import collections",
"_____no_output_____"
],
[
"def get_probability_distribution(y_prob, classes):\n results = {}\n for i, class_ in enumerate(classes):\n results[class_] = np.float64(y_prob[i])\n sorted_results = {k: v for k, v in sorted(\n results.items(), key=lambda item: item[1], reverse=True)}\n return sorted_results",
"_____no_output_____"
],
[
"def get_top_n_grams(tokens, conv_outputs, filter_sizes):\n # Process conv outputs for each unique filter size\n n_grams = {}\n for i, filter_size in enumerate(filter_sizes):\n \n # Identify most important n-gram (excluding last token)\n popular_indices = collections.Counter([np.argmax(conv_output) \\\n for conv_output in conv_outputs[filter_size]])\n \n # Get corresponding text\n start = popular_indices.most_common(1)[-1][0]\n n_gram = \" \".join([token for token in tokens[start:start+filter_size]])\n n_grams[filter_size] = n_gram\n\n return n_grams",
"_____no_output_____"
],
[
"# Inputs\ntexts = [\"The Wimbledon tennis tournament starts next week!\",\n \"The President signed in the new law.\"]\ntexts = preprocess_texts(texts, lower=LOWER, filters=FILTERS)\nX_infer = np.array(X_tokenizer.texts_to_sequences(texts))\nprint (f\"{texts[0]} \\n\\t→ {X_tokenizer.sequences_to_texts(X_infer)[0]} \\n\\t→ {X_infer[0]}\")\ny_filler = np.array([0]*len(texts))",
"the wimbledon tennis tournament starts next week \n\t→ the wimbledon tennis tournament starts next week \n\t→ [ 39 20635 588 622 785 551 576]\n"
],
[
"# Dataset\ninfer_set = TextDataset(X=X_infer, y=y_filler, batch_size=BATCH_SIZE, \n max_filter_size=max(FILTER_SIZES))",
"_____no_output_____"
],
[
"# Iterate over infer batches\nconv_outputs = collections.defaultdict(list)\ny_probs = []\nwith torch.no_grad():\n for i, (X, y) in enumerate(infer_set.generate_batches()):\n \n # Set device\n X, y = X.to(DEVICE), y.to(DEVICE)\n\n # Forward pass\n conv_outputs_, logits = model(X)\n y_prob = F.softmax(logits, dim=1)\n\n # Save probabilities\n y_probs.extend(y_prob.cpu().numpy())\n for i, filter_size in enumerate(FILTER_SIZES):\n conv_outputs[filter_size].extend(conv_outputs_[i].cpu().numpy())",
"_____no_output_____"
],
[
"# Results\nresults = []\nfor index in range(len(X_infer)):\n results.append({\n 'raw_input': texts[index],\n 'preprocessed_input': X_tokenizer.sequences_to_texts([X_infer[index]])[0],\n 'probabilities': get_probability_distribution(y_prob[index], y_tokenizer.classes),\n 'top_n_grams': get_top_n_grams(\n tokens=preprocessed_input.split(' '), \n conv_outputs={k:v[index] for k,v in conv_outputs.items()}, \n filter_sizes=FILTER_SIZES)})\nprint (json.dumps(results, indent=4))",
"[\n {\n \"raw_input\": \"the wimbledon tennis tournament starts next week\",\n \"preprocessed_input\": \"the wimbledon tennis tournament starts next week\",\n \"probabilities\": {\n \"Sports\": 0.9998615980148315,\n \"World\": 0.0001376205327687785,\n \"Business\": 7.324182433876558e-07,\n \"Sci/Tech\": 7.507998844857866e-08\n },\n \"top_n_grams\": {\n \"2\": \"tournament starts\",\n \"3\": \"the wimbledon tennis\",\n \"4\": \"tennis tournament starts next\"\n }\n },\n {\n \"raw_input\": \"the president signed in the new law\",\n \"preprocessed_input\": \"the president signed in the new law\",\n \"probabilities\": {\n \"World\": 0.6943650245666504,\n \"Sports\": 0.14958152174949646,\n \"Business\": 0.1257830113172531,\n \"Sci/Tech\": 0.03027038462460041\n },\n \"top_n_grams\": {\n \"2\": \"law\",\n \"3\": \"the president signed\",\n \"4\": \"the president signed in\"\n }\n }\n]\n"
]
],
[
[
"Use inferences to collect information how the model performs on your real world data and use it to improve it over time. \n- Use a probability threshold for the top class (ex. If the predicted class is less than 75%, send the inference for review).\n- Combine the above with Use probability thresholds for each class (ex. if the predicted class is `Sports` at 85% but that class's precision/recall is low, then send it for review but maybe you don't do this when the predicted class is `Sports` but above 90%.\n- If the preprocessed sentence has <UNK> tokens, send the inference for further review.\n- When latency is not an issue, use the n-grams to validate the prediction.",
"_____no_output_____"
],
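[
"A hedged sketch of the routing logic above (not the lesson's code: the 0.75/0.90 thresholds, the `send_for_review` stub, and the assumption that out-of-vocabulary words appear as `<UNK>` in `preprocessed_input` are all our own choices):\n\n```python\nREVIEW_THRESHOLD = 0.75  # assumed global threshold for the top class\nCLASS_THRESHOLDS = {'Sports': 0.90}  # assumed per-class override\n\ndef send_for_review(result):\n    # Stub: replace with your actual review queue\n    print('needs review:', result['raw_input'])\n\nfor result in results:\n    # `probabilities` is sorted descending, so the first item is the top class\n    top_class, top_prob = next(iter(result['probabilities'].items()))\n    threshold = CLASS_THRESHOLDS.get(top_class, REVIEW_THRESHOLD)\n    if top_prob < threshold or '<UNK>' in result['preprocessed_input']:\n        send_for_review(result)\n```",
"_____no_output_____"
],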
[
"Check out the `API` lesson to see how all of this comes together to create an ML service.",
"_____no_output_____"
],
[
"---\nShare and discover ML projects at <a href=\"https://madewithml.com/\">Made With ML</a>.\n\n<div align=\"left\">\n<a class=\"ai-header-badge\" target=\"_blank\" href=\"https://github.com/madewithml/lessons\"><img src=\"https://img.shields.io/github/stars/madewithml/lessons.svg?style=social&label=Star\"></a> \n<a class=\"ai-header-badge\" target=\"_blank\" href=\"https://www.linkedin.com/company/madewithml\"><img src=\"https://img.shields.io/badge/style--5eba00.svg?label=LinkedIn&logo=linkedin&style=social\"></a> \n<a class=\"ai-header-badge\" target=\"_blank\" href=\"https://twitter.com/madewithml\"><img src=\"https://img.shields.io/twitter/follow/madewithml.svg?label=Follow&style=social\"></a>\n</div>\n ",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
]
] |
d0ebea851e51b132bdc8c4fc33f781918ee33876 | 2,371 | ipynb | Jupyter Notebook | Breaking the records.ipynb | ayushsubedi/problem_solving | 3d9154545ff72095d19ff6a8c132a989380c2a0a | [
"MIT"
] | null | null | null | Breaking the records.ipynb | ayushsubedi/problem_solving | 3d9154545ff72095d19ff6a8c132a989380c2a0a | [
"MIT"
] | null | null | null | Breaking the records.ipynb | ayushsubedi/problem_solving | 3d9154545ff72095d19ff6a8c132a989380c2a0a | [
"MIT"
] | null | null | null | 20.982301 | 85 | 0.470687 | [
[
[
"# https://www.hackerrank.com/challenges/breaking-best-and-worst-records/problem",
"_____no_output_____"
]
],
[
[
"import math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the breakingRecords function below.\ndef breakingRecords(scores):\n min_ = max_ = scores[0]\n min_change = max_change = 0\n for score in scores:\n if (score<min_):\n min_ = score\n min_change += 1\n elif (score>max_):\n max_ = score\n max_change +=1\n return [max_change, min_change]\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n n = int(input())\n\n scores = list(map(int, input().rstrip().split()))\n\n result = breakingRecords(scores)\n\n fptr.write(' '.join(map(str, result)))\n fptr.write('\\n')\n\n fptr.close()",
"_____no_output_____"
],
[
"breakingRecords([10, 5, 20, 20, 4, 5, 2, 25, 1])",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
d0ebeed29ec105011e1e050934b8251db2d3e549 | 2,648 | ipynb | Jupyter Notebook | notebooks/morphological_operations.ipynb | arp95/til_biomarker_ovarian_cancer | b4e9f8126a6468d547fe1935fc4a224b36703ebe | [
"MIT"
] | null | null | null | notebooks/morphological_operations.ipynb | arp95/til_biomarker_ovarian_cancer | b4e9f8126a6468d547fe1935fc4a224b36703ebe | [
"MIT"
] | null | null | null | notebooks/morphological_operations.ipynb | arp95/til_biomarker_ovarian_cancer | b4e9f8126a6468d547fe1935fc4a224b36703ebe | [
"MIT"
] | null | null | null | 23.22807 | 90 | 0.530589 | [
[
[
"# header files\nimport cv2\nimport numpy as np",
"_____no_output_____"
],
[
"# read image and get binary image\nimage = cv2.imread(\"../code/TCGA-23-1123_epistroma_mask_15000_6000.png\", 0)\nimage = cv2.GaussianBlur(image, (3, 3), 0)\nimage = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY)[1]\nimage_inv = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]",
"_____no_output_____"
],
[
"# filter using contour area and remove small noise\ncnts = cv2.findContours(image_inv, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\ncnts = cnts[0] if len(cnts) == 2 else cnts[1]\nfor c in cnts:\n area = cv2.contourArea(c)\n if area < 5500:\n cv2.drawContours(image_inv, [c], -1, (0, 0, 0), -1)",
"_____no_output_____"
],
[
"# filter using contour area and remove small noise\noutput_mask = 255 - image_inv\ncnts = cv2.findContours(output_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\ncnts = cnts[0] if len(cnts) == 2 else cnts[1]\nfor c in cnts:\n area = cv2.contourArea(c)\n if area < 5500:\n cv2.drawContours(output_mask, [c], -1, (0, 0, 0), -1)",
"_____no_output_____"
],
[
"cv2.imwrite(\"../code/sample_4.png\", output_mask)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code"
]
] |
d0ebfbf99bd2f242412c33fb92324d3d4d93590d | 182,266 | ipynb | Jupyter Notebook | d2l-en/tensorflow/chapter_multilayer-perceptrons/underfit-overfit.ipynb | gr8khan/d2lai | 7c10432f38c80e86978cd075d0024902b47842a0 | [
"MIT"
] | 1 | 2021-05-08T13:21:16.000Z | 2021-05-08T13:21:16.000Z | d2l-en/tensorflow/chapter_multilayer-perceptrons/underfit-overfit.ipynb | gr8khan/d2lai | 7c10432f38c80e86978cd075d0024902b47842a0 | [
"MIT"
] | null | null | null | d2l-en/tensorflow/chapter_multilayer-perceptrons/underfit-overfit.ipynb | gr8khan/d2lai | 7c10432f38c80e86978cd075d0024902b47842a0 | [
"MIT"
] | null | null | null | 43.656527 | 493 | 0.49318 | [
[
[
"# Model Selection, Underfitting, and Overfitting\n:label:`sec_model_selection`\n\nAs machine learning scientists,\nour goal is to discover *patterns*.\nBut how can we be sure that we have\ntruly discovered a *general* pattern\nand not simply memorized our data?\nFor example, imagine that we wanted to hunt\nfor patterns among genetic markers\nlinking patients to their dementia status,\nwhere the labels are drawn from the set\n$\\{\\text{dementia}, \\text{mild cognitive impairment}, \\text{healthy}\\}$.\nBecause each person's genes identify them uniquely\n(ignoring identical siblings),\nit is possible to memorize the entire dataset.\n\nWe do not want our model to say\n*\"That's Bob! I remember him! He has dementia!\"*\nThe reason why is simple.\nWhen we deploy the model in the future,\nwe will encounter patients\nthat the model has never seen before.\nOur predictions will only be useful\nif our model has truly discovered a *general* pattern.\n\nTo recapitulate more formally,\nour goal is to discover patterns\nthat capture regularities in the underlying population\nfrom which our training set was drawn.\nIf we are successful in this endeavor,\nthen we could successfully assess risk\neven for individuals that we have never encountered before.\nThis problem---how to discover patterns that *generalize*---is\nthe fundamental problem of machine learning.\n\nThe danger is that when we train models,\nwe access just a small sample of data.\nThe largest public image datasets contain\nroughly one million images.\nMore often, we must learn from only thousands\nor tens of thousands of data examples.\nIn a large hospital system, we might access\nhundreds of thousands of medical records.\nWhen working with finite samples, we run the risk\nthat we might discover apparent associations\nthat turn out not to hold up when we collect more data.\n\nThe phenomenon of fitting our training data\nmore closely than we fit the underlying distribution is called *overfitting*, and the techniques used to combat overfitting are called *regularization*.\nIn the previous sections, you might have observed\nthis effect while experimenting with the Fashion-MNIST dataset.\nIf you altered the model structure or the hyperparameters during the experiment, you might have noticed that with enough neurons, layers, and training epochs, the model can eventually reach perfect accuracy on the training set, even as the accuracy on test data deteriorates.\n\n\n## Training Error and Generalization Error\n\nIn order to discuss this phenomenon more formally,\nwe need to differentiate between training error and generalization error.\nThe *training error* is the error of our model\nas calculated on the training dataset,\nwhile *generalization error* is the expectation of our model's error\nwere we to apply it to an infinite stream of additional data examples\ndrawn from the same underlying data distribution as our original sample.\n\nProblematically, we can never calculate the generalization error exactly.\nThat is because the stream of infinite data is an imaginary object.\nIn practice, we must *estimate* the generalization error\nby applying our model to an independent test set\nconstituted of a random selection of data examples\nthat were withheld from our training set.\n\nThe following three thought experiments\nwill help illustrate this situation better.\nConsider a college student trying to prepare for his final exam.\nA diligent student will strive to practice well\nand test his abilities using exams from previous years.\nNonetheless, 
doing well on past exams is no guarantee\nthat he will excel when it matters.\nFor instance, the student might try to prepare\nby rote learning the answers to the exam questions.\nThis requires the student to memorize many things.\nHe might even remember the answers for past exams perfectly.\nAnother student might prepare by trying to understand\nthe reasons for giving certain answers.\nIn most cases, the latter student will do much better.\n\nLikewise, consider a model that simply uses a lookup table to answer questions. If the set of allowable inputs is discrete and reasonably small, then perhaps after viewing *many* training examples, this approach would perform well. Still, this model has no ability to do better than random guessing when faced with examples that it has never seen before.\nIn reality, the input spaces are far too large to memorize the answers corresponding to every conceivable input. For example, consider the black and white $28\times28$ images. If each pixel can take one among $256$ grayscale values, then there are $256^{784}$ possible images. That means that there are far more low-resolution grayscale thumbnail-sized images than there are atoms in the universe. Even if we could encounter such data, we could never afford to store the lookup table.\n\nLast, consider the problem of trying\nto classify the outcomes of coin tosses (class 0: heads, class 1: tails)\nbased on some contextual features that might be available.\nSuppose that the coin is fair.\nNo matter what algorithm we come up with,\nthe generalization error will always be $\frac{1}{2}$.\nHowever, for most algorithms,\nwe should expect our training error to be considerably lower,\ndepending on the luck of the draw,\neven if we did not have any features!\nConsider the dataset {0, 1, 1, 1, 0, 1}.\nOur feature-less algorithm would have to fall back on always predicting\nthe *majority class*, which appears from our limited sample to be *1*.\nIn this case, the model that always predicts class 1\nwill incur an error of $\frac{1}{3}$,\nconsiderably better than our generalization error.\nAs we increase the amount of data,\nthe probability that the fraction of heads\nwill deviate significantly from $\frac{1}{2}$ diminishes,\nand our training error would come to match the generalization error.\n\n### Statistical Learning Theory\n\nSince generalization is the fundamental problem in machine learning,\nyou might not be surprised to learn\nthat many mathematicians and theorists have dedicated their lives\nto developing formal theories to describe this phenomenon.\nIn their [eponymous theorem](https://en.wikipedia.org/wiki/Glivenko%E2%80%93Cantelli_theorem), Glivenko and Cantelli\nderived the rate at which the training error\nconverges to the generalization error.\nIn a series of seminal papers, [Vapnik and Chervonenkis](https://en.wikipedia.org/wiki/Vapnik%E2%80%93Chervonenkis_theory)\nextended this theory to more general classes of functions.\nThis work laid the foundations of statistical learning theory.\n\n\nIn the standard supervised learning setting, which we have addressed up until now and will stick with throughout most of this book,\nwe assume that both the training data and the test data\nare drawn *independently* from *identical* distributions.\nThis is commonly called the *i.i.d. 
assumption*,\nwhich means that the process that samples our data has no memory.\nIn other words,\nthe second example drawn and the third drawn\nare no more correlated than the second and the two-millionth sample drawn.\n\nBeing a good machine learning scientist requires thinking critically,\nand already you should be poking holes in this assumption,\ncoming up with common cases where the assumption fails.\nWhat if we train a mortality risk predictor\non data collected from patients at UCSF Medical Center,\nand apply it on patients at Massachusetts General Hospital?\nThese distributions are simply not identical.\nMoreover, draws might be correlated in time.\nWhat if we are classifying the topics of Tweets?\nThe news cycle would create temporal dependencies\nin the topics being discussed, violating any assumptions of independence.\n\nSometimes we can get away with minor violations of the i.i.d. assumption\nand our models will continue to work remarkably well.\nAfter all, nearly every real-world application\ninvolves at least some minor violation of the i.i.d. assumption,\nand yet we have many useful tools for\nvarious applications such as\nface recognition,\nspeech recognition, and language translation.\n\nOther violations are sure to cause trouble.\nImagine, for example, if we try to train\na face recognition system by training it\nexclusively on university students\nand then want to deploy it as a tool\nfor monitoring geriatrics in a nursing home population.\nThis is unlikely to work well since college students\ntend to look considerably different from the elderly.\n\nIn subsequent chapters, we will discuss problems\narising from violations of the i.i.d. assumption.\nFor now, even taking the i.i.d. assumption for granted,\nunderstanding generalization is a formidable problem.\nMoreover, elucidating the precise theoretical foundations\nthat might explain why deep neural networks generalize as well as they do\ncontinues to vex the greatest minds in learning theory.\n\nWhen we train our models, we attempt to search for a function\nthat fits the training data as well as possible.\nIf the function is so flexible that it can catch on to spurious patterns\njust as easily as to true associations,\nthen it might perform *too well* without producing a model\nthat generalizes well to unseen data.\nThis is precisely what we want to avoid or at least control.\nMany of the techniques in deep learning are heuristics and tricks\naimed at guarding against overfitting.\n\n### Model Complexity\n\nWhen we have simple models and abundant data,\nwe expect the generalization error to resemble the training error.\nWhen we work with more complex models and fewer examples,\nwe expect the training error to go down but the generalization gap to grow.\nWhat precisely constitutes model complexity is a complex matter.\nMany factors govern whether a model will generalize well.\nFor example a model with more parameters might be considered more complex.\nA model whose parameters can take a wider range of values\nmight be more complex.\nOften with neural networks, we think of a model\nthat takes more training iterations as more complex,\nand one subject to *early stopping* (fewer training iterations) as less complex.\n\nIt can be difficult to compare the complexity among members\nof substantially different model classes\n(say, decision trees vs. 
neural networks).\nFor now, a simple rule of thumb is quite useful:\na model that can readily explain arbitrary facts\nis what statisticians view as complex,\nwhereas one that has only a limited expressive power\nbut still manages to explain the data well\nis probably closer to the truth.\nIn philosophy, this is closely related to Popper's\ncriterion of falsifiability\nof a scientific theory: a theory is good if it fits data\nand if there are specific tests that can be used to disprove it.\nThis is important since all statistical estimation is\n*post hoc*,\ni.e., we estimate after we observe the facts,\nhence vulnerable to the associated fallacy.\nFor now, we will put the philosophy aside and stick to more tangible issues.\n\nIn this section, to give you some intuition,\nwe will focus on a few factors that tend\nto influence the generalizability of a model class:\n\n1. The number of tunable parameters. When the number of tunable parameters, sometimes called the *degrees of freedom*, is large, models tend to be more susceptible to overfitting.\n1. The values taken by the parameters. When weights can take a wider range of values, models can be more susceptible to overfitting.\n1. The number of training examples. It is trivially easy to overfit a dataset containing only one or two examples even if your model is simple. But overfitting a dataset with millions of examples requires an extremely flexible model.\n\n## Model Selection\n\nIn machine learning, we usually select our final model\nafter evaluating several candidate models.\nThis process is called *model selection*.\nSometimes the models subject to comparison\nare fundamentally different in nature\n(say, decision trees vs. linear models).\nAt other times, we are comparing\nmembers of the same class of models\nthat have been trained with different hyperparameter settings.\n\nWith MLPs, for example,\nwe may wish to compare models with\ndifferent numbers of hidden layers,\ndifferent numbers of hidden units,\nand various choices of the activation functions\napplied to each hidden layer.\nIn order to determine the best among our candidate models,\nwe will typically employ a validation dataset.\n\n\n### Validation Dataset\n\nIn principle we should not touch our test set\nuntil after we have chosen all our hyperparameters.\nWere we to use the test data in the model selection process,\nthere is a risk that we might overfit the test data.\nThen we would be in serious trouble.\nIf we overfit our training data,\nthere is always the evaluation on test data to keep us honest.\nBut if we overfit the test data, how would we ever know?\n\n\nThus, we should never rely on the test data for model selection.\nAnd yet we cannot rely solely on the training data\nfor model selection either because\nwe cannot estimate the generalization error\non the very data that we use to train the model.\n\n\nIn practical applications, the picture gets muddier.\nWhile ideally we would only touch the test data once,\nto assess the very best model or to compare\na small number of models to each other,\nreal-world test data is seldom discarded after just one use.\nWe can seldom afford a new test set for each round of experiments.\n\nThe common practice to address this problem\nis to split our data three ways,\nincorporating a *validation dataset* (or *validation set*)\nin addition to the training and test datasets.\nThe result is a murky practice where the boundaries\nbetween validation and test data are worryingly ambiguous.\nUnless explicitly stated otherwise, in the 
experiments in this book\nwe are really working with what should rightly be called\ntraining data and validation data, with no true test sets.\nTherefore, the accuracy reported in each experiment of the book is really the validation accuracy and not a true test set accuracy.\n\n### $K$-Fold Cross-Validation\n\nWhen training data is scarce,\nwe might not even be able to afford to hold out\nenough data to constitute a proper validation set.\nOne popular solution to this problem is to employ\n$K$*-fold cross-validation*.\nHere, the original training data is split into $K$ non-overlapping subsets.\nThen model training and validation are executed $K$ times,\neach time training on $K-1$ subsets and validating\non a different subset (the one not used for training in that round).\nFinally, the training and validation errors are estimated\nby averaging over the results from the $K$ experiments.\n\n## Underfitting or Overfitting?\n\nWhen we compare the training and validation errors,\nwe want to be mindful of two common situations.\nFirst, we want to watch out for cases\nwhen our training error and validation error are both substantial\nbut there is a little gap between them.\nIf the model is unable to reduce the training error,\nthat could mean that our model is too simple\n(i.e., insufficiently expressive)\nto capture the pattern that we are trying to model.\nMoreover, since the *generalization gap*\nbetween our training and validation errors is small,\nwe have reason to believe that we could get away with a more complex model.\nThis phenomenon is known as *underfitting*.\n\nOn the other hand, as we discussed above,\nwe want to watch out for the cases\nwhen our training error is significantly lower\nthan our validation error, indicating severe *overfitting*.\nNote that overfitting is not always a bad thing.\nWith deep learning especially, it is well known\nthat the best predictive models often perform\nfar better on training data than on holdout data.\nUltimately, we usually care more about the validation error\nthan about the gap between the training and validation errors.\n\nWhether we overfit or underfit can depend\nboth on the complexity of our model\nand the size of the available training datasets,\ntwo topics that we discuss below.\n\n### Model Complexity\n\nTo illustrate some classical intuition\nabout overfitting and model complexity,\nwe give an example using polynomials.\nGiven training data consisting of a single feature $x$\nand a corresponding real-valued label $y$,\nwe try to find the polynomial of degree $d$\n\n$$\\hat{y}= \\sum_{i=0}^d x^i w_i$$\n\nto estimate the labels $y$.\nThis is just a linear regression problem\nwhere our features are given by the powers of $x$,\nthe model's weights are given by $w_i$,\nand the bias is given by $w_0$ since $x^0 = 1$ for all $x$.\nSince this is just a linear regression problem,\nwe can use the squared error as our loss function.\n\n\nA higher-order polynomial function is more complex\nthan a lower-order polynomial function,\nsince the higher-order polynomial has more parameters\nand the model function's selection range is wider.\nFixing the training dataset,\nhigher-order polynomial functions should always\nachieve lower (at worst, equal) training error\nrelative to lower degree polynomials.\nIn fact, whenever the data examples each have a distinct value of $x$,\na polynomial function with degree equal to the number of data examples\ncan fit the training set perfectly.\nWe visualize the relationship between polynomial degree\nand underfitting 
vs. overfitting in :numref:`fig_capacity_vs_error`.\n\n*(Figure: influence of model complexity on underfitting and overfitting.)*\n:label:`fig_capacity_vs_error`\n\n### Dataset Size\n\nThe other big consideration to bear in mind is the dataset size.\nFixing our model, the fewer samples we have in the training dataset,\nthe more likely (and more severely) we are to encounter overfitting.\nAs we increase the amount of training data,\nthe generalization error typically decreases.\nMoreover, in general, more data never hurt.\nFor a fixed task and data distribution,\nthere is typically a relationship between model complexity and dataset size.\nGiven more data, we might profitably attempt to fit a more complex model.\nAbsent sufficient data, simpler models may be more difficult to beat.\nFor many tasks, deep learning only outperforms linear models\nwhen many thousands of training examples are available.\nIn part, the current success of deep learning\nowes to the current abundance of massive datasets\ndue to Internet companies, cheap storage, connected devices,\nand the broad digitization of the economy.\n\n## Polynomial Regression\n\nWe can now explore these concepts interactively\nby fitting polynomials to data.\n",
"_____no_output_____"
]
],
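[
[
"A minimal sketch of the $K$-fold split described earlier (our own illustration, not the chapter's code; contiguous folds and `k=5` are assumptions):\n\n```python\nimport numpy as np\n\ndef k_fold_indices(n, k=5):\n    # Partition indices 0..n-1 into k non-overlapping validation folds\n    fold_sizes = np.full(k, n // k)\n    fold_sizes[:n % k] += 1  # spread the remainder over the first folds\n    indices = np.arange(n)\n    start = 0\n    for size in fold_sizes:\n        valid_idx = indices[start:start + size]\n        train_idx = np.concatenate([indices[:start], indices[start + size:]])\n        yield train_idx, valid_idx\n        start += size\n\n# Each round trains on k-1 folds and validates on the held-out fold;\n# the k training and validation errors are then averaged.\nfor train_idx, valid_idx in k_fold_indices(10, k=5):\n    print(train_idx, valid_idx)\n```",
"_____no_output_____"
]
],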
[
[
"from d2l import tensorflow as d2l\nimport tensorflow as tf\nimport numpy as np\nimport math",
"_____no_output_____"
]
],
[
[
"### Generating the Dataset\n\nFirst we need data. Given $x$, we will use the following cubic polynomial to generate the labels on training and test data:\n\n$$y = 5 + 1.2x - 3.4\\frac{x^2}{2!} + 5.6 \\frac{x^3}{3!} + \\epsilon \\text{ where }\n\\epsilon \\sim \\mathcal{N}(0, 0.1^2).$$\n\nThe noise term $\\epsilon$ obeys a normal distribution\nwith a mean of 0 and a standard deviation of 0.1.\nFor optimization, we typically want to avoid\nvery large values of gradients or losses.\nThis is why the *features*\nare rescaled from $x^i$ to $\\frac{x^i}{i!}$.\nIt allows us to avoid very large values for large exponents $i$.\nWe will synthesize 100 samples each for the training set and test set.\n",
"_____no_output_____"
]
],
[
[
"max_degree = 20 # Maximum degree of the polynomial\nn_train, n_test = 100, 100 # Training and test dataset sizes\ntrue_w = np.zeros(max_degree) # Allocate lots of empty space\ntrue_w[0:4] = np.array([5, 1.2, -3.4, 5.6])\n\nfeatures = np.random.normal(size=(n_train + n_test, 1))\nnp.random.shuffle(features)\npoly_features = np.power(features, np.arange(max_degree).reshape(1, -1))\nfor i in range(max_degree):\n poly_features[:, i] /= math.gamma(i + 1) # `gamma(n)` = (n-1)!\n# Shape of `labels`: (`n_train` + `n_test`,)\nlabels = np.dot(poly_features, true_w)\nlabels += np.random.normal(scale=0.1, size=labels.shape)",
"_____no_output_____"
]
],
[
[
"Again, monomials stored in `poly_features`\nare rescaled by the gamma function,\nwhere $\\Gamma(n)=(n-1)!$.\nTake a look at the first 2 samples from the generated dataset.\nThe value 1 is technically a feature,\nnamely the constant feature corresponding to the bias.\n",
"_____no_output_____"
]
],
[
[
"# Convert from NumPy ndarrays to tensors\ntrue_w, features, poly_features, labels = [tf.constant(x, dtype=\n tf.float32) for x in [true_w, features, poly_features, labels]]",
"_____no_output_____"
],
[
"features[:2], poly_features[:2, :], labels[:2]",
"_____no_output_____"
]
],
[
[
"### Training and Testing the Model\n\nLet us first implement a function to evaluate the loss on a given dataset.\n",
"_____no_output_____"
]
],
[
[
"def evaluate_loss(net, data_iter, loss): #@save\n \"\"\"Evaluate the loss of a model on the given dataset.\"\"\"\n metric = d2l.Accumulator(2) # Sum of losses, no. of examples\n for X, y in data_iter:\n l = loss(net(X), y)\n metric.add(tf.reduce_sum(l), tf.size(l).numpy())\n return metric[0] / metric[1]",
"_____no_output_____"
]
],
[
[
"Now define the training function.\n",
"_____no_output_____"
]
],
[
[
"def train(train_features, test_features, train_labels, test_labels,\n num_epochs=400):\n loss = tf.losses.MeanSquaredError()\n input_shape = train_features.shape[-1]\n # Switch off the bias since we already catered for it in the polynomial\n # features\n net = tf.keras.Sequential()\n net.add(tf.keras.layers.Dense(1, use_bias=False))\n batch_size = min(10, train_labels.shape[0])\n train_iter = d2l.load_array((train_features, train_labels), batch_size)\n test_iter = d2l.load_array((test_features, test_labels), batch_size,\n is_train=False)\n trainer = tf.keras.optimizers.SGD(learning_rate=.01)\n animator = d2l.Animator(xlabel='epoch', ylabel='loss', yscale='log',\n xlim=[1, num_epochs], ylim=[1e-3, 1e2],\n legend=['train', 'test'])\n for epoch in range(num_epochs):\n d2l.train_epoch_ch3(net, train_iter, loss, trainer)\n if epoch == 0 or (epoch + 1) % 20 == 0:\n animator.add(epoch + 1, (evaluate_loss(net, train_iter, loss),\n evaluate_loss(net, test_iter, loss)))\n print('weight:', net.get_weights()[0].T)",
"_____no_output_____"
]
],
[
[
"### Third-Order Polynomial Function Fitting (Normal)\n\nWe will begin by first using a third-order polynomial function, which is the same order as that of the data generation function.\nThe results show that this model's training and test losses can be both effectively reduced.\nThe learned model parameters are also close\nto the true values $w = [5, 1.2, -3.4, 5.6]$.\n",
"_____no_output_____"
]
],
[
[
"# Pick the first four dimensions, i.e., 1, x, x^2/2!, x^3/3! from the\n# polynomial features\ntrain(poly_features[:n_train, :4], poly_features[n_train:, :4],\n labels[:n_train], labels[n_train:])",
"weight: [[ 4.988523 1.2056429 -3.3774073 5.5973654]]\n"
]
],
[
[
"### Linear Function Fitting (Underfitting)\n\nLet us take another look at linear function fitting.\nAfter the decline in early epochs,\nit becomes difficult to further decrease\nthis model's training loss.\nAfter the last epoch iteration has been completed,\nthe training loss is still high.\nWhen used to fit nonlinear patterns\n(like the third-order polynomial function here)\nlinear models are liable to underfit.\n",
"_____no_output_____"
]
],
[
[
"# Pick the first two dimensions, i.e., 1, x, from the polynomial features\ntrain(poly_features[:n_train, :2], poly_features[n_train:, :2],\n labels[:n_train], labels[n_train:])",
"weight: [[2.8604352 4.265014 ]]\n"
]
],
[
[
"### Higher-Order Polynomial Function Fitting (Overfitting)\n\nNow let us try to train the model\nusing a polynomial of too high degree.\nHere, there are insufficient data to learn that\nthe higher-degree coefficients should have values close to zero.\nAs a result, our overly-complex model\nis so susceptible that it is being influenced\nby noise in the training data.\nThough the training loss can be effectively reduced,\nthe test loss is still much higher.\nIt shows that\nthe complex model overfits the data.\n",
"_____no_output_____"
]
],
[
[
"# Pick all the dimensions from the polynomial features\ntrain(poly_features[:n_train, :], poly_features[n_train:, :],\n labels[:n_train], labels[n_train:], num_epochs=1500)",
"weight: [[ 5.022744 1.3021078 -3.496279 5.1624517 0.31825364 1.0755647\n -0.12992163 0.2757634 -0.24490005 0.33325827 0.10778137 -0.07051218\n -0.47507778 -0.05186964 0.28836033 -0.360715 0.22909148 -0.1936694\n 0.18136841 0.16792756]]\n"
]
],
[
[
"In the subsequent sections, we will continue\nto discuss overfitting problems\nand methods for dealing with them,\nsuch as weight decay and dropout.\n\n\n## Summary\n\n* Since the generalization error cannot be estimated based on the training error, simply minimizing the training error will not necessarily mean a reduction in the generalization error. Machine learning models need to be careful to safeguard against overfitting so as to minimize the generalization error.\n* A validation set can be used for model selection, provided that it is not used too liberally.\n* Underfitting means that a model is not able to reduce the training error. When training error is much lower than validation error, there is overfitting.\n* We should choose an appropriately complex model and avoid using insufficient training samples.\n\n\n## Exercises\n\n1. Can you solve the polynomial regression problem exactly? Hint: use linear algebra.\n1. Consider model selection for polynomials:\n 1. Plot the training loss vs. model complexity (degree of the polynomial). What do you observe? What degree of polynomial do you need to reduce the training loss to 0?\n 1. Plot the test loss in this case.\n 1. Generate the same plot as a function of the amount of data.\n1. What happens if you drop the normalization ($1/i!$) of the polynomial features $x^i$? Can you fix this in some other way?\n1. Can you ever expect to see zero generalization error?\n",
"_____no_output_____"
],
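[
"A hedged sketch for Exercise 1 (our own illustration, not the book's solution): since the model is linear in $w$, the squared-error minimizer has the closed form $w^* = (X^\\top X)^{-1} X^\\top y$, which `np.linalg.lstsq` computes stably.\n\n```python\n# Closed-form least-squares fit of the degree-3 model (sketch)\nX = poly_features[:n_train, :4].numpy()  # design matrix of rescaled powers\ny = labels[:n_train].numpy()\nw_star, *_ = np.linalg.lstsq(X, y, rcond=None)\nprint(w_star)  # should be close to [5, 1.2, -3.4, 5.6]\n```",
"_____no_output_____"
],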
[
"[Discussions](https://discuss.d2l.ai/t/234)\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
d0ec0aab4e0f8558f43da0b38fdfeb65881646e2 | 696,659 | ipynb | Jupyter Notebook | DD-Net/jhmdb_1D_lite.ipynb | anhquannguyen21/Skeleton-Action-Recognition | 1bf6162b72189be8d24bce98d37859aa13178a9d | [
"MIT"
] | 1 | 2020-04-21T09:04:19.000Z | 2020-04-21T09:04:19.000Z | DD-Net/jhmdb_1D_lite.ipynb | anhquannguyen21/Skeleton-Action-Recognition | 1bf6162b72189be8d24bce98d37859aa13178a9d | [
"MIT"
] | null | null | null | DD-Net/jhmdb_1D_lite.ipynb | anhquannguyen21/Skeleton-Action-Recognition | 1bf6162b72189be8d24bce98d37859aa13178a9d | [
"MIT"
] | null | null | null | 92.284938 | 33,256 | 0.498816 | [
[
[
"import numpy as np\nimport math\nimport random\nimport pandas as pd\nimport os\nimport matplotlib.pyplot as plt\nimport cv2\nimport glob\nimport gc\nfrom google.colab import files\nsrc = list(files.upload().values())[0]\nopen('utils.py','wb').write(src)\nfrom utils import *\nfrom tqdm import tqdm\nimport pickle\n\nfrom keras.optimizers import *\nfrom keras.models import Model\nfrom keras.layers import *\nfrom keras.layers.core import *\nfrom keras.layers.convolutional import *\nfrom keras import backend as K\nimport tensorflow as tf",
"_____no_output_____"
]
],
[
[
"# Initialize the setting",
"_____no_output_____"
]
],
[
[
"os.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\" \nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"1\"",
"_____no_output_____"
],
[
"random.seed(123)\n\nclass Config():\n def __init__(self):\n self.frame_l = 32 # the length of frames\n self.joint_n = 15 # the number of joints\n self.joint_d = 2 # the dimension of joints\n self.clc_num = 21 # the number of class\n self.feat_d = 105\n self.filters = 16\n self.data_dir = '/mnt/nasbi/homes/fan/projects/action/skeleton/data/JHMDB/'\nC = Config()",
"_____no_output_____"
],
[
"def data_generator(T,C,le):\n X_0 = []\n X_1 = []\n Y = []\n for i in tqdm(range(len(T['pose']))): \n p = np.copy(T['pose'][i])\n p = zoom(p,target_l=C.frame_l,joints_num=C.joint_n,joints_dim=C.joint_d)\n\n label = np.zeros(C.clc_num)\n label[le.transform(T['label'])[i]-1] = 1 \n\n M = get_CG(p,C)\n\n X_0.append(M)\n X_1.append(p)\n Y.append(label)\n\n X_0 = np.stack(X_0) \n X_1 = np.stack(X_1) \n Y = np.stack(Y)\n return X_0,X_1,Y",
"_____no_output_____"
]
],
[
[
"# Building the model",
"_____no_output_____"
]
],
[
[
"def poses_diff(x):\n H, W = x.get_shape()[1],x.get_shape()[2]\n x = tf.subtract(x[:,1:,...],x[:,:-1,...])\n x = tf.image.resize_nearest_neighbor(x,size=[H.value,W.value],align_corners=False) # should not alignment here\n return x\n\ndef pose_motion(P,frame_l):\n P_diff_slow = Lambda(lambda x: poses_diff(x))(P)\n P_diff_slow = Reshape((frame_l,-1))(P_diff_slow)\n P_fast = Lambda(lambda x: x[:,::2,...])(P)\n P_diff_fast = Lambda(lambda x: poses_diff(x))(P_fast)\n P_diff_fast = Reshape((int(frame_l/2),-1))(P_diff_fast)\n return P_diff_slow,P_diff_fast\n \ndef c1D(x,filters,kernel):\n x = Conv1D(filters, kernel_size=kernel,padding='same',use_bias=False)(x)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha=0.2)(x)\n return x\n\ndef block(x,filters):\n x = c1D(x,filters,3)\n x = c1D(x,filters,3)\n return x\n \ndef d1D(x,filters):\n x = Dense(filters,use_bias=False)(x)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha=0.2)(x)\n return x\n\ndef build_FM(frame_l=32,joint_n=22,joint_d=2,feat_d=231,filters=16): \n M = Input(shape=(frame_l,feat_d))\n P = Input(shape=(frame_l,joint_n,joint_d))\n \n diff_slow,diff_fast = pose_motion(P,frame_l)\n \n x = c1D(M,filters*2,1)\n x = SpatialDropout1D(0.1)(x)\n x = c1D(x,filters,3)\n x = SpatialDropout1D(0.1)(x)\n x = c1D(x,filters,1)\n x = MaxPooling1D(2)(x)\n x = SpatialDropout1D(0.1)(x)\n\n x_d_slow = c1D(diff_slow,filters*2,1)\n x_d_slow = SpatialDropout1D(0.1)(x_d_slow)\n x_d_slow = c1D(x_d_slow,filters,3)\n x_d_slow = SpatialDropout1D(0.1)(x_d_slow)\n x_d_slow = c1D(x_d_slow,filters,1)\n x_d_slow = MaxPool1D(2)(x_d_slow)\n x_d_slow = SpatialDropout1D(0.1)(x_d_slow)\n \n x_d_fast = c1D(diff_fast,filters*2,1)\n x_d_fast = SpatialDropout1D(0.1)(x_d_fast)\n x_d_fast = c1D(x_d_fast,filters,3) \n x_d_fast = SpatialDropout1D(0.1)(x_d_fast)\n x_d_fast = c1D(x_d_fast,filters,1) \n x_d_fast = SpatialDropout1D(0.1)(x_d_fast)\n \n x = concatenate([x,x_d_slow,x_d_fast])\n x = block(x,filters*2)\n x = MaxPool1D(2)(x)\n x = SpatialDropout1D(0.1)(x)\n \n x = block(x,filters*4)\n x = MaxPool1D(2)(x)\n x = SpatialDropout1D(0.1)(x)\n\n x = block(x,filters*8)\n x = SpatialDropout1D(0.1)(x)\n \n return Model(inputs=[M,P],outputs=x)\n\n\ndef build_DD_Net(C):\n M = Input(name='M', shape=(C.frame_l,C.feat_d)) \n P = Input(name='P', shape=(C.frame_l,C.joint_n,C.joint_d)) \n \n FM = build_FM(C.frame_l,C.joint_n,C.joint_d,C.feat_d,C.filters)\n \n x = FM([M,P])\n\n x = GlobalMaxPool1D()(x)\n \n x = d1D(x,128)\n x = Dropout(0.5)(x)\n x = d1D(x,128)\n x = Dropout(0.5)(x)\n x = Dense(C.clc_num, activation='softmax')(x)\n \n ######################Self-supervised part\n model = Model(inputs=[M,P],outputs=x)\n return model",
"_____no_output_____"
],
[
"DD_Net = build_DD_Net(C)\nDD_Net.summary()",
"WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:541: The name tf.placeholder is deprecated. Please use tf.compat.v1.placeholder instead.\n\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:66: The name tf.get_default_graph is deprecated. Please use tf.compat.v1.get_default_graph instead.\n\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:4432: The name tf.random_uniform is deprecated. Please use tf.random.uniform instead.\n\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:148: The name tf.placeholder_with_default is deprecated. Please use tf.compat.v1.placeholder_with_default instead.\n\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:3733: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`.\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:4267: The name tf.nn.max_pool is deprecated. Please use tf.nn.max_pool2d instead.\n\nModel: \"model_2\"\n__________________________________________________________________________________________________\nLayer (type) Output Shape Param # Connected to \n==================================================================================================\nM (InputLayer) (None, 32, 105) 0 \n__________________________________________________________________________________________________\nP (InputLayer) (None, 32, 15, 2) 0 \n__________________________________________________________________________________________________\nmodel_1 (Model) (None, 4, 128) 113056 M[0][0] \n P[0][0] \n__________________________________________________________________________________________________\nglobal_max_pooling1d_1 (GlobalM (None, 128) 0 model_1[1][0] \n__________________________________________________________________________________________________\ndense_1 (Dense) (None, 128) 16384 global_max_pooling1d_1[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_16 (BatchNo (None, 128) 512 dense_1[0][0] \n__________________________________________________________________________________________________\nleaky_re_lu_16 (LeakyReLU) (None, 128) 0 batch_normalization_16[0][0] \n__________________________________________________________________________________________________\ndropout_1 (Dropout) (None, 128) 0 leaky_re_lu_16[0][0] \n__________________________________________________________________________________________________\ndense_2 (Dense) (None, 128) 16384 dropout_1[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_17 (BatchNo (None, 128) 512 dense_2[0][0] \n__________________________________________________________________________________________________\nleaky_re_lu_17 (LeakyReLU) (None, 128) 0 batch_normalization_17[0][0] \n__________________________________________________________________________________________________\ndropout_2 (Dropout) (None, 128) 0 leaky_re_lu_17[0][0] \n__________________________________________________________________________________________________\ndense_3 (Dense) (None, 21) 2709 dropout_2[0][0] 
\n==================================================================================================\nTotal params: 149,557\nTrainable params: 147,765\nNon-trainable params: 1,792\n__________________________________________________________________________________________________\n"
]
],
[
[
"## Train and test on GT_split 1",
"_____no_output_____"
]
],
[
[
"from google.colab import drive\nimport pickle\ndrive.mount('/content/drive')\nDATA_PATH1 = \"/content/drive/My Drive/Colab Notebooks/Data\"\ninfile = open(DATA_PATH1+'/GT_train_1.pkl','rb')\nTrain = pickle.load(infile)\nDATA_PATH2 = \"/content/drive/My Drive/Colab Notebooks/Data\"\ntestfile= open(DATA_PATH2+'/GT_test_1.pkl','rb')\nTest = pickle.load(testfile)\n\nfrom sklearn import preprocessing\nle = preprocessing.LabelEncoder()\nle.fit(Train['label'])\n\nX_0,X_1,Y = data_generator(Train,C,le)\nX_test_0,X_test_1,Y_test = data_generator(Test,C,le)",
"Go to this URL in a browser: https://accounts.google.com/o/oauth2/auth?client_id=947318989803-6bn6qk8qdgf4n4g3pfee6491hc0brc4i.apps.googleusercontent.com&redirect_uri=urn%3aietf%3awg%3aoauth%3a2.0%3aoob&response_type=code&scope=email%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdocs.test%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive.photos.readonly%20https%3a%2f%2fwww.googleapis.com%2fauth%2fpeopleapi.readonly\n\nEnter your authorization code:\n··········\nMounted at /content/drive\n"
],
[
"import keras\nlr = 1e-3\nDD_Net.compile(loss=\"categorical_crossentropy\",optimizer=adam(lr),metrics=['accuracy'])\nlrScheduler = keras.callbacks.ReduceLROnPlateau(monitor='loss', factor=0.5, patience=5, cooldown=5, min_lr=1e-5)\nhistory = DD_Net.fit([X_0,X_1],Y,\n batch_size=len(Y),\n epochs=600,\n verbose=True,\n shuffle=True,\n callbacks=[lrScheduler],\n validation_data=([X_test_0,X_test_1],Y_test) \n )\n\nlr = 1e-3\nDD_Net.compile(loss=\"categorical_crossentropy\",optimizer=adam(lr),metrics=['accuracy'])\nlrScheduler = keras.callbacks.ReduceLROnPlateau(monitor='loss', factor=0.5, patience=5, cooldown=5, min_lr=5e-6)\nhistory = DD_Net.fit([X_0,X_1],Y,\n batch_size=len(Y),\n epochs=500,\n verbose=True,\n shuffle=True,\n callbacks=[lrScheduler],\n validation_data=([X_test_0,X_test_1],Y_test) \n )",
"WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/optimizers.py:793: The name tf.train.Optimizer is deprecated. Please use tf.compat.v1.train.Optimizer instead.\n\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:3576: The name tf.log is deprecated. Please use tf.math.log instead.\n\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/math_grad.py:1424: where (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse tf.where in 2.0, which has the same broadcast rule as np.where\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:1033: The name tf.assign_add is deprecated. Please use tf.compat.v1.assign_add instead.\n\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:1020: The name tf.assign is deprecated. Please use tf.compat.v1.assign instead.\n\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:3005: The name tf.Session is deprecated. Please use tf.compat.v1.Session instead.\n\nTrain on 433 samples, validate on 176 samples\nEpoch 1/600\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:190: The name tf.get_default_session is deprecated. Please use tf.compat.v1.get_default_session instead.\n\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:197: The name tf.ConfigProto is deprecated. Please use tf.compat.v1.ConfigProto instead.\n\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:207: The name tf.global_variables is deprecated. Please use tf.compat.v1.global_variables instead.\n\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:216: The name tf.is_variable_initialized is deprecated. Please use tf.compat.v1.is_variable_initialized instead.\n\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:223: The name tf.variables_initializer is deprecated. 
Please use tf.compat.v1.variables_initializer instead.\n\n433/433 [==============================] - 7s 15ms/step - loss: 3.6107 - acc: 0.0508 - val_loss: 3.0470 - val_acc: 0.0682\nEpoch 2/600\n433/433 [==============================] - 0s 580us/step - loss: 3.5442 - acc: 0.0670 - val_loss: 2.9615 - val_acc: 0.1193\nEpoch 3/600\n433/433 [==============================] - 0s 557us/step - loss: 3.4610 - acc: 0.0554 - val_loss: 2.8810 - val_acc: 0.1364\nEpoch 4/600\n433/433 [==============================] - 0s 523us/step - loss: 3.2437 - acc: 0.0785 - val_loss: 2.8199 - val_acc: 0.1591\nEpoch 5/600\n433/433 [==============================] - 0s 533us/step - loss: 3.1207 - acc: 0.0970 - val_loss: 2.7642 - val_acc: 0.2045\nEpoch 6/600\n433/433 [==============================] - 0s 529us/step - loss: 3.1706 - acc: 0.0901 - val_loss: 2.7113 - val_acc: 0.2216\nEpoch 7/600\n433/433 [==============================] - 0s 511us/step - loss: 3.0095 - acc: 0.1339 - val_loss: 2.6591 - val_acc: 0.2557\nEpoch 8/600\n433/433 [==============================] - 0s 533us/step - loss: 2.9579 - acc: 0.1432 - val_loss: 2.6107 - val_acc: 0.2955\nEpoch 9/600\n433/433 [==============================] - 0s 526us/step - loss: 2.8467 - acc: 0.1594 - val_loss: 2.5590 - val_acc: 0.3295\nEpoch 10/600\n433/433 [==============================] - 0s 539us/step - loss: 2.8566 - acc: 0.1709 - val_loss: 2.5067 - val_acc: 0.3523\nEpoch 11/600\n433/433 [==============================] - 0s 526us/step - loss: 2.7707 - acc: 0.1732 - val_loss: 2.4556 - val_acc: 0.3636\nEpoch 12/600\n433/433 [==============================] - 0s 520us/step - loss: 2.6770 - acc: 0.2102 - val_loss: 2.4051 - val_acc: 0.3807\nEpoch 13/600\n433/433 [==============================] - 0s 550us/step - loss: 2.7246 - acc: 0.1848 - val_loss: 2.3499 - val_acc: 0.4091\nEpoch 14/600\n433/433 [==============================] - 0s 516us/step - loss: 2.6284 - acc: 0.2286 - val_loss: 2.2947 - val_acc: 0.3977\nEpoch 15/600\n433/433 [==============================] - 0s 519us/step - loss: 2.5638 - acc: 0.2240 - val_loss: 2.2363 - val_acc: 0.4205\nEpoch 16/600\n433/433 [==============================] - 0s 527us/step - loss: 2.5502 - acc: 0.2425 - val_loss: 2.1818 - val_acc: 0.4432\nEpoch 17/600\n433/433 [==============================] - 0s 533us/step - loss: 2.5231 - acc: 0.2587 - val_loss: 2.1313 - val_acc: 0.4545\nEpoch 18/600\n433/433 [==============================] - 0s 525us/step - loss: 2.4474 - acc: 0.2910 - val_loss: 2.0837 - val_acc: 0.4432\nEpoch 19/600\n433/433 [==============================] - 0s 557us/step - loss: 2.3858 - acc: 0.2933 - val_loss: 2.0416 - val_acc: 0.4489\nEpoch 20/600\n433/433 [==============================] - 0s 521us/step - loss: 2.3784 - acc: 0.3095 - val_loss: 2.0019 - val_acc: 0.4489\nEpoch 21/600\n433/433 [==============================] - 0s 525us/step - loss: 2.3265 - acc: 0.3256 - val_loss: 1.9626 - val_acc: 0.4545\nEpoch 22/600\n433/433 [==============================] - 0s 587us/step - loss: 2.3035 - acc: 0.2910 - val_loss: 1.9254 - val_acc: 0.4489\nEpoch 23/600\n433/433 [==============================] - 0s 553us/step - loss: 2.1987 - acc: 0.3464 - val_loss: 1.8932 - val_acc: 0.4489\nEpoch 24/600\n433/433 [==============================] - 0s 522us/step - loss: 2.1648 - acc: 0.3256 - val_loss: 1.8643 - val_acc: 0.4545\nEpoch 25/600\n433/433 [==============================] - 0s 516us/step - loss: 2.1232 - acc: 0.3695 - val_loss: 1.8372 - val_acc: 0.4432\nEpoch 26/600\n433/433 [==============================] - 0s 
522us/step - loss: 2.0653 - acc: 0.3626 - val_loss: 1.8140 - val_acc: 0.4261\nEpoch 27/600\n433/433 [==============================] - 0s 532us/step - loss: 2.1493 - acc: 0.3441 - val_loss: 1.7906 - val_acc: 0.4261\n[... epochs 28-561 truncated: training loss fell from ~2.1 to ~0.53 and val_loss from 1.79 to ~0.844; val_acc rose steadily from 0.43, peaked at 0.7557 around epoch 217, then plateaued near 0.7273-0.7330 for the remaining epochs ...]\nEpoch 562/600\n433/433 [==============================] - 0s 539us/step - loss: 
0.5622 - acc: 0.8453 - val_loss: 0.8435 - val_acc: 0.7273\nEpoch 563/600\n433/433 [==============================] - 0s 541us/step - loss: 0.4788 - acc: 0.8730 - val_loss: 0.8435 - val_acc: 0.7273\nEpoch 564/600\n433/433 [==============================] - 0s 536us/step - loss: 0.4830 - acc: 0.8337 - val_loss: 0.8433 - val_acc: 0.7273\nEpoch 565/600\n433/433 [==============================] - 0s 549us/step - loss: 0.5291 - acc: 0.8476 - val_loss: 0.8434 - val_acc: 0.7273\nEpoch 566/600\n433/433 [==============================] - 0s 544us/step - loss: 0.4268 - acc: 0.8730 - val_loss: 0.8433 - val_acc: 0.7273\nEpoch 567/600\n433/433 [==============================] - 0s 544us/step - loss: 0.5316 - acc: 0.8360 - val_loss: 0.8431 - val_acc: 0.7273\nEpoch 568/600\n433/433 [==============================] - 0s 551us/step - loss: 0.5713 - acc: 0.8291 - val_loss: 0.8431 - val_acc: 0.7273\nEpoch 569/600\n433/433 [==============================] - 0s 528us/step - loss: 0.5301 - acc: 0.8476 - val_loss: 0.8429 - val_acc: 0.7273\nEpoch 570/600\n433/433 [==============================] - 0s 523us/step - loss: 0.5011 - acc: 0.8383 - val_loss: 0.8427 - val_acc: 0.7273\nEpoch 571/600\n433/433 [==============================] - 0s 527us/step - loss: 0.4806 - acc: 0.8499 - val_loss: 0.8425 - val_acc: 0.7273\nEpoch 572/600\n433/433 [==============================] - 0s 540us/step - loss: 0.5569 - acc: 0.8314 - val_loss: 0.8424 - val_acc: 0.7273\nEpoch 573/600\n433/433 [==============================] - 0s 541us/step - loss: 0.5631 - acc: 0.8129 - val_loss: 0.8424 - val_acc: 0.7273\nEpoch 574/600\n433/433 [==============================] - 0s 533us/step - loss: 0.5498 - acc: 0.8314 - val_loss: 0.8423 - val_acc: 0.7273\nEpoch 575/600\n433/433 [==============================] - 0s 533us/step - loss: 0.4891 - acc: 0.8430 - val_loss: 0.8422 - val_acc: 0.7273\nEpoch 576/600\n433/433 [==============================] - 0s 532us/step - loss: 0.5204 - acc: 0.8522 - val_loss: 0.8422 - val_acc: 0.7273\nEpoch 577/600\n433/433 [==============================] - 0s 573us/step - loss: 0.5099 - acc: 0.8476 - val_loss: 0.8421 - val_acc: 0.7273\nEpoch 578/600\n433/433 [==============================] - 0s 535us/step - loss: 0.5114 - acc: 0.8314 - val_loss: 0.8420 - val_acc: 0.7273\nEpoch 579/600\n433/433 [==============================] - 0s 564us/step - loss: 0.5140 - acc: 0.8268 - val_loss: 0.8419 - val_acc: 0.7273\nEpoch 580/600\n433/433 [==============================] - 0s 538us/step - loss: 0.5748 - acc: 0.8453 - val_loss: 0.8418 - val_acc: 0.7273\nEpoch 581/600\n433/433 [==============================] - 0s 533us/step - loss: 0.5359 - acc: 0.8360 - val_loss: 0.8417 - val_acc: 0.7273\nEpoch 582/600\n433/433 [==============================] - 0s 550us/step - loss: 0.5275 - acc: 0.8430 - val_loss: 0.8416 - val_acc: 0.7273\nEpoch 583/600\n433/433 [==============================] - 0s 545us/step - loss: 0.5118 - acc: 0.8337 - val_loss: 0.8414 - val_acc: 0.7273\nEpoch 584/600\n433/433 [==============================] - 0s 536us/step - loss: 0.5549 - acc: 0.8476 - val_loss: 0.8412 - val_acc: 0.7273\nEpoch 585/600\n433/433 [==============================] - 0s 545us/step - loss: 0.5503 - acc: 0.8199 - val_loss: 0.8410 - val_acc: 0.7273\nEpoch 586/600\n433/433 [==============================] - 0s 536us/step - loss: 0.5532 - acc: 0.8106 - val_loss: 0.8407 - val_acc: 0.7273\nEpoch 587/600\n433/433 [==============================] - 0s 538us/step - loss: 0.5101 - acc: 0.8383 - val_loss: 0.8406 - val_acc: 0.7273\nEpoch 
588/600\n433/433 [==============================] - 0s 544us/step - loss: 0.5130 - acc: 0.8406 - val_loss: 0.8402 - val_acc: 0.7273\nEpoch 589/600\n433/433 [==============================] - 0s 529us/step - loss: 0.5297 - acc: 0.8430 - val_loss: 0.8401 - val_acc: 0.7273\nEpoch 590/600\n433/433 [==============================] - 0s 562us/step - loss: 0.5503 - acc: 0.8499 - val_loss: 0.8400 - val_acc: 0.7273\nEpoch 591/600\n433/433 [==============================] - 0s 521us/step - loss: 0.5553 - acc: 0.8406 - val_loss: 0.8399 - val_acc: 0.7273\nEpoch 592/600\n433/433 [==============================] - 0s 547us/step - loss: 0.5167 - acc: 0.8614 - val_loss: 0.8399 - val_acc: 0.7273\nEpoch 593/600\n433/433 [==============================] - 0s 530us/step - loss: 0.5183 - acc: 0.8499 - val_loss: 0.8398 - val_acc: 0.7273\nEpoch 594/600\n433/433 [==============================] - 0s 520us/step - loss: 0.5453 - acc: 0.8268 - val_loss: 0.8396 - val_acc: 0.7273\nEpoch 595/600\n433/433 [==============================] - 0s 541us/step - loss: 0.4733 - acc: 0.8614 - val_loss: 0.8395 - val_acc: 0.7273\nEpoch 596/600\n433/433 [==============================] - 0s 526us/step - loss: 0.5440 - acc: 0.8360 - val_loss: 0.8395 - val_acc: 0.7273\nEpoch 597/600\n433/433 [==============================] - 0s 544us/step - loss: 0.5373 - acc: 0.8453 - val_loss: 0.8393 - val_acc: 0.7273\nEpoch 598/600\n433/433 [==============================] - 0s 543us/step - loss: 0.5052 - acc: 0.8406 - val_loss: 0.8393 - val_acc: 0.7273\nEpoch 599/600\n433/433 [==============================] - 0s 610us/step - loss: 0.5198 - acc: 0.8360 - val_loss: 0.8391 - val_acc: 0.7273\nEpoch 600/600\n433/433 [==============================] - 0s 660us/step - loss: 0.5076 - acc: 0.8522 - val_loss: 0.8392 - val_acc: 0.7273\nTrain on 433 samples, validate on 176 samples\nEpoch 1/500\n433/433 [==============================] - 6s 15ms/step - loss: 0.5407 - acc: 0.8291 - val_loss: 0.8405 - val_acc: 0.7273\nEpoch 2/500\n433/433 [==============================] - 0s 573us/step - loss: 0.5114 - acc: 0.8430 - val_loss: 0.8647 - val_acc: 0.7386\nEpoch 3/500\n433/433 [==============================] - 0s 521us/step - loss: 0.5369 - acc: 0.8337 - val_loss: 0.8732 - val_acc: 0.7386\nEpoch 4/500\n433/433 [==============================] - 0s 548us/step - loss: 0.5801 - acc: 0.8222 - val_loss: 0.8774 - val_acc: 0.7386\nEpoch 5/500\n433/433 [==============================] - 0s 527us/step - loss: 0.4806 - acc: 0.8522 - val_loss: 0.8766 - val_acc: 0.7443\nEpoch 6/500\n433/433 [==============================] - 0s 547us/step - loss: 0.5161 - acc: 0.8337 - val_loss: 0.8703 - val_acc: 0.7273\nEpoch 7/500\n433/433 [==============================] - 0s 526us/step - loss: 0.5190 - acc: 0.8591 - val_loss: 0.8624 - val_acc: 0.7273\nEpoch 8/500\n433/433 [==============================] - 0s 564us/step - loss: 0.5115 - acc: 0.8383 - val_loss: 0.8674 - val_acc: 0.7159\nEpoch 9/500\n433/433 [==============================] - 0s 534us/step - loss: 0.5330 - acc: 0.8314 - val_loss: 0.8764 - val_acc: 0.7273\nEpoch 10/500\n433/433 [==============================] - 0s 527us/step - loss: 0.4756 - acc: 0.8730 - val_loss: 0.8762 - val_acc: 0.7386\nEpoch 11/500\n433/433 [==============================] - 0s 532us/step - loss: 0.4384 - acc: 0.8707 - val_loss: 0.8869 - val_acc: 0.7273\nEpoch 12/500\n433/433 [==============================] - 0s 524us/step - loss: 0.4111 - acc: 0.8730 - val_loss: 0.9147 - val_acc: 0.7273\nEpoch 13/500\n433/433 [==============================] - 0s 
530us/step - loss: 0.5041 - acc: 0.8430 - val_loss: 0.9399 - val_acc: 0.7159\nEpoch 14/500\n433/433 [==============================] - 0s 528us/step - loss: 0.4290 - acc: 0.8753 - val_loss: 0.9530 - val_acc: 0.7159\nEpoch 15/500\n433/433 [==============================] - 0s 514us/step - loss: 0.4621 - acc: 0.8637 - val_loss: 0.9429 - val_acc: 0.7216\nEpoch 16/500\n433/433 [==============================] - 0s 523us/step - loss: 0.4352 - acc: 0.8684 - val_loss: 0.9247 - val_acc: 0.7216\nEpoch 17/500\n433/433 [==============================] - 0s 536us/step - loss: 0.4138 - acc: 0.8684 - val_loss: 0.9098 - val_acc: 0.7273\nEpoch 18/500\n433/433 [==============================] - 0s 541us/step - loss: 0.4174 - acc: 0.8730 - val_loss: 0.9036 - val_acc: 0.7273\nEpoch 19/500\n433/433 [==============================] - 0s 527us/step - loss: 0.4426 - acc: 0.8406 - val_loss: 0.9033 - val_acc: 0.7216\nEpoch 20/500\n433/433 [==============================] - 0s 527us/step - loss: 0.3912 - acc: 0.8845 - val_loss: 0.8984 - val_acc: 0.7443\nEpoch 21/500\n433/433 [==============================] - 0s 547us/step - loss: 0.4335 - acc: 0.8684 - val_loss: 0.9003 - val_acc: 0.7443\nEpoch 22/500\n433/433 [==============================] - 0s 537us/step - loss: 0.4235 - acc: 0.8637 - val_loss: 0.9045 - val_acc: 0.7500\nEpoch 23/500\n433/433 [==============================] - 0s 552us/step - loss: 0.3992 - acc: 0.8799 - val_loss: 0.9128 - val_acc: 0.7500\nEpoch 24/500\n433/433 [==============================] - 0s 565us/step - loss: 0.4203 - acc: 0.8799 - val_loss: 0.9198 - val_acc: 0.7443\nEpoch 25/500\n433/433 [==============================] - 0s 568us/step - loss: 0.3804 - acc: 0.8938 - val_loss: 0.9285 - val_acc: 0.7386\nEpoch 26/500\n433/433 [==============================] - 0s 551us/step - loss: 0.3656 - acc: 0.8961 - val_loss: 0.9403 - val_acc: 0.7330\nEpoch 27/500\n433/433 [==============================] - 0s 552us/step - loss: 0.4075 - acc: 0.8799 - val_loss: 0.9482 - val_acc: 0.7386\nEpoch 28/500\n433/433 [==============================] - 0s 527us/step - loss: 0.3798 - acc: 0.9007 - val_loss: 0.9556 - val_acc: 0.7330\nEpoch 29/500\n433/433 [==============================] - 0s 547us/step - loss: 0.3573 - acc: 0.9099 - val_loss: 0.9562 - val_acc: 0.7330\nEpoch 30/500\n433/433 [==============================] - 0s 563us/step - loss: 0.3771 - acc: 0.8753 - val_loss: 0.9539 - val_acc: 0.7330\nEpoch 31/500\n433/433 [==============================] - 0s 574us/step - loss: 0.3501 - acc: 0.9076 - val_loss: 0.9500 - val_acc: 0.7330\nEpoch 32/500\n433/433 [==============================] - 0s 555us/step - loss: 0.4023 - acc: 0.8799 - val_loss: 0.9380 - val_acc: 0.7330\nEpoch 33/500\n433/433 [==============================] - 0s 542us/step - loss: 0.4093 - acc: 0.8730 - val_loss: 0.9244 - val_acc: 0.7443\nEpoch 34/500\n433/433 [==============================] - 0s 528us/step - loss: 0.3100 - acc: 0.9053 - val_loss: 0.9127 - val_acc: 0.7443\nEpoch 35/500\n433/433 [==============================] - 0s 571us/step - loss: 0.3549 - acc: 0.8961 - val_loss: 0.9058 - val_acc: 0.7443\nEpoch 36/500\n433/433 [==============================] - 0s 538us/step - loss: 0.3738 - acc: 0.8822 - val_loss: 0.8979 - val_acc: 0.7500\nEpoch 37/500\n433/433 [==============================] - 0s 551us/step - loss: 0.3366 - acc: 0.8822 - val_loss: 0.8911 - val_acc: 0.7500\nEpoch 38/500\n433/433 [==============================] - 0s 544us/step - loss: 0.3800 - acc: 0.9076 - val_loss: 0.8871 - val_acc: 0.7500\nEpoch 39/500\n433/433 
[==============================] - 0s 528us/step - loss: 0.3966 - acc: 0.8868 - val_loss: 0.8817 - val_acc: 0.7443\nEpoch 40/500\n433/433 [==============================] - 0s 542us/step - loss: 0.3660 - acc: 0.8961 - val_loss: 0.8790 - val_acc: 0.7443\nEpoch 41/500\n433/433 [==============================] - 0s 529us/step - loss: 0.3462 - acc: 0.8915 - val_loss: 0.8774 - val_acc: 0.7386\nEpoch 42/500\n433/433 [==============================] - 0s 534us/step - loss: 0.3356 - acc: 0.9053 - val_loss: 0.8766 - val_acc: 0.7386\nEpoch 43/500\n433/433 [==============================] - 0s 526us/step - loss: 0.3169 - acc: 0.9238 - val_loss: 0.8770 - val_acc: 0.7386\nEpoch 44/500\n433/433 [==============================] - 0s 520us/step - loss: 0.2758 - acc: 0.9284 - val_loss: 0.8779 - val_acc: 0.7330\nEpoch 45/500\n433/433 [==============================] - 0s 547us/step - loss: 0.3239 - acc: 0.9122 - val_loss: 0.8785 - val_acc: 0.7330\nEpoch 46/500\n433/433 [==============================] - 0s 539us/step - loss: 0.2864 - acc: 0.9307 - val_loss: 0.8793 - val_acc: 0.7330\nEpoch 47/500\n433/433 [==============================] - 0s 533us/step - loss: 0.3076 - acc: 0.9192 - val_loss: 0.8815 - val_acc: 0.7330\nEpoch 48/500\n433/433 [==============================] - 0s 582us/step - loss: 0.2986 - acc: 0.9400 - val_loss: 0.8840 - val_acc: 0.7330\nEpoch 49/500\n433/433 [==============================] - 0s 521us/step - loss: 0.3273 - acc: 0.8961 - val_loss: 0.8862 - val_acc: 0.7330\nEpoch 50/500\n433/433 [==============================] - 0s 555us/step - loss: 0.3349 - acc: 0.9030 - val_loss: 0.8864 - val_acc: 0.7330\nEpoch 51/500\n433/433 [==============================] - 0s 530us/step - loss: 0.3125 - acc: 0.9215 - val_loss: 0.8849 - val_acc: 0.7330\nEpoch 52/500\n433/433 [==============================] - 0s 563us/step - loss: 0.3121 - acc: 0.9076 - val_loss: 0.8829 - val_acc: 0.7330\nEpoch 53/500\n433/433 [==============================] - 0s 549us/step - loss: 0.2842 - acc: 0.9145 - val_loss: 0.8810 - val_acc: 0.7330\nEpoch 54/500\n433/433 [==============================] - 0s 513us/step - loss: 0.3009 - acc: 0.9261 - val_loss: 0.8793 - val_acc: 0.7330\nEpoch 55/500\n433/433 [==============================] - 0s 530us/step - loss: 0.2880 - acc: 0.9261 - val_loss: 0.8787 - val_acc: 0.7330\nEpoch 56/500\n433/433 [==============================] - 0s 532us/step - loss: 0.2934 - acc: 0.9145 - val_loss: 0.8791 - val_acc: 0.7330\nEpoch 57/500\n433/433 [==============================] - 0s 541us/step - loss: 0.3124 - acc: 0.9169 - val_loss: 0.8787 - val_acc: 0.7330\nEpoch 58/500\n433/433 [==============================] - 0s 564us/step - loss: 0.3119 - acc: 0.9099 - val_loss: 0.8792 - val_acc: 0.7386\nEpoch 59/500\n433/433 [==============================] - 0s 536us/step - loss: 0.3325 - acc: 0.8984 - val_loss: 0.8784 - val_acc: 0.7386\nEpoch 60/500\n433/433 [==============================] - 0s 551us/step - loss: 0.3393 - acc: 0.8961 - val_loss: 0.8776 - val_acc: 0.7386\nEpoch 61/500\n433/433 [==============================] - 0s 539us/step - loss: 0.3207 - acc: 0.9076 - val_loss: 0.8767 - val_acc: 0.7386\nEpoch 62/500\n433/433 [==============================] - 0s 525us/step - loss: 0.2852 - acc: 0.9307 - val_loss: 0.8761 - val_acc: 0.7386\nEpoch 63/500\n433/433 [==============================] - 0s 528us/step - loss: 0.3110 - acc: 0.9076 - val_loss: 0.8757 - val_acc: 0.7386\nEpoch 64/500\n433/433 [==============================] - 0s 551us/step - loss: 0.2946 - acc: 0.9076 - val_loss: 0.8756 - 
val_acc: 0.7386\nEpoch 65/500\n433/433 [==============================] - 0s 519us/step - loss: 0.2666 - acc: 0.9307 - val_loss: 0.8756 - val_acc: 0.7386\nEpoch 66/500\n433/433 [==============================] - 0s 539us/step - loss: 0.2651 - acc: 0.9215 - val_loss: 0.8756 - val_acc: 0.7386\nEpoch 67/500\n433/433 [==============================] - 0s 528us/step - loss: 0.2934 - acc: 0.9076 - val_loss: 0.8756 - val_acc: 0.7386\nEpoch 68/500\n433/433 [==============================] - 0s 524us/step - loss: 0.2702 - acc: 0.9284 - val_loss: 0.8757 - val_acc: 0.7386\nEpoch 69/500\n433/433 [==============================] - 0s 565us/step - loss: 0.3189 - acc: 0.9099 - val_loss: 0.8751 - val_acc: 0.7386\nEpoch 70/500\n433/433 [==============================] - 0s 554us/step - loss: 0.2719 - acc: 0.9353 - val_loss: 0.8742 - val_acc: 0.7386\nEpoch 71/500\n433/433 [==============================] - 0s 536us/step - loss: 0.2629 - acc: 0.9376 - val_loss: 0.8737 - val_acc: 0.7386\nEpoch 72/500\n433/433 [==============================] - 0s 538us/step - loss: 0.3149 - acc: 0.8961 - val_loss: 0.8733 - val_acc: 0.7386\nEpoch 73/500\n433/433 [==============================] - 0s 567us/step - loss: 0.3030 - acc: 0.9122 - val_loss: 0.8728 - val_acc: 0.7386\nEpoch 74/500\n433/433 [==============================] - 0s 559us/step - loss: 0.3259 - acc: 0.9099 - val_loss: 0.8723 - val_acc: 0.7386\nEpoch 75/500\n433/433 [==============================] - 0s 548us/step - loss: 0.3372 - acc: 0.8915 - val_loss: 0.8717 - val_acc: 0.7386\nEpoch 76/500\n433/433 [==============================] - 0s 540us/step - loss: 0.3113 - acc: 0.9053 - val_loss: 0.8703 - val_acc: 0.7386\nEpoch 77/500\n433/433 [==============================] - 0s 543us/step - loss: 0.3187 - acc: 0.9215 - val_loss: 0.8697 - val_acc: 0.7386\nEpoch 78/500\n433/433 [==============================] - 0s 551us/step - loss: 0.2860 - acc: 0.9192 - val_loss: 0.8687 - val_acc: 0.7386\nEpoch 79/500\n433/433 [==============================] - 0s 553us/step - loss: 0.3503 - acc: 0.8891 - val_loss: 0.8677 - val_acc: 0.7386\nEpoch 80/500\n433/433 [==============================] - 0s 557us/step - loss: 0.2276 - acc: 0.9238 - val_loss: 0.8669 - val_acc: 0.7386\nEpoch 81/500\n433/433 [==============================] - 0s 554us/step - loss: 0.3039 - acc: 0.9122 - val_loss: 0.8657 - val_acc: 0.7386\nEpoch 82/500\n433/433 [==============================] - 0s 550us/step - loss: 0.2963 - acc: 0.9076 - val_loss: 0.8650 - val_acc: 0.7386\nEpoch 83/500\n433/433 [==============================] - 0s 571us/step - loss: 0.2999 - acc: 0.9145 - val_loss: 0.8640 - val_acc: 0.7386\nEpoch 84/500\n433/433 [==============================] - 0s 557us/step - loss: 0.3022 - acc: 0.9145 - val_loss: 0.8628 - val_acc: 0.7386\nEpoch 85/500\n433/433 [==============================] - 0s 553us/step - loss: 0.2947 - acc: 0.9192 - val_loss: 0.8622 - val_acc: 0.7386\nEpoch 86/500\n433/433 [==============================] - 0s 559us/step - loss: 0.2901 - acc: 0.9076 - val_loss: 0.8614 - val_acc: 0.7386\nEpoch 87/500\n433/433 [==============================] - 0s 535us/step - loss: 0.3568 - acc: 0.8938 - val_loss: 0.8609 - val_acc: 0.7386\nEpoch 88/500\n433/433 [==============================] - 0s 553us/step - loss: 0.2785 - acc: 0.9238 - val_loss: 0.8605 - val_acc: 0.7386\nEpoch 89/500\n433/433 [==============================] - 0s 548us/step - loss: 0.3552 - acc: 0.8799 - val_loss: 0.8597 - val_acc: 0.7386\nEpoch 90/500\n433/433 [==============================] - 0s 555us/step - loss: 0.3017 - 
acc: 0.9076 - val_loss: 0.8588 - val_acc: 0.7386\nEpoch 91/500\n433/433 [==============================] - 0s 611us/step - loss: 0.2678 - acc: 0.9423 - val_loss: 0.8581 - val_acc: 0.7443\nEpoch 92/500\n433/433 [==============================] - 0s 563us/step - loss: 0.2833 - acc: 0.8984 - val_loss: 0.8574 - val_acc: 0.7443\nEpoch 93/500\n433/433 [==============================] - 0s 556us/step - loss: 0.3113 - acc: 0.9030 - val_loss: 0.8569 - val_acc: 0.7443\nEpoch 94/500\n433/433 [==============================] - 0s 541us/step - loss: 0.3276 - acc: 0.9099 - val_loss: 0.8560 - val_acc: 0.7443\nEpoch 95/500\n433/433 [==============================] - 0s 570us/step - loss: 0.2973 - acc: 0.9261 - val_loss: 0.8551 - val_acc: 0.7443\nEpoch 96/500\n433/433 [==============================] - 0s 559us/step - loss: 0.3229 - acc: 0.9007 - val_loss: 0.8544 - val_acc: 0.7443\nEpoch 97/500\n433/433 [==============================] - 0s 554us/step - loss: 0.3313 - acc: 0.8984 - val_loss: 0.8538 - val_acc: 0.7443\nEpoch 98/500\n433/433 [==============================] - 0s 555us/step - loss: 0.3083 - acc: 0.9145 - val_loss: 0.8529 - val_acc: 0.7443\nEpoch 99/500\n433/433 [==============================] - 0s 569us/step - loss: 0.3517 - acc: 0.8868 - val_loss: 0.8520 - val_acc: 0.7443\nEpoch 100/500\n433/433 [==============================] - 0s 556us/step - loss: 0.3263 - acc: 0.9007 - val_loss: 0.8513 - val_acc: 0.7443\nEpoch 101/500\n433/433 [==============================] - 0s 552us/step - loss: 0.3084 - acc: 0.8938 - val_loss: 0.8507 - val_acc: 0.7443\nEpoch 102/500\n433/433 [==============================] - 0s 546us/step - loss: 0.2792 - acc: 0.9261 - val_loss: 0.8499 - val_acc: 0.7443\nEpoch 103/500\n433/433 [==============================] - 0s 570us/step - loss: 0.3272 - acc: 0.8961 - val_loss: 0.8492 - val_acc: 0.7443\nEpoch 104/500\n433/433 [==============================] - 0s 561us/step - loss: 0.3373 - acc: 0.9284 - val_loss: 0.8484 - val_acc: 0.7500\nEpoch 105/500\n433/433 [==============================] - 0s 563us/step - loss: 0.3065 - acc: 0.9122 - val_loss: 0.8476 - val_acc: 0.7500\nEpoch 106/500\n433/433 [==============================] - 0s 533us/step - loss: 0.2742 - acc: 0.9238 - val_loss: 0.8469 - val_acc: 0.7500\nEpoch 107/500\n433/433 [==============================] - 0s 561us/step - loss: 0.2811 - acc: 0.9145 - val_loss: 0.8462 - val_acc: 0.7500\nEpoch 108/500\n433/433 [==============================] - 0s 552us/step - loss: 0.2736 - acc: 0.9122 - val_loss: 0.8456 - val_acc: 0.7500\nEpoch 109/500\n433/433 [==============================] - 0s 554us/step - loss: 0.3040 - acc: 0.9099 - val_loss: 0.8451 - val_acc: 0.7500\nEpoch 110/500\n433/433 [==============================] - 0s 551us/step - loss: 0.3011 - acc: 0.9238 - val_loss: 0.8445 - val_acc: 0.7500\nEpoch 111/500\n433/433 [==============================] - 0s 533us/step - loss: 0.3067 - acc: 0.9099 - val_loss: 0.8442 - val_acc: 0.7500\nEpoch 112/500\n433/433 [==============================] - 0s 543us/step - loss: 0.3027 - acc: 0.9145 - val_loss: 0.8435 - val_acc: 0.7500\nEpoch 113/500\n433/433 [==============================] - 0s 574us/step - loss: 0.3074 - acc: 0.9145 - val_loss: 0.8431 - val_acc: 0.7500\nEpoch 114/500\n433/433 [==============================] - 0s 543us/step - loss: 0.2870 - acc: 0.9145 - val_loss: 0.8427 - val_acc: 0.7500\nEpoch 115/500\n433/433 [==============================] - 0s 556us/step - loss: 0.2638 - acc: 0.9284 - val_loss: 0.8424 - val_acc: 0.7500\nEpoch 116/500\n433/433 
[==============================] - 0s 541us/step - loss: 0.3055 - acc: 0.9169 - val_loss: 0.8421 - val_acc: 0.7500\nEpoch 117/500\n433/433 [==============================] - 0s 565us/step - loss: 0.2882 - acc: 0.9122 - val_loss: 0.8418 - val_acc: 0.7500\nEpoch 118/500\n433/433 [==============================] - 0s 575us/step - loss: 0.2382 - acc: 0.9400 - val_loss: 0.8412 - val_acc: 0.7500\nEpoch 119/500\n433/433 [==============================] - 0s 569us/step - loss: 0.2868 - acc: 0.9099 - val_loss: 0.8409 - val_acc: 0.7500\nEpoch 120/500\n433/433 [==============================] - 0s 558us/step - loss: 0.3206 - acc: 0.9122 - val_loss: 0.8404 - val_acc: 0.7500\nEpoch 121/500\n433/433 [==============================] - 0s 568us/step - loss: 0.2529 - acc: 0.9261 - val_loss: 0.8398 - val_acc: 0.7500\nEpoch 122/500\n433/433 [==============================] - 0s 555us/step - loss: 0.3083 - acc: 0.8938 - val_loss: 0.8397 - val_acc: 0.7500\nEpoch 123/500\n433/433 [==============================] - 0s 578us/step - loss: 0.2651 - acc: 0.9192 - val_loss: 0.8392 - val_acc: 0.7500\nEpoch 124/500\n433/433 [==============================] - 0s 544us/step - loss: 0.2836 - acc: 0.9284 - val_loss: 0.8389 - val_acc: 0.7500\nEpoch 125/500\n433/433 [==============================] - 0s 565us/step - loss: 0.3250 - acc: 0.9099 - val_loss: 0.8384 - val_acc: 0.7500\nEpoch 126/500\n433/433 [==============================] - 0s 553us/step - loss: 0.2501 - acc: 0.9284 - val_loss: 0.8378 - val_acc: 0.7500\nEpoch 127/500\n433/433 [==============================] - 0s 568us/step - loss: 0.2750 - acc: 0.9238 - val_loss: 0.8375 - val_acc: 0.7500\nEpoch 128/500\n433/433 [==============================] - 0s 567us/step - loss: 0.2888 - acc: 0.9122 - val_loss: 0.8372 - val_acc: 0.7500\nEpoch 129/500\n433/433 [==============================] - 0s 567us/step - loss: 0.3053 - acc: 0.9007 - val_loss: 0.8367 - val_acc: 0.7500\nEpoch 130/500\n433/433 [==============================] - 0s 577us/step - loss: 0.2997 - acc: 0.9099 - val_loss: 0.8364 - val_acc: 0.7500\nEpoch 131/500\n433/433 [==============================] - 0s 563us/step - loss: 0.2580 - acc: 0.9376 - val_loss: 0.8360 - val_acc: 0.7500\nEpoch 132/500\n433/433 [==============================] - 0s 554us/step - loss: 0.2867 - acc: 0.9238 - val_loss: 0.8356 - val_acc: 0.7500\nEpoch 133/500\n433/433 [==============================] - 0s 582us/step - loss: 0.2606 - acc: 0.9353 - val_loss: 0.8351 - val_acc: 0.7500\nEpoch 134/500\n433/433 [==============================] - 0s 556us/step - loss: 0.2638 - acc: 0.9192 - val_loss: 0.8347 - val_acc: 0.7500\nEpoch 135/500\n433/433 [==============================] - 0s 557us/step - loss: 0.2871 - acc: 0.9192 - val_loss: 0.8343 - val_acc: 0.7500\nEpoch 136/500\n433/433 [==============================] - 0s 557us/step - loss: 0.2751 - acc: 0.9169 - val_loss: 0.8338 - val_acc: 0.7500\nEpoch 137/500\n433/433 [==============================] - 0s 573us/step - loss: 0.2906 - acc: 0.9192 - val_loss: 0.8335 - val_acc: 0.7500\nEpoch 138/500\n433/433 [==============================] - 0s 559us/step - loss: 0.2332 - acc: 0.9400 - val_loss: 0.8330 - val_acc: 0.7500\nEpoch 139/500\n433/433 [==============================] - 0s 551us/step - loss: 0.3034 - acc: 0.9122 - val_loss: 0.8326 - val_acc: 0.7500\nEpoch 140/500\n433/433 [==============================] - 0s 592us/step - loss: 0.2809 - acc: 0.9169 - val_loss: 0.8322 - val_acc: 0.7500\nEpoch 141/500\n433/433 [==============================] - 0s 546us/step - loss: 0.3102 - acc: 0.9122 - 
val_loss: 0.8320 - val_acc: 0.7500\nEpoch 142/500\n433/433 [==============================] - 0s 573us/step - loss: 0.2778 - acc: 0.9238 - val_loss: 0.8316 - val_acc: 0.7500\nEpoch 143/500\n433/433 [==============================] - 0s 563us/step - loss: 0.3261 - acc: 0.8915 - val_loss: 0.8313 - val_acc: 0.7500\nEpoch 144/500\n433/433 [==============================] - 0s 548us/step - loss: 0.2819 - acc: 0.9053 - val_loss: 0.8311 - val_acc: 0.7500\nEpoch 145/500\n433/433 [==============================] - 0s 565us/step - loss: 0.2919 - acc: 0.9076 - val_loss: 0.8307 - val_acc: 0.7500\nEpoch 146/500\n433/433 [==============================] - 0s 565us/step - loss: 0.3378 - acc: 0.9076 - val_loss: 0.8301 - val_acc: 0.7500\nEpoch 147/500\n433/433 [==============================] - 0s 546us/step - loss: 0.2887 - acc: 0.9215 - val_loss: 0.8295 - val_acc: 0.7500\nEpoch 148/500\n433/433 [==============================] - 0s 550us/step - loss: 0.2985 - acc: 0.9076 - val_loss: 0.8289 - val_acc: 0.7500\nEpoch 149/500\n433/433 [==============================] - 0s 554us/step - loss: 0.3365 - acc: 0.8938 - val_loss: 0.8286 - val_acc: 0.7500\nEpoch 150/500\n433/433 [==============================] - 0s 569us/step - loss: 0.2758 - acc: 0.9261 - val_loss: 0.8281 - val_acc: 0.7500\nEpoch 151/500\n433/433 [==============================] - 0s 557us/step - loss: 0.3039 - acc: 0.9053 - val_loss: 0.8275 - val_acc: 0.7500\nEpoch 152/500\n433/433 [==============================] - 0s 579us/step - loss: 0.2767 - acc: 0.9145 - val_loss: 0.8270 - val_acc: 0.7500\nEpoch 153/500\n433/433 [==============================] - 0s 569us/step - loss: 0.2838 - acc: 0.9169 - val_loss: 0.8267 - val_acc: 0.7500\nEpoch 154/500\n433/433 [==============================] - 0s 565us/step - loss: 0.3036 - acc: 0.9122 - val_loss: 0.8263 - val_acc: 0.7500\nEpoch 155/500\n433/433 [==============================] - 0s 535us/step - loss: 0.3084 - acc: 0.9122 - val_loss: 0.8259 - val_acc: 0.7500\nEpoch 156/500\n433/433 [==============================] - 0s 578us/step - loss: 0.2730 - acc: 0.9215 - val_loss: 0.8258 - val_acc: 0.7500\nEpoch 157/500\n433/433 [==============================] - 0s 569us/step - loss: 0.2941 - acc: 0.9169 - val_loss: 0.8254 - val_acc: 0.7500\nEpoch 158/500\n433/433 [==============================] - 0s 562us/step - loss: 0.2969 - acc: 0.9215 - val_loss: 0.8252 - val_acc: 0.7500\nEpoch 159/500\n433/433 [==============================] - 0s 552us/step - loss: 0.2676 - acc: 0.9307 - val_loss: 0.8246 - val_acc: 0.7500\nEpoch 160/500\n433/433 [==============================] - 0s 547us/step - loss: 0.2939 - acc: 0.9053 - val_loss: 0.8245 - val_acc: 0.7500\nEpoch 161/500\n433/433 [==============================] - 0s 556us/step - loss: 0.2749 - acc: 0.9376 - val_loss: 0.8242 - val_acc: 0.7500\nEpoch 162/500\n433/433 [==============================] - 0s 581us/step - loss: 0.2872 - acc: 0.9284 - val_loss: 0.8237 - val_acc: 0.7500\nEpoch 163/500\n433/433 [==============================] - 0s 565us/step - loss: 0.2503 - acc: 0.9169 - val_loss: 0.8233 - val_acc: 0.7500\nEpoch 164/500\n433/433 [==============================] - 0s 548us/step - loss: 0.3213 - acc: 0.9099 - val_loss: 0.8229 - val_acc: 0.7500\nEpoch 165/500\n433/433 [==============================] - 0s 562us/step - loss: 0.3002 - acc: 0.9030 - val_loss: 0.8224 - val_acc: 0.7500\nEpoch 166/500\n433/433 [==============================] - 0s 583us/step - loss: 0.2899 - acc: 0.9099 - val_loss: 0.8223 - val_acc: 0.7500\nEpoch 167/500\n433/433 
[==============================] - 0s 548us/step - loss: 0.3071 - acc: 0.9053 - val_loss: 0.8219 - val_acc: 0.7500\nEpoch 168/500\n433/433 [==============================] - 0s 571us/step - loss: 0.3519 - acc: 0.8891 - val_loss: 0.8216 - val_acc: 0.7500\nEpoch 169/500\n433/433 [==============================] - 0s 583us/step - loss: 0.3209 - acc: 0.9169 - val_loss: 0.8212 - val_acc: 0.7500\nEpoch 170/500\n433/433 [==============================] - 0s 568us/step - loss: 0.2852 - acc: 0.9261 - val_loss: 0.8208 - val_acc: 0.7500\nEpoch 171/500\n433/433 [==============================] - 0s 560us/step - loss: 0.2863 - acc: 0.9145 - val_loss: 0.8205 - val_acc: 0.7500\nEpoch 172/500\n433/433 [==============================] - 0s 543us/step - loss: 0.3251 - acc: 0.9099 - val_loss: 0.8201 - val_acc: 0.7500\nEpoch 173/500\n433/433 [==============================] - 0s 553us/step - loss: 0.2844 - acc: 0.9122 - val_loss: 0.8199 - val_acc: 0.7500\nEpoch 174/500\n433/433 [==============================] - 0s 558us/step - loss: 0.2990 - acc: 0.9030 - val_loss: 0.8197 - val_acc: 0.7500\nEpoch 175/500\n433/433 [==============================] - 0s 576us/step - loss: 0.2991 - acc: 0.9122 - val_loss: 0.8195 - val_acc: 0.7500\nEpoch 176/500\n433/433 [==============================] - 0s 545us/step - loss: 0.2777 - acc: 0.9238 - val_loss: 0.8194 - val_acc: 0.7500\nEpoch 177/500\n433/433 [==============================] - 0s 572us/step - loss: 0.2845 - acc: 0.9145 - val_loss: 0.8192 - val_acc: 0.7500\nEpoch 178/500\n433/433 [==============================] - 0s 572us/step - loss: 0.2510 - acc: 0.9330 - val_loss: 0.8189 - val_acc: 0.7500\nEpoch 179/500\n433/433 [==============================] - 0s 573us/step - loss: 0.2680 - acc: 0.9284 - val_loss: 0.8186 - val_acc: 0.7500\nEpoch 180/500\n433/433 [==============================] - 0s 573us/step - loss: 0.3041 - acc: 0.9145 - val_loss: 0.8187 - val_acc: 0.7500\nEpoch 181/500\n433/433 [==============================] - 0s 566us/step - loss: 0.2820 - acc: 0.9122 - val_loss: 0.8185 - val_acc: 0.7500\nEpoch 182/500\n433/433 [==============================] - 0s 557us/step - loss: 0.2715 - acc: 0.9215 - val_loss: 0.8184 - val_acc: 0.7500\nEpoch 183/500\n433/433 [==============================] - 0s 553us/step - loss: 0.2979 - acc: 0.9099 - val_loss: 0.8183 - val_acc: 0.7500\nEpoch 184/500\n433/433 [==============================] - 0s 568us/step - loss: 0.2773 - acc: 0.9238 - val_loss: 0.8182 - val_acc: 0.7500\nEpoch 185/500\n433/433 [==============================] - 0s 558us/step - loss: 0.2936 - acc: 0.9030 - val_loss: 0.8181 - val_acc: 0.7500\nEpoch 186/500\n433/433 [==============================] - 0s 568us/step - loss: 0.2968 - acc: 0.9099 - val_loss: 0.8179 - val_acc: 0.7500\nEpoch 187/500\n433/433 [==============================] - 0s 566us/step - loss: 0.2849 - acc: 0.9353 - val_loss: 0.8177 - val_acc: 0.7500\nEpoch 188/500\n433/433 [==============================] - 0s 578us/step - loss: 0.2845 - acc: 0.9284 - val_loss: 0.8176 - val_acc: 0.7500\nEpoch 189/500\n433/433 [==============================] - 0s 556us/step - loss: 0.2979 - acc: 0.9145 - val_loss: 0.8175 - val_acc: 0.7500\nEpoch 190/500\n433/433 [==============================] - 0s 548us/step - loss: 0.2811 - acc: 0.9215 - val_loss: 0.8172 - val_acc: 0.7500\nEpoch 191/500\n433/433 [==============================] - 0s 575us/step - loss: 0.2900 - acc: 0.9145 - val_loss: 0.8170 - val_acc: 0.7500\nEpoch 192/500\n433/433 [==============================] - 0s 560us/step - loss: 0.2645 - acc: 0.9238 - 
val_loss: 0.8168 - val_acc: 0.7500\nEpoch 193/500\n433/433 [==============================] - 0s 556us/step - loss: 0.3441 - acc: 0.8984 - val_loss: 0.8163 - val_acc: 0.7557\nEpoch 194/500\n433/433 [==============================] - 0s 560us/step - loss: 0.2991 - acc: 0.9030 - val_loss: 0.8160 - val_acc: 0.7557\nEpoch 195/500\n433/433 [==============================] - 0s 570us/step - loss: 0.3030 - acc: 0.9192 - val_loss: 0.8157 - val_acc: 0.7557\nEpoch 196/500\n433/433 [==============================] - 0s 538us/step - loss: 0.2654 - acc: 0.9238 - val_loss: 0.8156 - val_acc: 0.7557\nEpoch 197/500\n433/433 [==============================] - 0s 546us/step - loss: 0.2877 - acc: 0.9122 - val_loss: 0.8155 - val_acc: 0.7557\nEpoch 198/500\n433/433 [==============================] - 0s 544us/step - loss: 0.3432 - acc: 0.8776 - val_loss: 0.8152 - val_acc: 0.7557\nEpoch 199/500\n433/433 [==============================] - 0s 549us/step - loss: 0.3133 - acc: 0.9169 - val_loss: 0.8151 - val_acc: 0.7557\nEpoch 200/500\n433/433 [==============================] - 0s 553us/step - loss: 0.2943 - acc: 0.9099 - val_loss: 0.8149 - val_acc: 0.7557\nEpoch 201/500\n433/433 [==============================] - 0s 578us/step - loss: 0.3559 - acc: 0.8915 - val_loss: 0.8147 - val_acc: 0.7557\nEpoch 202/500\n433/433 [==============================] - 0s 547us/step - loss: 0.2735 - acc: 0.9261 - val_loss: 0.8145 - val_acc: 0.7557\nEpoch 203/500\n433/433 [==============================] - 0s 553us/step - loss: 0.3241 - acc: 0.9053 - val_loss: 0.8146 - val_acc: 0.7557\nEpoch 204/500\n433/433 [==============================] - 0s 568us/step - loss: 0.2187 - acc: 0.9584 - val_loss: 0.8145 - val_acc: 0.7557\nEpoch 205/500\n433/433 [==============================] - 0s 585us/step - loss: 0.3276 - acc: 0.9030 - val_loss: 0.8143 - val_acc: 0.7557\nEpoch 206/500\n433/433 [==============================] - 0s 598us/step - loss: 0.3370 - acc: 0.8938 - val_loss: 0.8142 - val_acc: 0.7557\nEpoch 207/500\n433/433 [==============================] - 0s 612us/step - loss: 0.3033 - acc: 0.9215 - val_loss: 0.8140 - val_acc: 0.7557\nEpoch 208/500\n433/433 [==============================] - 0s 604us/step - loss: 0.2544 - acc: 0.9238 - val_loss: 0.8140 - val_acc: 0.7557\nEpoch 209/500\n433/433 [==============================] - 0s 594us/step - loss: 0.3009 - acc: 0.9238 - val_loss: 0.8137 - val_acc: 0.7557\nEpoch 210/500\n433/433 [==============================] - 0s 579us/step - loss: 0.3068 - acc: 0.9099 - val_loss: 0.8136 - val_acc: 0.7557\nEpoch 211/500\n433/433 [==============================] - 0s 565us/step - loss: 0.2787 - acc: 0.9192 - val_loss: 0.8134 - val_acc: 0.7557\nEpoch 212/500\n433/433 [==============================] - 0s 585us/step - loss: 0.2999 - acc: 0.9169 - val_loss: 0.8135 - val_acc: 0.7557\nEpoch 213/500\n433/433 [==============================] - 0s 574us/step - loss: 0.2533 - acc: 0.9330 - val_loss: 0.8132 - val_acc: 0.7557\nEpoch 214/500\n433/433 [==============================] - 0s 579us/step - loss: 0.3011 - acc: 0.9192 - val_loss: 0.8130 - val_acc: 0.7557\nEpoch 215/500\n433/433 [==============================] - 0s 573us/step - loss: 0.2682 - acc: 0.9238 - val_loss: 0.8130 - val_acc: 0.7557\nEpoch 216/500\n433/433 [==============================] - 0s 572us/step - loss: 0.3255 - acc: 0.8938 - val_loss: 0.8128 - val_acc: 0.7557\nEpoch 217/500\n433/433 [==============================] - 0s 580us/step - loss: 0.2793 - acc: 0.9099 - val_loss: 0.8127 - val_acc: 0.7557\nEpoch 218/500\n433/433 
[==============================] - 0s 586us/step - loss: 0.2652 - acc: 0.9215 - val_loss: 0.8126 - val_acc: 0.7557\nEpoch 219/500\n433/433 [==============================] - 0s 563us/step - loss: 0.3577 - acc: 0.8938 - val_loss: 0.8125 - val_acc: 0.7557\nEpoch 220/500\n433/433 [==============================] - 0s 548us/step - loss: 0.2726 - acc: 0.9261 - val_loss: 0.8123 - val_acc: 0.7557\nEpoch 221/500\n433/433 [==============================] - 0s 589us/step - loss: 0.3393 - acc: 0.8891 - val_loss: 0.8122 - val_acc: 0.7557\nEpoch 222/500\n433/433 [==============================] - 0s 583us/step - loss: 0.3144 - acc: 0.9053 - val_loss: 0.8121 - val_acc: 0.7557\nEpoch 223/500\n433/433 [==============================] - 0s 580us/step - loss: 0.3045 - acc: 0.9076 - val_loss: 0.8119 - val_acc: 0.7557\nEpoch 224/500\n433/433 [==============================] - 0s 579us/step - loss: 0.3046 - acc: 0.9030 - val_loss: 0.8118 - val_acc: 0.7557\nEpoch 225/500\n433/433 [==============================] - 0s 592us/step - loss: 0.3271 - acc: 0.9169 - val_loss: 0.8119 - val_acc: 0.7557\nEpoch 226/500\n433/433 [==============================] - 0s 573us/step - loss: 0.2656 - acc: 0.9330 - val_loss: 0.8117 - val_acc: 0.7557\nEpoch 227/500\n433/433 [==============================] - 0s 577us/step - loss: 0.2999 - acc: 0.9145 - val_loss: 0.8117 - val_acc: 0.7557\nEpoch 228/500\n433/433 [==============================] - 0s 564us/step - loss: 0.2967 - acc: 0.9169 - val_loss: 0.8116 - val_acc: 0.7557\nEpoch 229/500\n433/433 [==============================] - 0s 562us/step - loss: 0.2500 - acc: 0.9215 - val_loss: 0.8114 - val_acc: 0.7557\nEpoch 230/500\n433/433 [==============================] - 0s 575us/step - loss: 0.3218 - acc: 0.9007 - val_loss: 0.8114 - val_acc: 0.7557\nEpoch 231/500\n433/433 [==============================] - 0s 571us/step - loss: 0.3103 - acc: 0.8938 - val_loss: 0.8113 - val_acc: 0.7557\nEpoch 232/500\n433/433 [==============================] - 0s 555us/step - loss: 0.2865 - acc: 0.9122 - val_loss: 0.8113 - val_acc: 0.7557\nEpoch 233/500\n433/433 [==============================] - 0s 563us/step - loss: 0.2950 - acc: 0.9238 - val_loss: 0.8115 - val_acc: 0.7557\nEpoch 234/500\n433/433 [==============================] - 0s 600us/step - loss: 0.2728 - acc: 0.9215 - val_loss: 0.8115 - val_acc: 0.7557\nEpoch 235/500\n433/433 [==============================] - 0s 576us/step - loss: 0.3260 - acc: 0.9053 - val_loss: 0.8115 - val_acc: 0.7557\nEpoch 236/500\n433/433 [==============================] - 0s 560us/step - loss: 0.2482 - acc: 0.9353 - val_loss: 0.8115 - val_acc: 0.7557\nEpoch 237/500\n433/433 [==============================] - 0s 552us/step - loss: 0.2913 - acc: 0.9030 - val_loss: 0.8116 - val_acc: 0.7557\nEpoch 238/500\n433/433 [==============================] - 0s 567us/step - loss: 0.3239 - acc: 0.9122 - val_loss: 0.8117 - val_acc: 0.7557\nEpoch 239/500\n433/433 [==============================] - 0s 562us/step - loss: 0.3000 - acc: 0.9238 - val_loss: 0.8116 - val_acc: 0.7557\nEpoch 240/500\n433/433 [==============================] - 0s 554us/step - loss: 0.2870 - acc: 0.9122 - val_loss: 0.8115 - val_acc: 0.7557\nEpoch 241/500\n433/433 [==============================] - 0s 550us/step - loss: 0.2902 - acc: 0.9076 - val_loss: 0.8114 - val_acc: 0.7557\nEpoch 242/500\n433/433 [==============================] - 0s 555us/step - loss: 0.2671 - acc: 0.9261 - val_loss: 0.8113 - val_acc: 0.7557\nEpoch 243/500\n433/433 [==============================] - 0s 586us/step - loss: 0.2839 - acc: 0.9145 - 
val_loss: 0.8113 - val_acc: 0.7557\nEpoch 244/500\n433/433 [==============================] - 0s 564us/step - loss: 0.2984 - acc: 0.9122 - val_loss: 0.8113 - val_acc: 0.7557\nEpoch 245/500\n433/433 [==============================] - 0s 583us/step - loss: 0.2753 - acc: 0.9284 - val_loss: 0.8111 - val_acc: 0.7557\nEpoch 246/500\n433/433 [==============================] - 0s 563us/step - loss: 0.2607 - acc: 0.9400 - val_loss: 0.8112 - val_acc: 0.7557\nEpoch 247/500\n433/433 [==============================] - 0s 561us/step - loss: 0.3430 - acc: 0.8868 - val_loss: 0.8111 - val_acc: 0.7557\nEpoch 248/500\n433/433 [==============================] - 0s 559us/step - loss: 0.3241 - acc: 0.8915 - val_loss: 0.8109 - val_acc: 0.7557\nEpoch 249/500\n433/433 [==============================] - 0s 577us/step - loss: 0.2481 - acc: 0.9376 - val_loss: 0.8109 - val_acc: 0.7557\nEpoch 250/500\n433/433 [==============================] - 0s 545us/step - loss: 0.2957 - acc: 0.9215 - val_loss: 0.8108 - val_acc: 0.7557\nEpoch 251/500\n433/433 [==============================] - 0s 555us/step - loss: 0.2805 - acc: 0.9238 - val_loss: 0.8109 - val_acc: 0.7557\nEpoch 252/500\n433/433 [==============================] - 0s 552us/step - loss: 0.2725 - acc: 0.9261 - val_loss: 0.8107 - val_acc: 0.7557\nEpoch 253/500\n433/433 [==============================] - 0s 552us/step - loss: 0.2664 - acc: 0.9330 - val_loss: 0.8107 - val_acc: 0.7557\nEpoch 254/500\n433/433 [==============================] - 0s 561us/step - loss: 0.2571 - acc: 0.9284 - val_loss: 0.8109 - val_acc: 0.7557\nEpoch 255/500\n433/433 [==============================] - 0s 551us/step - loss: 0.2957 - acc: 0.9192 - val_loss: 0.8108 - val_acc: 0.7557\nEpoch 256/500\n433/433 [==============================] - 0s 546us/step - loss: 0.2329 - acc: 0.9376 - val_loss: 0.8107 - val_acc: 0.7557\nEpoch 257/500\n433/433 [==============================] - 0s 554us/step - loss: 0.2711 - acc: 0.9238 - val_loss: 0.8110 - val_acc: 0.7557\nEpoch 258/500\n433/433 [==============================] - 0s 549us/step - loss: 0.2855 - acc: 0.9238 - val_loss: 0.8109 - val_acc: 0.7557\nEpoch 259/500\n433/433 [==============================] - 0s 582us/step - loss: 0.2901 - acc: 0.9307 - val_loss: 0.8108 - val_acc: 0.7557\nEpoch 260/500\n433/433 [==============================] - 0s 548us/step - loss: 0.2681 - acc: 0.9215 - val_loss: 0.8106 - val_acc: 0.7557\nEpoch 261/500\n433/433 [==============================] - 0s 565us/step - loss: 0.2790 - acc: 0.9307 - val_loss: 0.8106 - val_acc: 0.7557\nEpoch 262/500\n433/433 [==============================] - 0s 554us/step - loss: 0.2979 - acc: 0.9169 - val_loss: 0.8105 - val_acc: 0.7557\nEpoch 263/500\n433/433 [==============================] - 0s 561us/step - loss: 0.3022 - acc: 0.9169 - val_loss: 0.8105 - val_acc: 0.7557\nEpoch 264/500\n433/433 [==============================] - 0s 543us/step - loss: 0.2534 - acc: 0.9215 - val_loss: 0.8105 - val_acc: 0.7557\nEpoch 265/500\n433/433 [==============================] - 0s 551us/step - loss: 0.3177 - acc: 0.9122 - val_loss: 0.8105 - val_acc: 0.7557\nEpoch 266/500\n433/433 [==============================] - 0s 574us/step - loss: 0.3233 - acc: 0.9169 - val_loss: 0.8105 - val_acc: 0.7557\nEpoch 267/500\n433/433 [==============================] - 0s 557us/step - loss: 0.2665 - acc: 0.9238 - val_loss: 0.8103 - val_acc: 0.7557\nEpoch 268/500\n433/433 [==============================] - 0s 570us/step - loss: 0.2716 - acc: 0.9215 - val_loss: 0.8101 - val_acc: 0.7557\nEpoch 269/500\n433/433 
[==============================] - 0s 564us/step - loss: 0.2286 - acc: 0.9446 - val_loss: 0.8101 - val_acc: 0.7557\nEpoch 270/500\n433/433 [==============================] - 0s 555us/step - loss: 0.3272 - acc: 0.9099 - val_loss: 0.8100 - val_acc: 0.7557\nEpoch 271/500\n433/433 [==============================] - 0s 555us/step - loss: 0.2895 - acc: 0.9376 - val_loss: 0.8100 - val_acc: 0.7557\nEpoch 272/500\n433/433 [==============================] - 0s 549us/step - loss: 0.3013 - acc: 0.9053 - val_loss: 0.8098 - val_acc: 0.7557\nEpoch 273/500\n433/433 [==============================] - 0s 564us/step - loss: 0.3011 - acc: 0.9099 - val_loss: 0.8099 - val_acc: 0.7557\nEpoch 274/500\n433/433 [==============================] - 0s 552us/step - loss: 0.2661 - acc: 0.9261 - val_loss: 0.8099 - val_acc: 0.7557\nEpoch 275/500\n433/433 [==============================] - 0s 560us/step - loss: 0.2687 - acc: 0.9284 - val_loss: 0.8097 - val_acc: 0.7557\nEpoch 276/500\n433/433 [==============================] - 0s 538us/step - loss: 0.3156 - acc: 0.9053 - val_loss: 0.8097 - val_acc: 0.7557\nEpoch 277/500\n433/433 [==============================] - 0s 563us/step - loss: 0.3007 - acc: 0.9122 - val_loss: 0.8095 - val_acc: 0.7557\nEpoch 278/500\n433/433 [==============================] - 0s 542us/step - loss: 0.2895 - acc: 0.9007 - val_loss: 0.8096 - val_acc: 0.7557\nEpoch 279/500\n433/433 [==============================] - 0s 583us/step - loss: 0.2951 - acc: 0.9145 - val_loss: 0.8094 - val_acc: 0.7557\nEpoch 280/500\n433/433 [==============================] - 0s 536us/step - loss: 0.3010 - acc: 0.9099 - val_loss: 0.8094 - val_acc: 0.7557\nEpoch 281/500\n433/433 [==============================] - 0s 561us/step - loss: 0.2754 - acc: 0.9261 - val_loss: 0.8094 - val_acc: 0.7557\nEpoch 282/500\n433/433 [==============================] - 0s 542us/step - loss: 0.2981 - acc: 0.9145 - val_loss: 0.8094 - val_acc: 0.7557\nEpoch 283/500\n433/433 [==============================] - 0s 553us/step - loss: 0.2764 - acc: 0.9261 - val_loss: 0.8095 - val_acc: 0.7557\nEpoch 284/500\n433/433 [==============================] - 0s 553us/step - loss: 0.3256 - acc: 0.9053 - val_loss: 0.8093 - val_acc: 0.7557\nEpoch 285/500\n433/433 [==============================] - 0s 559us/step - loss: 0.2991 - acc: 0.9030 - val_loss: 0.8091 - val_acc: 0.7557\nEpoch 286/500\n433/433 [==============================] - 0s 566us/step - loss: 0.2819 - acc: 0.9238 - val_loss: 0.8090 - val_acc: 0.7557\nEpoch 287/500\n433/433 [==============================] - 0s 553us/step - loss: 0.2830 - acc: 0.9053 - val_loss: 0.8089 - val_acc: 0.7557\nEpoch 288/500\n433/433 [==============================] - 0s 555us/step - loss: 0.2908 - acc: 0.9145 - val_loss: 0.8088 - val_acc: 0.7557\nEpoch 289/500\n433/433 [==============================] - 0s 562us/step - loss: 0.3311 - acc: 0.9053 - val_loss: 0.8089 - val_acc: 0.7557\nEpoch 290/500\n433/433 [==============================] - 0s 549us/step - loss: 0.2796 - acc: 0.9169 - val_loss: 0.8090 - val_acc: 0.7557\nEpoch 291/500\n433/433 [==============================] - 0s 553us/step - loss: 0.3412 - acc: 0.9099 - val_loss: 0.8089 - val_acc: 0.7557\nEpoch 292/500\n433/433 [==============================] - 0s 540us/step - loss: 0.2888 - acc: 0.9238 - val_loss: 0.8090 - val_acc: 0.7557\nEpoch 293/500\n433/433 [==============================] - 0s 545us/step - loss: 0.2836 - acc: 0.9169 - val_loss: 0.8088 - val_acc: 0.7557\nEpoch 294/500\n433/433 [==============================] - 0s 546us/step - loss: 0.2813 - acc: 0.9145 - 
val_loss: 0.8089 - val_acc: 0.7557\nEpoch 295/500\n433/433 [==============================] - 0s 554us/step - loss: 0.2881 - acc: 0.9192 - val_loss: 0.8090 - val_acc: 0.7557\nEpoch 296/500\n433/433 [==============================] - 0s 551us/step - loss: 0.2864 - acc: 0.9192 - val_loss: 0.8089 - val_acc: 0.7557\nEpoch 297/500\n433/433 [==============================] - 0s 551us/step - loss: 0.3208 - acc: 0.9030 - val_loss: 0.8090 - val_acc: 0.7557\nEpoch 298/500\n433/433 [==============================] - 0s 545us/step - loss: 0.3011 - acc: 0.9122 - val_loss: 0.8091 - val_acc: 0.7557\nEpoch 299/500\n433/433 [==============================] - 0s 554us/step - loss: 0.3174 - acc: 0.9030 - val_loss: 0.8091 - val_acc: 0.7500\nEpoch 300/500\n433/433 [==============================] - 0s 563us/step - loss: 0.2753 - acc: 0.9238 - val_loss: 0.8093 - val_acc: 0.7500\nEpoch 301/500\n433/433 [==============================] - 0s 566us/step - loss: 0.3209 - acc: 0.9030 - val_loss: 0.8093 - val_acc: 0.7500\nEpoch 302/500\n433/433 [==============================] - 0s 551us/step - loss: 0.3264 - acc: 0.9030 - val_loss: 0.8093 - val_acc: 0.7500\nEpoch 303/500\n433/433 [==============================] - 0s 560us/step - loss: 0.3397 - acc: 0.8891 - val_loss: 0.8092 - val_acc: 0.7500\nEpoch 304/500\n433/433 [==============================] - 0s 552us/step - loss: 0.3064 - acc: 0.9053 - val_loss: 0.8092 - val_acc: 0.7500\nEpoch 305/500\n433/433 [==============================] - 0s 557us/step - loss: 0.2921 - acc: 0.9215 - val_loss: 0.8092 - val_acc: 0.7500\nEpoch 306/500\n433/433 [==============================] - 0s 544us/step - loss: 0.2885 - acc: 0.9215 - val_loss: 0.8092 - val_acc: 0.7500\nEpoch 307/500\n433/433 [==============================] - 0s 551us/step - loss: 0.3014 - acc: 0.9007 - val_loss: 0.8092 - val_acc: 0.7500\nEpoch 308/500\n433/433 [==============================] - 0s 577us/step - loss: 0.3124 - acc: 0.9169 - val_loss: 0.8093 - val_acc: 0.7500\nEpoch 309/500\n433/433 [==============================] - 0s 558us/step - loss: 0.2862 - acc: 0.9053 - val_loss: 0.8094 - val_acc: 0.7500\nEpoch 310/500\n433/433 [==============================] - 0s 545us/step - loss: 0.2947 - acc: 0.9169 - val_loss: 0.8094 - val_acc: 0.7500\nEpoch 311/500\n433/433 [==============================] - 0s 543us/step - loss: 0.2778 - acc: 0.9215 - val_loss: 0.8094 - val_acc: 0.7500\nEpoch 312/500\n433/433 [==============================] - 0s 565us/step - loss: 0.3298 - acc: 0.9030 - val_loss: 0.8096 - val_acc: 0.7500\nEpoch 313/500\n433/433 [==============================] - 0s 547us/step - loss: 0.3163 - acc: 0.9169 - val_loss: 0.8098 - val_acc: 0.7500\nEpoch 314/500\n433/433 [==============================] - 0s 534us/step - loss: 0.3132 - acc: 0.9169 - val_loss: 0.8097 - val_acc: 0.7500\nEpoch 315/500\n433/433 [==============================] - 0s 547us/step - loss: 0.2751 - acc: 0.9261 - val_loss: 0.8096 - val_acc: 0.7500\nEpoch 316/500\n433/433 [==============================] - 0s 523us/step - loss: 0.3327 - acc: 0.9076 - val_loss: 0.8096 - val_acc: 0.7500\nEpoch 317/500\n433/433 [==============================] - 0s 554us/step - loss: 0.3010 - acc: 0.9099 - val_loss: 0.8095 - val_acc: 0.7500\nEpoch 318/500\n433/433 [==============================] - 0s 547us/step - loss: 0.2997 - acc: 0.9145 - val_loss: 0.8094 - val_acc: 0.7500\nEpoch 319/500\n433/433 [==============================] - 0s 551us/step - loss: 0.2863 - acc: 0.9169 - val_loss: 0.8094 - val_acc: 0.7500\nEpoch 320/500\n433/433 
[==============================] - 0s 538us/step - loss: 0.2918 - acc: 0.9122 - val_loss: 0.8095 - val_acc: 0.7500\n...\n[epochs 321-499 omitted: training loss oscillated in the 0.21-0.37 range, training acc around 0.89-0.95, while val_acc stayed flat at 0.7500 throughout]\nEpoch 500/500\n433/433 [==============================] - 0s 556us/step - loss: 0.2898 - acc: 0.9192 - val_loss: 0.8050 - val_acc: 0.7500\n"
],
[
"# Plot training & validation accuracy values\nplt.plot(history.history['acc'])\nplt.plot(history.history['val_acc'])\nplt.title('Model accuracy')\nplt.ylabel('Accuracy')\nplt.xlabel('Epoch')\nplt.legend(['Train', 'Test'], loc='upper left')\nplt.show()",
"_____no_output_____"
]
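,
[
"# A minimal companion sketch (assumes the `history` object from the fit above\n# is still in scope): plot training vs. validation loss alongside the accuracy\n# plot, since a flat val_acc can hide a drifting val_loss.\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('Model loss')\nplt.ylabel('Loss')\nplt.xlabel('Epoch')\nplt.legend(['Train', 'Test'], loc='upper right')\nplt.show()",
"_____no_output_____"
]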
],
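[
[
"# A hedged evaluation sketch (assumes the test arrays X_test_0, X_test_1 and\n# Y_test from the earlier cells are still in scope): report the final held-out\n# loss and accuracy in a single line.\nscore = DD_Net.evaluate([X_test_0, X_test_1], Y_test, verbose=0)\nprint('Test loss: %.4f  Test acc: %.4f' % (score[0], score[1]))",
"_____no_output_____"
]
],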
[
[
"## Train and test on GT_split 2",
"_____no_output_____"
]
],
[
[
"from google.colab import drive\nimport pickle\ndrive.mount('/content/drive')\nDATA_PATH1 = \"/content/drive/My Drive/Colab Notebooks/Data\"\ninfile = open(DATA_PATH1+'/GT_train_2.pkl','rb')\nTrain = pickle.load(infile)\nDATA_PATH2 = \"/content/drive/My Drive/Colab Notebooks/Data\"\ntestfile= open(DATA_PATH2+'/GT_test_2.pkl','rb')\nTest = pickle.load(testfile)\n\nfrom sklearn import preprocessing\nle = preprocessing.LabelEncoder()\nle.fit(Train['label'])\n\nX_0,X_1,Y = data_generator(Train,C,le)\nX_test_0,X_test_1,Y_test = data_generator(Test,C,le)",
"Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount(\"/content/drive\", force_remount=True).\n"
],
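[
"# A minimal sanity-check sketch (assumes data_generator returns numpy arrays):\n# confirm the train/test inputs and one-hot labels have matching sample counts\n# before re-initializing and training the network.\nprint('Train:', X_0.shape, X_1.shape, Y.shape)\nprint('Test: ', X_test_0.shape, X_test_1.shape, Y_test.shape)",
"_____no_output_____"
],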
[
"# Re-initialize weights, since training and testing data switch\nDD_Net = build_DD_Net(C)",
"_____no_output_____"
],
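[
"# Optional sketch: print the layer-by-layer summary of the freshly rebuilt\n# network (assumes build_DD_Net returns a keras Model, as its use below implies).\nDD_Net.summary()",
"_____no_output_____"
],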
[
"import keras\nlr = 1e-3\nDD_Net.compile(loss=\"categorical_crossentropy\",optimizer=adam(lr),metrics=['accuracy'])\nlrScheduler = keras.callbacks.ReduceLROnPlateau(monitor='loss', factor=0.5, patience=5, cooldown=5, min_lr=1e-5)\nhistory = DD_Net.fit([X_0,X_1],Y,\n batch_size=len(Y),\n epochs=600,\n verbose=True,\n shuffle=True,\n callbacks=[lrScheduler],\n validation_data=([X_test_0,X_test_1],Y_test) \n )\n\nlr = 1e-4\nDD_Net.compile(loss=\"categorical_crossentropy\",optimizer=adam(lr),metrics=['accuracy'])\nlrScheduler = keras.callbacks.ReduceLROnPlateau(monitor='loss', factor=0.5, patience=5, cooldown=5, min_lr=5e-6)\nhistory = DD_Net.fit([X_0,X_1],Y,\n batch_size=len(Y),\n epochs=500,\n verbose=True,\n shuffle=True,\n callbacks=[lrScheduler],\n validation_data=([X_test_0,X_test_1],Y_test) \n )",
"Train on 433 samples, validate on 176 samples\nEpoch 1/600\n433/433 [==============================] - 8s 19ms/step - loss: 3.6266 - acc: 0.0647 - val_loss: 3.0408 - val_acc: 0.0795\nEpoch 2/600\n433/433 [==============================] - 0s 579us/step - loss: 3.6029 - acc: 0.0670 - val_loss: 2.9623 - val_acc: 0.1080\nEpoch 3/600\n433/433 [==============================] - 0s 517us/step - loss: 3.4706 - acc: 0.0855 - val_loss: 2.8844 - val_acc: 0.1136\nEpoch 4/600\n433/433 [==============================] - 0s 535us/step - loss: 3.5416 - acc: 0.0762 - val_loss: 2.8067 - val_acc: 0.1477\nEpoch 5/600\n433/433 [==============================] - 0s 525us/step - loss: 3.2489 - acc: 0.1085 - val_loss: 2.7381 - val_acc: 0.1591\nEpoch 6/600\n433/433 [==============================] - 0s 524us/step - loss: 3.2520 - acc: 0.0970 - val_loss: 2.6630 - val_acc: 0.1932\nEpoch 7/600\n433/433 [==============================] - 0s 535us/step - loss: 3.0701 - acc: 0.1247 - val_loss: 2.5927 - val_acc: 0.1989\nEpoch 8/600\n433/433 [==============================] - 0s 525us/step - loss: 3.0135 - acc: 0.1201 - val_loss: 2.5275 - val_acc: 0.2330\nEpoch 9/600\n433/433 [==============================] - 0s 526us/step - loss: 2.9695 - acc: 0.1339 - val_loss: 2.4709 - val_acc: 0.2500\nEpoch 10/600\n433/433 [==============================] - 0s 538us/step - loss: 2.8782 - acc: 0.1732 - val_loss: 2.4098 - val_acc: 0.2614\nEpoch 11/600\n433/433 [==============================] - 0s 562us/step - loss: 2.7234 - acc: 0.2148 - val_loss: 2.3527 - val_acc: 0.2670\nEpoch 12/600\n433/433 [==============================] - 0s 523us/step - loss: 2.8098 - acc: 0.1686 - val_loss: 2.3001 - val_acc: 0.2841\nEpoch 13/600\n433/433 [==============================] - 0s 541us/step - loss: 2.6876 - acc: 0.1963 - val_loss: 2.2499 - val_acc: 0.2955\nEpoch 14/600\n433/433 [==============================] - 0s 522us/step - loss: 2.6882 - acc: 0.2009 - val_loss: 2.1993 - val_acc: 0.3011\nEpoch 15/600\n433/433 [==============================] - 0s 519us/step - loss: 2.6445 - acc: 0.2079 - val_loss: 2.1507 - val_acc: 0.3068\nEpoch 16/600\n433/433 [==============================] - 0s 521us/step - loss: 2.4899 - acc: 0.2263 - val_loss: 2.1036 - val_acc: 0.3125\nEpoch 17/600\n433/433 [==============================] - 0s 532us/step - loss: 2.5391 - acc: 0.2055 - val_loss: 2.0631 - val_acc: 0.3068\nEpoch 18/600\n433/433 [==============================] - 0s 533us/step - loss: 2.4296 - acc: 0.2818 - val_loss: 2.0282 - val_acc: 0.3409\nEpoch 19/600\n433/433 [==============================] - 0s 515us/step - loss: 2.4147 - acc: 0.2517 - val_loss: 1.9973 - val_acc: 0.3466\nEpoch 20/600\n433/433 [==============================] - 0s 547us/step - loss: 2.4299 - acc: 0.2818 - val_loss: 1.9729 - val_acc: 0.3636\nEpoch 21/600\n433/433 [==============================] - 0s 518us/step - loss: 2.2769 - acc: 0.3025 - val_loss: 1.9584 - val_acc: 0.3807\nEpoch 22/600\n433/433 [==============================] - 0s 531us/step - loss: 2.2680 - acc: 0.3025 - val_loss: 1.9505 - val_acc: 0.3864\nEpoch 23/600\n433/433 [==============================] - 0s 535us/step - loss: 2.2125 - acc: 0.3279 - val_loss: 1.9477 - val_acc: 0.3750\nEpoch 24/600\n433/433 [==============================] - 0s 519us/step - loss: 2.2136 - acc: 0.3164 - val_loss: 1.9429 - val_acc: 0.3864\nEpoch 25/600\n433/433 [==============================] - 0s 535us/step - loss: 2.1334 - acc: 0.3279 - val_loss: 1.9425 - val_acc: 0.3807\nEpoch 26/600\n433/433 [==============================] - 0s 
542us/step - loss: 2.2073 - acc: 0.3072 - val_loss: 1.9471 - val_acc: 0.3977\n...\n[epochs 27-280 omitted: training loss fell from ~2.1 to ~0.57, training acc rose from ~0.33 to ~0.82, val_loss declined from 1.95 to 1.04, and val_acc climbed from ~0.39 to 0.6875]\nEpoch 281/600\n433/433 [==============================] - 0s 556us/step - loss: 0.6305 - acc: 0.8037 - val_loss: 1.0395 - val_acc: 0.6875\nEpoch 
282/600\n433/433 [==============================] - 0s 547us/step - loss: 0.5574 - acc: 0.8245 - val_loss: 1.0390 - val_acc: 0.6875\nEpoch 283/600\n433/433 [==============================] - 0s 545us/step - loss: 0.5829 - acc: 0.8176 - val_loss: 1.0387 - val_acc: 0.6875\nEpoch 284/600\n433/433 [==============================] - 0s 525us/step - loss: 0.5601 - acc: 0.8406 - val_loss: 1.0383 - val_acc: 0.6875\nEpoch 285/600\n433/433 [==============================] - 0s 536us/step - loss: 0.6336 - acc: 0.7921 - val_loss: 1.0382 - val_acc: 0.6875\nEpoch 286/600\n433/433 [==============================] - 0s 519us/step - loss: 0.5432 - acc: 0.8268 - val_loss: 1.0381 - val_acc: 0.6875\nEpoch 287/600\n433/433 [==============================] - 0s 511us/step - loss: 0.6059 - acc: 0.8199 - val_loss: 1.0378 - val_acc: 0.6875\nEpoch 288/600\n433/433 [==============================] - 0s 535us/step - loss: 0.6274 - acc: 0.7945 - val_loss: 1.0376 - val_acc: 0.6875\nEpoch 289/600\n433/433 [==============================] - 0s 533us/step - loss: 0.6110 - acc: 0.7968 - val_loss: 1.0374 - val_acc: 0.6875\nEpoch 290/600\n433/433 [==============================] - 0s 515us/step - loss: 0.5733 - acc: 0.8176 - val_loss: 1.0372 - val_acc: 0.6875\nEpoch 291/600\n433/433 [==============================] - 0s 535us/step - loss: 0.6003 - acc: 0.8060 - val_loss: 1.0367 - val_acc: 0.6875\nEpoch 292/600\n433/433 [==============================] - 0s 543us/step - loss: 0.6114 - acc: 0.8014 - val_loss: 1.0365 - val_acc: 0.6875\nEpoch 293/600\n433/433 [==============================] - 0s 541us/step - loss: 0.6095 - acc: 0.8129 - val_loss: 1.0362 - val_acc: 0.6875\nEpoch 294/600\n433/433 [==============================] - 0s 536us/step - loss: 0.6104 - acc: 0.7968 - val_loss: 1.0357 - val_acc: 0.6875\nEpoch 295/600\n433/433 [==============================] - 0s 516us/step - loss: 0.5991 - acc: 0.8037 - val_loss: 1.0356 - val_acc: 0.6875\nEpoch 296/600\n433/433 [==============================] - 0s 545us/step - loss: 0.5859 - acc: 0.8129 - val_loss: 1.0357 - val_acc: 0.6875\nEpoch 297/600\n433/433 [==============================] - 0s 541us/step - loss: 0.5905 - acc: 0.8268 - val_loss: 1.0358 - val_acc: 0.6875\nEpoch 298/600\n433/433 [==============================] - 0s 586us/step - loss: 0.5699 - acc: 0.8176 - val_loss: 1.0356 - val_acc: 0.6875\nEpoch 299/600\n433/433 [==============================] - 0s 548us/step - loss: 0.6120 - acc: 0.8106 - val_loss: 1.0354 - val_acc: 0.6875\nEpoch 300/600\n433/433 [==============================] - 0s 570us/step - loss: 0.5417 - acc: 0.8129 - val_loss: 1.0350 - val_acc: 0.6818\nEpoch 301/600\n433/433 [==============================] - 0s 560us/step - loss: 0.6872 - acc: 0.7460 - val_loss: 1.0349 - val_acc: 0.6818\nEpoch 302/600\n433/433 [==============================] - 0s 557us/step - loss: 0.5745 - acc: 0.8222 - val_loss: 1.0346 - val_acc: 0.6818\nEpoch 303/600\n433/433 [==============================] - 0s 560us/step - loss: 0.5997 - acc: 0.8176 - val_loss: 1.0343 - val_acc: 0.6818\nEpoch 304/600\n433/433 [==============================] - 0s 591us/step - loss: 0.5945 - acc: 0.8360 - val_loss: 1.0340 - val_acc: 0.6818\nEpoch 305/600\n433/433 [==============================] - 0s 542us/step - loss: 0.6078 - acc: 0.8129 - val_loss: 1.0338 - val_acc: 0.6818\nEpoch 306/600\n433/433 [==============================] - 0s 571us/step - loss: 0.5045 - acc: 0.8476 - val_loss: 1.0336 - val_acc: 0.6818\nEpoch 307/600\n433/433 [==============================] - 0s 545us/step - loss: 
0.6391 - acc: 0.7806 - val_loss: 1.0335 - val_acc: 0.6818\nEpoch 308/600\n433/433 [==============================] - 0s 556us/step - loss: 0.5813 - acc: 0.8129 - val_loss: 1.0333 - val_acc: 0.6818\nEpoch 309/600\n433/433 [==============================] - 0s 586us/step - loss: 0.6134 - acc: 0.8037 - val_loss: 1.0332 - val_acc: 0.6818\nEpoch 310/600\n433/433 [==============================] - 0s 553us/step - loss: 0.6204 - acc: 0.7852 - val_loss: 1.0329 - val_acc: 0.6818\nEpoch 311/600\n433/433 [==============================] - 0s 554us/step - loss: 0.5760 - acc: 0.8222 - val_loss: 1.0327 - val_acc: 0.6818\nEpoch 312/600\n433/433 [==============================] - 0s 570us/step - loss: 0.6039 - acc: 0.8222 - val_loss: 1.0324 - val_acc: 0.6818\nEpoch 313/600\n433/433 [==============================] - 0s 579us/step - loss: 0.5850 - acc: 0.8199 - val_loss: 1.0320 - val_acc: 0.6818\nEpoch 314/600\n433/433 [==============================] - 0s 561us/step - loss: 0.6267 - acc: 0.8083 - val_loss: 1.0318 - val_acc: 0.6818\nEpoch 315/600\n433/433 [==============================] - 0s 550us/step - loss: 0.5814 - acc: 0.8176 - val_loss: 1.0314 - val_acc: 0.6818\nEpoch 316/600\n433/433 [==============================] - 0s 547us/step - loss: 0.5750 - acc: 0.8314 - val_loss: 1.0310 - val_acc: 0.6818\nEpoch 317/600\n433/433 [==============================] - 0s 566us/step - loss: 0.5627 - acc: 0.8499 - val_loss: 1.0308 - val_acc: 0.6818\nEpoch 318/600\n433/433 [==============================] - 0s 582us/step - loss: 0.5799 - acc: 0.8268 - val_loss: 1.0307 - val_acc: 0.6818\nEpoch 319/600\n433/433 [==============================] - 0s 576us/step - loss: 0.6239 - acc: 0.8129 - val_loss: 1.0304 - val_acc: 0.6818\nEpoch 320/600\n433/433 [==============================] - 0s 580us/step - loss: 0.5575 - acc: 0.8129 - val_loss: 1.0303 - val_acc: 0.6818\nEpoch 321/600\n433/433 [==============================] - 0s 537us/step - loss: 0.6126 - acc: 0.7991 - val_loss: 1.0302 - val_acc: 0.6818\nEpoch 322/600\n433/433 [==============================] - 0s 545us/step - loss: 0.6182 - acc: 0.7945 - val_loss: 1.0300 - val_acc: 0.6818\nEpoch 323/600\n433/433 [==============================] - 0s 582us/step - loss: 0.6067 - acc: 0.8083 - val_loss: 1.0298 - val_acc: 0.6818\nEpoch 324/600\n433/433 [==============================] - 0s 547us/step - loss: 0.5844 - acc: 0.8060 - val_loss: 1.0295 - val_acc: 0.6818\nEpoch 325/600\n433/433 [==============================] - 0s 574us/step - loss: 0.5985 - acc: 0.8245 - val_loss: 1.0292 - val_acc: 0.6818\nEpoch 326/600\n433/433 [==============================] - 0s 562us/step - loss: 0.5717 - acc: 0.8383 - val_loss: 1.0290 - val_acc: 0.6818\nEpoch 327/600\n433/433 [==============================] - 0s 558us/step - loss: 0.5429 - acc: 0.8314 - val_loss: 1.0285 - val_acc: 0.6818\nEpoch 328/600\n433/433 [==============================] - 0s 593us/step - loss: 0.5971 - acc: 0.8083 - val_loss: 1.0282 - val_acc: 0.6818\nEpoch 329/600\n433/433 [==============================] - 0s 557us/step - loss: 0.6027 - acc: 0.7991 - val_loss: 1.0279 - val_acc: 0.6818\nEpoch 330/600\n433/433 [==============================] - 0s 551us/step - loss: 0.6001 - acc: 0.8060 - val_loss: 1.0278 - val_acc: 0.6818\nEpoch 331/600\n433/433 [==============================] - 0s 546us/step - loss: 0.6087 - acc: 0.7991 - val_loss: 1.0273 - val_acc: 0.6818\nEpoch 332/600\n433/433 [==============================] - 0s 563us/step - loss: 0.6023 - acc: 0.8176 - val_loss: 1.0269 - val_acc: 0.6818\nEpoch 
333/600\n433/433 [==============================] - 0s 554us/step - loss: 0.5685 - acc: 0.8083 - val_loss: 1.0267 - val_acc: 0.6818\nEpoch 334/600\n433/433 [==============================] - 0s 544us/step - loss: 0.5866 - acc: 0.8222 - val_loss: 1.0266 - val_acc: 0.6818\nEpoch 335/600\n433/433 [==============================] - 0s 571us/step - loss: 0.6196 - acc: 0.8106 - val_loss: 1.0265 - val_acc: 0.6818\nEpoch 336/600\n433/433 [==============================] - 0s 568us/step - loss: 0.6340 - acc: 0.8060 - val_loss: 1.0263 - val_acc: 0.6818\nEpoch 337/600\n433/433 [==============================] - 0s 587us/step - loss: 0.6021 - acc: 0.8152 - val_loss: 1.0261 - val_acc: 0.6818\nEpoch 338/600\n433/433 [==============================] - 0s 547us/step - loss: 0.5857 - acc: 0.8060 - val_loss: 1.0260 - val_acc: 0.6818\nEpoch 339/600\n433/433 [==============================] - 0s 565us/step - loss: 0.6156 - acc: 0.8129 - val_loss: 1.0259 - val_acc: 0.6818\nEpoch 340/600\n433/433 [==============================] - 0s 562us/step - loss: 0.5631 - acc: 0.8453 - val_loss: 1.0256 - val_acc: 0.6818\nEpoch 341/600\n433/433 [==============================] - 0s 545us/step - loss: 0.6068 - acc: 0.8083 - val_loss: 1.0253 - val_acc: 0.6818\nEpoch 342/600\n433/433 [==============================] - 0s 562us/step - loss: 0.6127 - acc: 0.8083 - val_loss: 1.0250 - val_acc: 0.6818\nEpoch 343/600\n433/433 [==============================] - 0s 552us/step - loss: 0.5797 - acc: 0.8152 - val_loss: 1.0250 - val_acc: 0.6818\nEpoch 344/600\n433/433 [==============================] - 0s 578us/step - loss: 0.6210 - acc: 0.8152 - val_loss: 1.0247 - val_acc: 0.6818\nEpoch 345/600\n433/433 [==============================] - 0s 562us/step - loss: 0.5817 - acc: 0.8106 - val_loss: 1.0245 - val_acc: 0.6818\nEpoch 346/600\n433/433 [==============================] - 0s 565us/step - loss: 0.6070 - acc: 0.8106 - val_loss: 1.0245 - val_acc: 0.6818\nEpoch 347/600\n433/433 [==============================] - 0s 552us/step - loss: 0.5783 - acc: 0.8406 - val_loss: 1.0241 - val_acc: 0.6818\nEpoch 348/600\n433/433 [==============================] - 0s 555us/step - loss: 0.6038 - acc: 0.8037 - val_loss: 1.0241 - val_acc: 0.6818\nEpoch 349/600\n433/433 [==============================] - 0s 573us/step - loss: 0.6273 - acc: 0.7921 - val_loss: 1.0240 - val_acc: 0.6818\nEpoch 350/600\n433/433 [==============================] - 0s 590us/step - loss: 0.5878 - acc: 0.8083 - val_loss: 1.0240 - val_acc: 0.6818\nEpoch 351/600\n433/433 [==============================] - 0s 583us/step - loss: 0.6214 - acc: 0.8129 - val_loss: 1.0238 - val_acc: 0.6818\nEpoch 352/600\n433/433 [==============================] - 0s 590us/step - loss: 0.5305 - acc: 0.8383 - val_loss: 1.0237 - val_acc: 0.6818\nEpoch 353/600\n433/433 [==============================] - 0s 570us/step - loss: 0.5533 - acc: 0.8314 - val_loss: 1.0235 - val_acc: 0.6818\nEpoch 354/600\n433/433 [==============================] - 0s 562us/step - loss: 0.5492 - acc: 0.8176 - val_loss: 1.0234 - val_acc: 0.6818\nEpoch 355/600\n433/433 [==============================] - 0s 582us/step - loss: 0.5641 - acc: 0.8268 - val_loss: 1.0232 - val_acc: 0.6818\nEpoch 356/600\n433/433 [==============================] - 0s 565us/step - loss: 0.5928 - acc: 0.8083 - val_loss: 1.0230 - val_acc: 0.6818\nEpoch 357/600\n433/433 [==============================] - 0s 550us/step - loss: 0.6564 - acc: 0.7806 - val_loss: 1.0227 - val_acc: 0.6818\nEpoch 358/600\n433/433 [==============================] - 0s 600us/step - loss: 
0.6305 - acc: 0.8060 - val_loss: 1.0226 - val_acc: 0.6818\nEpoch 359/600\n433/433 [==============================] - 0s 589us/step - loss: 0.5901 - acc: 0.7991 - val_loss: 1.0224 - val_acc: 0.6818\nEpoch 360/600\n433/433 [==============================] - 0s 551us/step - loss: 0.6032 - acc: 0.8268 - val_loss: 1.0223 - val_acc: 0.6818\nEpoch 361/600\n433/433 [==============================] - 0s 554us/step - loss: 0.5612 - acc: 0.8176 - val_loss: 1.0223 - val_acc: 0.6761\nEpoch 362/600\n433/433 [==============================] - 0s 551us/step - loss: 0.5760 - acc: 0.8152 - val_loss: 1.0222 - val_acc: 0.6761\nEpoch 363/600\n433/433 [==============================] - 0s 566us/step - loss: 0.6027 - acc: 0.8199 - val_loss: 1.0220 - val_acc: 0.6761\nEpoch 364/600\n433/433 [==============================] - 0s 570us/step - loss: 0.5301 - acc: 0.8406 - val_loss: 1.0219 - val_acc: 0.6761\nEpoch 365/600\n433/433 [==============================] - 0s 556us/step - loss: 0.6026 - acc: 0.8106 - val_loss: 1.0218 - val_acc: 0.6761\nEpoch 366/600\n433/433 [==============================] - 0s 547us/step - loss: 0.5391 - acc: 0.8383 - val_loss: 1.0216 - val_acc: 0.6761\nEpoch 367/600\n433/433 [==============================] - 0s 527us/step - loss: 0.5845 - acc: 0.8083 - val_loss: 1.0216 - val_acc: 0.6761\nEpoch 368/600\n433/433 [==============================] - 0s 568us/step - loss: 0.6173 - acc: 0.7898 - val_loss: 1.0216 - val_acc: 0.6761\nEpoch 369/600\n433/433 [==============================] - 0s 542us/step - loss: 0.6303 - acc: 0.7991 - val_loss: 1.0217 - val_acc: 0.6761\nEpoch 370/600\n433/433 [==============================] - 0s 555us/step - loss: 0.6006 - acc: 0.7875 - val_loss: 1.0218 - val_acc: 0.6761\nEpoch 371/600\n433/433 [==============================] - 0s 549us/step - loss: 0.5313 - acc: 0.8406 - val_loss: 1.0216 - val_acc: 0.6761\nEpoch 372/600\n433/433 [==============================] - 0s 571us/step - loss: 0.5995 - acc: 0.8129 - val_loss: 1.0215 - val_acc: 0.6761\nEpoch 373/600\n433/433 [==============================] - 0s 563us/step - loss: 0.6211 - acc: 0.8129 - val_loss: 1.0215 - val_acc: 0.6761\nEpoch 374/600\n433/433 [==============================] - 0s 555us/step - loss: 0.6469 - acc: 0.7760 - val_loss: 1.0213 - val_acc: 0.6761\nEpoch 375/600\n433/433 [==============================] - 0s 550us/step - loss: 0.6077 - acc: 0.7945 - val_loss: 1.0213 - val_acc: 0.6761\nEpoch 376/600\n433/433 [==============================] - 0s 555us/step - loss: 0.5837 - acc: 0.8360 - val_loss: 1.0214 - val_acc: 0.6761\nEpoch 377/600\n433/433 [==============================] - 0s 555us/step - loss: 0.6384 - acc: 0.8060 - val_loss: 1.0213 - val_acc: 0.6761\nEpoch 378/600\n433/433 [==============================] - 0s 569us/step - loss: 0.5762 - acc: 0.8129 - val_loss: 1.0213 - val_acc: 0.6761\nEpoch 379/600\n433/433 [==============================] - 0s 575us/step - loss: 0.5895 - acc: 0.8268 - val_loss: 1.0213 - val_acc: 0.6761\nEpoch 380/600\n433/433 [==============================] - 0s 546us/step - loss: 0.5917 - acc: 0.8060 - val_loss: 1.0215 - val_acc: 0.6761\nEpoch 381/600\n433/433 [==============================] - 0s 547us/step - loss: 0.6280 - acc: 0.7898 - val_loss: 1.0216 - val_acc: 0.6761\nEpoch 382/600\n433/433 [==============================] - 0s 584us/step - loss: 0.6246 - acc: 0.8314 - val_loss: 1.0215 - val_acc: 0.6761\nEpoch 383/600\n433/433 [==============================] - 0s 537us/step - loss: 0.5663 - acc: 0.7968 - val_loss: 1.0218 - val_acc: 0.6761\nEpoch 
384/600\n433/433 [==============================] - 0s 555us/step - loss: 0.5744 - acc: 0.8060 - val_loss: 1.0218 - val_acc: 0.6761\nEpoch 385/600\n433/433 [==============================] - 0s 546us/step - loss: 0.6297 - acc: 0.7921 - val_loss: 1.0216 - val_acc: 0.6761\nEpoch 386/600\n433/433 [==============================] - 0s 546us/step - loss: 0.5927 - acc: 0.8014 - val_loss: 1.0217 - val_acc: 0.6761\nEpoch 387/600\n433/433 [==============================] - 0s 585us/step - loss: 0.5947 - acc: 0.8291 - val_loss: 1.0216 - val_acc: 0.6761\nEpoch 388/600\n433/433 [==============================] - 0s 543us/step - loss: 0.5984 - acc: 0.8152 - val_loss: 1.0216 - val_acc: 0.6761\nEpoch 389/600\n433/433 [==============================] - 0s 542us/step - loss: 0.5154 - acc: 0.8430 - val_loss: 1.0216 - val_acc: 0.6761\nEpoch 390/600\n433/433 [==============================] - 0s 560us/step - loss: 0.5538 - acc: 0.8291 - val_loss: 1.0216 - val_acc: 0.6761\nEpoch 391/600\n433/433 [==============================] - 0s 554us/step - loss: 0.6039 - acc: 0.8291 - val_loss: 1.0214 - val_acc: 0.6761\nEpoch 392/600\n433/433 [==============================] - 0s 570us/step - loss: 0.5917 - acc: 0.8152 - val_loss: 1.0213 - val_acc: 0.6761\nEpoch 393/600\n433/433 [==============================] - 0s 579us/step - loss: 0.5871 - acc: 0.8245 - val_loss: 1.0213 - val_acc: 0.6761\nEpoch 394/600\n433/433 [==============================] - 0s 553us/step - loss: 0.5841 - acc: 0.8106 - val_loss: 1.0213 - val_acc: 0.6761\nEpoch 395/600\n433/433 [==============================] - 0s 558us/step - loss: 0.6182 - acc: 0.7829 - val_loss: 1.0214 - val_acc: 0.6761\nEpoch 396/600\n433/433 [==============================] - 0s 567us/step - loss: 0.6315 - acc: 0.8060 - val_loss: 1.0215 - val_acc: 0.6761\nEpoch 397/600\n433/433 [==============================] - 0s 593us/step - loss: 0.5753 - acc: 0.8083 - val_loss: 1.0215 - val_acc: 0.6761\nEpoch 398/600\n433/433 [==============================] - 0s 544us/step - loss: 0.5632 - acc: 0.8199 - val_loss: 1.0214 - val_acc: 0.6761\nEpoch 399/600\n433/433 [==============================] - 0s 534us/step - loss: 0.5993 - acc: 0.8106 - val_loss: 1.0213 - val_acc: 0.6761\nEpoch 400/600\n433/433 [==============================] - 0s 574us/step - loss: 0.5977 - acc: 0.8176 - val_loss: 1.0213 - val_acc: 0.6761\nEpoch 401/600\n433/433 [==============================] - 0s 542us/step - loss: 0.5551 - acc: 0.8222 - val_loss: 1.0212 - val_acc: 0.6761\nEpoch 402/600\n433/433 [==============================] - 0s 551us/step - loss: 0.6063 - acc: 0.7945 - val_loss: 1.0213 - val_acc: 0.6761\nEpoch 403/600\n433/433 [==============================] - 0s 553us/step - loss: 0.5454 - acc: 0.8360 - val_loss: 1.0213 - val_acc: 0.6761\nEpoch 404/600\n433/433 [==============================] - 0s 553us/step - loss: 0.5628 - acc: 0.8360 - val_loss: 1.0213 - val_acc: 0.6761\nEpoch 405/600\n433/433 [==============================] - 0s 554us/step - loss: 0.5580 - acc: 0.8291 - val_loss: 1.0211 - val_acc: 0.6761\nEpoch 406/600\n433/433 [==============================] - 0s 588us/step - loss: 0.5597 - acc: 0.8199 - val_loss: 1.0211 - val_acc: 0.6761\nEpoch 407/600\n433/433 [==============================] - 0s 554us/step - loss: 0.5791 - acc: 0.7968 - val_loss: 1.0211 - val_acc: 0.6761\nEpoch 408/600\n433/433 [==============================] - 0s 568us/step - loss: 0.6021 - acc: 0.8083 - val_loss: 1.0209 - val_acc: 0.6761\nEpoch 409/600\n433/433 [==============================] - 0s 561us/step - loss: 
0.5570 - acc: 0.8060 - val_loss: 1.0207 - val_acc: 0.6761\nEpoch 410/600\n433/433 [==============================] - 0s 550us/step - loss: 0.5810 - acc: 0.8245 - val_loss: 1.0207 - val_acc: 0.6761\nEpoch 411/600\n433/433 [==============================] - 0s 563us/step - loss: 0.5558 - acc: 0.8245 - val_loss: 1.0205 - val_acc: 0.6761\nEpoch 412/600\n433/433 [==============================] - 0s 579us/step - loss: 0.6340 - acc: 0.7898 - val_loss: 1.0204 - val_acc: 0.6761\nEpoch 413/600\n433/433 [==============================] - 0s 548us/step - loss: 0.5956 - acc: 0.8152 - val_loss: 1.0204 - val_acc: 0.6761\nEpoch 414/600\n433/433 [==============================] - 0s 544us/step - loss: 0.6148 - acc: 0.8083 - val_loss: 1.0202 - val_acc: 0.6761\nEpoch 415/600\n433/433 [==============================] - 0s 532us/step - loss: 0.6117 - acc: 0.8176 - val_loss: 1.0200 - val_acc: 0.6761\nEpoch 416/600\n433/433 [==============================] - 0s 569us/step - loss: 0.5997 - acc: 0.8060 - val_loss: 1.0198 - val_acc: 0.6761\nEpoch 417/600\n433/433 [==============================] - 0s 548us/step - loss: 0.6015 - acc: 0.8060 - val_loss: 1.0200 - val_acc: 0.6761\nEpoch 418/600\n433/433 [==============================] - 0s 547us/step - loss: 0.5572 - acc: 0.8291 - val_loss: 1.0201 - val_acc: 0.6761\nEpoch 419/600\n433/433 [==============================] - 0s 536us/step - loss: 0.5744 - acc: 0.8014 - val_loss: 1.0201 - val_acc: 0.6761\nEpoch 420/600\n433/433 [==============================] - 0s 542us/step - loss: 0.5761 - acc: 0.8037 - val_loss: 1.0201 - val_acc: 0.6761\nEpoch 421/600\n433/433 [==============================] - 0s 539us/step - loss: 0.5710 - acc: 0.8060 - val_loss: 1.0200 - val_acc: 0.6761\nEpoch 422/600\n433/433 [==============================] - 0s 550us/step - loss: 0.5925 - acc: 0.8199 - val_loss: 1.0199 - val_acc: 0.6761\nEpoch 423/600\n433/433 [==============================] - 0s 545us/step - loss: 0.5578 - acc: 0.8268 - val_loss: 1.0198 - val_acc: 0.6761\nEpoch 424/600\n433/433 [==============================] - 0s 544us/step - loss: 0.5815 - acc: 0.8314 - val_loss: 1.0197 - val_acc: 0.6761\nEpoch 425/600\n433/433 [==============================] - 0s 561us/step - loss: 0.5975 - acc: 0.7945 - val_loss: 1.0194 - val_acc: 0.6761\nEpoch 426/600\n433/433 [==============================] - 0s 557us/step - loss: 0.5831 - acc: 0.8106 - val_loss: 1.0191 - val_acc: 0.6761\nEpoch 427/600\n433/433 [==============================] - 0s 552us/step - loss: 0.5712 - acc: 0.8176 - val_loss: 1.0188 - val_acc: 0.6761\nEpoch 428/600\n433/433 [==============================] - 0s 547us/step - loss: 0.5478 - acc: 0.8268 - val_loss: 1.0189 - val_acc: 0.6761\nEpoch 429/600\n433/433 [==============================] - 0s 544us/step - loss: 0.5904 - acc: 0.7991 - val_loss: 1.0189 - val_acc: 0.6761\nEpoch 430/600\n433/433 [==============================] - 0s 540us/step - loss: 0.5869 - acc: 0.8199 - val_loss: 1.0188 - val_acc: 0.6761\nEpoch 431/600\n433/433 [==============================] - 0s 537us/step - loss: 0.5824 - acc: 0.8383 - val_loss: 1.0187 - val_acc: 0.6761\nEpoch 432/600\n433/433 [==============================] - 0s 551us/step - loss: 0.6117 - acc: 0.7968 - val_loss: 1.0187 - val_acc: 0.6761\nEpoch 433/600\n433/433 [==============================] - 0s 565us/step - loss: 0.5999 - acc: 0.8037 - val_loss: 1.0185 - val_acc: 0.6761\nEpoch 434/600\n433/433 [==============================] - 0s 577us/step - loss: 0.5685 - acc: 0.7991 - val_loss: 1.0185 - val_acc: 0.6761\nEpoch 
435/600\n433/433 [==============================] - 0s 554us/step - loss: 0.5280 - acc: 0.8545 - val_loss: 1.0184 - val_acc: 0.6761\nEpoch 436/600\n433/433 [==============================] - 0s 557us/step - loss: 0.6029 - acc: 0.7875 - val_loss: 1.0182 - val_acc: 0.6761\nEpoch 437/600\n433/433 [==============================] - 0s 575us/step - loss: 0.5749 - acc: 0.8199 - val_loss: 1.0181 - val_acc: 0.6818\nEpoch 438/600\n433/433 [==============================] - 0s 570us/step - loss: 0.5778 - acc: 0.8037 - val_loss: 1.0183 - val_acc: 0.6818\nEpoch 439/600\n433/433 [==============================] - 0s 540us/step - loss: 0.5175 - acc: 0.8637 - val_loss: 1.0182 - val_acc: 0.6818\nEpoch 440/600\n433/433 [==============================] - 0s 542us/step - loss: 0.5288 - acc: 0.8430 - val_loss: 1.0183 - val_acc: 0.6818\nEpoch 441/600\n433/433 [==============================] - 0s 543us/step - loss: 0.5449 - acc: 0.8222 - val_loss: 1.0184 - val_acc: 0.6818\nEpoch 442/600\n433/433 [==============================] - 0s 552us/step - loss: 0.5467 - acc: 0.8176 - val_loss: 1.0185 - val_acc: 0.6818\nEpoch 443/600\n433/433 [==============================] - 0s 550us/step - loss: 0.5558 - acc: 0.8337 - val_loss: 1.0184 - val_acc: 0.6818\nEpoch 444/600\n433/433 [==============================] - 0s 570us/step - loss: 0.5484 - acc: 0.8337 - val_loss: 1.0182 - val_acc: 0.6818\nEpoch 445/600\n433/433 [==============================] - 0s 554us/step - loss: 0.5342 - acc: 0.8337 - val_loss: 1.0181 - val_acc: 0.6818\nEpoch 446/600\n433/433 [==============================] - 0s 575us/step - loss: 0.5859 - acc: 0.8060 - val_loss: 1.0179 - val_acc: 0.6818\nEpoch 447/600\n433/433 [==============================] - 0s 558us/step - loss: 0.5720 - acc: 0.8060 - val_loss: 1.0178 - val_acc: 0.6761\nEpoch 448/600\n433/433 [==============================] - 0s 548us/step - loss: 0.5780 - acc: 0.8152 - val_loss: 1.0176 - val_acc: 0.6761\nEpoch 449/600\n433/433 [==============================] - 0s 542us/step - loss: 0.6294 - acc: 0.8106 - val_loss: 1.0177 - val_acc: 0.6761\nEpoch 450/600\n433/433 [==============================] - 0s 552us/step - loss: 0.5602 - acc: 0.8222 - val_loss: 1.0175 - val_acc: 0.6761\nEpoch 451/600\n433/433 [==============================] - 0s 552us/step - loss: 0.5989 - acc: 0.8083 - val_loss: 1.0175 - val_acc: 0.6761\nEpoch 452/600\n433/433 [==============================] - 0s 571us/step - loss: 0.5707 - acc: 0.8245 - val_loss: 1.0176 - val_acc: 0.6761\nEpoch 453/600\n433/433 [==============================] - 0s 552us/step - loss: 0.5325 - acc: 0.8176 - val_loss: 1.0176 - val_acc: 0.6761\nEpoch 454/600\n433/433 [==============================] - 0s 562us/step - loss: 0.5917 - acc: 0.8337 - val_loss: 1.0176 - val_acc: 0.6761\nEpoch 455/600\n433/433 [==============================] - 0s 550us/step - loss: 0.5826 - acc: 0.8083 - val_loss: 1.0175 - val_acc: 0.6761\nEpoch 456/600\n433/433 [==============================] - 0s 564us/step - loss: 0.5596 - acc: 0.8245 - val_loss: 1.0175 - val_acc: 0.6761\nEpoch 457/600\n433/433 [==============================] - 0s 543us/step - loss: 0.5729 - acc: 0.8199 - val_loss: 1.0177 - val_acc: 0.6761\nEpoch 458/600\n433/433 [==============================] - 0s 559us/step - loss: 0.6194 - acc: 0.8083 - val_loss: 1.0174 - val_acc: 0.6761\nEpoch 459/600\n433/433 [==============================] - 0s 560us/step - loss: 0.5669 - acc: 0.8083 - val_loss: 1.0176 - val_acc: 0.6761\nEpoch 460/600\n433/433 [==============================] - 0s 552us/step - loss: 
0.5542 - acc: 0.8199 - val_loss: 1.0177 - val_acc: 0.6761\nEpoch 461/600\n433/433 [==============================] - 0s 570us/step - loss: 0.6054 - acc: 0.7875 - val_loss: 1.0178 - val_acc: 0.6761\nEpoch 462/600\n433/433 [==============================] - 0s 545us/step - loss: 0.5697 - acc: 0.8199 - val_loss: 1.0178 - val_acc: 0.6761\nEpoch 463/600\n433/433 [==============================] - 0s 544us/step - loss: 0.5232 - acc: 0.8383 - val_loss: 1.0180 - val_acc: 0.6761\nEpoch 464/600\n433/433 [==============================] - 0s 545us/step - loss: 0.5591 - acc: 0.8176 - val_loss: 1.0178 - val_acc: 0.6761\nEpoch 465/600\n433/433 [==============================] - 0s 535us/step - loss: 0.6195 - acc: 0.8060 - val_loss: 1.0178 - val_acc: 0.6761\nEpoch 466/600\n433/433 [==============================] - 0s 584us/step - loss: 0.5832 - acc: 0.8129 - val_loss: 1.0178 - val_acc: 0.6761\nEpoch 467/600\n433/433 [==============================] - 0s 558us/step - loss: 0.6236 - acc: 0.7968 - val_loss: 1.0177 - val_acc: 0.6761\nEpoch 468/600\n433/433 [==============================] - 0s 569us/step - loss: 0.5771 - acc: 0.8014 - val_loss: 1.0178 - val_acc: 0.6761\nEpoch 469/600\n433/433 [==============================] - 0s 566us/step - loss: 0.5515 - acc: 0.8337 - val_loss: 1.0177 - val_acc: 0.6761\nEpoch 470/600\n433/433 [==============================] - 0s 576us/step - loss: 0.5934 - acc: 0.8083 - val_loss: 1.0178 - val_acc: 0.6761\nEpoch 471/600\n433/433 [==============================] - 0s 564us/step - loss: 0.5742 - acc: 0.8222 - val_loss: 1.0179 - val_acc: 0.6761\nEpoch 472/600\n433/433 [==============================] - 0s 552us/step - loss: 0.6548 - acc: 0.7829 - val_loss: 1.0177 - val_acc: 0.6761\nEpoch 473/600\n433/433 [==============================] - 0s 553us/step - loss: 0.5646 - acc: 0.8314 - val_loss: 1.0176 - val_acc: 0.6761\nEpoch 474/600\n433/433 [==============================] - 0s 564us/step - loss: 0.5624 - acc: 0.8291 - val_loss: 1.0174 - val_acc: 0.6761\nEpoch 475/600\n433/433 [==============================] - 0s 558us/step - loss: 0.5997 - acc: 0.8014 - val_loss: 1.0172 - val_acc: 0.6761\nEpoch 476/600\n433/433 [==============================] - 0s 554us/step - loss: 0.5381 - acc: 0.8222 - val_loss: 1.0170 - val_acc: 0.6761\nEpoch 477/600\n433/433 [==============================] - 0s 550us/step - loss: 0.5690 - acc: 0.8152 - val_loss: 1.0169 - val_acc: 0.6761\nEpoch 478/600\n433/433 [==============================] - 0s 566us/step - loss: 0.5684 - acc: 0.8337 - val_loss: 1.0168 - val_acc: 0.6761\nEpoch 479/600\n433/433 [==============================] - 0s 547us/step - loss: 0.5526 - acc: 0.8314 - val_loss: 1.0165 - val_acc: 0.6761\nEpoch 480/600\n433/433 [==============================] - 0s 539us/step - loss: 0.5567 - acc: 0.8291 - val_loss: 1.0164 - val_acc: 0.6761\nEpoch 481/600\n433/433 [==============================] - 0s 548us/step - loss: 0.5610 - acc: 0.8106 - val_loss: 1.0165 - val_acc: 0.6761\nEpoch 482/600\n433/433 [==============================] - 0s 564us/step - loss: 0.5658 - acc: 0.8176 - val_loss: 1.0165 - val_acc: 0.6761\nEpoch 483/600\n433/433 [==============================] - 0s 542us/step - loss: 0.5413 - acc: 0.8360 - val_loss: 1.0164 - val_acc: 0.6761\nEpoch 484/600\n433/433 [==============================] - 0s 562us/step - loss: 0.5726 - acc: 0.7968 - val_loss: 1.0164 - val_acc: 0.6761\nEpoch 485/600\n433/433 [==============================] - 0s 550us/step - loss: 0.5578 - acc: 0.8083 - val_loss: 1.0165 - val_acc: 0.6761\nEpoch 
486/600\n433/433 [==============================] - 0s 557us/step - loss: 0.5648 - acc: 0.8152 - val_loss: 1.0163 - val_acc: 0.6761\nEpoch 487/600\n433/433 [==============================] - 0s 564us/step - loss: 0.5574 - acc: 0.8430 - val_loss: 1.0163 - val_acc: 0.6761\nEpoch 488/600\n433/433 [==============================] - 0s 559us/step - loss: 0.5851 - acc: 0.8176 - val_loss: 1.0164 - val_acc: 0.6705\nEpoch 489/600\n433/433 [==============================] - 0s 545us/step - loss: 0.5392 - acc: 0.8199 - val_loss: 1.0161 - val_acc: 0.6705\nEpoch 490/600\n433/433 [==============================] - 0s 567us/step - loss: 0.5932 - acc: 0.7991 - val_loss: 1.0161 - val_acc: 0.6705\nEpoch 491/600\n433/433 [==============================] - 0s 557us/step - loss: 0.5998 - acc: 0.7829 - val_loss: 1.0160 - val_acc: 0.6705\nEpoch 492/600\n433/433 [==============================] - 0s 607us/step - loss: 0.5507 - acc: 0.8291 - val_loss: 1.0160 - val_acc: 0.6705\nEpoch 493/600\n433/433 [==============================] - 0s 553us/step - loss: 0.5721 - acc: 0.8222 - val_loss: 1.0159 - val_acc: 0.6705\nEpoch 494/600\n433/433 [==============================] - 0s 561us/step - loss: 0.6056 - acc: 0.8014 - val_loss: 1.0159 - val_acc: 0.6705\nEpoch 495/600\n433/433 [==============================] - 0s 536us/step - loss: 0.5935 - acc: 0.8037 - val_loss: 1.0157 - val_acc: 0.6705\nEpoch 496/600\n433/433 [==============================] - 0s 560us/step - loss: 0.6032 - acc: 0.8176 - val_loss: 1.0157 - val_acc: 0.6705\nEpoch 497/600\n433/433 [==============================] - 0s 551us/step - loss: 0.5824 - acc: 0.8314 - val_loss: 1.0160 - val_acc: 0.6705\nEpoch 498/600\n433/433 [==============================] - 0s 578us/step - loss: 0.5350 - acc: 0.8222 - val_loss: 1.0160 - val_acc: 0.6705\nEpoch 499/600\n433/433 [==============================] - 0s 557us/step - loss: 0.5823 - acc: 0.8199 - val_loss: 1.0160 - val_acc: 0.6705\nEpoch 500/600\n433/433 [==============================] - 0s 555us/step - loss: 0.5258 - acc: 0.8360 - val_loss: 1.0160 - val_acc: 0.6705\nEpoch 501/600\n433/433 [==============================] - 0s 562us/step - loss: 0.5672 - acc: 0.8337 - val_loss: 1.0159 - val_acc: 0.6705\nEpoch 502/600\n433/433 [==============================] - 0s 593us/step - loss: 0.6342 - acc: 0.7875 - val_loss: 1.0158 - val_acc: 0.6705\nEpoch 503/600\n433/433 [==============================] - 0s 549us/step - loss: 0.5629 - acc: 0.8314 - val_loss: 1.0158 - val_acc: 0.6705\nEpoch 504/600\n433/433 [==============================] - 0s 567us/step - loss: 0.5333 - acc: 0.8406 - val_loss: 1.0158 - val_acc: 0.6705\nEpoch 505/600\n433/433 [==============================] - 0s 548us/step - loss: 0.5605 - acc: 0.8383 - val_loss: 1.0159 - val_acc: 0.6705\nEpoch 506/600\n433/433 [==============================] - 0s 597us/step - loss: 0.5769 - acc: 0.8129 - val_loss: 1.0159 - val_acc: 0.6705\nEpoch 507/600\n433/433 [==============================] - 0s 561us/step - loss: 0.5762 - acc: 0.8268 - val_loss: 1.0160 - val_acc: 0.6705\nEpoch 508/600\n433/433 [==============================] - 0s 558us/step - loss: 0.6000 - acc: 0.8199 - val_loss: 1.0160 - val_acc: 0.6705\nEpoch 509/600\n433/433 [==============================] - 0s 559us/step - loss: 0.5797 - acc: 0.8037 - val_loss: 1.0158 - val_acc: 0.6705\nEpoch 510/600\n433/433 [==============================] - 0s 551us/step - loss: 0.5644 - acc: 0.8176 - val_loss: 1.0157 - val_acc: 0.6705\nEpoch 511/600\n433/433 [==============================] - 0s 575us/step - loss: 
0.5800 - acc: 0.8222 - val_loss: 1.0157 - val_acc: 0.6705\nEpoch 512/600\n433/433 [==============================] - 0s 542us/step - loss: 0.5744 - acc: 0.8314 - val_loss: 1.0156 - val_acc: 0.6705\nEpoch 513/600\n433/433 [==============================] - 0s 567us/step - loss: 0.5366 - acc: 0.8383 - val_loss: 1.0156 - val_acc: 0.6705\nEpoch 514/600\n433/433 [==============================] - 0s 551us/step - loss: 0.5582 - acc: 0.8337 - val_loss: 1.0154 - val_acc: 0.6705\nEpoch 515/600\n433/433 [==============================] - 0s 554us/step - loss: 0.5468 - acc: 0.8453 - val_loss: 1.0152 - val_acc: 0.6705\nEpoch 516/600\n433/433 [==============================] - 0s 556us/step - loss: 0.6016 - acc: 0.8176 - val_loss: 1.0150 - val_acc: 0.6705\nEpoch 517/600\n433/433 [==============================] - 0s 545us/step - loss: 0.6099 - acc: 0.7968 - val_loss: 1.0151 - val_acc: 0.6705\nEpoch 518/600\n433/433 [==============================] - 0s 568us/step - loss: 0.5582 - acc: 0.8083 - val_loss: 1.0149 - val_acc: 0.6705\nEpoch 519/600\n433/433 [==============================] - 0s 550us/step - loss: 0.5442 - acc: 0.8245 - val_loss: 1.0148 - val_acc: 0.6705\nEpoch 520/600\n433/433 [==============================] - 0s 557us/step - loss: 0.6043 - acc: 0.8106 - val_loss: 1.0148 - val_acc: 0.6705\nEpoch 521/600\n433/433 [==============================] - 0s 543us/step - loss: 0.5725 - acc: 0.8152 - val_loss: 1.0147 - val_acc: 0.6705\nEpoch 522/600\n433/433 [==============================] - 0s 579us/step - loss: 0.5825 - acc: 0.8152 - val_loss: 1.0147 - val_acc: 0.6705\nEpoch 523/600\n433/433 [==============================] - 0s 561us/step - loss: 0.5406 - acc: 0.8383 - val_loss: 1.0148 - val_acc: 0.6705\nEpoch 524/600\n433/433 [==============================] - 0s 585us/step - loss: 0.5788 - acc: 0.8176 - val_loss: 1.0149 - val_acc: 0.6705\nEpoch 525/600\n433/433 [==============================] - 0s 563us/step - loss: 0.5510 - acc: 0.8222 - val_loss: 1.0151 - val_acc: 0.6705\nEpoch 526/600\n433/433 [==============================] - 0s 549us/step - loss: 0.5785 - acc: 0.8129 - val_loss: 1.0152 - val_acc: 0.6705\nEpoch 527/600\n433/433 [==============================] - 0s 540us/step - loss: 0.5755 - acc: 0.8314 - val_loss: 1.0153 - val_acc: 0.6705\nEpoch 528/600\n433/433 [==============================] - 0s 541us/step - loss: 0.6179 - acc: 0.8014 - val_loss: 1.0153 - val_acc: 0.6705\nEpoch 529/600\n433/433 [==============================] - 0s 547us/step - loss: 0.5260 - acc: 0.8476 - val_loss: 1.0154 - val_acc: 0.6705\nEpoch 530/600\n433/433 [==============================] - 0s 560us/step - loss: 0.5658 - acc: 0.8268 - val_loss: 1.0154 - val_acc: 0.6705\nEpoch 531/600\n433/433 [==============================] - 0s 549us/step - loss: 0.5684 - acc: 0.8176 - val_loss: 1.0154 - val_acc: 0.6705\nEpoch 532/600\n433/433 [==============================] - 0s 561us/step - loss: 0.6229 - acc: 0.8129 - val_loss: 1.0155 - val_acc: 0.6705\nEpoch 533/600\n433/433 [==============================] - 0s 540us/step - loss: 0.5548 - acc: 0.8453 - val_loss: 1.0155 - val_acc: 0.6705\nEpoch 534/600\n433/433 [==============================] - 0s 553us/step - loss: 0.5805 - acc: 0.8499 - val_loss: 1.0154 - val_acc: 0.6705\nEpoch 535/600\n433/433 [==============================] - 0s 560us/step - loss: 0.6208 - acc: 0.7968 - val_loss: 1.0152 - val_acc: 0.6705\nEpoch 536/600\n433/433 [==============================] - 0s 541us/step - loss: 0.5816 - acc: 0.8430 - val_loss: 1.0152 - val_acc: 0.6705\nEpoch 
537/600\n433/433 [==============================] - 0s 556us/step - loss: 0.5506 - acc: 0.8383 - val_loss: 1.0150 - val_acc: 0.6705\nEpoch 538/600\n433/433 [==============================] - 0s 544us/step - loss: 0.5896 - acc: 0.8037 - val_loss: 1.0150 - val_acc: 0.6705\nEpoch 539/600\n433/433 [==============================] - 0s 556us/step - loss: 0.6641 - acc: 0.7875 - val_loss: 1.0150 - val_acc: 0.6705\nEpoch 540/600\n433/433 [==============================] - 0s 552us/step - loss: 0.5897 - acc: 0.8083 - val_loss: 1.0149 - val_acc: 0.6705\nEpoch 541/600\n433/433 [==============================] - 0s 571us/step - loss: 0.5524 - acc: 0.8129 - val_loss: 1.0149 - val_acc: 0.6705\nEpoch 542/600\n433/433 [==============================] - 0s 551us/step - loss: 0.5810 - acc: 0.8106 - val_loss: 1.0148 - val_acc: 0.6705\nEpoch 543/600\n433/433 [==============================] - 0s 555us/step - loss: 0.5687 - acc: 0.8291 - val_loss: 1.0147 - val_acc: 0.6705\nEpoch 544/600\n433/433 [==============================] - 0s 568us/step - loss: 0.5467 - acc: 0.8383 - val_loss: 1.0146 - val_acc: 0.6705\nEpoch 545/600\n433/433 [==============================] - 0s 537us/step - loss: 0.5852 - acc: 0.8176 - val_loss: 1.0147 - val_acc: 0.6705\nEpoch 546/600\n433/433 [==============================] - 0s 552us/step - loss: 0.5333 - acc: 0.8222 - val_loss: 1.0144 - val_acc: 0.6705\nEpoch 547/600\n433/433 [==============================] - 0s 525us/step - loss: 0.5816 - acc: 0.8083 - val_loss: 1.0146 - val_acc: 0.6705\nEpoch 548/600\n433/433 [==============================] - 0s 553us/step - loss: 0.5629 - acc: 0.8176 - val_loss: 1.0145 - val_acc: 0.6705\nEpoch 549/600\n433/433 [==============================] - 0s 551us/step - loss: 0.5652 - acc: 0.8337 - val_loss: 1.0145 - val_acc: 0.6705\nEpoch 550/600\n433/433 [==============================] - 0s 538us/step - loss: 0.5648 - acc: 0.8337 - val_loss: 1.0144 - val_acc: 0.6705\nEpoch 551/600\n433/433 [==============================] - 0s 547us/step - loss: 0.5845 - acc: 0.8152 - val_loss: 1.0144 - val_acc: 0.6705\nEpoch 552/600\n433/433 [==============================] - 0s 539us/step - loss: 0.5808 - acc: 0.7968 - val_loss: 1.0143 - val_acc: 0.6705\nEpoch 553/600\n433/433 [==============================] - 0s 578us/step - loss: 0.5830 - acc: 0.8245 - val_loss: 1.0142 - val_acc: 0.6705\nEpoch 554/600\n433/433 [==============================] - 0s 550us/step - loss: 0.5557 - acc: 0.8291 - val_loss: 1.0143 - val_acc: 0.6705\nEpoch 555/600\n433/433 [==============================] - 0s 541us/step - loss: 0.5282 - acc: 0.8291 - val_loss: 1.0143 - val_acc: 0.6705\nEpoch 556/600\n433/433 [==============================] - 0s 551us/step - loss: 0.5735 - acc: 0.8268 - val_loss: 1.0143 - val_acc: 0.6705\nEpoch 557/600\n433/433 [==============================] - 0s 556us/step - loss: 0.5534 - acc: 0.8268 - val_loss: 1.0142 - val_acc: 0.6705\nEpoch 558/600\n433/433 [==============================] - 0s 553us/step - loss: 0.6376 - acc: 0.8060 - val_loss: 1.0143 - val_acc: 0.6705\nEpoch 559/600\n433/433 [==============================] - 0s 528us/step - loss: 0.5656 - acc: 0.8383 - val_loss: 1.0144 - val_acc: 0.6705\nEpoch 560/600\n433/433 [==============================] - 0s 544us/step - loss: 0.5844 - acc: 0.7898 - val_loss: 1.0144 - val_acc: 0.6705\nEpoch 561/600\n433/433 [==============================] - 0s 535us/step - loss: 0.5809 - acc: 0.8037 - val_loss: 1.0144 - val_acc: 0.6705\nEpoch 562/600\n433/433 [==============================] - 0s 536us/step - loss: 
0.5457 - acc: 0.8406 - val_loss: 1.0144 - val_acc: 0.6705\nEpoch 563/600\n433/433 [==============================] - 0s 536us/step - loss: 0.5366 - acc: 0.8152 - val_loss: 1.0145 - val_acc: 0.6705\nEpoch 564/600\n433/433 [==============================] - 0s 538us/step - loss: 0.5484 - acc: 0.8106 - val_loss: 1.0145 - val_acc: 0.6705\nEpoch 565/600\n433/433 [==============================] - 0s 541us/step - loss: 0.5777 - acc: 0.8337 - val_loss: 1.0145 - val_acc: 0.6705\nEpoch 566/600\n433/433 [==============================] - 0s 541us/step - loss: 0.5787 - acc: 0.8314 - val_loss: 1.0145 - val_acc: 0.6705\nEpoch 567/600\n433/433 [==============================] - 0s 581us/step - loss: 0.5764 - acc: 0.8430 - val_loss: 1.0146 - val_acc: 0.6705\nEpoch 568/600\n433/433 [==============================] - 0s 550us/step - loss: 0.5129 - acc: 0.8337 - val_loss: 1.0147 - val_acc: 0.6705\nEpoch 569/600\n433/433 [==============================] - 0s 549us/step - loss: 0.5432 - acc: 0.8268 - val_loss: 1.0148 - val_acc: 0.6705\nEpoch 570/600\n433/433 [==============================] - 0s 529us/step - loss: 0.6184 - acc: 0.8014 - val_loss: 1.0149 - val_acc: 0.6705\nEpoch 571/600\n433/433 [==============================] - 0s 561us/step - loss: 0.5599 - acc: 0.8222 - val_loss: 1.0149 - val_acc: 0.6705\nEpoch 572/600\n433/433 [==============================] - 0s 572us/step - loss: 0.5925 - acc: 0.8291 - val_loss: 1.0149 - val_acc: 0.6705\nEpoch 573/600\n433/433 [==============================] - 0s 572us/step - loss: 0.5311 - acc: 0.8499 - val_loss: 1.0151 - val_acc: 0.6705\nEpoch 574/600\n433/433 [==============================] - 0s 543us/step - loss: 0.5493 - acc: 0.8222 - val_loss: 1.0152 - val_acc: 0.6705\nEpoch 575/600\n433/433 [==============================] - 0s 561us/step - loss: 0.5482 - acc: 0.8383 - val_loss: 1.0152 - val_acc: 0.6705\nEpoch 576/600\n433/433 [==============================] - 0s 544us/step - loss: 0.6239 - acc: 0.8037 - val_loss: 1.0152 - val_acc: 0.6705\nEpoch 577/600\n433/433 [==============================] - 0s 559us/step - loss: 0.5426 - acc: 0.8199 - val_loss: 1.0152 - val_acc: 0.6705\nEpoch 578/600\n433/433 [==============================] - 0s 555us/step - loss: 0.5970 - acc: 0.8222 - val_loss: 1.0152 - val_acc: 0.6705\nEpoch 579/600\n433/433 [==============================] - 0s 545us/step - loss: 0.5648 - acc: 0.8314 - val_loss: 1.0150 - val_acc: 0.6705\nEpoch 580/600\n433/433 [==============================] - 0s 560us/step - loss: 0.5521 - acc: 0.8383 - val_loss: 1.0150 - val_acc: 0.6705\nEpoch 581/600\n433/433 [==============================] - 0s 600us/step - loss: 0.5429 - acc: 0.8222 - val_loss: 1.0151 - val_acc: 0.6705\nEpoch 582/600\n433/433 [==============================] - 0s 558us/step - loss: 0.5233 - acc: 0.8383 - val_loss: 1.0150 - val_acc: 0.6705\nEpoch 583/600\n433/433 [==============================] - 0s 542us/step - loss: 0.5754 - acc: 0.8014 - val_loss: 1.0149 - val_acc: 0.6705\nEpoch 584/600\n433/433 [==============================] - 0s 574us/step - loss: 0.5550 - acc: 0.8222 - val_loss: 1.0148 - val_acc: 0.6705\nEpoch 585/600\n433/433 [==============================] - 0s 587us/step - loss: 0.5488 - acc: 0.8522 - val_loss: 1.0147 - val_acc: 0.6705\nEpoch 586/600\n433/433 [==============================] - 0s 554us/step - loss: 0.5978 - acc: 0.8152 - val_loss: 1.0143 - val_acc: 0.6705\nEpoch 587/600\n433/433 [==============================] - 0s 546us/step - loss: 0.5644 - acc: 0.8014 - val_loss: 1.0140 - val_acc: 0.6705\nEpoch 
588/600\n433/433 [==============================] - 0s 587us/step - loss: 0.5704 - acc: 0.7968 - val_loss: 1.0139 - val_acc: 0.6705\nEpoch 589/600\n433/433 [==============================] - 0s 561us/step - loss: 0.5287 - acc: 0.8430 - val_loss: 1.0137 - val_acc: 0.6705\nEpoch 590/600\n433/433 [==============================] - 0s 560us/step - loss: 0.5840 - acc: 0.8383 - val_loss: 1.0136 - val_acc: 0.6705\nEpoch 591/600\n433/433 [==============================] - 0s 564us/step - loss: 0.5436 - acc: 0.8406 - val_loss: 1.0134 - val_acc: 0.6705\nEpoch 592/600\n433/433 [==============================] - 0s 562us/step - loss: 0.5599 - acc: 0.8360 - val_loss: 1.0135 - val_acc: 0.6705\nEpoch 593/600\n433/433 [==============================] - 0s 536us/step - loss: 0.5506 - acc: 0.8199 - val_loss: 1.0133 - val_acc: 0.6705\nEpoch 594/600\n433/433 [==============================] - 0s 555us/step - loss: 0.5973 - acc: 0.8291 - val_loss: 1.0132 - val_acc: 0.6705\nEpoch 595/600\n433/433 [==============================] - 0s 573us/step - loss: 0.5560 - acc: 0.8337 - val_loss: 1.0131 - val_acc: 0.6705\nEpoch 596/600\n433/433 [==============================] - 0s 547us/step - loss: 0.5593 - acc: 0.8268 - val_loss: 1.0132 - val_acc: 0.6705\nEpoch 597/600\n433/433 [==============================] - 0s 570us/step - loss: 0.5766 - acc: 0.8083 - val_loss: 1.0130 - val_acc: 0.6705\nEpoch 598/600\n433/433 [==============================] - 0s 554us/step - loss: 0.5695 - acc: 0.8152 - val_loss: 1.0129 - val_acc: 0.6705\nEpoch 599/600\n433/433 [==============================] - 0s 545us/step - loss: 0.6161 - acc: 0.8083 - val_loss: 1.0128 - val_acc: 0.6705\nEpoch 600/600\n433/433 [==============================] - 0s 541us/step - loss: 0.5384 - acc: 0.8129 - val_loss: 1.0128 - val_acc: 0.6705\nTrain on 433 samples, validate on 176 samples\nEpoch 1/500\n433/433 [==============================] - 9s 20ms/step - loss: 0.5364 - acc: 0.8222 - val_loss: 1.0191 - val_acc: 0.6705\nEpoch 2/500\n433/433 [==============================] - 0s 572us/step - loss: 0.5385 - acc: 0.8222 - val_loss: 1.0238 - val_acc: 0.6705\nEpoch 3/500\n433/433 [==============================] - 0s 575us/step - loss: 0.5524 - acc: 0.8476 - val_loss: 1.0253 - val_acc: 0.6705\nEpoch 4/500\n433/433 [==============================] - 0s 556us/step - loss: 0.6317 - acc: 0.8152 - val_loss: 1.0280 - val_acc: 0.6705\nEpoch 5/500\n433/433 [==============================] - 0s 553us/step - loss: 0.6151 - acc: 0.8268 - val_loss: 1.0306 - val_acc: 0.6705\nEpoch 6/500\n433/433 [==============================] - 0s 554us/step - loss: 0.5717 - acc: 0.8245 - val_loss: 1.0316 - val_acc: 0.6705\nEpoch 7/500\n433/433 [==============================] - 0s 558us/step - loss: 0.5082 - acc: 0.8499 - val_loss: 1.0324 - val_acc: 0.6705\nEpoch 8/500\n433/433 [==============================] - 0s 535us/step - loss: 0.5463 - acc: 0.8476 - val_loss: 1.0330 - val_acc: 0.6705\nEpoch 9/500\n433/433 [==============================] - 0s 532us/step - loss: 0.5469 - acc: 0.8199 - val_loss: 1.0335 - val_acc: 0.6705\nEpoch 10/500\n433/433 [==============================] - 0s 563us/step - loss: 0.5750 - acc: 0.8291 - val_loss: 1.0333 - val_acc: 0.6761\nEpoch 11/500\n433/433 [==============================] - 0s 559us/step - loss: 0.5337 - acc: 0.8499 - val_loss: 1.0327 - val_acc: 0.6761\nEpoch 12/500\n433/433 [==============================] - 0s 552us/step - loss: 0.5723 - acc: 0.8291 - val_loss: 1.0326 - val_acc: 0.6761\nEpoch 13/500\n433/433 [==============================] - 0s 
562us/step - loss: 0.5268 - acc: 0.8337 - val_loss: 1.0322 - val_acc: 0.6761\nEpoch 14/500\n433/433 [==============================] - 0s 571us/step - loss: 0.5229 - acc: 0.8453 - val_loss: 1.0316 - val_acc: 0.6761\nEpoch 15/500\n433/433 [==============================] - 0s 559us/step - loss: 0.5361 - acc: 0.8176 - val_loss: 1.0311 - val_acc: 0.6761\nEpoch 16/500\n433/433 [==============================] - 0s 556us/step - loss: 0.5277 - acc: 0.8314 - val_loss: 1.0308 - val_acc: 0.6761\nEpoch 17/500\n433/433 [==============================] - 0s 545us/step - loss: 0.5052 - acc: 0.8406 - val_loss: 1.0305 - val_acc: 0.6761\nEpoch 18/500\n433/433 [==============================] - 0s 546us/step - loss: 0.5834 - acc: 0.8176 - val_loss: 1.0302 - val_acc: 0.6761\nEpoch 19/500\n433/433 [==============================] - 0s 577us/step - loss: 0.5736 - acc: 0.8176 - val_loss: 1.0297 - val_acc: 0.6761\nEpoch 20/500\n433/433 [==============================] - 0s 533us/step - loss: 0.5215 - acc: 0.8291 - val_loss: 1.0296 - val_acc: 0.6761\nEpoch 21/500\n433/433 [==============================] - 0s 558us/step - loss: 0.5276 - acc: 0.8476 - val_loss: 1.0293 - val_acc: 0.6761\nEpoch 22/500\n433/433 [==============================] - 0s 560us/step - loss: 0.5453 - acc: 0.8360 - val_loss: 1.0292 - val_acc: 0.6761\nEpoch 23/500\n433/433 [==============================] - 0s 565us/step - loss: 0.5588 - acc: 0.8268 - val_loss: 1.0290 - val_acc: 0.6761\nEpoch 24/500\n433/433 [==============================] - 0s 551us/step - loss: 0.5595 - acc: 0.8360 - val_loss: 1.0286 - val_acc: 0.6761\nEpoch 25/500\n433/433 [==============================] - 0s 554us/step - loss: 0.5608 - acc: 0.8176 - val_loss: 1.0286 - val_acc: 0.6761\nEpoch 26/500\n433/433 [==============================] - 0s 536us/step - loss: 0.5590 - acc: 0.8360 - val_loss: 1.0285 - val_acc: 0.6761\nEpoch 27/500\n433/433 [==============================] - 0s 547us/step - loss: 0.5622 - acc: 0.8222 - val_loss: 1.0282 - val_acc: 0.6761\nEpoch 28/500\n433/433 [==============================] - 0s 540us/step - loss: 0.5265 - acc: 0.8453 - val_loss: 1.0279 - val_acc: 0.6761\nEpoch 29/500\n433/433 [==============================] - 0s 535us/step - loss: 0.5609 - acc: 0.8360 - val_loss: 1.0276 - val_acc: 0.6761\nEpoch 30/500\n433/433 [==============================] - 0s 539us/step - loss: 0.5505 - acc: 0.8222 - val_loss: 1.0273 - val_acc: 0.6761\nEpoch 31/500\n433/433 [==============================] - 0s 563us/step - loss: 0.5436 - acc: 0.8222 - val_loss: 1.0270 - val_acc: 0.6761\nEpoch 32/500\n433/433 [==============================] - 0s 556us/step - loss: 0.4978 - acc: 0.8591 - val_loss: 1.0265 - val_acc: 0.6761\nEpoch 33/500\n433/433 [==============================] - 0s 545us/step - loss: 0.5385 - acc: 0.8199 - val_loss: 1.0261 - val_acc: 0.6761\nEpoch 34/500\n433/433 [==============================] - 0s 547us/step - loss: 0.5404 - acc: 0.8291 - val_loss: 1.0258 - val_acc: 0.6761\nEpoch 35/500\n433/433 [==============================] - 0s 556us/step - loss: 0.5603 - acc: 0.8199 - val_loss: 1.0255 - val_acc: 0.6761\nEpoch 36/500\n433/433 [==============================] - 0s 548us/step - loss: 0.5891 - acc: 0.8337 - val_loss: 1.0253 - val_acc: 0.6761\nEpoch 37/500\n433/433 [==============================] - 0s 551us/step - loss: 0.5728 - acc: 0.8014 - val_loss: 1.0248 - val_acc: 0.6761\nEpoch 38/500\n433/433 [==============================] - 0s 543us/step - loss: 0.5651 - acc: 0.8199 - val_loss: 1.0246 - val_acc: 0.6761\nEpoch 39/500\n433/433 
[==============================] - 0s 570us/step - loss: 0.5385 - acc: 0.8453 - val_loss: 1.0245 - val_acc: 0.6761\nEpoch 40/500\n433/433 [==============================] - 0s 552us/step - loss: 0.5445 - acc: 0.8499 - val_loss: 1.0245 - val_acc: 0.6761\nEpoch 41/500\n433/433 [==============================] - 0s 545us/step - loss: 0.5525 - acc: 0.8360 - val_loss: 1.0243 - val_acc: 0.6761\nEpoch 42/500\n433/433 [==============================] - 0s 552us/step - loss: 0.5836 - acc: 0.8129 - val_loss: 1.0242 - val_acc: 0.6761\nEpoch 43/500\n433/433 [==============================] - 0s 575us/step - loss: 0.5378 - acc: 0.8314 - val_loss: 1.0242 - val_acc: 0.6761\nEpoch 44/500\n433/433 [==============================] - 0s 559us/step - loss: 0.5077 - acc: 0.8360 - val_loss: 1.0239 - val_acc: 0.6761\nEpoch 45/500\n433/433 [==============================] - 0s 544us/step - loss: 0.5477 - acc: 0.8222 - val_loss: 1.0235 - val_acc: 0.6761\nEpoch 46/500\n433/433 [==============================] - 0s 550us/step - loss: 0.5103 - acc: 0.8430 - val_loss: 1.0232 - val_acc: 0.6761\nEpoch 47/500\n433/433 [==============================] - 0s 549us/step - loss: 0.5322 - acc: 0.8222 - val_loss: 1.0230 - val_acc: 0.6761\nEpoch 48/500\n433/433 [==============================] - 0s 558us/step - loss: 0.5555 - acc: 0.8314 - val_loss: 1.0228 - val_acc: 0.6761\nEpoch 49/500\n433/433 [==============================] - 0s 539us/step - loss: 0.5330 - acc: 0.8176 - val_loss: 1.0225 - val_acc: 0.6761\nEpoch 50/500\n433/433 [==============================] - 0s 528us/step - loss: 0.5165 - acc: 0.8360 - val_loss: 1.0223 - val_acc: 0.6761\nEpoch 51/500\n433/433 [==============================] - 0s 583us/step - loss: 0.5994 - acc: 0.8037 - val_loss: 1.0220 - val_acc: 0.6761\nEpoch 52/500\n433/433 [==============================] - 0s 553us/step - loss: 0.5976 - acc: 0.7968 - val_loss: 1.0220 - val_acc: 0.6761\nEpoch 53/500\n433/433 [==============================] - 0s 556us/step - loss: 0.5747 - acc: 0.8245 - val_loss: 1.0221 - val_acc: 0.6761\nEpoch 54/500\n433/433 [==============================] - 0s 561us/step - loss: 0.5059 - acc: 0.8314 - val_loss: 1.0220 - val_acc: 0.6761\nEpoch 55/500\n433/433 [==============================] - 0s 554us/step - loss: 0.5493 - acc: 0.8106 - val_loss: 1.0219 - val_acc: 0.6761\nEpoch 56/500\n433/433 [==============================] - 0s 561us/step - loss: 0.5426 - acc: 0.8499 - val_loss: 1.0216 - val_acc: 0.6761\nEpoch 57/500\n433/433 [==============================] - 0s 554us/step - loss: 0.5704 - acc: 0.8314 - val_loss: 1.0214 - val_acc: 0.6761\nEpoch 58/500\n433/433 [==============================] - 0s 547us/step - loss: 0.5367 - acc: 0.8199 - val_loss: 1.0215 - val_acc: 0.6761\nEpoch 59/500\n433/433 [==============================] - 0s 568us/step - loss: 0.5161 - acc: 0.8406 - val_loss: 1.0214 - val_acc: 0.6761\nEpoch 60/500\n433/433 [==============================] - 0s 557us/step - loss: 0.5316 - acc: 0.8268 - val_loss: 1.0214 - val_acc: 0.6761\nEpoch 61/500\n433/433 [==============================] - 0s 556us/step - loss: 0.5248 - acc: 0.8522 - val_loss: 1.0214 - val_acc: 0.6761\nEpoch 62/500\n433/433 [==============================] - 0s 552us/step - loss: 0.5334 - acc: 0.8314 - val_loss: 1.0213 - val_acc: 0.6761\nEpoch 63/500\n433/433 [==============================] - 0s 547us/step - loss: 0.4724 - acc: 0.8453 - val_loss: 1.0214 - val_acc: 0.6761\nEpoch 64/500\n433/433 [==============================] - 0s 545us/step - loss: 0.5581 - acc: 0.8337 - val_loss: 1.0213 - 
val_acc: 0.6761\nEpoch 65/500\n433/433 [==============================] - 0s 570us/step - loss: 0.5431 - acc: 0.8314 - val_loss: 1.0211 - val_acc: 0.6761\n[epochs 66-499 omitted: training accuracy oscillates around 0.78-0.87 while validation accuracy stays flat at 0.67-0.69, a clear overfitting plateau]\nEpoch 500/500\n433/433 [==============================] - 0s 551us/step - loss: 0.5306 - acc: 0.8291 - val_loss: 1.0065 - val_acc: 0.6818\n"
],
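[
"# Added sketch (not in the original): the same history object also carries the loss\n# curves; plotting them next to the accuracy plot below makes over/under-fitting easier to judge.\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('Model loss')\nplt.ylabel('Loss')\nplt.xlabel('Epoch')\nplt.legend(['Train', 'Test'], loc='upper right')\nplt.show()",
"_____no_output_____"
],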
[
"# Plot training & validation accuracy values\nplt.plot(history.history['acc'])\nplt.plot(history.history['val_acc'])\nplt.title('Model accuracy')\nplt.ylabel('Accuracy')\nplt.xlabel('Epoch')\nplt.legend(['Train', 'Test'], loc='upper left')\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Train and test on GT_split 3",
"_____no_output_____"
]
],
[
[
"from google.colab import drive\nimport pickle\ndrive.mount('/content/drive')\nDATA_PATH1 = \"/content/drive/My Drive/Colab Notebooks/Data\"\ninfile = open(DATA_PATH1+'/GT_train_3.pkl','rb')\nTrain = pickle.load(infile)\nDATA_PATH2 = \"/content/drive/My Drive/Colab Notebooks/Data\"\ntestfile= open(DATA_PATH2+'/GT_test_3.pkl','rb')\nTest = pickle.load(testfile)\n\nfrom sklearn import preprocessing\nle = preprocessing.LabelEncoder()\nle.fit(Train['label'])\n\nX_0,X_1,Y = data_generator(Train,C,le)\nX_test_0,X_test_1,Y_test = data_generator(Test,C,le)",
"Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount(\"/content/drive\", force_remount=True).\n"
],
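[
"# Quick sanity check (added, not in the original): assuming data_generator returns\n# numpy arrays, confirm the two input branches and the one-hot targets are aligned.\nprint('X_0:', X_0.shape, 'X_1:', X_1.shape, 'Y:', Y.shape)\nprint('X_test_0:', X_test_0.shape, 'X_test_1:', X_test_1.shape, 'Y_test:', Y_test.shape)",
"_____no_output_____"
],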
[
"# Re-initialize weights, since training and testing data switch\nDD_Net = build_DD_Net(C)",
"_____no_output_____"
],
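[
"# Optional inspection (added, not in the original): print the rebuilt architecture\n# to confirm build_DD_Net(C) returned a fresh, untrained model.\nDD_Net.summary()",
"_____no_output_____"
],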
[
"import keras\nlr = 1e-3\nDD_Net.compile(loss=\"categorical_crossentropy\",optimizer=adam(lr),metrics=['accuracy'])\nlrScheduler = keras.callbacks.ReduceLROnPlateau(monitor='loss', factor=0.5, patience=5, cooldown=5, min_lr=1e-5)\nhistory = DD_Net.fit([X_0,X_1],Y,\n batch_size=len(Y),\n epochs=600,\n verbose=True,\n shuffle=True,\n callbacks=[lrScheduler],\n validation_data=([X_test_0,X_test_1],Y_test) \n )\n\nlr = 1e-3\nDD_Net.compile(loss=\"categorical_crossentropy\",optimizer=adam(lr),metrics=['accuracy'])\nlrScheduler = keras.callbacks.ReduceLROnPlateau(monitor='loss', factor=0.5, patience=5, cooldown=5, min_lr=5e-6)\nhistory = DD_Net.fit([X_0,X_1],Y,\n batch_size=len(Y),\n epochs=500,\n verbose=True,\n shuffle=True,\n callbacks=[lrScheduler],\n validation_data=([X_test_0,X_test_1],Y_test) \n )",
"Train on 434 samples, validate on 175 samples\nEpoch 1/600\n434/434 [==============================] - 10s 24ms/step - loss: 3.7275 - acc: 0.0576 - val_loss: 3.0493 - val_acc: 0.0400\nEpoch 2/600\n434/434 [==============================] - 0s 523us/step - loss: 3.4548 - acc: 0.0691 - val_loss: 2.9580 - val_acc: 0.0857\nEpoch 3/600\n434/434 [==============================] - 0s 565us/step - loss: 3.5344 - acc: 0.0806 - val_loss: 2.8831 - val_acc: 0.0800\nEpoch 4/600\n434/434 [==============================] - 0s 528us/step - loss: 3.2725 - acc: 0.0968 - val_loss: 2.8193 - val_acc: 0.1429\nEpoch 5/600\n434/434 [==============================] - 0s 529us/step - loss: 3.1764 - acc: 0.1037 - val_loss: 2.7552 - val_acc: 0.2000\nEpoch 6/600\n434/434 [==============================] - 0s 544us/step - loss: 3.0640 - acc: 0.1613 - val_loss: 2.6946 - val_acc: 0.2114\nEpoch 7/600\n434/434 [==============================] - 0s 525us/step - loss: 2.9907 - acc: 0.1175 - val_loss: 2.6390 - val_acc: 0.2400\nEpoch 8/600\n434/434 [==============================] - 0s 561us/step - loss: 2.9494 - acc: 0.1705 - val_loss: 2.5873 - val_acc: 0.2686\nEpoch 9/600\n434/434 [==============================] - 0s 535us/step - loss: 2.8000 - acc: 0.2074 - val_loss: 2.5360 - val_acc: 0.2800\nEpoch 10/600\n434/434 [==============================] - 0s 535us/step - loss: 2.8389 - acc: 0.1820 - val_loss: 2.4877 - val_acc: 0.2971\nEpoch 11/600\n434/434 [==============================] - 0s 547us/step - loss: 2.6916 - acc: 0.2143 - val_loss: 2.4394 - val_acc: 0.3086\nEpoch 12/600\n434/434 [==============================] - 0s 530us/step - loss: 2.7003 - acc: 0.2304 - val_loss: 2.3887 - val_acc: 0.3314\nEpoch 13/600\n434/434 [==============================] - 0s 525us/step - loss: 2.6501 - acc: 0.2373 - val_loss: 2.3401 - val_acc: 0.3543\nEpoch 14/600\n434/434 [==============================] - 0s 530us/step - loss: 2.5211 - acc: 0.2581 - val_loss: 2.2917 - val_acc: 0.3657\nEpoch 15/600\n434/434 [==============================] - 0s 537us/step - loss: 2.4543 - acc: 0.2742 - val_loss: 2.2423 - val_acc: 0.3943\nEpoch 16/600\n434/434 [==============================] - 0s 544us/step - loss: 2.4266 - acc: 0.2811 - val_loss: 2.1931 - val_acc: 0.4114\nEpoch 17/600\n434/434 [==============================] - 0s 531us/step - loss: 2.4213 - acc: 0.3157 - val_loss: 2.1484 - val_acc: 0.4114\nEpoch 18/600\n434/434 [==============================] - 0s 528us/step - loss: 2.4103 - acc: 0.2972 - val_loss: 2.1059 - val_acc: 0.4343\nEpoch 19/600\n434/434 [==============================] - 0s 544us/step - loss: 2.3748 - acc: 0.2972 - val_loss: 2.0624 - val_acc: 0.4229\nEpoch 20/600\n434/434 [==============================] - 0s 522us/step - loss: 2.3348 - acc: 0.3134 - val_loss: 2.0206 - val_acc: 0.4229\nEpoch 21/600\n434/434 [==============================] - 0s 533us/step - loss: 2.1664 - acc: 0.3479 - val_loss: 1.9830 - val_acc: 0.4400\nEpoch 22/600\n434/434 [==============================] - 0s 538us/step - loss: 2.2344 - acc: 0.3433 - val_loss: 1.9498 - val_acc: 0.4400\nEpoch 23/600\n434/434 [==============================] - 0s 550us/step - loss: 2.1825 - acc: 0.3594 - val_loss: 1.9236 - val_acc: 0.4457\nEpoch 24/600\n434/434 [==============================] - 0s 528us/step - loss: 2.0267 - acc: 0.3963 - val_loss: 1.8994 - val_acc: 0.4400\nEpoch 25/600\n434/434 [==============================] - 0s 535us/step - loss: 2.0988 - acc: 0.3594 - val_loss: 1.8777 - val_acc: 0.4457\nEpoch 26/600\n434/434 [==============================] - 0s 
527us/step - loss: 2.0036 - acc: 0.3802 - val_loss: 1.8599 - val_acc: 0.4400\n... [epochs 27-459 omitted: val_acc climbs to ~0.68 by epoch ~174 and plateaus there, while train acc settles around 0.82-0.87 and val_loss eases to ~1.02] ...\nEpoch 460/600\n434/434 [==============================] - 0s 547us/step - loss: 
0.4610 - acc: 0.8594 - val_loss: 1.0165 - val_acc: 0.6800\nEpoch 461/600\n434/434 [==============================] - 0s 557us/step - loss: 0.5022 - acc: 0.8433 - val_loss: 1.0165 - val_acc: 0.6800\nEpoch 462/600\n434/434 [==============================] - 0s 550us/step - loss: 0.5106 - acc: 0.8410 - val_loss: 1.0166 - val_acc: 0.6800\nEpoch 463/600\n434/434 [==============================] - 0s 637us/step - loss: 0.4866 - acc: 0.8433 - val_loss: 1.0166 - val_acc: 0.6800\nEpoch 464/600\n434/434 [==============================] - 0s 556us/step - loss: 0.5338 - acc: 0.8088 - val_loss: 1.0166 - val_acc: 0.6800\nEpoch 465/600\n434/434 [==============================] - 0s 553us/step - loss: 0.4971 - acc: 0.8525 - val_loss: 1.0166 - val_acc: 0.6800\nEpoch 466/600\n434/434 [==============================] - 0s 552us/step - loss: 0.4316 - acc: 0.8687 - val_loss: 1.0169 - val_acc: 0.6800\nEpoch 467/600\n434/434 [==============================] - 0s 556us/step - loss: 0.4422 - acc: 0.8802 - val_loss: 1.0170 - val_acc: 0.6800\nEpoch 468/600\n434/434 [==============================] - 0s 551us/step - loss: 0.4900 - acc: 0.8571 - val_loss: 1.0171 - val_acc: 0.6800\nEpoch 469/600\n434/434 [==============================] - 0s 539us/step - loss: 0.5081 - acc: 0.8410 - val_loss: 1.0173 - val_acc: 0.6800\nEpoch 470/600\n434/434 [==============================] - 0s 556us/step - loss: 0.5330 - acc: 0.8295 - val_loss: 1.0173 - val_acc: 0.6800\nEpoch 471/600\n434/434 [==============================] - 0s 533us/step - loss: 0.5005 - acc: 0.8525 - val_loss: 1.0174 - val_acc: 0.6800\nEpoch 472/600\n434/434 [==============================] - 0s 537us/step - loss: 0.4651 - acc: 0.8618 - val_loss: 1.0172 - val_acc: 0.6800\nEpoch 473/600\n434/434 [==============================] - 0s 549us/step - loss: 0.4984 - acc: 0.8479 - val_loss: 1.0171 - val_acc: 0.6800\nEpoch 474/600\n434/434 [==============================] - 0s 550us/step - loss: 0.4612 - acc: 0.8779 - val_loss: 1.0170 - val_acc: 0.6800\nEpoch 475/600\n434/434 [==============================] - 0s 556us/step - loss: 0.4703 - acc: 0.8410 - val_loss: 1.0169 - val_acc: 0.6800\nEpoch 476/600\n434/434 [==============================] - 0s 552us/step - loss: 0.4667 - acc: 0.8687 - val_loss: 1.0167 - val_acc: 0.6800\nEpoch 477/600\n434/434 [==============================] - 0s 556us/step - loss: 0.5376 - acc: 0.8387 - val_loss: 1.0167 - val_acc: 0.6800\nEpoch 478/600\n434/434 [==============================] - 0s 553us/step - loss: 0.5081 - acc: 0.8456 - val_loss: 1.0166 - val_acc: 0.6800\nEpoch 479/600\n434/434 [==============================] - 0s 547us/step - loss: 0.4878 - acc: 0.8479 - val_loss: 1.0165 - val_acc: 0.6800\nEpoch 480/600\n434/434 [==============================] - 0s 554us/step - loss: 0.5168 - acc: 0.8456 - val_loss: 1.0163 - val_acc: 0.6800\nEpoch 481/600\n434/434 [==============================] - 0s 555us/step - loss: 0.5208 - acc: 0.8525 - val_loss: 1.0163 - val_acc: 0.6800\nEpoch 482/600\n434/434 [==============================] - 0s 550us/step - loss: 0.4590 - acc: 0.8664 - val_loss: 1.0161 - val_acc: 0.6800\nEpoch 483/600\n434/434 [==============================] - 0s 554us/step - loss: 0.4719 - acc: 0.8548 - val_loss: 1.0160 - val_acc: 0.6800\nEpoch 484/600\n434/434 [==============================] - 0s 550us/step - loss: 0.4980 - acc: 0.8594 - val_loss: 1.0160 - val_acc: 0.6800\nEpoch 485/600\n434/434 [==============================] - 0s 554us/step - loss: 0.4913 - acc: 0.8525 - val_loss: 1.0161 - val_acc: 0.6800\nEpoch 
486/600\n434/434 [==============================] - 0s 545us/step - loss: 0.5524 - acc: 0.8249 - val_loss: 1.0162 - val_acc: 0.6800\nEpoch 487/600\n434/434 [==============================] - 0s 567us/step - loss: 0.4769 - acc: 0.8548 - val_loss: 1.0163 - val_acc: 0.6800\nEpoch 488/600\n434/434 [==============================] - 0s 558us/step - loss: 0.5246 - acc: 0.8295 - val_loss: 1.0164 - val_acc: 0.6800\nEpoch 489/600\n434/434 [==============================] - 0s 559us/step - loss: 0.5534 - acc: 0.8364 - val_loss: 1.0165 - val_acc: 0.6800\nEpoch 490/600\n434/434 [==============================] - 0s 557us/step - loss: 0.4818 - acc: 0.8525 - val_loss: 1.0165 - val_acc: 0.6800\nEpoch 491/600\n434/434 [==============================] - 0s 543us/step - loss: 0.5033 - acc: 0.8479 - val_loss: 1.0164 - val_acc: 0.6800\nEpoch 492/600\n434/434 [==============================] - 0s 576us/step - loss: 0.5041 - acc: 0.8502 - val_loss: 1.0164 - val_acc: 0.6800\nEpoch 493/600\n434/434 [==============================] - 0s 558us/step - loss: 0.5181 - acc: 0.8341 - val_loss: 1.0165 - val_acc: 0.6800\nEpoch 494/600\n434/434 [==============================] - 0s 571us/step - loss: 0.5060 - acc: 0.8387 - val_loss: 1.0163 - val_acc: 0.6800\nEpoch 495/600\n434/434 [==============================] - 0s 548us/step - loss: 0.5377 - acc: 0.8456 - val_loss: 1.0162 - val_acc: 0.6800\nEpoch 496/600\n434/434 [==============================] - 0s 566us/step - loss: 0.5248 - acc: 0.8456 - val_loss: 1.0162 - val_acc: 0.6800\nEpoch 497/600\n434/434 [==============================] - 0s 541us/step - loss: 0.4264 - acc: 0.8802 - val_loss: 1.0161 - val_acc: 0.6800\nEpoch 498/600\n434/434 [==============================] - 0s 553us/step - loss: 0.5423 - acc: 0.8318 - val_loss: 1.0161 - val_acc: 0.6800\nEpoch 499/600\n434/434 [==============================] - 0s 551us/step - loss: 0.4869 - acc: 0.8548 - val_loss: 1.0159 - val_acc: 0.6800\nEpoch 500/600\n434/434 [==============================] - 0s 543us/step - loss: 0.4942 - acc: 0.8594 - val_loss: 1.0158 - val_acc: 0.6800\nEpoch 501/600\n434/434 [==============================] - 0s 573us/step - loss: 0.4336 - acc: 0.8802 - val_loss: 1.0157 - val_acc: 0.6800\nEpoch 502/600\n434/434 [==============================] - 0s 535us/step - loss: 0.4843 - acc: 0.8664 - val_loss: 1.0156 - val_acc: 0.6800\nEpoch 503/600\n434/434 [==============================] - 0s 575us/step - loss: 0.4959 - acc: 0.8594 - val_loss: 1.0155 - val_acc: 0.6800\nEpoch 504/600\n434/434 [==============================] - 0s 542us/step - loss: 0.4974 - acc: 0.8594 - val_loss: 1.0154 - val_acc: 0.6800\nEpoch 505/600\n434/434 [==============================] - 0s 606us/step - loss: 0.4894 - acc: 0.8433 - val_loss: 1.0152 - val_acc: 0.6800\nEpoch 506/600\n434/434 [==============================] - 0s 546us/step - loss: 0.4743 - acc: 0.8571 - val_loss: 1.0151 - val_acc: 0.6800\nEpoch 507/600\n434/434 [==============================] - 0s 561us/step - loss: 0.4818 - acc: 0.8479 - val_loss: 1.0150 - val_acc: 0.6800\nEpoch 508/600\n434/434 [==============================] - 0s 542us/step - loss: 0.4580 - acc: 0.8502 - val_loss: 1.0150 - val_acc: 0.6800\nEpoch 509/600\n434/434 [==============================] - 0s 546us/step - loss: 0.5313 - acc: 0.8203 - val_loss: 1.0150 - val_acc: 0.6800\nEpoch 510/600\n434/434 [==============================] - 0s 548us/step - loss: 0.5523 - acc: 0.8318 - val_loss: 1.0150 - val_acc: 0.6800\nEpoch 511/600\n434/434 [==============================] - 0s 555us/step - loss: 
0.4899 - acc: 0.8548 - val_loss: 1.0149 - val_acc: 0.6800\nEpoch 512/600\n434/434 [==============================] - 0s 556us/step - loss: 0.5322 - acc: 0.8387 - val_loss: 1.0149 - val_acc: 0.6800\nEpoch 513/600\n434/434 [==============================] - 0s 550us/step - loss: 0.4724 - acc: 0.8687 - val_loss: 1.0148 - val_acc: 0.6800\nEpoch 514/600\n434/434 [==============================] - 0s 526us/step - loss: 0.5164 - acc: 0.8272 - val_loss: 1.0148 - val_acc: 0.6800\nEpoch 515/600\n434/434 [==============================] - 0s 549us/step - loss: 0.5125 - acc: 0.8364 - val_loss: 1.0148 - val_acc: 0.6800\nEpoch 516/600\n434/434 [==============================] - 0s 547us/step - loss: 0.5265 - acc: 0.8272 - val_loss: 1.0148 - val_acc: 0.6800\nEpoch 517/600\n434/434 [==============================] - 0s 559us/step - loss: 0.4832 - acc: 0.8571 - val_loss: 1.0146 - val_acc: 0.6800\nEpoch 518/600\n434/434 [==============================] - 0s 543us/step - loss: 0.5472 - acc: 0.8226 - val_loss: 1.0145 - val_acc: 0.6800\nEpoch 519/600\n434/434 [==============================] - 0s 530us/step - loss: 0.5361 - acc: 0.8318 - val_loss: 1.0144 - val_acc: 0.6800\nEpoch 520/600\n434/434 [==============================] - 0s 572us/step - loss: 0.5093 - acc: 0.8318 - val_loss: 1.0144 - val_acc: 0.6800\nEpoch 521/600\n434/434 [==============================] - 0s 553us/step - loss: 0.5137 - acc: 0.8525 - val_loss: 1.0143 - val_acc: 0.6800\nEpoch 522/600\n434/434 [==============================] - 0s 550us/step - loss: 0.5150 - acc: 0.8571 - val_loss: 1.0142 - val_acc: 0.6800\nEpoch 523/600\n434/434 [==============================] - 0s 535us/step - loss: 0.5108 - acc: 0.8387 - val_loss: 1.0141 - val_acc: 0.6800\nEpoch 524/600\n434/434 [==============================] - 0s 534us/step - loss: 0.4647 - acc: 0.8641 - val_loss: 1.0141 - val_acc: 0.6800\nEpoch 525/600\n434/434 [==============================] - 0s 559us/step - loss: 0.4823 - acc: 0.8433 - val_loss: 1.0141 - val_acc: 0.6800\nEpoch 526/600\n434/434 [==============================] - 0s 558us/step - loss: 0.5052 - acc: 0.8479 - val_loss: 1.0140 - val_acc: 0.6800\nEpoch 527/600\n434/434 [==============================] - 0s 560us/step - loss: 0.5150 - acc: 0.8341 - val_loss: 1.0140 - val_acc: 0.6800\nEpoch 528/600\n434/434 [==============================] - 0s 558us/step - loss: 0.4735 - acc: 0.8756 - val_loss: 1.0138 - val_acc: 0.6800\nEpoch 529/600\n434/434 [==============================] - 0s 549us/step - loss: 0.5458 - acc: 0.8203 - val_loss: 1.0138 - val_acc: 0.6800\nEpoch 530/600\n434/434 [==============================] - 0s 561us/step - loss: 0.5047 - acc: 0.8548 - val_loss: 1.0137 - val_acc: 0.6800\nEpoch 531/600\n434/434 [==============================] - 0s 548us/step - loss: 0.4681 - acc: 0.8710 - val_loss: 1.0135 - val_acc: 0.6800\nEpoch 532/600\n434/434 [==============================] - 0s 542us/step - loss: 0.4928 - acc: 0.8433 - val_loss: 1.0134 - val_acc: 0.6800\nEpoch 533/600\n434/434 [==============================] - 0s 526us/step - loss: 0.4944 - acc: 0.8525 - val_loss: 1.0133 - val_acc: 0.6800\nEpoch 534/600\n434/434 [==============================] - 0s 564us/step - loss: 0.5174 - acc: 0.8203 - val_loss: 1.0133 - val_acc: 0.6743\nEpoch 535/600\n434/434 [==============================] - 0s 570us/step - loss: 0.4945 - acc: 0.8387 - val_loss: 1.0132 - val_acc: 0.6743\nEpoch 536/600\n434/434 [==============================] - 0s 538us/step - loss: 0.5369 - acc: 0.8433 - val_loss: 1.0132 - val_acc: 0.6743\nEpoch 
537/600\n434/434 [==============================] - 0s 550us/step - loss: 0.5012 - acc: 0.8387 - val_loss: 1.0131 - val_acc: 0.6743\nEpoch 538/600\n434/434 [==============================] - 0s 561us/step - loss: 0.5180 - acc: 0.8341 - val_loss: 1.0131 - val_acc: 0.6743\nEpoch 539/600\n434/434 [==============================] - 0s 561us/step - loss: 0.4555 - acc: 0.8733 - val_loss: 1.0131 - val_acc: 0.6743\nEpoch 540/600\n434/434 [==============================] - 0s 563us/step - loss: 0.5053 - acc: 0.8594 - val_loss: 1.0130 - val_acc: 0.6743\nEpoch 541/600\n434/434 [==============================] - 0s 540us/step - loss: 0.4624 - acc: 0.8710 - val_loss: 1.0129 - val_acc: 0.6743\nEpoch 542/600\n434/434 [==============================] - 0s 547us/step - loss: 0.4714 - acc: 0.8687 - val_loss: 1.0129 - val_acc: 0.6743\nEpoch 543/600\n434/434 [==============================] - 0s 550us/step - loss: 0.5424 - acc: 0.8364 - val_loss: 1.0127 - val_acc: 0.6743\nEpoch 544/600\n434/434 [==============================] - 0s 548us/step - loss: 0.5239 - acc: 0.8548 - val_loss: 1.0125 - val_acc: 0.6743\nEpoch 545/600\n434/434 [==============================] - 0s 530us/step - loss: 0.5000 - acc: 0.8456 - val_loss: 1.0123 - val_acc: 0.6743\nEpoch 546/600\n434/434 [==============================] - 0s 565us/step - loss: 0.4890 - acc: 0.8641 - val_loss: 1.0123 - val_acc: 0.6743\nEpoch 547/600\n434/434 [==============================] - 0s 560us/step - loss: 0.4807 - acc: 0.8664 - val_loss: 1.0121 - val_acc: 0.6743\nEpoch 548/600\n434/434 [==============================] - 0s 546us/step - loss: 0.5614 - acc: 0.8065 - val_loss: 1.0119 - val_acc: 0.6743\nEpoch 549/600\n434/434 [==============================] - 0s 557us/step - loss: 0.4938 - acc: 0.8641 - val_loss: 1.0116 - val_acc: 0.6743\nEpoch 550/600\n434/434 [==============================] - 0s 536us/step - loss: 0.4812 - acc: 0.8456 - val_loss: 1.0115 - val_acc: 0.6743\nEpoch 551/600\n434/434 [==============================] - 0s 557us/step - loss: 0.4398 - acc: 0.8756 - val_loss: 1.0114 - val_acc: 0.6743\nEpoch 552/600\n434/434 [==============================] - 0s 573us/step - loss: 0.5213 - acc: 0.8364 - val_loss: 1.0113 - val_acc: 0.6743\nEpoch 553/600\n434/434 [==============================] - 0s 604us/step - loss: 0.4980 - acc: 0.8433 - val_loss: 1.0113 - val_acc: 0.6743\nEpoch 554/600\n434/434 [==============================] - 0s 553us/step - loss: 0.4571 - acc: 0.8618 - val_loss: 1.0112 - val_acc: 0.6743\nEpoch 555/600\n434/434 [==============================] - 0s 561us/step - loss: 0.5065 - acc: 0.8410 - val_loss: 1.0112 - val_acc: 0.6743\nEpoch 556/600\n434/434 [==============================] - 0s 565us/step - loss: 0.4862 - acc: 0.8710 - val_loss: 1.0110 - val_acc: 0.6743\nEpoch 557/600\n434/434 [==============================] - 0s 545us/step - loss: 0.5083 - acc: 0.8571 - val_loss: 1.0109 - val_acc: 0.6743\nEpoch 558/600\n434/434 [==============================] - 0s 561us/step - loss: 0.4775 - acc: 0.8479 - val_loss: 1.0107 - val_acc: 0.6743\nEpoch 559/600\n434/434 [==============================] - 0s 562us/step - loss: 0.5129 - acc: 0.8502 - val_loss: 1.0107 - val_acc: 0.6743\nEpoch 560/600\n434/434 [==============================] - 0s 534us/step - loss: 0.5190 - acc: 0.8433 - val_loss: 1.0107 - val_acc: 0.6743\nEpoch 561/600\n434/434 [==============================] - 0s 544us/step - loss: 0.4674 - acc: 0.8594 - val_loss: 1.0106 - val_acc: 0.6743\nEpoch 562/600\n434/434 [==============================] - 0s 549us/step - loss: 
0.5554 - acc: 0.8134 - val_loss: 1.0105 - val_acc: 0.6743\nEpoch 563/600\n434/434 [==============================] - 0s 547us/step - loss: 0.5242 - acc: 0.8525 - val_loss: 1.0104 - val_acc: 0.6743\nEpoch 564/600\n434/434 [==============================] - 0s 566us/step - loss: 0.4883 - acc: 0.8410 - val_loss: 1.0103 - val_acc: 0.6743\nEpoch 565/600\n434/434 [==============================] - 0s 520us/step - loss: 0.5170 - acc: 0.8341 - val_loss: 1.0103 - val_acc: 0.6743\nEpoch 566/600\n434/434 [==============================] - 0s 577us/step - loss: 0.5290 - acc: 0.8318 - val_loss: 1.0101 - val_acc: 0.6743\nEpoch 567/600\n434/434 [==============================] - 0s 553us/step - loss: 0.5064 - acc: 0.8433 - val_loss: 1.0101 - val_acc: 0.6743\nEpoch 568/600\n434/434 [==============================] - 0s 556us/step - loss: 0.4702 - acc: 0.8825 - val_loss: 1.0099 - val_acc: 0.6743\nEpoch 569/600\n434/434 [==============================] - 0s 557us/step - loss: 0.4771 - acc: 0.8641 - val_loss: 1.0098 - val_acc: 0.6743\nEpoch 570/600\n434/434 [==============================] - 0s 554us/step - loss: 0.4822 - acc: 0.8502 - val_loss: 1.0096 - val_acc: 0.6743\nEpoch 571/600\n434/434 [==============================] - 0s 561us/step - loss: 0.4433 - acc: 0.8756 - val_loss: 1.0095 - val_acc: 0.6857\nEpoch 572/600\n434/434 [==============================] - 0s 543us/step - loss: 0.4741 - acc: 0.8479 - val_loss: 1.0094 - val_acc: 0.6857\nEpoch 573/600\n434/434 [==============================] - 0s 551us/step - loss: 0.5079 - acc: 0.8571 - val_loss: 1.0092 - val_acc: 0.6857\nEpoch 574/600\n434/434 [==============================] - 0s 545us/step - loss: 0.4747 - acc: 0.8548 - val_loss: 1.0091 - val_acc: 0.6857\nEpoch 575/600\n434/434 [==============================] - 0s 552us/step - loss: 0.4978 - acc: 0.8594 - val_loss: 1.0091 - val_acc: 0.6857\nEpoch 576/600\n434/434 [==============================] - 0s 543us/step - loss: 0.4948 - acc: 0.8341 - val_loss: 1.0090 - val_acc: 0.6857\nEpoch 577/600\n434/434 [==============================] - 0s 568us/step - loss: 0.5284 - acc: 0.8295 - val_loss: 1.0090 - val_acc: 0.6857\nEpoch 578/600\n434/434 [==============================] - 0s 549us/step - loss: 0.5136 - acc: 0.8456 - val_loss: 1.0090 - val_acc: 0.6857\nEpoch 579/600\n434/434 [==============================] - 0s 540us/step - loss: 0.4952 - acc: 0.8456 - val_loss: 1.0089 - val_acc: 0.6857\nEpoch 580/600\n434/434 [==============================] - 0s 565us/step - loss: 0.5040 - acc: 0.8387 - val_loss: 1.0089 - val_acc: 0.6857\nEpoch 581/600\n434/434 [==============================] - 0s 565us/step - loss: 0.4817 - acc: 0.8525 - val_loss: 1.0089 - val_acc: 0.6857\nEpoch 582/600\n434/434 [==============================] - 0s 553us/step - loss: 0.5341 - acc: 0.8364 - val_loss: 1.0090 - val_acc: 0.6857\nEpoch 583/600\n434/434 [==============================] - 0s 542us/step - loss: 0.5240 - acc: 0.8456 - val_loss: 1.0088 - val_acc: 0.6857\nEpoch 584/600\n434/434 [==============================] - 0s 549us/step - loss: 0.4772 - acc: 0.8502 - val_loss: 1.0087 - val_acc: 0.6857\nEpoch 585/600\n434/434 [==============================] - 0s 537us/step - loss: 0.5119 - acc: 0.8479 - val_loss: 1.0085 - val_acc: 0.6800\nEpoch 586/600\n434/434 [==============================] - 0s 550us/step - loss: 0.5426 - acc: 0.8364 - val_loss: 1.0086 - val_acc: 0.6857\nEpoch 587/600\n434/434 [==============================] - 0s 550us/step - loss: 0.5101 - acc: 0.8433 - val_loss: 1.0085 - val_acc: 0.6857\nEpoch 
588/600\n434/434 [==============================] - 0s 541us/step - loss: 0.4972 - acc: 0.8387 - val_loss: 1.0085 - val_acc: 0.6857\nEpoch 589/600\n434/434 [==============================] - 0s 562us/step - loss: 0.5277 - acc: 0.8272 - val_loss: 1.0084 - val_acc: 0.6857\nEpoch 590/600\n434/434 [==============================] - 0s 568us/step - loss: 0.4953 - acc: 0.8502 - val_loss: 1.0084 - val_acc: 0.6857\nEpoch 591/600\n434/434 [==============================] - 0s 563us/step - loss: 0.4369 - acc: 0.8848 - val_loss: 1.0084 - val_acc: 0.6857\nEpoch 592/600\n434/434 [==============================] - 0s 538us/step - loss: 0.4820 - acc: 0.8410 - val_loss: 1.0085 - val_acc: 0.6857\nEpoch 593/600\n434/434 [==============================] - 0s 551us/step - loss: 0.4639 - acc: 0.8664 - val_loss: 1.0085 - val_acc: 0.6857\nEpoch 594/600\n434/434 [==============================] - 0s 547us/step - loss: 0.5050 - acc: 0.8456 - val_loss: 1.0084 - val_acc: 0.6857\nEpoch 595/600\n434/434 [==============================] - 0s 601us/step - loss: 0.5005 - acc: 0.8433 - val_loss: 1.0083 - val_acc: 0.6857\nEpoch 596/600\n434/434 [==============================] - 0s 553us/step - loss: 0.4679 - acc: 0.8525 - val_loss: 1.0082 - val_acc: 0.6857\nEpoch 597/600\n434/434 [==============================] - 0s 547us/step - loss: 0.5315 - acc: 0.8479 - val_loss: 1.0082 - val_acc: 0.6857\nEpoch 598/600\n434/434 [==============================] - 0s 536us/step - loss: 0.4561 - acc: 0.8687 - val_loss: 1.0083 - val_acc: 0.6800\nEpoch 599/600\n434/434 [==============================] - 0s 555us/step - loss: 0.5375 - acc: 0.8387 - val_loss: 1.0084 - val_acc: 0.6800\nEpoch 600/600\n434/434 [==============================] - 0s 557us/step - loss: 0.5112 - acc: 0.8341 - val_loss: 1.0082 - val_acc: 0.6800\nTrain on 434 samples, validate on 175 samples\nEpoch 1/500\n434/434 [==============================] - 11s 25ms/step - loss: 0.5153 - acc: 0.8410 - val_loss: 1.0296 - val_acc: 0.6743\nEpoch 2/500\n434/434 [==============================] - 0s 554us/step - loss: 0.5564 - acc: 0.8341 - val_loss: 1.0384 - val_acc: 0.6857\nEpoch 3/500\n434/434 [==============================] - 0s 532us/step - loss: 0.4764 - acc: 0.8641 - val_loss: 1.0525 - val_acc: 0.6629\nEpoch 4/500\n434/434 [==============================] - 0s 555us/step - loss: 0.5067 - acc: 0.8387 - val_loss: 1.0719 - val_acc: 0.6571\nEpoch 5/500\n434/434 [==============================] - 0s 546us/step - loss: 0.4746 - acc: 0.8664 - val_loss: 1.0944 - val_acc: 0.6629\nEpoch 6/500\n434/434 [==============================] - 0s 560us/step - loss: 0.4778 - acc: 0.8318 - val_loss: 1.1077 - val_acc: 0.6800\nEpoch 7/500\n434/434 [==============================] - 0s 541us/step - loss: 0.4515 - acc: 0.8687 - val_loss: 1.1126 - val_acc: 0.6857\nEpoch 8/500\n434/434 [==============================] - 0s 525us/step - loss: 0.4673 - acc: 0.8641 - val_loss: 1.1135 - val_acc: 0.6857\nEpoch 9/500\n434/434 [==============================] - 0s 548us/step - loss: 0.4403 - acc: 0.8618 - val_loss: 1.1133 - val_acc: 0.6686\nEpoch 10/500\n434/434 [==============================] - 0s 551us/step - loss: 0.4647 - acc: 0.8525 - val_loss: 1.1099 - val_acc: 0.6914\nEpoch 11/500\n434/434 [==============================] - 0s 531us/step - loss: 0.3775 - acc: 0.8802 - val_loss: 1.1098 - val_acc: 0.6629\nEpoch 12/500\n434/434 [==============================] - 0s 525us/step - loss: 0.4666 - acc: 0.8525 - val_loss: 1.1194 - val_acc: 0.6571\nEpoch 13/500\n434/434 [==============================] - 0s 
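The 600-epoch run above spends its last ~250 epochs with val_acc frozen near 0.68, so a run like this could be cut short automatically. Below is a minimal sketch, not part of the original notebook, using Keras's EarlyStopping callback; the monitor and patience values and the commented fit arguments are illustrative assumptions (restore_best_weights requires a reasonably recent Keras 2.x).

# Minimal sketch (assumption, not from the original run): stop training once
# val_loss stops improving instead of grinding through all 600 epochs.
from keras.callbacks import EarlyStopping

early_stop = EarlyStopping(
    monitor='val_loss',          # the metric that plateaued around 1.01 above
    patience=50,                 # tolerate 50 epochs with no improvement
    restore_best_weights=True,   # roll back to the best epoch (Keras >= 2.2.3)
    verbose=1)

# Illustrative usage, mirroring the variables defined earlier in the notebook:
# model.fit(feature_train, np.array(target_train_int),
#           validation_data=(feature_test, np.array(target_test_int)),
#           epochs=600, callbacks=[early_stop])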
[second fit, epochs 1-293 of 500 condensed: train loss falls from ~0.52 to ~0.25 (train acc ~0.84 to ~0.95) while val_loss first climbs to ~1.15 around epoch 19 and then recovers to ~0.956; val_acc bottoms out at 0.6400, peaks at 0.7314 near epochs 86-89, and settles at ~0.7143]\nEpoch 294/500\n434/434 [==============================] - 0s 523us/step - loss: 0.2675 - acc: 0.9286 - 
val_loss: 0.9555 - val_acc: 0.7143\nEpoch 295/500\n434/434 [==============================] - 0s 554us/step - loss: 0.2618 - acc: 0.9194 - val_loss: 0.9554 - val_acc: 0.7143\nEpoch 296/500\n434/434 [==============================] - 0s 518us/step - loss: 0.2953 - acc: 0.9055 - val_loss: 0.9554 - val_acc: 0.7143\nEpoch 297/500\n434/434 [==============================] - 0s 532us/step - loss: 0.2392 - acc: 0.9355 - val_loss: 0.9554 - val_acc: 0.7143\nEpoch 298/500\n434/434 [==============================] - 0s 514us/step - loss: 0.2489 - acc: 0.9147 - val_loss: 0.9555 - val_acc: 0.7143\nEpoch 299/500\n434/434 [==============================] - 0s 548us/step - loss: 0.2797 - acc: 0.9171 - val_loss: 0.9555 - val_acc: 0.7143\nEpoch 300/500\n434/434 [==============================] - 0s 530us/step - loss: 0.2547 - acc: 0.9240 - val_loss: 0.9555 - val_acc: 0.7143\nEpoch 301/500\n434/434 [==============================] - 0s 539us/step - loss: 0.2585 - acc: 0.9194 - val_loss: 0.9554 - val_acc: 0.7143\nEpoch 302/500\n434/434 [==============================] - 0s 538us/step - loss: 0.2555 - acc: 0.9309 - val_loss: 0.9554 - val_acc: 0.7143\nEpoch 303/500\n434/434 [==============================] - 0s 552us/step - loss: 0.3311 - acc: 0.9101 - val_loss: 0.9554 - val_acc: 0.7143\nEpoch 304/500\n434/434 [==============================] - 0s 526us/step - loss: 0.2739 - acc: 0.9147 - val_loss: 0.9555 - val_acc: 0.7143\nEpoch 305/500\n434/434 [==============================] - 0s 551us/step - loss: 0.2773 - acc: 0.9286 - val_loss: 0.9555 - val_acc: 0.7143\nEpoch 306/500\n434/434 [==============================] - 0s 532us/step - loss: 0.2415 - acc: 0.9286 - val_loss: 0.9555 - val_acc: 0.7143\nEpoch 307/500\n434/434 [==============================] - 0s 557us/step - loss: 0.2527 - acc: 0.9194 - val_loss: 0.9553 - val_acc: 0.7143\nEpoch 308/500\n434/434 [==============================] - 0s 524us/step - loss: 0.2769 - acc: 0.9355 - val_loss: 0.9554 - val_acc: 0.7143\nEpoch 309/500\n434/434 [==============================] - 0s 545us/step - loss: 0.2292 - acc: 0.9493 - val_loss: 0.9552 - val_acc: 0.7143\nEpoch 310/500\n434/434 [==============================] - 0s 519us/step - loss: 0.2617 - acc: 0.9147 - val_loss: 0.9551 - val_acc: 0.7143\nEpoch 311/500\n434/434 [==============================] - 0s 525us/step - loss: 0.2627 - acc: 0.9309 - val_loss: 0.9550 - val_acc: 0.7143\nEpoch 312/500\n434/434 [==============================] - 0s 545us/step - loss: 0.2714 - acc: 0.9217 - val_loss: 0.9551 - val_acc: 0.7143\nEpoch 313/500\n434/434 [==============================] - 0s 540us/step - loss: 0.2560 - acc: 0.9263 - val_loss: 0.9551 - val_acc: 0.7143\nEpoch 314/500\n434/434 [==============================] - 0s 536us/step - loss: 0.2693 - acc: 0.9194 - val_loss: 0.9551 - val_acc: 0.7143\nEpoch 315/500\n434/434 [==============================] - 0s 577us/step - loss: 0.2205 - acc: 0.9401 - val_loss: 0.9550 - val_acc: 0.7143\nEpoch 316/500\n434/434 [==============================] - 0s 576us/step - loss: 0.2723 - acc: 0.9240 - val_loss: 0.9549 - val_acc: 0.7143\nEpoch 317/500\n434/434 [==============================] - 0s 533us/step - loss: 0.2647 - acc: 0.9309 - val_loss: 0.9549 - val_acc: 0.7143\nEpoch 318/500\n434/434 [==============================] - 0s 527us/step - loss: 0.2603 - acc: 0.9309 - val_loss: 0.9548 - val_acc: 0.7143\nEpoch 319/500\n434/434 [==============================] - 0s 532us/step - loss: 0.2491 - acc: 0.9378 - val_loss: 0.9548 - val_acc: 0.7143\nEpoch 320/500\n434/434 
[==============================] - 0s 533us/step - loss: 0.2658 - acc: 0.9263 - val_loss: 0.9549 - val_acc: 0.7143\nEpoch 321/500\n434/434 [==============================] - 0s 521us/step - loss: 0.2558 - acc: 0.9309 - val_loss: 0.9549 - val_acc: 0.7143\nEpoch 322/500\n434/434 [==============================] - 0s 529us/step - loss: 0.2473 - acc: 0.9286 - val_loss: 0.9551 - val_acc: 0.7143\nEpoch 323/500\n434/434 [==============================] - 0s 519us/step - loss: 0.2530 - acc: 0.9309 - val_loss: 0.9551 - val_acc: 0.7143\nEpoch 324/500\n434/434 [==============================] - 0s 533us/step - loss: 0.2705 - acc: 0.9240 - val_loss: 0.9550 - val_acc: 0.7143\nEpoch 325/500\n434/434 [==============================] - 0s 525us/step - loss: 0.2691 - acc: 0.9194 - val_loss: 0.9550 - val_acc: 0.7143\nEpoch 326/500\n434/434 [==============================] - 0s 551us/step - loss: 0.2850 - acc: 0.9171 - val_loss: 0.9550 - val_acc: 0.7143\nEpoch 327/500\n434/434 [==============================] - 0s 512us/step - loss: 0.3093 - acc: 0.9055 - val_loss: 0.9550 - val_acc: 0.7143\nEpoch 328/500\n434/434 [==============================] - 0s 532us/step - loss: 0.2804 - acc: 0.9240 - val_loss: 0.9552 - val_acc: 0.7143\nEpoch 329/500\n434/434 [==============================] - 0s 545us/step - loss: 0.2230 - acc: 0.9401 - val_loss: 0.9552 - val_acc: 0.7143\nEpoch 330/500\n434/434 [==============================] - 0s 545us/step - loss: 0.2731 - acc: 0.9124 - val_loss: 0.9552 - val_acc: 0.7143\nEpoch 331/500\n434/434 [==============================] - 0s 510us/step - loss: 0.2577 - acc: 0.9309 - val_loss: 0.9552 - val_acc: 0.7143\nEpoch 332/500\n434/434 [==============================] - 0s 538us/step - loss: 0.2530 - acc: 0.9447 - val_loss: 0.9551 - val_acc: 0.7143\nEpoch 333/500\n434/434 [==============================] - 0s 517us/step - loss: 0.2689 - acc: 0.9309 - val_loss: 0.9551 - val_acc: 0.7143\nEpoch 334/500\n434/434 [==============================] - 0s 534us/step - loss: 0.2526 - acc: 0.9263 - val_loss: 0.9551 - val_acc: 0.7143\nEpoch 335/500\n434/434 [==============================] - 0s 518us/step - loss: 0.2781 - acc: 0.9240 - val_loss: 0.9552 - val_acc: 0.7143\nEpoch 336/500\n434/434 [==============================] - 0s 526us/step - loss: 0.2008 - acc: 0.9493 - val_loss: 0.9553 - val_acc: 0.7143\nEpoch 337/500\n434/434 [==============================] - 0s 559us/step - loss: 0.2491 - acc: 0.9263 - val_loss: 0.9554 - val_acc: 0.7143\nEpoch 338/500\n434/434 [==============================] - 0s 532us/step - loss: 0.2551 - acc: 0.9378 - val_loss: 0.9554 - val_acc: 0.7143\nEpoch 339/500\n434/434 [==============================] - 0s 535us/step - loss: 0.2580 - acc: 0.9217 - val_loss: 0.9554 - val_acc: 0.7143\nEpoch 340/500\n434/434 [==============================] - 0s 541us/step - loss: 0.2587 - acc: 0.9309 - val_loss: 0.9554 - val_acc: 0.7143\nEpoch 341/500\n434/434 [==============================] - 0s 534us/step - loss: 0.2392 - acc: 0.9286 - val_loss: 0.9555 - val_acc: 0.7143\nEpoch 342/500\n434/434 [==============================] - 0s 525us/step - loss: 0.2520 - acc: 0.9401 - val_loss: 0.9555 - val_acc: 0.7143\nEpoch 343/500\n434/434 [==============================] - 0s 573us/step - loss: 0.2410 - acc: 0.9286 - val_loss: 0.9553 - val_acc: 0.7143\nEpoch 344/500\n434/434 [==============================] - 0s 546us/step - loss: 0.3135 - acc: 0.9009 - val_loss: 0.9552 - val_acc: 0.7143\nEpoch 345/500\n434/434 [==============================] - 0s 537us/step - loss: 0.2345 - acc: 0.9355 - 
val_loss: 0.9553 - val_acc: 0.7143\nEpoch 346/500\n434/434 [==============================] - 0s 550us/step - loss: 0.2648 - acc: 0.9286 - val_loss: 0.9553 - val_acc: 0.7143\nEpoch 347/500\n434/434 [==============================] - 0s 517us/step - loss: 0.2819 - acc: 0.9194 - val_loss: 0.9554 - val_acc: 0.7143\nEpoch 348/500\n434/434 [==============================] - 0s 521us/step - loss: 0.2445 - acc: 0.9263 - val_loss: 0.9554 - val_acc: 0.7143\nEpoch 349/500\n434/434 [==============================] - 0s 549us/step - loss: 0.2912 - acc: 0.9147 - val_loss: 0.9552 - val_acc: 0.7143\nEpoch 350/500\n434/434 [==============================] - 0s 552us/step - loss: 0.2382 - acc: 0.9470 - val_loss: 0.9551 - val_acc: 0.7143\nEpoch 351/500\n434/434 [==============================] - 0s 514us/step - loss: 0.2645 - acc: 0.9263 - val_loss: 0.9551 - val_acc: 0.7143\nEpoch 352/500\n434/434 [==============================] - 0s 554us/step - loss: 0.2629 - acc: 0.9378 - val_loss: 0.9552 - val_acc: 0.7143\nEpoch 353/500\n434/434 [==============================] - 0s 532us/step - loss: 0.2635 - acc: 0.9286 - val_loss: 0.9552 - val_acc: 0.7143\nEpoch 354/500\n434/434 [==============================] - 0s 549us/step - loss: 0.2624 - acc: 0.9286 - val_loss: 0.9552 - val_acc: 0.7143\nEpoch 355/500\n434/434 [==============================] - 0s 526us/step - loss: 0.2441 - acc: 0.9470 - val_loss: 0.9552 - val_acc: 0.7143\nEpoch 356/500\n434/434 [==============================] - 0s 566us/step - loss: 0.2716 - acc: 0.9263 - val_loss: 0.9550 - val_acc: 0.7143\nEpoch 357/500\n434/434 [==============================] - 0s 534us/step - loss: 0.3133 - acc: 0.8871 - val_loss: 0.9550 - val_acc: 0.7143\nEpoch 358/500\n434/434 [==============================] - 0s 534us/step - loss: 0.2656 - acc: 0.9171 - val_loss: 0.9551 - val_acc: 0.7143\nEpoch 359/500\n434/434 [==============================] - 0s 535us/step - loss: 0.2623 - acc: 0.9309 - val_loss: 0.9550 - val_acc: 0.7143\nEpoch 360/500\n434/434 [==============================] - 0s 585us/step - loss: 0.2667 - acc: 0.9309 - val_loss: 0.9549 - val_acc: 0.7143\nEpoch 361/500\n434/434 [==============================] - 0s 545us/step - loss: 0.2774 - acc: 0.9240 - val_loss: 0.9550 - val_acc: 0.7143\nEpoch 362/500\n434/434 [==============================] - 0s 535us/step - loss: 0.2457 - acc: 0.9378 - val_loss: 0.9551 - val_acc: 0.7200\nEpoch 363/500\n434/434 [==============================] - 0s 542us/step - loss: 0.2724 - acc: 0.9240 - val_loss: 0.9552 - val_acc: 0.7200\nEpoch 364/500\n434/434 [==============================] - 0s 530us/step - loss: 0.3136 - acc: 0.9171 - val_loss: 0.9552 - val_acc: 0.7200\nEpoch 365/500\n434/434 [==============================] - 0s 522us/step - loss: 0.2480 - acc: 0.9378 - val_loss: 0.9552 - val_acc: 0.7200\nEpoch 366/500\n434/434 [==============================] - 0s 530us/step - loss: 0.2370 - acc: 0.9286 - val_loss: 0.9551 - val_acc: 0.7200\nEpoch 367/500\n434/434 [==============================] - 0s 525us/step - loss: 0.2787 - acc: 0.9217 - val_loss: 0.9551 - val_acc: 0.7200\nEpoch 368/500\n434/434 [==============================] - 0s 537us/step - loss: 0.2860 - acc: 0.9355 - val_loss: 0.9552 - val_acc: 0.7200\nEpoch 369/500\n434/434 [==============================] - 0s 544us/step - loss: 0.2452 - acc: 0.9263 - val_loss: 0.9553 - val_acc: 0.7200\nEpoch 370/500\n434/434 [==============================] - 0s 550us/step - loss: 0.3083 - acc: 0.9194 - val_loss: 0.9554 - val_acc: 0.7200\nEpoch 371/500\n434/434 
[==============================] - 0s 530us/step - loss: 0.2470 - acc: 0.9263 - val_loss: 0.9553 - val_acc: 0.7200\nEpoch 372/500\n434/434 [==============================] - 0s 541us/step - loss: 0.2684 - acc: 0.9424 - val_loss: 0.9555 - val_acc: 0.7200\nEpoch 373/500\n434/434 [==============================] - 0s 552us/step - loss: 0.2923 - acc: 0.9101 - val_loss: 0.9555 - val_acc: 0.7200\nEpoch 374/500\n434/434 [==============================] - 0s 541us/step - loss: 0.2257 - acc: 0.9378 - val_loss: 0.9554 - val_acc: 0.7200\nEpoch 375/500\n434/434 [==============================] - 0s 521us/step - loss: 0.2704 - acc: 0.9217 - val_loss: 0.9554 - val_acc: 0.7200\nEpoch 376/500\n434/434 [==============================] - 0s 542us/step - loss: 0.2484 - acc: 0.9217 - val_loss: 0.9553 - val_acc: 0.7200\nEpoch 377/500\n434/434 [==============================] - 0s 566us/step - loss: 0.2414 - acc: 0.9401 - val_loss: 0.9553 - val_acc: 0.7200\nEpoch 378/500\n434/434 [==============================] - 0s 547us/step - loss: 0.2458 - acc: 0.9309 - val_loss: 0.9554 - val_acc: 0.7200\nEpoch 379/500\n434/434 [==============================] - 0s 539us/step - loss: 0.2453 - acc: 0.9378 - val_loss: 0.9554 - val_acc: 0.7200\nEpoch 380/500\n434/434 [==============================] - 0s 539us/step - loss: 0.2563 - acc: 0.9171 - val_loss: 0.9555 - val_acc: 0.7200\nEpoch 381/500\n434/434 [==============================] - 0s 545us/step - loss: 0.2653 - acc: 0.9263 - val_loss: 0.9558 - val_acc: 0.7200\nEpoch 382/500\n434/434 [==============================] - 0s 552us/step - loss: 0.2542 - acc: 0.9355 - val_loss: 0.9558 - val_acc: 0.7200\nEpoch 383/500\n434/434 [==============================] - 0s 528us/step - loss: 0.2378 - acc: 0.9562 - val_loss: 0.9558 - val_acc: 0.7200\nEpoch 384/500\n434/434 [==============================] - 0s 552us/step - loss: 0.2591 - acc: 0.9286 - val_loss: 0.9558 - val_acc: 0.7200\nEpoch 385/500\n434/434 [==============================] - 0s 526us/step - loss: 0.2574 - acc: 0.9332 - val_loss: 0.9557 - val_acc: 0.7200\nEpoch 386/500\n434/434 [==============================] - 0s 552us/step - loss: 0.2656 - acc: 0.9217 - val_loss: 0.9556 - val_acc: 0.7200\nEpoch 387/500\n434/434 [==============================] - 0s 539us/step - loss: 0.2561 - acc: 0.9217 - val_loss: 0.9557 - val_acc: 0.7200\nEpoch 388/500\n434/434 [==============================] - 0s 538us/step - loss: 0.2466 - acc: 0.9332 - val_loss: 0.9557 - val_acc: 0.7143\nEpoch 389/500\n434/434 [==============================] - 0s 542us/step - loss: 0.2331 - acc: 0.9217 - val_loss: 0.9557 - val_acc: 0.7143\nEpoch 390/500\n434/434 [==============================] - 0s 573us/step - loss: 0.2602 - acc: 0.9194 - val_loss: 0.9557 - val_acc: 0.7143\nEpoch 391/500\n434/434 [==============================] - 0s 567us/step - loss: 0.2400 - acc: 0.9470 - val_loss: 0.9557 - val_acc: 0.7200\nEpoch 392/500\n434/434 [==============================] - 0s 535us/step - loss: 0.2981 - acc: 0.9147 - val_loss: 0.9558 - val_acc: 0.7143\nEpoch 393/500\n434/434 [==============================] - 0s 534us/step - loss: 0.2449 - acc: 0.9332 - val_loss: 0.9559 - val_acc: 0.7143\nEpoch 394/500\n434/434 [==============================] - 0s 557us/step - loss: 0.2622 - acc: 0.9378 - val_loss: 0.9559 - val_acc: 0.7143\nEpoch 395/500\n434/434 [==============================] - 0s 527us/step - loss: 0.2535 - acc: 0.9286 - val_loss: 0.9559 - val_acc: 0.7143\nEpoch 396/500\n434/434 [==============================] - 0s 534us/step - loss: 0.2235 - acc: 0.9378 - 
val_loss: 0.9559 - val_acc: 0.7143\nEpoch 397/500\n434/434 [==============================] - 0s 526us/step - loss: 0.2337 - acc: 0.9424 - val_loss: 0.9559 - val_acc: 0.7143\nEpoch 398/500\n434/434 [==============================] - 0s 537us/step - loss: 0.2434 - acc: 0.9355 - val_loss: 0.9560 - val_acc: 0.7143\nEpoch 399/500\n434/434 [==============================] - 0s 526us/step - loss: 0.2356 - acc: 0.9447 - val_loss: 0.9561 - val_acc: 0.7143\nEpoch 400/500\n434/434 [==============================] - 0s 529us/step - loss: 0.2469 - acc: 0.9309 - val_loss: 0.9561 - val_acc: 0.7143\nEpoch 401/500\n434/434 [==============================] - 0s 519us/step - loss: 0.2777 - acc: 0.9217 - val_loss: 0.9562 - val_acc: 0.7143\nEpoch 402/500\n434/434 [==============================] - 0s 531us/step - loss: 0.3257 - acc: 0.9055 - val_loss: 0.9564 - val_acc: 0.7143\nEpoch 403/500\n434/434 [==============================] - 0s 532us/step - loss: 0.2276 - acc: 0.9493 - val_loss: 0.9565 - val_acc: 0.7143\nEpoch 404/500\n434/434 [==============================] - 0s 567us/step - loss: 0.2429 - acc: 0.9171 - val_loss: 0.9564 - val_acc: 0.7143\nEpoch 405/500\n434/434 [==============================] - 0s 556us/step - loss: 0.2929 - acc: 0.9147 - val_loss: 0.9564 - val_acc: 0.7143\nEpoch 406/500\n434/434 [==============================] - 0s 559us/step - loss: 0.2962 - acc: 0.9078 - val_loss: 0.9564 - val_acc: 0.7143\nEpoch 407/500\n434/434 [==============================] - 0s 540us/step - loss: 0.2532 - acc: 0.9401 - val_loss: 0.9564 - val_acc: 0.7143\nEpoch 408/500\n434/434 [==============================] - 0s 544us/step - loss: 0.2680 - acc: 0.9217 - val_loss: 0.9563 - val_acc: 0.7143\nEpoch 409/500\n434/434 [==============================] - 0s 519us/step - loss: 0.2242 - acc: 0.9332 - val_loss: 0.9563 - val_acc: 0.7143\nEpoch 410/500\n434/434 [==============================] - 0s 540us/step - loss: 0.2553 - acc: 0.9263 - val_loss: 0.9564 - val_acc: 0.7143\nEpoch 411/500\n434/434 [==============================] - 0s 533us/step - loss: 0.2352 - acc: 0.9470 - val_loss: 0.9564 - val_acc: 0.7143\nEpoch 412/500\n434/434 [==============================] - 0s 541us/step - loss: 0.2534 - acc: 0.9263 - val_loss: 0.9564 - val_acc: 0.7143\nEpoch 413/500\n434/434 [==============================] - 0s 545us/step - loss: 0.2625 - acc: 0.9355 - val_loss: 0.9562 - val_acc: 0.7143\nEpoch 414/500\n434/434 [==============================] - 0s 543us/step - loss: 0.2462 - acc: 0.9194 - val_loss: 0.9562 - val_acc: 0.7143\nEpoch 415/500\n434/434 [==============================] - 0s 532us/step - loss: 0.2534 - acc: 0.9194 - val_loss: 0.9560 - val_acc: 0.7143\nEpoch 416/500\n434/434 [==============================] - 0s 564us/step - loss: 0.2526 - acc: 0.9355 - val_loss: 0.9558 - val_acc: 0.7143\nEpoch 417/500\n434/434 [==============================] - 0s 532us/step - loss: 0.2658 - acc: 0.9286 - val_loss: 0.9557 - val_acc: 0.7143\nEpoch 418/500\n434/434 [==============================] - 0s 554us/step - loss: 0.2685 - acc: 0.9194 - val_loss: 0.9556 - val_acc: 0.7143\nEpoch 419/500\n434/434 [==============================] - 0s 543us/step - loss: 0.2777 - acc: 0.9171 - val_loss: 0.9555 - val_acc: 0.7143\nEpoch 420/500\n434/434 [==============================] - 0s 565us/step - loss: 0.2718 - acc: 0.9217 - val_loss: 0.9555 - val_acc: 0.7143\nEpoch 421/500\n434/434 [==============================] - 0s 549us/step - loss: 0.2492 - acc: 0.9424 - val_loss: 0.9555 - val_acc: 0.7143\nEpoch 422/500\n434/434 
[==============================] - 0s 534us/step - loss: 0.2870 - acc: 0.9194 - val_loss: 0.9555 - val_acc: 0.7143\nEpoch 423/500\n434/434 [==============================] - 0s 531us/step - loss: 0.2863 - acc: 0.9240 - val_loss: 0.9556 - val_acc: 0.7143\nEpoch 424/500\n434/434 [==============================] - 0s 534us/step - loss: 0.3100 - acc: 0.9194 - val_loss: 0.9556 - val_acc: 0.7143\nEpoch 425/500\n434/434 [==============================] - 0s 542us/step - loss: 0.2675 - acc: 0.9217 - val_loss: 0.9557 - val_acc: 0.7143\nEpoch 426/500\n434/434 [==============================] - 0s 535us/step - loss: 0.2635 - acc: 0.9240 - val_loss: 0.9558 - val_acc: 0.7143\nEpoch 427/500\n434/434 [==============================] - 0s 519us/step - loss: 0.2474 - acc: 0.9332 - val_loss: 0.9558 - val_acc: 0.7143\nEpoch 428/500\n434/434 [==============================] - 0s 524us/step - loss: 0.2884 - acc: 0.9147 - val_loss: 0.9558 - val_acc: 0.7143\nEpoch 429/500\n434/434 [==============================] - 0s 518us/step - loss: 0.2547 - acc: 0.9240 - val_loss: 0.9558 - val_acc: 0.7143\nEpoch 430/500\n434/434 [==============================] - 0s 543us/step - loss: 0.2398 - acc: 0.9309 - val_loss: 0.9558 - val_acc: 0.7143\nEpoch 431/500\n434/434 [==============================] - 0s 522us/step - loss: 0.2875 - acc: 0.9171 - val_loss: 0.9557 - val_acc: 0.7143\nEpoch 432/500\n434/434 [==============================] - 0s 535us/step - loss: 0.2523 - acc: 0.9263 - val_loss: 0.9557 - val_acc: 0.7200\nEpoch 433/500\n434/434 [==============================] - 0s 530us/step - loss: 0.2186 - acc: 0.9263 - val_loss: 0.9556 - val_acc: 0.7200\nEpoch 434/500\n434/434 [==============================] - 0s 529us/step - loss: 0.2326 - acc: 0.9355 - val_loss: 0.9556 - val_acc: 0.7143\nEpoch 435/500\n434/434 [==============================] - 0s 544us/step - loss: 0.2243 - acc: 0.9470 - val_loss: 0.9557 - val_acc: 0.7143\nEpoch 436/500\n434/434 [==============================] - 0s 547us/step - loss: 0.1967 - acc: 0.9608 - val_loss: 0.9558 - val_acc: 0.7143\nEpoch 437/500\n434/434 [==============================] - 0s 537us/step - loss: 0.2522 - acc: 0.9286 - val_loss: 0.9559 - val_acc: 0.7143\nEpoch 438/500\n434/434 [==============================] - 0s 548us/step - loss: 0.2371 - acc: 0.9332 - val_loss: 0.9558 - val_acc: 0.7143\nEpoch 439/500\n434/434 [==============================] - 0s 520us/step - loss: 0.2378 - acc: 0.9378 - val_loss: 0.9558 - val_acc: 0.7143\nEpoch 440/500\n434/434 [==============================] - 0s 555us/step - loss: 0.2658 - acc: 0.9171 - val_loss: 0.9560 - val_acc: 0.7143\nEpoch 441/500\n434/434 [==============================] - 0s 514us/step - loss: 0.2625 - acc: 0.9424 - val_loss: 0.9559 - val_acc: 0.7143\nEpoch 442/500\n434/434 [==============================] - 0s 541us/step - loss: 0.2378 - acc: 0.9493 - val_loss: 0.9558 - val_acc: 0.7143\nEpoch 443/500\n434/434 [==============================] - 0s 530us/step - loss: 0.2718 - acc: 0.9194 - val_loss: 0.9558 - val_acc: 0.7143\nEpoch 444/500\n434/434 [==============================] - 0s 555us/step - loss: 0.2749 - acc: 0.9217 - val_loss: 0.9558 - val_acc: 0.7143\nEpoch 445/500\n434/434 [==============================] - 0s 535us/step - loss: 0.2461 - acc: 0.9286 - val_loss: 0.9558 - val_acc: 0.7143\nEpoch 446/500\n434/434 [==============================] - 0s 551us/step - loss: 0.3036 - acc: 0.9332 - val_loss: 0.9557 - val_acc: 0.7143\nEpoch 447/500\n434/434 [==============================] - 0s 550us/step - loss: 0.2464 - acc: 0.9286 - 
val_loss: 0.9555 - val_acc: 0.7143\nEpoch 448/500\n434/434 [==============================] - 0s 554us/step - loss: 0.2287 - acc: 0.9355 - val_loss: 0.9555 - val_acc: 0.7200\nEpoch 449/500\n434/434 [==============================] - 0s 533us/step - loss: 0.2470 - acc: 0.9355 - val_loss: 0.9554 - val_acc: 0.7200\nEpoch 450/500\n434/434 [==============================] - 0s 542us/step - loss: 0.2904 - acc: 0.9217 - val_loss: 0.9553 - val_acc: 0.7200\nEpoch 451/500\n434/434 [==============================] - 0s 536us/step - loss: 0.2415 - acc: 0.9286 - val_loss: 0.9553 - val_acc: 0.7200\nEpoch 452/500\n434/434 [==============================] - 0s 551us/step - loss: 0.2510 - acc: 0.9355 - val_loss: 0.9553 - val_acc: 0.7200\nEpoch 453/500\n434/434 [==============================] - 0s 541us/step - loss: 0.2655 - acc: 0.9286 - val_loss: 0.9552 - val_acc: 0.7200\nEpoch 454/500\n434/434 [==============================] - 0s 545us/step - loss: 0.3215 - acc: 0.9147 - val_loss: 0.9552 - val_acc: 0.7200\nEpoch 455/500\n434/434 [==============================] - 0s 535us/step - loss: 0.2240 - acc: 0.9424 - val_loss: 0.9552 - val_acc: 0.7200\nEpoch 456/500\n434/434 [==============================] - 0s 542us/step - loss: 0.2416 - acc: 0.9401 - val_loss: 0.9552 - val_acc: 0.7200\nEpoch 457/500\n434/434 [==============================] - 0s 516us/step - loss: 0.2563 - acc: 0.9447 - val_loss: 0.9552 - val_acc: 0.7200\nEpoch 458/500\n434/434 [==============================] - 0s 539us/step - loss: 0.2734 - acc: 0.9263 - val_loss: 0.9551 - val_acc: 0.7200\nEpoch 459/500\n434/434 [==============================] - 0s 527us/step - loss: 0.2478 - acc: 0.9378 - val_loss: 0.9550 - val_acc: 0.7200\nEpoch 460/500\n434/434 [==============================] - 0s 554us/step - loss: 0.2437 - acc: 0.9286 - val_loss: 0.9549 - val_acc: 0.7200\nEpoch 461/500\n434/434 [==============================] - 0s 531us/step - loss: 0.2541 - acc: 0.9309 - val_loss: 0.9549 - val_acc: 0.7200\nEpoch 462/500\n434/434 [==============================] - 0s 532us/step - loss: 0.2911 - acc: 0.9217 - val_loss: 0.9550 - val_acc: 0.7143\nEpoch 463/500\n434/434 [==============================] - 0s 536us/step - loss: 0.2813 - acc: 0.9101 - val_loss: 0.9550 - val_acc: 0.7200\nEpoch 464/500\n434/434 [==============================] - 0s 542us/step - loss: 0.2881 - acc: 0.9032 - val_loss: 0.9551 - val_acc: 0.7143\nEpoch 465/500\n434/434 [==============================] - 0s 533us/step - loss: 0.2924 - acc: 0.9240 - val_loss: 0.9552 - val_acc: 0.7143\nEpoch 466/500\n434/434 [==============================] - 0s 535us/step - loss: 0.2538 - acc: 0.9332 - val_loss: 0.9551 - val_acc: 0.7086\nEpoch 467/500\n434/434 [==============================] - 0s 520us/step - loss: 0.2422 - acc: 0.9309 - val_loss: 0.9551 - val_acc: 0.7143\nEpoch 468/500\n434/434 [==============================] - 0s 517us/step - loss: 0.2731 - acc: 0.9240 - val_loss: 0.9550 - val_acc: 0.7086\nEpoch 469/500\n434/434 [==============================] - 0s 523us/step - loss: 0.2509 - acc: 0.9240 - val_loss: 0.9550 - val_acc: 0.7086\nEpoch 470/500\n434/434 [==============================] - 0s 555us/step - loss: 0.2469 - acc: 0.9309 - val_loss: 0.9550 - val_acc: 0.7086\nEpoch 471/500\n434/434 [==============================] - 0s 527us/step - loss: 0.2152 - acc: 0.9562 - val_loss: 0.9550 - val_acc: 0.7086\nEpoch 472/500\n434/434 [==============================] - 0s 538us/step - loss: 0.2654 - acc: 0.9286 - val_loss: 0.9550 - val_acc: 0.7086\nEpoch 473/500\n434/434 
[==============================] - 0s 510us/step - loss: 0.2738 - acc: 0.9286 - val_loss: 0.9549 - val_acc: 0.7143\nEpoch 474/500\n434/434 [==============================] - 0s 559us/step - loss: 0.2773 - acc: 0.9286 - val_loss: 0.9548 - val_acc: 0.7143\nEpoch 475/500\n434/434 [==============================] - 0s 523us/step - loss: 0.2520 - acc: 0.9240 - val_loss: 0.9549 - val_acc: 0.7143\nEpoch 476/500\n434/434 [==============================] - 0s 554us/step - loss: 0.2771 - acc: 0.9263 - val_loss: 0.9549 - val_acc: 0.7143\nEpoch 477/500\n434/434 [==============================] - 0s 520us/step - loss: 0.2457 - acc: 0.9171 - val_loss: 0.9550 - val_acc: 0.7086\nEpoch 478/500\n434/434 [==============================] - 0s 535us/step - loss: 0.2702 - acc: 0.9147 - val_loss: 0.9549 - val_acc: 0.7086\nEpoch 479/500\n434/434 [==============================] - 0s 539us/step - loss: 0.2378 - acc: 0.9470 - val_loss: 0.9548 - val_acc: 0.7086\nEpoch 480/500\n434/434 [==============================] - 0s 547us/step - loss: 0.2243 - acc: 0.9516 - val_loss: 0.9548 - val_acc: 0.7086\nEpoch 481/500\n434/434 [==============================] - 0s 541us/step - loss: 0.2582 - acc: 0.9286 - val_loss: 0.9547 - val_acc: 0.7086\nEpoch 482/500\n434/434 [==============================] - 0s 545us/step - loss: 0.2484 - acc: 0.9424 - val_loss: 0.9547 - val_acc: 0.7086\nEpoch 483/500\n434/434 [==============================] - 0s 533us/step - loss: 0.2580 - acc: 0.9447 - val_loss: 0.9548 - val_acc: 0.7086\nEpoch 484/500\n434/434 [==============================] - 0s 548us/step - loss: 0.2994 - acc: 0.9171 - val_loss: 0.9549 - val_acc: 0.7086\nEpoch 485/500\n434/434 [==============================] - 0s 539us/step - loss: 0.2650 - acc: 0.9217 - val_loss: 0.9549 - val_acc: 0.7086\nEpoch 486/500\n434/434 [==============================] - 0s 543us/step - loss: 0.2872 - acc: 0.9171 - val_loss: 0.9549 - val_acc: 0.7086\nEpoch 487/500\n434/434 [==============================] - 0s 535us/step - loss: 0.2336 - acc: 0.9355 - val_loss: 0.9547 - val_acc: 0.7086\nEpoch 488/500\n434/434 [==============================] - 0s 548us/step - loss: 0.2566 - acc: 0.9263 - val_loss: 0.9547 - val_acc: 0.7143\nEpoch 489/500\n434/434 [==============================] - 0s 527us/step - loss: 0.2815 - acc: 0.9240 - val_loss: 0.9546 - val_acc: 0.7143\nEpoch 490/500\n434/434 [==============================] - 0s 530us/step - loss: 0.2594 - acc: 0.9240 - val_loss: 0.9546 - val_acc: 0.7143\nEpoch 491/500\n434/434 [==============================] - 0s 517us/step - loss: 0.2239 - acc: 0.9424 - val_loss: 0.9545 - val_acc: 0.7143\nEpoch 492/500\n434/434 [==============================] - 0s 548us/step - loss: 0.2793 - acc: 0.9263 - val_loss: 0.9543 - val_acc: 0.7143\nEpoch 493/500\n434/434 [==============================] - 0s 519us/step - loss: 0.2419 - acc: 0.9332 - val_loss: 0.9543 - val_acc: 0.7143\nEpoch 494/500\n434/434 [==============================] - 0s 548us/step - loss: 0.2224 - acc: 0.9401 - val_loss: 0.9543 - val_acc: 0.7143\nEpoch 495/500\n434/434 [==============================] - 0s 523us/step - loss: 0.2546 - acc: 0.9332 - val_loss: 0.9542 - val_acc: 0.7143\nEpoch 496/500\n434/434 [==============================] - 0s 548us/step - loss: 0.2818 - acc: 0.9171 - val_loss: 0.9541 - val_acc: 0.7143\nEpoch 497/500\n434/434 [==============================] - 0s 543us/step - loss: 0.2898 - acc: 0.9240 - val_loss: 0.9543 - val_acc: 0.7143\nEpoch 498/500\n434/434 [==============================] - 0s 556us/step - loss: 0.2428 - acc: 0.9401 - 
val_loss: 0.9541 - val_acc: 0.7143\nEpoch 499/500\n434/434 [==============================] - 0s 528us/step - loss: 0.2351 - acc: 0.9332 - val_loss: 0.9540 - val_acc: 0.7143\nEpoch 500/500\n434/434 [==============================] - 0s 533us/step - loss: 0.2645 - acc: 0.9147 - val_loss: 0.9539 - val_acc: 0.7143\n"
],
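[
"# The 500-epoch log above shows val_loss and val_acc plateauing long before training ends.\n# A hedged sketch of cutting training short with a Keras EarlyStopping callback.\n# The fit call is left commented out because the exact model/data variable names used in the\n# fit cell above are assumptions here.\nfrom keras.callbacks import EarlyStopping\n\nearly_stop = EarlyStopping(monitor='val_loss', patience=25)\n# history = model.fit(feature_train, np.array(target_train_int),\n#                     validation_split=0.2, epochs=500, callbacks=[early_stop])",
"_____no_output_____"
],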
[
"# Plot training & validation accuracy values\nplt.plot(history.history['acc'])\nplt.plot(history.history['val_acc'])\nplt.title('Model accuracy')\nplt.ylabel('Accuracy')\nplt.xlabel('Epoch')\nplt.legend(['Train', 'Test'], loc='upper left')\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Calculate average ",
"_____no_output_____"
]
],
[
[
"(0.63+0.66+0.68)/3",
"_____no_output_____"
],
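[
"# Equivalent using numpy, assuming the three values above are scores from separate runs\nprint(np.mean([0.63, 0.66, 0.68]))",
"_____no_output_____"
],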
[
"",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
d0ec1496beb81a426bf943aa5ecc7371c40a0beb | 134,109 | ipynb | Jupyter Notebook | Face_Rec_System.ipynb | zeynepCankara/Face-Recognition-Tensorflow | eefef4eca3ae85ed85bd29161d1fba3b24ad43de | [
"MIT"
] | 6 | 2019-02-26T14:54:03.000Z | 2022-02-12T17:06:34.000Z | Face_Rec_System.ipynb | zeynepCankara/Face-Recognition-Tensorflow | eefef4eca3ae85ed85bd29161d1fba3b24ad43de | [
"MIT"
] | null | null | null | Face_Rec_System.ipynb | zeynepCankara/Face-Recognition-Tensorflow | eefef4eca3ae85ed85bd29161d1fba3b24ad43de | [
"MIT"
] | 3 | 2019-09-11T09:58:53.000Z | 2022-01-17T14:23:54.000Z | 211.195276 | 111,396 | 0.896584 | [
[
[
"# Face Recognition & Verification for Person Identification\n\nInspired by Coursera deeplearning.ai's assignment of programming a face recognition for happy house, I wanted to give it a try implementing a face recognition system by using face detection library(https://github.com/ageitgey/face_recognition) and face_recognition model from deeplearning.ai course specialization. \n\nIn this notebook, I implemented a person identification system by using pre-trained model to map face images into 128 dimensional encodings.\n\n\nIn the notebook,\n- I tried to implement pre-processing process for the images by using face detection library\n- Kept track of encodings of a person and try to improve performance by adding more pictures of a person (more embeddings of the same person)\n- Detect and identify people given an specific image\n- Implement triple loss function\n- Implement face verification and face recognition step\n- I save unknown encodings in the database dictionary for later identification\n",
"_____no_output_____"
]
],
[
[
"#import the necessary packages\nfrom keras.models import Sequential\nfrom keras.layers import Conv2D, ZeroPadding2D, Activation, Input, concatenate\nfrom keras.models import Model\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.layers.pooling import MaxPooling2D, AveragePooling2D\nfrom keras.layers.merge import Concatenate\nfrom keras.layers.core import Lambda, Flatten, Dense\nfrom keras.initializers import glorot_uniform\nfrom keras.engine.topology import Layer\nfrom keras import backend as K\nK.set_image_data_format('channels_first')\nimport cv2\nimport os\nimport numpy as np\nfrom numpy import genfromtxt\nimport pandas as pd\nimport tensorflow as tf\nfrom fr_utils import *\nfrom inception_blocks import *\nimport matplotlib.pyplot as plt\nimport face_recognition\nfrom PIL import Image\n\n\n%matplotlib inline\n%load_ext autoreload\n%autoreload 2\n\nnp.set_printoptions(threshold=np.nan)",
"Using TensorFlow backend.\n"
],
[
"# Initialize the model\n# The model takes images with shape (3, 96, 96) 'channels first' \nFRmodel = faceRecoModel(input_shape=(3, 96, 96))",
"_____no_output_____"
],
[
"#Showing the architecture of the model\nFRmodel.summary()",
"_____no_output_____"
],
[
"# using triplets of images, for triplet loss function\n# anchor (A): picture of the person\n# positive (P): picture of the same person of the anchor image\n# negative (N): picture of a different person than the anchor image(person)\n# Goal: Individual's encoding should be closer to the positive image and further away from negative image by margin alpha\n\ndef triplet_loss(y_true, y_pred, alpha = 0.2):\n \"\"\"\n Implementation of the triplet loss\n \n Arguments:\n y_true -- true labels, required when you define a loss in Keras, you don't need it in this function.\n y_pred -- python list containing three objects:\n anchor -- the encodings for the anchor images, of shape (None, 128)\n positive -- the encodings for the positive images, of shape (None, 128)\n negative -- the encodings for the negative images, of shape (None, 128)\n \n Returns:\n loss -- real number, value of the loss\n \"\"\"\n \n anchor, positive, negative = y_pred[0], y_pred[1], y_pred[2]\n \n # (encoding) distance between the anchor and the positive\n pos_dist = tf.square(tf.subtract(anchor, positive)) \n # (encoding) distance between the anchor and the negative\n neg_dist = tf.square(tf.subtract(anchor, negative)) \n # Subtracting the two previous distances and adding an alpha.\n basic_loss = tf.add(tf.reduce_sum(tf.subtract(pos_dist, neg_dist)), alpha)\n # Taking the maximum of basic_loss and 0.0. Summing over the training examples.\n loss = tf.reduce_sum(tf.maximum(basic_loss, 0))\n \n return loss",
"_____no_output_____"
],
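[
"# A quick sanity check of triplet_loss on random tensors: a sketch assuming the\n# TensorFlow 1.x Session API used by this notebook's Keras backend.\nwith tf.Session() as test:\n    tf.set_random_seed(1)\n    y_true = (None, None, None)\n    y_pred = (tf.random_normal([3, 128], mean=6, stddev=0.1, seed=1),\n              tf.random_normal([3, 128], mean=1, stddev=1, seed=1),\n              tf.random_normal([3, 128], mean=3, stddev=4, seed=1))\n    loss = triplet_loss(y_true, y_pred)\n    print(\"loss = \" + str(loss.eval()))",
"_____no_output_____"
],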
[
"# Compile the model\nFRmodel.compile(optimizer = 'adam', loss = triplet_loss, metrics = ['accuracy'])\nload_weights_from_FaceNet(FRmodel)",
"_____no_output_____"
],
[
"#Function for resizing an image\ndef pre_process_image(img, image_size):\n \"\"\"\n Resizes an image into given image_size (height, width, channel)\n \n Arguments:\n img -- original image, array\n image_size -- tuple containing width, height, channel of the image (h, w, c)\n \n Returns:\n img -- resized image\n \"\"\"\n height, width, channels = image_size\n img = cv2.resize(img, dsize=(height, width))\n return img",
"_____no_output_____"
],
[
"# Function for identifying face locations on an image\n\ndef find_face_locations(image_path):\n \"\"\"\n returns the bounding box locations of the faces, image from the path\n \n Arguments:\n image_path -- destination of the original image\n image_size -- tuple containing width and height of the image (h, w)\n \n Returns:\n (top, right, bottom, left), image -- bounding box\n if multiple faces present in the picture returns a list of tuples, \n image obtained from image_path\n \"\"\"\n \n # Use face recognition module to detect faces\n image = face_recognition.load_image_file(image_path)\n \n #Test: print(\"Shape of the image: \" + str(image.shape))\n \n face_locations = face_recognition.face_locations(image)\n for face_location in face_locations:\n\n # Print the location of each face in this image\n top, right, bottom, left = face_location\n print(\"A face is located at pixel location Top: {}, Left: {}, Bottom: {}, Right: {}\".format(top, left, bottom, right))\n return face_locations, image\n\n # access the actual face itself and print\n #face_image = image[top:bottom, left:right]\n #pil_image = Image.fromarray(face_image)\n #pil_image.show()",
"_____no_output_____"
]
],
[
[
"## Image to Embedding\n\n`face_img_to_encoding(image_path, model)` : basically runs the forward propagation of the model on the specified image.",
"_____no_output_____"
]
],
[
[
"def face_img_to_encoding(image_path, model):\n \"\"\"\n returns the embedding vector of the specific image from the path\n \n Arguments:\n image_path -- Destination of the original image\n model -- Inception model instance in Keras\n \n Returns:\n embeddings -- List containing embeddings of the people in the image\n \"\"\"\n \n # obtain the face locations and the image\n face_locations, image = find_face_locations(image_path)\n \n #initialize the embeddings list\n embeddings = []\n \n #initialize embeddings list\n for face_location in face_locations:\n\n # Print the location of each face in this image\n top, right, bottom, left = face_location\n\n # access the actual face itself\n face_image = image[top:bottom, left:right]\n \n # resize the cropped face image\n image_size = (96, 96, 3)\n img = pre_process_image(face_image, image_size)\n \n # pre-process the face image\n img = img[...,::-1]\n img = np.around(np.transpose(img, (2,0,1))/255.0, decimals=12)\n x_train = np.array([img])\n embedding = model.predict_on_batch(x_train)\n embeddings.append(embedding)\n \n return embeddings",
"_____no_output_____"
]
],
[
[
"## Create the Database",
"_____no_output_____"
]
],
[
[
"# Create a initial database for identifying people\ndatabase = {}\ndatabase[\"leonardo dicaprio\"] = face_img_to_encoding(\"my_images/dicaprio.jpg\", FRmodel)\ndatabase[\"brad pitt\"] = face_img_to_encoding(\"my_images/bradPitt1.jpg\", FRmodel)\ndatabase[\"matt damon\"] = face_img_to_encoding(\"my_images/mattDamon.jpg\", FRmodel)\ndatabase[\"unknown\"] = face_img_to_encoding(\"my_images/unknown.jpg\", FRmodel)",
"A face is located at pixel location Top: 142, Left: 82, Bottom: 409, Right: 349\nA face is located at pixel location Top: 66, Left: 56, Bottom: 156, Right: 145\nA face is located at pixel location Top: 167, Left: 241, Bottom: 390, Right: 464\nA face is located at pixel location Top: 56, Left: 91, Bottom: 163, Right: 199\n"
],
[
"# Test for face_img_to_encoding\nembedding = face_img_to_encoding(\"my_images/dicaprio.jpg\", FRmodel)\nimg = cv2.imread(\"my_images/dicaprio.jpg\")\n#Visualize the image\nplt.imshow(img)\n#Visualize the embedding\nprint(embedding)",
"A face is located at pixel location Top: 142, Left: 82, Bottom: 409, Right: 349\n[array([[ 1.78886820e-02, 5.65266944e-02, 2.81436834e-02,\n -3.98353711e-02, -4.05346490e-02, 2.42677256e-02,\n -1.41860060e-02, 1.34195894e-01, 1.05123438e-01,\n 8.81131622e-04, 1.01466812e-01, -8.59745964e-02,\n 3.61334421e-02, 1.36936437e-02, 1.07706212e-01,\n -9.76482034e-02, 1.09040879e-01, -3.92250493e-02,\n -1.59164774e-03, 8.56322497e-02, -6.27493719e-03,\n -7.53134266e-02, 4.57189083e-02, 1.09545767e-01,\n 1.58418100e-02, -6.24672361e-02, 7.43721724e-02,\n -4.84097283e-03, 8.69202614e-02, -1.08194493e-01,\n 1.53262347e-01, -1.19411446e-01, 1.46491051e-01,\n 6.78103715e-02, 1.05306681e-03, 6.50675818e-02,\n 1.28551364e-01, 2.16996863e-01, -6.92940950e-02,\n -1.01205513e-01, 2.92131654e-03, -3.01704537e-02,\n -7.62767419e-02, -1.59392293e-04, -5.51992096e-02,\n -7.61034936e-02, -2.71737501e-02, 3.36873084e-02,\n -5.50678819e-02, 1.64355487e-02, -2.01823935e-01,\n 2.80639101e-02, 2.62900982e-02, -5.68179227e-03,\n 7.80988671e-03, 4.80307676e-02, -1.23724811e-01,\n 2.57097576e-02, -9.84600559e-02, -1.50076762e-01,\n -1.83070391e-01, -7.95650557e-02, 9.46223363e-02,\n -1.57554477e-01, 8.20735283e-03, 1.56251803e-01,\n 4.62346263e-02, 4.09039203e-03, -1.08973466e-01,\n -1.49349812e-02, -3.98177989e-02, -1.01187043e-01,\n -1.19685106e-01, -2.06102151e-02, 1.08234286e-01,\n -1.04007803e-01, 6.58156872e-02, 8.16550404e-02,\n 9.94403884e-02, -3.92596098e-03, -2.03501552e-01,\n -2.72905082e-02, -4.40525971e-02, -8.73954594e-02,\n 1.25980958e-01, -5.50476387e-02, -4.90165763e-02,\n 7.92811736e-02, 1.47447377e-01, 7.64459521e-02,\n 1.38689131e-01, -1.13438167e-01, 4.61512730e-02,\n 2.68166736e-02, 1.10981353e-01, -9.36262831e-02,\n -2.34077014e-02, 1.31480377e-02, -4.84763570e-02,\n -3.79955210e-02, -5.08448854e-02, -4.28983606e-02,\n -6.73183352e-02, -3.99561785e-02, -5.91101646e-02,\n 1.40449509e-01, 7.40163699e-02, -1.36735305e-01,\n 1.53942287e-01, -1.40752524e-01, -3.72800492e-02,\n 1.12037450e-01, -4.00800118e-03, 6.33173957e-02,\n 1.35813758e-01, -5.72601743e-02, -1.13909235e-02,\n -1.52757317e-01, 1.16520829e-01, 1.04254454e-01,\n 1.83233805e-02, -2.00256743e-02, -4.75163721e-02,\n 1.59628108e-01, 5.24705462e-02, -2.48438977e-02,\n 1.44212216e-01, -3.13714966e-02]], dtype=float32)]\n"
]
],
[
[
"## Face Verification\n\nFace Verification is a 1:1 matching problem given identity of a person program identifies if the picture of a person matches with identity\n\n- verify() function below implements simple face-verification functionality",
"_____no_output_____"
]
],
[
[
"def verify(image_path, identity, database, model):\n \"\"\"\n Function that verifies if the person on the \"image_path\" image is \"identity\".\n \n Arguments:\n image_path -- path to an image\n identity -- string, name of the person.\n database -- python dictionary mapping names of allowed people's names (strings) to their encodings (vectors).\n model -- Inception model instance in Keras\n \n Returns:\n dist -- distance between the image_path and the image of \"identity\" in the database.\n match -- True, if person(embedding) matches with the identity(embedding) .\n \"\"\"\n \n # Encodings in the image. \n encodings = face_img_to_encoding(image_path, FRmodel)\n \n #Loop inside encodings to obtain encoding of each person\n for encoding in encodings:\n # Step 2: Compute distance with identity's image \n dist = np.linalg.norm(encoding - database[identity])\n \n # Step 3: Match if dist < 0.8\n if dist < 0.8:\n print(str(identity) + \", you are verified\")\n match = True\n else:\n print(\"You're not \" + str(identity) + \"!!!\")\n match = False\n \n return dist, match",
"_____no_output_____"
]
],
[
[
"## Let's see if we can verify Matt Damon",
"_____no_output_____"
]
],
[
[
"verify(\"my_images/dicaprio.jpg\", \"matt damon\", database, FRmodel)",
"A face is located at pixel location Top: 142, Left: 82, Bottom: 409, Right: 349\nYou're not matt damon!!!\n"
],
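[
"# Hedged illustration of the distance check that verify() performs, comparing two\n# encodings already stored in the database built above (each entry is a list of embeddings).\nenc_damon = database[\"matt damon\"][0]\nenc_pitt = database[\"brad pitt\"][0]\nprint(\"L2 distance between the Matt Damon and Brad Pitt encodings: \" + str(np.linalg.norm(enc_damon - enc_pitt)))",
"_____no_output_____"
],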
[
"verify(\"my_images/mattDamon1.jpg\", \"matt damon\", database, FRmodel)",
"A face is located at pixel location Top: 98, Left: 98, Bottom: 253, Right: 253\nmatt damon, you are verified\n"
]
],
[
[
"## Face Recognition\n\nIdentifies the person withou needing to provide an identity. This is a 1:K matching problem. \n\nSteps:\n1. Compute the target encoding of the image from image_path\n2. Find the encoding from the database that has smallest distance with the target encoding. ",
"_____no_output_____"
]
],
[
[
"def recognize(image_path, database, model):\n \"\"\"\n Implements face recognition by finding who is the person on the image_path image.\n \n Arguments:\n image_path -- path to an image\n database -- database containing image encodings along with the name of the person on the image\n model -- Inception model instance in Keras\n \n Returns:\n identities -- list, containing names of the predicted people on the image_path image\n \"\"\"\n \n ## Step 1: Compute the encodings\n encodings = face_img_to_encoding(image_path, model)\n \n # Initialize the lists for keeping track of people in the picture\n identities = []\n unknown_encodings = []\n \n # Loop over person encodings in the specific image\n for encoding in encodings:\n \n ## Step 2: Find the closest encoding ##\n \n # Initializing \"min_dist\" to a large value, say 100 \n min_dist = 100\n \n # Loop over the database dictionary's names and encodings.\n for (name, db_encodings) in database.items():\n \n for db_enc in db_encodings:\n \n # Compute L2 distance between the target \"encoding\" and the current \"emb\" from the database. \n dist = np.linalg.norm(encoding - db_enc)\n\n # If this distance is less than the min_dist, then set min_dist to dist, and identity to name. \n if dist < min_dist:\n min_dist = dist\n identity = name\n \n if min_dist > 0.8:\n print(\"Not in the database.\")\n #Add the encoding in the database for unknown encodings\n unknown_encodings.append(encoding)\n \n else:\n if identity not in identities and identity != \"unknown\":\n print (\"You're \" + str(identity) + \", the distance is \" + str(min_dist))\n #Add the encoding to the known person's encoding list so that model can become more robust.\n identities.append(identity)\n face_encodings = database[str(identity)]\n face_encodings.append(encoding)\n database[str(identity)] = face_encodings\n \n for encoding in unknown_encodings:\n unknown = database[\"unknown\"]\n unknown.append(encoding)\n database[\"unknown\"] = unknown\n \n return identities",
"_____no_output_____"
]
],
[
[
"## Let's see if the database can recognize unseen picture of Matt Damon",
"_____no_output_____"
]
],
[
[
"recognize(\"my_images/mattDamon1.jpg\", database, FRmodel)",
"A face is located at pixel location Top: 98, Left: 98, Bottom: 253, Right: 253\nNot in the database.\nNot in the database.\nYou're matt damon, the distance is 0.74256897\n"
]
],
[
[
"### End of The Recognition & Verification, Congratulations\n\nKeep Learning...",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
d0ec1e6ce1eba9ee7247e7b7de27155755606c7a | 3,921 | ipynb | Jupyter Notebook | Practice/04-15.ipynb | GD-park/python_basic | d585a96f1510681937c3e9c9e3b3fccdab2bd7fb | [
"MIT"
] | null | null | null | Practice/04-15.ipynb | GD-park/python_basic | d585a96f1510681937c3e9c9e3b3fccdab2bd7fb | [
"MIT"
] | null | null | null | Practice/04-15.ipynb | GD-park/python_basic | d585a96f1510681937c3e9c9e3b3fccdab2bd7fb | [
"MIT"
] | null | null | null | 23.620482 | 98 | 0.470543 | [
[
[
"def binary_search(nums, x):\n s = 0\n e = len(nums) - 1\n \n while s <= e:\n mid = (s + e) / 2\n if nums[mid] == x:\n return mid\n elif nums[mid] > x:\n e = mid - 1\n else:\n s = mid + 1\n # 찾고자 하는 수가 구간안에 없으면 mid값보다 한칸 더 큰값으로 옮겨줌\n \n return -1\n # 찾고자 하는 수가 list에 없으면 -1값으로 변환\n\nnums = [1, 2, 4, 5, 7, 8, 10, 22, 34, 56, 89, 100]\nprint binary_search(nums, 8)\nprint binary_search(nums, 1)\nprint binary_search(nums, 89)\nprint binary_search(nums, 88)\nprint binary_search(nums, 100)\nprint binary_search(nums, 101)",
"_____no_output_____"
],
[
"def factorial(n):\n mul = 1\n for i in range(2, n+1):\n mul *= i\n \n return mul\n\n\n# assert는 True가 전달되면 아무런 동작을 하지 않고, False가 전달되면 예외 발생\nassert(factorial(5) == 120)\nassert(factorial(4) == 24)\nassert(factorial(1) == 1)",
"_____no_output_____"
],
[
"def recursive_fibonacci(n):\n if n == 1 or n == 2:\n return 1\n \n return recursive_fibonacci(n-1) + recursive_fibonacci(n-2)\n\nprint recursive_fibonacci(15)\n%timeit recursive_fibonacci(20)",
"_____no_output_____"
],
[
"fib_cache = {}\n\ndef recursive_fibonacci_memo(n):\n if n in fib_cache:\n return fib_cache[n]\n else:\n if n == 1 or n == 2:\n fib_cache[n] = 1\n else:\n fib_cache[n] = recursive_fibonacci_memo(n-2) + recursive_fibonacci_memo(n-1)\n \n return fib_cache[n]\n\nprint recursive_fibonacci_memo(15)\n%timeit recursive_fibonacci_memo(20)",
"_____no_output_____"
],
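[
"# A sketch of a bottom-up alternative to the memoized version above,\n# kept in the same Python 2 style as the rest of this notebook.\ndef iterative_fibonacci(n):\n    a, b = 0, 1\n    for _ in range(n):\n        a, b = b, a + b\n    return a\n\nprint iterative_fibonacci(15)\n%timeit iterative_fibonacci(20)",
"_____no_output_____"
],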
[
"def square(x): \n return x ** 2\n\nlambda x : x**2 #위에 코드를 더 간결하게 1줄로 줄일 수 있는 함수\n\nsquare2 = lambda x : x**2\n\nprint square\nprint square2\n\nprint square(4), square2(4)",
"_____no_output_____"
],
[
"nums = [(1, 2), (9, 5), (8, 4), (7, 6), (10, 2), (4, 5)]\n\ndef get_key(item):\n return item[1]\n\nprint sorted(nums)\nprint sorted(nums, key = get_key)\nprint sorted(nums, key = lambda item : item[1])\nprint sorted(nums, key = lambda item : item[0])\nprint sorted(nums, key = lambda item : item[0], reverse = True) #내림차순으로 정렬할 때",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d0ec1fcf8ba956449247bd3d924f979e96bbb36b | 27,941 | ipynb | Jupyter Notebook | bmcCompletedTutorials/tutorials/keras/basic_text_classification.ipynb | meisben/docs | 3e29793bf7d6ad8b474739a17a85872f6ebdd244 | [
"Apache-2.0"
] | null | null | null | bmcCompletedTutorials/tutorials/keras/basic_text_classification.ipynb | meisben/docs | 3e29793bf7d6ad8b474739a17a85872f6ebdd244 | [
"Apache-2.0"
] | null | null | null | bmcCompletedTutorials/tutorials/keras/basic_text_classification.ipynb | meisben/docs | 3e29793bf7d6ad8b474739a17a85872f6ebdd244 | [
"Apache-2.0"
] | null | null | null | 35.684547 | 454 | 0.545184 | [
[
[
"<a href=\"https://colab.research.google.com/github/meisben/docs/blob/master/bmcCompletedTutorials/tutorials/keras/basic_text_classification.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"##### Copyright 2018 The TensorFlow Authors.",
"_____no_output_____"
]
],
[
[
"#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.",
"_____no_output_____"
],
[
"#@title MIT License\n#\n# Copyright (c) 2017 François Chollet\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.",
"_____no_output_____"
]
],
[
[
"# Text classification with movie reviews",
"_____no_output_____"
],
[
"<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/tutorials/keras/basic_text_classification\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/keras/basic_text_classification.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/docs/blob/master/site/en/tutorials/keras/basic_text_classification.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n</table>",
"_____no_output_____"
],
[
"\nThis notebook classifies movie reviews as *positive* or *negative* using the text of the review. This is an example of *binary*—or two-class—classification, an important and widely applicable kind of machine learning problem.\n\nWe'll use the [IMDB dataset](https://www.tensorflow.org/api_docs/python/tf/keras/datasets/imdb) that contains the text of 50,000 movie reviews from the [Internet Movie Database](https://www.imdb.com/). These are split into 25,000 reviews for training and 25,000 reviews for testing. The training and testing sets are *balanced*, meaning they contain an equal number of positive and negative reviews.\n\nThis notebook uses [tf.keras](https://www.tensorflow.org/guide/keras), a high-level API to build and train models in TensorFlow. For a more advanced text classification tutorial using `tf.keras`, see the [MLCC Text Classification Guide](https://developers.google.com/machine-learning/guides/text-classification/).",
"_____no_output_____"
]
],
[
[
"# keras.datasets.imdb is broken in TensorFlow 1.13 and 1.14 when used with numpy 1.16.3\n!pip install tf_nightly",
"_____no_output_____"
],
[
"from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport tensorflow as tf\nfrom tensorflow import keras\n\nimport numpy as np\n\nprint(tf.__version__)",
"_____no_output_____"
]
],
[
[
"## Download the IMDB dataset\n\nThe IMDB dataset comes packaged with TensorFlow. It has already been preprocessed such that the reviews (sequences of words) have been converted to sequences of integers, where each integer represents a specific word in a dictionary.\n\nThe following code downloads the IMDB dataset to your machine (or uses a cached copy if you've already downloaded it):",
"_____no_output_____"
]
],
[
[
"imdb = keras.datasets.imdb\n\n(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000)",
"_____no_output_____"
]
],
[
[
"The argument `num_words=10000` keeps the top 10,000 most frequently occurring words in the training data. The rare words are discarded to keep the size of the data manageable.",
"_____no_output_____"
],
[
"## Explore the data\n\nLet's take a moment to understand the format of the data. The dataset comes preprocessed: each example is an array of integers representing the words of the movie review. Each label is an integer value of either 0 or 1, where 0 is a negative review, and 1 is a positive review.",
"_____no_output_____"
]
],
[
[
"print(\"Training entries: {}, labels: {}\".format(len(train_data), len(train_labels)))",
"_____no_output_____"
]
],
[
[
"The text of the reviews has been converted to integers, where each integer represents a specific word in a dictionary. Here's what the first review looks like:",
"_____no_output_____"
]
],
[
[
"print(train_data[0])",
"_____no_output_____"
]
],
[
[
"Movie reviews may have different lengths. The code below shows the number of words in the first and second reviews. Since inputs to a neural network must be the same length, we'll need to resolve this later.",
"_____no_output_____"
]
],
[
[
"len(train_data[0]), len(train_data[1])",
"_____no_output_____"
],
[
"dir(imdb)\nimdb.get_word_index?",
"_____no_output_____"
]
],
[
[
"### Convert the integers back to words\n\nIt may be useful to know how to convert integers back to text. Here, we'll create a helper function to query a dictionary object that contains the integer to string mapping:",
"_____no_output_____"
]
],
[
[
"# A dictionary mapping words to an integer index\nword_index = imdb.get_word_index()\n\n# The first indices are reserved\nword_index = {k:(v+3) for k,v in word_index.items()}\nword_index[\"<PAD>\"] = 0\nword_index[\"<START>\"] = 1\nword_index[\"<UNK>\"] = 2 # unknown\nword_index[\"<UNUSED>\"] = 3\n\nreverse_word_index = dict([(value, key) for (key, value) in word_index.items()])\n\ndef decode_review(text):\n return ' '.join([reverse_word_index.get(i, '?') for i in text])",
"_____no_output_____"
]
],
[
[
"Let's have a look at the properties of the word_index dictionary.\n\n",
"_____no_output_____"
]
],
[
[
"#imdb.load_data?\nprint(type(word_index))\nprint(len(word_index))\n#print(word_index)\nprint(decode_review([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]))\nprint(decode_review([10003,1002,10001,10000]))",
"_____no_output_____"
]
],
[
[
"Now we can use the `decode_review` function to display the text for the first review:",
"_____no_output_____"
]
],
[
[
"decode_review(train_data[0])",
"_____no_output_____"
]
],
[
[
"## Prepare the data\n\nThe reviews—the arrays of integers—must be converted to tensors before being fed into the neural network. This conversion can be done in a couple of ways:\n\n* Convert the arrays into vectors of 0s and 1s indicating word occurrence, similar to a one-hot encoding. For example, the sequence [3, 5] would become a 10,000-dimensional vector that is all zeros except for indices 3 and 5, which are ones. Then, make this the first layer in our network—a Dense layer—that can handle floating point vector data. This approach is memory intensive, though, requiring a `num_words * num_reviews` size matrix.\n\n* Alternatively, we can pad the arrays so they all have the same length, then create an integer tensor of shape `max_length * num_reviews`. We can use an embedding layer capable of handling this shape as the first layer in our network.\n\nIn this tutorial, we will use the second approach (a short, hedged sketch of the first approach appears after the padding code below, for illustration).\n\nSince the movie reviews must be the same length, we will use the [pad_sequences](https://keras.io/preprocessing/sequence/#pad_sequences) function to standardize the lengths:",
"_____no_output_____"
]
],
[
[
"train_data = keras.preprocessing.sequence.pad_sequences(train_data,\n value=word_index[\"<PAD>\"],\n padding='post',\n maxlen=256)\n\ntest_data = keras.preprocessing.sequence.pad_sequences(test_data,\n value=word_index[\"<PAD>\"],\n padding='post',\n maxlen=256)",
"_____no_output_____"
]
],
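[
[
"# A hedged sketch of the first approach described above (multi-hot vectors of\n# 0s and 1s); the tutorial itself uses padding, so this cell is illustrative\n# only and is meant for the raw integer sequences, before padding.\nimport numpy as np\n\ndef multi_hot_encode(sequences, dimension=10000):\n    # one row per review; write 1.0 at every word index that occurs in it\n    results = np.zeros((len(sequences), dimension))\n    for i, seq in enumerate(sequences):\n        results[i, seq] = 1.0\n    return results\n\nprint(multi_hot_encode([[3, 5], [1, 2, 8]]).shape)  # (2, 10000)",
"_____no_output_____"
]
],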
[
[
"Let's look at the length of the examples now:",
"_____no_output_____"
]
],
[
[
"len(train_data[0]), len(train_data[1])",
"_____no_output_____"
]
],
[
[
"And inspect the (now padded) first review:",
"_____no_output_____"
]
],
[
[
"print(train_data[0])",
"_____no_output_____"
]
],
[
[
"## Build the model\n\nThe neural network is created by stacking layers—this requires two main architectural decisions:\n\n* How many layers to use in the model?\n* How many *hidden units* to use for each layer?\n\nIn this example, the input data consists of an array of word-indices. The labels to predict are either 0 or 1. Let's build a model for this problem:",
"_____no_output_____"
]
],
[
[
"# input shape is the vocabulary count used for the movie reviews (10,000 words)\nvocab_size = 10000\n\nmodel = keras.Sequential()\nmodel.add(keras.layers.Embedding(vocab_size, 16))\nmodel.add(keras.layers.GlobalAveragePooling1D())\nmodel.add(keras.layers.Dense(16, activation=tf.nn.relu))\nmodel.add(keras.layers.Dense(1, activation=tf.nn.sigmoid))\n\nmodel.summary()",
"_____no_output_____"
]
],
[
[
"The layers are stacked sequentially to build the classifier:\n\n1. The first layer is an `Embedding` layer. This layer takes the integer-encoded vocabulary and looks up the embedding vector for each word-index. These vectors are learned as the model trains. The vectors add a dimension to the output array. The resulting dimensions are: `(batch, sequence, embedding)`.\n2. Next, a `GlobalAveragePooling1D` layer returns a fixed-length output vector for each example by averaging over the sequence dimension. This allows the model to handle input of variable length, in the simplest way possible.\n3. This fixed-length output vector is piped through a fully-connected (`Dense`) layer with 16 hidden units.\n4. The last layer is densely connected with a single output node. Using the `sigmoid` activation function, this value is a float between 0 and 1, representing a probability, or confidence level.",
"_____no_output_____"
],
[
"### Hidden units\n\nThe above model has two intermediate or \"hidden\" layers, between the input and output. The number of outputs (units, nodes, or neurons) is the dimension of the representational space for the layer. In other words, the amount of freedom the network is allowed when learning an internal representation.\n\nIf a model has more hidden units (a higher-dimensional representation space), and/or more layers, then the network can learn more complex representations. However, it makes the network more computationally expensive and may lead to learning unwanted patterns—patterns that improve performance on training data but not on the test data. This is called *overfitting*, and we'll explore it later.",
"_____no_output_____"
],
[
"### Loss function and optimizer\n\nA model needs a loss function and an optimizer for training. Since this is a binary classification problem and the model outputs a probability (a single-unit layer with a sigmoid activation), we'll use the `binary_crossentropy` loss function.\n\nThis isn't the only choice for a loss function; you could, for instance, choose `mean_squared_error`. But, generally, `binary_crossentropy` is better for dealing with probabilities—it measures the \"distance\" between probability distributions, or in our case, between the ground-truth distribution and the predictions.\n\nLater, when we are exploring regression problems (say, to predict the price of a house), we will see how to use another loss function called mean squared error.\n\nNow, configure the model to use an optimizer and a loss function:",
"_____no_output_____"
]
],
[
[
"model.compile(optimizer='adam',\n loss='binary_crossentropy',\n metrics=['acc'])",
"_____no_output_____"
]
],
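[
[
"# A hedged illustration of step 2 in the layer walkthrough above:\n# GlobalAveragePooling1D averages over the sequence axis. The numpy mean below\n# mirrors what the layer computes for a toy (batch, sequence, embedding) array.\nimport numpy as np\n\ntoy = np.arange(12, dtype='float32').reshape(1, 3, 4)  # (batch=1, sequence=3, embedding=4)\nprint(toy.mean(axis=1))  # fixed-length output of shape (1, 4)",
"_____no_output_____"
]
],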
[
[
"## Create a validation set\n\nWhen training, we want to check the accuracy of the model on data it hasn't seen before. Create a *validation set* by setting apart 10,000 examples from the original training data. (Why not use the testing set now? Our goal is to develop and tune our model using only the training data, then use the test data just once to evaluate our accuracy).",
"_____no_output_____"
]
],
[
[
"x_val = train_data[:10000]\npartial_x_train = train_data[10000:]\n\ny_val = train_labels[:10000]\npartial_y_train = train_labels[10000:]",
"_____no_output_____"
]
],
[
[
"## Train the model\n\nTrain the model for 40 epochs in mini-batches of 512 samples. This is 40 iterations over all samples in the `x_train` and `y_train` tensors. While training, monitor the model's loss and accuracy on the 10,000 samples from the validation set:",
"_____no_output_____"
]
],
[
[
"history = model.fit(partial_x_train,\n partial_y_train,\n epochs=40,\n batch_size=512,\n validation_data=(x_val, y_val),\n verbose=1)",
"_____no_output_____"
]
],
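[
[
"# A hedged sketch of the callback-based early stopping mentioned at the end of\n# this notebook; the fit call is left commented out so it does not retrain the\n# model here.\nearly_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=2,\n                                           restore_best_weights=True)\n# history = model.fit(partial_x_train, partial_y_train, epochs=40, batch_size=512,\n#                     validation_data=(x_val, y_val), callbacks=[early_stop], verbose=1)",
"_____no_output_____"
]
],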
[
[
"## Evaluate the model\n\nLet's see how the model performs. Two values will be returned: the loss (a number which represents our error; lower values are better) and the accuracy.",
"_____no_output_____"
]
],
[
[
"results = model.evaluate(test_data, test_labels)\n\nprint(results)",
"_____no_output_____"
]
],
[
[
"This fairly naive approach achieves an accuracy of about 87%. With more advanced approaches, the model should get closer to 95%.",
"_____no_output_____"
],
[
"## Create a graph of accuracy and loss over time\n\n`model.fit()` returns a `History` object that contains a dictionary with everything that happened during training:",
"_____no_output_____"
]
],
[
[
"history_dict = history.history\nhistory_dict.keys()",
"_____no_output_____"
]
],
[
[
"There are four entries: one for each monitored metric during training and validation. We can use these to plot the training and validation loss for comparison, as well as the training and validation accuracy:",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\n\nacc = history_dict['acc']\nval_acc = history_dict['val_acc']\nloss = history_dict['loss']\nval_loss = history_dict['val_loss']\n\nepochs = range(1, len(acc) + 1)\n\n# \"bo\" is for \"blue dot\"\nplt.plot(epochs, loss, 'bo', label='Training loss')\n# b is for \"solid blue line\"\nplt.plot(epochs, val_loss, 'b', label='Validation loss')\nplt.title('Training and validation loss')\nplt.xlabel('Epochs')\nplt.ylabel('Loss')\nplt.legend()\n\nplt.show()",
"_____no_output_____"
],
[
"plt.clf() # clear figure\n\nplt.plot(epochs, acc, 'bo', label='Training acc')\nplt.plot(epochs, val_acc, 'b', label='Validation acc')\nplt.title('Training and validation accuracy')\nplt.xlabel('Epochs')\nplt.ylabel('Accuracy')\nplt.legend()\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"\nIn this plot, the dots represent the training loss and accuracy, and the solid lines are the validation loss and accuracy.\n\nNotice the training loss *decreases* with each epoch and the training accuracy *increases* with each epoch. This is expected when using a gradient descent optimization—it should minimize the desired quantity on every iteration.\n\nThis isn't the case for the validation loss and accuracy—they seem to peak after about twenty epochs. This is an example of overfitting: the model performs better on the training data than it does on data it has never seen before. After this point, the model over-optimizes and learns representations *specific* to the training data that do not *generalize* to test data.\n\nFor this particular case, we could prevent overfitting by simply stopping the training after twenty or so epochs. Later, you'll see how to do this automatically with a callback.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
d0ec2b28b6341966df27cc5da56bb8afba2479a4 | 18,046 | ipynb | Jupyter Notebook | test1.ipynb | Abhishekbhagwat/scalable-ml-workshop | f7fff04451ce6ee55a8b0c4e581a2b6566a929c0 | [
"MIT"
] | null | null | null | test1.ipynb | Abhishekbhagwat/scalable-ml-workshop | f7fff04451ce6ee55a8b0c4e581a2b6566a929c0 | [
"MIT"
] | null | null | null | test1.ipynb | Abhishekbhagwat/scalable-ml-workshop | f7fff04451ce6ee55a8b0c4e581a2b6566a929c0 | [
"MIT"
] | 1 | 2019-10-12T06:56:55.000Z | 2019-10-12T06:56:55.000Z | 9,023 | 18,045 | 0.636318 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
d0ec3b88423497fecac5bdb68d09b3856ccca642 | 18,594 | ipynb | Jupyter Notebook | notebooks/01_Data_Fetching/06 - Hydro Features for Training Tiles.ipynb | valentina-s/stand_mapping | 86a0c7a64064cc60c070611ced210bbbfbb2d270 | [
"BSD-3-Clause"
] | null | null | null | notebooks/01_Data_Fetching/06 - Hydro Features for Training Tiles.ipynb | valentina-s/stand_mapping | 86a0c7a64064cc60c070611ced210bbbfbb2d270 | [
"BSD-3-Clause"
] | 13 | 2020-04-14T23:57:49.000Z | 2021-02-28T02:17:24.000Z | notebooks/01_Data_Fetching/06 - Hydro Features for Training Tiles.ipynb | valentina-s/stand_mapping | 86a0c7a64064cc60c070611ced210bbbfbb2d270 | [
"BSD-3-Clause"
] | 4 | 2020-04-12T03:07:07.000Z | 2021-02-04T19:58:43.000Z | 18,594 | 18,594 | 0.604066 | [
[
[
"# Fetching Hydrology Data\nWe have prepared shapefiles containing the USGS quarter quadrangles that have good coverage of forest stand delineations, for which we want to gather additional data.",
"_____no_output_____"
],
[
"# Mount Google Drive \nSo we can access our files showing tile locations, and save the hydrography features we will download.",
"_____no_output_____"
]
],
[
[
"from google.colab import drive\ndrive.mount('/content/drive', force_remount=True)",
"Mounted at /content/drive\n"
],
[
"! sudo apt-get install -y libspatialindex-dev",
"Reading package lists... Done\nBuilding dependency tree \nReading state information... Done\nThe following additional packages will be installed:\n libspatialindex-c4v5 libspatialindex4v5\nThe following NEW packages will be installed:\n libspatialindex-c4v5 libspatialindex-dev libspatialindex4v5\n0 upgraded, 3 newly installed, 0 to remove and 21 not upgraded.\nNeed to get 555 kB of archives.\nAfter this operation, 3,308 kB of additional disk space will be used.\nGet:1 http://archive.ubuntu.com/ubuntu bionic/universe amd64 libspatialindex4v5 amd64 1.8.5-5 [219 kB]\nGet:2 http://archive.ubuntu.com/ubuntu bionic/universe amd64 libspatialindex-c4v5 amd64 1.8.5-5 [51.7 kB]\nGet:3 http://archive.ubuntu.com/ubuntu bionic/universe amd64 libspatialindex-dev amd64 1.8.5-5 [285 kB]\nFetched 555 kB in 1s (702 kB/s)\ndebconf: unable to initialize frontend: Dialog\ndebconf: (No usable dialog-like program is installed, so the dialog based frontend cannot be used. at /usr/share/perl5/Debconf/FrontEnd/Dialog.pm line 76, <> line 3.)\ndebconf: falling back to frontend: Readline\ndebconf: unable to initialize frontend: Readline\ndebconf: (This frontend requires a controlling tty.)\ndebconf: falling back to frontend: Teletype\ndpkg-preconfigure: unable to re-open stdin: \nSelecting previously unselected package libspatialindex4v5:amd64.\n(Reading database ... 144611 files and directories currently installed.)\nPreparing to unpack .../libspatialindex4v5_1.8.5-5_amd64.deb ...\nUnpacking libspatialindex4v5:amd64 (1.8.5-5) ...\nSelecting previously unselected package libspatialindex-c4v5:amd64.\nPreparing to unpack .../libspatialindex-c4v5_1.8.5-5_amd64.deb ...\nUnpacking libspatialindex-c4v5:amd64 (1.8.5-5) ...\nSelecting previously unselected package libspatialindex-dev:amd64.\nPreparing to unpack .../libspatialindex-dev_1.8.5-5_amd64.deb ...\nUnpacking libspatialindex-dev:amd64 (1.8.5-5) ...\nSetting up libspatialindex4v5:amd64 (1.8.5-5) ...\nSetting up libspatialindex-c4v5:amd64 (1.8.5-5) ...\nSetting up libspatialindex-dev:amd64 (1.8.5-5) ...\nProcessing triggers for libc-bin (2.27-3ubuntu1.2) ...\n/sbin/ldconfig.real: /usr/local/lib/python3.6/dist-packages/ideep4py/lib/libmkldnn.so.0 is not a symbolic link\n\n"
],
[
"! pip install geopandas rtree -q",
"\u001b[K |████████████████████████████████| 972kB 2.8MB/s \n\u001b[K |████████████████████████████████| 71kB 8.3MB/s \n\u001b[K |████████████████████████████████| 10.9MB 14.9MB/s \n\u001b[K |████████████████████████████████| 14.8MB 307kB/s \n\u001b[?25h Building wheel for rtree (setup.py) ... \u001b[?25l\u001b[?25hdone\n"
],
[
"import numpy as np\nimport geopandas as gpd\nimport os\nimport requests\nfrom shapely.geometry import box, Polygon",
"_____no_output_____"
]
],
[
[
"The following function will retrieve the hydro data from The National Map's web service.",
"_____no_output_____"
]
],
[
[
"def nhd_from_tnm(nhd_layer,\n bbox,\n inSR=4326,\n **kwargs):\n \"\"\"Returns features from the National Hydrography Dataset Plus High\n Resolution web service from The National Map.\n\n Available layers are:\n\n ========= ======================\n NHD Layer Description\n ========= ======================\n 0 NHDPlusSink\n 1 NHDPoint\n 2 NetworkNHDFlowline\n 3 NonNetworkNHDFlowline\n 4 FlowDirection\n 5 NHDPlusWall\n 6 NHDPlusBurnLineEvent\n 7 NHDLine\n 8 NHDArea\n 9 NHDWaterbody\n 10 NHDPlusCatchment\n 11 WBDHU12\n ========= ======================\n\n Parameters\n ----------\n nhd_layer : int\n a value from 0-11 indicating the feature layer to retrieve.\n bbox : list-like\n list of bounding box coordinates (minx, miny, maxx, maxy).\n inSR : int\n spatial reference for bounding box, such as an EPSG code (e.g., 4326)\n\n Returns\n -------\n clip_gdf : GeoDataFrame\n features in vector format, clipped to bbox\n \"\"\"\n BASE_URL = ''.join([\n 'https://hydro.nationalmap.gov/arcgis/rest/services/NHDPlus_HR/',\n 'MapServer/',\n str(nhd_layer), '/query?'\n ])\n\n params = dict(where=None,\n text=None,\n objectIds=None,\n time=None,\n geometry=','.join([str(x) for x in bbox]),\n geometryType='esriGeometryEnvelope',\n inSR=inSR,\n spatialRel='esriSpatialRelIntersects',\n relationParam=None,\n outFields='*',\n returnGeometry='true',\n returnTrueCurves='false',\n maxAllowableOffset=None,\n geometryPrecision=None,\n outSR=inSR,\n having=None,\n returnIdsOnly='false',\n returnCountOnly='false',\n orderByFields=None,\n groupByFieldsForStatistics=None,\n outStatistics=None,\n returnZ='false',\n returnM='false',\n gdbVersion=None,\n historicMoment=None,\n returnDistinctValues='false',\n resultOffset=None,\n resultRecordCount=None,\n queryByDistance=None,\n returnExtentOnly='false',\n datumTransformation=None,\n parameterValues=None,\n rangeValues=None,\n quantizationParameters=None,\n featureEncoding='esriDefault',\n f='geojson')\n for key, value in kwargs.items():\n params.update({key: value})\n\n r = requests.get(BASE_URL, params=params)\n jsn = r.json()\n if len(jsn['features']) == 0:\n clip_gdf = gpd.GeoDataFrame(geometry=[Polygon()], crs=inSR)\n else:\n try:\n gdf = gpd.GeoDataFrame.from_features(jsn, crs=inSR)\n\n # this API seems to return M and Z values even if not requested\n # this catches the error and keeps only the first two coordinates (x and y)\n except AssertionError:\n for f in jsn['features']:\n f['geometry'].update({\n 'coordinates': [c[0:2] for c in f['geometry']['coordinates']]\n })\n gdf = gpd.GeoDataFrame.from_features(jsn)\n\n clip_gdf = gpd.clip(gdf, box(*bbox))\n if len(clip_gdf) == 0:\n clip_gdf = gpd.GeoDataFrame(geometry=[Polygon()], crs=inSR)\n\n return clip_gdf",
"_____no_output_____"
]
],
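[
[
"# A hedged usage sketch for nhd_from_tnm; the bounding box is illustrative (a\n# small lon/lat box), not taken from the project data, and the call is\n# commented out because it hits a live web service.\n# flowlines = nhd_from_tnm(2, bbox=[-122.70, 45.40, -122.60, 45.50], inSR=4326)  # layer 2 = NetworkNHDFlowline\n# flowlines.plot()",
"_____no_output_____"
]
],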
[
[
"# Download Data for Training Tiles",
"_____no_output_____"
],
[
"This function will loop through a GeoDataFrame, fetch the relevant data, and write data to disk in the appropriate format.",
"_____no_output_____"
]
],
[
[
"def fetch_hydro(gdf, state, overwrite=False):\n epsg = gdf.crs.to_epsg()\n print('Fetching hydro data for {:,d} tiles'.format(len(gdf)))\n\n ## loop through all the geometries in the geodataframe\n\n for idx, row in gdf.iterrows():\n xmin, ymin, xmax, ymax = row['geometry'].bounds\n xmin, ymin = np.floor((xmin, ymin))\n xmax, ymax = np.ceil((xmax, ymax))\n\n bbox = [xmin, ymin, xmax, ymax]\n\n ## don't bother fetching data if we already have processed this tile\n OUTROOT = '/content/drive/Shared drives/stand_mapping/data/interim/training_tiles'\n outfolder = f'{state.lower()}/hydro'\n outdir = os.path.join(OUTROOT, outfolder)\n\n flow_outname = f'{row.CELL_ID}_flowlines.geojson'\n waterbody_outname = f'{row.CELL_ID}_waterbodies.geojson'\n\n flow_outfile = os.path.join(outdir, flow_outname) \n waterbody_outfile = os.path.join(outdir, waterbody_outname) \n\n if (os.path.exists(flow_outfile) and os.path.exists(waterbody_outfile)) and not overwrite:\n if idx % 100 == 0:\n print()\n if idx % 10 == 0:\n print(idx, end='')\n else:\n print('.', end='')\n continue\n \n flow = nhd_from_tnm(4, bbox, epsg)\n waterbody = nhd_from_tnm(9, bbox, epsg)\n\n flow.to_file(flow_outfile, driver='GeoJSON')\n waterbody.to_file(waterbody_outfile, driver='GeoJSON')\n\n ## report progress\n if idx % 100 == 0:\n print()\n if idx % 10 == 0:\n print(idx, end='')\n else:\n print('.', end='')",
"_____no_output_____"
]
],
[
[
"## Fetch Hydro Layers for each tile",
"_____no_output_____"
]
],
[
[
"SHP_DIR = '/content/drive/Shared drives/stand_mapping/data/interim'\n\nWA11_SHP = 'washington_utm11n_training_quads_epsg6340.shp'\nWA10_SHP = 'washington_utm10n_training_quads_epsg6339.shp'\nOR10_SHP = 'oregon_utm10n_training_quads_epsg6339.shp'\nOR11_SHP = 'oregon_utm11n_training_quads_epsg6340.shp'\n\nor10_gdf = gpd.read_file(os.path.join(SHP_DIR, OR10_SHP))\nor11_gdf = gpd.read_file(os.path.join(SHP_DIR, OR11_SHP))\nwa10_gdf = gpd.read_file(os.path.join(SHP_DIR, WA10_SHP))\nwa11_gdf = gpd.read_file(os.path.join(SHP_DIR, WA11_SHP))",
"_____no_output_____"
],
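[
"# A hedged sanity check before the long downloads: confirm the quads loaded\n# and report the size and coordinate reference system of each set.\nfor name, gdf_ in [('or10', or10_gdf), ('or11', or11_gdf), ('wa10', wa10_gdf), ('wa11', wa11_gdf)]:\n    print(name, len(gdf_), gdf_.crs)",
"_____no_output_____"
],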
[
"GDF = wa11_gdf\nSTATE = 'washington'\n\nfetch_hydro(GDF, STATE)",
"Fetching hydro data for 82 tiles\n\n0.........10.........20.........30.........40.........50.........60.........70.........80."
],
[
"GDF = wa10_gdf\nSTATE = 'washington'\n\nfetch_hydro(GDF, STATE)",
"Fetching hydro data for 277 tiles\n\n0.........10.........20.........30.........40.........50.........60.........70.........80.........90.........\n100.........110.........120.........130.........140.........150.........160.........170.........180.........190.........\n200.........210.........220.........230.........240.........250.........260.........270......"
],
[
"GDF = or10_gdf\nSTATE = 'oregon'\n\nfetch_hydro(GDF, STATE)",
"Fetching hydro data for 607 tiles\n\n0.........10.........20.........30.........40.........50.........60.........70.........80.........90.........\n100.........110.........120.........130.........140.........150.........160.........170.........180.........190.........\n200.........210.........220.........230.........240.........250.........260.........270.........280.........290.........\n300.........310.........320.........330.........340.........350.........360.........370.........380.........390.........\n400.........410.........420.........430.........440.........450.........460.........470.........480.........490.........\n500.........510.........520.........530.........540.........550.........560.........570.........580.........590.........\n600......"
],
[
"GDF = or11_gdf\nSTATE = 'oregon'\n\nfetch_hydro(GDF, STATE)",
"Fetching hydro data for 524 tiles\n\n0.........10.........20.........30.........40.........50.........60.........70.........80.........90.........\n100.........110.........120.........130.........140.........150.........160.........170.........180.........190.........\n200.........210.........220.........230.........240.........250.........260.........270.........280.........290.........\n300.........310.........320.........330.........340.........350.........360.........370.........380.........390.........\n400.........410.........420.........430.........440.........450.........460.........470.........480.........490.........\n500.........510.........520..."
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
d0ec3e621dadf5cfadccb33aabee7e26489e4fcf | 28,186 | ipynb | Jupyter Notebook | Appendix-A-Installation.ipynb | torjuskd/Kalman-and-Bayesian-Filters-in-Python | 850c7fc0dd274e7f461eb21264e0d3cbb5323337 | [
"CC-BY-4.0"
] | 5 | 2018-03-16T17:24:14.000Z | 2021-03-17T12:51:23.000Z | Appendix-A-Installation.ipynb | torjuskd/Kalman-and-Bayesian-Filters-in-Python | 850c7fc0dd274e7f461eb21264e0d3cbb5323337 | [
"CC-BY-4.0"
] | null | null | null | Appendix-A-Installation.ipynb | torjuskd/Kalman-and-Bayesian-Filters-in-Python | 850c7fc0dd274e7f461eb21264e0d3cbb5323337 | [
"CC-BY-4.0"
] | 5 | 2019-10-21T22:31:52.000Z | 2020-12-09T14:41:31.000Z | 36.989501 | 659 | 0.527177 | [
[
[
"[Table of Contents](http://nbviewer.ipython.org/github/rlabbe/Kalman-and-Bayesian-Filters-in-Python/blob/master/table_of_contents.ipynb)",
"_____no_output_____"
]
],
[
[
"\\appendix",
"_____no_output_____"
]
],
[
[
"# Installation",
"_____no_output_____"
]
],
[
[
"#format the book\n%matplotlib inline\nfrom __future__ import division, print_function\nfrom book_format import load_style\nload_style()",
"_____no_output_____"
]
],
[
[
"This book is written in Jupyter Notebook, a browser-based interactive Python environment that mixes Python, text, and math. I chose it because of the interactive features - I found Kalman filtering nearly impossible to learn until I started working in an interactive environment. It is difficult to form an intuition about many of the parameters until you can change them and immediately see the output. An interactive environment also allows you to play out 'what if' scenarios. \"What if I set $\\mathbf{Q}$ to zero?\" It is trivial to find out with Jupyter Notebook.\n\nAnother reason I chose it is that most textbooks leave many things opaque. For example, there might be a beautiful plot next to some pseudocode. That plot was produced by software, but software that is not available to the reader. I want everything that went into producing this book to be available to you. How do you plot a covariance ellipse? You won't know if you read most books. With Jupyter Notebook all you have to do is look at the source code.\n\nEven if you choose to read the book online you will want Python and the SciPy stack installed so that you can write your own Kalman filters. There are many different ways to install these libraries, and I cannot cover them all, but I will cover a few typical scenarios.",
"_____no_output_____"
],
[
"## Installing the SciPy Stack",
"_____no_output_____"
],
[
"This book requires IPython, Jupyter, NumPy, SciPy, SymPy, and Matplotlib. The SciPy stack of NumPy, SciPy, and Matplotlib depends on third-party Fortran and C code, and is not trivial to install from source code. The SciPy website strongly urges using a pre-built installation, and I concur with this advice.\n\nJupyter Notebook is the software that allows you to run Python inside the browser - the book is a collection of Jupyter notebooks. IPython provides the infrastructure for Jupyter and data visualization. NumPy and SciPy are packages which provide the linear algebra implementation that the filters use. SymPy performs symbolic math - I use it to find derivatives of algebraic equations. Finally, Matplotlib provides plotting capability. \n\nI use the Anaconda distribution from Continuum Analytics. This is an excellent distribution that combines all of the packages listed above, plus many others. The IPython project recommends this distribution for installing IPython. Installation is very straightforward, and it can be done alongside other Python installations you might already have on your machine. It is free to use. You may download it from here: http://continuum.io/downloads I strongly recommend using the latest Python 3 version that they provide. For now I support Python 2.7, but perhaps not much longer. \n\nThere are other choices for installing the SciPy stack. You can find instructions here: http://scipy.org/install.html It can be very cumbersome, and I do not support it or provide any instructions on how to do it.\n\nMany Linux distributions come with these packages pre-installed. However, they are often somewhat dated and they will need to be updated, as the book depends on recent versions of all of them. Updating a specific Linux installation is beyond the scope of this book. An advantage of the Anaconda distribution is that it does not modify your local Python installation, so you can install it and not break your Linux distribution. Some people have been tripped up by this. They install Anaconda, but the installed Python remains the default version and then the book's software doesn't run correctly.\n\nI do not run regression tests on old versions of these libraries. In fact, I know the code will not run on older versions (say, from 2014-2015). I do not want to spend my life doing tech support for a book, thus I put the burden on you to install a recent version of Python and the SciPy stack. \n\nYou will need Python 2.7 or later installed. Almost all of my work is done in Python 3.6, but I periodically test on 2.7. I do not promise any specific check-in will work in 2.7, however. I use Python's `from __future__ import ...` statement to help with compatibility. For example, all prints need to use parentheses. If you try to add, say, `print x` into the book your script will fail; you must write `print(x)` as in Python 3.X.\n\nPlease submit a bug report at the book's [github repository](https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python) if you have installed the latest Anaconda and something does not work - I will continue to ensure the book will run with the latest Anaconda release. I'm rather indifferent if the book will not run on an older installation. I'm sorry, but I just don't have time to provide support for everyone's different setups. Packages like `jupyter notebook` are evolving rapidly, and I cannot keep up with all the changes *and* remain backwards compatible as well. \n\nIf you need older versions of the software for other projects, note that Anaconda allows you to install multiple versions side-by-side. Documentation for this is here:\n\nhttps://conda.io/docs/user-guide/tasks/manage-python.html\n",
"_____no_output_____"
],
[
"## Installing FilterPy\n\nFilterPy is a Python library that implements all of the filters used in this book, and quite a few others. Installation is easy using `pip`. Issue the following from the command prompt:\n\n pip install filterpy\n \n \nFilterPy is written by me, and the latest development version is always available at https://github.com/rlabbe/filterpy.",
"_____no_output_____"
],
[
"## Downloading and Running the Book",
"_____no_output_____"
],
[
"The book is stored in a GitHub repository. From the command line type the following:\n\n git clone --depth=1 https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python.git\n\nThis will create a directory named Kalman-and-Bayesian-Filters-in-Python. The `depth` parameter just gets you the latest version. Unless you need to see my entire commit history this is a lot faster and saves space.\n\nIf you do not have git installed, browse to https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python where you can download the book via your browser.\n\nNow, from the command prompt change to the directory that was just created, and then run Jupyter notebook:\n\n cd Kalman-and-Bayesian-Filters-in-Python\n jupyter notebook\n\nA browser window should launch showing you all of the chapters in the book. Browse to the first chapter by clicking on it, then open the notebook in that subdirectory by clicking on the link.\n\nMore information about running the notebook can be found here:\n\nhttp://jupyter-notebook-beginner-guide.readthedocs.org/en/latest/execute.html",
"_____no_output_____"
],
[
"## Companion Software",
"_____no_output_____"
],
[
"Code that is specific to the book is stored with the book in the subdirectory *./kf_book*. This code is in a state of flux; I do not wish to document it here yet. I do mention in the book when I use code from this directory, so it should not be a mystery.\n\nIn the *kf_book* subdirectory there are Python files with a name like *xxx*_internal.py. I use these to store functions that are useful for a specific chapter. This allows me to hide away Python code that is not particularly interesting to read - I may be generating a plot or chart, and I want you to focus on the contents of the chart, not the mechanics of how I generate that chart with Python. If you are curious as to the mechanics of that, just go and browse the source.\n\nSome chapters introduce functions that are useful for the rest of the book. Those functions are initially defined within the Notebook itself, but the code is also stored in a Python file that is imported if needed in later chapters. I do document when I do this where the function is first defined, but this is still a work in progress. I try to avoid this because then I always face the issue of code in the directory becoming out of sync with the code in the book. However, IPython Notebook does not give us a way to refer to code cells in other notebooks, so this is the only mechanism I know of to share functionality across notebooks.\n\nThere is an undocumented directory called **experiments**. This is where I write and test code prior to putting it in the book. There is some interesting stuff in there, and feel free to look at it. As the book evolves I plan to create examples and projects, and a lot of this material will end up there. Small experiments will eventually just be deleted. If you are just interested in reading the book you can safely ignore this directory. \n\nThe subdirectory *./kf_book* contains a css file containing the style guide for the book. The default look and feel of IPython Notebook is rather plain. Work is being done on this. I have followed the examples set by books such as [Probabilistic Programming and Bayesian Methods for Hackers](http://nbviewer.ipython.org/github/CamDavidsonPilon/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers/blob/master/Chapter1_Introduction/Chapter1.ipynb). I have also been very influenced by Professor Lorena Barba's fantastic work, [available here](https://github.com/barbagroup/CFDPython). I owe all of my look and feel to the work of these projects. \n",
"_____no_output_____"
],
[
"## Using Jupyter Notebook",
"_____no_output_____"
],
[
"A complete tutorial on Jupyter Notebook is beyond the scope of this book. Many are available online. In short, Python code is placed in cells. These are prefaced with text like `In [1]:`, and the code itself is in a boxed area. If you press CTRL-ENTER while focus is inside the box the code will run and the results will be displayed below the box. Like this:",
"_____no_output_____"
]
],
[
[
"print(3+7.2)",
"10.2\n"
]
],
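[
[
"# A hedged check that the dependencies described above import cleanly; the\n# __version__ attribute is assumed to exist for each of these packages.\nimport numpy, scipy, matplotlib, sympy, filterpy\nfor pkg in (numpy, scipy, matplotlib, sympy, filterpy):\n    print(pkg.__name__ + ' ' + pkg.__version__)",
"_____no_output_____"
]
],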
[
[
"If you have this open in Jupyter Notebook now, go ahead and modify that code by changing the expression inside the print statement and pressing CTRL+ENTER. The output should be changed to reflect what you typed in the code cell.",
"_____no_output_____"
],
[
"## SymPy",
"_____no_output_____"
],
[
"SymPy is a Python package for performing symbolic mathematics. The full scope of its abilities is beyond this book, but it can perform algebra, integrate and differentiate equations, find solutions to differential equations, and much more. For example, we use it to compute Jacobians of matrices and expected value integrals.\n\nFirst, a simple example. We will import SymPy, initialize its pretty print functionality (which will print equations using LaTeX), and then declare a symbol for SymPy to use.",
"_____no_output_____"
]
],
[
[
"import sympy\nsympy.init_printing(use_latex='mathjax')\n\nphi, x = sympy.symbols('\\phi, x')\nphi",
"_____no_output_____"
]
],
[
[
"Notice how it prints the symbol `phi` using LaTeX. Now let's do some math. What is the derivative of $\\sqrt{\\phi}$?",
"_____no_output_____"
]
],
[
[
"sympy.diff('sqrt(phi)')",
"_____no_output_____"
]
],
[
[
"We can factor equations",
"_____no_output_____"
]
],
[
[
"sympy.factor(phi**3 -phi**2 + phi - 1)",
"_____no_output_____"
]
],
[
[
"and we can expand them.",
"_____no_output_____"
]
],
[
[
"((phi+1)*(phi-4)).expand()",
"_____no_output_____"
]
],
[
[
"You can evaluate an equation for specific values of its variables:",
"_____no_output_____"
]
],
[
[
"w =x**2 -3*x +4\nprint(w.subs(x, 4))\nprint(w.subs(x, 12))",
"8\n112\n"
]
],
[
[
"You can also use strings for equations that use symbols that you have not defined:",
"_____no_output_____"
]
],
[
[
"x = sympy.expand('(t+1)*2')\nx",
"_____no_output_____"
]
],
[
[
"Now let's use SymPy to compute the Jacobian of a matrix. Given the function\n\n$$h=\\sqrt{(x^2 + z^2)}$$\n\nfind the Jacobian with respect to x, y, and z.",
"_____no_output_____"
]
],
[
[
"x, y, z = sympy.symbols('x y z')\n\nH = sympy.Matrix([sympy.sqrt(x**2 + z**2)])\n\nstate = sympy.Matrix([x, y, z])\nH.jacobian(state)",
"_____no_output_____"
]
],
[
[
"Now let's compute the discrete process noise matrix $\\mathbf Q$ given the continuous process noise matrix \n$$\\mathbf Q = \\Phi_s \\begin{bmatrix}0&0&0\\\\0&0&0\\\\0&0&1\\end{bmatrix}$$\n\nThe integral is \n\n$$\\mathbf Q = \\int_0^{\\Delta t} \\mathbf F(t)\\mathbf Q\\mathbf F^T(t)\\, dt$$\n\nwhere \n$$\\mathbf F(\\Delta t) = \\begin{bmatrix}1 & \\Delta t & {\\Delta t}^2/2 \\\\ 0 & 1 & \\Delta t\\\\ 0& 0& 1\\end{bmatrix}$$",
"_____no_output_____"
]
],
[
[
"dt = sympy.symbols('\\Delta{t}')\nF_k = sympy.Matrix([[1, dt, dt**2/2],\n [0, 1, dt],\n [0, 0, 1]])\nQ = sympy.Matrix([[0,0,0],\n [0,0,0],\n [0,0,1]])\n\nsympy.integrate(F_k*Q*F_k.T,(dt, 0, dt))",
"_____no_output_____"
]
],
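[
[
"# A hedged follow-up check: a process noise matrix must be symmetric, so\n# verify that the integral above produced a symmetric result.\nQ_disc = sympy.integrate(F_k*Q*F_k.T, (dt, 0, dt))\nQ_disc == Q_disc.T",
"_____no_output_____"
]
],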
[
[
"## Various Links",
"_____no_output_____"
],
[
"https://ipython.org/\n\nhttps://jupyter.org/\n\nhttps://www.scipy.org/",
"_____no_output_____"
]
]
] | [
"markdown",
"raw",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"raw"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
d0ec43654ddf53d73139ed2e1ae998f63d494567 | 183,116 | ipynb | Jupyter Notebook | v1_exploration/dmdt_comparison.ipynb | kushaltirumala/outlier_kplr | a03a43808efd017414b7e1d82783a4956ec21ef2 | [
"MIT"
] | null | null | null | v1_exploration/dmdt_comparison.ipynb | kushaltirumala/outlier_kplr | a03a43808efd017414b7e1d82783a4956ec21ef2 | [
"MIT"
] | null | null | null | v1_exploration/dmdt_comparison.ipynb | kushaltirumala/outlier_kplr | a03a43808efd017414b7e1d82783a4956ec21ef2 | [
"MIT"
] | null | null | null | 338.476895 | 28,576 | 0.923628 | [
[
[
"%matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import LogNorm\nimport os",
"_____no_output_____"
],
[
"kdata = np.load('KeplerSampleFullQ.npy',encoding='bytes')\nprint(kdata.shape)\nprint(len(kdata[250][0]))",
"(2500, 3)\n3534\n"
],
[
"import os\ndmints = [-1.2,-0.3,-0.1,-0.05, -0.02,-0.01, -0.006, -0.005, -0.004, -0.0012, \n -0.001, -0.0006, -0.0003, 0, 0.0003, 0.0006, 0.001, 0.0012, 0.003, \n 0.004, 0.005, 0.006, 0.01, 0.02, 0.05, 0.1, 0.3, 0.6, 1.2]\ndtints = [-1.0/145, 1.0/47, 2.0/47, 3.0/47, 4.0/47, 6.0/47, 10.0/47, 15.0/47,\n 20.0/47, 30.0/47, 40.0/47, 1.0, 1.2, \n 1.4, 1.5, 1.7, 2, 2.25, 2.5, 3.0, 4, 6 , 9, 15, 20, 30, 45, 60, 90]\n\ndef pairwisediffs(arrayoned):\n x = arrayoned.reshape((1,len(arrayoned)))\n xdm = x[:] - np.transpose(x[:])\n xd = xdm[np.triu_indices(len(x[0]), k = 1)]\n return(xd)\ndef get2dhist(lightcurve):\n xd = pairwisediffs(lightcurve[0])\n yd = pairwisediffs(lightcurve[1])\n H,xe,ye = np.histogram2d(xd,yd,bins=[dtints,dmints],range=None,normed=False)\n G = 255*H/np.sum(H)\n return G\n\ndef load_data():\n data = []\n for file in os.listdir(\"full_dmdt\"):\n data.append(np.load(\"full_dmdt/\"+file))\n data = np.array(data)\n return data\n \ndata = load_data()\n ",
"_____no_output_____"
],
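[
"# A hedged sanity check of pairwisediffs on a tiny array: for [1, 2, 4] the\n# upper-triangle pairwise differences are [1, 3, 2].\nimport numpy as np\nprint(pairwisediffs(np.array([1, 2, 4])))",
"_____no_output_____"
],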
[
"import umap\nimport sklearn\nfrom sklearn.manifold import TSNE\ntt = data.reshape(2500, 784)\nx_embedded_tsne_first = TSNE(n_components=2).fit_transform(tt)\nx_embedded_umap_first = umap.UMAP().fit_transform(tt)\nplt.scatter(x_embedded_tsne_first[:, 0], x_embedded_tsne_first[:, 1])",
"_____no_output_____"
],
[
"plt.scatter(x_embedded_umap_first[:, 0], x_embedded_umap_first[:, 1])\n",
"_____no_output_____"
]
],
[
[
"# first 30 points",
"_____no_output_____"
]
],
[
[
"kdata = np.load('KeplerSampleFullQ.npy',encoding='bytes')\nprint(kdata.shape)\nprint(len(kdata[250][0]))",
"(2500, 3)\n3534\n"
],
[
"kdata[0][0].shape",
"_____no_output_____"
],
[
"normalized_x_flux = []\nnormalized_y_flux = []\nfor i, _ in enumerate(kdata):\n if len(kdata[i][1]) == 3534:\n normalized_x_flux.append(kdata[i][0])\n normalized_y_flux.append(kdata[i][1])\n \nnx = np.array(normalized_x_flux)\nny = np.array(normalized_y_flux)\nnx = nx[:, :1350]\nny = ny[:, :1350]\n\nfastdmdt = get2dhist([nx[0],ny[0]])\nplt.imshow(fastdmdt.T, norm = LogNorm(), origin=\"lower\")\nplt.colorbar()",
"_____no_output_____"
],
[
"fastdmdt.shape # dimensions of the dm-dt image computed in the previous cell",
"_____no_output_____"
],
[
"def first_n_points(n, dir_name):\n normalized_x_flux = []\n normalized_y_flux = []\n for i, _ in enumerate(kdata):\n if len(kdata[i][1]) == 3534:\n normalized_x_flux.append(kdata[i][0])\n normalized_y_flux.append(kdata[i][1])\n\n nx = np.array(normalized_x_flux)\n ny = np.array(normalized_y_flux)\n nx = nx[:, :n]\n ny = ny[:, :n]\n data = []\n for i, _ in enumerate(nx):\n fastdmdt = get2dhist([nx[i],ny[i]])\n np.save(dir_name + \"/\" + str(i), fastdmdt.T)\n data.append(fastdmdt)\n \n \n return np.array(data)\n\npoints_30 = first_n_points(30, \"full_30_points\")\nprint points_30.shape",
"(2196, 28, 28)\n"
],
[
"tt = points_30.reshape(2196, 784)\nx_embedded_tsne_first = TSNE(n_components=2).fit_transform(tt)\nx_embedded_umap_first = umap.UMAP().fit_transform(tt)\nplt.scatter(x_embedded_tsne_first[:, 0], x_embedded_tsne_first[:, 1])",
"_____no_output_____"
],
[
"plt.scatter(x_embedded_umap_first[:, 0], x_embedded_umap_first[:, 1])",
"_____no_output_____"
]
],
[
[
"# random 1/2 of the points",
"_____no_output_____"
]
],
[
[
"import random\ndef random_n_points(n, dir_name):\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)\n \n normalized_x_flux = []\n normalized_y_flux = []\n for i, _ in enumerate(kdata):\n if len(kdata[i][1]) == 3534:\n normalized_x_flux.append(kdata[i][0])\n normalized_y_flux.append(kdata[i][1])\n\n nx = np.array(normalized_x_flux)\n ny = np.array(normalized_y_flux)\n start = random.randint(1, len(normalized_x_flux[0])-n)\n random_x_points = nx[:, start: start+n]\n random_y_points = ny[:, start: start+n]\n\n data = []\n for i, _ in enumerate(nx):\n fastdmdt = get2dhist([random_x_points[i],random_y_points[i]])\n np.save(dir_name + \"/\" + str(i), fastdmdt.T)\n data.append(fastdmdt)\n if i%500 == 0:\n print \"ON ITERATION \" + str(i)\n \n \n return np.array(data)\n\nr_half_points = random_n_points(1090, \"random_half_points\")\nprint r_half_points.shape",
"ON ITERATION 0\nON ITERATION 500\nON ITERATION 1000\nON ITERATION 1500\nON ITERATION 2000\n(2196, 28, 28)\n"
],
[
"tt = r_half_points.reshape(2196, 784)\nx_embedded_tsne_first = TSNE(n_components=2).fit_transform(tt)\nx_embedded_umap_first = umap.UMAP().fit_transform(tt)\nplt.scatter(x_embedded_tsne_first[:, 0], x_embedded_tsne_first[:, 1])",
"_____no_output_____"
],
[
"plt.scatter(x_embedded_umap_first[:, 0], x_embedded_umap_first[:, 1])",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
d0ec4ecaf98d9fc3aecec1be90568201755e9622 | 7,780 | ipynb | Jupyter Notebook | index.ipynb | katrinafandrich/ClimateModeling_courseware | 6f13fd38706cfef91e81f7e7065d9fab6fb8bb2f | [
"MIT"
] | null | null | null | index.ipynb | katrinafandrich/ClimateModeling_courseware | 6f13fd38706cfef91e81f7e7065d9fab6fb8bb2f | [
"MIT"
] | null | null | null | index.ipynb | katrinafandrich/ClimateModeling_courseware | 6f13fd38706cfef91e81f7e7065d9fab6fb8bb2f | [
"MIT"
] | 1 | 2021-02-20T03:10:31.000Z | 2021-02-20T03:10:31.000Z | 47.439024 | 354 | 0.679434 | [
[
[
"# [ATM 623: Climate Modeling](index.ipynb)\n\nA graduate-level course on the hands-on use of climate models for understanding climate processes.\n\n### [Brian E. J. Rose](http://www.atmos.albany.edu/facstaff/brose/index.html)\nUniversity at Albany, Department of Atmospheric and Environmental Sciences\n\n\n[Course home page](http://www.atmos.albany.edu/facstaff/brose/classes/ATM623_Spring2017/)",
"_____no_output_____"
],
[
"### About these notes:\n\nThis document uses the interactive [`Jupyter notebook`](https://jupyter.org) format. The notes can be accessed in several different ways:\n\n- The interactive notebooks are hosted on `github` at https://github.com/brian-rose/ClimateModeling_courseware\n- The latest versions can be viewed as static web pages [rendered on nbviewer](http://nbviewer.ipython.org/github/brian-rose/ClimateModeling_courseware/blob/master/index.ipynb)\n- A complete snapshot of the notes as of May 2017 (end of spring semester) are [available on Brian's website](http://www.atmos.albany.edu/facstaff/brose/classes/ATM623_Spring2017/Notes/index.html).\n\n[Also here is a legacy version from 2015](http://www.atmos.albany.edu/facstaff/brose/classes/ATM623_Spring2015/Notes/index.html).\n\nMany of these notes make use of the `climlab` package, available at https://github.com/brian-rose/climlab",
"_____no_output_____"
],
[
"This page is the top-level notebook with links to all notes and assignments.",
"_____no_output_____"
],
[
"## Lecture notes\n\n1. [Planetary energy budget](Lectures/Lecture01 -- Planetary energy budget.ipynb)\n2. [Solving the zero-dimensional EBM](Lectures/Lecture02 -- Solving the zero-dimensional EBM.ipynb)\n3. [Climate Sensitivity and Feedback](Lectures/Lecture03 -- Climate sensitivity and feedback.ipynb)\n4. [The climate system and climate models](Lectures/Lecture04 -- Climate system and climate models.ipynb)\n5. [A Brief Review of Radiation](Lectures/Lecture05 -- Radiation.ipynb)\n6. [Elementary greenhouse models](Lectures/Lecture06 -- Elementary greenhouse models.ipynb)\n7. [Grey radiation modeling with climlab](Lectures/Lecture07 -- Grey radiation modeling with climlab.ipynb)\n8. [Modeling non-scattering radiative transfer](Lectures/Lecture08 -- Modeling non-scattering radiative transfer.ipynb)\n9. [Who needs spectral bands? We do. Some baby steps...](Lectures/Lecture09 -- Who needs spectral bands.ipynb)\n10. [Radiative-Convective Equilibrium](Lectures/Lecture10 -- Radiative-Convective Equilibrium.ipynb)\n11. [Clouds and cloud feedback](Lectures/Lecture11 -- Clouds and cloud feedback.ipynb)\n12. [Insolation](Lectures/Lecture12 -- Insolation.ipynb)\n13. [Orbital variations, insolation, and the ice ages](Lectures/Lecture13 -- Orbital variations.ipynb)\n14. [Heat transport](Lectures/Lecture14 -- Heat transport.ipynb)\n15. [The one-dimensional energy balance model](Lectures/Lecture15 -- Diffusive energy balance model.ipynb)\n16. [Seasonal cycle and heat capacity](Lectures/Lecture16 -- Seasonal cycle and heat capacity.ipynb)\n17. [A peek at numerical methods for diffusion models](Lectures/Lecture17 -- Numerical methods for diffusion models.ipynb)\n18. [Ice albedo feedback in the EBM](Lectures/Lecture18 -- Ice albedo feedback in the EBM.ipynb)\n19. [Snowball Earth and Large Ice Cap Instability in the EBM](Lectures/Lecture19 -- Snowball Earth in the EBM.ipynb)\n20. [The surface energy balance](Lectures/Lecture20 -- The surface energy balance.ipynb)\n21. [Water, water everywhere](Lectures/Lecture21 -- Water, water everywhere!.ipynb)",
"_____no_output_____"
],
[
"## Assignments\n\n1. [Feedback in the zero-dimensional EBM](Assignments/Assignment01 -- Feedback in the zero-dimensional EBM.ipynb)\n2. [Introducing CESM](Assignments/Assignment02 -- Introducing CESM.ipynb)\n3. [Energy budget in CESM](Assignments/Assignment03 -- Energy budget in CESM.ipynb)\n4. [Radiative forcing in a grey radiation atmosphere](Assignments/Assignment04 -- Radiative forcing in a grey radiation atmosphere.ipynb)\n5. [Height-Dependent Water Vapor Changes](Assignments/Assignment05 -- Height-Dependent Water Vapor Changes.ipynb)\n6. [Orbital variations and insolation](Assignments/Assignment06 -- Orbital variations and insolation.ipynb)\n7. Numerical solution of the diffusion equation using the implicit method (see end of [Lecture 17](Lectures/Lecture17 -- Numerical methods for diffusion models.ipynb))",
"_____no_output_____"
],
[
"____________\n\n\n## Dependencies and installation\n\nThese notebooks use the following packages:\n\n- Python (compatible with Python 2 and 3)\n- numpy (array-based numerical computing)\n- scipy (specialized numerical recipes)\n- matplotlib (graphics and animation)\n- xarray (labeled datasets)\n- sympy (symbolic math)\n- climlab (climate modeling engine)\n- ffmpeg (video conversion tool used under-the-hood for interactive animations)\n- version_information (display information about package version)\n\nWe highly recommend using [Anaconda Python](https://www.continuum.io/downloads). For example, the following commands will create a self-contained [conda environment](https://conda.io/docs/using/envs.html) with everything you need to run these notebooks (Mac, Linux and Windows):\n\n```\nconda config --add channels conda-forge\nconda create --name atm623 python jupyter xarray sympy climlab version_information ffmpeg\n```",
"_____no_output_____"
],
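[
"A hedged note: after creating the environment you would typically activate it and launch the notebooks from the repository root, e.g.\n\n```\nsource activate atm623\njupyter notebook\n```\n\n(or `conda activate atm623` on newer versions of conda). The environment name `atm623` matches the `conda create` command above.",
"_____no_output_____"
],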
[
"____________\n\n## Credits\n\nThe author of this notebook is [Brian E. J. Rose](http://www.atmos.albany.edu/facstaff/brose/index.html), University at Albany.\n\nIt was developed in support of [ATM 623: Climate Modeling](http://www.atmos.albany.edu/facstaff/brose/classes/ATM623_Spring2015/), a graduate-level course in the [Department of Atmospheric and Envionmental Sciences](http://www.albany.edu/atmos/index.php)\n\nDevelopment of these notes and the [climlab software](https://github.com/brian-rose/climlab) is partially supported by the National Science Foundation under award AGS-1455071 to Brian Rose. Any opinions, findings, conclusions or recommendations expressed here are mine and do not necessarily reflect the views of the National Science Foundation.\n____________",
"_____no_output_____"
]
]
] | [
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
d0ec5584bc625350239e8df6e94dbdcc97af1ce5 | 31,938 | ipynb | Jupyter Notebook | NLP_Resume.ipynb | TheMLGuy/NLPResumeScanner | d54b185936c2b2a6a5d784265ef6bc8d90922db4 | [
"MIT"
] | null | null | null | NLP_Resume.ipynb | TheMLGuy/NLPResumeScanner | d54b185936c2b2a6a5d784265ef6bc8d90922db4 | [
"MIT"
] | null | null | null | NLP_Resume.ipynb | TheMLGuy/NLPResumeScanner | d54b185936c2b2a6a5d784265ef6bc8d90922db4 | [
"MIT"
] | null | null | null | 70.039474 | 5,540 | 0.466592 | [
[
[
"# importing required modules\nimport PyPDF2\nimport nltk\nimport os\nfrom os import walk\nfrom nltk.tokenize import word_tokenize, sent_tokenize\nfrom nltk.corpus import stopwords\nimport helper\n\n \nr_dir = 'resumes/' # contains all file types of resumes\nr_text_dir = 'resumes_text/' # stores the text version of resume\n",
"_____no_output_____"
],
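[
"# The helper module is not included in this notebook. A hedged, minimal sketch\n# of what helper.get_pdf_corpus might look like, assuming the legacy PyPDF2\n# PdfFileReader API; the function name here is illustrative.\nimport PyPDF2\n\ndef get_pdf_corpus_sketch(path):\n    text = ''\n    with open(path, 'rb') as f:\n        reader = PyPDF2.PdfFileReader(f)\n        for i in range(reader.numPages):\n            text += reader.getPage(i).extractText()\n    return text",
"_____no_output_____"
],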
[
"f = helper.get_all_files(r_dir) # get all files in resumes directory\n\nfor file_ in f:\n filename, file_extension = os.path.splitext(file_) # get file name, extension\n \n if file_extension.lower() == '.pdf': # if the file type is pdf, save the text file version\n file_text = helper.get_pdf_corpus(r_dir + file_) # read pdf text \n helper.save_text_file( r_text_dir + filename + '.txt', file_text) # save as text file",
"_____no_output_____"
],
[
"for file_ in helper.get_all_files(r_text_dir):\n bag_word_count, clean_sentences = helper.get_dict_words(helper.read_text_file(r_text_dir + file_))\n print(bag_word_count)\n",
"{'Git': 1, 'Flask': 1, 'Graduate': 1, 'Systems': 1, 'ServiceS': 1, 'Anvesh': 1, 'principal': 1, 'removed': 1, 'Service': 1, 'Business': 1, 'Keras': 1, 'Premises': 1, '(': 2, 'Leveraging': 1, 'Kansas': 2, 'AWS': 1, 'Matplotlib': 1, 'environment': 1, 'ElasticSearch': 1, 'Designed': 1, 'Apache': 1, 'Nov': 1, 'Lambda': 1, '@': 1, 'Level': 1, 'H': 1, 'GPA': 3, 'Graphs': 1, 'Angular4': 1, 'Scikit': 1, 'Validation': 1, 'Technologies': 1, 'securely': 1, 'aggle': 1, 'skills': 1, 'h': 1, 'Visitor': 1, 'solution': 1, 'EXPERIENCE': 1, 'skewed': 1, 'SNS': 1, 'view': 1, 'Y': 1, 'Tuning': 1, 'Currently': 1, 'ted': 1, 'ResNet': 1, 'Smartcab': 1, 'Exploration': 1, 'Unity': 1, 'Finding': 1, 'Teaching': 1, '2.0': 1, 'techniques': 1, '#': 1, 'Ability': 1, 'CharityML': 1, 'Continuous': 1, 'Junit': 1, '3': 1, 'HTML': 1, 'Grid': 1, 'Prices': 1, 'Video': 1, 'learned': 1, 'Pradesh': 1, 'C': 1, 'full': 1, 'Implemen': 1, 'anvesh525': 1, 'K': 1, 'component': 1, 'UMKC': 1, 'ranked': 1, 'Team': 1, '100': 1, 'teaches': 1, 'ackathon': 1, 'AND': 1, 'Selection': 1, 'Dean': 1, 'policy': 2, 'CAREER': 1, 'DevExpress': 1, 'MO': 1, 'Boston': 1, 'experience': 1, 'Transforming': 1, 'Computer': 2, 'Choosing': 1, 'Big': 1, 'login': 1, 'Technology': 1, 'Competetion': 1, 'PaySafe': 1, 'Binary': 1, 'ervices': 1, 'Aug': 2, 'LOC': 1, 'S3': 1, 'Top': 1, 'Initial': 1, 'Final': 1, ',': 7, 'tection': 1, 'wit': 1, 'Award': 1, 'Visualizing': 1, 'tools': 1, 'PROJECTS': 1, 'challenging': 1, 'use': 1, 'GitHub': 1, 'tuning': 1, 'Degree': 1, 'Bus': 1, 'Predicting': 1, 'June': 1, '.': 1, 'Phone': 1, 'mation': 1, 'Train': 1, 'Cleaning': 1, 'https': 1, 'C2': 1, 'prototype': 1, 'Recipient': 1, 'Identity': 1, 'Hackathon': 1, 'share': 1, 'Normalizing': 1, 'ElasticCache': 1, 'Time': 1, 'OBJECTIVE': 1, 'Shuffle': 1, 'Software': 2, 'SKILL': 1, 'Co': 1, 'PERSONAL': 1, 'CDN': 1, 'Hack': 1, 'Dashboards': 1, 'Coll': 1, 'Caching': 1, 'tokenization': 1, 'Laws': 1, 'learn': 1, 'Battle': 1, 'Route53': 1, 'scatter': 1, 'City': 2, 'MKC': 1, 'IBM': 1, 'Transfer': 1, 'Segments': 1, 'transfor': 1, 'Redis': 1, 'Feature': 2, 'helped': 1, 'vs': 1, 'Mail': 1, 'regression': 1, 'SOA': 1, 'Python': 1, 'Identifying': 1, 'feature': 1, 'till': 1, 'party': 1, 'Spark': 1, 'Classfication': 1, 'Preprocessin': 1, 'profile': 1, 'used': 1, 'outlier': 1, 'Bootstrap': 1, 'REST': 1, 'Recruit': 1, 'Q': 1, 'applications': 1, 'Airport': 1, 'tack': 1, 'architectures': 1, 'OAuth': 1, 'highly': 1, 'Model': 1, 'data': 1, 'Serach': 1, 'Limited': 1, 'third': 1, '3.97/4.0': 1, 'Complexity': 1, 'analysis': 1, 'Reinforcement': 1, ')': 2, 'solve': 1, 'EBS': 1, 'Algorithms': 1, 'Ship': 1, 'Regression': 1, 'ege': 1, 'Visualization': 1, 'serving': 1, 'Recovery': 1, 'Nano': 1, 'Jasmine': 1, 'Housing': 1, 'Jan': 1, 'Reduction': 1, 'dacity': 1, 'human': 1, 'Numpy': 1, 'Oct': 1, 'features': 1, 'web': 1, 'ACADEMIC': 1, 'thinking': 1, 'Developed': 1, 'Kaggle': 1, 'Biplot': 1, 'Augmentation': 1, '2018': 1, 'easy': 1, '2015': 6, '2014': 2, '2017': 1, '2016': 1, 'PickMeUp': 1, 'transformation': 1, '2013': 1, '2012': 1, 'On': 1, 'customers': 1, 'good': 1, 'combination': 1, 'Restaurant': 1, 'L': 1, 'Analysis': 1, 'Motion': 1, 'refactoring': 1, 'game': 1, 'AWARDS': 1, 'T': 1, 'gmail.com': 1, 'Missouri': 1, 'using': 1, 'Tested': 1, 'Steganography': 1, 'like': 1, 'Newton': 1, 'India': 1, '50': 1, 'server': 1, 'API': 1, 'Identify': 1, 'Migration': 1, 'Learning': 1, 'Visakhapatnam': 1, 'Implemented': 1, 'reduced': 1, 'Clustering': 2, 'Management': 1, 'Java': 1, 'ed': 1, 'Winner': 1, 'Percent': 1, 'KMeans': 1, '703': 1, 
'Extraction': 1, 'static': 1, 'Split': 1, 'Giraph': 1, 'images': 1, '7133': 1, 'Importance': 1, 'Yourmembership': 1, 'JQuery': 1, 'Donors': 1, 'network': 1, 'space': 1, 'Evaluation': 2, 'maps': 1, 'Facebook': 1, 'cloud': 1, 'architecture': 1, 'This': 1, 'FullStack': 1, 'University': 2, 'Work': 1, '4.0/4.0': 1, 'SET': 1, 'Architecture': 1, 'CSS': 1, 'Creating': 1, 'Forecasting': 1, 'g': 1, 'created': 1, 'og': 1, 'December': 2, 'App': 1, 'programming': 1, 'eGourmet': 1, '+1': 1, 'Analyzing': 1, 'Solutions': 1, '3.80/4.0': 1, 'Message': 1, 'EDUCATION': 1, 'processor': 1, 'Best': 1, 'Developer': 1, 'Knowledge': 1, 'Elastic': 1, 'TECHNICAL': 1, 'Mashup': 1, 'Oriented': 1, 'OpenCV': 1, 'Pickup': 1, 'Critical': 1, 'RedisMQ': 1, 'STRENGTHS': 1, 'Pandas': 1, 'everaged': 1, 'clustering': 1, 'Used': 1, 'sates': 1, 'Assistant': 1, 'breed': 1, 'Days': 1, 'custom': 1, 'Bachelor': 1, 'Andhra': 2, 'FreeBase': 1, ':': 1, 'Data': 1, '568': 1, '//github.com/atmc9': 1, 'Search': 1, 'Scholar': 1, 'Science': 3, 'Extracting': 1, 'heat': 1, 'R': 1, 'Dec': 3, 'Ebola': 1, 'Features': 2, 'Sep': 1, 'MongoDB': 1, 'Prestoo': 1, 'State': 1, 'project': 1, 'Cluster': 1, 'Engineering': 2, 'Master': 1, 'problem': 1, 'Engineer': 1, 'files': 1, 'Dimensionality': 1, 'Develop': 1, 'USA': 1, 'Seaborn': 1, 'Cross': 1, 'LinkedIn': 1, 'Machine': 1, 'Analytic': 1, 'elevance': 1, 'right': 1, 'Curves': 1, 'Optimal': 1, 'ing': 1, 'HONORS': 1, 'latency': 1, 'date': 1, 'May': 1, 'Numerical': 1, '-': 3, 'Loyalty': 1, '40': 1, 'application': 1, 'Preprocessing': 1, 'resembling': 1, 'PROFESSIONAL': 1, '3D': 1, 'Customer': 1, 'Advance': 1, 'E': 1, 'Tweets': 1, 'problems': 1, 'Drive': 1, 'CloudFront': 1, 'Skewed': 1, 'April': 2, 'U': 1, 'log': 1, 'TensorFlow': 1, 'CNN': 2, 'services': 1, 'Login': 1, 'Audio': 1, 'payment': 1, 'matrix': 1, 'correlated': 1, 'e': 1, 'oTube': 1, 'consumed': 1, 'SQL': 1, 'Dynamic': 1, 'Private': 1, '2nd': 1, 'Intern': 1, 'management': 1, 'Tummala': 1, 'Techniques': 1}\n{'213': 1, 'neural': 1, 'Administered': 1, 'observed': 1, 'Matplo': 1, 'Game': 1, 'Hadoop': 1, 'Research': 1, 'product': 1, 'Technological': 1, 'charting': 1, 'query': 1, 'LSTM': 1, 'layers': 1, 'web': 1, 'Mixture': 1, 'based': 1, 'system': 2, 'parameters': 1, 'Keras': 1, '(': 1, 'technique': 1, ',': 1, 'workflow': 1, 'desktop': 1, 'better': 1, '2019': 1, 'systems': 1, 'Apache': 1, '2014': 1, '2017': 1, 'insights': 1, 'Aggregated': 1, 'Present': 1, 'Applied': 1, '@': 1, 'customers': 1, 'mixture': 1, 'risk': 2, 'Music': 1, 'means': 1, 'read': 2, 'MyS': 1, 'overall': 1, 'expectation': 1, 'song': 1, 'Provo': 1, '2D': 1, 'Micro': 1, 'game': 1, 'Performer': 1, 'GB': 1, 'Unix/Linux': 1, 'clustered': 1, 'using': 1, 'Graduate': 1, 'VEMENTS': 1, 'July': 1, 'attr': 1, 'Los': 1, 'pokemons': 2, 'presented': 1, 'bytes': 2, 'metadata': 1, 'EXPERIENCE': 1, 'teams': 1, 'specific': 1, 'item': 1, 'ACHIE': 1, 'Council': 1, 'SQL': 1, 'release': 1, 'Spa': 1, 'Ideated': 1, 'OS': 1, 'soft': 1, 'ence': 1, 'VENKATESHA': 1, 'set': 1, 'Java': 1, 'learning': 1, 'Exploratory': 1, 'existing': 1, 'India': 1, 'hard': 1, 'maximization': 1, 'prediction': 1, 'related': 1, 'identified': 1, 'synergy': 1, 'ttribute': 1, 'Analyzed': 1, 'growth': 1, 'Master': 1, 'Coordinated': 1, 'versus': 1, 'Employed': 1, 'implemented': 1, 'extract': 1, 'input': 1, 'reviews': 1, '#': 1, ')': 1, 'ess': 1, 'network': 1, 'sentiment': 2, 'space': 1, 'performs': 1, '7631': 1, 'recommendation': 1, 'artificial': 1, 'Institute': 1, '3': 1, 'various': 1, 'precision': 1, '//github.com/TheMLGuy': 1, 'Informatics': 
1, 'Bangalore': 1, 'three': 1, 'predict': 1, 'rk': 1, 'dataset': 1, '25': 1, 'Programming': 1, 'University': 2, 'Acc': 1, 'Identified': 1, 'resulted': 1, 'chine': 1, 'S': 1, 'Place': 1, 'ibutes': 1, 'training': 1, 'usc.edu': 1, 'Ellendale': 1, 'analyzing': 1, 'acc': 1, 'c': 1, 'milestones': 1, 'contribution': 1, 'Security': 1, 'Developed': 1, 'Focus': 1, 'Apt': 1, 'Sentimental': 1, 'EDUCATION': 1, 'genre': 1, '431': 1, 'ci': 1, 'umulating': 1, 'inclusion': 1, 'TECHNICAL': 1, 'June': 1, 'Top': 1, '90007': 1, 'environments': 1, 'comprised': 1, 'Sciences': 1, 'NetIQ': 1, 'initiative': 1, 'anomalies': 1, 'associated': 1, 'Model': 2, 'PROJECTS': 1, 'Accuracy': 1, 'clustering': 2, 'Used': 1, 'August': 1, '8000': 1, 'WORK': 1, 'Southern': 1, 'ashwinve': 1, '.': 1, 'The': 1, 'comparing': 1, 'Bachelor': 1, 'Firebase': 1, 'California': 1, 'charts': 1, 'https': 1, 'Automated': 1, 'collected': 1, 'tensorflow': 1, ':': 5, 'Data': 1, '2816': 1, 'Analysed': 1, 'automate': 1, 'Tensorfow': 1, 'sessions': 1, 'Science': 1, 'CA': 2, 'J': 1, 'tlib': 1, 'KMeans': 1, '2015': 1, 'gain': 1, 'automated': 1, 'Oracle': 1, 'fuzzy': 1, 'Sept': 1, 'Software': 1, '11': 1, 'detection': 1, 'datasets': 1, 'applying': 1, 'look': 1, 'Classification': 1, 'platforms': 1, 'ed': 1, 'anomaly': 1, 'points': 1, 'database': 1, 'Kmeans': 1, 'recall': 1, 'frameworks': 1, 'classes': 1, 'Performed': 1, 'GN': 1, 'learn': 1, 'accrued': 1, 'ASHWIN': 1, 'Application': 1, 'called': 1, 'Engineer': 1, '0.9': 1, 'Information': 2, 'compare': 1, 'USC': 1, 'IBM': 1, 'almost': 1, 'attribute': 1, 'Responsible': 1, 'Selenium': 1, 'pie': 1, 'cluster': 1, 'testcases': 1, 'application': 1, 'owing': 1, 'analysis': 1, 'TFlearn': 1, 'Tensorflow': 1, 'ML': 1, 'ava': 1, 'Mac': 1, '150': 1, '%': 1, 'Associate': 1, 'May': 1, 'Scikit': 1, '-': 4, 'feature': 1, '40': 1, 'write': 3, 'grouped': 1, 'authentication': 1, 'internal': 1, 'Achieved': 1, 'algorithm': 1, 'STAF': 1, 'dropout': 1, 'classify': 1, 'accuracy': 1, 'Directed': 1, 'Visvesvaraya': 1, 'scikit': 1, 'E': 1, 'plans': 1, '1.8': 1, 'Windows': 1, 'update': 1, 'automation': 2, 'Angeles': 1, '76': 1, 'poke': 1, 'Orchestrated': 1, 'designed': 1, 'Databases': 1, 'recommender': 2, 'ngineering': 1, 'data': 1, 'model': 1, 'types': 1, 'curve': 1, 'executions': 1, 'Python': 1, 'I': 1, 'calls': 3, 'unsupervised': 1, 'SKILLS': 1, 'performances': 1, 'popularity': 1, 'modules': 1, 'Gaussian': 1, 'Novell': 1, 'patterns': 1, 'Manager': 1, 'client': 1, 'Recommendation': 1, 'visualizations': 1, 'Programmer': 1, 'Tools': 1, 'QL': 1, 'mon': 1}\n"
],
[
"#compare common words between 2 resumes\nkeys_a = set(updated_corpus_all_1.keys())\nkeys_b = set(updated_corpus_all_2.keys())\n\nintersection = keys_a & keys_b\nprint(intersection)\nfor key in intersection:\n print(key, updated_corpus_all_1[key], updated_corpus_all_2[key])",
"(u':', 5)\n(u'-', 4)\n(u'calls', 3)\n(u'write', 3)\n(u'CA', 2)\n(u'Information', 2)\n(u'Model', 2)\n(u'University', 2)\n(u'automation', 2)\n(u'bytes', 2)\n(u'clustering', 2)\n(u'pokemons', 2)\n(u'read', 2)\n(u'recommender', 2)\n(u'risk', 2)\n(u'sentiment', 2)\n(u'system', 2)\n(u'#', 1)\n(u'%', 1)\n(u'(', 1)\n(u')', 1)\n(u',', 1)\n(u'.', 1)\n(u'//github.com/TheMLGuy', 1)\n(u'0.9', 1)\n(u'1.8', 1)\n(u'11', 1)\n(u'150', 1)\n(u'2014', 1)\n(u'2015', 1)\n(u'2017', 1)\n(u'2019', 1)\n(u'213', 1)\n(u'25', 1)\n(u'2816', 1)\n(u'2D', 1)\n(u'3', 1)\n(u'40', 1)\n(u'431', 1)\n(u'76', 1)\n(u'7631', 1)\n(u'8000', 1)\n(u'90007', 1)\n(u'@', 1)\n(u'ACHIE', 1)\n(u'ASHWIN', 1)\n(u'Acc', 1)\n(u'Accuracy', 1)\n(u'Achieved', 1)\n(u'Administered', 1)\n(u'Aggregated', 1)\n(u'Analysed', 1)\n(u'Analyzed', 1)\n(u'Angeles', 1)\n(u'Apache', 1)\n(u'Application', 1)\n(u'Applied', 1)\n(u'Apt', 1)\n(u'Associate', 1)\n(u'August', 1)\n(u'Automated', 1)\n(u'Bachelor', 1)\n(u'Bangalore', 1)\n(u'California', 1)\n(u'Classification', 1)\n(u'Coordinated', 1)\n(u'Council', 1)\n(u'Data', 1)\n(u'Databases', 1)\n(u'Developed', 1)\n(u'Directed', 1)\n(u'E', 1)\n(u'EDUCATION', 1)\n(u'EXPERIENCE', 1)\n(u'Ellendale', 1)\n(u'Employed', 1)\n(u'Engineer', 1)\n(u'Exploratory', 1)\n(u'Firebase', 1)\n(u'Focus', 1)\n(u'GB', 1)\n(u'GN', 1)\n(u'Game', 1)\n(u'Gaussian', 1)\n(u'Graduate', 1)\n(u'Hadoop', 1)\n(u'I', 1)\n(u'IBM', 1)\n(u'Ideated', 1)\n(u'Identified', 1)\n(u'India', 1)\n(u'Informatics', 1)\n(u'Institute', 1)\n(u'J', 1)\n(u'Java', 1)\n(u'July', 1)\n(u'June', 1)\n(u'KMeans', 1)\n(u'Keras', 1)\n(u'Kmeans', 1)\n(u'LSTM', 1)\n(u'Los', 1)\n(u'ML', 1)\n(u'Mac', 1)\n(u'Manager', 1)\n(u'Master', 1)\n(u'Matplo', 1)\n(u'May', 1)\n(u'Micro', 1)\n(u'Mixture', 1)\n(u'Music', 1)\n(u'MyS', 1)\n(u'NetIQ', 1)\n(u'Novell', 1)\n(u'OS', 1)\n(u'Oracle', 1)\n(u'Orchestrated', 1)\n(u'PROJECTS', 1)\n(u'Performed', 1)\n(u'Performer', 1)\n(u'Place', 1)\n(u'Present', 1)\n(u'Programmer', 1)\n(u'Programming', 1)\n(u'Provo', 1)\n(u'Python', 1)\n(u'QL', 1)\n(u'Recommendation', 1)\n(u'Research', 1)\n(u'Responsible', 1)\n(u'S', 1)\n(u'SKILLS', 1)\n(u'SQL', 1)\n(u'STAF', 1)\n(u'Science', 1)\n(u'Sciences', 1)\n(u'Scikit', 1)\n(u'Security', 1)\n(u'Selenium', 1)\n(u'Sentimental', 1)\n(u'Sept', 1)\n(u'Software', 1)\n(u'Southern', 1)\n(u'Spa', 1)\n(u'TECHNICAL', 1)\n(u'TFlearn', 1)\n(u'Technological', 1)\n(u'Tensorflow', 1)\n(u'Tensorfow', 1)\n(u'The', 1)\n(u'Tools', 1)\n(u'Top', 1)\n(u'USC', 1)\n(u'Unix/Linux', 1)\n(u'Used', 1)\n(u'VEMENTS', 1)\n(u'VENKATESHA', 1)\n(u'Visvesvaraya', 1)\n(u'WORK', 1)\n(u'Windows', 1)\n(u'acc', 1)\n(u'accrued', 1)\n(u'accuracy', 1)\n(u'algorithm', 1)\n(u'almost', 1)\n(u'analysis', 1)\n(u'analyzing', 1)\n(u'anomalies', 1)\n(u'anomaly', 1)\n(u'application', 1)\n(u'applying', 1)\n(u'artificial', 1)\n(u'ashwinve', 1)\n(u'associated', 1)\n(u'attr', 1)\n(u'attribute', 1)\n(u'authentication', 1)\n(u'automate', 1)\n(u'automated', 1)\n(u'ava', 1)\n(u'based', 1)\n(u'better', 1)\n(u'c', 1)\n(u'called', 1)\n(u'charting', 1)\n(u'charts', 1)\n(u'chine', 1)\n(u'ci', 1)\n(u'classes', 1)\n(u'classify', 1)\n(u'client', 1)\n(u'cluster', 1)\n(u'clustered', 1)\n(u'collected', 1)\n(u'compare', 1)\n(u'comparing', 1)\n(u'comprised', 1)\n(u'contribution', 1)\n(u'curve', 1)\n(u'customers', 1)\n(u'data', 1)\n(u'database', 1)\n(u'dataset', 1)\n(u'datasets', 1)\n(u'designed', 1)\n(u'desktop', 1)\n(u'detection', 1)\n(u'dropout', 1)\n(u'ed', 1)\n(u'ence', 1)\n(u'environments', 1)\n(u'ess', 1)\n(u'executions', 1)\n(u'existing', 1)\n(u'expectation', 1)\n(u'extract', 
1)\n(u'feature', 1)\n(u'frameworks', 1)\n(u'fuzzy', 1)\n(u'gain', 1)\n(u'game', 1)\n(u'genre', 1)\n(u'grouped', 1)\n(u'growth', 1)\n(u'hard', 1)\n(u'https', 1)\n(u'ibutes', 1)\n(u'identified', 1)\n(u'implemented', 1)\n(u'inclusion', 1)\n(u'initiative', 1)\n(u'input', 1)\n(u'insights', 1)\n(u'internal', 1)\n(u'item', 1)\n(u'layers', 1)\n(u'learn', 1)\n(u'learning', 1)\n(u'look', 1)\n(u'maximization', 1)\n(u'means', 1)\n(u'metadata', 1)\n(u'milestones', 1)\n(u'mixture', 1)\n(u'model', 1)\n(u'modules', 1)\n(u'mon', 1)\n(u'network', 1)\n(u'neural', 1)\n(u'ngineering', 1)\n(u'observed', 1)\n(u'overall', 1)\n(u'owing', 1)\n(u'parameters', 1)\n(u'patterns', 1)\n(u'performances', 1)\n(u'performs', 1)\n(u'pie', 1)\n(u'plans', 1)\n(u'platforms', 1)\n(u'points', 1)\n(u'poke', 1)\n(u'popularity', 1)\n(u'precision', 1)\n(u'predict', 1)\n(u'prediction', 1)\n(u'presented', 1)\n(u'product', 1)\n(u'query', 1)\n(u'recall', 1)\n(u'recommendation', 1)\n(u'related', 1)\n(u'release', 1)\n(u'resulted', 1)\n(u'reviews', 1)\n(u'rk', 1)\n(u'scikit', 1)\n(u'sessions', 1)\n(u'set', 1)\n(u'soft', 1)\n(u'song', 1)\n(u'space', 1)\n(u'specific', 1)\n(u'synergy', 1)\n(u'systems', 1)\n(u'teams', 1)\n(u'technique', 1)\n(u'tensorflow', 1)\n(u'testcases', 1)\n(u'three', 1)\n(u'tlib', 1)\n(u'training', 1)\n(u'ttribute', 1)\n(u'types', 1)\n(u'umulating', 1)\n(u'unsupervised', 1)\n(u'update', 1)\n(u'usc.edu', 1)\n(u'using', 1)\n(u'various', 1)\n(u'versus', 1)\n(u'visualizations', 1)\n(u'web', 1)\n(u'workflow', 1)\n"
],
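[
"# Hypothetical follow-up (not in the original notebook): reduce the raw\n# word overlap printed above to a single Jaccard similarity score between\n# the two resumes' vocabularies. Assumes updated_corpus_all_1 and\n# updated_corpus_all_2 (the word->count dicts built earlier) are in scope.\nkeys_a = set(updated_corpus_all_1.keys())\nkeys_b = set(updated_corpus_all_2.keys())\njaccard = len(keys_a & keys_b) / float(len(keys_a | keys_b))\nprint(\"Jaccard similarity of the two resumes: {:.3f}\".format(jaccard))",
"_____no_output_____"
],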
[
"for i in updated_sentences:\n print(nltk.pos_tag(i))",
"[(u'challenging', 'VBG'), (u'//github.com/atmc9', '$'), (u'703', 'CD'), (u'Machine', 'NNP'), (u'Mail', 'NNP'), (u'7133', 'CD'), (u'Anvesh', 'NNP'), (u'techniques', 'NNS'), (u'GitHub', 'NNP'), (u'-', ':'), (u'.', '.'), (u'Phone', 'NN'), (u'https', 'NN'), (u':', ':'), (u'568', 'CD'), (u'@', 'NN'), (u'E', 'NNP'), (u'anvesh525', 'NN'), (u'problems', 'NNS'), (u'T', 'NNP'), (u'gmail.com', 'VBD'), (u'OBJECTIVE', 'NNP'), (u'using', 'VBG'), (u'CAREER', 'NNP'), (u'+1', 'NNP'), (u'solve', 'VB'), (u'Learning', 'NNP'), (u'Tummala', 'NNP')]\n[(u'City', 'NNP'), (u'Develop', 'NNP'), (u'Nano', 'NNP'), (u'India', 'NNP'), (u'Coll', 'NNP'), (u',', ','), (u'Machine', 'NNP'), (u'Master', 'NNP'), (u'Systems', 'NNPS'), (u'ServiceS', 'NNP'), (u'Yourmembership', 'NNP'), (u'4.0/4.0', 'CD'), (u'USA', 'NNP'), (u'Degree', 'NNP'), (u'May', 'NNP'), (u'Kansas', 'NNP'), (u'-', ':'), (u'Kaggle', 'NNP'), (u'dacity', 'NN'), (u'.', '.'), (u'till', 'VB'), (u'Bachelor', 'NNP'), (u'API', 'NNP'), (u'Andhra', 'NNP'), (u'ed', 'NN'), (u'2015', 'CD'), (u'PROFESSIONAL', 'NNP'), (u'2017', 'CD'), (u':', ':'), (u'Pradesh', 'JJ'), (u'2013', 'CD'), (u'Developer', 'NNP'), (u'Advance', 'NNP'), (u'Level', 'NNP'), (u'Science', 'NNP'), (u'University', 'NNP'), (u'FullStack', 'NNP'), (u'GPA', 'NNP'), (u'REST', 'NNP'), (u'Angular4', 'NNP'), (u'tack', 'NN'), (u'U', 'NNP'), (u'Missouri', 'NNP'), (u'date', 'NN'), (u'using', 'VBG'), (u'Jan', 'NNP'), (u'2016', 'CD'), (u'Co', 'NNP'), (u'created', 'VBD'), (u'December', 'NNP'), (u'MO', 'NNP'), (u'3.97/4.0', 'CD'), (u'EXPERIENCE', 'NNP'), (u'Engineering', 'NNP'), (u'Computer', 'NNP'), (u'Learning', 'NNP'), (u'Visakhapatnam', 'NNP'), (u'3.80/4.0', 'CD'), (u'EDUCATION', 'NNP'), (u'Technology', 'NNP'), (u'consumed', 'VBD'), (u'ege', 'NNS'), (u'Engineer', 'NNP')]\n[(u'Tested', 'VBN'), (u'.', '.'), (u'Jasmine', 'NNP'), (u'Junit', 'NNP'), (u',', ',')]\n[(u'Redis', 'NNP'), (u'Caching', 'NNP'), (u'RedisMQ', 'NNP'), (u'everaged', 'VBD'), (u')', ')'), (u'Bus', 'NNP'), (u'AWS', 'NNP'), (u',', ','), (u'.', '.'), (u'tokenization', 'NN'), (u'used', 'VBN'), (u'Implemen', 'NNP'), (u'ted', 'VBD'), (u'L', 'NNP'), (u'architectures', 'NNS'), (u'services', 'NNS'), (u'payment', 'NN'), (u'SNS', 'NNP'), (u'(', '('), (u'Message', 'NNP'), (u'processor', 'NN'), (u'PaySafe', 'NNP')]\n[(u'customers', 'NNS'), (u'management', 'NN'), (u'third', 'JJ'), (u'h', 'NN'), (u'share', 'NN'), (u',', ','), (u'server', 'RB'), (u'custom', 'NN'), (u'helped', 'VBD'), (u'applications', 'NNS'), (u'wit', 'VBP'), (u'2.0', 'CD'), (u'securely', 'RB'), (u'.', '.'), (u'OAuth', 'NNP'), (u'party', 'NN'), (u'Implemented', 'NNP'), (u'data', 'NN'), (u'Identity', 'NN')]\n[(u',', ','), (u'LinkedIn', 'NNP'), (u'.', '.'), (u'helped', 'VBD'), (u'Facebook', 'NNP'), (u'experience', 'NN'), (u'easy', 'JJ'), (u'Login', 'NNP'), (u'Implemented', 'NNP'), (u'login', 'NN')]\n[(u'ervices', 'NNS'), (u'S3', 'NNP'), (u'-', ':'), (u'Redis', 'NNP'), (u'cloud', 'VBP'), (u'web', 'NN'), (u'Premises', 'NNP'), (u'AWS', 'NNP'), (u',', ','), (u'.', '.'), (u'environment', 'NN'), (u'application', 'NN'), (u'ElasticSearch', 'NNP'), (u'C2', 'NNP'), (u'On', 'IN'), (u'good', 'JJ'), (u'E', 'NN'), (u'CloudFront', 'NNP'), (u'ElasticCache', 'NNP'), (u'Lambda', 'NNP'), (u'like', 'IN'), (u'experience', 'NN'), (u'Migration', 'NNP'), (u'EBS', 'NNP'), (u'Route53', 'NNP')]\n[(u'Serach', 'NNP'), (u'use', 'NN'), (u'Elastic', 'NNP'), (u'application', 'NN'), (u'Leveraging', 'NNP'), (u'AWS', 'NNP'), (u'Search', 'NNP'), (u'server', 'NN'), (u'refactoring', 'NN'), (u'services', 'NNS'), (u'.', '.')]\n[(u'files', 
'NNS'), (u'latency', 'NN'), (u'serving', 'VBG'), (u'network', 'NN'), (u'S3', 'NNP'), (u'CDN', 'NNP'), (u'-', ':'), (u'solution', 'NN'), (u'CloudFront', 'NNP'), (u',', ','), (u'Used', 'VBD'), (u'static', 'JJ'), (u'.', '.'), (u'reduced', 'VBN')]\n[(u'SOA', 'NNP'), (u'Oriented', 'NNP'), (u'Dec', 'NNP'), (u'Implemented', 'NNP'), (u'Business', 'NNP'), (u'-', ':'), (u'Service', 'NNP'), (u'June', 'NNP'), (u'Dashboards', 'NNP'), (u'Architecture', 'NNP'), (u'eGourmet', 'VBD'), (u'Analytic', 'NNP'), (u'Intern', 'NNP'), (u'Solutions', 'NNP'), (u'.', '.'), (u'2015', 'CD'), (u'using', 'VBG'), (u':', ':'), (u'Software', 'NN')]\n[(u'Developed', 'NNP'), (u'consumed', 'VBD'), (u'Redis', 'NNP'), (u',', ','), (u'REST', 'NNP'), (u'.', '.'), (u'API', 'NNP'), (u'DevExpress', 'NNP'), (u'Caching', 'NNP'), (u'using', 'VBG'), (u'tools', 'NNS')]\n[(u'Newton', 'NNP'), (u'MKC', 'NNP'), (u'Graduate', 'NNP'), (u'Jan', 'NNP'), (u'Unity', 'NNP'), (u'Teaching', 'NNP'), (u'3D', 'CD'), (u'May', 'NNP'), (u'Assistant', 'NNP'), (u'-', ':'), (u'.', '.'), (u'Designed', 'VBN'), (u'2015', 'CD'), (u'2014', 'CD'), (u'prototype', 'NN'), (u'2013', 'CD'), (u'Motion', 'NNP'), (u'game', 'NN'), (u'U', 'NNP'), (u'using', 'VBG'), (u'teaches', 'NNS'), (u'Software', 'NNP'), (u'Limited', 'VBD'), (u':', ':'), (u'Sep', 'NNP'), (u'Developed', 'NNP'), (u'Prestoo', 'NNP'), (u'Private', 'NNP'), (u'Laws', 'NNP'), (u'Developer', 'NNP')]\n[(u'Visualization', 'NNP'), (u'Git', 'NNP'), (u'Seaborn', 'NNP'), (u'TECHNICAL', 'NNP'), (u'programming', 'VBG'), (u'Redis', 'NNP'), (u'OpenCV', 'NNP'), (u'RedisMQ', 'NNP'), (u'Numpy', 'NNP'), (u'Pandas', 'NNP'), (u'#', '#'), (u'SET', 'NNP'), (u'Keras', 'NNP'), (u'Python', 'NNP'), (u'Big', 'NNP'), (u'Scikit', 'NNP'), (u'-', ':'), (u',', ','), (u'Matplotlib', 'NNP'), (u'.', '.'), (u'Flask', 'NNP'), (u'Preprocessing', 'VBG'), (u'Cleaning', 'NNP'), (u'Spark', 'NNP'), (u':', ':'), (u'Data', 'NNP'), (u'C', 'NNP'), (u'SQL', 'NNP'), (u'Analysis', 'NNP'), (u'Angular4', 'NNP'), (u'Algorithms', 'NNP'), (u'Dynamic', 'NNP'), (u'learn', 'VBD'), (u'TensorFlow', 'NNP'), (u'SKILL', 'NNP')]\n[(u'Time', 'NNP'), (u'Management', 'NNP'), (u'IBM', 'NNP'), (u'Aug', 'NNP'), (u'Top', 'NNP'), (u'Winner', 'NNP'), (u'Percent', 'NNP'), (u'Loyalty', 'NNP'), (u'Jan', 'NNP'), (u'Critical', 'NNP'), (u'40', 'CD'), (u'STRENGTHS', 'NNP'), (u'HONORS', 'NNP'), (u'PROJECTS', 'NNP'), (u'Ability', 'NNP'), (u'thinking', 'VBG'), (u'Competetion', 'NNP'), (u',', ','), (u'Award', 'NNP'), (u'.', '.'), (u'ACADEMIC', 'NNP'), (u'2018', 'CD'), (u'2015', 'CD'), (u'2014', 'CD'), (u'2017', 'CD'), (u':', ':'), (u'Recipient', 'NNP'), (u'Currently', 'NNP'), (u'Scholar', 'NNP'), (u'Restaurant', 'NNP'), (u'H', 'NNP'), (u'K', 'NNP'), (u'Work', 'NNP'), (u'Recruit', 'NNP'), (u'UMKC', 'NNP'), (u'April', 'NNP'), (u'ranked', 'VBD'), (u'AWARDS', 'NNP'), (u'Team', 'NNP'), (u'Dec', 'NNP'), (u'ackathon', 'NN'), (u'AND', 'NNP'), (u'Forecasting', 'NNP'), (u'Dean', 'NNP'), (u'aggle', 'VBP'), (u'PERSONAL', 'NNP'), (u'Visitor', 'NNP'), (u'skills', 'VBZ'), (u'Learning', 'NNP'), (u'Ship', 'NNP')]\n[(u'clustering', 'VBG'), (u'combination', 'NN'), (u'This', 'DT'), (u'.', '.'), (u'project', 'NN'), (u'problem', 'NN'), (u'regression', 'NN')]\n[(u':', ':'), (u'Knowledge', 'NN'), (u'Identifying', 'NNP'), (u'og', 'NN'), (u'Transfer', 'NNP'), (u'breed', 'VBD'), (u'-', ':'), (u'ResNet', 'NN'), (u'tuning', 'VBG'), (u'50', 'CD'), (u'images', 'NNS'), (u'CNN', 'NNP'), (u'architecture', 'NN'), (u'human', 'NN'), (u'.', '.'), (u'resembling', 'VBG'), (u'Augmentation', 'NN'), (u'2017', 'CD'), (u'Dec', 'NNP'), 
(u',', ','), (u'Techniques', 'NNP')]\n[(u'right', 'JJ'), (u'Smartcab', 'NNP'), (u'State', 'NNP'), (u'vs', 'NN'), (u'Optimal', 'NNP'), (u'ing', 'NN'), (u'2017', 'CD'), (u'sates', 'VBZ'), (u'space', 'NN'), (u'-', ':'), (u',', ','), (u'.', '.'), (u'Train', 'NN'), (u'policy', 'NN'), (u'learned', 'VBN'), (u':', ':'), (u'Drive', 'JJ'), (u'Reinforcement', 'NNP'), (u'Q', 'NNP'), (u'Nov', 'NNP'), (u'Identify', 'NNP'), (u'Learning', 'NNP'), (u'Techniques', 'NNP')]\n[(u'Clustering', 'VBG'), (u'Visualization', 'NNP'), (u'Dimensionality', 'NNP'), (u'features', 'VBZ'), (u'Biplot', 'NNP'), (u'Segments', 'NNP'), (u'transfor', 'JJ'), (u'Feature', 'NNP'), (u'highly', 'RB'), (u'tection', 'NN'), (u'Reduction', 'NNP'), (u'using', 'VBG'), (u'Visualizing', 'VBG'), (u'principal', 'JJ'), (u'Creating', 'NNP'), (u'-', ':'), (u',', ','), (u'.', '.'), (u'maps', 'NNS'), (u'mation', 'NN'), (u'Nov', 'NNP'), (u':', ':'), (u'Data', 'NNP'), (u'transformation', 'NN'), (u'Customer', 'NNP'), (u'outlier', 'NN'), (u'component', 'NN'), (u'KMeans', 'NNP'), (u'heat', 'NN'), (u'R', 'NNP'), (u'log', 'NN'), (u'feature', 'NN'), (u'removed', 'VBN'), (u'data', 'NNS'), (u'2017', 'CD'), (u'matrix', 'NN'), (u'correlated', 'VBD'), (u'e', 'JJ'), (u'elevance', 'NN'), (u'Recovery', 'NNP'), (u'analysis', 'NN'), (u'skewed', 'VBD'), (u'Cluster', 'NNP'), (u'scatter', 'NN'), (u'Techniques', 'NNS')]\n[(u'Binary', 'NNP'), (u'Continuous', 'NNP'), (u'Tuning', 'NNP'), (u'Initial', 'NNP'), (u'Feature', 'NNP'), (u'Split', 'NNP'), (u'Finding', 'NNP'), (u'.', '.'), (u'Evaluation', 'NNP'), (u'Oct', 'NNP'), (u'Donors', 'NNP'), (u'Classfication', 'NNP'), (u'CharityML', 'NNP'), (u'Numerical', 'NNP'), (u'-', ':'), (u',', ','), (u'Final', 'NNP'), (u'Grid', 'NNP'), (u'2017', 'CD'), (u':', ':'), (u'Data', 'NNP'), (u'Preprocessin', 'NNP'), (u'Search', 'NNP'), (u'Skewed', 'NNP'), (u'Exploration', 'NNP'), (u'Model', 'NNP'), (u'Normalizing', 'NNP'), (u'Extracting', 'NNP'), (u'Selection', 'NNP'), (u'Shuffle', 'NNP'), (u'Features', 'NNP'), (u'g', 'VBP'), (u'Transforming', 'VBG'), (u'Choosing', 'NNP'), (u'Importance', 'NNP'), (u'Best', 'NNP'), (u'Techniques', 'NNS')]\n[(u'Model', 'NNP'), (u'Search', 'NNP'), (u'Validation', 'NNP'), (u'Aug', 'NNP'), (u'Boston', 'NNP'), (u'Housing', 'NNP'), (u'Predicting', 'NNP'), (u'-', ':'), (u',', ','), (u'Curves', 'VBZ'), (u'Cross', 'NNP'), (u'Graphs', 'NNP'), (u'Complexity', 'NNP'), (u'Grid', 'NNP'), (u'Learning', 'NNP'), (u'.', '.'), (u'Prices', 'NNS'), (u'2017', 'CD'), (u':', ':'), (u'Regression', 'NN'), (u'Techniques', 'NNS')]\n[(u'Visualization', 'NNP'), (u'Java', 'NNP'), (u'Recovery', 'NNP'), (u'Aug', 'NNP'), (u'LOC', 'NNP'), (u'FreeBase', 'NNP'), (u'Mashup', 'NNP'), (u'Winner', 'NNP'), (u'2nd', 'CD'), (u'Jan', 'NNP'), (u'Extraction', 'NNP'), (u'Pickup', 'NNP'), (u'Giraph', 'NNP'), (u'Apache', 'NNP'), (u'Tweets', 'NNP'), (u'JQuery', 'NNP'), (u'Audio', 'NNP'), (u')', ')'), (u'Big', 'NNP'), (u'-', ':'), (u',', ','), (u'3', 'CD'), (u'HTML', 'NNP'), (u'(', '('), (u'Hackathon', 'NNP'), (u'2015', 'CD'), (u'2014', 'CD'), (u'Ebola', 'NNP'), (u':', ':'), (u'Data', 'NN'), (u'2013', 'CD'), (u'2012', 'CD'), (u'files', 'NNS'), (u'PickMeUp', 'NNP'), (u'full', 'JJ'), (u'K', 'NNP'), (u'Bootstrap', 'NNP'), (u'Days', 'NNP'), (u'UMKC', 'NNP'), (u'April', 'NNP'), (u'Airport', 'NNP'), (u'profile', 'NN'), (u'Architecture', 'NNP'), (u'Y', 'NNP'), (u'using', 'VBG'), (u'100', 'CD'), (u'Dec', 'NNP'), (u'Technologies', 'NNPS'), (u'CSS', 'NNP'), (u'Steganography', 'NNP'), (u'oTube', 'MD'), (u'MongoDB', 'NNP'), (u'App', 'NNP'), (u'project', 'NN'), (u'May', 
'NNP'), (u'API', 'NNP'), (u'Analyzing', 'NNP'), (u'Hack', 'NNP'), (u'Battle', 'NNP'), (u'Video', 'NNP'), (u'view', 'NN')]\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code"
]
] |
d0ec57db10d22b106da56ba6902b09f96cc1a7b0 | 4,264 | ipynb | Jupyter Notebook | notebooks/Widget.ipynb | SamantaDilm/voila | a420cccbf0caa7c131b59819eac89375d06d4b9a | [
"BSD-3-Clause"
] | null | null | null | notebooks/Widget.ipynb | SamantaDilm/voila | a420cccbf0caa7c131b59819eac89375d06d4b9a | [
"BSD-3-Clause"
] | null | null | null | notebooks/Widget.ipynb | SamantaDilm/voila | a420cccbf0caa7c131b59819eac89375d06d4b9a | [
"BSD-3-Clause"
] | null | null | null | 39.119266 | 598 | 0.660178 | [
[
[
"import ipywidgets as widgets\nimport pandas as pd\nimport numpy as np\nfrom ipywidgets import interact\nimport matplotlib.pyplot as plt\n\n__author__ = \"Samanta Dil Mohamed\"\n__StudentID__ = \"i6222109\"\n__date__ = \"08/02/2022\"\n\nprint(f'Finally, and sadly, this is the final project. \\nFor this project, my research question is: \"Does the human development index(HDI) have a relation with the amount of vaccinations taken?\"')\nprint(f'Thereafter, I wonder:\" Does the amount of vaccinations make sense if you take the total amount of deaths or stringency index per country into account?\"\\n')\nprint(f'To answer this research question, the covid database containing the percentage of people vaccinated, fully vaccinated and boostered have been shown.')\nprint(f'Additionally, the human development index, stringency index and total deaths per million inhabitants have been shown below: ')\n\n",
"_____no_output_____"
],
[
"covid = pd.read_csv(\"https://covid.ourworldindata.org/data/owid-covid-data.csv\", parse_dates=['date'])\n\ndef create_plot( yaxis):\n plt.scatter(covid.human_development_index,y = covid[yaxis], color= 'plum')\n plt.xlabel(\"human_development_index\")\n plt.ylabel(yaxis)\n plt.show()",
"_____no_output_____"
],
[
"print('With this widget all research questions can be answered. \\nJust pick the y-value and see for youself what influence the HDI has on the y-value.' )\n",
"_____no_output_____"
],
[
"widgets.interact(create_plot, yaxis=['people_vaccinated_per_hundred', 'people_fully_vaccinated_per_hundred', 'total_boosters_per_hundred', 'total_deaths_per_million', 'stringency_index'])",
"_____no_output_____"
],
[
"print('Conclusion:')\nprint('After analyzing these figures, we can conclude that the human development index does have \\na relation with the amount of vaccinations taken. Especially for the booster vaccine, a clear relation is visible. \\nThe amount of deaths each country had during the crisis, also had a slight influence on the amount of vaccines taken. \\nsome similarities betweeen the graph of the deaths and vaccines were visible. However, stringency had very little effect\\non the amount of vaccines taken. Almost every country, no mattter the HDI, had very strict regulations during the corona crisis ')",
"Conclusion:\nAfter analyzing these figures, we can conclude that the human development index does have \na relation with the amount of vaccinations taken. Especially for the booster vaccine, a clear relation is visible. \nThe amount of deaths each country had during the crisis, also had a slight influence on the amount of vaccines taken. \nsome similarities betweeen the graph of the deaths and vaccines were visible. However, stringency had very little effect\non the amount of vaccines taken. Almost every country, no mattter the HDI, had very strict regulations during the corona crisis \n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code"
]
] |
d0ec5f6f163a52b40d47350814dece846fd8de1a | 5,785 | ipynb | Jupyter Notebook | notebooks/Examples/JSON-LD Playground.ipynb | dfreeman06/wxyz | 663cf6593f4c0ca12f7b94b61e34c0a8d3cbcdfd | [
"BSD-3-Clause"
] | 1 | 2021-06-20T12:21:27.000Z | 2021-06-20T12:21:27.000Z | notebooks/Examples/JSON-LD Playground.ipynb | dfreeman06/wxyz | 663cf6593f4c0ca12f7b94b61e34c0a8d3cbcdfd | [
"BSD-3-Clause"
] | null | null | null | notebooks/Examples/JSON-LD Playground.ipynb | dfreeman06/wxyz | 663cf6593f4c0ca12f7b94b61e34c0a8d3cbcdfd | [
"BSD-3-Clause"
] | null | null | null | 28.638614 | 95 | 0.480035 | [
[
[
"import ipywidgets as W\nfrom wxyz.jsonld.widget_jsonld import Expand, Compact, Flatten, Frame, Normalize\nfrom wxyz.lab.widget_dock import DockBox \nfrom wxyz.lab.widget_editor import Editor\nfrom wxyz.core.widget_json import JSON",
"_____no_output_____"
],
[
"flex = lambda x=1: dict(layout=dict(flex=f\"{x}\"))",
"_____no_output_____"
],
[
"context = JSON(\"\"\"{\n \"@context\": {\n \"@vocab\": \"http://schema.org/\" \n }\n}\"\"\")\ndocument = JSON(\"\"\"{\n \"@graph\": [{\n \"@type\": \"Person\",\n \"@id\": \"this-guy\",\n \"name\": \"Jekyll\",\n \"jobTitle\": \"Doctor\"\n },{\n \"@type\": \"Person\",\n \"@id\": \"this-guy\",\n \"name\": \"Hyde\",\n \"jobTitle\": \"Mister\"\n }]\n}\"\"\")",
"_____no_output_____"
],
[
"context_source = Editor(description=\"JSON-LD Context\", **flex())\ndocument_source = Editor(description=\"JSON Document\", **flex())\nW.jslink((context, \"source\"), (context_source, \"value\"))\nW.jslink((document, \"source\"), (document_source, \"value\"))",
"_____no_output_____"
],
[
"expand = Expand()\nexpand_output = Editor(description=\"Expanded\")\nW.jslink((expand, \"value\"), (expand_output, \"value\"))\nW.jslink((document, \"value\"), (expand, \"source\"))\nW.jslink((context, \"value\"), (expand, \"expand_context\"))",
"_____no_output_____"
],
[
"compact = Compact()\ncompact_output = Editor(description=\"Compacted\")\nW.jslink((compact, \"value\"), (compact_output, \"value\"))\nW.jslink((document, \"value\"), (compact, \"source\"))\nW.jslink((context, \"value\"), (compact, \"context\"))\nW.jslink((context, \"value\"), (compact, \"expand_context\"))",
"_____no_output_____"
],
[
"flatten = Flatten()\nflatten_output = Editor(description=\"Flattened\")\nW.jslink((flatten, \"value\"), (flatten_output, \"value\"))\nW.jslink((document, \"value\"), (flatten, \"source\"))\nW.jslink((context, \"value\"), (flatten, \"context\"))\nW.jslink((context, \"value\"), (flatten, \"expand_context\"))",
"_____no_output_____"
],
[
"error = Editor(\"errors will appear here\", description=\"errors be here\", **flex(1))\nW.jslink((expand, \"error\"), (error, \"value\"))\nW.jslink((compact, \"error\"), (error, \"value\"))\nW.jslink((flatten, \"error\"), (error, \"value\"))",
"_____no_output_____"
],
[
"jsonld_playground = DockBox([\n document_source, \n context_source, \n expand_output, \n compact_output, \n flatten_output, \n error\n], layout=dict(height=\"60vh\"))",
"_____no_output_____"
],
[
"@jsonld_playground.on_displayed\ndef on_display(*args, **kwargs):\n jsonld_playground.dock_layout = {\n 'type': 'split-area',\n 'orientation': 'horizontal',\n 'children': [\n {'type': 'split-area', 'orientation': 'vertical', 'children': [\n {'type': 'tab-area', 'widgets': [0], 'currentIndex': 0},\n {'type': 'tab-area', 'widgets': [1], 'currentIndex': 0},\n ], 'sizes': [2, 1]},\n\n {'type': 'split-area', 'orientation': 'vertical', 'children': [\n {'type': 'tab-area', 'widgets': [2], 'currentIndex': 0},\n {'type': 'tab-area', 'widgets': [3], 'currentIndex': 0},\n ], 'sizes': [1, 1]},\n\n {'type': 'split-area', 'orientation': 'vertical', 'children': [\n {'type': 'tab-area', 'widgets': [4], 'currentIndex': 0},\n {'type': 'tab-area', 'widgets': [5], 'currentIndex': 0}\n ], 'sizes': [1, 1]},\n ],\n 'sizes': [1, 1, 1]\n }",
"_____no_output_____"
],
[
"jsonld_playground",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d0ec5f759744d98bc719cfe9cb5ebfe92e08ec9c | 3,925 | ipynb | Jupyter Notebook | Index.ipynb | SalchiPapa/CursoPython | 4cf6b50f30bf30862abe105963175755989fd31b | [
"CC0-1.0"
] | null | null | null | Index.ipynb | SalchiPapa/CursoPython | 4cf6b50f30bf30862abe105963175755989fd31b | [
"CC0-1.0"
] | null | null | null | Index.ipynb | SalchiPapa/CursoPython | 4cf6b50f30bf30862abe105963175755989fd31b | [
"CC0-1.0"
] | null | null | null | 35.044643 | 125 | 0.637452 | [
[
[
"# A Whirlwind Tour of Python",
"_____no_output_____"
],
[
"*Jake VanderPlas*",
"_____no_output_____"
],
[
"<img src=\"fig/cover-large.gif\">",
"_____no_output_____"
],
[
"These are the Jupyter Notebooks behind my O'Reilly report,\n[*A Whirlwind Tour of Python*](http://www.oreilly.com/programming/free/a-whirlwind-tour-of-python.csp).\nThe full notebook listing is available [on Github](https://github.com/jakevdp/WhirlwindTourOfPython).\n\n*A Whirlwind Tour of Python* is a fast-paced introduction to essential\ncomponents of the Python language for researchers and developers who are\nalready familiar with programming in another language.\n\nThe material is particularly aimed at those who wish to use Python for data \nscience and/or scientific programming, and in this capacity serves as an\nintroduction to my upcoming book, *The Python Data Science Handbook*.\nThese notebooks are adapted from lectures and workshops I've given on these\ntopics at University of Washington and at various conferences, meetings, and\nworkshops around the world.",
"_____no_output_____"
],
[
"## Index\n\n1. [Introduction](00-Introduction.ipynb)\n2. [How to Run Python Code](01-How-to-Run-Python-Code.ipynb)\n3. [Basic Python Syntax](02-Basic-Python-Syntax.ipynb)\n4. [Python Semantics: Variables](03-Semantics-Variables.ipynb)\n5. [Python Semantics: Operators](04-Semantics-Operators.ipynb)\n6. [Built-In Scalar Types](05-Built-in-Scalar-Types.ipynb)\n7. [Built-In Data Structures](06-Built-in-Data-Structures.ipynb)\n8. [Control Flow Statements](07-Control-Flow-Statements.ipynb)\n9. [Defining Functions](08-Defining-Functions.ipynb)\n10. [Errors and Exceptions](09-Errors-and-Exceptions.ipynb)\n11. [Iterators](10-Iterators.ipynb)\n12. [List Comprehensions](11-List-Comprehensions.ipynb)\n13. [Generators and Generator Expressions](12-Generators.ipynb)\n14. [Modules and Packages](13-Modules-and-Packages.ipynb)\n15. [Strings and Regular Expressions](14-Strings-and-Regular-Expressions.ipynb)\n16. [Preview of Data Science Tools](15-Preview-of-Data-Science-Tools.ipynb)\n17. [Resources for Further Learning](16-Further-Resources.ipynb)\n18. [Appendix: Code To Reproduce Figures](17-Figures.ipynb)",
"_____no_output_____"
],
[
"## License\n\nThis material is released under the \"No Rights Reserved\" [CC0](LICENSE)\nlicense, and thus you are free to re-use, modify, build-on, and enhance\nthis material for any purpose.\n\nThat said, I request (but do not require) that if you use or adapt this material,\nyou include a proper attribution and/or citation; for example\n\n> *A Whirlwind Tour of Python* by Jake VanderPlas (O’Reilly). Copyright 2016 O’Reilly Media, Inc., 978-1-491-96465-1\n\nRead more about CC0 [here](https://creativecommons.org/share-your-work/public-domain/cc0/).",
"_____no_output_____"
]
]
] | [
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
d0ec606d56bf23519462fb3271b2c5bacd86f9b6 | 56,293 | ipynb | Jupyter Notebook | courses/machine_learning/deepdive/10_recommend/content_based_using_neural_networks.ipynb | Keetaekpark/training-data-analyst | 4a2f07fe20a6060f4ac92d2ed667cdea192cb2b8 | [
"Apache-2.0"
] | 2 | 2020-06-23T11:33:45.000Z | 2020-07-31T15:57:18.000Z | courses/machine_learning/deepdive/10_recommend/content_based_using_neural_networks.ipynb | Keetaekpark/training-data-analyst | 4a2f07fe20a6060f4ac92d2ed667cdea192cb2b8 | [
"Apache-2.0"
] | 11 | 2020-01-28T22:37:30.000Z | 2022-03-11T23:44:20.000Z | courses/machine_learning/deepdive/10_recommend/content_based_using_neural_networks.ipynb | Keetaekpark/training-data-analyst | 4a2f07fe20a6060f4ac92d2ed667cdea192cb2b8 | [
"Apache-2.0"
] | null | null | null | 54.336873 | 643 | 0.694154 | [
[
[
"## Content-Based Filtering Using Neural Networks",
"_____no_output_____"
],
[
"This notebook relies on files created in the [content_based_preproc.ipynb](./content_based_preproc.ipynb) notebook. Be sure to run the code in there before completing this notebook. \nAlso, we'll be using the **python3** kernel from here on out so don't forget to change the kernel if it's still Python2.",
"_____no_output_____"
],
[
"This lab illustrates:\n1. how to build feature columns for a model using tf.feature_column\n2. how to create custom evaluation metrics and add them to Tensorboard\n3. how to train a model and make predictions with the saved model",
"_____no_output_____"
],
[
"Tensorflow Hub should already be installed. You can check that it is by using \"pip freeze\".",
"_____no_output_____"
]
],
[
[
"%bash\npip freeze | grep tensor",
"tensorboard==1.8.0\ntensorflow==1.8.0\ntensorflow-hub==0.1.1\n"
]
],
[
[
"If 'tensorflow-hub' isn't one of the outputs above, then you'll need to install it. Uncomment the cell below and execute the commands. After doing the pip install, click **\"Reset Session\"** on the notebook so that the Python environment picks up the new packages.",
"_____no_output_____"
]
],
[
[
"#%bash\n#pip install tensorflow-hub",
"_____no_output_____"
],
[
"import os\nimport tensorflow as tf\nimport numpy as np\nimport tensorflow_hub as hub\nimport shutil\n\nPROJECT = 'cloud-training-demos' # REPLACE WITH YOUR PROJECT ID\nBUCKET = 'cloud-training-demos-ml' # REPLACE WITH YOUR BUCKET NAME\nREGION = 'us-central1' # REPLACE WITH YOUR BUCKET REGION e.g. us-central1\n\n# do not change these\nos.environ['PROJECT'] = PROJECT\nos.environ['BUCKET'] = BUCKET\nos.environ['REGION'] = REGION\nos.environ['TFVERSION'] = '1.8'",
"/usr/local/envs/py3env/lib/python3.5/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n from ._conv import register_converters as _register_converters\n"
],
[
"%bash\ngcloud config set project $PROJECT\ngcloud config set compute/region $REGION",
"Updated property [core/project].\nUpdated property [compute/region].\n"
]
],
[
[
"### Build the feature columns for the model.",
"_____no_output_____"
],
[
"To start, we'll load the list of categories, authors and article ids we created in the previous **Create Datasets** notebook.",
"_____no_output_____"
]
],
[
[
"categories_list = open(\"categories.txt\").read().splitlines()\nauthors_list = open(\"authors.txt\").read().splitlines()\ncontent_ids_list = open(\"content_ids.txt\").read().splitlines()\nmean_months_since_epoch = 523",
"_____no_output_____"
]
],
[
[
"In the cell below we'll define the feature columns to use in our model. If necessary, remind yourself the [various feature columns](https://www.tensorflow.org/api_docs/python/tf/feature_column) to use. \nFor the embedded_title_column feature column, use a Tensorflow Hub Module to create an embedding of the article title. Since the articles and titles are in German, you'll want to use a German language embedding module. \nExplore the text embedding Tensorflow Hub modules [available here](https://alpha.tfhub.dev/). Filter by setting the language to 'German'. The 50 dimensional embedding should be sufficient for our purposes. ",
"_____no_output_____"
]
],
[
[
"embedded_title_column = hub.text_embedding_column(\n key=\"title\", \n module_spec=\"https://tfhub.dev/google/nnlm-de-dim50/1\",\n trainable=False)\n\ncontent_id_column = tf.feature_column.categorical_column_with_hash_bucket(\n key=\"content_id\",\n hash_bucket_size= len(content_ids_list) + 1)\nembedded_content_column = tf.feature_column.embedding_column(\n categorical_column=content_id_column,\n dimension=10)\n\nauthor_column = tf.feature_column.categorical_column_with_hash_bucket(key=\"author\",\n hash_bucket_size=len(authors_list) + 1)\nembedded_author_column = tf.feature_column.embedding_column(\n categorical_column=author_column,\n dimension=3)\n\ncategory_column_categorical = tf.feature_column.categorical_column_with_vocabulary_list(\n key=\"category\",\n vocabulary_list=categories_list,\n num_oov_buckets=1)\ncategory_column = tf.feature_column.indicator_column(category_column_categorical)\n\nmonths_since_epoch_boundaries = list(range(400,700,20))\nmonths_since_epoch_column = tf.feature_column.numeric_column(\n key=\"months_since_epoch\")\nmonths_since_epoch_bucketized = tf.feature_column.bucketized_column(\n source_column = months_since_epoch_column,\n boundaries = months_since_epoch_boundaries)\n\ncrossed_months_since_category_column = tf.feature_column.indicator_column(tf.feature_column.crossed_column(\n keys = [category_column_categorical, months_since_epoch_bucketized], \n hash_bucket_size = len(months_since_epoch_boundaries) * (len(categories_list) + 1)))\n\nfeature_columns = [embedded_content_column,\n embedded_author_column,\n category_column,\n embedded_title_column,\n crossed_months_since_category_column] ",
"INFO:tensorflow:Using /tmp/tfhub_modules to cache modules.\nINFO:tensorflow:Downloading TF-Hub Module 'https://tfhub.dev/google/nnlm-de-dim50/1'.\nINFO:tensorflow:Downloaded TF-Hub Module 'https://tfhub.dev/google/nnlm-de-dim50/1'.\n"
]
],
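[
[
"# Hypothetical illustration (not part of the original lab): peek at the dense\n# vector one of the columns above produces. Assumes the previous cell has run\n# so months_since_epoch_bucketized is defined. A bucketized column one-hot\n# encodes which 20-month bucket the raw value falls into.\ntoy_features = {\"months_since_epoch\": [[523]]}\ntoy_input = tf.feature_column.input_layer(toy_features, [months_since_epoch_bucketized])\nwith tf.Session() as sess:\n    sess.run(tf.tables_initializer())  # a no-op here, but safe for lookup-backed columns\n    print(sess.run(toy_input))  # a single 1.0 in the bucket containing 523",
"_____no_output_____"
]
],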
[
[
"### Create the input function.\n\nNext we'll create the input function for our model. This input function reads the data from the csv files we created in the previous labs. ",
"_____no_output_____"
]
],
[
[
"record_defaults = [[\"Unknown\"], [\"Unknown\"],[\"Unknown\"],[\"Unknown\"],[\"Unknown\"],[mean_months_since_epoch],[\"Unknown\"]]\ncolumn_keys = [\"visitor_id\", \"content_id\", \"category\", \"title\", \"author\", \"months_since_epoch\", \"next_content_id\"]\nlabel_key = \"next_content_id\"\ndef read_dataset(filename, mode, batch_size = 512):\n def _input_fn():\n def decode_csv(value_column):\n columns = tf.decode_csv(value_column,record_defaults=record_defaults)\n features = dict(zip(column_keys, columns)) \n label = features.pop(label_key) \n return features, label\n\n # Create list of files that match pattern\n file_list = tf.gfile.Glob(filename)\n\n # Create dataset from file list\n dataset = tf.data.TextLineDataset(file_list).map(decode_csv)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n num_epochs = None # indefinitely\n dataset = dataset.shuffle(buffer_size = 10 * batch_size)\n else:\n num_epochs = 1 # end-of-input after this\n\n dataset = dataset.repeat(num_epochs).batch(batch_size)\n return dataset.make_one_shot_iterator().get_next()\n return _input_fn",
"_____no_output_____"
]
],
[
[
"### Create the model and train/evaluate\n\n\nNext, we'll build our model which recommends an article for a visitor to the Kurier.at website. Look through the code below. We use the input_layer feature column to create the dense input layer to our network. This is just a sigle layer network where we can adjust the number of hidden units as a parameter.\n\nCurrently, we compute the accuracy between our predicted 'next article' and the actual 'next article' read next by the visitor. We'll also add an additional performance metric of top 10 accuracy to assess our model. To accomplish this, we compute the top 10 accuracy metric, add it to the metrics dictionary below and add it to the tf.summary so that this value is reported to Tensorboard as well.",
"_____no_output_____"
]
],
[
[
"def model_fn(features, labels, mode, params):\n net = tf.feature_column.input_layer(features, params['feature_columns'])\n for units in params['hidden_units']:\n net = tf.layers.dense(net, units=units, activation=tf.nn.relu)\n # Compute logits (1 per class).\n logits = tf.layers.dense(net, params['n_classes'], activation=None) \n\n predicted_classes = tf.argmax(logits, 1)\n from tensorflow.python.lib.io import file_io\n \n with file_io.FileIO('content_ids.txt', mode='r') as ifp:\n content = tf.constant([x.rstrip() for x in ifp])\n predicted_class_names = tf.gather(content, predicted_classes)\n if mode == tf.estimator.ModeKeys.PREDICT:\n predictions = {\n 'class_ids': predicted_classes[:, tf.newaxis],\n 'class_names' : predicted_class_names[:, tf.newaxis],\n 'probabilities': tf.nn.softmax(logits),\n 'logits': logits,\n }\n return tf.estimator.EstimatorSpec(mode, predictions=predictions)\n table = tf.contrib.lookup.index_table_from_file(vocabulary_file=\"content_ids.txt\")\n labels = table.lookup(labels)\n # Compute loss.\n loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)\n\n # Compute evaluation metrics.\n accuracy = tf.metrics.accuracy(labels=labels,\n predictions=predicted_classes,\n name='acc_op')\n top_10_accuracy = tf.metrics.mean(tf.nn.in_top_k(predictions=logits, \n targets=labels, \n k=10))\n \n metrics = {\n 'accuracy': accuracy,\n 'top_10_accuracy' : top_10_accuracy}\n \n tf.summary.scalar('accuracy', accuracy[1])\n tf.summary.scalar('top_10_accuracy', top_10_accuracy[1])\n\n if mode == tf.estimator.ModeKeys.EVAL:\n return tf.estimator.EstimatorSpec(\n mode, loss=loss, eval_metric_ops=metrics)\n\n # Create training op.\n assert mode == tf.estimator.ModeKeys.TRAIN\n\n optimizer = tf.train.AdagradOptimizer(learning_rate=0.1)\n train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())\n return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)",
"_____no_output_____"
]
],
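[
[
"# Hypothetical toy check (not part of the original lab): tf.nn.in_top_k is the\n# op the top_10_accuracy metric above averages over the batch. Run it with k=2\n# on a 4-class toy problem to see the per-example booleans it returns.\nimport tensorflow as tf\ntoy_logits = tf.constant([[0.1, 3.0, 2.0, 0.5],\n                          [2.0, 0.1, 0.3, 0.2]])\ntoy_labels = tf.constant([2, 3])  # the second label is outside its top 2\nin_top_2 = tf.nn.in_top_k(predictions=toy_logits, targets=toy_labels, k=2)\nwith tf.Session() as sess:\n    print(sess.run(in_top_2))  # expected: [ True False ]",
"_____no_output_____"
]
],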
[
[
"### Train and Evaluate",
"_____no_output_____"
]
],
[
[
"outdir = 'content_based_model_trained'\nshutil.rmtree(outdir, ignore_errors = True) # start fresh each time\ntf.summary.FileWriterCache.clear() # ensure filewriter cache is clear for TensorBoard events file\nestimator = tf.estimator.Estimator(\n model_fn=model_fn,\n model_dir = outdir,\n params={\n 'feature_columns': feature_columns,\n 'hidden_units': [200, 100, 50],\n 'n_classes': len(content_ids_list)\n })\n\ntrain_spec = tf.estimator.TrainSpec(\n input_fn = read_dataset(\"training_set.csv\", tf.estimator.ModeKeys.TRAIN),\n max_steps = 2000)\n\neval_spec = tf.estimator.EvalSpec(\n input_fn = read_dataset(\"test_set.csv\", tf.estimator.ModeKeys.EVAL),\n steps = None,\n start_delay_secs = 30,\n throttle_secs = 60)\n\ntf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)",
"INFO:tensorflow:Using default config.\nINFO:tensorflow:Using config: {'_master': '', '_task_type': 'worker', '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x7f902158b1d0>, '_task_id': 0, '_tf_random_seed': None, '_log_step_count_steps': 100, '_global_id_in_cluster': 0, '_keep_checkpoint_max': 5, '_train_distribute': None, '_save_summary_steps': 100, '_num_worker_replicas': 1, '_service': None, '_is_chief': True, '_save_checkpoints_secs': 600, '_num_ps_replicas': 0, '_model_dir': 'content_based_model_trained', '_keep_checkpoint_every_n_hours': 10000, '_save_checkpoints_steps': None, '_session_config': None, '_evaluation_master': ''}\nINFO:tensorflow:Running training and evaluation locally (non-distributed).\nINFO:tensorflow:Start train and evaluate loop. The evaluate will happen after 60 secs (eval_spec.throttle_secs) or training is finished.\nINFO:tensorflow:Calling model_fn.\nINFO:tensorflow:Initialize variable input_layer/title_hub_module_embedding/module/embeddings/part_0:0 from checkpoint b'/tmp/tfhub_modules/e40ef097142ae1de637df7021ce148ffe836e262/variables/variables' with embeddings\nINFO:tensorflow:Done calling model_fn.\nINFO:tensorflow:Create CheckpointSaverHook.\nINFO:tensorflow:Graph was finalized.\nINFO:tensorflow:Running local_init_op.\nINFO:tensorflow:Done running local_init_op.\nINFO:tensorflow:Saving checkpoints for 1 into content_based_model_trained/model.ckpt.\nINFO:tensorflow:step = 1, loss = 9.656794\nINFO:tensorflow:global_step/sec: 2.07024\nINFO:tensorflow:step = 101, loss = 5.4598856 (48.311 sec)\nINFO:tensorflow:Saving checkpoints for 115 into content_based_model_trained/model.ckpt.\nINFO:tensorflow:Loss for final step: 5.4961843.\nINFO:tensorflow:Calling model_fn.\nINFO:tensorflow:Initialize variable input_layer/title_hub_module_embedding/module/embeddings/part_0:0 from checkpoint b'/tmp/tfhub_modules/e40ef097142ae1de637df7021ce148ffe836e262/variables/variables' with embeddings\nINFO:tensorflow:Done calling model_fn.\nINFO:tensorflow:Starting evaluation at 2018-10-02-12:24:30\nINFO:tensorflow:Graph was finalized.\nINFO:tensorflow:Restoring parameters from content_based_model_trained/model.ckpt-115\nINFO:tensorflow:Running local_init_op.\nINFO:tensorflow:Done running local_init_op.\nINFO:tensorflow:Finished evaluation at 2018-10-02-12:24:48\nINFO:tensorflow:Saving dict for global step 115: accuracy = 0.031016836, global_step = 115, loss = 5.426173, top_10_accuracy = 0.20047659\nINFO:tensorflow:Calling model_fn.\nINFO:tensorflow:Initialize variable input_layer/title_hub_module_embedding/module/embeddings/part_0:0 from checkpoint b'/tmp/tfhub_modules/e40ef097142ae1de637df7021ce148ffe836e262/variables/variables' with embeddings\nINFO:tensorflow:Done calling model_fn.\nINFO:tensorflow:Create CheckpointSaverHook.\nINFO:tensorflow:Graph was finalized.\nINFO:tensorflow:Restoring parameters from content_based_model_trained/model.ckpt-115\nINFO:tensorflow:Running local_init_op.\nINFO:tensorflow:Done running local_init_op.\nINFO:tensorflow:Saving checkpoints for 116 into content_based_model_trained/model.ckpt.\nINFO:tensorflow:step = 116, loss = 5.378599\nINFO:tensorflow:global_step/sec: 2.02106\nINFO:tensorflow:step = 216, loss = 5.2218914 (49.486 sec)\nINFO:tensorflow:Saving checkpoints for 230 into content_based_model_trained/model.ckpt.\nINFO:tensorflow:Loss for final step: 5.2376986.\nINFO:tensorflow:Calling model_fn.\nINFO:tensorflow:Initialize variable input_layer/title_hub_module_embedding/module/embeddings/part_0:0 from checkpoint 
b'/tmp/tfhub_modules/e40ef097142ae1de637df7021ce148ffe836e262/variables/variables' with embeddings\nINFO:tensorflow:Done calling model_fn.\nINFO:tensorflow:Starting evaluation at 2018-10-02-12:25:52\nINFO:tensorflow:Graph was finalized.\nINFO:tensorflow:Restoring parameters from content_based_model_trained/model.ckpt-230\nINFO:tensorflow:Running local_init_op.\nINFO:tensorflow:Done running local_init_op.\nINFO:tensorflow:Finished evaluation at 2018-10-02-12:26:10\nINFO:tensorflow:Saving dict for global step 230: accuracy = 0.02824329, global_step = 230, loss = 5.2551446, top_10_accuracy = 0.23301691\nINFO:tensorflow:Calling model_fn.\nINFO:tensorflow:Initialize variable input_layer/title_hub_module_embedding/module/embeddings/part_0:0 from checkpoint b'/tmp/tfhub_modules/e40ef097142ae1de637df7021ce148ffe836e262/variables/variables' with embeddings\nINFO:tensorflow:Done calling model_fn.\nINFO:tensorflow:Create CheckpointSaverHook.\nINFO:tensorflow:Graph was finalized.\nINFO:tensorflow:Restoring parameters from content_based_model_trained/model.ckpt-230\nINFO:tensorflow:Running local_init_op.\nINFO:tensorflow:Done running local_init_op.\nINFO:tensorflow:Saving checkpoints for 231 into content_based_model_trained/model.ckpt.\nINFO:tensorflow:step = 231, loss = 5.2069583\nINFO:tensorflow:global_step/sec: 2.02675\nINFO:tensorflow:step = 331, loss = 5.187145 (49.348 sec)\nINFO:tensorflow:Saving checkpoints for 345 into content_based_model_trained/model.ckpt.\nINFO:tensorflow:Loss for final step: 5.129216.\nINFO:tensorflow:Calling model_fn.\nINFO:tensorflow:Initialize variable input_layer/title_hub_module_embedding/module/embeddings/part_0:0 from checkpoint b'/tmp/tfhub_modules/e40ef097142ae1de637df7021ce148ffe836e262/variables/variables' with embeddings\nINFO:tensorflow:Done calling model_fn.\nINFO:tensorflow:Starting evaluation at 2018-10-02-12:27:13\nINFO:tensorflow:Graph was finalized.\nINFO:tensorflow:Restoring parameters from content_based_model_trained/model.ckpt-345\nINFO:tensorflow:Running local_init_op.\nINFO:tensorflow:Done running local_init_op.\nINFO:tensorflow:Finished evaluation at 2018-10-02-12:27:31\nINFO:tensorflow:Saving dict for global step 345: accuracy = 0.036837377, global_step = 345, loss = 5.176561, top_10_accuracy = 0.2491113\nINFO:tensorflow:Calling model_fn.\nINFO:tensorflow:Initialize variable input_layer/title_hub_module_embedding/module/embeddings/part_0:0 from checkpoint b'/tmp/tfhub_modules/e40ef097142ae1de637df7021ce148ffe836e262/variables/variables' with embeddings\nINFO:tensorflow:Done calling model_fn.\nINFO:tensorflow:Create CheckpointSaverHook.\nINFO:tensorflow:Graph was finalized.\nINFO:tensorflow:Restoring parameters from content_based_model_trained/model.ckpt-345\nINFO:tensorflow:Running local_init_op.\nINFO:tensorflow:Done running local_init_op.\nINFO:tensorflow:Saving checkpoints for 346 into content_based_model_trained/model.ckpt.\nINFO:tensorflow:step = 346, loss = 5.0593753\nINFO:tensorflow:global_step/sec: 2.0316\nINFO:tensorflow:step = 446, loss = 5.0945272 (49.228 sec)\nINFO:tensorflow:Saving checkpoints for 460 into content_based_model_trained/model.ckpt.\nINFO:tensorflow:Loss for final step: 5.102105.\nINFO:tensorflow:Calling model_fn.\nINFO:tensorflow:Initialize variable input_layer/title_hub_module_embedding/module/embeddings/part_0:0 from checkpoint b'/tmp/tfhub_modules/e40ef097142ae1de637df7021ce148ffe836e262/variables/variables' with embeddings\nINFO:tensorflow:Done calling model_fn.\nINFO:tensorflow:Starting evaluation at 
2018-10-02-12:28:34\nINFO:tensorflow:Graph was finalized.\nINFO:tensorflow:Restoring parameters from content_based_model_trained/model.ckpt-460\nINFO:tensorflow:Running local_init_op.\nINFO:tensorflow:Done running local_init_op.\nINFO:tensorflow:Finished evaluation at 2018-10-02-12:28:52\nINFO:tensorflow:Saving dict for global step 460: accuracy = 0.036524866, global_step = 460, loss = 5.146585, top_10_accuracy = 0.2616118\nINFO:tensorflow:Calling model_fn.\nINFO:tensorflow:Initialize variable input_layer/title_hub_module_embedding/module/embeddings/part_0:0 from checkpoint b'/tmp/tfhub_modules/e40ef097142ae1de637df7021ce148ffe836e262/variables/variables' with embeddings\nINFO:tensorflow:Done calling model_fn.\nINFO:tensorflow:Create CheckpointSaverHook.\nINFO:tensorflow:Graph was finalized.\nINFO:tensorflow:Restoring parameters from content_based_model_trained/model.ckpt-460\nINFO:tensorflow:Running local_init_op.\nINFO:tensorflow:Done running local_init_op.\nINFO:tensorflow:Saving checkpoints for 461 into content_based_model_trained/model.ckpt.\nINFO:tensorflow:step = 461, loss = 5.0675087\nINFO:tensorflow:global_step/sec: 1.98427\nINFO:tensorflow:step = 561, loss = 5.087888 (50.403 sec)\nINFO:tensorflow:Saving checkpoints for 573 into content_based_model_trained/model.ckpt.\n"
]
],
[
[
"This takes a while to complete but in the end, I get about **30% top 10 accuracy**.",
"_____no_output_____"
],
[
"### Make predictions with the trained model. \n\nWith the model now trained, we can make predictions by calling the predict method on the estimator. Let's look at how our model predicts on the first five examples of the training set. \nTo start, we'll create a new file 'first_5.csv' which contains the first five elements of our training set. We'll also save the target values to a file 'first_5_content_ids' so we can compare our results. ",
"_____no_output_____"
]
],
[
[
"%%bash\nhead -5 training_set.csv > first_5.csv\nhead first_5.csv\nawk -F \"\\\"*,\\\"*\" '{print $2}' first_5.csv > first_5_content_ids",
"1000148716229112932,299913368,News,U4-Störung legt Wiener Frühverkehr lahm ,Yvonne Widler,574,299931241\n1000148716229112932,299931241,Stars & Kultur,Regisseur Michael Haneke kritisiert Flüchtlingspolitik,,574,299913879\n1000360453832106474,299925700,Lifestyle,Nach Tod von Vater: Tochter bekommt jedes Jahr Blumen,Marlene Patsalidis,574,299922662\n1000360453832106474,299922662,Lifestyle,Australischer Fernsehstar rechtfertigt Sexismus mit Asperger-Syndrom,Marlene Patsalidis,574,299826775\n1001846185946874596,299930679,News,Wintereinbruch naht: Erster Schnee im Osten möglich,Daniela Wahl,574,299930679\n"
]
],
[
[
"Recall, to make predictions on the trained model we pass a list of examples through the input function. Complete the code below to make predicitons on the examples contained in the \"first_5.csv\" file we created above. ",
"_____no_output_____"
]
],
[
[
"output = list(estimator.predict(input_fn=read_dataset(\"first_5.csv\", tf.estimator.ModeKeys.PREDICT)))",
"_____no_output_____"
],
[
"import numpy as np\nrecommended_content_ids = [np.asscalar(d[\"class_names\"]).decode('UTF-8') for d in output]\ncontent_ids = open(\"first_5_content_ids\").read().splitlines()",
"_____no_output_____"
]
],
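[
[
"# Hypothetical quick check (not in the original lab): line up each example's\n# actual next article id with the model's top recommendation, using the\n# content_ids and recommended_content_ids lists built above.\nfor actual, recommended in zip(content_ids, recommended_content_ids):\n    print(\"actual: {:>12}  recommended: {:>12}\".format(actual, recommended))",
"_____no_output_____"
]
],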
[
[
"Finally, we map the content id back to the article title. Let's compare our model's recommendation for the first example. This can be done in BigQuery. Look through the query below and make sure it is clear what is being returned.",
"_____no_output_____"
]
],
[
[
"import google.datalab.bigquery as bq\nrecommended_title_sql=\"\"\"\n#standardSQL\nSELECT\n(SELECT MAX(IF(index=6, value, NULL)) FROM UNNEST(hits.customDimensions)) AS title\nFROM `cloud-training-demos.GA360_test.ga_sessions_sample`, \n UNNEST(hits) AS hits\nWHERE \n # only include hits on pages\n hits.type = \"PAGE\"\n AND (SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(hits.customDimensions)) = \\\"{}\\\"\nLIMIT 1\"\"\".format(recommended_content_ids[0])\n\ncurrent_title_sql=\"\"\"\n#standardSQL\nSELECT\n(SELECT MAX(IF(index=6, value, NULL)) FROM UNNEST(hits.customDimensions)) AS title\nFROM `cloud-training-demos.GA360_test.ga_sessions_sample`, \n UNNEST(hits) AS hits\nWHERE \n # only include hits on pages\n hits.type = \"PAGE\"\n AND (SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(hits.customDimensions)) = \\\"{}\\\"\nLIMIT 1\"\"\".format(content_ids[0])\nrecommended_title = bq.Query(recommended_title_sql).execute().result().to_dataframe()['title'].tolist()[0]\ncurrent_title = bq.Query(current_title_sql).execute().result().to_dataframe()['title'].tolist()[0]\nprint(\"Current title: {} \".format(current_title))\nprint(\"Recommended title: {}\".format(recommended_title))",
"Current title: U4-Störung legt Wiener Frühverkehr lahm \nRecommended title: Britische Urlauberin filmte in Australien eigenen Krokodil-Angriff\n"
]
],
[
[
"### Tensorboard\n\nAs usual, we can monitor the performance of our training job using Tensorboard. ",
"_____no_output_____"
]
],
[
[
"from google.datalab.ml import TensorBoard\nTensorBoard().start('content_based_model_trained')",
"_____no_output_____"
],
[
"for pid in TensorBoard.list()['pid']:\n TensorBoard().stop(pid)\n print(\"Stopped TensorBoard with pid {}\".format(pid))",
"Stopped TensorBoard with pid 12206\nStopped TensorBoard with pid 12311\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
d0ec67d75715174dd512dc11ddf8a8ed9a98c765 | 3,026 | ipynb | Jupyter Notebook | tool-configuration/Anaconda Setup Tutorial/AnacondaTutorial.ipynb | samlexrod/sakeoflearning | c8df791b9d5b9bc3ba287b6342b79236c7cebd40 | [
"MIT-0"
] | 4 | 2019-03-07T06:11:49.000Z | 2019-11-15T16:25:26.000Z | tool-configuration/Anaconda Setup Tutorial/AnacondaTutorial.ipynb | sammyrod/Sake_of_Learning | c8df791b9d5b9bc3ba287b6342b79236c7cebd40 | [
"MIT-0"
] | null | null | null | tool-configuration/Anaconda Setup Tutorial/AnacondaTutorial.ipynb | sammyrod/Sake_of_Learning | c8df791b9d5b9bc3ba287b6342b79236c7cebd40 | [
"MIT-0"
] | null | null | null | 34.386364 | 182 | 0.627231 | [
[
[
"# Anaconda Distribution",
"_____no_output_____"
],
[
"To install Anaconda distribution, you must go to their website www.anaconda.com. Or you can click this link: [Anaconda Distribution](https://www.anaconda.com/distribution/)",
"_____no_output_____"
],
[
"# Anaconda Distribution on Another Drive",
"_____no_output_____"
],
[
"If your anaconda installation is in another drive other than in the system (os) drive, you must add several anaconda distribution paths to PATH in the Environment Variables.\n\nYou can go to the to the environment variables by typing ***Environment Variables*** in the start menu or search bar.\nThese are the steps:\n1. Click on the ***Edit the environment variables*** (not the one ending on **for your account**).\n1. Clieck on the ***Environment Variables...*** button on the botton right.\n1. Double-click on the Path row of the User variables for [yourusername] or the System variables for all users.\n1. Click on new and add these 3 paths:\n * D:\\ProgramData\\Anaconda3\\Scripts\n * D:\\ProgramData\\Anaconda3\n * D:\\ProgramData\\Anaconda3\\Library\\bin\n\nNote: You can alias the anaconda path by creating a new system variable\nE.g. CONDA_HOME='D:\\ProgramData\\Anaconda3'. Click OK twice to exit the Environment Variables window.\nYou can add path using shortcuts just use %CONDA_HOME%\\Scripts, etc.\n1. To test your environment you should ***open the Command Prompt*** (Not Anaconda Prompt) and try these steps:\n 1. Make sure to close the current Command Prompt if it is open and re-open it.\n 1. Type ***conda*** in the cmd line. You should get the positional arguments.\n 1. Type ***python*** in the cmd line. You should enter the python environmnet.\n1. If the steps above where succesful. You succesfully mapped your Anaconda distribution to your system.",
"_____no_output_____"
],
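    [
        "A quick way to verify the mapping (a hedged sketch, assuming the example install location D:\\ProgramData\\Anaconda3 used above; substitute your own path):\n1. Type ***where conda*** in the cmd line. It should print a path inside your Anaconda directory (e.g. the Scripts folder).\n1. Type ***conda --version*** and ***python --version***. Both should print version numbers without errors.",
        "_____no_output_____"
    ],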
[
"Other Notes:\nIf the windows store opens when you type python in the cmd prompt, go to the app execution aliases and turn off the app installer python.exe and python3.exe",
"_____no_output_____"
]
]
] | [
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
d0ec74d072e0eee134f57a0f74502d4fc84a1751 | 63,049 | ipynb | Jupyter Notebook | jupyter/13-Functional-Programming.ipynb | c-Bung/OnJava8 | fdca19e5518d3c6c63877ae864561df50306c474 | [
"MIT"
] | 183 | 2020-11-25T07:55:03.000Z | 2022-03-29T06:58:13.000Z | jupyter/13-Functional-Programming.ipynb | c-Bung/OnJava8 | fdca19e5518d3c6c63877ae864561df50306c474 | [
"MIT"
] | null | null | null | jupyter/13-Functional-Programming.ipynb | c-Bung/OnJava8 | fdca19e5518d3c6c63877ae864561df50306c474 | [
"MIT"
] | 241 | 2020-10-06T14:16:52.000Z | 2022-03-31T15:10:15.000Z | 26.314274 | 416 | 0.510555 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
d0ec889ebf83741c13a82e8b857fd7ad3345aa19 | 293,252 | ipynb | Jupyter Notebook | Assignment (8) PCA (2).ipynb | shraddhaghadage/PCA | 676ebe818cc195f80c7af04314e3b9b7550b006d | [
"Apache-2.0"
] | 2 | 2021-12-10T03:37:11.000Z | 2022-01-23T18:56:40.000Z | Assignment (8) PCA (2).ipynb | shraddhaghadage/PCA | 676ebe818cc195f80c7af04314e3b9b7550b006d | [
"Apache-2.0"
] | null | null | null | Assignment (8) PCA (2).ipynb | shraddhaghadage/PCA | 676ebe818cc195f80c7af04314e3b9b7550b006d | [
"Apache-2.0"
] | null | null | null | 132.994104 | 83,732 | 0.821075 | [
[
[
"import pandas as pd\nimport numpy as np\nfrom sklearn.decomposition import PCA\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.preprocessing import scale",
"_____no_output_____"
],
[
"wines=pd.read_csv(\"wine.csv\")\nwines",
"_____no_output_____"
],
[
"wines.describe()",
"_____no_output_____"
],
[
"wines.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 178 entries, 0 to 177\nData columns (total 14 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Type 178 non-null int64 \n 1 Alcohol 178 non-null float64\n 2 Malic 178 non-null float64\n 3 Ash 178 non-null float64\n 4 Alcalinity 178 non-null float64\n 5 Magnesium 178 non-null int64 \n 6 Phenols 178 non-null float64\n 7 Flavanoids 178 non-null float64\n 8 Nonflavanoids 178 non-null float64\n 9 Proanthocyanins 178 non-null float64\n 10 Color 178 non-null float64\n 11 Hue 178 non-null float64\n 12 Dilution 178 non-null float64\n 13 Proline 178 non-null int64 \ndtypes: float64(11), int64(3)\nmemory usage: 19.6 KB\n"
],
[
"wines_ary=wines.values\nwines_ary",
"_____no_output_____"
],
[
"wines_normal = scale(wines_ary)\nwines_normal",
"_____no_output_____"
]
],
[
[
"# PCA Implementation\n",
"_____no_output_____"
]
],
[
[
"pca = PCA()\npca_values = pca.fit_transform(wines_normal)\npca_values",
"_____no_output_____"
],
[
"var = pca.explained_variance_ratio_\nvar",
"_____no_output_____"
],
[
"var1 = np.cumsum(np.round(var,decimals = 4)*100)\nvar1",
"_____no_output_____"
],
[
"pca.components_",
"_____no_output_____"
],
[
"plt.plot(var1, color='red', marker = 'o',linestyle = '--')",
"_____no_output_____"
],
[
"# Final Dataframe\nfinalDf =pd.concat([wines['Type'],pd.DataFrame(pca_values[:,0:3], columns=['pc1','pc2','pc3'])] ,axis = 1)\nfinalDf",
"_____no_output_____"
],
[
"finalDf = pd.concat([pd.DataFrame(pca_values[:,0:3],columns=['pc1','pc2','pc3']), wines['Type']], axis = 1)\nfinalDf",
"_____no_output_____"
],
[
"# Visualization of PCAs\nfig=plt.figure(figsize=(16,12))\nsns.scatterplot(data=finalDf)",
"_____no_output_____"
],
[
"sns.scatterplot(data=finalDf,x='pc1',y='pc2', hue='Type')",
"_____no_output_____"
],
[
"sns.scatterplot(data=finalDf,x='pc1',y='pc3', hue='Type')",
"_____no_output_____"
],
[
"sns.scatterplot(data=finalDf,x='pc2',y='pc3', hue='Type')",
"_____no_output_____"
]
],
[
[
"# Checking with other Clustering Algorithms",
"_____no_output_____"
],
[
"# 1. Hierarchical Clustering",
"_____no_output_____"
]
],
[
[
"# Import Libraries\nimport scipy.cluster.hierarchy as sch\nfrom sklearn.cluster import AgglomerativeClustering\nfrom sklearn.preprocessing import normalize",
"_____no_output_____"
],
[
"# As we already have normalized data, create Dendrograms\nplt.figure(figsize=(10,8))\ndendrogram=sch.dendrogram(sch.linkage(finalDf,method='average'))",
"_____no_output_____"
],
[
"hc=AgglomerativeClustering(n_clusters=6, affinity='euclidean', linkage = 'average')\nhc",
"_____no_output_____"
],
[
"y_hc=pd.DataFrame(hc.fit_predict(finalDf),columns=['clustersid'])\ny_hc['clustersid'].value_counts()",
"_____no_output_____"
],
[
"# Adding clusters to dataset\nwine3=wines.copy()\nwine3['clustersid']=hc.labels_\nwine3",
"_____no_output_____"
]
],
[
[
"# 2. K-Means Clustering",
"_____no_output_____"
]
],
[
[
"# Import Libraries\nfrom sklearn.cluster import KMeans\nfrom sklearn.preprocessing import StandardScaler\nscaler = StandardScaler()\nscaled_wines= scaler.fit_transform(wines.iloc[:,1:])",
"_____no_output_____"
],
[
"scaled_wines",
"_____no_output_____"
],
[
"# within-cluster sum-of-squares criterion \nwcss=[]\nfor i in range (1,11):\n kmeans=KMeans(n_clusters=i,random_state=0)\n kmeans.fit(finalDf)\n wcss.append(kmeans.inertia_)",
"C:\\Users\\shrad\\anaconda3\\lib\\site-packages\\sklearn\\cluster\\_kmeans.py:881: UserWarning: KMeans is known to have a memory leak on Windows with MKL, when there are less chunks than available threads. You can avoid it by setting the environment variable OMP_NUM_THREADS=1.\n warnings.warn(\n"
],
[
"# Plot K values range vs WCSS to get Elbow graph for choosing K (no. of clusters)\nplt.plot(range(1,11),wcss, marker = 'o', linestyle = '--')\nplt.title('Elbow Graph')\nplt.xlabel('Number of clusters')\nplt.ylabel('WCSS')\nplt.show()",
"_____no_output_____"
]
],
[
[
"# Build Cluster algorithm using K=4",
"_____no_output_____"
]
],
[
[
"# Cluster algorithm using K=4\nclusters3=KMeans(4,random_state=30).fit(finalDf)\nclusters3",
"_____no_output_____"
],
[
"clusters3.labels_\n",
"_____no_output_____"
],
[
"# Assign clusters to the data set\nwine4=wines.copy()\nwine4['clustersid']=clusters3.labels_\nwine4",
"_____no_output_____"
],
[
"wine4['clustersid'].value_counts()",
"_____no_output_____"
],
[
"scaled_wines",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
d0ec8a2e63ab6d02814b718883fb4bbd069db859 | 252,163 | ipynb | Jupyter Notebook | RealisticKvMaps/scaling_tracer_flux_paper1_clean.ipynb | UBC-MOAD/outputanalysisnotebooks | 50839cde3832d26bac6641427fed03c818fbe170 | [
"Apache-2.0"
] | null | null | null | RealisticKvMaps/scaling_tracer_flux_paper1_clean.ipynb | UBC-MOAD/outputanalysisnotebooks | 50839cde3832d26bac6641427fed03c818fbe170 | [
"Apache-2.0"
] | null | null | null | RealisticKvMaps/scaling_tracer_flux_paper1_clean.ipynb | UBC-MOAD/outputanalysisnotebooks | 50839cde3832d26bac6641427fed03c818fbe170 | [
"Apache-2.0"
] | null | null | null | 148.856553 | 81,566 | 0.83015 | [
[
[
"## Final rescale for paper 1",
"_____no_output_____"
],
[
"Final figures for the scaling section of paper 1 and cleaner fits for:\n\n* Maximum and minimum squeezing of isopyncals (max $N^2/N^2_0$, min $N^2/N^2_0$ )\n* Effective stratification ($N_{eff}$)\n* Upwelling flux induced by the canyon ($\\Phi$)\n\n* Maximum and minimum squeezing of isopyncals iso-concentration lines (max $\\partial_zC/\\partial_zC_0$, min $\\partial_zC/\\partial_zC_0$ ) *These won't be necessary for the paper(?)* \n* Mean concentration just above the rim during the advective phase ($\\bar{C}$)\n* Tracer upwelling flux induced by the canyon ($\\Phi_{Tr}$)",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as mcolors\nimport matplotlib.gridspec as gspec\nfrom matplotlib.ticker import FormatStrFormatter\nfrom netCDF4 import Dataset\nimport numpy as np\nimport os\nimport pandas as pd\nimport seaborn as sns\nimport sys\nimport scipy.stats\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nimport canyon_tools.readout_tools as rout \nimport canyon_tools.metrics_tools as mpt",
"_____no_output_____"
],
[
"from IPython.display import HTML\n\nHTML('''<script>\ncode_show=true; \nfunction code_toggle() {\n if (code_show){\n $('div.input').hide();\n } else {\n $('div.input').show();\n }\n code_show = !code_show\n} \n$( document ).ready(code_toggle);\n</script>\n<form action=\"javascript:code_toggle()\"><input type=\"submit\" value=\"Click here to toggle on/off the raw code.\"></form>''')\n",
"_____no_output_____"
],
[
"sns.set_context('paper')\nsns.set_style('white')",
"_____no_output_____"
],
[
"CanyonGrid='/data/kramosmu/results/TracerExperiments/CNTDIFF/run38/gridGlob.nc'\nCanyonGridOut = Dataset(CanyonGrid)\n\nCanyonGridNoC='/data/kramosmu/results/TracerExperiments/CNTDIFF/run68/gridGlob.nc'\nCanyonGridOutNoC = Dataset(CanyonGridNoC)\n\nCanyonState='/data/kramosmu/results/TracerExperiments/CNTDIFF/run38/stateGlob.nc'\nCanyonStateOut = Dataset(CanyonState)\n\n# Grid variables\nnx = 616\nny = 360\nnz = 90\nnt = 19 # t dimension size \ntime = CanyonStateOut.variables['T']\nRC = CanyonGridOut.variables['RC']",
"_____no_output_____"
],
[
"# Constants and scales\n\ng = 9.81 # accel. gravity\nHs = 149.8 # Shelf break depth\ns = 0.0061 # shelf slope \n\ndef Dh(f,L,N):\n '''Vertical scale Dh'''\n return((f*L)/(N))\n \ndef Ro(U,f,R):\n '''Rossby number'''\n return(U/(f*R))\n\ndef F(Ro):\n '''Function that estimates the ability of the flow to follow isobaths'''\n return(Ro/(0.9+Ro))\n\ndef Bu(N,f,W,Hs):\n '''Burger number'''\n return((N*Hs)/(f*W))\n\ndef RossbyRad(N,Hs,f):\n '''1st Rossby radius of deformation'''\n return((N*Hs)/f)\n\ndef SE(s,N,f,Fw,Rl):\n '''Slope effect '''\n return((s*N)/(f*(Fw/Rl)**0.5))",
"_____no_output_____"
],
[
"# Information for all runs is stored in canyon_records.py\nlib_path = os.path.abspath('/ocean/kramosmu/OutputAnalysis/outputanalysisnotebooks/PythonScripts/Paper1Figures/') # Add absolute path to my python scripts\nsys.path.append(lib_path)\n\nimport canyon_records \nrecords = canyon_records.main()\n\nimport nocanyon_records \nrecordsNoC = nocanyon_records.main()",
"_____no_output_____"
]
],
[
[
"### Not all runs are used to fit all variables\n\n**records_dyn** has all runs where f, N or U vary. USe this list to fit upwelling flux $\\Phi$ and modify *Howatt and Allen 2013*.\n\n**records_epsilon** has all runs in records_dyn plus the runs with a heaviside Kv profile. Use this list to fit $\\Phi$ with scaled N.\n\n**records_real** has all runs in records_epsilon plus the runs with a Kv profile inspired in observations.",
"_____no_output_____"
]
],
[
[
"# Indices of all runs that will be considered for paper 1\n\nselect_rec = [0,1,2,3,4,5,51,6,7,8,9,10,17,18,19,20,21,\n 22,23,24,25,26,27,29,30,31,32,33,\n 34,35,38,39,41,42,43,44,45,46,47,48,49]#,52, 53, 54, 55, 56, 57, 58, 59, 60]\n\nfor ii in select_rec:\n print(ii,records[ii].label2, records[ii].name)",
"0 base case CNTDIFF_run38\n1 $\\uparrow$ $K_{bg}$ CNTDIFF_run37\n2 $\\uparrow \\uparrow$ $K_{bg}$ CNTDIFF_run36\n3 $\\uparrow$ $N_0$ CNTDIFF_run45\n4 $\\uparrow \\uparrow$ $N_0$ CNTDIFF_run73\n5 $\\Downarrow$ $N_0$ CNTDIFF_run75\n51 $\\downarrow \\downarrow N_0$ CNTDIFF_run81\n6 $\\downarrow$ $N_0$ CNTDIFF_run79\n7 $\\uparrow f$ CNTDIFF_run67\n8 $\\downarrow \\downarrow$ $f$ CNTDIFF_run51\n9 $\\downarrow f$ CNTDIFF_run69\n10 $\\Downarrow f$ CNTDIFF_run71\n17 $\\downarrow$ U LOWER_BF_run01\n18 $\\downarrow \\downarrow$ U LOW_BF_run01\n19 $\\Downarrow$ U LOWEST_BF_run01\n20 $\\Downarrow$ U, $\\downarrow \\downarrow$ $N_0$ LOWEST_BF_run03\n21 $\\Downarrow$ U, $\\uparrow \\uparrow$ $N_0$ LOWEST_BF_run05\n22 $\\Downarrow$ U, $\\Downarrow$ $f$ LOWEST_BF_run07\n23 $\\Downarrow$ U, $\\uparrow \\uparrow K_{can}$ LOWEST_BF_run11\n24 $K_{can}$ Monterey (bot) 3DVISC_REALISTIC_run01\n25 $K_{can}$ Eel (bot) 3DVISC_REALISTIC_run02\n26 $K_{can}$ Monterey 3DVISC_REALISTIC_run03\n27 $K_{can}$ Ascension (bot) 3DVISC_REALISTIC_run05\n29 $\\Uparrow \\Uparrow K_{can}$, $\\epsilon 10$ 3DVISC_REALISTIC_run07\n30 $\\Uparrow \\Uparrow K_{can}$, $\\epsilon 25$ 3DVISC_REALISTIC_run08\n31 $\\Uparrow \\Uparrow K_{can}$, $\\epsilon 50$ 3DVISC_REALISTIC_run09\n32 $\\Uparrow \\Uparrow K_{can}$, $\\epsilon 100$ 3DVISC_REALISTIC_run10\n33 $\\Uparrow \\Uparrow K_{can}$, $\\epsilon 15$ 3DVISC_REALISTIC_run11\n34 $\\Uparrow \\Uparrow K_{can}$, $\\epsilon 75$ 3DVISC_REALISTIC_run12\n35 $\\Uparrow \\Uparrow K_{can}$, $\\epsilon 150$ 3DVISC_REALISTIC_run13\n38 $\\uparrow \\uparrow K_{can}$ 3DVISC_REALISTIC_run16\n39 $\\Uparrow \\uparrow K_{can}$ 3DVISC_REALISTIC_run17\n41 $\\uparrow \\uparrow K_{can}$ $\\epsilon 25$ 3DVISC_REALISTIC_run19\n42 $\\uparrow \\uparrow K_{can}$ $\\epsilon 100$ 3DVISC_REALISTIC_run20\n43 $\\Uparrow \\uparrow K_{can}$ $\\epsilon 25$ 3DVISC_REALISTIC_run21\n44 $\\Uparrow \\uparrow K_{can}$, $\\epsilon 100$ 3DVISC_REALISTIC_run22\n45 $\\Uparrow \\uparrow \\uparrow K_{can}$ 3DVISC_REALISTIC_run23\n46 $\\Uparrow \\Uparrow \\uparrow K_{can}$ 3DVISC_REALISTIC_run24\n47 $\\Uparrow K_{can}$ 3DVISC_REALISTIC_run25\n48 $\\uparrow K_{can}$ 3DVISC_REALISTIC_run26\n49 $\\Uparrow \\Uparrow K_{can}$ 3DVISC_REALISTIC_run27\n"
],
[
"# records_dyn has all the runs without the ones where K_bg changes. Use these ones for fitting the data HA2013\nind = [0,3,4,5,51,6,7,8,9,10,17,18,19,20,21,22] \nrecords_dyn = []\nfor ii in ind:\n records_dyn.append(records[ii])\n\n# records_epsilon has all the runs in records_step plus the epsilon runs (use these to fit Nmax+Nmin)\nind = [0,3,4,5,51,6,7,8,9,10,17,18,19,20,21,22,29,30,31,32,33,\n 34,38,39,41,42,43,44,45,46,47,48,49]#,52, 53, 54, 55, 56, 57, 58, 59, 60]\nrecords_epsilon = []\nfor ii in ind:\n records_epsilon.append(records[ii])\n\n# records_real has all the runs in records_epsilon plus the realistic runs\nind = [0,3,4,5,51,6,7,8,9,10,17,18,19,20,21,22,29,30,31,32,33,\n 34,38,41,42,39,43,44,45,46,47,48,49,24,25,26,27]#,52,53, 54, 55, 56, 57, 58, 59, 60]\nrecords_real = []\nfor ii in ind:\n records_real.append(records[ii])",
"_____no_output_____"
],
[
"records_sel = []\n\nfor ind in select_rec:\n records_sel.append(records[ind])\n file = ('/data/kramosmu/results/TracerExperiments/%s/HCW_TrMass_%s%s.csv' \n %(records[ind].exp_code,records[ind].exp_code,records[ind].run_num))\n dfcan = pd.read_csv(file)\n records[ind].HCW = dfcan['HCW']\n records[ind].HCWTr1 = dfcan['HCWTr1']\n records[ind].TrMass = dfcan['TrMassHCW']\n records[ind].TrMassTr1 = dfcan['TrMassHCWTr1']\n records[ind].TrMassTr2 = dfcan['TrMassHCWTr2']\n records[ind].TrMassTot = dfcan['TotTrMass']\n records[ind].TrMassTotTr2 = dfcan['TotTrMassTr2']\n records[ind].TrMassTotTr1 = dfcan['TotTrMassTr1']",
"_____no_output_____"
],
[
"t=6.5\nstname = 'UwH' #Station downstream head of canyon\n\nkeys2 = ['N_tt12','N_tt14']\n\nfor ind in select_rec:\n \n filename1 = ('/ocean/kramosmu/OutputAnalysis/outputanalysisnotebooks/results/metricsDataFrames/N_%s_%s.csv' %\n (records[ind].name,stname))\n df = pd.read_csv(filename1)\n Nab = np.empty(len(keys2))\n Nbe = np.empty(len(keys2))\n \n if records[ind].L > 13000:\n print(records[ind].L)\n for key,ii in zip(keys2, range(len(keys2))):\n Nab[ii] = np.max(df[keys2[ii]][:]) \n Nbe[ii] = np.min(df[keys2[ii]][12:16]) \n \n elif (records[ind].L < 13000) & (records[ind].L > 8500):\n print(records[ind].L)\n for key,ii in zip(keys2, range(len(keys2))):\n Nab[ii] = np.max(df[keys2[ii]][:]) \n Nbe[ii] = np.min(df[keys2[ii]][16:19]) \n \n \n else:\n for key,ii in zip(keys2, range(len(keys2))):\n Nab[ii] = np.max(df[keys2[ii]][:]) \n Nbe[ii] = np.min(df[keys2[ii]][20:23]) \n \n \n records[ind].Nab = np.mean(Nab)\n records[ind].Nbe = np.mean(Nbe)\n records[ind].Nab_std = np.std(Nab)\n records[ind].Nbe_std = np.std(Nbe)",
"_____no_output_____"
],
[
"keys2 = ['dTrdz_tt08','dTrdz_tt10','dTrdz_tt12','dTrdz_tt14','dTrdz_tt16','dTrdz_tt18']\n\nfor ind in select_rec:\n \n filename1 = ('/ocean/kramosmu/OutputAnalysis/outputanalysisnotebooks/results/metricsDataFrames/dTr1dz_%s_%s.csv' %\n (records[ind].name,stname))\n df = pd.read_csv(filename1)\n dTrab = 0\n dTrbe = 0\n \n if records[ind].L > 13000:\n for key,ii in zip(keys2, range(len(keys2))):\n dTrab = dTrab + np.min(df[keys2[ii]][:]) \n dTrbe = dTrbe + np.max(df[keys2[ii]][12:16]) \n records[ind].dTr0 = df['dTrdz_tt00'][10] \n \n elif (records[ind].L < 13000) & (records[ind].L > 8500):\n for key,ii in zip(keys2, range(len(keys2))):\n dTrab = dTrab + np.min(df[keys2[ii]][:]) \n dTrbe = dTrbe + np.max(df[keys2[ii]][16:19]) \n records[ind].dTr0 = df['dTrdz_tt00'][10] \n \n else:\n for key,ii in zip(keys2, range(len(keys2))):\n dTrab = dTrab + np.min(df[keys2[ii]][:]) #0:20\n dTrbe = dTrbe + np.max(df[keys2[ii]][20:23]) #20:24\n records[ind].dTr0 = df['dTrdz_tt00'][10]\n records[ind].dTr_ab = dTrab/ len(keys2)\n records[ind].dTr_be = dTrbe/ len(keys2)\n ",
"_____no_output_____"
],
[
"keys2 = ['Tr_profile_tt08','Tr_profile_tt10','Tr_profile_tt12','Tr_profile_tt14','Tr_profile_tt16','Tr_profile_tt18']\n\nfor ind in select_rec:\n \n filename1 = ('/ocean/kramosmu/OutputAnalysis/outputanalysisnotebooks/results/metricsDataFrames/Tr1_profile_%s_%s.csv' %\n (records[ind].name,stname))\n df = pd.read_csv(filename1)\n Nab = np.zeros(len(keys2))\n \n if records[ind].L > 13000:\n for key,ii in zip(keys2, range(len(keys2))):\n Nab[ii] = np.nanmean(df[keys2[ii]][11:13]) # just above rim depth\n records[ind].Tr0 = (df['Tr_profile_tt00'][13])\n elif (records[ind].L < 13000) & (records[ind].L > 8500):\n for key,ii in zip(keys2, range(len(keys2))):\n Nab[ii] = np.nanmean(df[keys2[ii]][15:17]) # just above rim depth\n records[ind].Tr0 = (df['Tr_profile_tt00'][17])\n else:\n for key,ii in zip(keys2, range(len(keys2))):\n Nab[ii] = np.nanmean(df[keys2[ii]][19:21]) # just above rim depth\n records[ind].Tr0 = (df['Tr_profile_tt00'][21])\n \n records[ind].Tr = np.nanmean(Nab)\n records[ind].Tr_std = np.std(Nab)\n ",
"_____no_output_____"
]
],
[
[
"## Stratification and upwelling flux\n\nIn previous notebooks I found that the upwelling flux is porportional to an effective stratification $N_{eff}$ given by the weighted sum of the maximum stratification above the rim, near the head and the minimum stratification below the rim:\n\n$$N_{eff} = {0.75N_{max}+0.25N_{min}}$$\n\nSo first, we scale $N_{max}$ and $N_{min}$ using the information we got from the 1D model and modifications to the 1D model due to the enhanced diffusion above the rim when $\\epsilon$ is larger than the step case.\n\nOnce we get both N's, we can scale $N_{eff}$ and use it in the depth scale $D_h$ in the scaling for $\\Phi$ by Howatt and Allen as $D_{eff}=fL/N_{eff}$, with proper fitting parameters.",
"_____no_output_____"
]
],
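[
    [
        "A minimal numeric sketch of the weighted sum above (placeholder values, not model output; the actual coefficients are fitted below):\n\n```python\n# illustrative stratifications in s^-1 (assumed values)\nN_max, N_min = 7.0e-3, 4.0e-3\nN_eff = 0.75*N_max + 0.25*N_min    # effective stratification\nD_eff = lambda f, L: f*L/N_eff     # depth scale used in the flux scaling\nprint(N_eff)                       # 6.25e-03\n```",
        "_____no_output_____"
    ]
],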
[
[
"# Get kv form initial files\n\nrecords_kv_files = [24,25,26,27,29,30,31,32,33,34,38,39,41,42,43,44,45,46,47,48,49]\n\nkv_dir = '/ocean/kramosmu/Building_canyon/BuildCanyon/Stratification/616x360x90/'\nini_kv_files = [kv_dir + 'KrDiff_Mty_90zlev_616x360_Quad.bin',\n kv_dir + 'KrDiff_Eel_90zlev_616x360_Quad.bin',\n kv_dir + 'KrDiff_Mty_rim_90zlev_616x360_Quad.bin',\n kv_dir + 'KrDiff_Asc_90zlev_616x360_Quad.bin',\n kv_dir + 'KrDiff_e10_kv1E2_90zlev_616x360_Quad.bin',\n kv_dir + 'KrDiff_e25_kv1E2_90zlev_616x360_Quad.bin',\n kv_dir + 'KrDiff_e50_kv1E2_90zlev_616x360_Quad.bin',\n kv_dir + 'KrDiff_e100_kv1E2_90zlev_616x360_Quad.bin',\n kv_dir + 'KrDiff_e15_kv1E2_90zlev_616x360_Quad.bin',\n kv_dir + 'KrDiff_e75_kv1E2_90zlev_616x360_Quad.bin',\n kv_dir + 'KrDiff_e05_kv1E3_90zlev_616x360_Quad.bin',\n kv_dir + 'KrDiff_e05_kv5E3_90zlev_616x360_Quad.bin',\n kv_dir + 'KrDiff_e25_kv1E3_90zlev_616x360_Quad.bin',\n kv_dir + 'KrDiff_e100_kv1E3_90zlev_616x360_Quad.bin',\n kv_dir + 'KrDiff_e25_kv5E3_90zlev_616x360_Quad.bin',\n kv_dir + 'KrDiff_e100_kv5E3_90zlev_616x360_Quad.bin',\n kv_dir + 'KrDiff_e05_kv8E3_90zlev_616x360_Quad.bin',\n kv_dir + 'KrDiff_e05_exact1p2E2_90zlev_616x360_Quad.bin',\n kv_dir + 'KrDiff_e05_kv2p5E3_90zlev_616x360_Quad.bin',\n kv_dir + 'KrDiff_e05_kv5E4_90zlev_616x360_Quad.bin',\n kv_dir + 'KrDiff_e05_exact_nosmooth_90zlev_616x360_Quad.bin',\n ]\n\ndt = np.dtype('>f8') # float 64 big endian\nst = [240, 200] # y, x indices of UwH station\nHrim = 135\ndd = 1\nini_kv_profiles = np.zeros((len(ini_kv_files,nz))\n\nfor file, ii in zip(ini_kv_files, records_kv_files):\n data = np.fromfile(file, dt)\n ini_kv = np.reshape(data,(90,360,616),order='C')\n \n KK = ini_kv[:, st[0], st[1]] \n \n records[ii].Zdif = (((KK[int(Hrim/5)+1]-KK[int(Hrim/5)-1]))*t*3600*24)**0.5\n records[ii].dk = KK[int(Hrim/5)+1]-KK[int(Hrim/5)-1]\n records[ii].Kz = KK[int(Hrim/5)-4]\n records[ii].Kz_be = KK[int(Hrim/5)+4]",
"_____no_output_____"
],
[
"for rec in records_real:\n\n Dz = abs(RC[int(Hrim/5)+1]-RC[int(Hrim/5)-1])\n rec.Z = ((rec.f*rec.u_mod*F(Ro(rec.u_mod,rec.f,rec.R))*rec.L)**(0.5))/rec.N\n \n if rec.kv == rec.kbg:\n rec.Zdif = 0\n rec.Sdif_min = np.exp(-0.15*rec.Zdif/Dz) # -0.1 comes from the 1D model\n rec.dk = 0\n rec.Kz = 1E-5\n rec.Kz_be = 1E-5\n rec.Sdif_max = (rec.Zdif/Dz)*np.exp(-(rec.Kz*t*3600*24)/((rec.epsilon)**2))\n \n else:\n rec.Sdif_min = np.exp(-0.15*rec.Zdif/Dz)\n rec.Sdif_max = (rec.Zdif/Dz)*np.exp(-(rec.Kz*t*3600*24)/((rec.epsilon)**2))\n \n rec.S_max = (rec.Z/rec.Hh)*np.exp(-rec.Kz*t*3600*24/rec.Z**2)\n rec.S_min = (rec.Z/rec.Hh)*np.exp(-rec.Kz_be*t*3600*24/rec.Z**2)",
"_____no_output_____"
],
[
"print(rec.name)",
"3DVISC_REALISTIC_run05\n"
],
[
"X1_be = np.array([rec.S_min for rec in records_epsilon])\nX2_be = np.array([rec.Sdif_min for rec in records_epsilon])\nY_be = np.array([(rec.Nbe)**2/(rec.N**2) for rec in records_epsilon])\n\nX1_ab = np.array([rec.S_max for rec in records_epsilon])\nX2_ab = np.array([rec.Sdif_max for rec in records_epsilon])\nY_ab = np.array([(rec.Nab)**2/(rec.N**2) for rec in records_epsilon])",
"_____no_output_____"
],
[
"from sklearn import linear_model\nreg_be = linear_model.LinearRegression()\nreg_be.fit (np.transpose([X1_be,X2_be]),np.transpose(Y_be) )\nprint(r'min $N^2/N^2_0$ = %1.2f $S^-$ + %1.2f $(1-S^-_{diff})$ %1.2f ' %\n (reg_be.coef_[0],reg_be.coef_[1],reg_be.intercept_))\n\nreg_ab = linear_model.LinearRegression()\nreg_ab.fit (np.transpose([X1_ab, X2_ab]),np.transpose(Y_ab) )\nprint(r'max $N^2/N^2_0$ = %1.2f $S^+$ + %1.2f $S^+_{diff}$ + %1.2f ' %\n (reg_ab.coef_[0],reg_ab.coef_[1],reg_ab.intercept_))",
"min $N^2/N^2_0$ = 2.72 $S^-$ + 2.19 $(1-S^-_{diff})$ -1.13 \nmax $N^2/N^2_0$ = 7.35 $S^+$ + 0.21 $S^+_{diff}$ + 0.82 \n"
],
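    [
        "# Hedged sketch (illustration only, not part of the original analysis): apply the fitted\n# coefficients above to placeholder squeezing values, mirroring their use in the next cell.\nS_min_ex, Sdif_min_ex = 0.5, 0.8   # assumed values\nratio_min_ex = reg_be.coef_[0]*S_min_ex + reg_be.coef_[1]*Sdif_min_ex + reg_be.intercept_\nprint('illustrative min N^2/N^2_0 = %1.2f' % ratio_min_ex)",
        "_____no_output_____"
    ],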
[
"# Save values of N_eff and Phi\n\nfor rec in records_sel: \n can_eff = rec.HCW\n Phi = np.mean(np.array([(can_eff[ii]-can_eff[ii-1])/(time[ii]-time[ii-1]) \n for ii in range (8,18)]))\n Phi_std = np.std(np.array([(can_eff[ii]-can_eff[ii-1])/(time[ii]-time[ii-1]) \n for ii in range (8,18)]))\n rec.Phi = Phi\n rec.Phi_std = Phi_std\n\nfor rec in records_real:\n rec.Nbe_scaled = np.sqrt(reg_be.coef_[0]*rec.S_min +\n reg_be.coef_[1]*rec.Sdif_min +\n reg_be.intercept_)*rec.N\n rec.Nab_scaled = np.sqrt(reg_ab.coef_[0]*rec.S_max +\n reg_ab.coef_[1]*rec.Sdif_max +\n reg_ab.intercept_)*rec.N\n \n if (reg_be.coef_[0]*rec.S_min+ reg_be.coef_[1]*rec.Sdif_min +\n reg_be.intercept_)< 0 :\n rec.N_eff_scaled = (0.75*rec.Nab_scaled)\n else:\n rec.N_eff_scaled = (0.75*rec.Nab_scaled + 0.25*rec.Nbe_scaled) \n \n rec.Neff = (0.75*rec.Nab+0.25*rec.Nbe)",
"_____no_output_____"
],
[
"# find best slope parameter to use\n\nfor param in np.linspace(0.4, 1.3, 80):\n for rec in records_real:\n Se = SE(s, rec.N, rec.f, F(Ro(rec.u_mod,rec.f,rec.Wiso)), Ro(rec.u_mod,rec.f,rec.L))\n rec.X = ((F(Ro(rec.u_mod,rec.f,rec.Wiso)))**(1.5))*((Ro(rec.u_mod,rec.f,rec.L))**(0.5))*((1-param*Se)**3)\n\n rec.Phi_nonDim = rec.Phi/(rec.u_mod*rec.W*Dh(rec.f,rec.L,rec.N_eff_scaled))\n \n Y_array = np.array([rec.Phi_nonDim for rec in records_epsilon])\n X_array = np.array([rec.X for rec in records_epsilon])\n\n slope2, intercept2, r_value2, p_value2, std_err2 = scipy.stats.linregress(X_array,Y_array)\n\n print('Using parameter %1.2f: slope = %1.2f, intercept = %1.3f, r-value = %1.3f' %(param, slope2, intercept2, r_value2))",
"Using parameter 0.40: slope = 1.53, intercept = 0.004, r-value = 0.718\nUsing parameter 0.41: slope = 1.59, intercept = 0.003, r-value = 0.729\nUsing parameter 0.42: slope = 1.66, intercept = 0.002, r-value = 0.740\nUsing parameter 0.43: slope = 1.73, intercept = 0.000, r-value = 0.751\nUsing parameter 0.45: slope = 1.80, intercept = -0.001, r-value = 0.762\nUsing parameter 0.46: slope = 1.87, intercept = -0.002, r-value = 0.772\nUsing parameter 0.47: slope = 1.95, intercept = -0.003, r-value = 0.783\nUsing parameter 0.48: slope = 2.02, intercept = -0.005, r-value = 0.793\nUsing parameter 0.49: slope = 2.10, intercept = -0.006, r-value = 0.804\nUsing parameter 0.50: slope = 2.18, intercept = -0.007, r-value = 0.814\nUsing parameter 0.51: slope = 2.26, intercept = -0.008, r-value = 0.824\nUsing parameter 0.53: slope = 2.35, intercept = -0.009, r-value = 0.834\nUsing parameter 0.54: slope = 2.43, intercept = -0.010, r-value = 0.843\nUsing parameter 0.55: slope = 2.52, intercept = -0.011, r-value = 0.853\nUsing parameter 0.56: slope = 2.61, intercept = -0.012, r-value = 0.862\nUsing parameter 0.57: slope = 2.70, intercept = -0.013, r-value = 0.871\nUsing parameter 0.58: slope = 2.79, intercept = -0.014, r-value = 0.879\nUsing parameter 0.59: slope = 2.88, intercept = -0.014, r-value = 0.887\nUsing parameter 0.61: slope = 2.97, intercept = -0.015, r-value = 0.895\nUsing parameter 0.62: slope = 3.07, intercept = -0.016, r-value = 0.903\nUsing parameter 0.63: slope = 3.16, intercept = -0.016, r-value = 0.910\nUsing parameter 0.64: slope = 3.25, intercept = -0.017, r-value = 0.917\nUsing parameter 0.65: slope = 3.35, intercept = -0.017, r-value = 0.923\nUsing parameter 0.66: slope = 3.44, intercept = -0.017, r-value = 0.929\nUsing parameter 0.67: slope = 3.54, intercept = -0.017, r-value = 0.935\nUsing parameter 0.68: slope = 3.63, intercept = -0.018, r-value = 0.940\nUsing parameter 0.70: slope = 3.72, intercept = -0.018, r-value = 0.945\nUsing parameter 0.71: slope = 3.82, intercept = -0.018, r-value = 0.949\nUsing parameter 0.72: slope = 3.91, intercept = -0.018, r-value = 0.954\nUsing parameter 0.73: slope = 4.00, intercept = -0.017, r-value = 0.957\nUsing parameter 0.74: slope = 4.10, intercept = -0.017, r-value = 0.961\nUsing parameter 0.75: slope = 4.19, intercept = -0.017, r-value = 0.963\nUsing parameter 0.76: slope = 4.28, intercept = -0.017, r-value = 0.966\nUsing parameter 0.78: slope = 4.37, intercept = -0.016, r-value = 0.968\nUsing parameter 0.79: slope = 4.46, intercept = -0.016, r-value = 0.970\nUsing parameter 0.80: slope = 4.55, intercept = -0.015, r-value = 0.971\nUsing parameter 0.81: slope = 4.64, intercept = -0.015, r-value = 0.973\nUsing parameter 0.82: slope = 4.73, intercept = -0.014, r-value = 0.973\nUsing parameter 0.83: slope = 4.82, intercept = -0.013, r-value = 0.974\nUsing parameter 0.84: slope = 4.91, intercept = -0.013, r-value = 0.974\nUsing parameter 0.86: slope = 5.00, intercept = -0.012, r-value = 0.974\nUsing parameter 0.87: slope = 5.09, intercept = -0.011, r-value = 0.974\nUsing parameter 0.88: slope = 5.18, intercept = -0.010, r-value = 0.973\nUsing parameter 0.89: slope = 5.27, intercept = -0.010, r-value = 0.973\nUsing parameter 0.90: slope = 5.36, intercept = -0.009, r-value = 0.972\nUsing parameter 0.91: slope = 5.45, intercept = -0.008, r-value = 0.970\nUsing parameter 0.92: slope = 5.55, intercept = -0.007, r-value = 0.969\nUsing parameter 0.94: slope = 5.64, intercept = -0.006, r-value = 0.968\nUsing parameter 0.95: slope = 5.73, intercept = 
-0.005, r-value = 0.966\nUsing parameter 0.96: slope = 5.83, intercept = -0.004, r-value = 0.964\nUsing parameter 0.97: slope = 5.93, intercept = -0.003, r-value = 0.962\nUsing parameter 0.98: slope = 6.03, intercept = -0.002, r-value = 0.961\nUsing parameter 0.99: slope = 6.13, intercept = -0.002, r-value = 0.958\nUsing parameter 1.00: slope = 6.24, intercept = -0.001, r-value = 0.956\nUsing parameter 1.02: slope = 6.34, intercept = 0.000, r-value = 0.954\nUsing parameter 1.03: slope = 6.45, intercept = 0.001, r-value = 0.952\nUsing parameter 1.04: slope = 6.57, intercept = 0.002, r-value = 0.950\nUsing parameter 1.05: slope = 6.68, intercept = 0.003, r-value = 0.947\nUsing parameter 1.06: slope = 6.80, intercept = 0.004, r-value = 0.945\nUsing parameter 1.07: slope = 6.93, intercept = 0.005, r-value = 0.942\nUsing parameter 1.08: slope = 7.06, intercept = 0.005, r-value = 0.940\nUsing parameter 1.09: slope = 7.19, intercept = 0.006, r-value = 0.938\nUsing parameter 1.11: slope = 7.33, intercept = 0.007, r-value = 0.935\nUsing parameter 1.12: slope = 7.47, intercept = 0.008, r-value = 0.933\nUsing parameter 1.13: slope = 7.62, intercept = 0.009, r-value = 0.930\nUsing parameter 1.14: slope = 7.77, intercept = 0.009, r-value = 0.928\nUsing parameter 1.15: slope = 7.93, intercept = 0.010, r-value = 0.925\nUsing parameter 1.16: slope = 8.09, intercept = 0.011, r-value = 0.923\nUsing parameter 1.17: slope = 8.26, intercept = 0.012, r-value = 0.920\nUsing parameter 1.19: slope = 8.44, intercept = 0.013, r-value = 0.918\nUsing parameter 1.20: slope = 8.62, intercept = 0.013, r-value = 0.915\nUsing parameter 1.21: slope = 8.81, intercept = 0.014, r-value = 0.913\nUsing parameter 1.22: slope = 9.01, intercept = 0.015, r-value = 0.910\nUsing parameter 1.23: slope = 9.21, intercept = 0.015, r-value = 0.907\nUsing parameter 1.24: slope = 9.42, intercept = 0.016, r-value = 0.905\nUsing parameter 1.25: slope = 9.64, intercept = 0.017, r-value = 0.902\nUsing parameter 1.27: slope = 9.87, intercept = 0.017, r-value = 0.899\nUsing parameter 1.28: slope = 10.11, intercept = 0.018, r-value = 0.897\nUsing parameter 1.29: slope = 10.35, intercept = 0.019, r-value = 0.894\nUsing parameter 1.30: slope = 10.61, intercept = 0.019, r-value = 0.891\n"
],
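    [
        "# Hedged helper (added for illustration): pick the best slope parameter programmatically\n# instead of reading the printed sweep above by eye; repeats the same regression.\nbest_r, best_param = 0.0, np.nan\nfor param in np.linspace(0.4, 1.3, 80):\n    X_try = np.array([(F(Ro(rec.u_mod,rec.f,rec.Wiso))**1.5)*(Ro(rec.u_mod,rec.f,rec.L)**0.5)*\n                      (1 - param*SE(s, rec.N, rec.f, F(Ro(rec.u_mod,rec.f,rec.Wiso)), Ro(rec.u_mod,rec.f,rec.L)))**3\n                      for rec in records_epsilon])\n    Y_try = np.array([rec.Phi/(rec.u_mod*rec.W*Dh(rec.f,rec.L,rec.N_eff_scaled)) for rec in records_epsilon])\n    r_try = scipy.stats.linregress(X_try, Y_try)[2]\n    if r_try > best_r:\n        best_r, best_param = r_try, param\nprint('best parameter %1.2f with r-value %1.3f' % (best_param, best_r))",
        "_____no_output_____"
    ],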
[
"# My re-fit of Howatt and Allen's function for Phi gave:\nslope = 2.10\nparam = 0.40\nintercept = -0.004\n\n#Using parameter 0.86: slope = 5.00, intercept = -0.012, r-value = 0.974, choose largest r-value from above\nslope2 = 5.00\nparam2 = 0.86\nintercept2 = -0.012\n\nfor rec in records_real:\n \n Se = SE(s, rec.N, rec.f, F(Ro(rec.u_mod,rec.f,rec.Wiso)), Ro(rec.u_mod,rec.f,rec.L))\n \n HA2013=((slope*(F(Ro(rec.u_mod,rec.f,rec.Wiso))**(3/2))*(Ro(rec.u_mod,rec.f,rec.L)**(1/2))*((1-param*Se)**3))+intercept)\n RA2018 = (slope2*(F(Ro(rec.u_mod,rec.f,rec.Wiso))**(3/2))*(Ro(rec.u_mod,rec.f,rec.L)**(1/2))*((1-param2*Se)**3))+intercept2\n\n rec.HA2013 = HA2013\n rec.HA2013_sqe = (rec.Phi-rec.HA2013)**2\n \n rec.RA2018 = RA2018\n rec.RA2018_sqe = (rec.Phi-rec.RA2018)**2",
"_____no_output_____"
]
],
[
[
"### Tracer gradient",
"_____no_output_____"
]
],
[
[
"X1_be = np.array([rec.S_min for rec in records_epsilon])\nX2_be = np.array([rec.Sdif_min for rec in records_epsilon])\nY_be = np.array([(rec.dTr_be)/(rec.dTr0) for rec in records_epsilon])\n\nX1_ab = np.array([rec.S_max for rec in records_epsilon])\nX2_ab = np.array([rec.Sdif_max for rec in records_epsilon])\nY_ab = np.array([(rec.dTr_ab)/(rec.dTr0) for rec in records_epsilon])",
"_____no_output_____"
],
[
"from sklearn import linear_model\nreg_be_dTr = linear_model.LinearRegression()\nreg_be_dTr.fit (np.transpose([X1_be,X2_be]),np.transpose(Y_be) )\nprint(r'min $dzC/dzCo$ = %1.2f $S^-$ + %1.2f $S^-_{diff}$ %1.2f ' %\n (reg_be_dTr.coef_[0],reg_be_dTr.coef_[1],reg_be_dTr.intercept_))\n\nreg_ab_dTr = linear_model.LinearRegression()\nreg_ab_dTr.fit (np.transpose([X1_ab, X2_ab]),np.transpose(Y_ab) )\nprint(r'max $dzC/dzCo$ = %1.2f $S^+$ + %1.2f $S^+_{diff}$ %1.2f ' %\n (reg_ab_dTr.coef_[0],reg_ab_dTr.coef_[1],reg_ab_dTr.intercept_))\n",
"min $dzC/dzCo$ = 3.53 $S^-$ + 1.56 $S^-_{diff}$ -0.99 \nmax $dzC/dzCo$ = 7.30 $S^+$ + 0.23 $S^+_{diff}$ 0.82 \n"
],
[
"# save values of dTr scaled and PhiTr\nfor rec in records_sel:\n can_eff = rec.TrMass\n Phi_Tr = np.mean(np.array([(can_eff[ii]-can_eff[ii-1])/(time[ii]-time[ii-1]) for ii in range (12,18)]))\n Phi_Tr_std = np.std(np.array([(can_eff[ii]-can_eff[ii-1])/(time[ii]-time[ii-1]) for ii in range (12,18)]))\n \n rec.PhiTr = Phi_Tr\n rec.PhiTr_std = Phi_Tr_std\n\nfor rec in records_real:\n rec.dTr_ab_scaled = (reg_ab_dTr.coef_[0]*rec.S_max+\n reg_ab_dTr.coef_[1]*rec.Sdif_max+\n reg_ab_dTr.intercept_)*rec.dTr0\n rec.dTr_be_scaled = (reg_be_dTr.coef_[0]*rec.S_min+\n reg_be_dTr.coef_[1]*(1-rec.Sdif_min)+\n reg_be_dTr.intercept_)*rec.dTr0",
"_____no_output_____"
]
],
[
[
"### Concentration",
"_____no_output_____"
]
],
[
[
"# Fit mean concentration just above the rim - I know using the max \n# worked slightly better (smaller mse). Co is the initial concentration just above rim depth\n\nX1_tr = np.array([rec.S_max for rec in records_epsilon])\nX2_tr = np.array([rec.Sdif_max for rec in records_epsilon])\nY_tr = np.array([(rec.Tr/rec.Tr0) for rec in records_epsilon])\n\nreg_Tr = linear_model.LinearRegression()\nreg_Tr.fit (np.transpose([X1_tr,X2_tr]),np.transpose(Y_tr) )\nprint(r'$\\bar{C}/Co$ = %1.2f $S^+$ + %1.2f $S^+_{diff}$ %1.2f ' %\n (reg_Tr.coef_[0],reg_Tr.coef_[1],reg_Tr.intercept_))\n\nfor rec in records_real:\n rec.Tr_scaled = (reg_Tr.coef_[0]*(rec.S_max)+ reg_Tr.coef_[1]*(rec.Sdif_max)+\n reg_Tr.intercept_)*rec.Tr0\nprint(rec.Tr0)\nprint(rec.dTr0)",
"$\\bar{C}/Co$ = 0.33 $S^+$ + 0.06 $S^+_{diff}$ 1.00 \n5.77824258804\n-0.0359832525253\n"
]
],
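[
    [
        "# Hedged sketch (placeholder values only): evaluate the concentration fit above for assumed\n# squeezing values, as done for each run in records_real.\nS_max_ex, Sdif_max_ex = 0.6, 0.3   # assumed values\nCbar_ratio_ex = reg_Tr.coef_[0]*S_max_ex + reg_Tr.coef_[1]*Sdif_max_ex + reg_Tr.intercept_\nprint('illustrative Cbar/C0 = %1.2f' % Cbar_ratio_ex)",
        "_____no_output_____"
    ]
],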
[
[
"### Tracer flux",
"_____no_output_____"
]
],
[
[
"Y_array = np.array([rec.PhiTr for rec in records_epsilon])\nX_array = np.array([rec.RA2018*(rec.u_mod*rec.W*Dh(rec.f,rec.L,rec.N_eff_scaled))*\n (rec.Tr_scaled) for rec in records_epsilon])\nslope6, intercept6, r_value6, p_value6, std_err6 = scipy.stats.linregress(np.squeeze(X_array),\n np.squeeze(Y_array))\n \nfor rec in records_real:\n \n depth_scale = Dh(rec.f,rec.L,rec.N_eff_scaled)\n \n rec.Phi_scaled = rec.RA2018*(rec.u_mod*rec.W*depth_scale)\n rec.PhiTr_scaled = np.squeeze((slope6*rec.Phi_scaled*(rec.Tr_scaled))+intercept6)\n \nprint('\\Phi/UWD_{eff} = %1.2f Fw^{3/2} Ro^{1/2} (1-%1.2f *Se)^3 +%1.2f' %(slope2,param2,intercept2)) \nprint('\\Phi_{Tr} = %1.2f $\\Phi \\ bar{C}$ + %1.2f ' %(slope6, intercept6)) ",
"\\Phi/UWD_{eff} = 5.00 Fw^{3/2} Ro^{1/2} (1-0.86 *Se)^3 +-0.01\n\\Phi_{Tr} = 1.01 $\\Phi \\ bar{C}$ + 551.08 \n"
]
],
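[
    [
        "# Hedged example (placeholder numbers only): evaluate the fitted tracer-flux relation\n# Phi_Tr = slope6*Phi*Cbar + intercept6 for a hypothetical upwelling flux and rim concentration.\nPhi_ex = 3.0e4     # m^3/s, assumed\nCbar_ex = 8.0      # muM, assumed\nPhiTr_ex = slope6*Phi_ex*Cbar_ex + intercept6\nprint('illustrative Phi_Tr = %1.2e muM m^3/s' % PhiTr_ex)",
        "_____no_output_____"
    ]
],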
[
[
"## Figures",
"_____no_output_____"
]
],
[
[
"sns.set_context('paper')\nplt.rcParams['font.size'] = 11.0\nf = plt.figure(figsize = (5,5)) # 190mm = 7.48 in, 230cm = 9.05in\n\ngs = gspec.GridSpec(2, 2,wspace=0.2)\nax1 = plt.subplot(gs[0,0])\nax0 = plt.subplot(gs[0,1])\nax3 = plt.subplot(gs[1,0])\nax2 = plt.subplot(gs[1,1])\n\n\n# ---- plot 1:1 line ----\nax0.plot(np.linspace(-0.1,3.5,20),np.linspace(-0.1,3.5,20), '-',color='0.5')\nax1.plot(np.linspace(2,8,20),np.linspace(2,8,20), '-', color='0.5')\nax2.plot(np.linspace(5, 17, 20),np.linspace(5,17, 20),'-', color='0.5')\nax3.plot(np.linspace(1, 1.8, 20),np.linspace(1, 1.8, 20),'-', color='0.5')\n\n# ---- plot error -----\n# MSE ax0 \nphi_array = np.array([(rec.Nbe_scaled/rec.N)**2 for rec in records_epsilon])\nsca_array = np.array([(rec.Nbe)**2/(rec.N**2) for rec in records_epsilon])\nx_fit = np.linspace(-0.1, 3.5, 50)\nmean_sq_err = np.nanmean(((phi_array)-(sca_array))**2)\nupper_bound = ax0.plot(x_fit,x_fit+(mean_sq_err)**(0.5),linestyle = '--',color='0.5')\nlower_bound = ax0.plot(x_fit,x_fit-(mean_sq_err)**(0.5),linestyle = '--',color='0.5')\n\n# MSE ax1 \nphi_array = np.array([(rec.Nab_scaled/rec.N)**2 for rec in records_epsilon])\nsca_array = np.array([(rec.Nab)**2/(rec.N**2) for rec in records_epsilon])\nx_fit = np.linspace(2,8, 50)\nmean_sq_err = np.mean(((phi_array)-(sca_array))**2)\nupper_bound = ax1.plot(x_fit,x_fit+(mean_sq_err)**(0.5),linestyle = '--',color='0.5')\nlower_bound = ax1.plot(x_fit,x_fit-(mean_sq_err)**(0.5),linestyle = '--',color='0.5')\n\n# MSE ax2 \nphi_array = np.array([rec.N_eff_scaled/1E-3 for rec in records_epsilon])\nsca_array = np.array([rec.Neff/1E-3 for rec in records_epsilon])\nx_fit = np.linspace(5,17, 50)\nmean_sq_err = np.mean(((phi_array)-(sca_array))**2)\nupper_bound = ax2.plot(x_fit,x_fit+(mean_sq_err)**(0.5),linestyle = '--',color='0.5')\nlower_bound = ax2.plot(x_fit,x_fit-(mean_sq_err)**(0.5),linestyle = '--',color='0.5')\n\n# MSE ax3 \nphi_array = np.squeeze(np.array([rec.Tr_scaled/rec.Tr0 for rec in records_epsilon]))\nsca_array = np.squeeze(np.array([rec.Tr/rec.Tr0 for rec in records_epsilon]))\nx_fit = np.linspace(1,1.8, 50)\nmean_sq_err = np.mean(((phi_array)-(sca_array))**2)\nprint('MSE for Cbar/Co is %f and RMSE is %f ' %(mean_sq_err, mean_sq_err**(1/2)))\nupper_bound = ax3.plot(x_fit,x_fit+(mean_sq_err)**(0.5),linestyle = '--',color='0.5')\nlower_bound = ax3.plot(x_fit,x_fit-(mean_sq_err)**(0.5),linestyle = '--',color='0.5')\n\n# ---- plot scaling ----\nfor rec in records_real:\n \n plt0 = ax0.plot((rec.Nbe_scaled/rec.N)**2,(rec.Nbe)**2/(rec.N**2),\n marker = rec.mstyle,\n markersize = 7,\n color = sns.xkcd_rgb[rec.color2],\n markeredgewidth=0.5,\n markeredgecolor = 'k',\n label=rec.label2)\n \n plt1 = ax1.plot((rec.Nab_scaled/rec.N)**2,(rec.Nab)**2/(rec.N**2),\n marker = rec.mstyle,\n markersize = 7,\n color = sns.xkcd_rgb[rec.color2],\n markeredgewidth=0.5,\n markeredgecolor = 'k',\n label=rec.label2) \n \n plt2 = ax2.plot(rec.N_eff_scaled/1E-3,rec.Neff/1E-3,\n marker = rec.mstyle,\n markersize = 7,\n color = sns.xkcd_rgb[rec.color2],\n markeredgewidth=0.5,\n markeredgecolor = 'k',\n label=rec.label2)\n print(rec.name, rec.N_eff_scaled)\n plt3 = ax3.plot(rec.Tr_scaled/rec.Tr0,\n (rec.Tr/rec.Tr0),\n marker = rec.mstyle,\n markersize = 7,\n color = sns.xkcd_rgb[rec.color2],\n markeredgewidth=0.5,\n markeredgecolor = 'k',\n label=rec.label2)\n\n \n \n# ---- aesthetics 
-----\nax0.set_xlim(-0.1,3.5)\nax0.set_ylim(-0.1,3.5)\n\nax1.set_xlim(2,8)#\nax1.set_ylim(2,8)\n\nax2.set_xlim(7,15.5)\nax2.set_ylim(7,15.5)\n\nax3.set_xlim(1,1.62)\nax3.set_ylim(1,1.62)\n\n\nax0.set_ylabel('min $N^2/N^2_0$',labelpad=-1.5)\nax1.set_ylabel('max $N^2/N^2_0$',labelpad=-1.5)\nax0.set_xlabel(r'%1.2f$S^-$ + %1.2f$(1-S^-_{dif})$ %1.2f' %(reg_be.coef_[0], \n reg_be.coef_[1], \n reg_be.intercept_),labelpad=0.5)\nax1.set_xlabel(r'%1.2f$S^+$ + %1.2f$S^+_{dif}$ + %1.2f' %(reg_ab.coef_[0], \n reg_ab.coef_[1], \n reg_ab.intercept_),labelpad=0.5)\n\nax2.set_ylabel('$N_{eff}$ model / $10^{-3}$ s$^{-1}$', labelpad=0)\nax2.set_xlabel('$N_{eff}$ scaled / $10^{-3}$ s$^{-1}$',labelpad=0.0)\nax3.set_ylabel(r'$C_{rim}$ model /$C_0$', labelpad=0)\nax3.set_xlabel(r'$\\bar{C}$ /$C_0$', labelpad=0.0)\n\nax0.tick_params(axis='x', pad=2)\nax1.tick_params(axis='x', pad=2)\nax3.tick_params(axis='x', pad=2)\nax2.tick_params(axis='x', pad=2)\n\nax0.tick_params(axis='y', pad=2)\nax1.tick_params(axis='y', pad=2)\nax3.tick_params(axis='y', pad=2)\nax2.tick_params(axis='y', pad=2)\n\nax0.legend(bbox_to_anchor=(1.05,1.3), ncol=1,columnspacing=0.1,labelspacing=0.1,frameon=True )\n\nax0.set_aspect(1)\nax1.set_aspect(1)\nax2.set_aspect(1)\nax3.set_aspect(1)\n\nax1.text(0.1,0.85,'Eqn. 22',transform=ax1.transAxes)\nax0.text(0.1,0.85,'Eqn. 24',transform=ax0.transAxes)\nax2.text(0.1,0.85,'Eqn. 27',transform=ax2.transAxes)\nax3.text(0.1,0.85,'Eqn. 25',transform=ax3.transAxes)\n\nax1.text(0.87,0.05,'(a)',transform=ax1.transAxes)\nax0.text(0.87,0.05,'(b)',transform=ax0.transAxes)\nax3.text(0.87,0.05,'(c)',transform=ax3.transAxes)\nax2.text(0.87,0.05,'(d)',transform=ax2.transAxes)\n\nplt.savefig('figure10_v2.eps',format='eps',bbox_inches='tight')\n",
"MSE for Cbar/Co is 0.001174 and RMSE is 0.034271 \nCNTDIFF_run38 0.0120675109304\nCNTDIFF_run45 0.0132436717651\nCNTDIFF_run73 0.0148103096414\nCNTDIFF_run75 0.0106759245929\nCNTDIFF_run81 0.0108314846474\nCNTDIFF_run79 0.0112948113406\nCNTDIFF_run67 0.0120612652605\nCNTDIFF_run51 0.0120707755862\nCNTDIFF_run69 0.0120816047982\nCNTDIFF_run71 0.0120266062954\nLOWER_BF_run01 0.0114910374771\nLOW_BF_run01 0.0108064534845\nLOWEST_BF_run01 0.00896613542819\nLOWEST_BF_run03 0.00784943096432\nLOWEST_BF_run05 0.0111767771218\nLOWEST_BF_run07 0.00908497125432\n3DVISC_REALISTIC_run07 0.0109864022971\n3DVISC_REALISTIC_run08 0.0110979078117\n3DVISC_REALISTIC_run09 0.0102663437031\n3DVISC_REALISTIC_run10 0.009697018108\n3DVISC_REALISTIC_run11 0.011176191314\n3DVISC_REALISTIC_run12 0.0098774774203\n3DVISC_REALISTIC_run16 0.0119276079234\n3DVISC_REALISTIC_run19 0.0119500906073\n3DVISC_REALISTIC_run20 0.0117853704547\n3DVISC_REALISTIC_run17 0.0115273549724\n3DVISC_REALISTIC_run21 0.0115432982509\n3DVISC_REALISTIC_run22 0.0107737634838\n3DVISC_REALISTIC_run23 0.0112124143625\n3DVISC_REALISTIC_run24 0.0108098963366\n3DVISC_REALISTIC_run25 0.0117604830116\n3DVISC_REALISTIC_run26 0.0119805204001\n3DVISC_REALISTIC_run27 0.0107045070504\n3DVISC_REALISTIC_run01 nan\n3DVISC_REALISTIC_run02 0.0118368040315\n3DVISC_REALISTIC_run03 0.0116455934201\n3DVISC_REALISTIC_run05 0.0108092048944\n"
],
[
"sns.set_context('paper')\nplt.rcParams['font.size'] = 11.0\nf = plt.figure(figsize = (5,7)) # 190mm = 7.48 in, 230cm = 9.05in\n\ngs = gspec.GridSpec(1, 2,wspace=0.4)\nax2 = plt.subplot(gs[0])\nax3 = plt.subplot(gs[1])\n\n\n# ---- plot 1:1 line ----\nax3.plot(np.linspace(5, 17, 20),np.linspace(5,17, 20),'-', color='0.5')\nax2.plot(np.linspace(1, 1.8, 20),np.linspace(1, 1.8, 20),'-', color='0.5')\n\n# ---- plot error -----\n# MSE ax2 \nphi_array = np.array([rec.N_eff_scaled/1E-3 for rec in records_epsilon])\nsca_array = np.array([rec.Neff/1E-3 for rec in records_epsilon])\nx_fit = np.linspace(5,17, 50)\nmean_sq_err = np.mean(((phi_array)-(sca_array))**2)\nupper_bound = ax3.plot(x_fit,x_fit+(mean_sq_err)**(0.5),linestyle = '--',color='0.5')\nlower_bound = ax3.plot(x_fit,x_fit-(mean_sq_err)**(0.5),linestyle = '--',color='0.5')\n\n# MSE ax3 \nphi_array = np.squeeze(np.array([rec.Tr_scaled/rec.Tr0 for rec in records_epsilon]))\nsca_array = np.squeeze(np.array([rec.Tr/rec.Tr0 for rec in records_epsilon]))\nx_fit = np.linspace(1,1.8, 50)\nmean_sq_err = np.mean(((phi_array)-(sca_array))**2)\nupper_bound = ax2.plot(x_fit,x_fit+(mean_sq_err)**(0.5),linestyle = '--',color='0.5')\nlower_bound = ax2.plot(x_fit,x_fit-(mean_sq_err)**(0.5),linestyle = '--',color='0.5')\nprint('MSE for Cbar/Co is %f and RMSE is %f ' %(mean_sq_err, mean_sq_err**(1/2)))\n\n# ---- plot scaling ----\nfor rec in records_real:\n \n plt2 = ax3.plot(rec.N_eff_scaled/1E-3,rec.Neff/1E-3,\n marker = rec.mstyle,\n markersize = 7,\n color = sns.xkcd_rgb[rec.color2],\n markeredgewidth=0.5,\n markeredgecolor = 'k',\n label=rec.label2)\n \n plt3 = ax2.plot(rec.Tr_scaled/rec.Tr0,\n (rec.Tr/rec.Tr0),\n marker = rec.mstyle,\n markersize = 7,\n color = sns.xkcd_rgb[rec.color2],\n markeredgewidth=0.5,\n markeredgecolor = 'k',\n label=rec.label2)\n\n \n \n# ---- aesthetics -----\n#ax3.set_xlim(5,15.5)\n#ax3.set_ylim(5,15.5)\n\n#ax2.set_xlim(1,1.62)\n#ax2.set_ylim(1,1.62)\n\n\nax3.set_ylabel('$N_{eff}$ model / $10^{-3}$ s$^{-1}$', labelpad=0)\nax3.set_xlabel('$N_{eff}$ scaled / $10^{-3}$ s$^{-1}$',labelpad=0.0)\nax2.set_ylabel(r'$C_{rim}$ model /$C_0$', labelpad=0)\nax2.set_xlabel(r'$\\bar{C}$ /$C_0$', labelpad=0.0)\n\nax3.tick_params(axis='x', pad=2)\nax2.tick_params(axis='x', pad=2)\n\nax3.tick_params(axis='y', pad=2)\nax2.tick_params(axis='y', pad=2)\n\nax2.legend(bbox_to_anchor=(3.2,-0.2), ncol=5,columnspacing=0.1,labelspacing=0.1,frameon=True )\n\nax2.set_aspect(1)\nax3.set_aspect(1)\n\nax3.text(0.1,0.85,'Eqn. 27',transform=ax3.transAxes)\nax2.text(0.1,0.85,'Eqn. 25',transform=ax2.transAxes)\n\nax3.text(0.87,0.05,'(b)',transform=ax3.transAxes)\nax2.text(0.87,0.05,'(a)',transform=ax2.transAxes)\n\nplt.savefig('scaling_w_Ls.eps',format='eps',bbox_inches='tight')\n",
"MSE for Cbar/Co is 0.001174 and RMSE is 0.034271 \n"
],
[
"sns.set_context('paper')\nplt.rcParams['font.size'] = 10.0\nf = plt.figure(figsize = (6,3)) # 190mm = 7.48 in, 230cm = 9.05in\n\ngs = gspec.GridSpec(1, 2)\nax0 = plt.subplot(gs[0,0])\nax1 = plt.subplot(gs[0,1])\n\n# ---- plot 1:1 lines ----\nax0.plot(np.linspace(0,7,50),np.linspace(0,7,50),'-', color='0.5')\nax1.plot(np.linspace(0,5.5,50),np.linspace(0,5.5,50),'-', color='0.5')\n\n# ---- plot errors ----\n# MSE ax0 \nphi_array = np.array([rec.Phi/1E4 for rec in records_dyn])\nsca_array = np.array([rec.Phi_scaled/1E4 for rec in records_dyn])\nx_fit = np.linspace(0,8, 50)\nmean_sq_err = np.mean(((phi_array)-(sca_array))**2)\nupper_bound = ax0.plot(x_fit,x_fit+(mean_sq_err)**(0.5),linestyle = '--',color='0.5')\nlower_bound = ax0.plot(x_fit,x_fit-(mean_sq_err)**(0.5),linestyle = '--',color='0.5')\n\n# MSE ax1 \nphi_array = np.array([rec.PhiTr_scaled/1E5 for rec in records_epsilon])\nsca_array = np.array([rec.PhiTr/1E5 for rec in records_epsilon])\nx_fit = np.linspace(0,6, 50)\nmean_sq_err = np.mean(((phi_array)-(sca_array))**2)\nupper_bound = ax1.plot(x_fit,x_fit+(mean_sq_err)**(0.5),linestyle = '--',color='0.5')\nlower_bound = ax1.plot(x_fit,x_fit-(mean_sq_err)**(0.5),linestyle = '--',color='0.5')\n\n# ---- plot scaling ----\nfor rec in records_real:\n \n plt1 = ax0.errorbar(rec.Phi_scaled/1E4,\n rec.Phi/1E4,\n yerr=rec.Phi_std/1E4,\n marker = rec.mstyle,\n markersize = 8,\n color = sns.xkcd_rgb[rec.color2],\n markeredgewidth=0.5,\n markeredgecolor = 'k',\n label=rec.label2)\n \n plt1 = ax1.errorbar(rec.PhiTr_scaled/1E5,\n rec.PhiTr/1E5,\n yerr=rec.PhiTr_std/1E5,\n marker = rec.mstyle,\n markersize = 8,\n color = sns.xkcd_rgb[rec.color2],\n markeredgewidth=0.5,\n markeredgecolor = 'k',\n label=rec.label2)\n\n\n# ---- aesthetics ----\nax0.set_ylabel('Phi',labelpad=0.5)\nax0.set_ylabel('Upwelling flux / $10^4$ m$^3$s$^{-1}$', labelpad=-0.5)\nax1.set_ylabel('Tracer flux / $10^5$ $\\mu$Mm$^3$s$^{-1}$', labelpad=-0.5)\n\nax0.set_xlabel(r'$\\Phi$ / $10^4$ m$^3$s$^{-1}$', labelpad=-0.5 )\nax1.set_xlabel(r'$\\Phi_{Tr}$ / $10^5$ $\\mu$Mm$^3$s$^{-1}$', labelpad=-0.5)\n\nax0.set_xlim(-0.2,7.2)\nax0.set_ylim(-0.2,7.2)\n\nax1.set_xlim(-0.2,5.8)\nax1.set_ylim(-0.2,5.8)\n\nax0.text(0.1,0.85,'Eqn. 28',transform=ax0.transAxes)\nax1.text(0.1,0.85,'Eqn. 29',transform=ax1.transAxes)\n\nax0.text(0.9,0.05,'(a)',transform=ax0.transAxes)\nax1.text(0.9,0.05,'(b)',transform=ax1.transAxes)\n\nax0.tick_params(axis='x', pad=2)\nax1.tick_params(axis='x', pad=2)\n\nax0.tick_params(axis='y', pad=2)\nax1.tick_params(axis='y', pad=2)\n\nax0.set_aspect(1)\nax1.set_aspect(1)\n#ax0.plot(57261.9871812/1E4,40630.372436/1E4, 'o', color='brown')\n#ax1.plot(520483.538981/1E5,298566.920079/1E5, 'o', color='brown')\nplt.savefig('figure11_v2.eps',format='eps',bbox_inches='tight')\n",
"_____no_output_____"
],
[
"sns.set_context('paper')\nsns.set_style(\"white\")\nplt.rcParams['font.size'] = 10.0\nf = plt.figure(figsize = (6,2.2)) # 190mm = 7.48 in, 230cm = 9.05in\n\ngs = gspec.GridSpec(1, 2, width_ratios=(1.0,1), wspace=0.1)\nax0 = plt.subplot(gs[0])\nax1 = plt.subplot(gs[1])\n\n# ---- plot scaling ----\nfor rec in records_real[:]:\n print(rec.label)\n plt1 = ax0.errorbar(rec.Phi/1E4,\n rec.PhiTr/1E5,\n yerr=rec.PhiTr_std/1E5,\n xerr = rec.Phi_std/1E4,\n marker = '.',\n markersize = 12,\n color = 'yellowgreen',\n markeredgewidth=1,\n markeredgecolor = 'k',\n label=rec.label2,\n capsize=2,\n ecolor = '0.7')\n if rec.kv <= 1E-5:\n plt1 = ax1.scatter(Ro(U=rec.u_mod, f=rec.f, R=rec.Wiso),\n Bu(rec.N, rec.f,rec.W,Hs),\n c = rec.PhiTr/1E5,\n vmin=0, vmax=5,\n cmap='Blues',\n marker = 'o',\n s = (rec.PhiTr/1E5)*25,\n linewidths=1,\n edgecolors='k',\n )\n else:\n plt1 = ax1.scatter(Ro(U=rec.u_mod, f=rec.f, R=rec.Wiso),\n Bu(rec.N, rec.f,rec.W,Hs),\n c = rec.PhiTr/1E5,\n vmin=0, vmax=5,\n cmap='Blues',\n marker = 'o',\n s = (rec.PhiTr/1E5)*25,\n linewidths=1,\n edgecolors='r',\n )\n# Longer canyon runs \ncb=plt.colorbar(plt1 )\ncb.set_label('$10^5$ $\\mu$Mm$^3$s$^{-1}$')\n# ---- aesthetics ----\nax0.set_xlabel('Upwelling flux / $10^4$ m$^3$s$^{-1}$', labelpad=-0.5)\nax0.set_ylabel('Tracer flux / $10^5$ $\\mu$Mm$^3$s$^{-1}$', labelpad=-0.5)\n\nax1.set_xlabel('$R_W$', labelpad=-0.5)\nax1.set_ylabel('$Bu$', labelpad=-0.5)\n\nax1.set_xlim(0,0.65)\nax1.set_ylim(0.0,0.65)\n\nax0.tick_params(axis='x', pad=2)\nax0.tick_params(axis='y', pad=2)\n\nax1.tick_params(axis='x', pad=2)\nax1.tick_params(axis='y', pad=2)\n\nax0.set_aspect(1)\nax1.set_aspect(1)\n\nax1.text(0.11,0.05,'Tracer Flux')\n\nax0.text(0.9,0.05,'(a)',transform=ax0.transAxes)\nax1.text(0.9,0.05,'(b)',transform=ax1.transAxes)\nplt.savefig('figure_fluxes_comparison.eps',format='eps',bbox_inches='tight')\n",
"base\nhigher $N$\nhighest $N$\nlower $N$\nhigher $N$\nmedium $N$\nhigher $f$\nlower $f$\nlow $f$\nlowest $f$\nlower $U$\nlow $U$\nlowest $U$\nlowest $U$, lowest $N$\nlowest $U$, highest $N$\nlowest $U$, lowest $f$\n$\\K_{can}=10^{-2}$, $\\epsilon=10$\n$\\K_{can}$, $\\epsilon=25$\n$\\K_{can}$, $\\epsilon=50$\n$\\K_{can}$, $\\epsilon=100$\n$\\K_{can}$, $\\epsilon=15$\n$\\K_{can}$, $\\epsilon=75$\n$\\K_{can}=5\\times10^{-3}$, $\\epsilon=5$\n$\\K_{can}=1\\times10^{-3}$, $\\epsilon=25$\n$\\K_{can}=1\\times10^{-3}$, $\\epsilon=100$\n$\\K_{can}=10^{-3}$, $\\epsilon=5$\n$\\K_{can}=5\\times10^{-3}$, $\\epsilon=25$\n$\\K_{can}=5\\times10^{-3}$, $\\epsilon=100$\n$K_{can}=8\\times10^{-3}$, $\\epsilon=5$\n$K_{can}=1.2\\times10^{-2}$, $\\epsilon=5$\n$K_{can}=2.5\\times10^{-3}$, $\\epsilon=5$\n$K_{can}=5\\times10^{-4}$, $\\epsilon=5$\n$K_{can}=10^{-2}$, $\\epsilon=5$\n$\\K_{can}$ Mty, bottom\n$\\K_{can}$ Eel, bottom\n$\\K_{can}$ Mty, rim\n$\\K_{can}$ Asc, bottom\n"
]
],
[
[
"### Tables",
"_____no_output_____"
]
],
[
[
"print (\"\\t\".join(['Experiment &','$\\kappa_{bg}$ &','$\\kappa_{can}$&','$\\epsilon$&'\n ]))\n\n \nfor rec in records_sel:\n print (\"\\t\".join(['%s\\t&$%0.2e$\\t&$%0.2e$\\t&$%1.0f$\\t ' \n % (rec.label2,\n rec.kbg,\n rec.kv, \n rec.epsilon,\n )\n ]))",
"Experiment &\t$\\kappa_{bg}$ &\t$\\kappa_{can}$&\t$\\epsilon$&\nbase case\t&$1.00e-05$\t&$1.00e-05$\t&$5$\t \n$\\uparrow$ $K_{bg}$\t&$1.00e-04$\t&$1.00e-04$\t&$5$\t \n$\\uparrow \\uparrow$ $K_{bg}$\t&$1.00e-03$\t&$1.00e-03$\t&$5$\t \n$\\uparrow$ $N_0$\t&$1.00e-05$\t&$1.00e-05$\t&$5$\t \n$\\uparrow \\uparrow$ $N_0$\t&$1.00e-05$\t&$1.00e-05$\t&$5$\t \n$\\Downarrow$ $N_0$\t&$1.00e-05$\t&$1.00e-05$\t&$5$\t \n$\\downarrow \\downarrow N_0$\t&$1.00e-05$\t&$1.00e-05$\t&$5$\t \n$\\downarrow$ $N_0$\t&$1.00e-05$\t&$1.00e-05$\t&$5$\t \n$\\uparrow f$\t&$1.00e-05$\t&$1.00e-05$\t&$5$\t \n$\\downarrow \\downarrow$ $f$\t&$1.00e-05$\t&$1.00e-05$\t&$5$\t \n$\\downarrow f$\t&$1.00e-05$\t&$1.00e-05$\t&$5$\t \n$\\Downarrow f$\t&$1.00e-05$\t&$1.00e-05$\t&$5$\t \n$\\downarrow$ U\t&$1.00e-05$\t&$1.00e-05$\t&$5$\t \n$\\downarrow \\downarrow$ U\t&$1.00e-05$\t&$1.00e-05$\t&$5$\t \n$\\Downarrow$ U\t&$1.00e-05$\t&$1.00e-05$\t&$5$\t \n$\\Downarrow$ U, $\\downarrow \\downarrow$ $N_0$\t&$1.00e-05$\t&$1.00e-05$\t&$5$\t \n$\\Downarrow$ U, $\\uparrow \\uparrow$ $N_0$\t&$1.00e-05$\t&$1.00e-05$\t&$5$\t \n$\\Downarrow$ U, $\\Downarrow$ $f$\t&$1.00e-05$\t&$1.00e-05$\t&$5$\t \n$\\Downarrow$ U, $\\uparrow \\uparrow K_{can}$\t&$1.00e-03$\t&$1.00e-03$\t&$5$\t \n$K_{can}$ Monterey (bot)\t&$6.84e-04$\t&$9.10e-03$\t&$10$\t \n$K_{can}$ Eel (bot)\t&$8.52e-05$\t&$1.22e-03$\t&$10$\t \n$K_{can}$ Monterey\t&$2.83e-04$\t&$8.11e-03$\t&$10$\t \n$K_{can}$ Ascension (bot)\t&$6.53e-04$\t&$1.50e-03$\t&$10$\t \n$\\Uparrow \\Uparrow K_{can}$, $\\epsilon 10$\t&$1.00e-05$\t&$1.00e-02$\t&$10$\t \n$\\Uparrow \\Uparrow K_{can}$, $\\epsilon 25$\t&$1.00e-05$\t&$1.00e-02$\t&$25$\t \n$\\Uparrow \\Uparrow K_{can}$, $\\epsilon 50$\t&$1.00e-05$\t&$1.00e-02$\t&$50$\t \n$\\Uparrow \\Uparrow K_{can}$, $\\epsilon 100$\t&$1.00e-05$\t&$1.00e-02$\t&$100$\t \n$\\Uparrow \\Uparrow K_{can}$, $\\epsilon 15$\t&$1.00e-05$\t&$1.00e-02$\t&$15$\t \n$\\Uparrow \\Uparrow K_{can}$, $\\epsilon 75$\t&$1.00e-05$\t&$1.00e-02$\t&$75$\t \n$\\Uparrow \\Uparrow K_{can}$, $\\epsilon 150$\t&$1.00e-05$\t&$1.00e-02$\t&$150$\t \n$\\uparrow \\uparrow K_{can}$\t&$1.00e-05$\t&$1.00e-03$\t&$5$\t \n$\\Uparrow \\uparrow K_{can}$\t&$1.00e-05$\t&$5.00e-03$\t&$5$\t \n$\\uparrow \\uparrow K_{can}$ $\\epsilon 25$\t&$1.00e-05$\t&$1.00e-03$\t&$25$\t \n$\\uparrow \\uparrow K_{can}$ $\\epsilon 100$\t&$1.00e-05$\t&$1.00e-03$\t&$100$\t \n$\\Uparrow \\uparrow K_{can}$ $\\epsilon 25$\t&$1.00e-05$\t&$5.00e-03$\t&$25$\t \n$\\Uparrow \\uparrow K_{can}$, $\\epsilon 100$\t&$1.00e-05$\t&$5.00e-03$\t&$100$\t \n$\\Uparrow \\uparrow \\uparrow K_{can}$\t&$1.00e-05$\t&$8.00e-03$\t&$5$\t \n$\\Uparrow \\Uparrow \\uparrow K_{can}$\t&$1.00e-05$\t&$1.20e-02$\t&$5$\t \n$\\Uparrow K_{can}$\t&$1.00e-05$\t&$2.50e-03$\t&$5$\t \n$\\uparrow K_{can}$\t&$1.00e-05$\t&$5.00e-04$\t&$5$\t \n$\\Uparrow \\Uparrow K_{can}$\t&$1.00e-05$\t&$1.00e-02$\t&$5$\t \n"
],
[
"print (\"\\t\".join(['Experiment &', '$N$ (s$^{-1}$)$&', \n '$f$ (s$^{-1}$)&', 'U (ms$^{-1}$)&', '$Bu$&','$R_L$' ,'$R_W$' ,\n ]))\n\n \nfor rec in records_sel:\n print (\"\\t\".join(['%s\\t\\t&$%.1e$\\t&$%.2e$\\t&$%.2f$\\t&$%.2f$\\t&$%.2f$\\t&$%.2f$\\t ' \n % (rec.label2, \n rec.N,\n rec.f,\n rec.u_mod,\n Bu(rec.N, rec.f, rec.W, Hs),\n Ro(U=rec.u_mod, f=rec.f, R=rec.L),\n Ro(U=rec.u_mod, f=rec.f, R=rec.Wiso),\n )\n ]))",
"Experiment &\t$N$ (s$^{-1}$)$&\t$f$ (s$^{-1}$)&\tU (ms$^{-1}$)&\t$Bu$&\t$R_L$\t$R_W$\nbase case\t\t&$5.5e-03$\t&$9.66e-05$\t&$0.36$\t&$0.40$\t&$0.45$\t&$0.31$\t \n$\\uparrow$ $K_{bg}$\t\t&$5.5e-03$\t&$9.66e-05$\t&$0.36$\t&$0.40$\t&$0.45$\t&$0.30$\t \n$\\uparrow \\uparrow$ $K_{bg}$\t\t&$5.5e-03$\t&$9.66e-05$\t&$0.32$\t&$0.40$\t&$0.40$\t&$0.27$\t \n$\\uparrow$ $N_0$\t\t&$6.3e-03$\t&$9.66e-05$\t&$0.38$\t&$0.46$\t&$0.47$\t&$0.32$\t \n$\\uparrow \\uparrow$ $N_0$\t\t&$7.4e-03$\t&$9.66e-05$\t&$0.40$\t&$0.54$\t&$0.49$\t&$0.33$\t \n$\\Downarrow$ $N_0$\t\t&$4.6e-03$\t&$9.66e-05$\t&$0.35$\t&$0.34$\t&$0.43$\t&$0.29$\t \n$\\downarrow \\downarrow N_0$\t\t&$4.7e-03$\t&$9.66e-05$\t&$0.35$\t&$0.34$\t&$0.43$\t&$0.29$\t \n$\\downarrow$ $N_0$\t\t&$5.0e-03$\t&$9.66e-05$\t&$0.35$\t&$0.37$\t&$0.44$\t&$0.30$\t \n$\\uparrow f$\t\t&$5.5e-03$\t&$1.00e-04$\t&$0.36$\t&$0.39$\t&$0.43$\t&$0.29$\t \n$\\downarrow \\downarrow$ $f$\t\t&$5.5e-03$\t&$7.68e-05$\t&$0.39$\t&$0.51$\t&$0.61$\t&$0.41$\t \n$\\downarrow f$\t\t&$5.5e-03$\t&$8.60e-05$\t&$0.38$\t&$0.45$\t&$0.53$\t&$0.36$\t \n$\\Downarrow f$\t\t&$5.5e-03$\t&$6.40e-05$\t&$0.41$\t&$0.61$\t&$0.78$\t&$0.53$\t \n$\\downarrow$ U\t\t&$5.5e-03$\t&$9.66e-05$\t&$0.31$\t&$0.40$\t&$0.39$\t&$0.26$\t \n$\\downarrow \\downarrow$ U\t\t&$5.5e-03$\t&$9.66e-05$\t&$0.26$\t&$0.40$\t&$0.32$\t&$0.22$\t \n$\\Downarrow$ U\t\t&$5.5e-03$\t&$9.66e-05$\t&$0.14$\t&$0.40$\t&$0.18$\t&$0.12$\t \n$\\Downarrow$ U, $\\downarrow \\downarrow$ $N_0$\t\t&$4.6e-03$\t&$9.66e-05$\t&$0.13$\t&$0.34$\t&$0.17$\t&$0.11$\t \n$\\Downarrow$ U, $\\uparrow \\uparrow$ $N_0$\t\t&$7.4e-03$\t&$9.66e-05$\t&$0.15$\t&$0.54$\t&$0.19$\t&$0.13$\t \n$\\Downarrow$ U, $\\Downarrow$ $f$\t\t&$5.5e-03$\t&$7.00e-05$\t&$0.15$\t&$0.56$\t&$0.27$\t&$0.18$\t \n$\\Downarrow$ U, $\\uparrow \\uparrow K_{can}$\t\t&$5.5e-03$\t&$9.66e-05$\t&$0.12$\t&$0.40$\t&$0.15$\t&$0.10$\t \n$K_{can}$ Monterey (bot)\t\t&$5.5e-03$\t&$9.66e-05$\t&$0.34$\t&$0.40$\t&$0.42$\t&$0.29$\t \n$K_{can}$ Eel (bot)\t\t&$5.5e-03$\t&$9.66e-05$\t&$0.36$\t&$0.40$\t&$0.44$\t&$0.30$\t \n$K_{can}$ Monterey\t\t&$5.5e-03$\t&$9.66e-05$\t&$0.36$\t&$0.40$\t&$0.44$\t&$0.30$\t \n$K_{can}$ Ascension (bot)\t\t&$5.5e-03$\t&$9.66e-05$\t&$0.35$\t&$0.40$\t&$0.44$\t&$0.30$\t \n$\\Uparrow \\Uparrow K_{can}$, $\\epsilon 10$\t\t&$5.5e-03$\t&$9.66e-05$\t&$0.35$\t&$0.40$\t&$0.43$\t&$0.29$\t \n$\\Uparrow \\Uparrow K_{can}$, $\\epsilon 25$\t\t&$5.5e-03$\t&$9.66e-05$\t&$0.34$\t&$0.40$\t&$0.42$\t&$0.29$\t \n$\\Uparrow \\Uparrow K_{can}$, $\\epsilon 50$\t\t&$5.5e-03$\t&$9.66e-05$\t&$0.34$\t&$0.40$\t&$0.42$\t&$0.28$\t \n$\\Uparrow \\Uparrow K_{can}$, $\\epsilon 100$\t\t&$5.5e-03$\t&$9.66e-05$\t&$0.34$\t&$0.40$\t&$0.42$\t&$0.28$\t \n$\\Uparrow \\Uparrow K_{can}$, $\\epsilon 15$\t\t&$5.5e-03$\t&$9.66e-05$\t&$0.34$\t&$0.40$\t&$0.43$\t&$0.29$\t \n$\\Uparrow \\Uparrow K_{can}$, $\\epsilon 75$\t\t&$5.5e-03$\t&$9.66e-05$\t&$0.34$\t&$0.40$\t&$0.42$\t&$0.28$\t \n$\\Uparrow \\Uparrow K_{can}$, $\\epsilon 150$\t\t&$5.5e-03$\t&$9.66e-05$\t&$0.34$\t&$0.40$\t&$0.42$\t&$0.28$\t \n$\\uparrow \\uparrow K_{can}$\t\t&$5.5e-03$\t&$9.66e-05$\t&$0.36$\t&$0.40$\t&$0.44$\t&$0.30$\t \n$\\Uparrow \\uparrow K_{can}$\t\t&$5.5e-03$\t&$9.66e-05$\t&$0.35$\t&$0.40$\t&$0.43$\t&$0.29$\t \n$\\uparrow \\uparrow K_{can}$ $\\epsilon 25$\t\t&$5.5e-03$\t&$9.66e-05$\t&$0.36$\t&$0.40$\t&$0.44$\t&$0.30$\t \n$\\uparrow \\uparrow K_{can}$ $\\epsilon 100$\t\t&$5.5e-03$\t&$9.66e-05$\t&$0.36$\t&$0.40$\t&$0.44$\t&$0.30$\t \n$\\Uparrow \\uparrow K_{can}$ $\\epsilon 25$\t\t&$5.5e-03$\t&$9.66e-05$\t&$0.35$\t&$0.40$\t&$0.43$\t&$0.29$\t \n$\\Uparrow \\uparrow 
K_{can}$, $\\epsilon 100$\t\t&$5.5e-03$\t&$9.66e-05$\t&$0.34$\t&$0.40$\t&$0.43$\t&$0.29$\t \n$\\Uparrow \\uparrow \\uparrow K_{can}$\t\t&$5.5e-03$\t&$9.66e-05$\t&$0.35$\t&$0.40$\t&$0.43$\t&$0.29$\t \n$\\Uparrow \\Uparrow \\uparrow K_{can}$\t\t&$5.5e-03$\t&$9.66e-05$\t&$0.35$\t&$0.40$\t&$0.44$\t&$0.30$\t \n$\\Uparrow K_{can}$\t\t&$5.5e-03$\t&$9.66e-05$\t&$0.35$\t&$0.40$\t&$0.44$\t&$0.30$\t \n$\\uparrow K_{can}$\t\t&$5.5e-03$\t&$9.66e-05$\t&$0.36$\t&$0.40$\t&$0.44$\t&$0.30$\t \n$\\Uparrow \\Uparrow K_{can}$\t\t&$5.5e-03$\t&$9.66e-05$\t&$0.35$\t&$0.40$\t&$0.44$\t&$0.30$\t \n"
]
],
[
[
"#### MISC.",
"_____no_output_____"
],
[
"Conversion form $\\mu Mm^3$ of $NO^-_3$ to kg of $NO^-_3$:\n\nmolecular weight of $NO^-_3$ = 3x16 O + 1x14 N = 62 g/mol\n\n$\\mu$Mm$^3$ = 1 x $10^{-6}$ x mol/0.001 m$^3$ x 1 m$^3$ = $10^{-3}$ mol\n\n$10^{-3}$ mol $NO_3$ = $10^{-3}$ mol x 62 g/mol = 0.062 g = $6.2 \\times 10^{-5}$ kg",
"_____no_output_____"
]
],
[
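[
"# Added sketch (not part of the original notebook): the unit conversion above as code.\n# It assumes only the molar mass and unit definitions quoted in the markdown cell.\nMOLAR_MASS_NO3 = 62.0  # g/mol = 3x16 (O) + 1x14 (N)\n\ndef uM_m3_to_kg(n_uM_m3):\n    \"\"\"Convert an amount of NO3- given in uM m^3 to kg.\"\"\"\n    mol = n_uM_m3 * 1e-6 / 1e-3  # 1 uM = 1e-6 mol per 1e-3 m^3\n    return mol * MOLAR_MASS_NO3 / 1000.0  # g -> kg\n\nprint(uM_m3_to_kg(1))  # 6.2e-05 kg, matching the hand calculation",
"_____no_output_____"
],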
[
"\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
]
] |
d0ec8f4fb57dca16b79fc99b65e02f887b12e36d | 111,516 | ipynb | Jupyter Notebook | hw2/ex2_reg.ipynb | chenyuw1/coursera-ml-hw | 0858ab8845d37fc596f83cf7070697cbc462e0b6 | [
"MIT"
] | null | null | null | hw2/ex2_reg.ipynb | chenyuw1/coursera-ml-hw | 0858ab8845d37fc596f83cf7070697cbc462e0b6 | [
"MIT"
] | null | null | null | hw2/ex2_reg.ipynb | chenyuw1/coursera-ml-hw | 0858ab8845d37fc596f83cf7070697cbc462e0b6 | [
"MIT"
] | null | null | null | 184.019802 | 32,416 | 0.891621 | [
[
[
"# import packages\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom scipy.optimize import fmin_bfgs\n\n%matplotlib inline",
"_____no_output_____"
],
[
"#loc = 'https://raw.githubusercontent.com/chenyuw1/coursera-ml-hw/master/hw2/ex2data2.txt'\nloc = r'C:\\Users\\c0w00f8.WMSC\\Documents\\Coursera\\1. Machine Learning\\machine-learning-ex2\\ex2\\ex2data2.txt'\ndata = pd.read_csv(loc, sep = ',', header = None)\ndata.shape",
"_____no_output_____"
],
[
"data.columns = ['Test1', 'Test2', 'y']\ndata.head()",
"_____no_output_____"
],
[
"# visualizing the data\nfig = plt.figure()\nax = fig.add_subplot(1, 1, 1)\ntitle = ax.set_title('Plot of Training Data')\nplot1 = ax.scatter(data[data.y == 1].Test1, data[data.y == 1].Test2, marker = 'P', c = 'green',\n label = 'y = 1')\nplot2 = ax.scatter(data[data.y == 0].Test1, data[data.y == 0].Test2, marker = '8', c = 'brown',\n label = 'y = 0')\nax.legend()\nfig.canvas.draw()",
"_____no_output_____"
],
[
"# Feature mapping\ndef mapFeature(x1, x2, degree): \n df0 = pd.DataFrame({'x1': x1, 'x2': x2})\n \n #else: \n # df0 = pd.concat([x1, x2], axis = 1)\n df = pd.DataFrame()\n for deg in range(degree + 1):\n for i in range(deg + 1):\n #print (\"deg: \", deg)\n #print (\"i: \", i)\n col1 = x1 ** i \n col2 = x2 ** (deg - i)\n col = [ col1[j] * col2[j] for j in range(len(df0)) ]\n col = pd.DataFrame(col) \n df = pd.concat([df, col], axis = 1)\n \n #print (df.shape)\n return df",
"_____no_output_____"
],
[
"data_mapped = mapFeature(data.iloc[:, 0], data.iloc[:, 1], 6)\ndata_mapped.shape",
"_____no_output_____"
],
[
"def sigmoid(x):\n return 1 / (1 + np.exp(-x))\n# sigmoid for vector/matrix\nsigmd = np.vectorize(sigmoid)",
"_____no_output_____"
],
[
"# cost func\ndef costRegOpt(theta, x, y, l):\n m = len(y) \n hx = sigmd(np.dot(x, theta))\n \n if (hx.all() != 0) and ((1 - hx).all() != 0):\n \n theta_2 = [ theta[i] ** 2 for i in range(len(theta)) ]\n j = (-y.T * np.log(hx) - (1 - y.T) * np.log(1 - hx)).sum() / m + sum(theta_2) * l / 2 / m\n \n else: j = 1000000\n \n return j",
"_____no_output_____"
],
[
"def gradReg(theta, x, y, l):\n m = len(y)\n hx = sigmd(np.dot(x, theta))\n \n grad = np.dot((hx - y), x) / m + l * theta / m\n grad[0] = ((hx[0] - y[0]) * x.iloc[:, 0]).sum() / m\n \n return grad",
"_____no_output_____"
],
[
"def init_theta(x):\n n = x.shape[1]\n return [0] * n",
"_____no_output_____"
],
[
"# prepare data\nx = data_mapped\ny = data.iloc[:, -1]",
"_____no_output_____"
],
[
"# cost function test\ntheta = init_theta(x)\ncostRegOpt(theta, x, y, 1)",
"_____no_output_____"
],
[
"# optimize using fmin_bfgs\ntheta = init_theta(x)\nl = 1\nmyargs = (x, y, l)\ntheta_opt = fmin_bfgs(costRegOpt, theta, args = myargs)\ntheta_opt",
"Optimization terminated successfully.\n Current function value: 0.535160\n Iterations: 46\n Function evaluations: 1410\n Gradient evaluations: 47\n"
],
[
"# plot prep\nu = np.linspace(-1, 1.5, 50)\nv = np.linspace(-1, 1.5, 50)\nz = np.zeros(shape=(len(u), len(v)))\nfor i in range(len(u)):\n for j in range(len(v)):\n ui = np.array([u[i]])\n vj = np.array([v[j]])\n z_i_j = mapFeature(ui, vj, 6)\n z[i, j] = z_i_j.dot(theta_opt)",
"_____no_output_____"
],
[
"# plot\n\n# visualizing the data\nfig = plt.figure()\nax = fig.add_subplot(1, 1, 1)\ntitle = ax.set_title('lambda = %f' % l)\nxlabel = ax.set_xlabel('Test 1')\nylabel = ax.set_ylabel('Test 2')\nplot1 = ax.scatter(data[data.y == 1].Test1, data[data.y == 1].Test2, marker = 'P', c = 'green',\n label = 'y = 1')\nplot2 = ax.scatter(data[data.y == 0].Test1, data[data.y == 0].Test2, marker = '8', c = 'brown',\n label = 'y = 0')\n\nz = z.T\nplot3 = ax.contour(u, v, z, levels = [0.5])\nplot3.clabel(inline = True, fontsize = 9)\n\nax.legend()\nfig.canvas.draw()",
"_____no_output_____"
],
[
"# test lambda = 0\ntheta = init_theta(x)\nl = 0\nmyargs = (x, y, l)\ntheta_opt = fmin_bfgs(costRegOpt, theta, args = myargs)\ntheta_opt",
"Optimization terminated successfully.\n Current function value: 0.224570\n Iterations: 592\n Function evaluations: 18090\n Gradient evaluations: 603\n"
],
[
"# plot prep\nu = np.linspace(-1, 1.5, 50)\nv = np.linspace(-1, 1.5, 50)\nz = np.zeros(shape=(len(u), len(v)))\nfor i in range(len(u)):\n for j in range(len(v)):\n ui = np.array([u[i]])\n vj = np.array([v[j]])\n z_i_j = mapFeature(ui, vj, 6)\n z[i, j] = z_i_j.dot(theta_opt)",
"_____no_output_____"
],
[
"fig = plt.figure()\nax = fig.add_subplot(1, 1, 1)\ntitle = ax.set_title('lambda = %f' % l)\nxlabel = ax.set_xlabel('Test 1')\nylabel = ax.set_ylabel('Test 2')\nplot1 = ax.scatter(data[data.y == 1].Test1, data[data.y == 1].Test2, marker = 'P', c = 'green',\n label = 'y = 1')\nplot2 = ax.scatter(data[data.y == 0].Test1, data[data.y == 0].Test2, marker = '8', c = 'brown',\n label = 'y = 0')\n\nz = z.T\nplot3 = ax.contour(u, v, z, levels = [0.5])\nplot3.clabel(inline = True, fontsize = 9)\n\nax.legend()\nfig.canvas.draw()",
"_____no_output_____"
],
[
"# test lambda = 100\ntheta = init_theta(x)\nl = 100\nmyargs = (x, y, l)\ntheta_opt = fmin_bfgs(costRegOpt, theta, args = myargs)\ntheta_opt",
"Optimization terminated successfully.\n Current function value: 0.686527\n Iterations: 4\n Function evaluations: 150\n Gradient evaluations: 5\n"
],
[
"# plot prep\nu = np.linspace(-1, 1.5, 50)\nv = np.linspace(-1, 1.5, 50)\nz = np.zeros(shape=(len(u), len(v)))\nfor i in range(len(u)):\n for j in range(len(v)):\n ui = np.array([u[i]])\n vj = np.array([v[j]])\n z_i_j = mapFeature(ui, vj, 6)\n z[i, j] = z_i_j.dot(theta_opt)",
"_____no_output_____"
],
[
"fig = plt.figure()\nax = fig.add_subplot(1, 1, 1)\ntitle = ax.set_title('lambda = %f' % l)\nxlabel = ax.set_xlabel('Test 1')\nylabel = ax.set_ylabel('Test 2')\nplot1 = ax.scatter(data[data.y == 1].Test1, data[data.y == 1].Test2, marker = 'P', c = 'green',\n label = 'y = 1')\nplot2 = ax.scatter(data[data.y == 0].Test1, data[data.y == 0].Test2, marker = '8', c = 'brown',\n label = 'y = 0')\n\nz = z.T\nplot3 = ax.contour(u, v, z)\nplot3.clabel(inline = True, fontsize = 9)\n\nax.legend()\nfig.canvas.draw()",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d0eca56ebc097a178bd07bf5420020b3167548b5 | 10,832 | ipynb | Jupyter Notebook | Untitled.ipynb | baicha12/snow-fox | 7c72ec193acd8c1b4280420c7755697e059eef41 | [
"Apache-2.0"
] | null | null | null | Untitled.ipynb | baicha12/snow-fox | 7c72ec193acd8c1b4280420c7755697e059eef41 | [
"Apache-2.0"
] | null | null | null | Untitled.ipynb | baicha12/snow-fox | 7c72ec193acd8c1b4280420c7755697e059eef41 | [
"Apache-2.0"
] | null | null | null | 19.137809 | 166 | 0.388109 | [
[
[
"# 循环结构",
"_____no_output_____"
],
[
"在程序中我们需要执行重复重复再重复的东东,使用循环结构。\n一种是for-in循环\n一种是while循环",
"_____no_output_____"
]
],
[
[
"\"\"\"\nfor循环实现1~100 求和\n\n\"\"\"\nsum = 0\nfor x in range(101):\n sum += x\nprint(sum)",
"5050\n"
],
[
"sum = 0\nfor x in range(1,101):\n sum += x\nprint(sum)",
"5050\n"
],
[
"####偶数求和\nsum = 0\nfor x in range(2,101,2):\n sum += x\nprint(sum)",
"2550\n"
]
],
[
[
"while循环 :常用于死循环进行取值",
"_____no_output_____"
]
],
[
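[
"# Added sketch (not in the original notebook): the 'infinite loop that keeps\n# taking in values' pattern mentioned above, with a sentinel value to stop.\ntotal = 0\nwhile True:\n    s = input('Enter a number (q to quit): ')  # hypothetical prompt\n    if s == 'q':\n        break  # leave the otherwise infinite loop\n    total += int(s)\nprint(total)",
"_____no_output_____"
],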
[
"\"\"\"\nfor循环实现1~100 求和\n\n\"\"\"\nsum = 0\nnum = 1\nwhile num <= 100:\n sum += num\n num += 1\nprint(sum)",
"5050\n"
],
[
"\"\"\"\nfor循环实现1~100 偶数求和\n\n\"\"\"\nsum = 0\nnum = 2\nwhile num <= 100:\n sum += num\n num += 2\nprint(sum)",
"2550\n"
]
],
[
[
"# 函数和模块的使用",
"_____no_output_____"
],
[
"def关键字来定义函数,和变量一样每个函数也有一个响亮的名字,而且命名规则跟变量的命名规则是一致的。在函数名后面的圆括号中可以放置传递给函数的参数,这一点和数学上的函数非常相似,程序中函数的参数就相当于是数学上说的函数的自变量,而函数执行完成后我们可以通过return关键字来返回一个值,这相当于数学上说的函数的因变量。",
"_____no_output_____"
]
],
[
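[
"# Added warm-up example (not in the original notebook), illustrating the paragraph\n# above: x is the parameter (independent variable) and the returned value is the\n# dependent variable.\ndef square(x):\n    return x * x\n\nprint(square(5))  # 25",
"_____no_output_____"
],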
[
"###在参数名前面的*表示args是一个可变参数(不定长参数)\n###记载调用add函数时可以传入0个或多个参数\ndef add (*args):\n total = 0\n for val in args:\n total += val\n return total\n\nprint(add())\nprint(add(1))\nprint(add(1,2,3,4))",
"0\n1\n10\n"
],
[
"##1一元二次方程\ndef gongshi():\n a = float(input('请输入a:'))\n b = float(input('请输入b:'))\n c = float(input('请输入c:'))\n r1 = (-b + ((b ** 2) - 4*a*c)**0.5) / 2 * a\n r2 = (-b - ((b ** 2) - 4*a*c)**0.5) / 2 * a\n s = (b**2 - (4 * a * c))\n if s > 0:\n print(r1,r2)\n elif s ==0:\n print(r1)\n else:\n print('The equation has no real roots') \ngongshi()\n",
"请输入a:1\n请输入b:2.0\n请输入c:1\n-1.0\n"
],
[
"##2学习加法\n\n\nimport random\ndef chengxu():\n ## num1 = int(input())\n ## num2 = int(input())\n num1 = random.randint(1,100)\n num2 = random.randint(1,100)\n num = int(input(\"输入两数之和:\"))\n if num == num1 + num2:\n print('真')\n else :\n print(\"程序为假\")\n \nchengxu()\n",
"输入两数之和:20\n程序为假\n"
],
[
"##3预测之后的天\ndef rizi():\n a = int(input(\"星期\"))\n b = int(input(\"天数\"))\n tian = b % 7\n xia = a + tian\n tai = xia % 7\n print(tai)\nrizi()\n \n",
"星期2\n天数5\n0\n"
],
[
"##4三个整数比大小\ndef tishi():\n one = input(\"请输入\")\n two = input(\"请输入\")\n three = input(\"请输入\")\n\n a = [one,two,three]\n a.sort()\n print(a)\n\ntishi()",
"请输入5\n请输入2\n请输入1\n['1', '2', '5']\n"
],
[
"##5比较价钱\ndef tishi():\n weight1 = float(input(\"package1\"))\n price1 = float(input())\n weight2 = float(input(\"package2\"))\n price2 = float(input())\n danjia1 = weight1 / price1\n danjia2 = weight2 / price2\n if danjia1 > danjia2:\n print(\"package1 hao\")\n else :\n print(\"package2 hao\")\n\ntishi()\n",
"package150\n24.59\npackage225\n11.99\npackage2 hao\n"
],
[
"##6。找出一个月中的天数\ndef tishi():\n a = int(input(\"月\"))\n b = int(input(\"年\"))\n list1 = [1,3,5,7,8,10,12]\n list2 = [4,6,9,11]\n if a in list1 :\n print(\"31天\")\n elif a in list2:\n print(\"30天\")\n else:\n if b % 4 != 0:\n print(\"28天\")\n else: \n print(\"29天\")\ntishi()",
"月5\n年2016\n31天\n"
],
[
"##7 猜硬币\nimport random\ndef tishi():\n num1 = random.randint(1,3)\n a = int(input(\"\"))\n if a == num1:\n print(\"对了\")\n else:\n print(\"错了\")\n \ntishi()",
"1\n错了\n"
],
[
"##8\nimport random\n# import numpy as np\n# res = np.random.choice(['石头','✂️','🙅🙅♀️'])\n# print(res)\nimport os\n# pywin32\nC_res = random.randint(0,2)\nU_res = int(input('0:石头,1:剪刀,2:布'))\nif C_res == U_res:\n print('平局')\nelse:\n if C_res == 0 and U_res == 1:\n print('电脑赢了 😢')\n #os.system('say you loser.')\n elif C_res == 1 and U_res == 2:\n print('电脑赢了 😢')\n # os.system('say you loser.')\n elif C_res == 2 and U_res == 0:\n print('电脑赢了 😢')\n # os.system('say you loser.')\n else:\n print('你赢了 😄')\n # os.system('say you winer.')",
"0:石头,1:剪刀,2:布0\n电脑赢了 😢\n"
],
[
"##9\ndef function():\n year = int(input('年'))\n mounth = int(input('月'))\n data = int(input('天'))\n k = year % 100\n j = year / 100\n h = (data + (26 * (mounth + 1) / 10) + k + (k / 4) +(j/4) + 5 * j ) % 7\n print('这一天是星期%d'%h)\n\nfunction()",
"年2013\n月1\n天25\n这一天是星期5\n"
],
[
"##11。回文数\ndef tishi():\n a = input()\n gw = a[2]\n bw = a[0]\n if gw == bw:\n print(\"回文\") \n else :\n print(\"非回文\") \n \n\n \ntishi()\n\n",
"121\n回文\n"
],
[
"##12. 计算三角形的周长\ndef tishi():\n a = int(input(\"请输入\"))\n b = int(input(\"请输入\"))\n c = int(input(\"请输入\"))\n zhaochang = (a + b + c)\n\n if a + b > c and a + c > b and b + c > a:\n print (zhaochang)\n else :\n print(\"错了\")\n\n\n \ntishi()\n",
"请输入1\n请输入1\n请输入1\n3\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d0eca5ef4141f9d577f7a183fe8c4c5fd113d257 | 1,013,064 | ipynb | Jupyter Notebook | scripts/LearningRate.ipynb | leoll2/MedicalCNN | 73188bf33c147276a813fbd09c403f7e553d5045 | [
"MIT"
] | 20 | 2020-03-31T13:18:25.000Z | 2022-02-10T05:23:58.000Z | scripts/LearningRate.ipynb | leoll2/MedicalCNN | 73188bf33c147276a813fbd09c403f7e553d5045 | [
"MIT"
] | 4 | 2020-03-31T11:30:15.000Z | 2021-06-20T16:14:28.000Z | scripts/LearningRate.ipynb | leoll2/MedicalCNN | 73188bf33c147276a813fbd09c403f7e553d5045 | [
"MIT"
] | 1 | 2021-04-14T17:10:06.000Z | 2021-04-14T17:10:06.000Z | 199.815385 | 97,114 | 0.736287 | [
[
[
"This notebook contains a bunch of experiments to determine the optimal learning rate value for different optimizers. The reference model is a CNN with 3 convolutional blocks; the dataset is an augmented version of the CBIS dataset.",
"_____no_output_____"
],
[
"# Environment setup",
"_____no_output_____"
]
],
[
[
"# Connect to Google Drive\n\nfrom google.colab import drive\ndrive.mount('/content/gdrive')",
"_____no_output_____"
],
[
"# Copy the dataset from Google Drive to local\n\n!cp \"/content/gdrive/My Drive/CBIS_DDSM.zip\" .\n!unzip -qq CBIS_DDSM.zip\n!rm CBIS_DDSM.zip\ncbis_path = 'CBIS_DDSM'",
"_____no_output_____"
],
[
"# Import libraries\n\n%tensorflow_version 1.x\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom tensorflow.keras import models\nfrom tensorflow.keras import optimizers\nfrom tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, LearningRateScheduler, Callback\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.optimizers import RMSprop, SGD, Adam, Nadam",
"_____no_output_____"
]
],
[
[
"# Data pre-processing",
"_____no_output_____"
]
],
[
[
"def load_training():\n \"\"\"\n Load the training set (excluding baseline patches)\n \"\"\"\n images = np.load(os.path.join(cbis_path, 'numpy data', 'train_tensor.npy'))[1::2]\n labels = np.load(os.path.join(cbis_path, 'numpy data', 'train_labels.npy'))[1::2]\n return images, labels\n\n\ndef load_testing():\n \"\"\"\n Load the test set (abnormalities patches and labels, no baseline)\n \"\"\"\n images = np.load(os.path.join(cbis_path, 'numpy data', 'public_test_tensor.npy'))[1::2]\n labels = np.load(os.path.join(cbis_path, 'numpy data', 'public_test_labels.npy'))[1::2]\n return images, labels\n\n\ndef remap_label(l):\n \"\"\"\n Remap the labels to 0->mass 1->calcification\n \"\"\"\n if l == 1 or l == 2:\n return 0\n elif l == 3 or l == 4:\n return 1\n else:\n print(\"[WARN] Unrecognized label (%d)\" % l)\n return None",
"_____no_output_____"
],
[
"# Load training and test images (abnormalities only, no baseline)\ntrain_images, train_labels= load_training()\ntest_images, test_labels= load_testing()\n\n# Number of images\nn_train_img = train_images.shape[0]\nn_test_img = test_images.shape[0]\nprint(\"Train size: %d \\t Test size: %d\" % (n_train_img, n_test_img))\n\n# Compute width and height of images\nimg_w = train_images.shape[1]\nimg_h = train_images.shape[2]\nprint(\"Image size: %dx%d\" % (img_w, img_h))\n\n# Remap labels\ntrain_labels = np.array([remap_label(l) for l in train_labels])\ntest_labels = np.array([remap_label(l) for l in test_labels])\n\n# Create a new dimension for color in the images arrays\ntrain_images = train_images.reshape((n_train_img, img_w, img_h, 1))\ntest_images = test_images.reshape((n_test_img, img_w, img_h, 1))\n\n# Convert from 16-bit (0-65535) to float (0-1)\ntrain_images = train_images.astype('uint16') / 65535\ntest_images = test_images.astype('uint16') / 65535\n\n# Shuffle the training set (originally sorted by label)\nperm = np.random.permutation(n_train_img)\ntrain_images = train_images[perm]\ntrain_labels = train_labels[perm]\n\n# Create a generator for training images\ntrain_datagen = ImageDataGenerator(\n validation_split=0.2,\n rotation_range=180,\n zoom_range=0.2,\n horizontal_flip=True,\n vertical_flip=True,\n fill_mode='reflect'\n)\n\n# Fit the generator with some images\ntrain_datagen.fit(train_images)\n\n# Split train images into actual training and validation\ntrain_generator = train_datagen.flow(train_images, train_labels, batch_size=128, subset='training')\nvalidation_generator = train_datagen.flow(train_images, train_labels, batch_size=128, subset='validation')",
"Train size: 2676 \t Test size: 336\nImage size: 150x150\n"
],
[
"# Visualize one image from the dataset and its label, just to make sure the data format is correct\n\nidx = 0\n\nplt.imshow(train_images[idx][:,:,0], cmap='gray')\nplt.show()\n\nprint(\"Label: \" + str(train_labels[idx]))",
"_____no_output_____"
],
[
"def create_cnn():\n\n model = models.Sequential()\n model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 1)))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.Conv2D(64, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.Conv2D(128, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.Flatten())\n model.add(layers.Dense(48, activation='relu'))\n model.add(layers.Dense(1, activation='sigmoid'))\n\n return model",
"_____no_output_____"
]
],
[
[
"# Learning rate estimation",
"_____no_output_____"
],
[
"The following experiment involves four of the most popular optimizers for NNs: SGD, RMSprop, Adam and Nadam.\nIn order to roughly approximate the range of reasonable values of learning rate for each optimizer, a simple strategy is adopted. Starting with a very low LR, its value is slightly increased at the end of each epoch. Initially, the network will learn slowly, because a small LR does now allow large weight updates, hence the loss will remain more or less constant. \nThen, the LR increases and the loss starts decreasing. At some point, however, the learning rate becomes so big that updates cause large and unpredictable fluctuations of the loss, and the network basically starts diverging.\n\nIn practice, the loss will start from a value around 0.69, corresponding to random prediction. After it gets lower a certain threshold, let's say 0.6, one can safely assume that the network started learning, and will eventually reach a loss minimum. Later, as soon as the weights diverge and the network goes back to random prediction, the training is stopped.\n",
"_____no_output_____"
]
],
[
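[
"# Added back-of-the-envelope sketch (not part of the original experiment):\n# with a 10% LR increase per epoch, this is how many epochs the sweep needs\n# to cover a given LR range, e.g. from Adam's 1e-6 start up to 1.\nimport math\n\ndef epochs_to_cover(lr_start, lr_end, inc_rate=1.1):\n    return math.ceil(math.log(lr_end / lr_start) / math.log(inc_rate))\n\nprint(epochs_to_cover(1e-6, 1.0))  # ~145, hence the generous epochs=300 cap",
"_____no_output_____"
],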
[
"loss_lower_threshold = 0.60\nloss_upper_threshold = 0.69\n\nclass StopOnDivergingLoss(Callback): \n def on_epoch_end(self, epoch, logs={}):\n global low_reached\n if logs.get('loss') < loss_lower_threshold:\n low_reached = True\n if logs.get('loss') > loss_upper_threshold and low_reached: \n print(\"\\nStopping training!\") \n self.model.stop_training = True\n\n# Callback for monitoring the loss at each learning rate\nclass LossLRCallback(Callback):\n def on_epoch_end(self, epoch, logs=None):\n lr2loss[opt][0].append(keras.backend.eval(self.model.optimizer.lr))\n lr2loss[opt][1].append(logs['loss'])\n\n# Callback to update the learning rate\nlr_inc_rate = 1.1\ndef lr_scheduler(epoch):\n new_lr = lr_begin*(lr_inc_rate**epoch)\n print(\"Learning rate: %.7f\" % new_lr)\n return new_lr",
"_____no_output_____"
],
[
"opts = [SGD, RMSprop, Adam, Nadam]\n\ninitial_lr = {\n SGD: 1e-3,\n RMSprop: 1e-5,\n Adam: 1e-6,\n Nadam: 1e-6\n}\n\nlr2loss = {\n SGD: [[], []],\n RMSprop: [[], []],\n Adam: [[], []],\n Nadam: [[], []]\n}\n\n# For each optimizer, perform a run incrementing the learning rate after every\n# epoch, and keep track of the results\nfor opt in opts:\n print(\"Optimizer: \" + opt.__name__)\n cnn = create_cnn()\n\n lr_begin = initial_lr[opt]\n low_reached = False\n stop_on_diverging_loss = StopOnDivergingLoss()\n losslrcb = LossLRCallback()\n lrschedulecb = keras.callbacks.LearningRateScheduler(lr_scheduler)\n\n cnn.compile(\n optimizer=opt(learning_rate=lr_begin),\n loss='binary_crossentropy',\n metrics=['accuracy'])\n\n history = cnn.fit_generator(\n train_generator,\n steps_per_epoch=n_train_img // 128,\n epochs=300,\n validation_data=validation_generator,\n callbacks=[stop_on_diverging_loss, losslrcb, lrschedulecb],\n shuffle=True,\n verbose=1,\n initial_epoch=0)",
"Optimizer: SGD\nLearning rate: 0.0010000\nEpoch 1/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6916 - acc: 0.5507Epoch 1/300\n20/20 [==============================] - 7s 366ms/step - loss: 0.6914 - acc: 0.5541 - val_loss: 0.6925 - val_acc: 0.5290\nLearning rate: 0.0011000\nEpoch 2/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6920 - acc: 0.5357Epoch 1/300\n20/20 [==============================] - 6s 311ms/step - loss: 0.6919 - acc: 0.5378 - val_loss: 0.6925 - val_acc: 0.5308\nLearning rate: 0.0012100\nEpoch 3/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6906 - acc: 0.5494Epoch 1/300\n20/20 [==============================] - 6s 312ms/step - loss: 0.6905 - acc: 0.5517 - val_loss: 0.6923 - val_acc: 0.5308\nLearning rate: 0.0013310\nEpoch 4/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6909 - acc: 0.5419Epoch 1/300\n20/20 [==============================] - 6s 310ms/step - loss: 0.6908 - acc: 0.5434 - val_loss: 0.6922 - val_acc: 0.5308\nLearning rate: 0.0014641\nEpoch 5/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6905 - acc: 0.5461Epoch 1/300\n20/20 [==============================] - 6s 311ms/step - loss: 0.6902 - acc: 0.5489 - val_loss: 0.6923 - val_acc: 0.5308\nLearning rate: 0.0016105\nEpoch 6/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6898 - acc: 0.5503Epoch 1/300\n20/20 [==============================] - 7s 355ms/step - loss: 0.6899 - acc: 0.5497 - val_loss: 0.6921 - val_acc: 0.5308\nLearning rate: 0.0017716\nEpoch 7/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6893 - acc: 0.5533Epoch 1/300\n20/20 [==============================] - 6s 304ms/step - loss: 0.6892 - acc: 0.5530 - val_loss: 0.6927 - val_acc: 0.5308\nLearning rate: 0.0019487\nEpoch 8/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6903 - acc: 0.5444Epoch 1/300\n20/20 [==============================] - 6s 321ms/step - loss: 0.6902 - acc: 0.5449 - val_loss: 0.6925 - val_acc: 0.5308\nLearning rate: 0.0021436\nEpoch 9/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6895 - acc: 0.5504Epoch 1/300\n20/20 [==============================] - 6s 315ms/step - loss: 0.6897 - acc: 0.5486 - val_loss: 0.6898 - val_acc: 0.5308\nLearning rate: 0.0023579\nEpoch 10/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6896 - acc: 0.5482Epoch 1/300\n20/20 [==============================] - 6s 314ms/step - loss: 0.6897 - acc: 0.5473 - val_loss: 0.6897 - val_acc: 0.5308\nLearning rate: 0.0025937\nEpoch 11/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6893 - acc: 0.5502Epoch 1/300\n20/20 [==============================] - 6s 308ms/step - loss: 0.6894 - acc: 0.5500 - val_loss: 0.6893 - val_acc: 0.5308\nLearning rate: 0.0028531\nEpoch 12/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6898 - acc: 0.5466Epoch 1/300\n20/20 [==============================] - 7s 356ms/step - loss: 0.6896 - acc: 0.5486 - val_loss: 0.6893 - val_acc: 0.5308\nLearning rate: 0.0031384\nEpoch 13/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6891 - acc: 0.5466Epoch 1/300\n20/20 [==============================] - 6s 312ms/step - loss: 0.6892 - acc: 0.5458 - val_loss: 0.6895 - val_acc: 0.5308\nLearning rate: 0.0034523\nEpoch 14/300\n19/20 [===========================>..] 
- ETA: 0s - loss: 0.6893 - acc: 0.5456Epoch 1/300\n20/20 [==============================] - 6s 312ms/step - loss: 0.6895 - acc: 0.5445 - val_loss: 0.6891 - val_acc: 0.5308\nLearning rate: 0.0037975\nEpoch 15/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6894 - acc: 0.5440Epoch 1/300\n20/20 [==============================] - 6s 312ms/step - loss: 0.6895 - acc: 0.5430 - val_loss: 0.6893 - val_acc: 0.5308\nLearning rate: 0.0041772\nEpoch 16/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6871 - acc: 0.5597Epoch 1/300\n20/20 [==============================] - 6s 305ms/step - loss: 0.6872 - acc: 0.5594 - val_loss: 0.6886 - val_acc: 0.5308\nLearning rate: 0.0045950\nEpoch 17/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6883 - acc: 0.5461Epoch 1/300\n20/20 [==============================] - 7s 337ms/step - loss: 0.6882 - acc: 0.5473 - val_loss: 0.6884 - val_acc: 0.5308\nLearning rate: 0.0050545\nEpoch 18/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6873 - acc: 0.5528Epoch 1/300\n20/20 [==============================] - 6s 320ms/step - loss: 0.6871 - acc: 0.5533 - val_loss: 0.6887 - val_acc: 0.5308\nLearning rate: 0.0055599\nEpoch 19/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6874 - acc: 0.5487Epoch 1/300\n20/20 [==============================] - 6s 314ms/step - loss: 0.6877 - acc: 0.5478 - val_loss: 0.6915 - val_acc: 0.5308\nLearning rate: 0.0061159\nEpoch 20/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6885 - acc: 0.5436Epoch 1/300\n20/20 [==============================] - 6s 313ms/step - loss: 0.6881 - acc: 0.5461 - val_loss: 0.6903 - val_acc: 0.5308\nLearning rate: 0.0067275\nEpoch 21/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6869 - acc: 0.5469Epoch 1/300\n20/20 [==============================] - 6s 314ms/step - loss: 0.6868 - acc: 0.5473 - val_loss: 0.6901 - val_acc: 0.5308\nLearning rate: 0.0074002\nEpoch 22/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6869 - acc: 0.5469Epoch 1/300\n20/20 [==============================] - 6s 309ms/step - loss: 0.6867 - acc: 0.5485 - val_loss: 0.6892 - val_acc: 0.5308\nLearning rate: 0.0081403\nEpoch 23/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6860 - acc: 0.5469Epoch 1/300\n20/20 [==============================] - 7s 347ms/step - loss: 0.6862 - acc: 0.5446 - val_loss: 0.6868 - val_acc: 0.5290\nLearning rate: 0.0089543\nEpoch 24/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6853 - acc: 0.5448Epoch 1/300\n20/20 [==============================] - 6s 319ms/step - loss: 0.6855 - acc: 0.5422 - val_loss: 0.6861 - val_acc: 0.5458\nLearning rate: 0.0098497\nEpoch 25/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6823 - acc: 0.5627Epoch 1/300\n20/20 [==============================] - 6s 303ms/step - loss: 0.6821 - acc: 0.5635 - val_loss: 0.6833 - val_acc: 0.5308\nLearning rate: 0.0108347\nEpoch 26/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6848 - acc: 0.5580Epoch 1/300\n20/20 [==============================] - 6s 315ms/step - loss: 0.6847 - acc: 0.5574 - val_loss: 0.6854 - val_acc: 0.5290\nLearning rate: 0.0119182\nEpoch 27/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6833 - acc: 0.5403Epoch 1/300\n20/20 [==============================] - 6s 311ms/step - loss: 0.6834 - acc: 0.5426 - val_loss: 0.6840 - val_acc: 0.6916\nLearning rate: 0.0131100\nEpoch 28/300\n19/20 [===========================>..] 
- ETA: 0s - loss: 0.6814 - acc: 0.5533Epoch 1/300\n20/20 [==============================] - 6s 305ms/step - loss: 0.6812 - acc: 0.5538 - val_loss: 0.6904 - val_acc: 0.5290\nLearning rate: 0.0144210\nEpoch 29/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6790 - acc: 0.5603Epoch 1/300\n20/20 [==============================] - 7s 352ms/step - loss: 0.6792 - acc: 0.5580 - val_loss: 0.6811 - val_acc: 0.5776\nLearning rate: 0.0158631\nEpoch 30/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6758 - acc: 0.5624Epoch 1/300\n20/20 [==============================] - 6s 302ms/step - loss: 0.6749 - acc: 0.5647 - val_loss: 0.6974 - val_acc: 0.5290\nLearning rate: 0.0174494\nEpoch 31/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6728 - acc: 0.5953Epoch 1/300\n20/20 [==============================] - 6s 312ms/step - loss: 0.6720 - acc: 0.5945 - val_loss: 0.6846 - val_acc: 0.5271\nLearning rate: 0.0191943\nEpoch 32/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6711 - acc: 0.6102Epoch 1/300\n20/20 [==============================] - 6s 312ms/step - loss: 0.6707 - acc: 0.6107 - val_loss: 0.6757 - val_acc: 0.5514\nLearning rate: 0.0211138\nEpoch 33/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6711 - acc: 0.6160Epoch 1/300\n20/20 [==============================] - 6s 317ms/step - loss: 0.6705 - acc: 0.6223 - val_loss: 0.6756 - val_acc: 0.5458\nLearning rate: 0.0232252\nEpoch 34/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6749 - acc: 0.5986Epoch 1/300\n20/20 [==============================] - 7s 343ms/step - loss: 0.6772 - acc: 0.5896 - val_loss: 0.6690 - val_acc: 0.6243\nLearning rate: 0.0255477\nEpoch 35/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6640 - acc: 0.6262Epoch 1/300\n20/20 [==============================] - 6s 308ms/step - loss: 0.6652 - acc: 0.6213 - val_loss: 0.6975 - val_acc: 0.5252\nLearning rate: 0.0281024\nEpoch 36/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6492 - acc: 0.6387Epoch 1/300\n20/20 [==============================] - 6s 312ms/step - loss: 0.6495 - acc: 0.6352 - val_loss: 0.6387 - val_acc: 0.7308\nLearning rate: 0.0309127\nEpoch 37/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6501 - acc: 0.6443Epoch 1/300\n20/20 [==============================] - 6s 317ms/step - loss: 0.6514 - acc: 0.6414 - val_loss: 0.6431 - val_acc: 0.6505\nLearning rate: 0.0340039\nEpoch 38/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6392 - acc: 0.6416Epoch 1/300\n20/20 [==============================] - 6s 314ms/step - loss: 0.6375 - acc: 0.6424 - val_loss: 0.6397 - val_acc: 0.6131\nLearning rate: 0.0374043\nEpoch 39/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6408 - acc: 0.6346Epoch 1/300\n20/20 [==============================] - 6s 307ms/step - loss: 0.6397 - acc: 0.6394 - val_loss: 0.6039 - val_acc: 0.7215\nLearning rate: 0.0411448\nEpoch 40/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6235 - acc: 0.6587Epoch 1/300\n20/20 [==============================] - 7s 351ms/step - loss: 0.6273 - acc: 0.6531 - val_loss: 0.6302 - val_acc: 0.5701\nLearning rate: 0.0452593\nEpoch 41/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6160 - acc: 0.6725Epoch 1/300\n20/20 [==============================] - 6s 311ms/step - loss: 0.6178 - acc: 0.6709 - val_loss: 0.6479 - val_acc: 0.5757\nLearning rate: 0.0497852\nEpoch 42/300\n19/20 [===========================>..] 
- ETA: 0s - loss: 0.6227 - acc: 0.6633Epoch 1/300\n20/20 [==============================] - 6s 312ms/step - loss: 0.6236 - acc: 0.6653 - val_loss: 0.6946 - val_acc: 0.5664\nLearning rate: 0.0547637\nEpoch 43/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6324 - acc: 0.6683Epoch 1/300\n20/20 [==============================] - 6s 315ms/step - loss: 0.6312 - acc: 0.6681 - val_loss: 0.6083 - val_acc: 0.7028\nLearning rate: 0.0602401\nEpoch 44/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6157 - acc: 0.6825Epoch 1/300\n20/20 [==============================] - 6s 314ms/step - loss: 0.6128 - acc: 0.6824 - val_loss: 0.6196 - val_acc: 0.6804\nLearning rate: 0.0662641\nEpoch 45/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6260 - acc: 0.6746Epoch 1/300\n20/20 [==============================] - 6s 317ms/step - loss: 0.6274 - acc: 0.6735 - val_loss: 0.6300 - val_acc: 0.7009\nLearning rate: 0.0728905\nEpoch 46/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6046 - acc: 0.6963Epoch 1/300\n20/20 [==============================] - 7s 362ms/step - loss: 0.6058 - acc: 0.6954 - val_loss: 0.5531 - val_acc: 0.7084\nLearning rate: 0.0801795\nEpoch 47/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6140 - acc: 0.6867Epoch 1/300\n20/20 [==============================] - 6s 316ms/step - loss: 0.6129 - acc: 0.6879 - val_loss: 0.5702 - val_acc: 0.7327\nLearning rate: 0.0881975\nEpoch 48/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6144 - acc: 0.6740Epoch 1/300\n20/20 [==============================] - 6s 310ms/step - loss: 0.6165 - acc: 0.6715 - val_loss: 0.6287 - val_acc: 0.6467\nLearning rate: 0.0970172\nEpoch 49/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6302 - acc: 0.6817Epoch 1/300\n20/20 [==============================] - 6s 322ms/step - loss: 0.6297 - acc: 0.6808 - val_loss: 0.6055 - val_acc: 0.7346\nLearning rate: 0.1067190\nEpoch 50/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6120 - acc: 0.6838Epoch 1/300\n20/20 [==============================] - 6s 310ms/step - loss: 0.6142 - acc: 0.6836 - val_loss: 0.7582 - val_acc: 0.5346\nLearning rate: 0.1173909\nEpoch 51/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6240 - acc: 0.6817Epoch 1/300\n20/20 [==============================] - 7s 349ms/step - loss: 0.6276 - acc: 0.6792 - val_loss: 0.6365 - val_acc: 0.7327\nLearning rate: 0.1291299\nEpoch 52/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5893 - acc: 0.7046Epoch 1/300\n20/20 [==============================] - 6s 314ms/step - loss: 0.5864 - acc: 0.7050 - val_loss: 0.5523 - val_acc: 0.7477\nLearning rate: 0.1420429\nEpoch 53/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6529 - acc: 0.6334Epoch 1/300\n20/20 [==============================] - 6s 311ms/step - loss: 0.6554 - acc: 0.6289 - val_loss: 0.6699 - val_acc: 0.5234\nLearning rate: 0.1562472\nEpoch 54/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6273 - acc: 0.6419Epoch 1/300\n20/20 [==============================] - 6s 319ms/step - loss: 0.6309 - acc: 0.6395 - val_loss: 0.7264 - val_acc: 0.5084\nLearning rate: 0.1718719\nEpoch 55/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6468 - acc: 0.6439Epoch 1/300\n20/20 [==============================] - 6s 313ms/step - loss: 0.6435 - acc: 0.6470 - val_loss: 0.5493 - val_acc: 0.7514\nLearning rate: 0.1890591\nEpoch 56/300\n19/20 [===========================>..] 
- ETA: 0s - loss: 0.6398 - acc: 0.6350Epoch 1/300\n20/20 [==============================] - 6s 314ms/step - loss: 0.6364 - acc: 0.6372 - val_loss: 0.6259 - val_acc: 0.5794\nLearning rate: 0.2079651\nEpoch 57/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6458 - acc: 0.6575Epoch 1/300\n20/20 [==============================] - 7s 354ms/step - loss: 0.6450 - acc: 0.6543 - val_loss: 0.6598 - val_acc: 0.5682\nLearning rate: 0.2287616\nEpoch 58/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6340 - acc: 0.6320Epoch 1/300\n20/20 [==============================] - 6s 316ms/step - loss: 0.6322 - acc: 0.6352 - val_loss: 0.5505 - val_acc: 0.7533\nLearning rate: 0.2516377\nEpoch 59/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6307 - acc: 0.6696Epoch 1/300\n20/20 [==============================] - 6s 311ms/step - loss: 0.6324 - acc: 0.6634 - val_loss: 0.6058 - val_acc: 0.6636\nLearning rate: 0.2768015\nEpoch 60/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.7234 - acc: 0.5686Epoch 1/300\n 4/20 [=====>........................] - ETA: 6s - loss: 0.6946 - acc: 0.5273\nStopping training!\n20/20 [==============================] - 6s 319ms/step - loss: 0.7212 - acc: 0.5695 - val_loss: 0.6904 - val_acc: 0.5308\nOptimizer: RMSprop\nLearning rate: 0.0000100\nEpoch 1/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6912 - acc: 0.5353Epoch 1/300\n20/20 [==============================] - 7s 374ms/step - loss: 0.6912 - acc: 0.5382 - val_loss: 0.6902 - val_acc: 0.5308\nLearning rate: 0.0000110\nEpoch 2/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6900 - acc: 0.5457Epoch 1/300\n20/20 [==============================] - 6s 321ms/step - loss: 0.6900 - acc: 0.5442 - val_loss: 0.6891 - val_acc: 0.5308\nLearning rate: 0.0000121\nEpoch 3/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6873 - acc: 0.5565Epoch 1/300\n20/20 [==============================] - 6s 313ms/step - loss: 0.6874 - acc: 0.5552 - val_loss: 0.6874 - val_acc: 0.5308\nLearning rate: 0.0000133\nEpoch 4/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6861 - acc: 0.5524Epoch 1/300\n20/20 [==============================] - 6s 322ms/step - loss: 0.6862 - acc: 0.5521 - val_loss: 0.6869 - val_acc: 0.5271\nLearning rate: 0.0000146\nEpoch 5/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6866 - acc: 0.5615Epoch 1/300\n20/20 [==============================] - 6s 320ms/step - loss: 0.6862 - acc: 0.5620 - val_loss: 0.6915 - val_acc: 0.5308\nLearning rate: 0.0000161\nEpoch 6/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6849 - acc: 0.5559Epoch 1/300\n20/20 [==============================] - 7s 352ms/step - loss: 0.6849 - acc: 0.5554 - val_loss: 0.6885 - val_acc: 0.5308\nLearning rate: 0.0000177\nEpoch 7/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6806 - acc: 0.5872Epoch 1/300\n20/20 [==============================] - 6s 311ms/step - loss: 0.6806 - acc: 0.5900 - val_loss: 0.6813 - val_acc: 0.6206\nLearning rate: 0.0000195\nEpoch 8/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6759 - acc: 0.5872Epoch 1/300\n20/20 [==============================] - 6s 324ms/step - loss: 0.6756 - acc: 0.5867 - val_loss: 0.6842 - val_acc: 0.5308\nLearning rate: 0.0000214\nEpoch 9/300\n19/20 [===========================>..] 
- ETA: 0s - loss: 0.6739 - acc: 0.6067Epoch 1/300\n20/20 [==============================] - 6s 314ms/step - loss: 0.6732 - acc: 0.6169 - val_loss: 0.6794 - val_acc: 0.5308\nLearning rate: 0.0000236\nEpoch 10/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6660 - acc: 0.5954Epoch 1/300\n20/20 [==============================] - 6s 323ms/step - loss: 0.6663 - acc: 0.5992 - val_loss: 0.6687 - val_acc: 0.5308\nLearning rate: 0.0000259\nEpoch 11/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6613 - acc: 0.6397Epoch 1/300\n20/20 [==============================] - 6s 308ms/step - loss: 0.6611 - acc: 0.6410 - val_loss: 0.6596 - val_acc: 0.6243\nLearning rate: 0.0000285\nEpoch 12/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6545 - acc: 0.6483Epoch 1/300\n20/20 [==============================] - 7s 359ms/step - loss: 0.6537 - acc: 0.6491 - val_loss: 0.6490 - val_acc: 0.6916\nLearning rate: 0.0000314\nEpoch 13/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6461 - acc: 0.6384Epoch 1/300\n20/20 [==============================] - 6s 311ms/step - loss: 0.6455 - acc: 0.6386 - val_loss: 0.6782 - val_acc: 0.5383\nLearning rate: 0.0000345\nEpoch 14/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6450 - acc: 0.6365Epoch 1/300\n20/20 [==============================] - 6s 321ms/step - loss: 0.6443 - acc: 0.6426 - val_loss: 0.6385 - val_acc: 0.6505\nLearning rate: 0.0000380\nEpoch 15/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6248 - acc: 0.6733Epoch 1/300\n20/20 [==============================] - 6s 320ms/step - loss: 0.6263 - acc: 0.6713 - val_loss: 0.6367 - val_acc: 0.5832\nLearning rate: 0.0000418\nEpoch 16/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6281 - acc: 0.6655Epoch 1/300\n20/20 [==============================] - 6s 313ms/step - loss: 0.6269 - acc: 0.6647 - val_loss: 0.6102 - val_acc: 0.7308\nLearning rate: 0.0000459\nEpoch 17/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6219 - acc: 0.6667Epoch 1/300\n20/20 [==============================] - 7s 347ms/step - loss: 0.6219 - acc: 0.6626 - val_loss: 0.6257 - val_acc: 0.6486\nLearning rate: 0.0000505\nEpoch 18/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6204 - acc: 0.6500Epoch 1/300\n20/20 [==============================] - 6s 316ms/step - loss: 0.6193 - acc: 0.6507 - val_loss: 0.5966 - val_acc: 0.7327\nLearning rate: 0.0000556\nEpoch 19/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6131 - acc: 0.6629Epoch 1/300\n20/20 [==============================] - 6s 317ms/step - loss: 0.6122 - acc: 0.6673 - val_loss: 0.6297 - val_acc: 0.5664\nLearning rate: 0.0000612\nEpoch 20/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6115 - acc: 0.6401Epoch 1/300\n20/20 [==============================] - 6s 310ms/step - loss: 0.6107 - acc: 0.6438 - val_loss: 0.5902 - val_acc: 0.6598\nLearning rate: 0.0000673\nEpoch 21/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6075 - acc: 0.6600Epoch 1/300\n20/20 [==============================] - 6s 321ms/step - loss: 0.6058 - acc: 0.6598 - val_loss: 0.6189 - val_acc: 0.6187\nLearning rate: 0.0000740\nEpoch 22/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6111 - acc: 0.6879Epoch 1/300\n20/20 [==============================] - 6s 313ms/step - loss: 0.6089 - acc: 0.6931 - val_loss: 0.5721 - val_acc: 0.7757\nLearning rate: 0.0000814\nEpoch 23/300\n19/20 [===========================>..] 
- ETA: 0s - loss: 0.5984 - acc: 0.6959Epoch 1/300\n20/20 [==============================] - 7s 357ms/step - loss: 0.5960 - acc: 0.6994 - val_loss: 0.5690 - val_acc: 0.7514\nLearning rate: 0.0000895\nEpoch 24/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5977 - acc: 0.6846Epoch 1/300\n20/20 [==============================] - 6s 315ms/step - loss: 0.6001 - acc: 0.6779 - val_loss: 0.6456 - val_acc: 0.5757\nLearning rate: 0.0000985\nEpoch 25/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5975 - acc: 0.6500Epoch 1/300\n20/20 [==============================] - 6s 313ms/step - loss: 0.5957 - acc: 0.6535 - val_loss: 0.5803 - val_acc: 0.6897\nLearning rate: 0.0001083\nEpoch 26/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5743 - acc: 0.6980Epoch 1/300\n20/20 [==============================] - 6s 313ms/step - loss: 0.5708 - acc: 0.6994 - val_loss: 0.5420 - val_acc: 0.7963\nLearning rate: 0.0001192\nEpoch 27/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5746 - acc: 0.7093Epoch 1/300\n20/20 [==============================] - 6s 318ms/step - loss: 0.5737 - acc: 0.7093 - val_loss: 0.5043 - val_acc: 0.7907\nLearning rate: 0.0001311\nEpoch 28/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5434 - acc: 0.7405Epoch 1/300\n20/20 [==============================] - 6s 316ms/step - loss: 0.5442 - acc: 0.7347 - val_loss: 0.5969 - val_acc: 0.6598\nLearning rate: 0.0001442\nEpoch 29/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5275 - acc: 0.7509Epoch 1/300\n20/20 [==============================] - 7s 366ms/step - loss: 0.5256 - acc: 0.7541 - val_loss: 0.5479 - val_acc: 0.7234\nLearning rate: 0.0001586\nEpoch 30/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5223 - acc: 0.7497Epoch 1/300\n20/20 [==============================] - 6s 315ms/step - loss: 0.5208 - acc: 0.7489 - val_loss: 0.5741 - val_acc: 0.7402\nLearning rate: 0.0001745\nEpoch 31/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5131 - acc: 0.7647Epoch 1/300\n20/20 [==============================] - 6s 317ms/step - loss: 0.5170 - acc: 0.7604 - val_loss: 0.5031 - val_acc: 0.8037\nLearning rate: 0.0001919\nEpoch 32/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5274 - acc: 0.7597Epoch 1/300\n20/20 [==============================] - 6s 323ms/step - loss: 0.5251 - acc: 0.7620 - val_loss: 0.5075 - val_acc: 0.7907\nLearning rate: 0.0002111\nEpoch 33/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5362 - acc: 0.7426Epoch 1/300\n20/20 [==============================] - 6s 316ms/step - loss: 0.5393 - acc: 0.7382 - val_loss: 0.5080 - val_acc: 0.8093\nLearning rate: 0.0002323\nEpoch 34/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.4992 - acc: 0.7714Epoch 1/300\n20/20 [==============================] - 7s 348ms/step - loss: 0.4973 - acc: 0.7735 - val_loss: 0.5251 - val_acc: 0.7944\nLearning rate: 0.0002555\nEpoch 35/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5164 - acc: 0.7530Epoch 1/300\n20/20 [==============================] - 6s 322ms/step - loss: 0.5211 - acc: 0.7481 - val_loss: 0.6099 - val_acc: 0.6710\nLearning rate: 0.0002810\nEpoch 36/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5185 - acc: 0.7559Epoch 1/300\n20/20 [==============================] - 6s 319ms/step - loss: 0.5151 - acc: 0.7568 - val_loss: 0.9628 - val_acc: 0.5477\nLearning rate: 0.0003091\nEpoch 37/300\n19/20 [===========================>..] 
- ETA: 0s - loss: 0.5704 - acc: 0.7184Epoch 1/300\n20/20 [==============================] - 6s 317ms/step - loss: 0.5648 - acc: 0.7232 - val_loss: 0.4801 - val_acc: 0.7570\nLearning rate: 0.0003400\nEpoch 38/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5363 - acc: 0.7379Epoch 1/300\n20/20 [==============================] - 6s 316ms/step - loss: 0.5320 - acc: 0.7418 - val_loss: 0.6904 - val_acc: 0.6299\nLearning rate: 0.0003740\nEpoch 39/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5644 - acc: 0.7284Epoch 1/300\n20/20 [==============================] - 6s 315ms/step - loss: 0.5598 - acc: 0.7279 - val_loss: 0.5467 - val_acc: 0.7570\nLearning rate: 0.0004114\nEpoch 40/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5244 - acc: 0.7330Epoch 1/300\n20/20 [==============================] - 7s 344ms/step - loss: 0.5200 - acc: 0.7357 - val_loss: 0.4090 - val_acc: 0.8150\nLearning rate: 0.0004526\nEpoch 41/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5331 - acc: 0.7159Epoch 1/300\n20/20 [==============================] - 6s 321ms/step - loss: 0.5289 - acc: 0.7184 - val_loss: 0.5773 - val_acc: 0.7009\nLearning rate: 0.0004979\nEpoch 42/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5604 - acc: 0.7253Epoch 1/300\n20/20 [==============================] - 6s 323ms/step - loss: 0.5557 - acc: 0.7289 - val_loss: 0.5066 - val_acc: 0.7047\nLearning rate: 0.0005476\nEpoch 43/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5992 - acc: 0.6914Epoch 1/300\n20/20 [==============================] - 6s 314ms/step - loss: 0.5931 - acc: 0.6948 - val_loss: 0.4547 - val_acc: 0.8243\nLearning rate: 0.0006024\nEpoch 44/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5759 - acc: 0.6850Epoch 1/300\n20/20 [==============================] - 6s 319ms/step - loss: 0.5712 - acc: 0.6911 - val_loss: 0.4876 - val_acc: 0.8019\nLearning rate: 0.0006626\nEpoch 45/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5670 - acc: 0.7000Epoch 1/300\n20/20 [==============================] - 6s 315ms/step - loss: 0.5659 - acc: 0.7026 - val_loss: 0.5202 - val_acc: 0.7981\nLearning rate: 0.0007289\nEpoch 46/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6261 - acc: 0.6750Epoch 1/300\n20/20 [==============================] - 7s 363ms/step - loss: 0.6242 - acc: 0.6792 - val_loss: 0.5667 - val_acc: 0.7738\nLearning rate: 0.0008018\nEpoch 47/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6708 - acc: 0.6838Epoch 1/300\n20/20 [==============================] - 6s 317ms/step - loss: 0.6663 - acc: 0.6871 - val_loss: 0.8013 - val_acc: 0.5738\nLearning rate: 0.0008820\nEpoch 48/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5889 - acc: 0.7096Epoch 1/300\n20/20 [==============================] - 6s 313ms/step - loss: 0.5852 - acc: 0.7145 - val_loss: 1.1821 - val_acc: 0.5047\nLearning rate: 0.0009702\nEpoch 49/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6334 - acc: 0.6770Epoch 1/300\n20/20 [==============================] - 6s 313ms/step - loss: 0.6356 - acc: 0.6735 - val_loss: 0.6081 - val_acc: 0.7720\nLearning rate: 0.0010672\nEpoch 50/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.7007 - acc: 0.6854Epoch 1/300\n 4/20 [=====>........................] 
- ETA: 5s - loss: 0.5803 - acc: 0.6328\nStopping training!\n20/20 [==============================] - 6s 317ms/step - loss: 0.7044 - acc: 0.6804 - val_loss: 0.6041 - val_acc: 0.6355\nOptimizer: Adam\nLearning rate: 0.0000010\nEpoch 1/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6966 - acc: 0.4531Epoch 1/300\n20/20 [==============================] - 7s 365ms/step - loss: 0.6965 - acc: 0.4539 - val_loss: 0.6950 - val_acc: 0.4710\nLearning rate: 0.0000011\nEpoch 2/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6935 - acc: 0.4826Epoch 1/300\n20/20 [==============================] - 6s 314ms/step - loss: 0.6934 - acc: 0.4871 - val_loss: 0.6922 - val_acc: 0.5271\nLearning rate: 0.0000012\nEpoch 3/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6924 - acc: 0.5350Epoch 1/300\n20/20 [==============================] - 6s 318ms/step - loss: 0.6923 - acc: 0.5363 - val_loss: 0.6915 - val_acc: 0.5327\nLearning rate: 0.0000013\nEpoch 4/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6919 - acc: 0.5390Epoch 1/300\n20/20 [==============================] - 6s 314ms/step - loss: 0.6919 - acc: 0.5386 - val_loss: 0.6898 - val_acc: 0.5308\nLearning rate: 0.0000015\nEpoch 5/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6891 - acc: 0.5615Epoch 1/300\n20/20 [==============================] - 6s 316ms/step - loss: 0.6891 - acc: 0.5616 - val_loss: 0.6893 - val_acc: 0.5308\nLearning rate: 0.0000016\nEpoch 6/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6907 - acc: 0.5428Epoch 1/300\n20/20 [==============================] - 7s 355ms/step - loss: 0.6908 - acc: 0.5414 - val_loss: 0.6897 - val_acc: 0.5308\nLearning rate: 0.0000018\nEpoch 7/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6897 - acc: 0.5482Epoch 1/300\n20/20 [==============================] - 6s 317ms/step - loss: 0.6897 - acc: 0.5481 - val_loss: 0.6892 - val_acc: 0.5308\nLearning rate: 0.0000019\nEpoch 8/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6897 - acc: 0.5478Epoch 1/300\n20/20 [==============================] - 6s 314ms/step - loss: 0.6898 - acc: 0.5465 - val_loss: 0.6891 - val_acc: 0.5308\nLearning rate: 0.0000021\nEpoch 9/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6894 - acc: 0.5461Epoch 1/300\n20/20 [==============================] - 6s 308ms/step - loss: 0.6894 - acc: 0.5454 - val_loss: 0.6887 - val_acc: 0.5308\nLearning rate: 0.0000024\nEpoch 10/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6874 - acc: 0.5611Epoch 1/300\n20/20 [==============================] - 6s 312ms/step - loss: 0.6882 - acc: 0.5560 - val_loss: 0.6885 - val_acc: 0.5308\nLearning rate: 0.0000026\nEpoch 11/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6900 - acc: 0.5423Epoch 1/300\n20/20 [==============================] - 6s 314ms/step - loss: 0.6902 - acc: 0.5414 - val_loss: 0.6883 - val_acc: 0.5308\nLearning rate: 0.0000029\nEpoch 12/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6890 - acc: 0.5465Epoch 1/300\n20/20 [==============================] - 7s 356ms/step - loss: 0.6891 - acc: 0.5453 - val_loss: 0.6882 - val_acc: 0.5308\nLearning rate: 0.0000031\nEpoch 13/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6882 - acc: 0.5482Epoch 1/300\n20/20 [==============================] - 6s 314ms/step - loss: 0.6887 - acc: 0.5450 - val_loss: 0.6873 - val_acc: 0.5308\nLearning rate: 0.0000035\nEpoch 14/300\n19/20 [===========================>..] 
- ETA: 0s - loss: 0.6870 - acc: 0.5574Epoch 1/300\n20/20 [==============================] - 6s 301ms/step - loss: 0.6872 - acc: 0.5562 - val_loss: 0.6876 - val_acc: 0.5308\nLearning rate: 0.0000038\nEpoch 15/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6893 - acc: 0.5382Epoch 1/300\n20/20 [==============================] - 6s 319ms/step - loss: 0.6891 - acc: 0.5394 - val_loss: 0.6870 - val_acc: 0.5308\nLearning rate: 0.0000042\nEpoch 16/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6871 - acc: 0.5540Epoch 1/300\n20/20 [==============================] - 6s 314ms/step - loss: 0.6873 - acc: 0.5521 - val_loss: 0.6865 - val_acc: 0.5308\nLearning rate: 0.0000046\nEpoch 17/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6867 - acc: 0.5486Epoch 1/300\n20/20 [==============================] - 7s 349ms/step - loss: 0.6868 - acc: 0.5481 - val_loss: 0.6872 - val_acc: 0.5308\nLearning rate: 0.0000051\nEpoch 18/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6865 - acc: 0.5469Epoch 1/300\n20/20 [==============================] - 6s 316ms/step - loss: 0.6863 - acc: 0.5477 - val_loss: 0.6868 - val_acc: 0.5308\nLearning rate: 0.0000056\nEpoch 19/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6855 - acc: 0.5494Epoch 1/300\n20/20 [==============================] - 6s 311ms/step - loss: 0.6859 - acc: 0.5473 - val_loss: 0.6855 - val_acc: 0.5290\nLearning rate: 0.0000061\nEpoch 20/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6848 - acc: 0.5419Epoch 1/300\n20/20 [==============================] - 6s 313ms/step - loss: 0.6846 - acc: 0.5446 - val_loss: 0.6849 - val_acc: 0.5271\nLearning rate: 0.0000067\nEpoch 21/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6840 - acc: 0.5453Epoch 1/300\n20/20 [==============================] - 6s 315ms/step - loss: 0.6838 - acc: 0.5474 - val_loss: 0.6829 - val_acc: 0.5252\nLearning rate: 0.0000074\nEpoch 22/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6823 - acc: 0.5599Epoch 1/300\n20/20 [==============================] - 6s 312ms/step - loss: 0.6824 - acc: 0.5580 - val_loss: 0.6815 - val_acc: 0.5290\nLearning rate: 0.0000081\nEpoch 23/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6803 - acc: 0.5519Epoch 1/300\n20/20 [==============================] - 7s 350ms/step - loss: 0.6806 - acc: 0.5501 - val_loss: 0.6791 - val_acc: 0.5308\nLearning rate: 0.0000090\nEpoch 24/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6778 - acc: 0.5632Epoch 1/300\n20/20 [==============================] - 6s 312ms/step - loss: 0.6784 - acc: 0.5588 - val_loss: 0.6778 - val_acc: 0.5308\nLearning rate: 0.0000098\nEpoch 25/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6748 - acc: 0.5593Epoch 1/300\n20/20 [==============================] - 6s 311ms/step - loss: 0.6748 - acc: 0.5582 - val_loss: 0.6736 - val_acc: 0.5514\nLearning rate: 0.0000108\nEpoch 26/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6707 - acc: 0.5745Epoch 1/300\n20/20 [==============================] - 6s 314ms/step - loss: 0.6707 - acc: 0.5723 - val_loss: 0.6714 - val_acc: 0.5271\nLearning rate: 0.0000119\nEpoch 27/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6651 - acc: 0.5824Epoch 1/300\n20/20 [==============================] - 6s 317ms/step - loss: 0.6645 - acc: 0.5830 - val_loss: 0.6646 - val_acc: 0.5757\nLearning rate: 0.0000131\nEpoch 28/300\n19/20 [===========================>..] 
- ETA: 0s - loss: 0.6623 - acc: 0.6406Epoch 1/300\n20/20 [==============================] - 6s 313ms/step - loss: 0.6619 - acc: 0.6444 - val_loss: 0.6588 - val_acc: 0.7439\nLearning rate: 0.0000144\nEpoch 29/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6524 - acc: 0.6491Epoch 1/300\n20/20 [==============================] - 7s 363ms/step - loss: 0.6528 - acc: 0.6463 - val_loss: 0.6459 - val_acc: 0.6187\nLearning rate: 0.0000159\nEpoch 30/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6435 - acc: 0.6600Epoch 1/300\n20/20 [==============================] - 6s 317ms/step - loss: 0.6436 - acc: 0.6630 - val_loss: 0.6370 - val_acc: 0.7271\nLearning rate: 0.0000174\nEpoch 31/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6363 - acc: 0.6529Epoch 1/300\n20/20 [==============================] - 6s 317ms/step - loss: 0.6368 - acc: 0.6523 - val_loss: 0.6284 - val_acc: 0.5888\nLearning rate: 0.0000192\nEpoch 32/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6250 - acc: 0.6688Epoch 1/300\n20/20 [==============================] - 6s 313ms/step - loss: 0.6260 - acc: 0.6681 - val_loss: 0.6151 - val_acc: 0.7383\nLearning rate: 0.0000211\nEpoch 33/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6094 - acc: 0.6854Epoch 1/300\n20/20 [==============================] - 6s 312ms/step - loss: 0.6095 - acc: 0.6803 - val_loss: 0.5983 - val_acc: 0.6841\nLearning rate: 0.0000232\nEpoch 34/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5969 - acc: 0.7021Epoch 1/300\n20/20 [==============================] - 7s 341ms/step - loss: 0.5981 - acc: 0.6962 - val_loss: 0.5875 - val_acc: 0.6766\nLearning rate: 0.0000255\nEpoch 35/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5845 - acc: 0.7130Epoch 1/300\n20/20 [==============================] - 6s 316ms/step - loss: 0.5846 - acc: 0.7108 - val_loss: 0.5688 - val_acc: 0.7346\nLearning rate: 0.0000281\nEpoch 36/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5813 - acc: 0.7142Epoch 1/300\n20/20 [==============================] - 6s 317ms/step - loss: 0.5831 - acc: 0.7113 - val_loss: 0.5557 - val_acc: 0.7626\nLearning rate: 0.0000309\nEpoch 37/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5620 - acc: 0.7218Epoch 1/300\n20/20 [==============================] - 6s 312ms/step - loss: 0.5617 - acc: 0.7245 - val_loss: 0.5401 - val_acc: 0.7701\nLearning rate: 0.0000340\nEpoch 38/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5547 - acc: 0.7409Epoch 1/300\n20/20 [==============================] - 6s 310ms/step - loss: 0.5545 - acc: 0.7414 - val_loss: 0.5268 - val_acc: 0.7664\nLearning rate: 0.0000374\nEpoch 39/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5406 - acc: 0.7405Epoch 1/300\n20/20 [==============================] - 6s 313ms/step - loss: 0.5407 - acc: 0.7375 - val_loss: 0.5079 - val_acc: 0.7645\nLearning rate: 0.0000411\nEpoch 40/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5259 - acc: 0.7608Epoch 1/300\n20/20 [==============================] - 7s 349ms/step - loss: 0.5310 - acc: 0.7538 - val_loss: 0.4941 - val_acc: 0.7458\nLearning rate: 0.0000453\nEpoch 41/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5205 - acc: 0.7447Epoch 1/300\n20/20 [==============================] - 6s 318ms/step - loss: 0.5213 - acc: 0.7469 - val_loss: 0.4935 - val_acc: 0.7533\nLearning rate: 0.0000498\nEpoch 42/300\n19/20 [===========================>..] 
- ETA: 0s - loss: 0.5235 - acc: 0.7426Epoch 1/300\n20/20 [==============================] - 6s 308ms/step - loss: 0.5296 - acc: 0.7365 - val_loss: 0.5872 - val_acc: 0.6748\nLearning rate: 0.0000548\nEpoch 43/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5163 - acc: 0.7529Epoch 1/300\n20/20 [==============================] - 6s 311ms/step - loss: 0.5154 - acc: 0.7535 - val_loss: 0.4850 - val_acc: 0.7402\nLearning rate: 0.0000602\nEpoch 44/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5020 - acc: 0.7731Epoch 1/300\n20/20 [==============================] - 6s 312ms/step - loss: 0.5021 - acc: 0.7707 - val_loss: 0.4602 - val_acc: 0.7720\nLearning rate: 0.0000663\nEpoch 45/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.4898 - acc: 0.7701Epoch 1/300\n20/20 [==============================] - 6s 313ms/step - loss: 0.4905 - acc: 0.7667 - val_loss: 0.4510 - val_acc: 0.7925\nLearning rate: 0.0000729\nEpoch 46/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5116 - acc: 0.7557Epoch 1/300\n20/20 [==============================] - 7s 356ms/step - loss: 0.5077 - acc: 0.7574 - val_loss: 0.4652 - val_acc: 0.7346\nLearning rate: 0.0000802\nEpoch 47/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5043 - acc: 0.7488Epoch 1/300\n20/20 [==============================] - 6s 320ms/step - loss: 0.5042 - acc: 0.7500 - val_loss: 0.4540 - val_acc: 0.7626\nLearning rate: 0.0000882\nEpoch 48/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.4594 - acc: 0.7843Epoch 1/300\n20/20 [==============================] - 6s 308ms/step - loss: 0.4588 - acc: 0.7850 - val_loss: 0.4340 - val_acc: 0.8206\nLearning rate: 0.0000970\nEpoch 49/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.4545 - acc: 0.7913Epoch 1/300\n20/20 [==============================] - 6s 314ms/step - loss: 0.4549 - acc: 0.7908 - val_loss: 0.4661 - val_acc: 0.8093\nLearning rate: 0.0001067\nEpoch 50/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.4727 - acc: 0.7655Epoch 1/300\n20/20 [==============================] - 6s 313ms/step - loss: 0.4740 - acc: 0.7632 - val_loss: 0.4543 - val_acc: 0.7701\nLearning rate: 0.0001174\nEpoch 51/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.4415 - acc: 0.7864Epoch 1/300\n20/20 [==============================] - 7s 331ms/step - loss: 0.4430 - acc: 0.7861 - val_loss: 0.4307 - val_acc: 0.8206\nLearning rate: 0.0001291\nEpoch 52/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.4236 - acc: 0.8078Epoch 1/300\n20/20 [==============================] - 6s 306ms/step - loss: 0.4228 - acc: 0.8056 - val_loss: 0.4441 - val_acc: 0.7738\nLearning rate: 0.0001420\nEpoch 53/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.4470 - acc: 0.7899Epoch 1/300\n20/20 [==============================] - 6s 308ms/step - loss: 0.4478 - acc: 0.7891 - val_loss: 0.4523 - val_acc: 0.7738\nLearning rate: 0.0001562\nEpoch 54/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.4196 - acc: 0.8048Epoch 1/300\n20/20 [==============================] - 6s 306ms/step - loss: 0.4186 - acc: 0.8051 - val_loss: 0.4193 - val_acc: 0.8280\nLearning rate: 0.0001719\nEpoch 55/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.4077 - acc: 0.8164Epoch 1/300\n20/20 [==============================] - 6s 308ms/step - loss: 0.4093 - acc: 0.8170 - val_loss: 0.4317 - val_acc: 0.8206\nLearning rate: 0.0001891\nEpoch 56/300\n19/20 [===========================>..] 
- ETA: 0s - loss: 0.4110 - acc: 0.8158Epoch 1/300\n20/20 [==============================] - 6s 308ms/step - loss: 0.4105 - acc: 0.8169 - val_loss: 0.4173 - val_acc: 0.8262\nLearning rate: 0.0002080\nEpoch 57/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.4076 - acc: 0.8118Epoch 1/300\n20/20 [==============================] - 7s 342ms/step - loss: 0.4074 - acc: 0.8127 - val_loss: 0.4159 - val_acc: 0.8206\nLearning rate: 0.0002288\nEpoch 58/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.4024 - acc: 0.8118Epoch 1/300\n20/20 [==============================] - 6s 306ms/step - loss: 0.4020 - acc: 0.8123 - val_loss: 0.4072 - val_acc: 0.8280\nLearning rate: 0.0002516\nEpoch 59/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.3820 - acc: 0.8302Epoch 1/300\n20/20 [==============================] - 6s 306ms/step - loss: 0.3848 - acc: 0.8281 - val_loss: 0.4064 - val_acc: 0.8355\nLearning rate: 0.0002768\nEpoch 60/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.4017 - acc: 0.8189Epoch 1/300\n20/20 [==============================] - 6s 322ms/step - loss: 0.4031 - acc: 0.8190 - val_loss: 0.4794 - val_acc: 0.7907\nLearning rate: 0.0003045\nEpoch 61/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.4158 - acc: 0.8095Epoch 1/300\n20/20 [==============================] - 6s 316ms/step - loss: 0.4124 - acc: 0.8108 - val_loss: 0.4257 - val_acc: 0.8168\nLearning rate: 0.0003349\nEpoch 62/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.4066 - acc: 0.8189Epoch 1/300\n20/20 [==============================] - 6s 317ms/step - loss: 0.4048 - acc: 0.8194 - val_loss: 0.3997 - val_acc: 0.8411\nLearning rate: 0.0003684\nEpoch 63/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.3892 - acc: 0.8269Epoch 1/300\n20/20 [==============================] - 7s 358ms/step - loss: 0.3935 - acc: 0.8242 - val_loss: 0.4194 - val_acc: 0.8280\nLearning rate: 0.0004053\nEpoch 64/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.3948 - acc: 0.8173Epoch 1/300\n20/20 [==============================] - 6s 319ms/step - loss: 0.3919 - acc: 0.8194 - val_loss: 0.4686 - val_acc: 0.8037\nLearning rate: 0.0004458\nEpoch 65/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.4336 - acc: 0.7981Epoch 1/300\n20/20 [==============================] - 6s 317ms/step - loss: 0.4317 - acc: 0.8000 - val_loss: 0.3972 - val_acc: 0.8318\nLearning rate: 0.0004904\nEpoch 66/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.4163 - acc: 0.8146Epoch 1/300\n20/20 [==============================] - 6s 314ms/step - loss: 0.4149 - acc: 0.8169 - val_loss: 0.4170 - val_acc: 0.8318\nLearning rate: 0.0005394\nEpoch 67/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5333 - acc: 0.7351Epoch 1/300\n20/20 [==============================] - 6s 316ms/step - loss: 0.5315 - acc: 0.7390 - val_loss: 0.4805 - val_acc: 0.7701\nLearning rate: 0.0005933\nEpoch 68/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.4974 - acc: 0.7676Epoch 1/300\n20/20 [==============================] - 7s 349ms/step - loss: 0.4936 - acc: 0.7675 - val_loss: 0.4521 - val_acc: 0.8019\nLearning rate: 0.0006527\nEpoch 69/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.4377 - acc: 0.7993Epoch 1/300\n20/20 [==============================] - 6s 322ms/step - loss: 0.4448 - acc: 0.7952 - val_loss: 0.4808 - val_acc: 0.7682\nLearning rate: 0.0007180\nEpoch 70/300\n19/20 [===========================>..] 
- ETA: 0s - loss: 0.4563 - acc: 0.7856Epoch 1/300\n20/20 [==============================] - 6s 315ms/step - loss: 0.4513 - acc: 0.7881 - val_loss: 0.4113 - val_acc: 0.8000\nLearning rate: 0.0007897\nEpoch 71/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.4350 - acc: 0.7951Epoch 1/300\n20/20 [==============================] - 6s 314ms/step - loss: 0.4390 - acc: 0.7940 - val_loss: 0.5256 - val_acc: 0.7402\nLearning rate: 0.0008687\nEpoch 72/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.4429 - acc: 0.7960Epoch 1/300\n20/20 [==============================] - 6s 315ms/step - loss: 0.4386 - acc: 0.7984 - val_loss: 0.3978 - val_acc: 0.8037\nLearning rate: 0.0009556\nEpoch 73/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.4061 - acc: 0.8102Epoch 1/300\n20/20 [==============================] - 6s 313ms/step - loss: 0.4093 - acc: 0.8099 - val_loss: 0.3822 - val_acc: 0.8318\nLearning rate: 0.0010512\nEpoch 74/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.4519 - acc: 0.7860Epoch 1/300\n20/20 [==============================] - 7s 347ms/step - loss: 0.4558 - acc: 0.7853 - val_loss: 0.4582 - val_acc: 0.7757\nLearning rate: 0.0011563\nEpoch 75/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.4389 - acc: 0.7947Epoch 1/300\n20/20 [==============================] - 6s 320ms/step - loss: 0.4424 - acc: 0.7929 - val_loss: 0.4469 - val_acc: 0.7850\nLearning rate: 0.0012719\nEpoch 76/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5505 - acc: 0.7341Epoch 1/300\n20/20 [==============================] - 6s 314ms/step - loss: 0.5477 - acc: 0.7386 - val_loss: 0.4923 - val_acc: 0.7551\nLearning rate: 0.0013991\nEpoch 77/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.4276 - acc: 0.7993Epoch 1/300\n20/20 [==============================] - 6s 321ms/step - loss: 0.4208 - acc: 0.8028 - val_loss: 0.5516 - val_acc: 0.7308\nLearning rate: 0.0015390\nEpoch 78/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.4098 - acc: 0.8173Epoch 1/300\n20/20 [==============================] - 6s 321ms/step - loss: 0.4118 - acc: 0.8139 - val_loss: 0.3803 - val_acc: 0.8355\nLearning rate: 0.0016929\nEpoch 79/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.3911 - acc: 0.8294Epoch 1/300\n20/20 [==============================] - 6s 318ms/step - loss: 0.3946 - acc: 0.8253 - val_loss: 0.3863 - val_acc: 0.8355\nLearning rate: 0.0018622\nEpoch 80/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.3951 - acc: 0.8152Epoch 1/300\n20/20 [==============================] - 7s 363ms/step - loss: 0.4101 - acc: 0.8075 - val_loss: 0.4829 - val_acc: 0.7682\nLearning rate: 0.0020484\nEpoch 81/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6369 - acc: 0.6404Epoch 1/300\n20/20 [==============================] - 6s 316ms/step - loss: 0.6366 - acc: 0.6404 - val_loss: 0.6436 - val_acc: 0.7065\nLearning rate: 0.0022532\nEpoch 82/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6856 - acc: 0.6253Epoch 1/300\n20/20 [==============================] - 6s 309ms/step - loss: 0.6837 - acc: 0.6293 - val_loss: 0.6068 - val_acc: 0.7065\nLearning rate: 0.0024786\nEpoch 83/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.7220 - acc: 0.6180Epoch 1/300\n 5/20 [======>.......................] 
- ETA: 5s - loss: 0.6815 - acc: 0.5308\nStopping training!\n20/20 [==============================] - 6s 320ms/step - loss: 0.7215 - acc: 0.6121 - val_loss: 0.6815 - val_acc: 0.5308\nOptimizer: Nadam\nLearning rate: 0.0000010\nEpoch 1/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6919 - acc: 0.5432Epoch 1/300\n20/20 [==============================] - 8s 381ms/step - loss: 0.6919 - acc: 0.5430 - val_loss: 0.6924 - val_acc: 0.5308\nLearning rate: 0.0000011\nEpoch 2/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6905 - acc: 0.5549Epoch 1/300\n20/20 [==============================] - 6s 324ms/step - loss: 0.6906 - acc: 0.5533 - val_loss: 0.6923 - val_acc: 0.5308\nLearning rate: 0.0000012\nEpoch 3/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6914 - acc: 0.5411Epoch 1/300\n20/20 [==============================] - 6s 317ms/step - loss: 0.6912 - acc: 0.5438 - val_loss: 0.6921 - val_acc: 0.5308\nLearning rate: 0.0000013\nEpoch 4/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6903 - acc: 0.5491Epoch 1/300\n20/20 [==============================] - 6s 318ms/step - loss: 0.6901 - acc: 0.5518 - val_loss: 0.6920 - val_acc: 0.5308\nLearning rate: 0.0000015\nEpoch 5/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6898 - acc: 0.5506Epoch 1/300\n20/20 [==============================] - 6s 315ms/step - loss: 0.6901 - acc: 0.5489 - val_loss: 0.6922 - val_acc: 0.5308\nLearning rate: 0.0000016\nEpoch 6/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6899 - acc: 0.5494Epoch 1/300\n20/20 [==============================] - 7s 361ms/step - loss: 0.6900 - acc: 0.5481 - val_loss: 0.6922 - val_acc: 0.5308\nLearning rate: 0.0000018\nEpoch 7/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6891 - acc: 0.5553Epoch 1/300\n20/20 [==============================] - 6s 319ms/step - loss: 0.6891 - acc: 0.5552 - val_loss: 0.6919 - val_acc: 0.5308\nLearning rate: 0.0000019\nEpoch 8/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6908 - acc: 0.5390Epoch 1/300\n20/20 [==============================] - 6s 324ms/step - loss: 0.6904 - acc: 0.5426 - val_loss: 0.6913 - val_acc: 0.5308\nLearning rate: 0.0000021\nEpoch 9/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6888 - acc: 0.5532Epoch 1/300\n20/20 [==============================] - 6s 321ms/step - loss: 0.6888 - acc: 0.5530 - val_loss: 0.6887 - val_acc: 0.5308\nLearning rate: 0.0000024\nEpoch 10/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6888 - acc: 0.5490Epoch 1/300\n20/20 [==============================] - 6s 319ms/step - loss: 0.6888 - acc: 0.5489 - val_loss: 0.6882 - val_acc: 0.5308\nLearning rate: 0.0000026\nEpoch 11/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6901 - acc: 0.5399Epoch 1/300\n20/20 [==============================] - 6s 318ms/step - loss: 0.6899 - acc: 0.5406 - val_loss: 0.6879 - val_acc: 0.5308\nLearning rate: 0.0000029\nEpoch 12/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6876 - acc: 0.5519Epoch 1/300\n20/20 [==============================] - 7s 359ms/step - loss: 0.6877 - acc: 0.5517 - val_loss: 0.6871 - val_acc: 0.5308\nLearning rate: 0.0000031\nEpoch 13/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6886 - acc: 0.5432Epoch 1/300\n20/20 [==============================] - 6s 321ms/step - loss: 0.6890 - acc: 0.5410 - val_loss: 0.6952 - val_acc: 0.5308\nLearning rate: 0.0000035\nEpoch 14/300\n19/20 [===========================>..] 
- ETA: 0s - loss: 0.6862 - acc: 0.5563Epoch 1/300\n20/20 [==============================] - 6s 324ms/step - loss: 0.6867 - acc: 0.5535 - val_loss: 0.6963 - val_acc: 0.5308\nLearning rate: 0.0000038\nEpoch 15/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6857 - acc: 0.5559Epoch 1/300\n20/20 [==============================] - 6s 323ms/step - loss: 0.6861 - acc: 0.5534 - val_loss: 0.6909 - val_acc: 0.5308\nLearning rate: 0.0000042\nEpoch 16/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6868 - acc: 0.5448Epoch 1/300\n20/20 [==============================] - 6s 321ms/step - loss: 0.6866 - acc: 0.5457 - val_loss: 0.6894 - val_acc: 0.5308\nLearning rate: 0.0000046\nEpoch 17/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6852 - acc: 0.5487Epoch 1/300\n20/20 [==============================] - 7s 340ms/step - loss: 0.6853 - acc: 0.5474 - val_loss: 0.6893 - val_acc: 0.5308\nLearning rate: 0.0000051\nEpoch 18/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6847 - acc: 0.5494Epoch 1/300\n20/20 [==============================] - 6s 325ms/step - loss: 0.6844 - acc: 0.5521 - val_loss: 0.6891 - val_acc: 0.5308\nLearning rate: 0.0000056\nEpoch 19/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6845 - acc: 0.5419Epoch 1/300\n20/20 [==============================] - 6s 313ms/step - loss: 0.6843 - acc: 0.5422 - val_loss: 0.6875 - val_acc: 0.5308\nLearning rate: 0.0000061\nEpoch 20/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6828 - acc: 0.5485Epoch 1/300\n20/20 [==============================] - 6s 318ms/step - loss: 0.6823 - acc: 0.5523 - val_loss: 0.6861 - val_acc: 0.5290\nLearning rate: 0.0000067\nEpoch 21/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6811 - acc: 0.5483Epoch 1/300\n20/20 [==============================] - 6s 317ms/step - loss: 0.6811 - acc: 0.5478 - val_loss: 0.6841 - val_acc: 0.5308\nLearning rate: 0.0000074\nEpoch 22/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6789 - acc: 0.5503Epoch 1/300\n20/20 [==============================] - 6s 316ms/step - loss: 0.6786 - acc: 0.5513 - val_loss: 0.6828 - val_acc: 0.5290\nLearning rate: 0.0000081\nEpoch 23/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6762 - acc: 0.5499Epoch 1/300\n20/20 [==============================] - 7s 358ms/step - loss: 0.6761 - acc: 0.5505 - val_loss: 0.6781 - val_acc: 0.5346\nLearning rate: 0.0000090\nEpoch 24/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6731 - acc: 0.5782Epoch 1/300\n20/20 [==============================] - 6s 318ms/step - loss: 0.6728 - acc: 0.5786 - val_loss: 0.6798 - val_acc: 0.5290\nLearning rate: 0.0000098\nEpoch 25/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6696 - acc: 0.5872Epoch 1/300\n20/20 [==============================] - 6s 317ms/step - loss: 0.6696 - acc: 0.5876 - val_loss: 0.6680 - val_acc: 0.6019\nLearning rate: 0.0000108\nEpoch 26/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6644 - acc: 0.6395Epoch 1/300\n20/20 [==============================] - 6s 320ms/step - loss: 0.6640 - acc: 0.6356 - val_loss: 0.6651 - val_acc: 0.5439\nLearning rate: 0.0000119\nEpoch 27/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6583 - acc: 0.6057Epoch 1/300\n20/20 [==============================] - 6s 318ms/step - loss: 0.6575 - acc: 0.6090 - val_loss: 0.6603 - val_acc: 0.5533\nLearning rate: 0.0000131\nEpoch 28/300\n19/20 [===========================>..] 
- ETA: 0s - loss: 0.6488 - acc: 0.6355Epoch 1/300\n20/20 [==============================] - 6s 312ms/step - loss: 0.6490 - acc: 0.6341 - val_loss: 0.6442 - val_acc: 0.6953\nLearning rate: 0.0000144\nEpoch 29/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6401 - acc: 0.6567Epoch 1/300\n20/20 [==============================] - 7s 367ms/step - loss: 0.6396 - acc: 0.6547 - val_loss: 0.6410 - val_acc: 0.6093\nLearning rate: 0.0000159\nEpoch 30/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6333 - acc: 0.6617Epoch 1/300\n20/20 [==============================] - 6s 320ms/step - loss: 0.6330 - acc: 0.6590 - val_loss: 0.6383 - val_acc: 0.5944\nLearning rate: 0.0000174\nEpoch 31/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6217 - acc: 0.6740Epoch 1/300\n20/20 [==============================] - 6s 318ms/step - loss: 0.6206 - acc: 0.6735 - val_loss: 0.6179 - val_acc: 0.7009\nLearning rate: 0.0000192\nEpoch 32/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6211 - acc: 0.6733Epoch 1/300\n20/20 [==============================] - 6s 316ms/step - loss: 0.6213 - acc: 0.6749 - val_loss: 0.6096 - val_acc: 0.7028\nLearning rate: 0.0000211\nEpoch 33/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6057 - acc: 0.6884Epoch 1/300\n20/20 [==============================] - 6s 316ms/step - loss: 0.6032 - acc: 0.6915 - val_loss: 0.6362 - val_acc: 0.5607\nLearning rate: 0.0000232\nEpoch 34/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6050 - acc: 0.6817Epoch 1/300\n20/20 [==============================] - 7s 352ms/step - loss: 0.6052 - acc: 0.6844 - val_loss: 0.6033 - val_acc: 0.6505\nLearning rate: 0.0000255\nEpoch 35/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5893 - acc: 0.7030Epoch 1/300\n20/20 [==============================] - 6s 322ms/step - loss: 0.5903 - acc: 0.7002 - val_loss: 0.5753 - val_acc: 0.7626\nLearning rate: 0.0000281\nEpoch 36/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5845 - acc: 0.7025Epoch 1/300\n20/20 [==============================] - 7s 327ms/step - loss: 0.5844 - acc: 0.7034 - val_loss: 0.5743 - val_acc: 0.7402\nLearning rate: 0.0000309\nEpoch 37/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5807 - acc: 0.7013Epoch 1/300\n20/20 [==============================] - 6s 321ms/step - loss: 0.5788 - acc: 0.7053 - val_loss: 0.5607 - val_acc: 0.6935\nLearning rate: 0.0000340\nEpoch 38/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5709 - acc: 0.7036Epoch 1/300\n20/20 [==============================] - 6s 319ms/step - loss: 0.5709 - acc: 0.7056 - val_loss: 0.5494 - val_acc: 0.7215\nLearning rate: 0.0000374\nEpoch 39/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5597 - acc: 0.7247Epoch 1/300\n20/20 [==============================] - 6s 318ms/step - loss: 0.5595 - acc: 0.7259 - val_loss: 0.5610 - val_acc: 0.7963\nLearning rate: 0.0000411\nEpoch 40/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5395 - acc: 0.7526Epoch 1/300\n20/20 [==============================] - 7s 359ms/step - loss: 0.5407 - acc: 0.7513 - val_loss: 0.5542 - val_acc: 0.7271\nLearning rate: 0.0000453\nEpoch 41/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5361 - acc: 0.7288Epoch 1/300\n20/20 [==============================] - 6s 322ms/step - loss: 0.5359 - acc: 0.7307 - val_loss: 0.5584 - val_acc: 0.7701\nLearning rate: 0.0000498\nEpoch 42/300\n19/20 [===========================>..] 
- ETA: 0s - loss: 0.5512 - acc: 0.7248Epoch 1/300\n20/20 [==============================] - 6s 310ms/step - loss: 0.5488 - acc: 0.7253 - val_loss: 0.5401 - val_acc: 0.8000\nLearning rate: 0.0000548\nEpoch 43/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5338 - acc: 0.7405Epoch 1/300\n20/20 [==============================] - 6s 315ms/step - loss: 0.5303 - acc: 0.7457 - val_loss: 0.5330 - val_acc: 0.7963\nLearning rate: 0.0000602\nEpoch 44/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.4977 - acc: 0.7739Epoch 1/300\n20/20 [==============================] - 6s 321ms/step - loss: 0.4974 - acc: 0.7766 - val_loss: 0.5418 - val_acc: 0.6991\nLearning rate: 0.0000663\nEpoch 45/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.4997 - acc: 0.7689Epoch 1/300\n20/20 [==============================] - 6s 309ms/step - loss: 0.5105 - acc: 0.7596 - val_loss: 0.6044 - val_acc: 0.7234\nLearning rate: 0.0000729\nEpoch 46/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.4953 - acc: 0.7739Epoch 1/300\n20/20 [==============================] - 7s 365ms/step - loss: 0.4959 - acc: 0.7743 - val_loss: 0.5044 - val_acc: 0.7383\nLearning rate: 0.0000802\nEpoch 47/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.4749 - acc: 0.7826Epoch 1/300\n20/20 [==============================] - 6s 319ms/step - loss: 0.4742 - acc: 0.7834 - val_loss: 0.5392 - val_acc: 0.6953\nLearning rate: 0.0000882\nEpoch 48/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.4611 - acc: 0.7875Epoch 1/300\n20/20 [==============================] - 6s 312ms/step - loss: 0.4653 - acc: 0.7831 - val_loss: 0.5381 - val_acc: 0.7645\nLearning rate: 0.0000970\nEpoch 49/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.4524 - acc: 0.7947Epoch 1/300\n20/20 [==============================] - 6s 324ms/step - loss: 0.4541 - acc: 0.7909 - val_loss: 0.4906 - val_acc: 0.7925\nLearning rate: 0.0001067\nEpoch 50/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.4462 - acc: 0.7910Epoch 1/300\n20/20 [==============================] - 6s 314ms/step - loss: 0.4475 - acc: 0.7913 - val_loss: 0.4239 - val_acc: 0.8318\nLearning rate: 0.0001174\nEpoch 51/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.4510 - acc: 0.7922Epoch 1/300\n20/20 [==============================] - 7s 351ms/step - loss: 0.4521 - acc: 0.7905 - val_loss: 0.4180 - val_acc: 0.7981\nLearning rate: 0.0001291\nEpoch 52/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.4567 - acc: 0.7860Epoch 1/300\n20/20 [==============================] - 6s 321ms/step - loss: 0.4602 - acc: 0.7850 - val_loss: 0.4500 - val_acc: 0.8075\nLearning rate: 0.0001420\nEpoch 53/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.4088 - acc: 0.8141Epoch 1/300\n20/20 [==============================] - 6s 317ms/step - loss: 0.4111 - acc: 0.8116 - val_loss: 0.3982 - val_acc: 0.8318\nLearning rate: 0.0001562\nEpoch 54/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.4163 - acc: 0.8100Epoch 1/300\n20/20 [==============================] - 6s 322ms/step - loss: 0.4136 - acc: 0.8125 - val_loss: 0.3870 - val_acc: 0.8449\nLearning rate: 0.0001719\nEpoch 55/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.4860 - acc: 0.7697Epoch 1/300\n20/20 [==============================] - 6s 319ms/step - loss: 0.4841 - acc: 0.7703 - val_loss: 0.4116 - val_acc: 0.8280\nLearning rate: 0.0001891\nEpoch 56/300\n19/20 [===========================>..] 
- ETA: 0s - loss: 0.4024 - acc: 0.8098Epoch 1/300\n20/20 [==============================] - 7s 326ms/step - loss: 0.4009 - acc: 0.8107 - val_loss: 0.4449 - val_acc: 0.8093\nLearning rate: 0.0002080\nEpoch 57/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.4100 - acc: 0.8039Epoch 1/300\n20/20 [==============================] - 7s 356ms/step - loss: 0.4127 - acc: 0.8008 - val_loss: 0.3946 - val_acc: 0.8318\nLearning rate: 0.0002288\nEpoch 58/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.4664 - acc: 0.7756Epoch 1/300\n20/20 [==============================] - 6s 319ms/step - loss: 0.4689 - acc: 0.7703 - val_loss: 0.5693 - val_acc: 0.7421\nLearning rate: 0.0002516\nEpoch 59/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.4961 - acc: 0.7680Epoch 1/300\n20/20 [==============================] - 6s 322ms/step - loss: 0.4910 - acc: 0.7723 - val_loss: 0.4988 - val_acc: 0.7850\nLearning rate: 0.0002768\nEpoch 60/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5208 - acc: 0.7530Epoch 1/300\n20/20 [==============================] - 6s 317ms/step - loss: 0.5360 - acc: 0.7422 - val_loss: 0.5664 - val_acc: 0.6916\nLearning rate: 0.0003045\nEpoch 61/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.4772 - acc: 0.7772Epoch 1/300\n20/20 [==============================] - 6s 323ms/step - loss: 0.4745 - acc: 0.7790 - val_loss: 0.5224 - val_acc: 0.7121\nLearning rate: 0.0003349\nEpoch 62/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5165 - acc: 0.7397Epoch 1/300\n20/20 [==============================] - 6s 320ms/step - loss: 0.5078 - acc: 0.7438 - val_loss: 0.4815 - val_acc: 0.7402\nLearning rate: 0.0003684\nEpoch 63/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5365 - acc: 0.7347Epoch 1/300\n20/20 [==============================] - 7s 362ms/step - loss: 0.5331 - acc: 0.7353 - val_loss: 0.4395 - val_acc: 0.7794\nLearning rate: 0.0004053\nEpoch 64/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5284 - acc: 0.7247Epoch 1/300\n20/20 [==============================] - 6s 323ms/step - loss: 0.5319 - acc: 0.7184 - val_loss: 0.4530 - val_acc: 0.8037\nLearning rate: 0.0004458\nEpoch 65/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5045 - acc: 0.7514Epoch 1/300\n20/20 [==============================] - 6s 316ms/step - loss: 0.5053 - acc: 0.7521 - val_loss: 0.5031 - val_acc: 0.7626\nLearning rate: 0.0004904\nEpoch 66/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.4982 - acc: 0.7568Epoch 1/300\n20/20 [==============================] - 6s 322ms/step - loss: 0.4940 - acc: 0.7572 - val_loss: 0.4960 - val_acc: 0.7477\nLearning rate: 0.0005394\nEpoch 67/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.4322 - acc: 0.8056Epoch 1/300\n20/20 [==============================] - 6s 318ms/step - loss: 0.4435 - acc: 0.7968 - val_loss: 0.4780 - val_acc: 0.8112\nLearning rate: 0.0005933\nEpoch 68/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5863 - acc: 0.7180Epoch 1/300\n20/20 [==============================] - 7s 346ms/step - loss: 0.5916 - acc: 0.7160 - val_loss: 0.6811 - val_acc: 0.7364\nLearning rate: 0.0006527\nEpoch 69/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6583 - acc: 0.6508Epoch 1/300\n20/20 [==============================] - 6s 319ms/step - loss: 0.6541 - acc: 0.6562 - val_loss: 0.5730 - val_acc: 0.7533\nLearning rate: 0.0007180\nEpoch 70/300\n19/20 [===========================>..] 
- ETA: 0s - loss: 0.6295 - acc: 0.6683Epoch 1/300\n20/20 [==============================] - 6s 318ms/step - loss: 0.6259 - acc: 0.6717 - val_loss: 0.5270 - val_acc: 0.7664\nLearning rate: 0.0007897\nEpoch 71/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6077 - acc: 0.7117Epoch 1/300\n20/20 [==============================] - 6s 314ms/step - loss: 0.6058 - acc: 0.7116 - val_loss: 0.5724 - val_acc: 0.7383\nLearning rate: 0.0008687\nEpoch 72/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5603 - acc: 0.7188Epoch 1/300\n20/20 [==============================] - 6s 324ms/step - loss: 0.5638 - acc: 0.7152 - val_loss: 0.8043 - val_acc: 0.5794\nLearning rate: 0.0009556\nEpoch 73/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6499 - acc: 0.6757Epoch 1/300\n20/20 [==============================] - 6s 316ms/step - loss: 0.6475 - acc: 0.6783 - val_loss: 0.5844 - val_acc: 0.7383\nLearning rate: 0.0010512\nEpoch 74/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5998 - acc: 0.6967Epoch 1/300\n20/20 [==============================] - 7s 358ms/step - loss: 0.6047 - acc: 0.6927 - val_loss: 0.7704 - val_acc: 0.5383\nLearning rate: 0.0011563\nEpoch 75/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.6365 - acc: 0.6834Epoch 1/300\n20/20 [==============================] - 6s 323ms/step - loss: 0.6353 - acc: 0.6848 - val_loss: 0.5461 - val_acc: 0.7776\nLearning rate: 0.0012719\nEpoch 76/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5899 - acc: 0.7213Epoch 1/300\n20/20 [==============================] - 6s 318ms/step - loss: 0.5878 - acc: 0.7240 - val_loss: 0.5787 - val_acc: 0.7290\nLearning rate: 0.0013991\nEpoch 77/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5831 - acc: 0.7401Epoch 1/300\n20/20 [==============================] - 6s 311ms/step - loss: 0.5827 - acc: 0.7398 - val_loss: 0.6647 - val_acc: 0.5869\nLearning rate: 0.0015390\nEpoch 78/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5807 - acc: 0.7084Epoch 1/300\n20/20 [==============================] - 6s 319ms/step - loss: 0.5806 - acc: 0.7117 - val_loss: 0.5530 - val_acc: 0.7421\nLearning rate: 0.0016929\nEpoch 79/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5630 - acc: 0.7467Epoch 1/300\n20/20 [==============================] - 6s 318ms/step - loss: 0.5591 - acc: 0.7480 - val_loss: 0.5562 - val_acc: 0.7178\nLearning rate: 0.0018622\nEpoch 80/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5260 - acc: 0.7604Epoch 1/300\n20/20 [==============================] - 7s 357ms/step - loss: 0.5310 - acc: 0.7590 - val_loss: 0.5233 - val_acc: 0.7495\nLearning rate: 0.0020484\nEpoch 81/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5327 - acc: 0.7455Epoch 1/300\n20/20 [==============================] - 6s 322ms/step - loss: 0.5302 - acc: 0.7485 - val_loss: 0.5242 - val_acc: 0.7832\nLearning rate: 0.0022532\nEpoch 82/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5710 - acc: 0.7351Epoch 1/300\n20/20 [==============================] - 6s 315ms/step - loss: 0.5678 - acc: 0.7347 - val_loss: 0.5673 - val_acc: 0.7458\nLearning rate: 0.0024786\nEpoch 83/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5256 - acc: 0.7514Epoch 1/300\n20/20 [==============================] - 6s 310ms/step - loss: 0.5274 - acc: 0.7481 - val_loss: 0.5756 - val_acc: 0.7664\nLearning rate: 0.0027264\nEpoch 84/300\n19/20 [===========================>..] 
- ETA: 0s - loss: 0.5096 - acc: 0.7743Epoch 1/300\n20/20 [==============================] - 6s 313ms/step - loss: 0.5073 - acc: 0.7727 - val_loss: 0.5200 - val_acc: 0.7682\nLearning rate: 0.0029991\nEpoch 85/300\n19/20 [===========================>..] - ETA: 0s - loss: 0.5106 - acc: 0.7476Epoch 1/300\n20/20 [==============================] - 7s 349ms/step - loss: 0.5199 - acc: 0.7438 - val_loss: 0.6203 - val_acc: 0.6654\nLearning rate: 0.0032990\nEpoch 86/300\n19/20 [===========================>..] - ETA: 0s - loss: 1.0081 - acc: 0.5661Epoch 1/300\n 4/20 [=====>........................] - ETA: 6s - loss: 0.6395 - acc: 0.6797\nStopping training!\n20/20 [==============================] - 6s 319ms/step - loss: 0.9917 - acc: 0.5683 - val_loss: 0.6452 - val_acc: 0.6804\n"
],
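[
"# A minimal reconstruction, not the notebook's original code, of the two callbacks\n# implied by the logs above: an exponentially increasing learning-rate schedule\n# (about +10% per epoch from 1e-6, matching the 'Learning rate:' lines) and an\n# early stop once the batch loss explodes ('Stopping training!'). The names and\n# the stopping ratio are illustrative assumptions.\n\nfrom keras.callbacks import Callback, LearningRateScheduler\n\nSTART_LR, GROWTH = 1e-6, 1.1\n\ndef lr_schedule(epoch, lr=None):\n    # e.g. epoch 24 -> 1e-6 * 1.1**24 ~ 9.8e-6, as printed before epoch 25 above\n    new_lr = START_LR * GROWTH ** epoch\n    print('Learning rate: %.7f' % new_lr)\n    return new_lr\n\nclass StopOnLossExplosion(Callback):\n    # Halt the run when the batch loss grows far beyond the best value seen,\n    # which happens once the learning rate becomes too large\n    def __init__(self, ratio=2.0):\n        super(StopOnLossExplosion, self).__init__()\n        self.ratio = ratio\n        self.best = None\n\n    def on_batch_end(self, batch, logs=None):\n        loss = (logs or {}).get('loss')\n        if loss is None:\n            return\n        if self.best is None or loss < self.best:\n            self.best = loss\n        elif loss > self.ratio * self.best:\n            print('Stopping training!')\n            self.model.stop_training = True\n\n# These would be passed to fit_generator via\n# callbacks=[LearningRateScheduler(lr_schedule), StopOnLossExplosion()]",
"_____no_output_____"
],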
[
"# Plot the loss obtained at different learning rates\n\nplt.figure(figsize=(9, 8), dpi=80, facecolor='w', edgecolor='k')\nfor opt in opts:\n plt.xscale('log')\n plt.ylim(0.35, 0.8)\n plt.plot(lr2loss[opt][0], lr2loss[opt][1], label=opt.__name__)\n plt.title(' Loss-LR curve')\nplt.ylabel('Loss')\nplt.xlabel('Learning rate')\nplt.legend(loc='lower right')\nplt.show()",
"_____no_output_____"
]
],
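[
[
"# A minimal sketch, not part of the original experiment, of how the 'steepest\n# descent' point discussed below can be estimated numerically from the recorded\n# (learning rate, loss) pairs. It assumes lr2loss[opt] = (lrs, losses) as used in\n# the plot above; the smoothing window and np.gradient are illustrative choices.\n\nimport numpy as np\n\nfor opt in opts:\n    lrs = np.asarray(lr2loss[opt][0], dtype=float)\n    losses = np.asarray(lr2loss[opt][1], dtype=float)\n    # Light moving-average smoothing so single noisy epochs do not dominate\n    smooth = np.convolve(losses, np.ones(5) / 5.0, mode='same')\n    # Slope of the loss w.r.t. log10(lr): the most negative value marks the\n    # steepest descent, a lower bound for a sensible learning rate\n    slope = np.gradient(smooth, np.log10(lrs))\n    print('%s: steepest descent at lr=%.1e, loss minimum at lr=%.1e'\n          % (opt.__name__, lrs[np.argmin(slope)], lrs[np.argmin(smooth)]))",
"_____no_output_____"
]
],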
[
[
"The graph above clearly shows that learning rate plays a decisive role during the training. When it is too high, weight updates become too large and the network becomes unstable, failing to converge towards the loss minimum. On the other hand, if it set too low, the network learns slowly and we observe modest improvements between two consecutive epochs.\nThe global minimum of the Loss-LR curve indicates the point where the learning rate starts causing instabilities, hence choosing a greater value is discouraged.\nIdeally, the best one is in the region with the fastest descent of loss function, that is where the plotted curve is steepest (negatively). It should be also noted that, in a stable network, loss variations naturally decrease over time, even if the LR remains constant, as a consequence of the gradual convergence of the weights towards the optimum. Thus, the steepest point represents may not directly represent the optimal LR, but a lower bound for it.\nThat said, a practical way to choose an adequate LR for an optimizer is to pick a value between the steepest point and the minimum, e.g. in the middle of this region.\n\nIn this case, reasonable choices are:\n\n* **SGD** : 3e-2\n* **RMSProp** : 1e-4\n* **Adam** : 1e-4\n* **Nadam** : 1e-4\n\nNote how these values slightly differ from the Keras default ones.",
"_____no_output_____"
],
[
"# Optimizers comparison",
"_____no_output_____"
],
[
"In the following experiment each optimizer runs once for 100 epochs, with the previously determined learning rate.",
"_____no_output_____"
]
],
[
[
"# For each optimizer, execute a training run with the previously determined best learning rate\n\noptimal_lr = {\n SGD: 3e-2,\n RMSprop: 1e-4,\n Adam: 1e-4,\n Nadam: 1e-4\n}\n\nhistories = {}\n\nfor opt in opts:\n print(\"Optimizer: \" + opt.__name__)\n cnn = create_cnn()\n\n cnn.compile(\n optimizer=opt(learning_rate=optimal_lr[opt]),\n loss='binary_crossentropy',\n metrics=['accuracy'])\n\n histories[opt] = cnn.fit_generator(\n train_generator,\n steps_per_epoch=n_train_img // 128,\n epochs=100,\n validation_data=validation_generator,\n shuffle=True,\n verbose=1,\n initial_epoch=0)",
"Optimizer: SGD\nEpoch 1/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6923 - acc: 0.5257Epoch 1/100\n20/20 [==============================] - 6s 277ms/step - loss: 0.6923 - acc: 0.5259 - val_loss: 0.6889 - val_acc: 0.5645\nEpoch 2/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6907 - acc: 0.5453Epoch 1/100\n20/20 [==============================] - 5s 246ms/step - loss: 0.6908 - acc: 0.5434 - val_loss: 0.6864 - val_acc: 0.5645\nEpoch 3/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6893 - acc: 0.5512Epoch 1/100\n20/20 [==============================] - 5s 233ms/step - loss: 0.6894 - acc: 0.5498 - val_loss: 0.6855 - val_acc: 0.5645\nEpoch 4/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6895 - acc: 0.5336Epoch 1/100\n20/20 [==============================] - 5s 235ms/step - loss: 0.6897 - acc: 0.5319 - val_loss: 0.6869 - val_acc: 0.5645\nEpoch 5/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6877 - acc: 0.5499Epoch 1/100\n20/20 [==============================] - 4s 224ms/step - loss: 0.6880 - acc: 0.5469 - val_loss: 0.6866 - val_acc: 0.5607\nEpoch 6/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6862 - acc: 0.5699Epoch 1/100\n20/20 [==============================] - 5s 264ms/step - loss: 0.6863 - acc: 0.5707 - val_loss: 0.6899 - val_acc: 0.5290\nEpoch 7/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6842 - acc: 0.5624Epoch 1/100\n20/20 [==============================] - 5s 236ms/step - loss: 0.6846 - acc: 0.5584 - val_loss: 0.6834 - val_acc: 0.5645\nEpoch 8/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6829 - acc: 0.5674Epoch 1/100\n20/20 [==============================] - 5s 234ms/step - loss: 0.6832 - acc: 0.5651 - val_loss: 0.6804 - val_acc: 0.5645\nEpoch 9/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6801 - acc: 0.5640Epoch 1/100\n20/20 [==============================] - 5s 242ms/step - loss: 0.6803 - acc: 0.5632 - val_loss: 0.6788 - val_acc: 0.6505\nEpoch 10/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6731 - acc: 0.6137Epoch 1/100\n20/20 [==============================] - 5s 231ms/step - loss: 0.6732 - acc: 0.6103 - val_loss: 0.6705 - val_acc: 0.5589\nEpoch 11/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6730 - acc: 0.5866Epoch 1/100\n20/20 [==============================] - 4s 220ms/step - loss: 0.6735 - acc: 0.5814 - val_loss: 0.6905 - val_acc: 0.4935\nEpoch 12/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6645 - acc: 0.5928Epoch 1/100\n20/20 [==============================] - 5s 274ms/step - loss: 0.6646 - acc: 0.5905 - val_loss: 0.6666 - val_acc: 0.6206\nEpoch 13/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6690 - acc: 0.5720Epoch 1/100\n20/20 [==============================] - 5s 237ms/step - loss: 0.6684 - acc: 0.5739 - val_loss: 0.6506 - val_acc: 0.6766\nEpoch 14/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6507 - acc: 0.6245Epoch 1/100\n20/20 [==============================] - 5s 236ms/step - loss: 0.6498 - acc: 0.6297 - val_loss: 0.6422 - val_acc: 0.6075\nEpoch 15/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6448 - acc: 0.6521Epoch 1/100\n20/20 [==============================] - 5s 236ms/step - loss: 0.6437 - acc: 0.6507 - val_loss: 0.6268 - val_acc: 0.7103\nEpoch 16/100\n19/20 [===========================>..] 
- ETA: 0s - loss: 0.6427 - acc: 0.6187Epoch 1/100\n20/20 [==============================] - 4s 223ms/step - loss: 0.6491 - acc: 0.6116 - val_loss: 0.6823 - val_acc: 0.5290\nEpoch 17/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6339 - acc: 0.6366Epoch 1/100\n20/20 [==============================] - 5s 254ms/step - loss: 0.6325 - acc: 0.6364 - val_loss: 0.6103 - val_acc: 0.7196\nEpoch 18/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6178 - acc: 0.6713Epoch 1/100\n20/20 [==============================] - 5s 249ms/step - loss: 0.6155 - acc: 0.6768 - val_loss: 0.5960 - val_acc: 0.7159\nEpoch 19/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6059 - acc: 0.6859Epoch 1/100\n20/20 [==============================] - 5s 237ms/step - loss: 0.6048 - acc: 0.6875 - val_loss: 0.5794 - val_acc: 0.7271\nEpoch 20/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5980 - acc: 0.6859Epoch 1/100\n20/20 [==============================] - 5s 235ms/step - loss: 0.5952 - acc: 0.6891 - val_loss: 0.5702 - val_acc: 0.7364\nEpoch 21/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6058 - acc: 0.6791Epoch 1/100\n20/20 [==============================] - 5s 232ms/step - loss: 0.6038 - acc: 0.6787 - val_loss: 0.5914 - val_acc: 0.6879\nEpoch 22/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5878 - acc: 0.6975Epoch 1/100\n20/20 [==============================] - 4s 225ms/step - loss: 0.5881 - acc: 0.6962 - val_loss: 0.5656 - val_acc: 0.7234\nEpoch 23/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5878 - acc: 0.6909Epoch 1/100\n20/20 [==============================] - 5s 264ms/step - loss: 0.5901 - acc: 0.6875 - val_loss: 0.5554 - val_acc: 0.7327\nEpoch 24/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5749 - acc: 0.7005Epoch 1/100\n20/20 [==============================] - 5s 237ms/step - loss: 0.5749 - acc: 0.7016 - val_loss: 0.5528 - val_acc: 0.7346\nEpoch 25/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5764 - acc: 0.7093Epoch 1/100\n20/20 [==============================] - 5s 243ms/step - loss: 0.5764 - acc: 0.7090 - val_loss: 0.5635 - val_acc: 0.7458\nEpoch 26/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5845 - acc: 0.7041Epoch 1/100\n20/20 [==============================] - 5s 237ms/step - loss: 0.5802 - acc: 0.7068 - val_loss: 0.5409 - val_acc: 0.7533\nEpoch 27/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5667 - acc: 0.7159Epoch 1/100\n20/20 [==============================] - 5s 238ms/step - loss: 0.5663 - acc: 0.7168 - val_loss: 0.7924 - val_acc: 0.6056\nEpoch 28/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5733 - acc: 0.7052Epoch 1/100\n20/20 [==============================] - 4s 214ms/step - loss: 0.5716 - acc: 0.7066 - val_loss: 0.5475 - val_acc: 0.7570\nEpoch 29/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5607 - acc: 0.7086Epoch 1/100\n20/20 [==============================] - 5s 267ms/step - loss: 0.5586 - acc: 0.7079 - val_loss: 0.5454 - val_acc: 0.7551\nEpoch 30/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5671 - acc: 0.7159Epoch 1/100\n20/20 [==============================] - 5s 236ms/step - loss: 0.5653 - acc: 0.7152 - val_loss: 0.5380 - val_acc: 0.7701\nEpoch 31/100\n19/20 [===========================>..] 
- ETA: 0s - loss: 0.5828 - acc: 0.7053Epoch 1/100\n20/20 [==============================] - 5s 233ms/step - loss: 0.5845 - acc: 0.7016 - val_loss: 0.6078 - val_acc: 0.7439\nEpoch 32/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5776 - acc: 0.7151Epoch 1/100\n20/20 [==============================] - 5s 240ms/step - loss: 0.5774 - acc: 0.7145 - val_loss: 0.5278 - val_acc: 0.7439\nEpoch 33/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5536 - acc: 0.7272Epoch 1/100\n20/20 [==============================] - 4s 225ms/step - loss: 0.5540 - acc: 0.7271 - val_loss: 0.5305 - val_acc: 0.7495\nEpoch 34/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5582 - acc: 0.7222Epoch 1/100\n20/20 [==============================] - 5s 253ms/step - loss: 0.5591 - acc: 0.7212 - val_loss: 0.5363 - val_acc: 0.7533\nEpoch 35/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5674 - acc: 0.7192Epoch 1/100\n20/20 [==============================] - 5s 243ms/step - loss: 0.5721 - acc: 0.7133 - val_loss: 0.5673 - val_acc: 0.7776\nEpoch 36/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5431 - acc: 0.7359Epoch 1/100\n20/20 [==============================] - 5s 241ms/step - loss: 0.5443 - acc: 0.7358 - val_loss: 0.5814 - val_acc: 0.6972\nEpoch 37/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5542 - acc: 0.7303Epoch 1/100\n20/20 [==============================] - 5s 241ms/step - loss: 0.5529 - acc: 0.7305 - val_loss: 0.5048 - val_acc: 0.7514\nEpoch 38/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5552 - acc: 0.7251Epoch 1/100\n20/20 [==============================] - 5s 240ms/step - loss: 0.5537 - acc: 0.7259 - val_loss: 0.5083 - val_acc: 0.7607\nEpoch 39/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5462 - acc: 0.7393Epoch 1/100\n20/20 [==============================] - 4s 224ms/step - loss: 0.5464 - acc: 0.7395 - val_loss: 0.6149 - val_acc: 0.6561\nEpoch 40/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5468 - acc: 0.7299Epoch 1/100\n20/20 [==============================] - 5s 262ms/step - loss: 0.5509 - acc: 0.7269 - val_loss: 0.5173 - val_acc: 0.7645\nEpoch 41/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5464 - acc: 0.7455Epoch 1/100\n20/20 [==============================] - 5s 240ms/step - loss: 0.5465 - acc: 0.7457 - val_loss: 0.5120 - val_acc: 0.7458\nEpoch 42/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5388 - acc: 0.7375Epoch 1/100\n20/20 [==============================] - 5s 234ms/step - loss: 0.5451 - acc: 0.7305 - val_loss: 0.6539 - val_acc: 0.6224\nEpoch 43/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5434 - acc: 0.7376Epoch 1/100\n20/20 [==============================] - 5s 237ms/step - loss: 0.5430 - acc: 0.7390 - val_loss: 0.4936 - val_acc: 0.7869\nEpoch 44/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5686 - acc: 0.7201Epoch 1/100\n20/20 [==============================] - 5s 238ms/step - loss: 0.5661 - acc: 0.7228 - val_loss: 0.5267 - val_acc: 0.7664\nEpoch 45/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5279 - acc: 0.7526Epoch 1/100\n20/20 [==============================] - 4s 222ms/step - loss: 0.5325 - acc: 0.7493 - val_loss: 0.5104 - val_acc: 0.7907\nEpoch 46/100\n19/20 [===========================>..] 
- ETA: 0s - loss: 0.5296 - acc: 0.7459Epoch 1/100\n20/20 [==============================] - 5s 268ms/step - loss: 0.5305 - acc: 0.7430 - val_loss: 0.5015 - val_acc: 0.8019\nEpoch 47/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5347 - acc: 0.7511Epoch 1/100\n20/20 [==============================] - 5s 235ms/step - loss: 0.5369 - acc: 0.7470 - val_loss: 0.5652 - val_acc: 0.7252\nEpoch 48/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5318 - acc: 0.7479Epoch 1/100\n20/20 [==============================] - 5s 240ms/step - loss: 0.5395 - acc: 0.7402 - val_loss: 0.5270 - val_acc: 0.7701\nEpoch 49/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5186 - acc: 0.7604Epoch 1/100\n20/20 [==============================] - 5s 235ms/step - loss: 0.5182 - acc: 0.7606 - val_loss: 0.5142 - val_acc: 0.7813\nEpoch 50/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5341 - acc: 0.7360Epoch 1/100\n20/20 [==============================] - 5s 232ms/step - loss: 0.5295 - acc: 0.7410 - val_loss: 0.5107 - val_acc: 0.7925\nEpoch 51/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5224 - acc: 0.7595Epoch 1/100\n20/20 [==============================] - 5s 255ms/step - loss: 0.5204 - acc: 0.7594 - val_loss: 0.4913 - val_acc: 0.7682\nEpoch 52/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5255 - acc: 0.7493Epoch 1/100\n20/20 [==============================] - 5s 248ms/step - loss: 0.5260 - acc: 0.7469 - val_loss: 0.5030 - val_acc: 0.7925\nEpoch 53/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5203 - acc: 0.7559Epoch 1/100\n20/20 [==============================] - 5s 234ms/step - loss: 0.5227 - acc: 0.7545 - val_loss: 0.4990 - val_acc: 0.7776\nEpoch 54/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5102 - acc: 0.7593Epoch 1/100\n20/20 [==============================] - 5s 246ms/step - loss: 0.5093 - acc: 0.7600 - val_loss: 0.5239 - val_acc: 0.7869\nEpoch 55/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5143 - acc: 0.7572Epoch 1/100\n20/20 [==============================] - 5s 240ms/step - loss: 0.5109 - acc: 0.7590 - val_loss: 0.5078 - val_acc: 0.7981\nEpoch 56/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5184 - acc: 0.7501Epoch 1/100\n20/20 [==============================] - 4s 219ms/step - loss: 0.5206 - acc: 0.7450 - val_loss: 0.5088 - val_acc: 0.7813\nEpoch 57/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5188 - acc: 0.7472Epoch 1/100\n20/20 [==============================] - 5s 262ms/step - loss: 0.5205 - acc: 0.7434 - val_loss: 0.4840 - val_acc: 0.7944\nEpoch 58/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5009 - acc: 0.7710Epoch 1/100\n20/20 [==============================] - 5s 239ms/step - loss: 0.5067 - acc: 0.7663 - val_loss: 0.4963 - val_acc: 0.7907\nEpoch 59/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5010 - acc: 0.7660Epoch 1/100\n20/20 [==============================] - 5s 238ms/step - loss: 0.5018 - acc: 0.7651 - val_loss: 0.5207 - val_acc: 0.7664\nEpoch 60/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5171 - acc: 0.7523Epoch 1/100\n20/20 [==============================] - 5s 242ms/step - loss: 0.5130 - acc: 0.7534 - val_loss: 0.4814 - val_acc: 0.7981\nEpoch 61/100\n19/20 [===========================>..] 
- ETA: 0s - loss: 0.5180 - acc: 0.7541Epoch 1/100\n20/20 [==============================] - 5s 232ms/step - loss: 0.5152 - acc: 0.7551 - val_loss: 0.4935 - val_acc: 0.7645\nEpoch 62/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5142 - acc: 0.7502Epoch 1/100\n20/20 [==============================] - 4s 221ms/step - loss: 0.5144 - acc: 0.7518 - val_loss: 0.5311 - val_acc: 0.7607\nEpoch 63/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5056 - acc: 0.7564Epoch 1/100\n20/20 [==============================] - 5s 272ms/step - loss: 0.5091 - acc: 0.7545 - val_loss: 0.4827 - val_acc: 0.7981\nEpoch 64/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5092 - acc: 0.7630Epoch 1/100\n20/20 [==============================] - 5s 243ms/step - loss: 0.5126 - acc: 0.7616 - val_loss: 0.4668 - val_acc: 0.7925\nEpoch 65/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5032 - acc: 0.7610Epoch 1/100\n20/20 [==============================] - 5s 237ms/step - loss: 0.5001 - acc: 0.7648 - val_loss: 0.5101 - val_acc: 0.7701\nEpoch 66/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5012 - acc: 0.7722Epoch 1/100\n20/20 [==============================] - 5s 234ms/step - loss: 0.5044 - acc: 0.7683 - val_loss: 0.4656 - val_acc: 0.8075\nEpoch 67/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5092 - acc: 0.7597Epoch 1/100\n20/20 [==============================] - 5s 231ms/step - loss: 0.5096 - acc: 0.7576 - val_loss: 0.4926 - val_acc: 0.7963\nEpoch 68/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5025 - acc: 0.7664Epoch 1/100\n20/20 [==============================] - 5s 261ms/step - loss: 0.5033 - acc: 0.7640 - val_loss: 0.4676 - val_acc: 0.8131\nEpoch 69/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5011 - acc: 0.7618Epoch 1/100\n20/20 [==============================] - 5s 237ms/step - loss: 0.4980 - acc: 0.7648 - val_loss: 0.5017 - val_acc: 0.7720\nEpoch 70/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5119 - acc: 0.7557Epoch 1/100\n20/20 [==============================] - 5s 237ms/step - loss: 0.5184 - acc: 0.7526 - val_loss: 0.4994 - val_acc: 0.7701\nEpoch 71/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4938 - acc: 0.7697Epoch 1/100\n20/20 [==============================] - 5s 243ms/step - loss: 0.4944 - acc: 0.7691 - val_loss: 0.4972 - val_acc: 0.7888\nEpoch 72/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4737 - acc: 0.7811Epoch 1/100\n20/20 [==============================] - 5s 235ms/step - loss: 0.4792 - acc: 0.7771 - val_loss: 0.4662 - val_acc: 0.8037\nEpoch 73/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5005 - acc: 0.7669Epoch 1/100\n20/20 [==============================] - 4s 215ms/step - loss: 0.4984 - acc: 0.7679 - val_loss: 0.5197 - val_acc: 0.7888\nEpoch 74/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4979 - acc: 0.7660Epoch 1/100\n20/20 [==============================] - 5s 262ms/step - loss: 0.4965 - acc: 0.7659 - val_loss: 0.5005 - val_acc: 0.7813\nEpoch 75/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4977 - acc: 0.7643Epoch 1/100\n20/20 [==============================] - 5s 239ms/step - loss: 0.4959 - acc: 0.7667 - val_loss: 0.4870 - val_acc: 0.8112\nEpoch 76/100\n19/20 [===========================>..] 
[verbose Keras fit() output condensed — each optimizer was trained for 100 epochs at 20 steps/epoch (~5 s/epoch); only the end-of-run validation results are kept]

First optimizer (its header appears earlier in the log) — epoch 100/100: loss 0.4586, acc 0.7881, val_loss 0.4668, val_acc 0.8112
Optimizer: RMSprop — epoch 100/100: loss 0.3782, acc 0.8365, val_loss 0.3380, val_acc 0.8579 (best val_acc 0.8692, at epochs 89 and 99)
Optimizer: Adam — epoch 100/100: loss 0.3022, acc 0.8787, val_loss 0.2490, val_acc 0.9009 (best val_acc 0.9084, at epochs 85 and 98)
Optimizer: Nadam — log truncated at epoch 91/100; epoch 90/100: loss 0.3382, acc 0.8550, val_loss 0.4269, val_acc 0.8486 (best val_acc so far 0.8841, at epoch 75)
- ETA: 0s - loss: 0.3413 - acc: 0.8452Epoch 1/100\n20/20 [==============================] - 5s 257ms/step - loss: 0.3419 - acc: 0.8466 - val_loss: 0.3437 - val_acc: 0.8710\nEpoch 92/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3843 - acc: 0.8285Epoch 1/100\n20/20 [==============================] - 5s 233ms/step - loss: 0.3821 - acc: 0.8297 - val_loss: 0.3869 - val_acc: 0.8355\nEpoch 93/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3388 - acc: 0.8598Epoch 1/100\n20/20 [==============================] - 5s 232ms/step - loss: 0.3374 - acc: 0.8605 - val_loss: 0.3406 - val_acc: 0.8785\nEpoch 94/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3623 - acc: 0.8402Epoch 1/100\n20/20 [==============================] - 5s 231ms/step - loss: 0.3628 - acc: 0.8396 - val_loss: 0.3564 - val_acc: 0.8841\nEpoch 95/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3382 - acc: 0.8548Epoch 1/100\n20/20 [==============================] - 4s 222ms/step - loss: 0.3366 - acc: 0.8558 - val_loss: 0.3301 - val_acc: 0.8860\nEpoch 96/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3319 - acc: 0.8623Epoch 1/100\n20/20 [==============================] - 4s 210ms/step - loss: 0.3304 - acc: 0.8634 - val_loss: 0.3443 - val_acc: 0.8692\nEpoch 97/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3154 - acc: 0.8740Epoch 1/100\n20/20 [==============================] - 5s 265ms/step - loss: 0.3156 - acc: 0.8721 - val_loss: 0.3044 - val_acc: 0.8729\nEpoch 98/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3996 - acc: 0.8290Epoch 1/100\n20/20 [==============================] - 5s 228ms/step - loss: 0.3955 - acc: 0.8317 - val_loss: 0.3661 - val_acc: 0.8636\nEpoch 99/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3490 - acc: 0.8477Epoch 1/100\n20/20 [==============================] - 5s 233ms/step - loss: 0.3491 - acc: 0.8479 - val_loss: 0.3314 - val_acc: 0.8636\nEpoch 100/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3199 - acc: 0.8753Epoch 1/100\n20/20 [==============================] - 5s 230ms/step - loss: 0.3222 - acc: 0.8749 - val_loss: 0.3007 - val_acc: 0.8879\n"
],
[
"# Validation accuracy\n\nplt.figure(figsize=(7, 7), dpi=80, facecolor='w', edgecolor='k')\nplt.title('Validation accuracy comparison')\nplt.ylabel('Accuracy')\nplt.xlabel('Epoch')\nfor opt in opts:\n val_acc = histories[opt].history['val_acc']\n epochs = range(1, len(val_acc)+1)\n plt.plot(epochs, val_acc, label=opt.__name__)\nplt.legend(loc='lower right')\n\n# Validation loss\n\nplt.figure(figsize=(7, 7), dpi=80, facecolor='w', edgecolor='k')\nplt.title('Validation loss comparison')\nplt.ylabel('Loss')\nplt.xlabel('Epoch')\nfor opt in opts:\n val_loss = histories[opt].history['val_loss']\n epochs = range(1, len(val_loss)+1)\n plt.plot(epochs, val_loss, label=opt.__name__)\nplt.legend(loc='lower right')",
"_____no_output_____"
]
],
[
[
"The graphs shows that SGD is relatively weak with respect to the other optimizers. Adam converges faster than RMSprop and Nadam, whose curves are quite similar.",
"_____no_output_____"
],
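[
"To put numbers on the curves, the peak validation accuracy of each optimizer can be read directly from the stored histories (a minimal sketch, reusing the `opts` list and `histories` dict built in the comparison above):\n\n```python\n# Peak validation accuracy per optimizer (assumes `opts`/`histories` exist)\nfor opt in opts:\n    best = max(histories[opt].history['val_acc'])\n    print('%-8s best val_acc: %.4f' % (opt.__name__, best))\n```",
"_____no_output_____"
],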
[
"# Learning rate verification",
"_____no_output_____"
],
[
"Now that approximate values for the learning rate have been discovered, one may try to directly experiment with different nearby values and find which one works best.",
"_____no_output_____"
],
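[
"(An alternative worth noting, not used in this notebook: instead of re-training from scratch for every candidate rate, Keras can anneal the rate within a single run via the `ReduceLROnPlateau` callback.)\n\n```python\nfrom keras.callbacks import ReduceLROnPlateau\n\n# Halve the learning rate whenever val_loss stalls for 5 epochs;\n# pass callbacks=[reduce_lr] to fit_generator / fit.\nreduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5,\n                              patience=5, min_lr=1e-6)\n```",
"_____no_output_____"
],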
[
"## RMSprop",
"_____no_output_____"
]
],
[
[
"# Try RMSprop with different learning rates\n\nlr_to_test = (1e-5, 1e-4, 1e-3)\nopt = RMSprop\n\nhistories = {}\n\nfor lr in lr_to_test:\n print(\"RMS [lr = %.5f]: \" % lr)\n cnn = create_cnn()\n cnn.compile(\n optimizer=opt(learning_rate=lr),\n loss='binary_crossentropy',\n metrics=['accuracy'])\n histories[lr] = cnn.fit_generator(\n train_generator,\n steps_per_epoch=n_train_img // 128,\n epochs=100,\n validation_data=validation_generator,\n shuffle=True,\n verbose=1,\n initial_epoch=0)",
"RMS [lr = 0.00001]: \nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/resource_variable_ops.py:1630: calling BaseResourceVariable.__init__ (from tensorflow.python.ops.resource_variable_ops) with constraint is deprecated and will be removed in a future version.\nInstructions for updating:\nIf using Keras pass *_constraint arguments to layers.\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/nn_impl.py:183: where (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse tf.where in 2.0, which has the same broadcast rule as np.where\nEpoch 1/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6915 - acc: 0.5254Epoch 1/100\n20/20 [==============================] - 11s 528ms/step - loss: 0.6913 - acc: 0.5277 - val_loss: 0.6843 - val_acc: 0.5701\nEpoch 2/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6916 - acc: 0.5373Epoch 1/100\n20/20 [==============================] - 5s 238ms/step - loss: 0.6915 - acc: 0.5386 - val_loss: 0.6859 - val_acc: 0.5701\nEpoch 3/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6910 - acc: 0.5345Epoch 1/100\n20/20 [==============================] - 5s 244ms/step - loss: 0.6910 - acc: 0.5332 - val_loss: 0.6886 - val_acc: 0.5589\nEpoch 4/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6890 - acc: 0.5457Epoch 1/100\n20/20 [==============================] - 5s 235ms/step - loss: 0.6892 - acc: 0.5434 - val_loss: 0.6833 - val_acc: 0.5701\nEpoch 5/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6886 - acc: 0.5399Epoch 1/100\n20/20 [==============================] - 4s 221ms/step - loss: 0.6887 - acc: 0.5379 - val_loss: 0.6832 - val_acc: 0.5701\nEpoch 6/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6876 - acc: 0.5440Epoch 1/100\n20/20 [==============================] - 5s 268ms/step - loss: 0.6878 - acc: 0.5394 - val_loss: 0.6863 - val_acc: 0.7215\nEpoch 7/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6853 - acc: 0.5653Epoch 1/100\n20/20 [==============================] - 5s 235ms/step - loss: 0.6858 - acc: 0.5624 - val_loss: 0.6781 - val_acc: 0.5701\nEpoch 8/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6855 - acc: 0.5574Epoch 1/100\n20/20 [==============================] - 5s 242ms/step - loss: 0.6851 - acc: 0.5640 - val_loss: 0.6776 - val_acc: 0.5682\nEpoch 9/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6831 - acc: 0.5495Epoch 1/100\n20/20 [==============================] - 5s 230ms/step - loss: 0.6837 - acc: 0.5458 - val_loss: 0.6769 - val_acc: 0.5720\nEpoch 10/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6810 - acc: 0.5590Epoch 1/100\n20/20 [==============================] - 5s 238ms/step - loss: 0.6809 - acc: 0.5584 - val_loss: 0.6770 - val_acc: 0.5738\nEpoch 11/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6800 - acc: 0.6058Epoch 1/100\n20/20 [==============================] - 4s 219ms/step - loss: 0.6801 - acc: 0.6075 - val_loss: 0.6771 - val_acc: 0.6037\nEpoch 12/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6780 - acc: 0.5736Epoch 1/100\n20/20 [==============================] - 5s 274ms/step - loss: 0.6779 - acc: 0.5786 - val_loss: 0.6742 - val_acc: 0.5925\nEpoch 13/100\n19/20 [===========================>..] 
- ETA: 0s - loss: 0.6748 - acc: 0.5838Epoch 1/100\n20/20 [==============================] - 5s 242ms/step - loss: 0.6746 - acc: 0.5827 - val_loss: 0.6695 - val_acc: 0.5720\nEpoch 14/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6735 - acc: 0.6299Epoch 1/100\n20/20 [==============================] - 5s 240ms/step - loss: 0.6733 - acc: 0.6309 - val_loss: 0.6682 - val_acc: 0.5925\nEpoch 15/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6680 - acc: 0.5868Epoch 1/100\n20/20 [==============================] - 5s 234ms/step - loss: 0.6680 - acc: 0.5855 - val_loss: 0.6654 - val_acc: 0.5738\nEpoch 16/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6687 - acc: 0.6708Epoch 1/100\n20/20 [==============================] - 5s 235ms/step - loss: 0.6679 - acc: 0.6681 - val_loss: 0.6642 - val_acc: 0.5701\nEpoch 17/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6658 - acc: 0.6220Epoch 1/100\n20/20 [==============================] - 5s 261ms/step - loss: 0.6660 - acc: 0.6154 - val_loss: 0.6662 - val_acc: 0.7738\nEpoch 18/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6628 - acc: 0.6391Epoch 1/100\n20/20 [==============================] - 5s 237ms/step - loss: 0.6627 - acc: 0.6364 - val_loss: 0.6586 - val_acc: 0.6505\nEpoch 19/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6596 - acc: 0.6558Epoch 1/100\n20/20 [==============================] - 5s 242ms/step - loss: 0.6597 - acc: 0.6542 - val_loss: 0.6608 - val_acc: 0.7383\nEpoch 20/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6539 - acc: 0.6717Epoch 1/100\n20/20 [==============================] - 5s 241ms/step - loss: 0.6541 - acc: 0.6689 - val_loss: 0.6537 - val_acc: 0.6093\nEpoch 21/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6550 - acc: 0.6537Epoch 1/100\n20/20 [==============================] - 5s 242ms/step - loss: 0.6551 - acc: 0.6507 - val_loss: 0.6572 - val_acc: 0.7458\nEpoch 22/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6470 - acc: 0.6669Epoch 1/100\n20/20 [==============================] - 4s 221ms/step - loss: 0.6478 - acc: 0.6637 - val_loss: 0.6516 - val_acc: 0.7364\nEpoch 23/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6447 - acc: 0.6837Epoch 1/100\n20/20 [==============================] - 5s 268ms/step - loss: 0.6445 - acc: 0.6839 - val_loss: 0.6345 - val_acc: 0.6579\nEpoch 24/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6442 - acc: 0.6708Epoch 1/100\n20/20 [==============================] - 5s 234ms/step - loss: 0.6434 - acc: 0.6729 - val_loss: 0.6343 - val_acc: 0.6953\nEpoch 25/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6395 - acc: 0.6871Epoch 1/100\n20/20 [==============================] - 5s 235ms/step - loss: 0.6389 - acc: 0.6839 - val_loss: 0.6312 - val_acc: 0.7140\nEpoch 26/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6343 - acc: 0.6725Epoch 1/100\n20/20 [==============================] - 5s 239ms/step - loss: 0.6346 - acc: 0.6729 - val_loss: 0.6297 - val_acc: 0.7271\nEpoch 27/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6328 - acc: 0.6767Epoch 1/100\n20/20 [==============================] - 5s 237ms/step - loss: 0.6324 - acc: 0.6812 - val_loss: 0.6289 - val_acc: 0.7308\nEpoch 28/100\n19/20 [===========================>..] 
- ETA: 0s - loss: 0.6290 - acc: 0.6733Epoch 1/100\n20/20 [==============================] - 4s 219ms/step - loss: 0.6294 - acc: 0.6760 - val_loss: 0.6273 - val_acc: 0.7383\nEpoch 29/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6252 - acc: 0.6838Epoch 1/100\n20/20 [==============================] - 5s 275ms/step - loss: 0.6245 - acc: 0.6875 - val_loss: 0.6256 - val_acc: 0.7364\nEpoch 30/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6256 - acc: 0.6909Epoch 1/100\n20/20 [==============================] - 5s 247ms/step - loss: 0.6261 - acc: 0.6879 - val_loss: 0.6140 - val_acc: 0.7009\nEpoch 31/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6169 - acc: 0.6880Epoch 1/100\n20/20 [==============================] - 5s 235ms/step - loss: 0.6186 - acc: 0.6819 - val_loss: 0.6163 - val_acc: 0.7458\nEpoch 32/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6174 - acc: 0.7005Epoch 1/100\n20/20 [==============================] - 5s 246ms/step - loss: 0.6175 - acc: 0.6990 - val_loss: 0.6290 - val_acc: 0.7383\nEpoch 33/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6122 - acc: 0.7059Epoch 1/100\n20/20 [==============================] - 5s 228ms/step - loss: 0.6129 - acc: 0.7057 - val_loss: 0.6176 - val_acc: 0.7402\nEpoch 34/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6107 - acc: 0.7067Epoch 1/100\n20/20 [==============================] - 5s 260ms/step - loss: 0.6112 - acc: 0.7034 - val_loss: 0.6057 - val_acc: 0.7421\nEpoch 35/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6089 - acc: 0.6963Epoch 1/100\n20/20 [==============================] - 5s 237ms/step - loss: 0.6083 - acc: 0.6994 - val_loss: 0.5919 - val_acc: 0.7047\nEpoch 36/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6093 - acc: 0.6896Epoch 1/100\n20/20 [==============================] - 5s 235ms/step - loss: 0.6079 - acc: 0.6907 - val_loss: 0.6023 - val_acc: 0.7477\nEpoch 37/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6029 - acc: 0.7142Epoch 1/100\n20/20 [==============================] - 5s 233ms/step - loss: 0.6017 - acc: 0.7161 - val_loss: 0.5926 - val_acc: 0.7084\nEpoch 38/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6003 - acc: 0.7150Epoch 1/100\n20/20 [==============================] - 5s 243ms/step - loss: 0.6006 - acc: 0.7137 - val_loss: 0.5869 - val_acc: 0.6953\nEpoch 39/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5990 - acc: 0.7062Epoch 1/100\n20/20 [==============================] - 4s 220ms/step - loss: 0.5986 - acc: 0.7100 - val_loss: 0.5881 - val_acc: 0.7458\nEpoch 40/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5941 - acc: 0.7076Epoch 1/100\n20/20 [==============================] - 5s 262ms/step - loss: 0.5947 - acc: 0.7077 - val_loss: 0.5918 - val_acc: 0.7682\nEpoch 41/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5948 - acc: 0.7201Epoch 1/100\n20/20 [==============================] - 5s 236ms/step - loss: 0.5930 - acc: 0.7224 - val_loss: 0.5830 - val_acc: 0.7477\nEpoch 42/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5918 - acc: 0.7155Epoch 1/100\n20/20 [==============================] - 5s 244ms/step - loss: 0.5897 - acc: 0.7189 - val_loss: 0.5773 - val_acc: 0.7159\nEpoch 43/100\n19/20 [===========================>..] 
- ETA: 0s - loss: 0.5887 - acc: 0.7192Epoch 1/100\n20/20 [==============================] - 5s 233ms/step - loss: 0.5876 - acc: 0.7192 - val_loss: 0.5966 - val_acc: 0.7421\nEpoch 44/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5818 - acc: 0.7253Epoch 1/100\n20/20 [==============================] - 5s 243ms/step - loss: 0.5815 - acc: 0.7242 - val_loss: 0.5946 - val_acc: 0.7477\nEpoch 45/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5754 - acc: 0.7312Epoch 1/100\n20/20 [==============================] - 4s 217ms/step - loss: 0.5764 - acc: 0.7305 - val_loss: 0.5912 - val_acc: 0.7477\nEpoch 46/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5791 - acc: 0.7263Epoch 1/100\n20/20 [==============================] - 5s 267ms/step - loss: 0.5785 - acc: 0.7275 - val_loss: 0.5775 - val_acc: 0.7682\nEpoch 47/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5751 - acc: 0.7299Epoch 1/100\n20/20 [==============================] - 5s 239ms/step - loss: 0.5752 - acc: 0.7301 - val_loss: 0.5993 - val_acc: 0.7084\nEpoch 48/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5728 - acc: 0.7305Epoch 1/100\n20/20 [==============================] - 5s 237ms/step - loss: 0.5727 - acc: 0.7315 - val_loss: 0.5563 - val_acc: 0.7607\nEpoch 49/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5746 - acc: 0.7234Epoch 1/100\n20/20 [==============================] - 5s 244ms/step - loss: 0.5737 - acc: 0.7259 - val_loss: 0.5614 - val_acc: 0.7682\nEpoch 50/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5656 - acc: 0.7401Epoch 1/100\n20/20 [==============================] - 5s 227ms/step - loss: 0.5646 - acc: 0.7418 - val_loss: 0.5520 - val_acc: 0.7570\nEpoch 51/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5658 - acc: 0.7409Epoch 1/100\n20/20 [==============================] - 5s 254ms/step - loss: 0.5648 - acc: 0.7418 - val_loss: 0.5555 - val_acc: 0.7626\nEpoch 52/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5676 - acc: 0.7330Epoch 1/100\n20/20 [==============================] - 5s 239ms/step - loss: 0.5680 - acc: 0.7343 - val_loss: 0.5438 - val_acc: 0.7458\nEpoch 53/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5600 - acc: 0.7451Epoch 1/100\n20/20 [==============================] - 5s 231ms/step - loss: 0.5613 - acc: 0.7430 - val_loss: 0.5505 - val_acc: 0.7645\nEpoch 54/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5513 - acc: 0.7500Epoch 1/100\n20/20 [==============================] - 5s 239ms/step - loss: 0.5490 - acc: 0.7531 - val_loss: 0.5467 - val_acc: 0.7514\nEpoch 55/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5596 - acc: 0.7459Epoch 1/100\n20/20 [==============================] - 5s 240ms/step - loss: 0.5598 - acc: 0.7442 - val_loss: 0.5375 - val_acc: 0.7682\nEpoch 56/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5584 - acc: 0.7367Epoch 1/100\n20/20 [==============================] - 4s 224ms/step - loss: 0.5604 - acc: 0.7333 - val_loss: 0.5395 - val_acc: 0.7869\nEpoch 57/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5487 - acc: 0.7501Epoch 1/100\n20/20 [==============================] - 5s 263ms/step - loss: 0.5498 - acc: 0.7493 - val_loss: 0.5687 - val_acc: 0.7589\nEpoch 58/100\n19/20 [===========================>..] 
- ETA: 0s - loss: 0.5538 - acc: 0.7368Epoch 1/100\n20/20 [==============================] - 5s 235ms/step - loss: 0.5537 - acc: 0.7390 - val_loss: 0.5596 - val_acc: 0.8019\nEpoch 59/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5417 - acc: 0.7564Epoch 1/100\n20/20 [==============================] - 5s 237ms/step - loss: 0.5439 - acc: 0.7525 - val_loss: 0.5694 - val_acc: 0.7720\nEpoch 60/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5466 - acc: 0.7578Epoch 1/100\n20/20 [==============================] - 5s 235ms/step - loss: 0.5429 - acc: 0.7631 - val_loss: 0.5341 - val_acc: 0.7776\nEpoch 61/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5512 - acc: 0.7414Epoch 1/100\n20/20 [==============================] - 5s 232ms/step - loss: 0.5523 - acc: 0.7398 - val_loss: 0.5255 - val_acc: 0.7813\nEpoch 62/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5468 - acc: 0.7477Epoch 1/100\n20/20 [==============================] - 4s 219ms/step - loss: 0.5460 - acc: 0.7494 - val_loss: 0.5310 - val_acc: 0.7439\nEpoch 63/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5398 - acc: 0.7564Epoch 1/100\n20/20 [==============================] - 5s 269ms/step - loss: 0.5378 - acc: 0.7576 - val_loss: 0.5322 - val_acc: 0.7701\nEpoch 64/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5474 - acc: 0.7476Epoch 1/100\n20/20 [==============================] - 5s 237ms/step - loss: 0.5472 - acc: 0.7481 - val_loss: 0.5341 - val_acc: 0.7963\nEpoch 65/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5345 - acc: 0.7597Epoch 1/100\n20/20 [==============================] - 5s 235ms/step - loss: 0.5354 - acc: 0.7592 - val_loss: 0.5301 - val_acc: 0.7121\nEpoch 66/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5313 - acc: 0.7689Epoch 1/100\n20/20 [==============================] - 5s 248ms/step - loss: 0.5298 - acc: 0.7691 - val_loss: 0.5385 - val_acc: 0.6822\nEpoch 67/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5444 - acc: 0.7476Epoch 1/100\n20/20 [==============================] - 4s 221ms/step - loss: 0.5434 - acc: 0.7498 - val_loss: 0.5215 - val_acc: 0.8000\nEpoch 68/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5248 - acc: 0.7626Epoch 1/100\n20/20 [==============================] - 5s 253ms/step - loss: 0.5232 - acc: 0.7644 - val_loss: 0.5439 - val_acc: 0.7607\nEpoch 69/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5360 - acc: 0.7580Epoch 1/100\n20/20 [==============================] - 5s 247ms/step - loss: 0.5360 - acc: 0.7568 - val_loss: 0.5237 - val_acc: 0.7682\nEpoch 70/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5258 - acc: 0.7589Epoch 1/100\n20/20 [==============================] - 5s 235ms/step - loss: 0.5262 - acc: 0.7576 - val_loss: 0.5100 - val_acc: 0.7607\nEpoch 71/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5269 - acc: 0.7710Epoch 1/100\n20/20 [==============================] - 5s 237ms/step - loss: 0.5288 - acc: 0.7659 - val_loss: 0.5203 - val_acc: 0.7776\nEpoch 72/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5229 - acc: 0.7710Epoch 1/100\n20/20 [==============================] - 5s 239ms/step - loss: 0.5240 - acc: 0.7691 - val_loss: 0.5356 - val_acc: 0.6748\nEpoch 73/100\n19/20 [===========================>..] 
- ETA: 0s - loss: 0.5256 - acc: 0.7497Epoch 1/100\n20/20 [==============================] - 4s 214ms/step - loss: 0.5228 - acc: 0.7493 - val_loss: 0.5301 - val_acc: 0.7869\nEpoch 74/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5161 - acc: 0.7714Epoch 1/100\n20/20 [==============================] - 5s 260ms/step - loss: 0.5186 - acc: 0.7675 - val_loss: 0.4978 - val_acc: 0.7832\nEpoch 75/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5158 - acc: 0.7760Epoch 1/100\n20/20 [==============================] - 5s 234ms/step - loss: 0.5158 - acc: 0.7770 - val_loss: 0.5091 - val_acc: 0.7907\nEpoch 76/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5232 - acc: 0.7643Epoch 1/100\n20/20 [==============================] - 5s 236ms/step - loss: 0.5220 - acc: 0.7667 - val_loss: 0.4988 - val_acc: 0.7850\nEpoch 77/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5086 - acc: 0.7705Epoch 1/100\n20/20 [==============================] - 5s 234ms/step - loss: 0.5089 - acc: 0.7691 - val_loss: 0.5039 - val_acc: 0.7813\nEpoch 78/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5156 - acc: 0.7673Epoch 1/100\n20/20 [==============================] - 5s 233ms/step - loss: 0.5118 - acc: 0.7711 - val_loss: 0.5022 - val_acc: 0.7963\nEpoch 79/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5129 - acc: 0.7697Epoch 1/100\n20/20 [==============================] - 4s 220ms/step - loss: 0.5134 - acc: 0.7675 - val_loss: 0.5164 - val_acc: 0.7888\nEpoch 80/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5144 - acc: 0.7639Epoch 1/100\n20/20 [==============================] - 5s 267ms/step - loss: 0.5154 - acc: 0.7640 - val_loss: 0.4975 - val_acc: 0.7925\nEpoch 81/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5086 - acc: 0.7839Epoch 1/100\n20/20 [==============================] - 5s 243ms/step - loss: 0.5082 - acc: 0.7814 - val_loss: 0.5279 - val_acc: 0.7645\nEpoch 82/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5046 - acc: 0.7789Epoch 1/100\n20/20 [==============================] - 5s 243ms/step - loss: 0.5079 - acc: 0.7754 - val_loss: 0.5069 - val_acc: 0.7813\nEpoch 83/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5030 - acc: 0.7672Epoch 1/100\n20/20 [==============================] - 5s 241ms/step - loss: 0.5000 - acc: 0.7687 - val_loss: 0.4978 - val_acc: 0.7981\nEpoch 84/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5116 - acc: 0.7627Epoch 1/100\n20/20 [==============================] - 5s 229ms/step - loss: 0.5113 - acc: 0.7648 - val_loss: 0.4894 - val_acc: 0.7907\nEpoch 85/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5069 - acc: 0.7806Epoch 1/100\n20/20 [==============================] - 5s 250ms/step - loss: 0.5057 - acc: 0.7818 - val_loss: 0.4862 - val_acc: 0.8131\nEpoch 86/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5027 - acc: 0.7710Epoch 1/100\n20/20 [==============================] - 5s 240ms/step - loss: 0.5030 - acc: 0.7723 - val_loss: 0.4832 - val_acc: 0.7981\nEpoch 87/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4918 - acc: 0.7877Epoch 1/100\n20/20 [==============================] - 5s 235ms/step - loss: 0.4905 - acc: 0.7877 - val_loss: 0.4914 - val_acc: 0.7888\nEpoch 88/100\n19/20 [===========================>..] 
- ETA: 0s - loss: 0.5061 - acc: 0.7663Epoch 1/100\n20/20 [==============================] - 5s 235ms/step - loss: 0.5073 - acc: 0.7647 - val_loss: 0.4877 - val_acc: 0.7963\nEpoch 89/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5040 - acc: 0.7833Epoch 1/100\n20/20 [==============================] - 5s 243ms/step - loss: 0.4999 - acc: 0.7852 - val_loss: 0.4794 - val_acc: 0.7963\nEpoch 90/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5012 - acc: 0.7807Epoch 1/100\n20/20 [==============================] - 4s 215ms/step - loss: 0.4996 - acc: 0.7803 - val_loss: 0.4869 - val_acc: 0.7963\nEpoch 91/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5028 - acc: 0.7751Epoch 1/100\n20/20 [==============================] - 5s 261ms/step - loss: 0.5019 - acc: 0.7754 - val_loss: 0.4840 - val_acc: 0.8037\nEpoch 92/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4960 - acc: 0.7815Epoch 1/100\n20/20 [==============================] - 5s 232ms/step - loss: 0.4964 - acc: 0.7827 - val_loss: 0.4882 - val_acc: 0.7850\nEpoch 93/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4925 - acc: 0.7882Epoch 1/100\n20/20 [==============================] - 5s 233ms/step - loss: 0.4955 - acc: 0.7865 - val_loss: 0.4744 - val_acc: 0.8206\nEpoch 94/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4881 - acc: 0.7797Epoch 1/100\n20/20 [==============================] - 5s 244ms/step - loss: 0.4898 - acc: 0.7782 - val_loss: 0.4719 - val_acc: 0.7925\nEpoch 95/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4891 - acc: 0.7789Epoch 1/100\n20/20 [==============================] - 5s 237ms/step - loss: 0.4894 - acc: 0.7786 - val_loss: 0.4983 - val_acc: 0.7888\nEpoch 96/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4896 - acc: 0.7801Epoch 1/100\n20/20 [==============================] - 4s 216ms/step - loss: 0.4889 - acc: 0.7814 - val_loss: 0.4793 - val_acc: 0.8000\nEpoch 97/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4869 - acc: 0.7885Epoch 1/100\n20/20 [==============================] - 5s 275ms/step - loss: 0.4855 - acc: 0.7893 - val_loss: 0.4922 - val_acc: 0.7738\nEpoch 98/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4952 - acc: 0.7789Epoch 1/100\n20/20 [==============================] - 5s 233ms/step - loss: 0.4947 - acc: 0.7806 - val_loss: 0.4644 - val_acc: 0.8112\nEpoch 99/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4755 - acc: 0.7904Epoch 1/100\n20/20 [==============================] - 5s 239ms/step - loss: 0.4793 - acc: 0.7876 - val_loss: 0.4879 - val_acc: 0.7757\nEpoch 100/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4854 - acc: 0.7808Epoch 1/100\n20/20 [==============================] - 5s 243ms/step - loss: 0.4884 - acc: 0.7773 - val_loss: 0.4642 - val_acc: 0.7925\nRMS [lr = 0.00010]: \nEpoch 1/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6938 - acc: 0.5269Epoch 1/100\n20/20 [==============================] - 6s 302ms/step - loss: 0.6933 - acc: 0.5291 - val_loss: 0.6742 - val_acc: 0.5701\nEpoch 2/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6861 - acc: 0.5565Epoch 1/100\n20/20 [==============================] - 5s 234ms/step - loss: 0.6859 - acc: 0.5636 - val_loss: 0.6684 - val_acc: 0.5701\nEpoch 3/100\n19/20 [===========================>..] 
- ETA: 0s - loss: 0.6792 - acc: 0.5686Epoch 1/100\n20/20 [==============================] - 5s 229ms/step - loss: 0.6796 - acc: 0.5659 - val_loss: 0.6612 - val_acc: 0.5701\nEpoch 4/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6697 - acc: 0.5907Epoch 1/100\n20/20 [==============================] - 5s 237ms/step - loss: 0.6692 - acc: 0.5885 - val_loss: 0.6617 - val_acc: 0.7308\nEpoch 5/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6564 - acc: 0.6216Epoch 1/100\n20/20 [==============================] - 4s 217ms/step - loss: 0.6564 - acc: 0.6182 - val_loss: 0.6740 - val_acc: 0.5701\nEpoch 6/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6444 - acc: 0.6245Epoch 1/100\n20/20 [==============================] - 5s 265ms/step - loss: 0.6444 - acc: 0.6289 - val_loss: 0.6279 - val_acc: 0.6822\nEpoch 7/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6237 - acc: 0.6700Epoch 1/100\n20/20 [==============================] - 5s 238ms/step - loss: 0.6232 - acc: 0.6685 - val_loss: 0.6359 - val_acc: 0.6542\nEpoch 8/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6047 - acc: 0.6719Epoch 1/100\n20/20 [==============================] - 5s 237ms/step - loss: 0.6042 - acc: 0.6687 - val_loss: 0.6068 - val_acc: 0.7570\nEpoch 9/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5909 - acc: 0.6909Epoch 1/100\n20/20 [==============================] - 5s 234ms/step - loss: 0.5910 - acc: 0.6863 - val_loss: 0.6479 - val_acc: 0.6000\nEpoch 10/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5691 - acc: 0.7309Epoch 1/100\n20/20 [==============================] - 5s 232ms/step - loss: 0.5699 - acc: 0.7291 - val_loss: 0.5299 - val_acc: 0.7850\nEpoch 11/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5584 - acc: 0.7276Epoch 1/100\n20/20 [==============================] - 4s 216ms/step - loss: 0.5590 - acc: 0.7263 - val_loss: 0.5140 - val_acc: 0.7290\nEpoch 12/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5310 - acc: 0.7476Epoch 1/100\n20/20 [==============================] - 5s 270ms/step - loss: 0.5309 - acc: 0.7497 - val_loss: 0.5039 - val_acc: 0.7907\nEpoch 13/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5094 - acc: 0.7635Epoch 1/100\n20/20 [==============================] - 5s 236ms/step - loss: 0.5116 - acc: 0.7600 - val_loss: 0.6193 - val_acc: 0.6542\nEpoch 14/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5134 - acc: 0.7710Epoch 1/100\n20/20 [==============================] - 5s 240ms/step - loss: 0.5140 - acc: 0.7699 - val_loss: 0.4670 - val_acc: 0.7701\nEpoch 15/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4934 - acc: 0.7790Epoch 1/100\n20/20 [==============================] - 5s 240ms/step - loss: 0.4931 - acc: 0.7811 - val_loss: 0.4501 - val_acc: 0.7794\nEpoch 16/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4864 - acc: 0.7726Epoch 1/100\n20/20 [==============================] - 5s 226ms/step - loss: 0.4845 - acc: 0.7739 - val_loss: 0.4693 - val_acc: 0.7421\nEpoch 17/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4993 - acc: 0.7643Epoch 1/100\n20/20 [==============================] - 5s 256ms/step - loss: 0.4957 - acc: 0.7659 - val_loss: 0.4338 - val_acc: 0.7776\nEpoch 18/100\n19/20 [===========================>..] 
- ETA: 0s - loss: 0.4658 - acc: 0.7897Epoch 1/100\n20/20 [==============================] - 5s 234ms/step - loss: 0.4677 - acc: 0.7877 - val_loss: 0.5207 - val_acc: 0.7832\nEpoch 19/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4697 - acc: 0.7885Epoch 1/100\n20/20 [==============================] - 5s 234ms/step - loss: 0.4663 - acc: 0.7905 - val_loss: 0.4189 - val_acc: 0.8056\nEpoch 20/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4725 - acc: 0.7889Epoch 1/100\n20/20 [==============================] - 5s 236ms/step - loss: 0.4654 - acc: 0.7949 - val_loss: 0.4310 - val_acc: 0.7907\nEpoch 21/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4677 - acc: 0.7860Epoch 1/100\n20/20 [==============================] - 5s 242ms/step - loss: 0.4634 - acc: 0.7884 - val_loss: 0.4294 - val_acc: 0.7757\nEpoch 22/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4656 - acc: 0.7887Epoch 1/100\n20/20 [==============================] - 4s 219ms/step - loss: 0.4653 - acc: 0.7906 - val_loss: 0.4216 - val_acc: 0.8075\nEpoch 23/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4457 - acc: 0.8040Epoch 1/100\n20/20 [==============================] - 5s 260ms/step - loss: 0.4482 - acc: 0.8036 - val_loss: 0.4791 - val_acc: 0.7757\nEpoch 24/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4402 - acc: 0.8106Epoch 1/100\n20/20 [==============================] - 5s 234ms/step - loss: 0.4386 - acc: 0.8107 - val_loss: 0.4255 - val_acc: 0.7925\nEpoch 25/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4529 - acc: 0.7977Epoch 1/100\n20/20 [==============================] - 5s 235ms/step - loss: 0.4507 - acc: 0.7964 - val_loss: 0.4360 - val_acc: 0.8056\nEpoch 26/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4418 - acc: 0.7951Epoch 1/100\n20/20 [==============================] - 5s 233ms/step - loss: 0.4448 - acc: 0.7932 - val_loss: 0.4349 - val_acc: 0.8150\nEpoch 27/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4548 - acc: 0.7918Epoch 1/100\n20/20 [==============================] - 5s 237ms/step - loss: 0.4500 - acc: 0.7960 - val_loss: 0.4139 - val_acc: 0.8093\nEpoch 28/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4178 - acc: 0.8194Epoch 1/100\n20/20 [==============================] - 4s 218ms/step - loss: 0.4205 - acc: 0.8166 - val_loss: 0.4871 - val_acc: 0.7794\nEpoch 29/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4464 - acc: 0.7939Epoch 1/100\n20/20 [==============================] - 5s 270ms/step - loss: 0.4538 - acc: 0.7877 - val_loss: 0.4428 - val_acc: 0.8037\nEpoch 30/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4160 - acc: 0.8148Epoch 1/100\n20/20 [==============================] - 5s 232ms/step - loss: 0.4157 - acc: 0.8162 - val_loss: 0.3890 - val_acc: 0.8131\nEpoch 31/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4278 - acc: 0.8077Epoch 1/100\n20/20 [==============================] - 5s 234ms/step - loss: 0.4322 - acc: 0.8059 - val_loss: 0.4670 - val_acc: 0.7832\nEpoch 32/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4247 - acc: 0.8139Epoch 1/100\n20/20 [==============================] - 5s 237ms/step - loss: 0.4297 - acc: 0.8107 - val_loss: 0.4196 - val_acc: 0.8075\nEpoch 33/100\n19/20 [===========================>..] 
- ETA: 0s - loss: 0.4446 - acc: 0.8010Epoch 1/100\n20/20 [==============================] - 5s 227ms/step - loss: 0.4439 - acc: 0.8016 - val_loss: 0.3968 - val_acc: 0.8262\nEpoch 34/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4293 - acc: 0.8137Epoch 1/100\n20/20 [==============================] - 5s 258ms/step - loss: 0.4272 - acc: 0.8154 - val_loss: 0.3978 - val_acc: 0.8187\nEpoch 35/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4166 - acc: 0.8194Epoch 1/100\n20/20 [==============================] - 5s 240ms/step - loss: 0.4137 - acc: 0.8218 - val_loss: 0.5223 - val_acc: 0.7421\nEpoch 36/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4257 - acc: 0.8098Epoch 1/100\n20/20 [==============================] - 5s 234ms/step - loss: 0.4254 - acc: 0.8095 - val_loss: 0.3963 - val_acc: 0.8168\nEpoch 37/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4277 - acc: 0.8098Epoch 1/100\n20/20 [==============================] - 5s 237ms/step - loss: 0.4255 - acc: 0.8119 - val_loss: 0.4076 - val_acc: 0.8131\nEpoch 38/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4105 - acc: 0.8273Epoch 1/100\n20/20 [==============================] - 5s 236ms/step - loss: 0.4114 - acc: 0.8265 - val_loss: 0.4108 - val_acc: 0.8037\nEpoch 39/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4209 - acc: 0.8065Epoch 1/100\n20/20 [==============================] - 4s 211ms/step - loss: 0.4168 - acc: 0.8104 - val_loss: 0.3961 - val_acc: 0.8168\nEpoch 40/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4182 - acc: 0.8135Epoch 1/100\n20/20 [==============================] - 5s 259ms/step - loss: 0.4184 - acc: 0.8131 - val_loss: 0.4023 - val_acc: 0.8131\nEpoch 41/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4143 - acc: 0.8185Epoch 1/100\n20/20 [==============================] - 5s 235ms/step - loss: 0.4109 - acc: 0.8214 - val_loss: 0.3982 - val_acc: 0.8075\nEpoch 42/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4239 - acc: 0.8120Epoch 1/100\n20/20 [==============================] - 5s 233ms/step - loss: 0.4236 - acc: 0.8120 - val_loss: 0.4159 - val_acc: 0.7907\nEpoch 43/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4079 - acc: 0.8174Epoch 1/100\n20/20 [==============================] - 5s 237ms/step - loss: 0.4096 - acc: 0.8172 - val_loss: 0.3832 - val_acc: 0.8224\nEpoch 44/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4098 - acc: 0.8069Epoch 1/100\n20/20 [==============================] - 5s 233ms/step - loss: 0.4097 - acc: 0.8088 - val_loss: 0.3992 - val_acc: 0.8280\nEpoch 45/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4001 - acc: 0.8306Epoch 1/100\n20/20 [==============================] - 4s 217ms/step - loss: 0.3998 - acc: 0.8309 - val_loss: 0.3987 - val_acc: 0.8168\nEpoch 46/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4075 - acc: 0.8185Epoch 1/100\n20/20 [==============================] - 5s 273ms/step - loss: 0.4068 - acc: 0.8194 - val_loss: 0.3903 - val_acc: 0.8224\nEpoch 47/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4096 - acc: 0.8131Epoch 1/100\n20/20 [==============================] - 5s 246ms/step - loss: 0.4099 - acc: 0.8119 - val_loss: 0.3982 - val_acc: 0.8093\nEpoch 48/100\n19/20 [===========================>..] 
- ETA: 0s - loss: 0.4086 - acc: 0.8206Epoch 1/100\n20/20 [==============================] - 5s 237ms/step - loss: 0.4056 - acc: 0.8222 - val_loss: 0.3994 - val_acc: 0.8243\nEpoch 49/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3968 - acc: 0.8226Epoch 1/100\n20/20 [==============================] - 5s 234ms/step - loss: 0.3942 - acc: 0.8237 - val_loss: 0.3977 - val_acc: 0.8187\nEpoch 50/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3957 - acc: 0.8211Epoch 1/100\n20/20 [==============================] - 5s 232ms/step - loss: 0.3950 - acc: 0.8211 - val_loss: 0.3974 - val_acc: 0.8168\nEpoch 51/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3991 - acc: 0.8201Epoch 1/100\n20/20 [==============================] - 5s 256ms/step - loss: 0.3972 - acc: 0.8225 - val_loss: 0.3854 - val_acc: 0.8093\nEpoch 52/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3919 - acc: 0.8214Epoch 1/100\n20/20 [==============================] - 5s 238ms/step - loss: 0.3874 - acc: 0.8234 - val_loss: 0.3890 - val_acc: 0.8187\nEpoch 53/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3974 - acc: 0.8202Epoch 1/100\n20/20 [==============================] - 5s 232ms/step - loss: 0.3945 - acc: 0.8209 - val_loss: 0.4116 - val_acc: 0.7981\nEpoch 54/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3890 - acc: 0.8269Epoch 1/100\n20/20 [==============================] - 5s 242ms/step - loss: 0.3871 - acc: 0.8293 - val_loss: 0.3797 - val_acc: 0.8093\nEpoch 55/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3826 - acc: 0.8290Epoch 1/100\n20/20 [==============================] - 5s 233ms/step - loss: 0.3788 - acc: 0.8313 - val_loss: 0.3891 - val_acc: 0.8224\nEpoch 56/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3956 - acc: 0.8189Epoch 1/100\n20/20 [==============================] - 4s 214ms/step - loss: 0.3929 - acc: 0.8190 - val_loss: 0.3948 - val_acc: 0.8299\nEpoch 57/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3754 - acc: 0.8264Epoch 1/100\n20/20 [==============================] - 5s 259ms/step - loss: 0.3711 - acc: 0.8277 - val_loss: 0.4080 - val_acc: 0.8262\nEpoch 58/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3737 - acc: 0.8302Epoch 1/100\n20/20 [==============================] - 5s 240ms/step - loss: 0.3680 - acc: 0.8336 - val_loss: 0.3842 - val_acc: 0.8280\nEpoch 59/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3943 - acc: 0.8239Epoch 1/100\n20/20 [==============================] - 5s 236ms/step - loss: 0.3935 - acc: 0.8253 - val_loss: 0.3949 - val_acc: 0.8467\nEpoch 60/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3951 - acc: 0.8180Epoch 1/100\n20/20 [==============================] - 5s 243ms/step - loss: 0.3900 - acc: 0.8197 - val_loss: 0.4086 - val_acc: 0.8374\nEpoch 61/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3667 - acc: 0.8372Epoch 1/100\n20/20 [==============================] - 5s 234ms/step - loss: 0.3664 - acc: 0.8367 - val_loss: 0.4060 - val_acc: 0.8280\nEpoch 62/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3786 - acc: 0.8459Epoch 1/100\n20/20 [==============================] - 4s 218ms/step - loss: 0.3780 - acc: 0.8458 - val_loss: 0.4451 - val_acc: 0.8093\nEpoch 63/100\n19/20 [===========================>..] 
- ETA: 0s - loss: 0.3746 - acc: 0.8323Epoch 1/100\n20/20 [==============================] - 6s 280ms/step - loss: 0.3742 - acc: 0.8325 - val_loss: 0.4040 - val_acc: 0.8355\nEpoch 64/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3605 - acc: 0.8373Epoch 1/100\n20/20 [==============================] - 5s 238ms/step - loss: 0.3605 - acc: 0.8380 - val_loss: 0.4500 - val_acc: 0.8093\nEpoch 65/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3900 - acc: 0.8181Epoch 1/100\n20/20 [==============================] - 5s 234ms/step - loss: 0.3906 - acc: 0.8186 - val_loss: 0.4007 - val_acc: 0.8393\nEpoch 66/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3516 - acc: 0.8501Epoch 1/100\n20/20 [==============================] - 5s 239ms/step - loss: 0.3488 - acc: 0.8522 - val_loss: 0.4223 - val_acc: 0.8318\nEpoch 67/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3781 - acc: 0.8298Epoch 1/100\n20/20 [==============================] - 5s 226ms/step - loss: 0.3770 - acc: 0.8325 - val_loss: 0.3945 - val_acc: 0.8355\nEpoch 68/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3599 - acc: 0.8431Epoch 1/100\n20/20 [==============================] - 5s 255ms/step - loss: 0.3611 - acc: 0.8424 - val_loss: 0.4015 - val_acc: 0.8355\nEpoch 69/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3551 - acc: 0.8489Epoch 1/100\n20/20 [==============================] - 5s 234ms/step - loss: 0.3646 - acc: 0.8446 - val_loss: 0.4904 - val_acc: 0.7757\nEpoch 70/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3575 - acc: 0.8415Epoch 1/100\n20/20 [==============================] - 5s 236ms/step - loss: 0.3570 - acc: 0.8416 - val_loss: 0.3947 - val_acc: 0.8411\nEpoch 71/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3532 - acc: 0.8438Epoch 1/100\n20/20 [==============================] - 5s 237ms/step - loss: 0.3561 - acc: 0.8441 - val_loss: 0.4541 - val_acc: 0.7981\nEpoch 72/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3676 - acc: 0.8390Epoch 1/100\n20/20 [==============================] - 5s 240ms/step - loss: 0.3660 - acc: 0.8388 - val_loss: 0.4178 - val_acc: 0.8318\nEpoch 73/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3696 - acc: 0.8421Epoch 1/100\n20/20 [==============================] - 4s 221ms/step - loss: 0.3684 - acc: 0.8426 - val_loss: 0.4114 - val_acc: 0.8299\nEpoch 74/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3631 - acc: 0.8473Epoch 1/100\n20/20 [==============================] - 5s 263ms/step - loss: 0.3631 - acc: 0.8475 - val_loss: 0.4753 - val_acc: 0.8112\nEpoch 75/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3562 - acc: 0.8494Epoch 1/100\n20/20 [==============================] - 5s 240ms/step - loss: 0.3567 - acc: 0.8495 - val_loss: 0.3720 - val_acc: 0.8318\nEpoch 76/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3543 - acc: 0.8502Epoch 1/100\n20/20 [==============================] - 5s 236ms/step - loss: 0.3564 - acc: 0.8483 - val_loss: 0.4330 - val_acc: 0.8075\nEpoch 77/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3670 - acc: 0.8378Epoch 1/100\n20/20 [==============================] - 5s 237ms/step - loss: 0.3637 - acc: 0.8410 - val_loss: 0.3789 - val_acc: 0.8224\nEpoch 78/100\n19/20 [===========================>..] 
- ETA: 0s - loss: 0.3543 - acc: 0.8423Epoch 1/100\n20/20 [==============================] - 5s 236ms/step - loss: 0.3556 - acc: 0.8404 - val_loss: 0.3720 - val_acc: 0.8243\nEpoch 79/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3405 - acc: 0.8531Epoch 1/100\n20/20 [==============================] - 4s 219ms/step - loss: 0.3445 - acc: 0.8539 - val_loss: 0.4166 - val_acc: 0.8168\nEpoch 80/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3605 - acc: 0.8477Epoch 1/100\n20/20 [==============================] - 5s 273ms/step - loss: 0.3591 - acc: 0.8495 - val_loss: 0.3505 - val_acc: 0.8355\nEpoch 81/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3562 - acc: 0.8385Epoch 1/100\n20/20 [==============================] - 5s 237ms/step - loss: 0.3551 - acc: 0.8396 - val_loss: 0.3511 - val_acc: 0.8336\nEpoch 82/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3499 - acc: 0.8501Epoch 1/100\n20/20 [==============================] - 5s 232ms/step - loss: 0.3477 - acc: 0.8518 - val_loss: 0.4465 - val_acc: 0.8430\nEpoch 83/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3561 - acc: 0.8469Epoch 1/100\n20/20 [==============================] - 5s 241ms/step - loss: 0.3513 - acc: 0.8499 - val_loss: 0.4502 - val_acc: 0.8430\nEpoch 84/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3545 - acc: 0.8519Epoch 1/100\n20/20 [==============================] - 4s 222ms/step - loss: 0.3530 - acc: 0.8523 - val_loss: 0.4510 - val_acc: 0.8187\nEpoch 85/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3628 - acc: 0.8469Epoch 1/100\n20/20 [==============================] - 5s 253ms/step - loss: 0.3605 - acc: 0.8475 - val_loss: 0.4461 - val_acc: 0.8430\nEpoch 86/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3535 - acc: 0.8556Epoch 1/100\n20/20 [==============================] - 5s 235ms/step - loss: 0.3534 - acc: 0.8546 - val_loss: 0.4662 - val_acc: 0.8411\nEpoch 87/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3448 - acc: 0.8506Epoch 1/100\n20/20 [==============================] - 5s 247ms/step - loss: 0.3488 - acc: 0.8487 - val_loss: 0.4471 - val_acc: 0.8486\nEpoch 88/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3457 - acc: 0.8490Epoch 1/100\n20/20 [==============================] - 5s 234ms/step - loss: 0.3483 - acc: 0.8475 - val_loss: 0.4334 - val_acc: 0.8486\nEpoch 89/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3505 - acc: 0.8618Epoch 1/100\n20/20 [==============================] - 5s 236ms/step - loss: 0.3470 - acc: 0.8625 - val_loss: 0.4687 - val_acc: 0.8505\nEpoch 90/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3325 - acc: 0.8620Epoch 1/100\n20/20 [==============================] - 4s 214ms/step - loss: 0.3322 - acc: 0.8622 - val_loss: 0.4637 - val_acc: 0.8355\nEpoch 91/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3515 - acc: 0.8498Epoch 1/100\n20/20 [==============================] - 5s 268ms/step - loss: 0.3509 - acc: 0.8507 - val_loss: 0.4469 - val_acc: 0.8505\nEpoch 92/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3258 - acc: 0.8678Epoch 1/100\n20/20 [==============================] - 5s 234ms/step - loss: 0.3238 - acc: 0.8693 - val_loss: 0.4536 - val_acc: 0.8654\nEpoch 93/100\n19/20 [===========================>..] 
- ETA: 0s - loss: 0.3643 - acc: 0.8377Epoch 1/100\n20/20 [==============================] - 5s 236ms/step - loss: 0.3654 - acc: 0.8376 - val_loss: 0.4787 - val_acc: 0.8374\nEpoch 94/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3483 - acc: 0.8477Epoch 1/100\n20/20 [==============================] - 5s 236ms/step - loss: 0.3445 - acc: 0.8495 - val_loss: 0.4606 - val_acc: 0.8336\nEpoch 95/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3237 - acc: 0.8692Epoch 1/100\n20/20 [==============================] - 5s 234ms/step - loss: 0.3232 - acc: 0.8691 - val_loss: 0.5385 - val_acc: 0.8112\nEpoch 96/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3430 - acc: 0.8565Epoch 1/100\n20/20 [==============================] - 4s 219ms/step - loss: 0.3456 - acc: 0.8539 - val_loss: 0.4492 - val_acc: 0.8523\nEpoch 97/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3278 - acc: 0.8657Epoch 1/100\n20/20 [==============================] - 5s 268ms/step - loss: 0.3286 - acc: 0.8657 - val_loss: 0.3536 - val_acc: 0.8617\nEpoch 98/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3279 - acc: 0.8594Epoch 1/100\n20/20 [==============================] - 5s 236ms/step - loss: 0.3339 - acc: 0.8566 - val_loss: 0.3676 - val_acc: 0.8262\nEpoch 99/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3314 - acc: 0.8627Epoch 1/100\n20/20 [==============================] - 5s 234ms/step - loss: 0.3376 - acc: 0.8586 - val_loss: 0.3795 - val_acc: 0.8449\nEpoch 100/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3737 - acc: 0.8353Epoch 1/100\n20/20 [==============================] - 5s 243ms/step - loss: 0.3674 - acc: 0.8394 - val_loss: 0.3506 - val_acc: 0.8561\nRMS [lr = 0.00100]: \nEpoch 1/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.9910 - acc: 0.5244Epoch 1/100\n20/20 [==============================] - 6s 295ms/step - loss: 0.9763 - acc: 0.5240 - val_loss: 0.6858 - val_acc: 0.5701\nEpoch 2/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6888 - acc: 0.5465Epoch 1/100\n20/20 [==============================] - 5s 234ms/step - loss: 0.6881 - acc: 0.5473 - val_loss: 0.6888 - val_acc: 0.5701\nEpoch 3/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6967 - acc: 0.5415Epoch 1/100\n20/20 [==============================] - 5s 235ms/step - loss: 0.6956 - acc: 0.5434 - val_loss: 0.6878 - val_acc: 0.5701\nEpoch 4/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6898 - acc: 0.5654Epoch 1/100\n20/20 [==============================] - 5s 238ms/step - loss: 0.6898 - acc: 0.5664 - val_loss: 0.6850 - val_acc: 0.6617\nEpoch 5/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6831 - acc: 0.6016Epoch 1/100\n20/20 [==============================] - 4s 212ms/step - loss: 0.6821 - acc: 0.6036 - val_loss: 0.6859 - val_acc: 0.4841\nEpoch 6/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6701 - acc: 0.6037Epoch 1/100\n20/20 [==============================] - 5s 265ms/step - loss: 0.6699 - acc: 0.6067 - val_loss: 0.6659 - val_acc: 0.5028\nEpoch 7/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6443 - acc: 0.6554Epoch 1/100\n20/20 [==============================] - 5s 240ms/step - loss: 0.6430 - acc: 0.6590 - val_loss: 0.6205 - val_acc: 0.6561\nEpoch 8/100\n19/20 [===========================>..] 
- ETA: 0s - loss: 0.6246 - acc: 0.6829Epoch 1/100\n20/20 [==============================] - 5s 240ms/step - loss: 0.6211 - acc: 0.6876 - val_loss: 0.6229 - val_acc: 0.6355\nEpoch 9/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6214 - acc: 0.6727Epoch 1/100\n20/20 [==============================] - 5s 234ms/step - loss: 0.6207 - acc: 0.6750 - val_loss: 0.5896 - val_acc: 0.6673\nEpoch 10/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6102 - acc: 0.6774Epoch 1/100\n20/20 [==============================] - 5s 232ms/step - loss: 0.6106 - acc: 0.6787 - val_loss: 0.5494 - val_acc: 0.6748\nEpoch 11/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6098 - acc: 0.6887Epoch 1/100\n20/20 [==============================] - 4s 216ms/step - loss: 0.6098 - acc: 0.6887 - val_loss: 0.5846 - val_acc: 0.7290\nEpoch 12/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5710 - acc: 0.7244Epoch 1/100\n20/20 [==============================] - 5s 268ms/step - loss: 0.5743 - acc: 0.7185 - val_loss: 0.5663 - val_acc: 0.7084\nEpoch 13/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5661 - acc: 0.7134Epoch 1/100\n20/20 [==============================] - 5s 241ms/step - loss: 0.5638 - acc: 0.7160 - val_loss: 0.4995 - val_acc: 0.7533\nEpoch 14/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5456 - acc: 0.7180Epoch 1/100\n20/20 [==============================] - 5s 232ms/step - loss: 0.5501 - acc: 0.7141 - val_loss: 0.6030 - val_acc: 0.6822\nEpoch 15/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5480 - acc: 0.7377Epoch 1/100\n20/20 [==============================] - 5s 239ms/step - loss: 0.5504 - acc: 0.7344 - val_loss: 0.5110 - val_acc: 0.7720\nEpoch 16/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5111 - acc: 0.7643Epoch 1/100\n20/20 [==============================] - 5s 231ms/step - loss: 0.5107 - acc: 0.7635 - val_loss: 0.4552 - val_acc: 0.7720\nEpoch 17/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5199 - acc: 0.7639Epoch 1/100\n20/20 [==============================] - 5s 257ms/step - loss: 0.5246 - acc: 0.7620 - val_loss: 0.9436 - val_acc: 0.5925\nEpoch 18/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5116 - acc: 0.7580Epoch 1/100\n20/20 [==============================] - 5s 235ms/step - loss: 0.5063 - acc: 0.7616 - val_loss: 0.9454 - val_acc: 0.5794\nEpoch 19/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5333 - acc: 0.7447Epoch 1/100\n20/20 [==============================] - 5s 234ms/step - loss: 0.5261 - acc: 0.7489 - val_loss: 0.4609 - val_acc: 0.8000\nEpoch 20/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4948 - acc: 0.7544Epoch 1/100\n20/20 [==============================] - 5s 234ms/step - loss: 0.4946 - acc: 0.7558 - val_loss: 0.4589 - val_acc: 0.7645\nEpoch 21/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5130 - acc: 0.7459Epoch 1/100\n20/20 [==============================] - 5s 234ms/step - loss: 0.5110 - acc: 0.7485 - val_loss: 0.5075 - val_acc: 0.7776\nEpoch 22/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4639 - acc: 0.7872Epoch 1/100\n20/20 [==============================] - 4s 219ms/step - loss: 0.4587 - acc: 0.7901 - val_loss: 0.4567 - val_acc: 0.7925\nEpoch 23/100\n19/20 [===========================>..] 
- ETA: 0s - loss: 0.4565 - acc: 0.7822Epoch 1/100\n20/20 [==============================] - 5s 260ms/step - loss: 0.4545 - acc: 0.7838 - val_loss: 0.3864 - val_acc: 0.8206\nEpoch 24/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4901 - acc: 0.7601Epoch 1/100\n20/20 [==============================] - 5s 233ms/step - loss: 0.4861 - acc: 0.7632 - val_loss: 0.5900 - val_acc: 0.6075\nEpoch 25/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6375 - acc: 0.7209Epoch 1/100\n20/20 [==============================] - 5s 235ms/step - loss: 0.6277 - acc: 0.7251 - val_loss: 0.4779 - val_acc: 0.7776\nEpoch 26/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4666 - acc: 0.7881Epoch 1/100\n20/20 [==============================] - 5s 237ms/step - loss: 0.4614 - acc: 0.7913 - val_loss: 0.4156 - val_acc: 0.8019\nEpoch 27/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4385 - acc: 0.7917Epoch 1/100\n20/20 [==============================] - 5s 238ms/step - loss: 0.4512 - acc: 0.7847 - val_loss: 0.6440 - val_acc: 0.6692\nEpoch 28/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4524 - acc: 0.7924Epoch 1/100\n20/20 [==============================] - 4s 218ms/step - loss: 0.4517 - acc: 0.7925 - val_loss: 0.4323 - val_acc: 0.8187\nEpoch 29/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4839 - acc: 0.7664Epoch 1/100\n20/20 [==============================] - 5s 275ms/step - loss: 0.4859 - acc: 0.7616 - val_loss: 0.5031 - val_acc: 0.8000\nEpoch 30/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5379 - acc: 0.7831Epoch 1/100\n20/20 [==============================] - 5s 232ms/step - loss: 0.5301 - acc: 0.7853 - val_loss: 0.3867 - val_acc: 0.8224\nEpoch 31/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4392 - acc: 0.7854Epoch 1/100\n20/20 [==============================] - 5s 232ms/step - loss: 0.4319 - acc: 0.7900 - val_loss: 0.3884 - val_acc: 0.8243\nEpoch 32/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4663 - acc: 0.7697Epoch 1/100\n20/20 [==============================] - 5s 236ms/step - loss: 0.4655 - acc: 0.7699 - val_loss: 0.4180 - val_acc: 0.8206\nEpoch 33/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4321 - acc: 0.8014Epoch 1/100\n20/20 [==============================] - 4s 222ms/step - loss: 0.4372 - acc: 0.7980 - val_loss: 0.4147 - val_acc: 0.8112\nEpoch 34/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4385 - acc: 0.7972Epoch 1/100\n20/20 [==============================] - 5s 253ms/step - loss: 0.4362 - acc: 0.8004 - val_loss: 0.4203 - val_acc: 0.8467\nEpoch 35/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4568 - acc: 0.7826Epoch 1/100\n20/20 [==============================] - 5s 239ms/step - loss: 0.4574 - acc: 0.7826 - val_loss: 0.4374 - val_acc: 0.8037\nEpoch 36/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4541 - acc: 0.7843Epoch 1/100\n20/20 [==============================] - 5s 234ms/step - loss: 0.4560 - acc: 0.7838 - val_loss: 0.4905 - val_acc: 0.7888\nEpoch 37/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4143 - acc: 0.8135Epoch 1/100\n20/20 [==============================] - 5s 237ms/step - loss: 0.4129 - acc: 0.8150 - val_loss: 0.4385 - val_acc: 0.8299\nEpoch 38/100\n19/20 [===========================>..] 
- ETA: 0s - loss: 0.4460 - acc: 0.7989Epoch 1/100\n20/20 [==============================] - 5s 228ms/step - loss: 0.4427 - acc: 0.7996 - val_loss: 0.3920 - val_acc: 0.8243\nEpoch 39/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4021 - acc: 0.8194Epoch 1/100\n20/20 [==============================] - 4s 214ms/step - loss: 0.4056 - acc: 0.8186 - val_loss: 0.4254 - val_acc: 0.8150\nEpoch 40/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4205 - acc: 0.8077Epoch 1/100\n20/20 [==============================] - 5s 261ms/step - loss: 0.4253 - acc: 0.8024 - val_loss: 0.5285 - val_acc: 0.7981\nEpoch 41/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4410 - acc: 0.7931Epoch 1/100\n20/20 [==============================] - 5s 235ms/step - loss: 0.4392 - acc: 0.7936 - val_loss: 0.4089 - val_acc: 0.8131\nEpoch 42/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4163 - acc: 0.8148Epoch 1/100\n20/20 [==============================] - 5s 234ms/step - loss: 0.4173 - acc: 0.8131 - val_loss: 0.4446 - val_acc: 0.8299\nEpoch 43/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3968 - acc: 0.8227Epoch 1/100\n20/20 [==============================] - 5s 234ms/step - loss: 0.4012 - acc: 0.8210 - val_loss: 0.4074 - val_acc: 0.8000\nEpoch 44/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4277 - acc: 0.8006Epoch 1/100\n20/20 [==============================] - 5s 231ms/step - loss: 0.4245 - acc: 0.8028 - val_loss: 0.3849 - val_acc: 0.8262\nEpoch 45/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4345 - acc: 0.7935Epoch 1/100\n20/20 [==============================] - 4s 219ms/step - loss: 0.4269 - acc: 0.7968 - val_loss: 0.4104 - val_acc: 0.8336\nEpoch 46/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4214 - acc: 0.8068Epoch 1/100\n20/20 [==============================] - 5s 269ms/step - loss: 0.4166 - acc: 0.8087 - val_loss: 0.4350 - val_acc: 0.8000\nEpoch 47/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5792 - acc: 0.7875Epoch 1/100\n20/20 [==============================] - 5s 234ms/step - loss: 0.5733 - acc: 0.7884 - val_loss: 0.4096 - val_acc: 0.8374\nEpoch 48/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3992 - acc: 0.8109Epoch 1/100\n20/20 [==============================] - 5s 239ms/step - loss: 0.3937 - acc: 0.8145 - val_loss: 0.3830 - val_acc: 0.8280\nEpoch 49/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4013 - acc: 0.8173Epoch 1/100\n20/20 [==============================] - 5s 232ms/step - loss: 0.3994 - acc: 0.8182 - val_loss: 0.3956 - val_acc: 0.8355\nEpoch 50/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4101 - acc: 0.8222Epoch 1/100\n20/20 [==============================] - 4s 224ms/step - loss: 0.4083 - acc: 0.8225 - val_loss: 0.3811 - val_acc: 0.8374\nEpoch 51/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3875 - acc: 0.8298Epoch 1/100\n20/20 [==============================] - 5s 251ms/step - loss: 0.3937 - acc: 0.8253 - val_loss: 0.3917 - val_acc: 0.8224\nEpoch 52/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4431 - acc: 0.8093Epoch 1/100\n20/20 [==============================] - 5s 240ms/step - loss: 0.4409 - acc: 0.8091 - val_loss: 0.3938 - val_acc: 0.8131\nEpoch 53/100\n19/20 [===========================>..] 
- ETA: 0s - loss: 0.3579 - acc: 0.8472Epoch 1/100\n20/20 [==============================] - 5s 232ms/step - loss: 0.3621 - acc: 0.8446 - val_loss: 0.3709 - val_acc: 0.8393\nEpoch 54/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3903 - acc: 0.8269Epoch 1/100\n20/20 [==============================] - 5s 238ms/step - loss: 0.3939 - acc: 0.8223 - val_loss: 0.4103 - val_acc: 0.8093\nEpoch 55/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3857 - acc: 0.8298Epoch 1/100\n20/20 [==============================] - 5s 236ms/step - loss: 0.3826 - acc: 0.8317 - val_loss: 0.4196 - val_acc: 0.8168\nEpoch 56/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3756 - acc: 0.8374Epoch 1/100\n20/20 [==============================] - 4s 214ms/step - loss: 0.3733 - acc: 0.8402 - val_loss: 0.4021 - val_acc: 0.8299\nEpoch 57/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3805 - acc: 0.8248Epoch 1/100\n20/20 [==============================] - 5s 259ms/step - loss: 0.3842 - acc: 0.8242 - val_loss: 0.5909 - val_acc: 0.6523\nEpoch 58/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3577 - acc: 0.8419Epoch 1/100\n20/20 [==============================] - 5s 234ms/step - loss: 0.3620 - acc: 0.8400 - val_loss: 0.5329 - val_acc: 0.7888\nEpoch 59/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4375 - acc: 0.8144Epoch 1/100\n20/20 [==============================] - 5s 237ms/step - loss: 0.4350 - acc: 0.8150 - val_loss: 0.3655 - val_acc: 0.8262\nEpoch 60/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3745 - acc: 0.8277Epoch 1/100\n20/20 [==============================] - 5s 230ms/step - loss: 0.3741 - acc: 0.8277 - val_loss: 0.4057 - val_acc: 0.8168\nEpoch 61/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3787 - acc: 0.8356Epoch 1/100\n20/20 [==============================] - 5s 237ms/step - loss: 0.3818 - acc: 0.8329 - val_loss: 0.3628 - val_acc: 0.8505\nEpoch 62/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3613 - acc: 0.8381Epoch 1/100\n20/20 [==============================] - 4s 216ms/step - loss: 0.3600 - acc: 0.8384 - val_loss: 0.3898 - val_acc: 0.8505\nEpoch 63/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4488 - acc: 0.8231Epoch 1/100\n20/20 [==============================] - 5s 268ms/step - loss: 0.4430 - acc: 0.8250 - val_loss: 0.3513 - val_acc: 0.8486\nEpoch 64/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3567 - acc: 0.8438Epoch 1/100\n20/20 [==============================] - 5s 229ms/step - loss: 0.3542 - acc: 0.8442 - val_loss: 0.3362 - val_acc: 0.8542\nEpoch 65/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3669 - acc: 0.8452Epoch 1/100\n20/20 [==============================] - 5s 233ms/step - loss: 0.3687 - acc: 0.8451 - val_loss: 0.3728 - val_acc: 0.8393\nEpoch 66/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3534 - acc: 0.8511Epoch 1/100\n20/20 [==============================] - 5s 235ms/step - loss: 0.3566 - acc: 0.8491 - val_loss: 0.4154 - val_acc: 0.8131\nEpoch 67/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3679 - acc: 0.8381Epoch 1/100\n20/20 [==============================] - 5s 229ms/step - loss: 0.3660 - acc: 0.8408 - val_loss: 0.4034 - val_acc: 0.8318\nEpoch 68/100\n19/20 [===========================>..] 
- ETA: 0s - loss: 0.4109 - acc: 0.8294Epoch 1/100\n20/20 [==============================] - 5s 256ms/step - loss: 0.4047 - acc: 0.8305 - val_loss: 0.4102 - val_acc: 0.8093\nEpoch 69/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3623 - acc: 0.8440Epoch 1/100\n20/20 [==============================] - 5s 240ms/step - loss: 0.3593 - acc: 0.8440 - val_loss: 0.4397 - val_acc: 0.8168\nEpoch 70/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3488 - acc: 0.8481Epoch 1/100\n20/20 [==============================] - 5s 234ms/step - loss: 0.3498 - acc: 0.8471 - val_loss: 0.6133 - val_acc: 0.7140\nEpoch 71/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3383 - acc: 0.8519Epoch 1/100\n20/20 [==============================] - 5s 232ms/step - loss: 0.3420 - acc: 0.8491 - val_loss: 0.4208 - val_acc: 0.8187\nEpoch 72/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3660 - acc: 0.8417Epoch 1/100\n20/20 [==============================] - 5s 233ms/step - loss: 0.3622 - acc: 0.8442 - val_loss: 0.3601 - val_acc: 0.8710\nEpoch 73/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3369 - acc: 0.8573Epoch 1/100\n20/20 [==============================] - 4s 215ms/step - loss: 0.3323 - acc: 0.8606 - val_loss: 0.3743 - val_acc: 0.8579\nEpoch 74/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3617 - acc: 0.8402Epoch 1/100\n20/20 [==============================] - 5s 257ms/step - loss: 0.3554 - acc: 0.8436 - val_loss: 0.4357 - val_acc: 0.8486\nEpoch 75/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3489 - acc: 0.8511Epoch 1/100\n20/20 [==============================] - 5s 233ms/step - loss: 0.3430 - acc: 0.8531 - val_loss: 0.4123 - val_acc: 0.8617\nEpoch 76/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3599 - acc: 0.8395Epoch 1/100\n20/20 [==============================] - 5s 233ms/step - loss: 0.3557 - acc: 0.8414 - val_loss: 0.4044 - val_acc: 0.8523\nEpoch 77/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3501 - acc: 0.8436Epoch 1/100\n20/20 [==============================] - 5s 232ms/step - loss: 0.3504 - acc: 0.8444 - val_loss: 0.3860 - val_acc: 0.8486\nEpoch 78/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3030 - acc: 0.8828Epoch 1/100\n20/20 [==============================] - 5s 230ms/step - loss: 0.3034 - acc: 0.8816 - val_loss: 0.4027 - val_acc: 0.8486\nEpoch 79/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3393 - acc: 0.8518Epoch 1/100\n20/20 [==============================] - 4s 219ms/step - loss: 0.3398 - acc: 0.8534 - val_loss: 0.3906 - val_acc: 0.8617\nEpoch 80/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3399 - acc: 0.8569Epoch 1/100\n20/20 [==============================] - 5s 269ms/step - loss: 0.3385 - acc: 0.8582 - val_loss: 0.3832 - val_acc: 0.8579\nEpoch 81/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3255 - acc: 0.8569Epoch 1/100\n20/20 [==============================] - 5s 236ms/step - loss: 0.3231 - acc: 0.8594 - val_loss: 0.3829 - val_acc: 0.8822\nEpoch 82/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3409 - acc: 0.8557Epoch 1/100\n20/20 [==============================] - 5s 234ms/step - loss: 0.3449 - acc: 0.8519 - val_loss: 0.4480 - val_acc: 0.8243\nEpoch 83/100\n19/20 [===========================>..] 
- ETA: 0s - loss: 0.3246 - acc: 0.8679Epoch 1/100\n20/20 [==============================] - 5s 229ms/step - loss: 0.3254 - acc: 0.8683 - val_loss: 0.4085 - val_acc: 0.8150\nEpoch 84/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3117 - acc: 0.8723Epoch 1/100\n20/20 [==============================] - 4s 225ms/step - loss: 0.3185 - acc: 0.8685 - val_loss: 0.3537 - val_acc: 0.8710\nEpoch 85/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3193 - acc: 0.8602Epoch 1/100\n20/20 [==============================] - 5s 251ms/step - loss: 0.3197 - acc: 0.8598 - val_loss: 0.3961 - val_acc: 0.8393\nEpoch 86/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3219 - acc: 0.8652Epoch 1/100\n20/20 [==============================] - 5s 246ms/step - loss: 0.3216 - acc: 0.8653 - val_loss: 0.3315 - val_acc: 0.8804\nEpoch 87/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3190 - acc: 0.8598Epoch 1/100\n20/20 [==============================] - 5s 233ms/step - loss: 0.3175 - acc: 0.8594 - val_loss: 0.3900 - val_acc: 0.8449\nEpoch 88/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3183 - acc: 0.8709Epoch 1/100\n20/20 [==============================] - 5s 228ms/step - loss: 0.3163 - acc: 0.8715 - val_loss: 0.3153 - val_acc: 0.8804\nEpoch 89/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3150 - acc: 0.8660Epoch 1/100\n20/20 [==============================] - 5s 235ms/step - loss: 0.3139 - acc: 0.8672 - val_loss: 0.3063 - val_acc: 0.8897\nEpoch 90/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3040 - acc: 0.8823Epoch 1/100\n20/20 [==============================] - 4s 212ms/step - loss: 0.3035 - acc: 0.8839 - val_loss: 0.3506 - val_acc: 0.8579\nEpoch 91/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3158 - acc: 0.8694Epoch 1/100\n20/20 [==============================] - 5s 257ms/step - loss: 0.3148 - acc: 0.8697 - val_loss: 0.3642 - val_acc: 0.8542\nEpoch 92/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3233 - acc: 0.8654Epoch 1/100\n20/20 [==============================] - 5s 230ms/step - loss: 0.3215 - acc: 0.8667 - val_loss: 0.3690 - val_acc: 0.8542\nEpoch 93/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.2902 - acc: 0.8816Epoch 1/100\n20/20 [==============================] - 5s 246ms/step - loss: 0.3056 - acc: 0.8764 - val_loss: 0.4084 - val_acc: 0.8393\nEpoch 94/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3032 - acc: 0.8740Epoch 1/100\n20/20 [==============================] - 5s 238ms/step - loss: 0.3037 - acc: 0.8733 - val_loss: 0.5459 - val_acc: 0.8112\nEpoch 95/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3322 - acc: 0.8528Epoch 1/100\n20/20 [==============================] - 5s 235ms/step - loss: 0.3257 - acc: 0.8559 - val_loss: 0.3716 - val_acc: 0.8673\nEpoch 96/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3248 - acc: 0.8603Epoch 1/100\n20/20 [==============================] - 4s 218ms/step - loss: 0.3211 - acc: 0.8627 - val_loss: 0.3908 - val_acc: 0.8598\nEpoch 97/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.2982 - acc: 0.8698Epoch 1/100\n20/20 [==============================] - 5s 273ms/step - loss: 0.3050 - acc: 0.8661 - val_loss: 0.3954 - val_acc: 0.8561\nEpoch 98/100\n19/20 [===========================>..] 
- ETA: 0s - loss: 0.3072 - acc: 0.8686Epoch 1/100\n20/20 [==============================] - 5s 237ms/step - loss: 0.3114 - acc: 0.8657 - val_loss: 0.3397 - val_acc: 0.8766\nEpoch 99/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3287 - acc: 0.8514Epoch 1/100\n20/20 [==============================] - 5s 233ms/step - loss: 0.3250 - acc: 0.8526 - val_loss: 0.3591 - val_acc: 0.8692\nEpoch 100/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.2818 - acc: 0.8828Epoch 1/100\n20/20 [==============================] - 5s 233ms/step - loss: 0.2837 - acc: 0.8812 - val_loss: 0.4058 - val_acc: 0.8542\n"
],
[
"# Validation accuracy\n\nplt.figure(figsize=(7, 7), dpi=80, facecolor='w', edgecolor='k')\nplt.title('Validation accuracy comparison')\nplt.ylabel('Accuracy')\nplt.xlabel('Epoch')\nfor lr in lr_to_test:\n val_acc = histories[lr].history['val_acc']\n epochs = range(1, len(val_acc)+1)\n plt.plot(epochs, val_acc, label=(\"%s, lr=%f\" % (opt.__name__, lr)))\nplt.legend(loc='lower right')\n\n# Validation loss\n\nplt.figure(figsize=(7, 7), dpi=80, facecolor='w', edgecolor='k')\nplt.title('Validation loss comparison')\nplt.ylabel('Loss')\nplt.xlabel('Epoch')\nfor lr in lr_to_test:\n val_loss = histories[lr].history['val_loss']\n epochs = range(1, len(val_loss)+1)\n plt.plot(epochs, val_loss, label=(\"%s, lr=%f\" % (opt.__name__, lr)))\nplt.legend(loc='upper right');",
"_____no_output_____"
]
],
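[
[
"A quick numeric check to complement the plots above: a minimal sketch, assuming the `histories` dict and `lr_to_test` tuple defined in the training loop, that prints the best validation accuracy and the lowest validation loss reached for each learning rate.",
"_____no_output_____"
]
],
[
[
"# Summarize best validation metrics per learning rate\n# (sketch: relies on the `histories` dict and `lr_to_test` defined above)\nfor lr in lr_to_test:\n    best_acc = max(histories[lr].history['val_acc'])\n    best_loss = min(histories[lr].history['val_loss'])\n    print(\"lr=%.5f: best val_acc=%.4f, lowest val_loss=%.4f\" % (lr, best_acc, best_loss))",
"_____no_output_____"
]
],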
[
[
"**Result**: Values between 1e-4 and 1e-3 produce similar results. 1e-3 is more noisy, but converges a bit faster. On the other hand, 1e-5 represents an excessively low value.",
"_____no_output_____"
],
[
"## Adam",
"_____no_output_____"
]
],
[
[
"# Try Adam with different learning rates\n\nlr_to_test = (1e-5, 1e-4, 1e-3)\nopt = Adam\n\nhistories = {}\n\nfor lr in lr_to_test:\n print(\"Adam [lr = %.5f]: \" % lr)\n cnn = create_cnn()\n cnn.compile(\n optimizer=opt(learning_rate=lr),\n loss='binary_crossentropy',\n metrics=['accuracy'])\n histories[lr] = cnn.fit_generator(\n train_generator,\n steps_per_epoch=n_train_img // 128,\n epochs=100,\n validation_data=validation_generator,\n shuffle=True,\n verbose=1,\n initial_epoch=0)",
"Adam [lr = 0.00001]: \nEpoch 1/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6918 - acc: 0.5394Epoch 1/100\n20/20 [==============================] - 6s 297ms/step - loss: 0.6918 - acc: 0.5386 - val_loss: 0.6858 - val_acc: 0.5701\nEpoch 2/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6911 - acc: 0.5350Epoch 1/100\n20/20 [==============================] - 5s 252ms/step - loss: 0.6907 - acc: 0.5387 - val_loss: 0.6856 - val_acc: 0.5701\nEpoch 3/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6903 - acc: 0.5288Epoch 1/100\n20/20 [==============================] - 5s 236ms/step - loss: 0.6897 - acc: 0.5361 - val_loss: 0.6852 - val_acc: 0.5701\nEpoch 4/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6898 - acc: 0.5361Epoch 1/100\n20/20 [==============================] - 5s 240ms/step - loss: 0.6893 - acc: 0.5410 - val_loss: 0.6839 - val_acc: 0.5701\nEpoch 5/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6874 - acc: 0.5423Epoch 1/100\n20/20 [==============================] - 4s 223ms/step - loss: 0.6876 - acc: 0.5394 - val_loss: 0.6817 - val_acc: 0.5701\nEpoch 6/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6871 - acc: 0.5382Epoch 1/100\n20/20 [==============================] - 5s 271ms/step - loss: 0.6871 - acc: 0.5386 - val_loss: 0.6809 - val_acc: 0.5682\nEpoch 7/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6861 - acc: 0.5348Epoch 1/100\n20/20 [==============================] - 5s 239ms/step - loss: 0.6858 - acc: 0.5378 - val_loss: 0.6802 - val_acc: 0.5701\nEpoch 8/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6830 - acc: 0.5593Epoch 1/100\n20/20 [==============================] - 5s 240ms/step - loss: 0.6827 - acc: 0.5606 - val_loss: 0.6743 - val_acc: 0.5682\nEpoch 9/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6825 - acc: 0.5832Epoch 1/100\n20/20 [==============================] - 5s 237ms/step - loss: 0.6824 - acc: 0.5814 - val_loss: 0.6747 - val_acc: 0.5738\nEpoch 10/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6786 - acc: 0.5478Epoch 1/100\n20/20 [==============================] - 5s 240ms/step - loss: 0.6784 - acc: 0.5501 - val_loss: 0.6726 - val_acc: 0.5907\nEpoch 11/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6760 - acc: 0.5728Epoch 1/100\n20/20 [==============================] - 5s 227ms/step - loss: 0.6756 - acc: 0.5723 - val_loss: 0.6653 - val_acc: 0.5757\nEpoch 12/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6734 - acc: 0.5699Epoch 1/100\n20/20 [==============================] - 5s 274ms/step - loss: 0.6728 - acc: 0.5731 - val_loss: 0.6664 - val_acc: 0.6467\nEpoch 13/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6697 - acc: 0.6283Epoch 1/100\n20/20 [==============================] - 5s 247ms/step - loss: 0.6692 - acc: 0.6305 - val_loss: 0.6619 - val_acc: 0.6766\nEpoch 14/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6660 - acc: 0.6249Epoch 1/100\n20/20 [==============================] - 5s 243ms/step - loss: 0.6654 - acc: 0.6297 - val_loss: 0.6598 - val_acc: 0.7178\nEpoch 15/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6603 - acc: 0.6528Epoch 1/100\n20/20 [==============================] - 5s 243ms/step - loss: 0.6606 - acc: 0.6538 - val_loss: 0.6563 - val_acc: 0.7551\nEpoch 16/100\n19/20 [===========================>..] 
- ETA: 0s - loss: 0.6547 - acc: 0.6155Epoch 1/100\n20/20 [==============================] - 5s 232ms/step - loss: 0.6548 - acc: 0.6184 - val_loss: 0.6603 - val_acc: 0.6991\nEpoch 17/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6515 - acc: 0.6689Epoch 1/100\n20/20 [==============================] - 5s 255ms/step - loss: 0.6510 - acc: 0.6743 - val_loss: 0.6492 - val_acc: 0.7364\nEpoch 18/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6458 - acc: 0.6646Epoch 1/100\n20/20 [==============================] - 5s 243ms/step - loss: 0.6450 - acc: 0.6681 - val_loss: 0.6452 - val_acc: 0.7402\nEpoch 19/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6368 - acc: 0.6681Epoch 1/100\n20/20 [==============================] - 5s 242ms/step - loss: 0.6379 - acc: 0.6627 - val_loss: 0.6282 - val_acc: 0.6654\nEpoch 20/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6316 - acc: 0.6990Epoch 1/100\n20/20 [==============================] - 5s 245ms/step - loss: 0.6313 - acc: 0.6961 - val_loss: 0.6222 - val_acc: 0.6822\nEpoch 21/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6262 - acc: 0.6761Epoch 1/100\n20/20 [==============================] - 5s 235ms/step - loss: 0.6257 - acc: 0.6791 - val_loss: 0.6377 - val_acc: 0.6804\nEpoch 22/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6237 - acc: 0.6917Epoch 1/100\n20/20 [==============================] - 5s 225ms/step - loss: 0.6230 - acc: 0.6911 - val_loss: 0.6147 - val_acc: 0.7383\nEpoch 23/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6150 - acc: 0.6959Epoch 1/100\n20/20 [==============================] - 5s 264ms/step - loss: 0.6154 - acc: 0.6927 - val_loss: 0.6043 - val_acc: 0.6598\nEpoch 24/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6089 - acc: 0.7125Epoch 1/100\n20/20 [==============================] - 5s 242ms/step - loss: 0.6080 - acc: 0.7141 - val_loss: 0.6156 - val_acc: 0.7495\nEpoch 25/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6066 - acc: 0.6982Epoch 1/100\n20/20 [==============================] - 5s 243ms/step - loss: 0.6037 - acc: 0.7055 - val_loss: 0.6119 - val_acc: 0.7421\nEpoch 26/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6014 - acc: 0.7168Epoch 1/100\n20/20 [==============================] - 5s 238ms/step - loss: 0.6004 - acc: 0.7185 - val_loss: 0.6082 - val_acc: 0.7383\nEpoch 27/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5901 - acc: 0.7213Epoch 1/100\n20/20 [==============================] - 5s 241ms/step - loss: 0.5896 - acc: 0.7240 - val_loss: 0.5805 - val_acc: 0.7346\nEpoch 28/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5999 - acc: 0.6884Epoch 1/100\n20/20 [==============================] - 5s 227ms/step - loss: 0.5989 - acc: 0.6915 - val_loss: 0.5783 - val_acc: 0.7533\nEpoch 29/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5921 - acc: 0.6984Epoch 1/100\n20/20 [==============================] - 6s 277ms/step - loss: 0.5915 - acc: 0.7006 - val_loss: 0.5908 - val_acc: 0.7271\nEpoch 30/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5902 - acc: 0.7134Epoch 1/100\n20/20 [==============================] - 5s 243ms/step - loss: 0.5899 - acc: 0.7145 - val_loss: 0.5785 - val_acc: 0.7626\nEpoch 31/100\n19/20 [===========================>..] 
- ETA: 0s - loss: 0.5731 - acc: 0.7305Epoch 1/100\n20/20 [==============================] - 5s 242ms/step - loss: 0.5751 - acc: 0.7263 - val_loss: 0.5589 - val_acc: 0.7439\nEpoch 32/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5720 - acc: 0.7316Epoch 1/100\n20/20 [==============================] - 5s 243ms/step - loss: 0.5739 - acc: 0.7277 - val_loss: 0.5520 - val_acc: 0.7383\nEpoch 33/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5753 - acc: 0.7359Epoch 1/100\n20/20 [==============================] - 5s 229ms/step - loss: 0.5735 - acc: 0.7398 - val_loss: 0.5514 - val_acc: 0.7720\nEpoch 34/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5599 - acc: 0.7443Epoch 1/100\n20/20 [==============================] - 5s 259ms/step - loss: 0.5618 - acc: 0.7426 - val_loss: 0.5538 - val_acc: 0.7813\nEpoch 35/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5624 - acc: 0.7413Epoch 1/100\n20/20 [==============================] - 5s 244ms/step - loss: 0.5609 - acc: 0.7402 - val_loss: 0.5410 - val_acc: 0.7421\nEpoch 36/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5604 - acc: 0.7523Epoch 1/100\n20/20 [==============================] - 5s 238ms/step - loss: 0.5600 - acc: 0.7510 - val_loss: 0.5399 - val_acc: 0.7869\nEpoch 37/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5523 - acc: 0.7479Epoch 1/100\n20/20 [==============================] - 5s 240ms/step - loss: 0.5517 - acc: 0.7505 - val_loss: 0.5378 - val_acc: 0.7682\nEpoch 38/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5512 - acc: 0.7489Epoch 1/100\n20/20 [==============================] - 5s 239ms/step - loss: 0.5529 - acc: 0.7450 - val_loss: 0.5426 - val_acc: 0.7664\nEpoch 39/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5452 - acc: 0.7580Epoch 1/100\n20/20 [==============================] - 5s 226ms/step - loss: 0.5440 - acc: 0.7600 - val_loss: 0.5391 - val_acc: 0.7869\nEpoch 40/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5427 - acc: 0.7509Epoch 1/100\n20/20 [==============================] - 5s 270ms/step - loss: 0.5411 - acc: 0.7541 - val_loss: 0.5370 - val_acc: 0.7813\nEpoch 41/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5400 - acc: 0.7597Epoch 1/100\n20/20 [==============================] - 5s 235ms/step - loss: 0.5383 - acc: 0.7592 - val_loss: 0.5298 - val_acc: 0.7477\nEpoch 42/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5396 - acc: 0.7635Epoch 1/100\n20/20 [==============================] - 5s 245ms/step - loss: 0.5398 - acc: 0.7636 - val_loss: 0.5263 - val_acc: 0.7832\nEpoch 43/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5401 - acc: 0.7515Epoch 1/100\n20/20 [==============================] - 5s 241ms/step - loss: 0.5376 - acc: 0.7546 - val_loss: 0.5274 - val_acc: 0.7850\nEpoch 44/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5325 - acc: 0.7589Epoch 1/100\n20/20 [==============================] - 5s 235ms/step - loss: 0.5307 - acc: 0.7592 - val_loss: 0.5145 - val_acc: 0.7738\nEpoch 45/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5266 - acc: 0.7580Epoch 1/100\n20/20 [==============================] - 4s 221ms/step - loss: 0.5256 - acc: 0.7592 - val_loss: 0.5178 - val_acc: 0.7888\nEpoch 46/100\n19/20 [===========================>..] 
- ETA: 0s - loss: 0.5253 - acc: 0.7680Epoch 1/100\n20/20 [==============================] - 6s 277ms/step - loss: 0.5279 - acc: 0.7628 - val_loss: 0.5285 - val_acc: 0.7346\nEpoch 47/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5175 - acc: 0.7622Epoch 1/100\n20/20 [==============================] - 5s 240ms/step - loss: 0.5207 - acc: 0.7570 - val_loss: 0.5271 - val_acc: 0.7589\nEpoch 48/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5294 - acc: 0.7525Epoch 1/100\n20/20 [==============================] - 5s 248ms/step - loss: 0.5285 - acc: 0.7543 - val_loss: 0.5253 - val_acc: 0.7776\nEpoch 49/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5251 - acc: 0.7650Epoch 1/100\n20/20 [==============================] - 5s 236ms/step - loss: 0.5230 - acc: 0.7659 - val_loss: 0.5180 - val_acc: 0.7645\nEpoch 50/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5207 - acc: 0.7584Epoch 1/100\n20/20 [==============================] - 5s 230ms/step - loss: 0.5218 - acc: 0.7572 - val_loss: 0.5185 - val_acc: 0.7738\nEpoch 51/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5197 - acc: 0.7610Epoch 1/100\n20/20 [==============================] - 5s 257ms/step - loss: 0.5190 - acc: 0.7624 - val_loss: 0.5008 - val_acc: 0.7850\nEpoch 52/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5094 - acc: 0.7789Epoch 1/100\n20/20 [==============================] - 5s 252ms/step - loss: 0.5094 - acc: 0.7786 - val_loss: 0.5091 - val_acc: 0.7850\nEpoch 53/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5162 - acc: 0.7610Epoch 1/100\n20/20 [==============================] - 5s 244ms/step - loss: 0.5176 - acc: 0.7610 - val_loss: 0.5044 - val_acc: 0.7869\nEpoch 54/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5064 - acc: 0.7781Epoch 1/100\n20/20 [==============================] - 5s 241ms/step - loss: 0.5039 - acc: 0.7802 - val_loss: 0.5102 - val_acc: 0.7907\nEpoch 55/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5086 - acc: 0.7714Epoch 1/100\n20/20 [==============================] - 5s 240ms/step - loss: 0.5086 - acc: 0.7711 - val_loss: 0.5030 - val_acc: 0.7888\nEpoch 56/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4983 - acc: 0.7764Epoch 1/100\n20/20 [==============================] - 4s 224ms/step - loss: 0.4998 - acc: 0.7758 - val_loss: 0.5045 - val_acc: 0.7757\nEpoch 57/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5043 - acc: 0.7760Epoch 1/100\n20/20 [==============================] - 5s 272ms/step - loss: 0.5019 - acc: 0.7774 - val_loss: 0.4882 - val_acc: 0.7907\nEpoch 58/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4974 - acc: 0.7877Epoch 1/100\n20/20 [==============================] - 5s 234ms/step - loss: 0.4973 - acc: 0.7869 - val_loss: 0.5104 - val_acc: 0.7776\nEpoch 59/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5105 - acc: 0.7540Epoch 1/100\n20/20 [==============================] - 5s 237ms/step - loss: 0.5073 - acc: 0.7562 - val_loss: 0.4877 - val_acc: 0.7607\nEpoch 60/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4942 - acc: 0.7837Epoch 1/100\n20/20 [==============================] - 5s 244ms/step - loss: 0.4961 - acc: 0.7834 - val_loss: 0.5096 - val_acc: 0.7850\nEpoch 61/100\n19/20 [===========================>..] 
- ETA: 0s - loss: 0.4978 - acc: 0.7784Epoch 1/100\n20/20 [==============================] - 5s 238ms/step - loss: 0.4961 - acc: 0.7797 - val_loss: 0.5192 - val_acc: 0.7682\nEpoch 62/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4923 - acc: 0.7684Epoch 1/100\n20/20 [==============================] - 4s 220ms/step - loss: 0.4943 - acc: 0.7663 - val_loss: 0.5211 - val_acc: 0.7794\nEpoch 63/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4971 - acc: 0.7768Epoch 1/100\n20/20 [==============================] - 6s 277ms/step - loss: 0.4975 - acc: 0.7770 - val_loss: 0.5101 - val_acc: 0.7813\nEpoch 64/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4963 - acc: 0.7764Epoch 1/100\n20/20 [==============================] - 5s 243ms/step - loss: 0.4982 - acc: 0.7750 - val_loss: 0.4949 - val_acc: 0.7907\nEpoch 65/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4924 - acc: 0.7798Epoch 1/100\n20/20 [==============================] - 5s 241ms/step - loss: 0.4938 - acc: 0.7787 - val_loss: 0.4969 - val_acc: 0.7981\nEpoch 66/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4915 - acc: 0.7796Epoch 1/100\n20/20 [==============================] - 5s 248ms/step - loss: 0.4905 - acc: 0.7812 - val_loss: 0.5012 - val_acc: 0.7869\nEpoch 67/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4869 - acc: 0.7789Epoch 1/100\n20/20 [==============================] - 5s 229ms/step - loss: 0.4846 - acc: 0.7794 - val_loss: 0.4943 - val_acc: 0.7645\nEpoch 68/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4861 - acc: 0.7790Epoch 1/100\n20/20 [==============================] - 5s 255ms/step - loss: 0.4867 - acc: 0.7783 - val_loss: 0.4985 - val_acc: 0.7794\nEpoch 69/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4850 - acc: 0.7843Epoch 1/100\n20/20 [==============================] - 5s 241ms/step - loss: 0.4841 - acc: 0.7846 - val_loss: 0.4866 - val_acc: 0.8056\nEpoch 70/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4866 - acc: 0.7826Epoch 1/100\n20/20 [==============================] - 5s 236ms/step - loss: 0.4907 - acc: 0.7815 - val_loss: 0.4983 - val_acc: 0.7907\nEpoch 71/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4724 - acc: 0.7881Epoch 1/100\n20/20 [==============================] - 5s 241ms/step - loss: 0.4714 - acc: 0.7881 - val_loss: 0.4622 - val_acc: 0.7963\nEpoch 72/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4810 - acc: 0.7826Epoch 1/100\n20/20 [==============================] - 5s 239ms/step - loss: 0.4833 - acc: 0.7790 - val_loss: 0.4617 - val_acc: 0.7925\nEpoch 73/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4743 - acc: 0.7856Epoch 1/100\n20/20 [==============================] - 4s 223ms/step - loss: 0.4708 - acc: 0.7869 - val_loss: 0.4633 - val_acc: 0.7981\nEpoch 74/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4845 - acc: 0.7810Epoch 1/100\n20/20 [==============================] - 5s 272ms/step - loss: 0.4877 - acc: 0.7798 - val_loss: 0.4664 - val_acc: 0.7944\nEpoch 75/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4766 - acc: 0.7902Epoch 1/100\n20/20 [==============================] - 5s 241ms/step - loss: 0.4753 - acc: 0.7917 - val_loss: 0.4838 - val_acc: 0.7869\nEpoch 76/100\n19/20 [===========================>..] 
- ETA: 0s - loss: 0.4760 - acc: 0.7811Epoch 1/100\n20/20 [==============================] - 5s 235ms/step - loss: 0.4793 - acc: 0.7799 - val_loss: 0.4572 - val_acc: 0.8037\nEpoch 77/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4715 - acc: 0.7856Epoch 1/100\n20/20 [==============================] - 5s 238ms/step - loss: 0.4671 - acc: 0.7889 - val_loss: 0.4674 - val_acc: 0.8075\nEpoch 78/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4760 - acc: 0.7881Epoch 1/100\n20/20 [==============================] - 5s 238ms/step - loss: 0.4752 - acc: 0.7901 - val_loss: 0.4587 - val_acc: 0.7869\nEpoch 79/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4754 - acc: 0.7835Epoch 1/100\n20/20 [==============================] - 4s 224ms/step - loss: 0.4773 - acc: 0.7842 - val_loss: 0.4659 - val_acc: 0.7757\nEpoch 80/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4654 - acc: 0.7897Epoch 1/100\n20/20 [==============================] - 6s 279ms/step - loss: 0.4647 - acc: 0.7897 - val_loss: 0.4426 - val_acc: 0.7869\nEpoch 81/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4696 - acc: 0.7835Epoch 1/100\n20/20 [==============================] - 5s 242ms/step - loss: 0.4673 - acc: 0.7846 - val_loss: 0.4477 - val_acc: 0.8056\nEpoch 82/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4504 - acc: 0.8081Epoch 1/100\n20/20 [==============================] - 5s 238ms/step - loss: 0.4509 - acc: 0.8059 - val_loss: 0.4419 - val_acc: 0.8000\nEpoch 83/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4673 - acc: 0.8002Epoch 1/100\n20/20 [==============================] - 5s 243ms/step - loss: 0.4668 - acc: 0.8024 - val_loss: 0.4892 - val_acc: 0.7944\nEpoch 84/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4585 - acc: 0.7930Epoch 1/100\n20/20 [==============================] - 5s 228ms/step - loss: 0.4602 - acc: 0.7940 - val_loss: 0.4747 - val_acc: 0.8037\nEpoch 85/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4671 - acc: 0.7997Epoch 1/100\n20/20 [==============================] - 5s 252ms/step - loss: 0.4642 - acc: 0.8024 - val_loss: 0.4768 - val_acc: 0.8000\nEpoch 86/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4630 - acc: 0.7964Epoch 1/100\n20/20 [==============================] - 5s 241ms/step - loss: 0.4646 - acc: 0.7960 - val_loss: 0.4634 - val_acc: 0.8037\nEpoch 87/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4496 - acc: 0.8095Epoch 1/100\n20/20 [==============================] - 5s 243ms/step - loss: 0.4508 - acc: 0.8068 - val_loss: 0.4691 - val_acc: 0.8000\nEpoch 88/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4602 - acc: 0.8043Epoch 1/100\n20/20 [==============================] - 5s 236ms/step - loss: 0.4578 - acc: 0.8067 - val_loss: 0.4609 - val_acc: 0.7963\nEpoch 89/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4423 - acc: 0.8039Epoch 1/100\n20/20 [==============================] - 5s 248ms/step - loss: 0.4422 - acc: 0.8035 - val_loss: 0.4711 - val_acc: 0.8112\nEpoch 90/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4555 - acc: 0.7964Epoch 1/100\n20/20 [==============================] - 4s 222ms/step - loss: 0.4570 - acc: 0.7956 - val_loss: 0.4940 - val_acc: 0.7907\nEpoch 91/100\n19/20 [===========================>..] 
- ETA: 0s - loss: 0.4552 - acc: 0.8077Epoch 1/100\n20/20 [==============================] - 5s 266ms/step - loss: 0.4612 - acc: 0.8020 - val_loss: 0.4394 - val_acc: 0.7907\nEpoch 92/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4480 - acc: 0.8065Epoch 1/100\n20/20 [==============================] - 5s 238ms/step - loss: 0.4496 - acc: 0.8060 - val_loss: 0.4311 - val_acc: 0.7907\nEpoch 93/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4381 - acc: 0.8109Epoch 1/100\n20/20 [==============================] - 5s 243ms/step - loss: 0.4418 - acc: 0.8094 - val_loss: 0.4217 - val_acc: 0.8056\nEpoch 94/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4533 - acc: 0.7977Epoch 1/100\n20/20 [==============================] - 5s 238ms/step - loss: 0.4546 - acc: 0.7972 - val_loss: 0.4266 - val_acc: 0.8075\nEpoch 95/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4399 - acc: 0.8112Epoch 1/100\n20/20 [==============================] - 5s 234ms/step - loss: 0.4376 - acc: 0.8129 - val_loss: 0.4295 - val_acc: 0.7963\nEpoch 96/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4427 - acc: 0.8114Epoch 1/100\n20/20 [==============================] - 4s 221ms/step - loss: 0.4418 - acc: 0.8115 - val_loss: 0.4174 - val_acc: 0.8075\nEpoch 97/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4382 - acc: 0.8135Epoch 1/100\n20/20 [==============================] - 6s 275ms/step - loss: 0.4390 - acc: 0.8127 - val_loss: 0.4227 - val_acc: 0.8037\nEpoch 98/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4452 - acc: 0.8127Epoch 1/100\n20/20 [==============================] - 5s 243ms/step - loss: 0.4403 - acc: 0.8154 - val_loss: 0.4259 - val_acc: 0.7963\nEpoch 99/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4431 - acc: 0.8110Epoch 1/100\n20/20 [==============================] - 5s 242ms/step - loss: 0.4395 - acc: 0.8127 - val_loss: 0.4117 - val_acc: 0.8150\nEpoch 100/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4544 - acc: 0.7968Epoch 1/100\n20/20 [==============================] - 5s 235ms/step - loss: 0.4499 - acc: 0.8008 - val_loss: 0.4149 - val_acc: 0.8093\nAdam [lr = 0.00010]: \nEpoch 1/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6931 - acc: 0.5215Epoch 1/100\n20/20 [==============================] - 6s 304ms/step - loss: 0.6928 - acc: 0.5229 - val_loss: 0.6846 - val_acc: 0.5701\nEpoch 2/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6893 - acc: 0.5350Epoch 1/100\n20/20 [==============================] - 5s 236ms/step - loss: 0.6891 - acc: 0.5406 - val_loss: 0.6846 - val_acc: 0.6748\nEpoch 3/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6808 - acc: 0.5876Epoch 1/100\n20/20 [==============================] - 5s 240ms/step - loss: 0.6808 - acc: 0.5847 - val_loss: 0.6690 - val_acc: 0.5701\nEpoch 4/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6773 - acc: 0.5665Epoch 1/100\n20/20 [==============================] - 5s 234ms/step - loss: 0.6766 - acc: 0.5695 - val_loss: 0.6682 - val_acc: 0.6374\nEpoch 5/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6513 - acc: 0.6295Epoch 1/100\n20/20 [==============================] - 5s 228ms/step - loss: 0.6514 - acc: 0.6262 - val_loss: 0.6406 - val_acc: 0.6673\nEpoch 6/100\n19/20 [===========================>..] 
- ETA: 0s - loss: 0.6280 - acc: 0.6719Epoch 1/100\n20/20 [==============================] - 5s 272ms/step - loss: 0.6272 - acc: 0.6727 - val_loss: 0.6500 - val_acc: 0.6168\nEpoch 7/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.6077 - acc: 0.6854Epoch 1/100\n20/20 [==============================] - 5s 233ms/step - loss: 0.6067 - acc: 0.6899 - val_loss: 0.6012 - val_acc: 0.7402\nEpoch 8/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5688 - acc: 0.7267Epoch 1/100\n20/20 [==============================] - 5s 239ms/step - loss: 0.5688 - acc: 0.7283 - val_loss: 0.5610 - val_acc: 0.7720\nEpoch 9/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5600 - acc: 0.7355Epoch 1/100\n20/20 [==============================] - 5s 239ms/step - loss: 0.5627 - acc: 0.7343 - val_loss: 0.5542 - val_acc: 0.7645\nEpoch 10/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5401 - acc: 0.7434Epoch 1/100\n20/20 [==============================] - 5s 235ms/step - loss: 0.5386 - acc: 0.7430 - val_loss: 0.5535 - val_acc: 0.6935\nEpoch 11/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5226 - acc: 0.7555Epoch 1/100\n20/20 [==============================] - 4s 223ms/step - loss: 0.5235 - acc: 0.7545 - val_loss: 0.5117 - val_acc: 0.7551\nEpoch 12/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.5034 - acc: 0.7651Epoch 1/100\n20/20 [==============================] - 5s 274ms/step - loss: 0.5033 - acc: 0.7644 - val_loss: 0.4939 - val_acc: 0.7551\nEpoch 13/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4888 - acc: 0.7739Epoch 1/100\n20/20 [==============================] - 5s 240ms/step - loss: 0.4892 - acc: 0.7735 - val_loss: 0.4680 - val_acc: 0.7757\nEpoch 14/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4671 - acc: 0.7885Epoch 1/100\n20/20 [==============================] - 5s 241ms/step - loss: 0.4663 - acc: 0.7869 - val_loss: 0.4736 - val_acc: 0.7925\nEpoch 15/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4623 - acc: 0.7854Epoch 1/100\n20/20 [==============================] - 5s 245ms/step - loss: 0.4610 - acc: 0.7850 - val_loss: 0.4574 - val_acc: 0.7794\nEpoch 16/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4539 - acc: 0.7914Epoch 1/100\n20/20 [==============================] - 5s 228ms/step - loss: 0.4520 - acc: 0.7909 - val_loss: 0.4463 - val_acc: 0.7907\nEpoch 17/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4392 - acc: 0.7918Epoch 1/100\n20/20 [==============================] - 5s 262ms/step - loss: 0.4431 - acc: 0.7889 - val_loss: 0.4387 - val_acc: 0.8000\nEpoch 18/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4128 - acc: 0.8114Epoch 1/100\n20/20 [==============================] - 5s 245ms/step - loss: 0.4122 - acc: 0.8115 - val_loss: 0.4356 - val_acc: 0.8019\nEpoch 19/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4275 - acc: 0.8060Epoch 1/100\n20/20 [==============================] - 5s 236ms/step - loss: 0.4268 - acc: 0.8071 - val_loss: 0.4226 - val_acc: 0.8206\nEpoch 20/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.4105 - acc: 0.8206Epoch 1/100\n20/20 [==============================] - 5s 245ms/step - loss: 0.4101 - acc: 0.8218 - val_loss: 0.4340 - val_acc: 0.8150\nEpoch 21/100\n19/20 [===========================>..] 
- ETA: 0s - loss: 0.4071 - acc: 0.8133Epoch 1/100\n20/20 [==============================] - 5s 230ms/step - loss: 0.4056 - acc: 0.8137 - val_loss: 0.4046 - val_acc: 0.8318\nEpoch 22/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3902 - acc: 0.8302Epoch 1/100\n20/20 [==============================] - 4s 216ms/step - loss: 0.3886 - acc: 0.8325 - val_loss: 0.4111 - val_acc: 0.8224\nEpoch 23/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3886 - acc: 0.8294Epoch 1/100\n20/20 [==============================] - 5s 261ms/step - loss: 0.3879 - acc: 0.8297 - val_loss: 0.4124 - val_acc: 0.8187\nEpoch 24/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3853 - acc: 0.8331Epoch 1/100\n20/20 [==============================] - 5s 245ms/step - loss: 0.3839 - acc: 0.8364 - val_loss: 0.4467 - val_acc: 0.8056\nEpoch 25/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3715 - acc: 0.8352Epoch 1/100\n20/20 [==============================] - 5s 245ms/step - loss: 0.3738 - acc: 0.8345 - val_loss: 0.4022 - val_acc: 0.8280\nEpoch 26/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3595 - acc: 0.8421Epoch 1/100\n20/20 [==============================] - 5s 241ms/step - loss: 0.3630 - acc: 0.8373 - val_loss: 0.3749 - val_acc: 0.8206\nEpoch 27/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3659 - acc: 0.8415Epoch 1/100\n20/20 [==============================] - 5s 239ms/step - loss: 0.3694 - acc: 0.8396 - val_loss: 0.3613 - val_acc: 0.8411\nEpoch 28/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3621 - acc: 0.8427Epoch 1/100\n20/20 [==============================] - 4s 223ms/step - loss: 0.3619 - acc: 0.8436 - val_loss: 0.3843 - val_acc: 0.8299\nEpoch 29/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3559 - acc: 0.8348Epoch 1/100\n20/20 [==============================] - 6s 277ms/step - loss: 0.3591 - acc: 0.8356 - val_loss: 0.3645 - val_acc: 0.8449\nEpoch 30/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3499 - acc: 0.8506Epoch 1/100\n20/20 [==============================] - 5s 236ms/step - loss: 0.3507 - acc: 0.8510 - val_loss: 0.3616 - val_acc: 0.8430\nEpoch 31/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3571 - acc: 0.8469Epoch 1/100\n20/20 [==============================] - 5s 245ms/step - loss: 0.3607 - acc: 0.8444 - val_loss: 0.3537 - val_acc: 0.8411\nEpoch 32/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3462 - acc: 0.8540Epoch 1/100\n20/20 [==============================] - 5s 242ms/step - loss: 0.3462 - acc: 0.8547 - val_loss: 0.3615 - val_acc: 0.8374\nEpoch 33/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3479 - acc: 0.8477Epoch 1/100\n20/20 [==============================] - 4s 224ms/step - loss: 0.3516 - acc: 0.8479 - val_loss: 0.3682 - val_acc: 0.8374\nEpoch 34/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3480 - acc: 0.8520Epoch 1/100\n20/20 [==============================] - 5s 254ms/step - loss: 0.3532 - acc: 0.8487 - val_loss: 0.3676 - val_acc: 0.8355\nEpoch 35/100\n19/20 [===========================>..] - ETA: 0s - loss: 0.3448 - acc: 0.8486Epoch 1/100\n20/20 [==============================] - 5s 243ms/step - loss: 0.3420 - acc: 0.8507 - val_loss: 0.3764 - val_acc: 0.8206\nEpoch 36/100\n19/20 [===========================>..] 
- ETA: 0s\n[verbose per-epoch Keras progress-bar output trimmed: the carriage-return updates were captured as interleaved 'Epoch 1/100' fragments, garbling the log]\nContinuation of the run above, epochs 36-100: best val_acc 0.8860 (epochs 88 and 92); Epoch 100/100: loss: 0.2926 - acc: 0.8813 - val_loss: 0.3389 - val_acc: 0.8692\nAdam [lr = 0.00100]: \nEpochs 1-100: val_acc rises from 0.4299 (epoch 1) to a best of 0.8692 (epoch 86); Epoch 100/100: loss: 0.3610 - acc: 0.8459 - val_loss: 0.3380 - val_acc: 0.8654\n"
],
[
"# Validation accuracy\n\nplt.figure(figsize=(7, 7), dpi=80, facecolor='w', edgecolor='k')\nplt.title('Validation accuracy comparison')\nplt.ylabel('Accuracy')\nplt.xlabel('Epoch')\nfor lr in lr_to_test:\n val_acc = histories[lr].history['val_acc']\n epochs = range(1, len(val_acc)+1)\n plt.plot(epochs, val_acc, label=(\"%s, lr=%f\" % (opt.__name__, lr)))\nplt.legend(loc='lower right')\n\n# Validation loss\n\nplt.figure(figsize=(7, 7), dpi=80, facecolor='w', edgecolor='k')\nplt.title('Validation loss comparison')\nplt.ylabel('Loss')\nplt.xlabel('Epoch')\nfor lr in lr_to_test:\n val_loss = histories[lr].history['val_loss']\n epochs = range(1, len(val_loss)+1)\n plt.plot(epochs, val_loss, label=(\"%s, lr=%f\" % (opt.__name__, lr)))\nplt.legend(loc='upper right');",
"_____no_output_____"
]
],
[
[
"**Result**: 1e-5 is definitely a bad choice, as the network converges very slowly. Interestingly, 1e-4 produces better results than 1e-3.",
"_____no_output_____"
]
],
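[
[
"One way to make this comparison concrete is a minimal sketch (assuming the `histories` and `lr_to_test` objects from the training loop above are still in scope) that ranks each learning rate by its best epoch validation accuracy:",
"_____no_output_____"
]
],
[
[
"# Minimal sketch: rank the tested learning rates by best epoch validation accuracy.\n# Assumes `histories` and `lr_to_test` from the training loop above are still in scope.\nbest_val_accs = {lr: max(histories[lr].history['val_acc']) for lr in lr_to_test}\nfor lr, acc in sorted(best_val_accs.items(), key=lambda kv: -kv[1]):\n    print('lr=%g: best val_acc=%.4f' % (lr, acc))",
"_____no_output_____"
]
],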
[
[
"",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
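[
"markdown"
],
[
"code"
],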
[
"code"
]
] |
d0ecabc181d8a0f5637b900ed942a1cefbbabab0 | 28,838 | ipynb | Jupyter Notebook | Tutorial-ADMBSSN_tofrom_4metric.ipynb | rhaas80/nrpytutorial | 4398cd6b5a071c8fb8b2b584a01f07a4591dd5f4 | [
"BSD-2-Clause"
] | null | null | null | Tutorial-ADMBSSN_tofrom_4metric.ipynb | rhaas80/nrpytutorial | 4398cd6b5a071c8fb8b2b584a01f07a4591dd5f4 | [
"BSD-2-Clause"
] | null | null | null | Tutorial-ADMBSSN_tofrom_4metric.ipynb | rhaas80/nrpytutorial | 4398cd6b5a071c8fb8b2b584a01f07a4591dd5f4 | [
"BSD-2-Clause"
] | null | null | null | 41.138374 | 570 | 0.542132 | [
[
[
"# Converting between the 4-metric $g_{\\mu\\nu}$ and ADM variables $\\left\\{\\gamma_{ij}, \\alpha, \\beta^i\\right\\}$ or BSSN variables $\\left\\{h_{ij}, {\\rm cf}, \\alpha, {\\rm vet}^i\\right\\}$\n## Author: Zach Etienne\n\n[comment]: <> (Abstract: TODO)\n\n### We will often find it useful to convert between the 4-metric $g_{\\mu\\nu}$ and the ADM or BSSN variables. This notebook documents the NRPy+ Python module [`BSSN.ADMBSSN_tofrom_4metric`](../edit/BSSN/ADMBSSN_tofrom_4metric.py), which provides that functionality.\n\n**Notebook Status:** <font color='orange'><b> Self-validated, some additional tests performed </b></font>\n\n**Validation Notes:** This tutorial notebook has been confirmed to be self-consistent with its corresponding NRPy+ module, as documented [below](#code_validation). In addition, the construction of $g_{\\mu\\nu}$ and $g^{\\mu\\nu}$ from BSSN variables has passed the test $g^{\\mu\\nu}g_{\\mu\\nu}=4$ [below](#validationcontraction). **Additional validation tests may have been performed, but are as yet, undocumented. (TODO)**\n\n### NRPy+ Source Code for this module: [BSSN/ADMBSSN_tofrom_4metric.py](../edit/BSSN/ADMBSSN_tofrom_4metric.py)\n\n## Introduction:\n",
"_____no_output_____"
],
[
"<a id='toc'></a>\n\n# Table of Contents\n$$\\label{toc}$$ \n\nThis notebook is organized as follows\n\n1. [Step 1](#setup_ADM_quantities): `setup_ADM_quantities(inputvars)`: If `inputvars=\"ADM\"` declare ADM quantities $\\left\\{\\gamma_{ij},\\beta^i,\\alpha\\right\\}$; if `inputvars=\"ADM\"` define ADM quantities in terms of BSSN quantities\n1. [Step 2](#admbssn_to_fourmetric): Write 4-metric $g_{\\mu\\nu}$ and its inverse $g^{\\mu\\nu}$ in terms of ADM or BSSN quantities\n 1. [Step 2.a](#admbssn_to_fourmetric_lower): 4-metric $g_{\\mu\\nu}$ in terms of ADM or BSSN quantities\n 1. [Step 2.b](#admbssn_to_fourmetric_inv): 4-metric inverse $g^{\\mu\\nu}$ in terms of ADM or BSSN quantities\n 1. [Step 2.c](#validationcontraction): Validation check: Confirm $g_{\\mu\\nu}g^{\\mu\\nu}=4$\n1. [Step 3](#fourmetric_to_admbssn): Write ADM/BSSN metric quantities in terms of 4-metric $g_{\\mu\\nu}$ (Excludes extrinsic curvature $K_{ij}$ or the BSSN $\\bar{A}_{ij}$, $K$)\n 1. [Step 3.a](#adm_ito_fourmetric_validate): ADM in terms of 4-metric validation: Confirm $\\gamma_{ij}\\gamma^{ij}=3$\n 1. [Step 3.b](#bssn_ito_fourmetric_validate): BSSN in terms of 4-metric validation: Confirm $\\bar{\\gamma}_{ij}\\bar{\\gamma}^{ij}=3$\n1. [Step 4](#code_validation): Code Validation against `BSSN.ADMBSSN_tofrom_4metric` NRPy+ module\n1. [Step 5](#latex_pdf_output): Output this notebook to $\\LaTeX$-formatted PDF file",
"_____no_output_____"
],
[
"<a id='setup_ADM_quantities'></a>\n\n# Step 1: `setup_ADM_quantities(inputvars)`: If `inputvars=\"ADM\"` declare ADM quantities $\\left\\{\\gamma_{ij},\\beta^i,\\alpha\\right\\}$; if `inputvars=\"ADM\"` define ADM quantities in terms of BSSN quantities \\[Back to [top](#toc)\\]\n$$\\label{setup_ADM_quantities}$$",
"_____no_output_____"
]
],
[
[
"import sympy as sp\nimport NRPy_param_funcs as par\nimport indexedexp as ixp\nimport sys\n\ndef setup_ADM_quantities(inputvars):\n if inputvars == \"ADM\":\n gammaDD = ixp.declarerank2(\"gammaDD\", \"sym01\")\n betaU = ixp.declarerank1(\"betaU\")\n alpha = sp.symbols(\"alpha\", real=True)\n elif inputvars == \"BSSN\":\n import BSSN.ADM_in_terms_of_BSSN as AitoB\n\n # Construct gamma_{ij} in terms of cf & gammabar_{ij}\n AitoB.ADM_in_terms_of_BSSN()\n gammaDD = AitoB.gammaDD\n # Next construct beta^i in terms of vet^i and reference metric quantities\n import BSSN.BSSN_quantities as Bq\n\n Bq.BSSN_basic_tensors()\n betaU = Bq.betaU\n alpha = sp.symbols(\"alpha\", real=True)\n else:\n print(\"inputvars = \" + str(inputvars) + \" not supported. Please choose ADM or BSSN.\")\n sys.exit(1)\n return gammaDD,betaU,alpha",
"_____no_output_____"
]
],
[
[
"<a id='admbssn_to_fourmetric'></a>\n\n# Step 2: Write 4-metric $g_{\\mu\\nu}$ and its inverse $g^{\\mu\\nu}$ in terms of ADM or BSSN variables \\[Back to [top](#toc)\\]\n$$\\label{admbssn_to_fourmetric}$$\n\n<a id='admbssn_to_fourmetric_lower'></a>\n\n## Step 2.a: 4-metric $g_{\\mu\\nu}$ in terms of ADM or BSSN variables \\[Back to [top](#toc)\\]\n$$\\label{admbssn_to_fourmetric_lower}$$\n\nGiven ADM variables $\\left\\{\\gamma_{ij},\\beta^i,\\alpha \\right\\}$, which themselves may be written in terms of the rescaled BSSN curvilinear variables $\\left\\{h_{ij},{\\rm cf},\\mathcal{V}^i,\\alpha \\right\\}$ for our chosen reference metric via simple function calls to `ADM_in_terms_of_BSSN()` and `BSSN_quantities.BSSN_basic_tensors()`, we are to construct the 4-metric $g_{\\mu\\nu}$. \n\nWe accomplish this via Eq. 2.122 (which can be trivially derived from the ADM 3+1 line element) of Baumgarte & Shapiro's *Numerical Relativity* (henceforth B&S):\n$$\ng_{\\mu\\nu} = \\begin{pmatrix} \n-\\alpha^2 + \\beta^k \\beta_k & \\beta_i \\\\\n\\beta_j & \\gamma_{ij}\n\\end{pmatrix},\n$$\nwhere the shift vector $\\beta^i$ is lowered via (Eq. 2.121):\n\n$$\\beta_k = \\gamma_{ik} \\beta^i.$$",
"_____no_output_____"
]
],
[
[
"def g4DD_ito_BSSN_or_ADM(inputvars):\n # Step 0: Declare g4DD as globals, to make interfacing with other modules/functions easier\n global g4DD\n\n # Step 1: Check that inputvars is set to a supported value\n gammaDD,betaU,alpha = setup_ADM_quantities(inputvars)\n\n # Step 2: Compute g4DD = g_{mu nu}:\n # To get \\gamma_{\\mu \\nu} = gamma4DD[mu][nu], we'll need to construct the 4-metric, using Eq. 2.122 in B&S:\n g4DD = ixp.zerorank2(DIM=4)\n\n # Step 2.a: Compute beta_i via Eq. 2.121 in B&S\n betaD = ixp.zerorank1()\n for i in range(3):\n for j in range(3):\n betaD[i] += gammaDD[i][j] * betaU[j]\n\n # Step 2.b: Compute beta_i beta^i, the beta contraction.\n beta2 = sp.sympify(0)\n for i in range(3):\n beta2 += betaU[i] * betaD[i]\n\n # Step 2.c: Construct g4DD via Eq. 2.122 in B&S\n g4DD[0][0] = -alpha ** 2 + beta2\n for mu in range(1, 4):\n g4DD[mu][0] = g4DD[0][mu] = betaD[mu - 1]\n for mu in range(1, 4):\n for nu in range(1, 4):\n g4DD[mu][nu] = gammaDD[mu - 1][nu - 1]",
"_____no_output_____"
]
],
[
[
"<a id='admbssn_to_fourmetric_inv'></a>\n\n## Step 2.b: Inverse 4-metric $g^{\\mu\\nu}$ in terms of ADM or BSSN variables \\[Back to [top](#toc)\\]\n$$\\label{admbssn_to_fourmetric_inv}$$ \n\nB&S also provide a convenient form for the inverse 4-metric (Eq. 2.119; also Eq. 4.49 in [Gourgoulhon](https://arxiv.org/pdf/gr-qc/0703035.pdf)):\n$$\ng^{\\mu\\nu} = \\gamma^{\\mu\\nu} - n^\\mu n^\\nu = \n\\begin{pmatrix} \n-\\frac{1}{\\alpha^2} & \\frac{\\beta^i}{\\alpha^2} \\\\\n\\frac{\\beta^i}{\\alpha^2} & \\gamma^{ij} - \\frac{\\beta^i\\beta^j}{\\alpha^2}\n\\end{pmatrix},\n$$\nwhere the unit normal vector to the hypersurface is given by $n^{\\mu} = \\left(\\alpha^{-1},-\\beta^i/\\alpha\\right)$.",
"_____no_output_____"
]
],
[
[
"def g4UU_ito_BSSN_or_ADM(inputvars):\n # Step 0: Declare g4UU as globals, to make interfacing with other modules/functions easier\n global g4UU\n\n # Step 1: Check that inputvars is set to a supported value\n gammaDD,betaU,alpha = setup_ADM_quantities(inputvars)\n\n # Step 2: Compute g4UU = g_{mu nu}:\n # To get \\gamma^{\\mu \\nu} = gamma4UU[mu][nu], we'll need to use Eq. 2.119 in B&S.\n g4UU = ixp.zerorank2(DIM=4)\n\n # Step 3: Construct g4UU = g^{mu nu}\n # Step 3.a: Compute gammaUU based on provided gammaDD:\n gammaUU, gammaDET = ixp.symm_matrix_inverter3x3(gammaDD)\n\n # Then evaluate g4UU:\n g4UU = ixp.zerorank2(DIM=4)\n\n g4UU[0][0] = -1 / alpha**2\n for mu in range(1,4):\n g4UU[0][mu] = g4UU[mu][0] = betaU[mu-1]/alpha**2\n for mu in range(1,4):\n for nu in range(1,4):\n g4UU[mu][nu] = gammaUU[mu-1][nu-1] - betaU[mu-1]*betaU[nu-1]/alpha**2",
"_____no_output_____"
]
],
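[
[
"As an optional extra check on the $n^\\mu n^\\nu$ form above (a sketch, using the simpler ADM symbols so the simplifications are immediate): with $n^\\mu = \\left(\\alpha^{-1},-\\beta^i/\\alpha\\right)$, the combination $g^{\\mu\\nu} + n^\\mu n^\\nu$ must have vanishing time components and reduce to $\\gamma^{ij}$ in its spatial block.",
"_____no_output_____"
]
],
[
[
"# Optional sketch: verify g^{mu nu} = gamma^{mu nu} - n^mu n^nu for ADM inputs.\ng4UU_ito_BSSN_or_ADM(\"ADM\")\ngammaDD_chk, betaU_chk, alpha_chk = setup_ADM_quantities(\"ADM\")\n# Unit normal to the hypersurface: n^mu = (1/alpha, -beta^i/alpha)\nnU = [1/alpha_chk] + [-betaU_chk[i]/alpha_chk for i in range(3)]\ngammaUU_chk, gammaDET_chk = ixp.symm_matrix_inverter3x3(gammaDD_chk)\npassed = True\nfor mu in range(4):\n    # Time components of g^{mu nu} + n^mu n^nu must vanish:\n    if sp.simplify(g4UU[0][mu] + nU[0]*nU[mu]) != 0:\n        passed = False\nfor i in range(3):\n    for j in range(3):\n        # Spatial block must reduce to gamma^{ij}:\n        if sp.simplify(g4UU[i+1][j+1] + nU[i+1]*nU[j+1] - gammaUU_chk[i][j]) != 0:\n            passed = False\nprint(\"TEST PASSED!\" if passed else \"TEST FAILED: n^mu n^nu identity does not hold.\")",
"_____no_output_____"
]
],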
[
[
"<a id='validationcontraction'></a>\n\n## Step 2.c: Validation check: Confirm $g_{\\mu\\nu}g^{\\mu\\nu}=4$ \\[Back to [top](#toc)\\]\n$$\\label{validationcontraction}$$ \n\nNext we compute $g^{\\mu\\nu} g_{\\mu\\nu}$ as a validation check. It should equal 4:",
"_____no_output_____"
]
],
[
[
"g4DD_ito_BSSN_or_ADM(\"BSSN\")\ng4UU_ito_BSSN_or_ADM(\"BSSN\")\nsum = 0\nfor mu in range(4):\n for nu in range(4):\n sum += g4DD[mu][nu]*g4UU[mu][nu]\nif sp.simplify(sum) == sp.sympify(4):\n print(\"TEST PASSED!\")\nelse:\n print(\"TEST FAILED: \"+str(sum)+\" does not apparently equal 4.\")\n sys.exit(1)",
"TEST PASSED!\n"
]
],
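[
[
"The scalar contraction above is a necessary but not a sufficient condition. A stronger (optional) sketch checks the full matrix identity $g_{\\mu\\lambda} g^{\\lambda\\nu} = \\delta_\\mu^{\\ \\nu}$ entry by entry; ADM inputs are used here since the symbolic algebra is fastest for them.",
"_____no_output_____"
]
],
[
[
"# Optional sketch: check g_{mu lam} g^{lam nu} = delta_mu^nu, entry by entry (ADM inputs).\ng4DD_ito_BSSN_or_ADM(\"ADM\")\ng4UU_ito_BSSN_or_ADM(\"ADM\")\nidentity_ok = True\nfor mu in range(4):\n    for nu in range(4):\n        contraction = sp.sympify(0)\n        for lam in range(4):\n            contraction += g4DD[mu][lam]*g4UU[lam][nu]\n        expected = sp.sympify(1) if mu == nu else sp.sympify(0)\n        if sp.simplify(contraction - expected) != 0:\n            identity_ok = False\nprint(\"TEST PASSED!\" if identity_ok else \"TEST FAILED: g4DD*g4UU is not the 4x4 identity.\")",
"_____no_output_____"
]
],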
[
[
"<a id='fourmetric_to_admbssn'></a>\n\n# Step 3: Write ADM/BSSN metric quantities in terms of 4-metric $g_{\\mu\\nu}$ (Excludes extrinsic curvature $K_{ij}$, the BSSN $a_{ij}$, $K$, and $\\lambda^i$) \\[Back to [top](#toc)\\]\n$$\\label{fourmetric_to_admbssn}$$ \n\nGiven $g_{\\mu\\nu}$, we now compute ADM/BSSN metric quantities, excluding extrinsic curvature. \n\nLet's start by computing the ADM quantities in terms of the 4-metric $g_{\\mu\\nu}$\n\nRecall that\n$$\ng_{\\mu\\nu} = \\begin{pmatrix} \n-\\alpha^2 + \\beta^k \\beta_k & \\beta_i \\\\\n\\beta_j & \\gamma_{ij}\n\\end{pmatrix}.\n$$\n\nFrom this equation we immediately obtain $\\gamma_{ij}$. However we need $\\beta^i$ and $\\alpha$. After computing the inverse of $\\gamma_{ij}$, $\\gamma^{ij}$, we raise $\\beta_j$ via $\\beta^i=\\gamma^{ij} \\beta_j$ and then compute $\\alpha$ via $\\alpha = \\sqrt{\\beta^k \\beta_k - g_{00}}$. To convert to BSSN variables $\\left\\{h_{ij},{\\rm cf},\\mathcal{V}^i,\\alpha \\right\\}$, we need only convert from ADM via function calls to [`BSSN.BSSN_in_terms_of_ADM`](../edit/BSSN/BSSN_in_terms_of_ADM.py) ([**tutorial**](Tutorial-BSSN_in_terms_of_ADM.ipynb)).",
"_____no_output_____"
]
],
[
[
"def BSSN_or_ADM_ito_g4DD(inputvars):\n # Step 0: Declare output variables as globals, to make interfacing with other modules/functions easier\n if inputvars == \"ADM\":\n global gammaDD,betaU,alpha\n elif inputvars == \"BSSN\":\n global hDD,cf,vetU,alpha\n else:\n print(\"inputvars = \" + str(inputvars) + \" not supported. Please choose ADM or BSSN.\")\n sys.exit(1)\n\n # Step 1: declare g4DD as symmetric rank-4 tensor:\n g4DD = ixp.declarerank2(\"g4DD\",\"sym01\",DIM=4)\n\n # Step 2: Compute gammaDD & betaD\n betaD = ixp.zerorank1()\n gammaDD = ixp.zerorank2()\n for i in range(3):\n betaD[i] = g4DD[0][i]\n for j in range(3):\n gammaDD[i][j] = g4DD[i+1][j+1]\n\n # Step 3: Compute betaU\n # Step 3.a: Compute gammaUU based on provided gammaDD\n gammaUU, gammaDET = ixp.symm_matrix_inverter3x3(gammaDD)\n\n # Step 3.b: Use gammaUU to raise betaU\n betaU = ixp.zerorank1()\n for i in range(3):\n for j in range(3):\n betaU[i] += gammaUU[i][j]*betaD[j]\n\n # Step 4: Compute alpha = sqrt(beta^2 - g_{00}):\n # Step 4.a: Compute beta^2 = beta^k beta_k:\n beta_squared = sp.sympify(0)\n for k in range(3):\n beta_squared += betaU[k]*betaD[k]\n\n # Step 4.b: alpha = sqrt(beta^2 - g_{00}):\n alpha = sp.sqrt(sp.simplify(beta_squared) - g4DD[0][0])\n\n # Step 5: If inputvars == \"ADM\", we are finished. Return.\n if inputvars == \"ADM\":\n return\n\n # Step 6: If inputvars == \"BSSN\", convert ADM to BSSN & return hDD, cf,\n import BSSN.BSSN_in_terms_of_ADM as BitoA\n dummyBU = ixp.zerorank1()\n BitoA.gammabarDD_hDD( gammaDD)\n BitoA.cf_from_gammaDD(gammaDD)\n BitoA.betU_vetU( betaU,dummyBU)\n hDD = BitoA.hDD\n cf = BitoA.cf\n vetU = BitoA.vetU",
"_____no_output_____"
]
],
[
[
"<a id='adm_ito_fourmetric_validate'></a>\n\n## Step 3.a: ADM in terms of 4-metric validation: Confirm $\\gamma_{ij}\\gamma^{ij}=3$ \\[Back to [top](#toc)\\]\n$$\\label{adm_ito_fourmetric_validate}$$\n\nNext we compute $\\gamma^{ij} \\gamma_{ij}$ as a validation check. It should equal 3:",
"_____no_output_____"
]
],
[
[
"BSSN_or_ADM_ito_g4DD(\"ADM\")\ngammaUU, gammaDET = ixp.symm_matrix_inverter3x3(gammaDD)\n\nsum = sp.sympify(0)\nfor i in range(3):\n for j in range(3):\n sum += gammaDD[i][j]*gammaUU[i][j]\nif sp.simplify(sum) == sp.sympify(3):\n print(\"TEST PASSED!\")\nelse:\n print(\"TEST FAILED: \"+str(sum)+\" does not apparently equal 3.\")\n sys.exit(1)",
"TEST PASSED!\n"
]
],
[
[
"<a id='bssn_ito_fourmetric_validate'></a>\n\n## Step 3.b: BSSN in terms of 4-metric validation: Confirm $\\bar{\\gamma}_{ij}\\bar{\\gamma}^{ij}=3$ \\[Back to [top](#toc)\\]\n$$\\label{bssn_ito_fourmetric_validate}$$\n\nNext we compute $\\bar{\\gamma}_{ij}\\bar{\\gamma}^{ij}$ as a validation check. It should equal 3:",
"_____no_output_____"
]
],
[
[
"import reference_metric as rfm\npar.set_parval_from_str(\"reference_metric::CoordSystem\",\"SinhCylindrical\")\nrfm.reference_metric()\n\nBSSN_or_ADM_ito_g4DD(\"BSSN\")\ngammabarDD = ixp.zerorank2()\nfor i in range(3):\n for j in range(3):\n # gammabar_{ij} = h_{ij}*ReDD[i][j] + gammahat_{ij}\n gammabarDD[i][j] = hDD[i][j] * rfm.ReDD[i][j] + rfm.ghatDD[i][j]\n\ngammabarUU, gammabarDET = ixp.symm_matrix_inverter3x3(gammabarDD)\n\nsum = sp.sympify(0)\nfor i in range(3):\n for j in range(3):\n sum += gammabarDD[i][j]*gammabarUU[i][j]\nif sp.simplify(sum) == sp.sympify(3):\n print(\"TEST PASSED!\")\nelse:\n print(\"TEST FAILED: \"+str(sum)+\" does not apparently equal 3.\")\n sys.exit(1)",
"TEST PASSED!\n"
]
],
[
[
"<a id='code_validation'></a>\n\n## Step 4: Code Validation against `BSSN.ADMBSSN_tofrom_4metric` NRPy+ module \\[Back to [top](#toc)\\]\n$$\\label{code_validation}$$\n\nHere, as a code validation check, we verify agreement in the SymPy expressions for BrillLindquist initial data between\n1. this tutorial and \n2. the NRPy+ [BSSN.ADMBSSN_tofrom_4metric](../edit/BSSN/ADMBSSN_tofrom_4metric.py) module.\n\nBy default, we analyze these expressions in SinhCylindrical coordinates, though other coordinate systems may be chosen.",
"_____no_output_____"
]
],
[
[
"par.set_parval_from_str(\"reference_metric::CoordSystem\",\"SinhCylindrical\")\nrfm.reference_metric()\n\nimport BSSN.ADMBSSN_tofrom_4metric as AB4m\nfor inputvars in [\"BSSN\",\"ADM\"]:\n g4DD_ito_BSSN_or_ADM(inputvars)\n AB4m.g4DD_ito_BSSN_or_ADM(inputvars)\n for i in range(4):\n for j in range(4):\n print(inputvars+\" input: g4DD[\"+str(i)+\"][\"+str(j)+\"] - g4DD_mod[\"+str(i)+\"][\"\n +str(j)+\"] = \"+str(g4DD[i][j]-AB4m.g4DD[i][j]))\n\n g4UU_ito_BSSN_or_ADM(inputvars)\n AB4m.g4UU_ito_BSSN_or_ADM(inputvars)\n for i in range(4):\n for j in range(4):\n print(inputvars+\" input: g4UU[\"+str(i)+\"][\"+str(j)+\"] - g4UU_mod[\"+str(i)+\"][\"\n +str(j)+\"] = \"+str(g4UU[i][j]-AB4m.g4UU[i][j]))\n\nBSSN_or_ADM_ito_g4DD(\"BSSN\")\nAB4m.BSSN_or_ADM_ito_g4DD(\"BSSN\")\nprint(\"BSSN QUANTITIES (ito 4-metric g4DD)\")\nprint(\"cf - mod_cf = \" + str(cf - AB4m.cf))\nprint(\"alpha - mod_alpha = \" + str(alpha - AB4m.alpha))\nfor i in range(3):\n print(\"vetU[\"+str(i)+\"] - mod_vetU[\"+str(i)+\"] = \" + str(vetU[i] - AB4m.vetU[i]))\n for j in range(3):\n print(\"hDD[\"+str(i)+\"][\"+str(j)+\"] - mod_hDD[\"+str(i)+\"][\"+str(j)+\"] = \"\n + str(hDD[i][j] - AB4m.hDD[i][j]))\n\nBSSN_or_ADM_ito_g4DD(\"ADM\")\nAB4m.BSSN_or_ADM_ito_g4DD(\"ADM\")\nprint(\"ADM QUANTITIES (ito 4-metric g4DD)\")\nprint(\"alpha - mod_alpha = \" + str(alpha - AB4m.alpha))\nfor i in range(3):\n print(\"betaU[\"+str(i)+\"] - mod_betaU[\"+str(i)+\"] = \" + str(betaU[i] - AB4m.betaU[i]))\n for j in range(3):\n print(\"gammaDD[\"+str(i)+\"][\"+str(j)+\"] - mod_gammaDD[\"+str(i)+\"][\"+str(j)+\"] = \"\n + str(gammaDD[i][j] - AB4m.gammaDD[i][j]))",
"BSSN input: g4DD[0][0] - g4DD_mod[0][0] = 0\nBSSN input: g4DD[0][1] - g4DD_mod[0][1] = 0\nBSSN input: g4DD[0][2] - g4DD_mod[0][2] = 0\nBSSN input: g4DD[0][3] - g4DD_mod[0][3] = 0\nBSSN input: g4DD[1][0] - g4DD_mod[1][0] = 0\nBSSN input: g4DD[1][1] - g4DD_mod[1][1] = 0\nBSSN input: g4DD[1][2] - g4DD_mod[1][2] = 0\nBSSN input: g4DD[1][3] - g4DD_mod[1][3] = 0\nBSSN input: g4DD[2][0] - g4DD_mod[2][0] = 0\nBSSN input: g4DD[2][1] - g4DD_mod[2][1] = 0\nBSSN input: g4DD[2][2] - g4DD_mod[2][2] = 0\nBSSN input: g4DD[2][3] - g4DD_mod[2][3] = 0\nBSSN input: g4DD[3][0] - g4DD_mod[3][0] = 0\nBSSN input: g4DD[3][1] - g4DD_mod[3][1] = 0\nBSSN input: g4DD[3][2] - g4DD_mod[3][2] = 0\nBSSN input: g4DD[3][3] - g4DD_mod[3][3] = 0\nBSSN input: g4UU[0][0] - g4UU_mod[0][0] = 0\nBSSN input: g4UU[0][1] - g4UU_mod[0][1] = 0\nBSSN input: g4UU[0][2] - g4UU_mod[0][2] = 0\nBSSN input: g4UU[0][3] - g4UU_mod[0][3] = 0\nBSSN input: g4UU[1][0] - g4UU_mod[1][0] = 0\nBSSN input: g4UU[1][1] - g4UU_mod[1][1] = 0\nBSSN input: g4UU[1][2] - g4UU_mod[1][2] = 0\nBSSN input: g4UU[1][3] - g4UU_mod[1][3] = 0\nBSSN input: g4UU[2][0] - g4UU_mod[2][0] = 0\nBSSN input: g4UU[2][1] - g4UU_mod[2][1] = 0\nBSSN input: g4UU[2][2] - g4UU_mod[2][2] = 0\nBSSN input: g4UU[2][3] - g4UU_mod[2][3] = 0\nBSSN input: g4UU[3][0] - g4UU_mod[3][0] = 0\nBSSN input: g4UU[3][1] - g4UU_mod[3][1] = 0\nBSSN input: g4UU[3][2] - g4UU_mod[3][2] = 0\nBSSN input: g4UU[3][3] - g4UU_mod[3][3] = 0\nADM input: g4DD[0][0] - g4DD_mod[0][0] = 0\nADM input: g4DD[0][1] - g4DD_mod[0][1] = 0\nADM input: g4DD[0][2] - g4DD_mod[0][2] = 0\nADM input: g4DD[0][3] - g4DD_mod[0][3] = 0\nADM input: g4DD[1][0] - g4DD_mod[1][0] = 0\nADM input: g4DD[1][1] - g4DD_mod[1][1] = 0\nADM input: g4DD[1][2] - g4DD_mod[1][2] = 0\nADM input: g4DD[1][3] - g4DD_mod[1][3] = 0\nADM input: g4DD[2][0] - g4DD_mod[2][0] = 0\nADM input: g4DD[2][1] - g4DD_mod[2][1] = 0\nADM input: g4DD[2][2] - g4DD_mod[2][2] = 0\nADM input: g4DD[2][3] - g4DD_mod[2][3] = 0\nADM input: g4DD[3][0] - g4DD_mod[3][0] = 0\nADM input: g4DD[3][1] - g4DD_mod[3][1] = 0\nADM input: g4DD[3][2] - g4DD_mod[3][2] = 0\nADM input: g4DD[3][3] - g4DD_mod[3][3] = 0\nADM input: g4UU[0][0] - g4UU_mod[0][0] = 0\nADM input: g4UU[0][1] - g4UU_mod[0][1] = 0\nADM input: g4UU[0][2] - g4UU_mod[0][2] = 0\nADM input: g4UU[0][3] - g4UU_mod[0][3] = 0\nADM input: g4UU[1][0] - g4UU_mod[1][0] = 0\nADM input: g4UU[1][1] - g4UU_mod[1][1] = 0\nADM input: g4UU[1][2] - g4UU_mod[1][2] = 0\nADM input: g4UU[1][3] - g4UU_mod[1][3] = 0\nADM input: g4UU[2][0] - g4UU_mod[2][0] = 0\nADM input: g4UU[2][1] - g4UU_mod[2][1] = 0\nADM input: g4UU[2][2] - g4UU_mod[2][2] = 0\nADM input: g4UU[2][3] - g4UU_mod[2][3] = 0\nADM input: g4UU[3][0] - g4UU_mod[3][0] = 0\nADM input: g4UU[3][1] - g4UU_mod[3][1] = 0\nADM input: g4UU[3][2] - g4UU_mod[3][2] = 0\nADM input: g4UU[3][3] - g4UU_mod[3][3] = 0\nBSSN QUANTITIES (ito 4-metric g4DD)\ncf - mod_cf = 0\nalpha - mod_alpha = 0\nvetU[0] - mod_vetU[0] = 0\nhDD[0][0] - mod_hDD[0][0] = 0\nhDD[0][1] - mod_hDD[0][1] = 0\nhDD[0][2] - mod_hDD[0][2] = 0\nvetU[1] - mod_vetU[1] = 0\nhDD[1][0] - mod_hDD[1][0] = 0\nhDD[1][1] - mod_hDD[1][1] = 0\nhDD[1][2] - mod_hDD[1][2] = 0\nvetU[2] - mod_vetU[2] = 0\nhDD[2][0] - mod_hDD[2][0] = 0\nhDD[2][1] - mod_hDD[2][1] = 0\nhDD[2][2] - mod_hDD[2][2] = 0\nADM QUANTITIES (ito 4-metric g4DD)\nalpha - mod_alpha = 0\nbetaU[0] - mod_betaU[0] = 0\ngammaDD[0][0] - mod_gammaDD[0][0] = 0\ngammaDD[0][1] - mod_gammaDD[0][1] = 0\ngammaDD[0][2] - mod_gammaDD[0][2] = 0\nbetaU[1] - mod_betaU[1] = 0\ngammaDD[1][0] - mod_gammaDD[1][0] 
= 0\ngammaDD[1][1] - mod_gammaDD[1][1] = 0\ngammaDD[1][2] - mod_gammaDD[1][2] = 0\nbetaU[2] - mod_betaU[2] = 0\ngammaDD[2][0] - mod_gammaDD[2][0] = 0\ngammaDD[2][1] - mod_gammaDD[2][1] = 0\ngammaDD[2][2] - mod_gammaDD[2][2] = 0\n"
]
],
[
[
"<a id='latex_pdf_output'></a>\n\n# Step 4: Output this notebook to $\\LaTeX$-formatted PDF file \\[Back to [top](#toc)\\]\n$$\\label{latex_pdf_output}$$\n\nThe following code cell converts this Jupyter notebook into a proper, clickable $\\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename [Tutorial-ADMBSSN_tofrom_4metric.pdf](Tutorial-ADMBSSN_tofrom_4metric.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)",
"_____no_output_____"
]
],
[
[
"import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface\ncmd.output_Jupyter_notebook_to_LaTeXed_PDF(\"Tutorial-ADMBSSN_tofrom_4metric\")",
"Created Tutorial-ADMBSSN_tofrom_4metric.tex, and compiled LaTeX file to PDF\n file Tutorial-ADMBSSN_tofrom_4metric.pdf\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
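[
"markdown"
],
[
"code"
],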
[
"markdown"
],
[
"code"
],
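[
"markdown"
],
[
"code"
],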
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
d0ecb73ca81ff2d6d75b3219f1633647f7c951d5 | 340,505 | ipynb | Jupyter Notebook | Tensorflow/06_SpeedTestWebApp.ipynb | marabout2015/AKSDeploymentTutorial | 8914ac1886405a8a3194e8ad2f9aafe58d19708f | [
"MIT"
] | 1 | 2020-08-06T10:37:49.000Z | 2020-08-06T10:37:49.000Z | Tensorflow/06_SpeedTestWebApp.ipynb | marabout2015/AKSDeploymentTutorial | 8914ac1886405a8a3194e8ad2f9aafe58d19708f | [
"MIT"
] | null | null | null | Tensorflow/06_SpeedTestWebApp.ipynb | marabout2015/AKSDeploymentTutorial | 8914ac1886405a8a3194e8ad2f9aafe58d19708f | [
"MIT"
] | null | null | null | 799.307512 | 164,636 | 0.941692 | [
[
[
"### Load Test deployed web application\nThis notebook pulls some images and tests them against the deployed web application. We submit requests asychronously which should reduce the contribution of latency.",
"_____no_output_____"
]
],
[
[
"import asyncio\nimport json\nimport random\nimport urllib.request\nfrom timeit import default_timer\n\nimport aiohttp\nimport matplotlib.pyplot as plt\nimport testing_utilities\nfrom tqdm import tqdm\n\n%matplotlib inline",
"_____no_output_____"
]
],
[
[
"We will test our deployed service with 100 calls. We will only have 4 requests concurrently at any time. We have only deployed one pod on one node and increasing the number of concurrent calls does not really increase throughput. Feel free to try different values and see how the service responds.",
"_____no_output_____"
]
],
[
[
"NUMBER_OF_REQUESTS = 100 # Total number of requests\nCONCURRENT_REQUESTS = 4 # Number of requests at a time",
"_____no_output_____"
]
],
[
[
"Get the IP address of our service",
"_____no_output_____"
]
],
[
[
"service_json = !kubectl get service azure-dl -o json\nservice_dict = json.loads(''.join(service_json))\napp_url = service_dict['status']['loadBalancer']['ingress'][0]['ip']",
"_____no_output_____"
],
[
"scoring_url = 'http://{}/score'.format(app_url)\nversion_url = 'http://{}/version'.format(app_url)",
"_____no_output_____"
],
[
"!curl $version_url # Reports the Tensorflow Version",
"1.4.1"
],
[
"IMAGEURL = \"https://upload.wikimedia.org/wikipedia/commons/thumb/6/68/Lynx_lynx_poing.jpg/220px-Lynx_lynx_poing.jpg\"\nplt.imshow(testing_utilities.to_img(IMAGEURL))",
"_____no_output_____"
],
[
"def gen_variations_of_one_image(num, label='image'):\n out_images = []\n img = testing_utilities.to_img(IMAGEURL).convert('RGB')\n # Flip the colours for one-pixel\n # \"Different Image\"\n for i in range(num):\n diff_img = img.copy()\n rndm_pixel_x_y = (random.randint(0, diff_img.size[0]-1), \n random.randint(0, diff_img.size[1]-1))\n current_color = diff_img.getpixel(rndm_pixel_x_y)\n diff_img.putpixel(rndm_pixel_x_y, current_color[::-1])\n b64img = testing_utilities.to_base64(diff_img)\n out_images.append(json.dumps({'input':{label:'\\\"{0}\\\"'.format(b64img)}}))\n return out_images",
"_____no_output_____"
],
[
"url_list = [[scoring_url, jsonimg] for jsonimg in gen_variations_of_one_image(NUMBER_OF_REQUESTS)]",
"_____no_output_____"
],
[
"def decode(result):\n return json.loads(result.decode(\"utf-8\"))",
"_____no_output_____"
],
[
"async def fetch(url, session, data, headers):\n start_time = default_timer()\n async with session.request('post', url, data=data, headers=headers) as response:\n resp = await response.read()\n elapsed = default_timer() - start_time\n return resp, elapsed",
"_____no_output_____"
],
[
"async def bound_fetch(sem, url, session, data, headers):\n # Getter function with semaphore.\n async with sem:\n return await fetch(url, session, data, headers)",
"_____no_output_____"
],
[
"async def await_with_progress(coros):\n results=[]\n for f in tqdm(asyncio.as_completed(coros), total=len(coros)):\n result = await f\n results.append((decode(result[0]),result[1]))\n return results",
"_____no_output_____"
],
[
"async def run(url_list, num_concurrent=CONCURRENT_REQUESTS):\n headers = {'content-type': 'application/json'}\n tasks = []\n # create instance of Semaphore\n sem = asyncio.Semaphore(num_concurrent)\n\n # Create client session that will ensure we dont open new connection\n # per each request.\n async with aiohttp.ClientSession() as session:\n for url, data in url_list:\n # pass Semaphore and session to every POST request\n task = asyncio.ensure_future(bound_fetch(sem, url, session, data, headers))\n tasks.append(task)\n return await await_with_progress(tasks)",
"_____no_output_____"
]
],
[
[
"Below we run the 100 requests against our deployed service",
"_____no_output_____"
]
],
[
[
"loop = asyncio.get_event_loop()\nstart_time = default_timer()\ncomplete_responses = loop.run_until_complete(asyncio.ensure_future(run(url_list, num_concurrent=CONCURRENT_REQUESTS)))\nelapsed = default_timer() - start_time\nprint('Total Elapsed {}'.format(elapsed))\nprint('Avg time taken {0:4.2f} ms'.format(1000*elapsed/len(url_list)))",
"100%|██████████| 100/100 [00:05<00:00, 19.73it/s]"
]
],
[
[
"Below we can see the output of some of our calls",
"_____no_output_____"
]
],
[
[
"complete_responses[:3]",
"_____no_output_____"
],
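[
"# Sketch: each entry of complete_responses is (decoded_json, elapsed_seconds),\n# so we can summarise the per-request latencies directly.\nlatencies = [r[1] for r in complete_responses]\nprint('min {:.3f}s  mean {:.3f}s  max {:.3f}s'.format(\n    min(latencies), sum(latencies) / len(latencies), max(latencies)))",
"_____no_output_____"
],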
[
"num_succesful=[i[0]['result'][0]['image'][0][0] for i in complete_responses].count('n02127052 lynx, catamount')\nprint('Succesful {} out of {}'.format(num_succesful, len(url_list)))",
"Succesful 100 out of 100\n"
],
[
"# Example response\nplt.imshow(testing_utilities.to_img(IMAGEURL))\ncomplete_responses[0]",
"_____no_output_____"
]
],
[
[
"To tear down the cluster and all related resources go to the [deploy on AKS notebook](04_DeployOnAKS.ipynb)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
]
] |
d0ecd37489f01d270fc9ba374c1197702ff92833 | 114,423 | ipynb | Jupyter Notebook | notebooks/37_uncertainty_sampling_rgz.ipynb | chengsoonong/crowdastro | ce14432c36de0574b73d813304365b74446a61f8 | [
"MIT"
] | 13 | 2015-11-07T15:24:44.000Z | 2020-04-20T05:29:58.000Z | notebooks/37_uncertainty_sampling_rgz.ipynb | chengsoonong/crowdastro | ce14432c36de0574b73d813304365b74446a61f8 | [
"MIT"
] | 234 | 2016-02-21T23:53:16.000Z | 2019-06-27T00:26:08.000Z | notebooks/37_uncertainty_sampling_rgz.ipynb | chengsoonong/crowdastro | ce14432c36de0574b73d813304365b74446a61f8 | [
"MIT"
] | 3 | 2015-11-07T00:20:09.000Z | 2018-10-03T13:37:15.000Z | 258.291196 | 99,358 | 0.900457 | [
[
[
"# Uncertainty Sampling on the Radio Galaxy Zoo",
"_____no_output_____"
]
],
[
[
"import sys\n\nimport h5py, numpy, sklearn.neighbors\nfrom astropy.coordinates import SkyCoord\nimport matplotlib.pyplot as plt\n\nsys.path.insert(1, '..')\nimport crowdastro.train, crowdastro.test\n\nTRAINING_H5_PATH = '../training.h5'\nCROWDASTRO_H5_PATH = '../crowdastro.h5'\nNORRIS_DAT_PATH = '../data/norris_2006_atlas_classifications_ra_dec_only.dat'\nCLASSIFIER_OUT_PATH = '../classifier.pkl'\nASTRO_TRANSFORMER_OUT_PATH = '../astro_transformer.pkl'\nIMAGE_TRANSFORMER_OUT_PATH = '../image_transformer.pkl'\nIMAGE_SIZE = 200 * 200\nARCMIN = 1 / 60\nN_JOBS = 8\n\n%matplotlib inline",
"_____no_output_____"
],
[
"# Load labels.\nwith h5py.File(TRAINING_H5_PATH, 'r') as training_h5:\n crowdsourced_labels = training_h5['labels'].value\n\nwith h5py.File(CROWDASTRO_H5_PATH, 'r') as crowdastro_h5:\n ir_names = crowdastro_h5['/wise/cdfs/string'].value\n ir_positions = crowdastro_h5['/wise/cdfs/numeric'].value[:, :2]\nir_tree = sklearn.neighbors.KDTree(ir_positions)\n\nwith open(NORRIS_DAT_PATH, 'r') as norris_dat:\n norris_coords = [r.strip().split('|') for r in norris_dat]\n\nnorris_labels = numpy.zeros((len(ir_positions)))\nfor ra, dec in norris_coords:\n # Find a neighbour.\n skycoord = SkyCoord(ra=ra, dec=dec, unit=('hourangle', 'deg'))\n ra = skycoord.ra.degree\n dec = skycoord.dec.degree\n ((dist,),), ((ir,),) = ir_tree.query([(ra, dec)])\n if dist < 0.1:\n norris_labels[ir] = 1",
"_____no_output_____"
],
[
"def softmax(x):\n exp = numpy.exp(x - numpy.max(x))\n out = exp / exp.sum()\n return out\n\ndef train_and_test(hidden_atlas_training_indices):\n \"\"\"\n hidden_atlas_training_indices: ATLAS indices to hide.\n \"\"\"\n with h5py.File(TRAINING_H5_PATH, 'r') as training_h5, h5py.File(CROWDASTRO_H5_PATH, 'r') as crowdastro_h5:\n n_static = 5 if training_h5.attrs['ir_survey'] == 'wise' else 6\n train_indices = training_h5['is_ir_train'].value\n atlas_train_indices = training_h5['is_atlas_train'].value\n \n # Remove all IR objects near hidden ATLAS objects.\n for atlas_index in hidden_atlas_training_indices:\n ir = crowdastro_h5['/atlas/cdfs/numeric'][atlas_index, n_static + IMAGE_SIZE:]\n nearby = (ir < ARCMIN).nonzero()[0]\n for ir_index in nearby:\n train_indices[ir_index] = 0\n n_ir = train_indices.sum()\n \n # We can now proceed as usual with training/testing.\n\n outputs = training_h5['labels'].value[train_indices]\n n = len(outputs)\n\n astro_inputs = numpy.minimum(\n training_h5['features'][train_indices, :n_static], 1500)\n image_inputs = training_h5['features'].value[train_indices, n_static:]\n\n astro_transformer = sklearn.pipeline.Pipeline([\n ('normalise', sklearn.preprocessing.Normalizer()),\n ('scale', sklearn.preprocessing.StandardScaler()),\n ])\n image_transformer = sklearn.pipeline.Pipeline([\n ('normalise', sklearn.preprocessing.Normalizer()),\n ])\n\n features = []\n features.append(astro_transformer.fit_transform(astro_inputs))\n features.append(image_transformer.fit_transform(image_inputs))\n inputs = numpy.hstack(features)\n\n classifier = sklearn.linear_model.LogisticRegression(\n class_weight='balanced', n_jobs=N_JOBS)\n classifier.fit(inputs, outputs)\n\n # Test the classifier.\n test_indices = training_h5['is_atlas_test'].value\n numeric_subjects = crowdastro_h5['/atlas/cdfs/numeric'][test_indices, :]\n\n n_norris_agree = 0\n n_crowdsourced_agree = 0\n n_all_agree = 0\n n_either_agree = 0\n n_no_host = 0\n n_total = 0\n for subject in numeric_subjects:\n swire = subject[2 + IMAGE_SIZE:]\n nearby = swire < ARCMIN\n astro_inputs = numpy.minimum(training_h5['features'][nearby, :n_static],\n 1500)\n image_inputs = training_h5['features'][nearby, n_static:]\n\n features = []\n features.append(astro_transformer.transform(astro_inputs))\n features.append(image_transformer.transform(image_inputs))\n inputs = numpy.hstack(features)\n\n crowdsourced_outputs = crowdsourced_labels[nearby]\n norris_outputs = norris_labels[nearby]\n\n if sum(crowdsourced_outputs) < 1 or sum(norris_outputs) < 1:\n # No hosts!\n n_no_host += 1\n continue\n\n selection = classifier.predict_proba(inputs)[:, 1].argmax()\n n_norris_agree += norris_outputs[selection]\n n_crowdsourced_agree += crowdsourced_outputs[selection]\n n_all_agree += norris_outputs[selection] * crowdsourced_outputs[selection]\n n_either_agree += norris_outputs[selection] or crowdsourced_outputs[selection]\n n_total += 1\n \n # Compute the uncertainties of the pool.\n pool_indices = training_h5['is_atlas_train'].value\n numeric_subjects = crowdastro_h5['/atlas/cdfs/numeric'][pool_indices, :]\n uncertainties = []\n\n for subject in numeric_subjects:\n swire = subject[2 + IMAGE_SIZE:]\n nearby = swire < ARCMIN\n astro_inputs = numpy.minimum(training_h5['features'][nearby, :n_static],\n 1500)\n image_inputs = training_h5['features'][nearby, n_static:]\n\n features = []\n features.append(astro_transformer.transform(astro_inputs))\n features.append(image_transformer.transform(image_inputs))\n inputs = numpy.hstack(features)\n\n probs = 
softmax(classifier.predict_proba(inputs)[:, 1])\n entropy = -numpy.sum(numpy.log(probs) * probs)\n uncertainties.append(entropy)\n\n return (n_norris_agree / n_total, n_crowdsourced_agree / n_total,\n n_all_agree / n_total, n_either_agree / n_total, uncertainties, n_ir)",
"_____no_output_____"
],
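[
"# Quick illustration (not part of the original analysis): the entropy of the\n# softmaxed scores is largest for a uniform distribution and smallest when one\n# candidate host dominates -- this is what drives uncertainty sampling below.\nfor scores in [numpy.array([0.5, 0.5, 0.5]), numpy.array([5.0, 0.1, 0.1])]:\n    p = softmax(scores)\n    print(scores, '-> entropy', -numpy.sum(numpy.log(p) * p))",
"_____no_output_____"
],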
[
"# Randomly hide 90% of labels.\nwith h5py.File(TRAINING_H5_PATH, 'r') as training_h5:\n atlas_train_indices = training_h5['is_atlas_train'].value\n initial_hidden_atlas_training_indices = numpy.arange(atlas_train_indices.sum())\n numpy.random.shuffle(initial_hidden_atlas_training_indices)\n initial_hidden_atlas_training_indices = initial_hidden_atlas_training_indices[\n :9 * len(initial_hidden_atlas_training_indices) // 10]\n initial_hidden_atlas_training_indices.sort()",
"_____no_output_____"
],
[
"# Testing random label selection.\n\nnorris_accuracies_random = []\nrgz_accuracies_random = []\nall_accuracies_random = []\nany_accuracies_random = []\nn_ir_random = []\nn_batch = 100\nn_epochs = 25\n\nnumpy.random.seed(0)\nhidden_atlas_training_indices = initial_hidden_atlas_training_indices[:]\n\nfor epoch in range(n_epochs):\n print('Epoch {}/{}'.format(epoch + 1, n_epochs))\n\n # Train, test, and generate uncertainties.\n results = train_and_test(hidden_atlas_training_indices)\n\n norris_accuracies_random.append(results[0])\n rgz_accuracies_random.append(results[1])\n all_accuracies_random.append(results[2])\n any_accuracies_random.append(results[3])\n n_ir_random.append(results[5])\n\n # Choose n_batch new labels at random.\n if len(hidden_atlas_training_indices) < n_batch:\n break\n else:\n numpy.random.shuffle(hidden_atlas_training_indices)\n hidden_atlas_training_indices = hidden_atlas_training_indices[:-n_batch]\n hidden_atlas_training_indices.sort()",
"Epoch 1/25\nEpoch 2/25\nEpoch 3/25\nEpoch 4/25\nEpoch 5/25\nEpoch 6/25\nEpoch 7/25\nEpoch 8/25\nEpoch 9/25\nEpoch 10/25\nEpoch 11/25\nEpoch 12/25\nEpoch 13/25\nEpoch 14/25\nEpoch 15/25\nEpoch 16/25\nEpoch 17/25\n"
],
[
"# Testing uncertainty sampling label selection.\n\nnorris_accuracies_uncsample = []\nrgz_accuracies_uncsample = []\nall_accuracies_uncsample = []\nany_accuracies_uncsample = []\nn_ir_uncsample = []\n\nhidden_atlas_training_indices = initial_hidden_atlas_training_indices[:]\n\nfor epoch in range(n_epochs):\n print('Epoch {}/{}'.format(epoch + 1, n_epochs))\n\n # Train, test, and generate uncertainties.\n results = train_and_test(hidden_atlas_training_indices)\n uncertainties = results[4]\n\n norris_accuracies_uncsample.append(results[0])\n rgz_accuracies_uncsample.append(results[1])\n all_accuracies_uncsample.append(results[2])\n any_accuracies_uncsample.append(results[3])\n n_ir_uncsample.append(results[5])\n\n # Choose the n_batch most uncertain objects to label.\n if len(hidden_atlas_training_indices) < n_batch:\n break\n else:\n hidden_atlas_training_indices = numpy.array(\n sorted(hidden_atlas_training_indices, key=lambda z: uncertainties[z]))[:-n_batch]\n hidden_atlas_training_indices.sort()",
"Epoch 1/25\nEpoch 2/25\nEpoch 3/25\nEpoch 4/25\nEpoch 5/25\nEpoch 6/25\nEpoch 7/25\nEpoch 8/25\nEpoch 9/25\nEpoch 10/25\nEpoch 11/25\nEpoch 12/25\nEpoch 13/25\nEpoch 14/25\nEpoch 15/25\nEpoch 16/25\nEpoch 17/25\n"
],
[
"plt.figure(figsize=(15, 10))\n\nplt.subplot(2, 2, 1)\n\nplt.plot(all_accuracies_random, c='pink')\nplt.plot(any_accuracies_random, c='darkred')\n\nplt.plot(all_accuracies_uncsample, c='lightgreen')\nplt.plot(any_accuracies_uncsample, c='darkgreen')\n\nplt.xlabel('{}-batch epochs'.format(n_batch))\nplt.ylabel('Classification accuracy')\n\nplt.legend(['Norris & RGZ (passive)', 'Norris | RGZ (passive)',\n 'Norris & RGZ (unc)', 'Norris | RGZ (unc)'], loc='lower right')\n\nplt.subplot(2, 2, 2)\n\nplt.plot(norris_accuracies_random, c='red')\nplt.plot(norris_accuracies_uncsample, c='green')\n\nplt.legend(['Norris (passive)', 'Norris (unc)'], loc='lower right')\n\nplt.xlabel('{}-batch epochs'.format(n_batch))\nplt.ylabel('Classification accuracy')\n\nplt.subplot(2, 2, 3)\n\nplt.plot(rgz_accuracies_random, c='red')\nplt.plot(rgz_accuracies_uncsample, c='green')\n\nplt.legend(['RGZ (passive)', 'RGZ (unc)'], loc='lower right')\n\nplt.xlabel('{}-batch epochs'.format(n_batch))\nplt.ylabel('Classification accuracy')\n\nplt.subplot(2, 2, 4)\n\nplt.plot(numpy.array(n_ir_random) - numpy.array(n_ir_uncsample))\n\nplt.xlabel('{}-batch epochs'.format(n_batch))\nplt.ylabel('Difference in number of IR examples')\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"Conclusion: Uncertainty sampling with entropy doesn't work very well.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
d0ecd88221fce85a8d082bc44861d94e4b5c03a2 | 486,441 | ipynb | Jupyter Notebook | Copy_of_Style_Transfer_PyTorch.ipynb | lasyaistla/Ai.fellowshiplasya | d718b7abab530917adadc6e44d84e9e546bd8e46 | [
"Apache-2.0"
] | null | null | null | Copy_of_Style_Transfer_PyTorch.ipynb | lasyaistla/Ai.fellowshiplasya | d718b7abab530917adadc6e44d84e9e546bd8e46 | [
"Apache-2.0"
] | null | null | null | Copy_of_Style_Transfer_PyTorch.ipynb | lasyaistla/Ai.fellowshiplasya | d718b7abab530917adadc6e44d84e9e546bd8e46 | [
"Apache-2.0"
] | null | null | null | 511.504732 | 123,362 | 0.935645 | [
[
[
"<a href=\"https://colab.research.google.com/github/lasyaistla/Ai.fellowship/blob/main/Copy_of_Style_Transfer_PyTorch.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"# Please complete the missing parts in the code below. Moreover, please correct the mistakes in the code if the performance is not satisfactory.",
"_____no_output_____"
],
[
"# Implementation of Neural Style Transfer with PyTorch",
"_____no_output_____"
]
],
[
[
"# importing libraries to implement style-transfer\n\nfrom __future__ import print_function\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nfrom PIL import Image\nimport matplotlib.pyplot as plt\n\nimport torchvision.transforms as transforms\nimport torchvision.models as models\n\nimport copy",
"_____no_output_____"
],
[
"device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")",
"_____no_output_____"
],
[
"# desired size of the output image\nimsize = 512 if torch.cuda.is_available() else 128 # use small size if no gpu\n\nloader = transforms.Compose([\n transforms.Resize(imsize), # scale imported image\n transforms.ToTensor()]) # transform it into a torch tensor\n\n\ndef image_loader(image_name):\n image = Image.open(image_name)\n # fake batch dimension required to fit network's input dimensions\n image = loader(image).unsqueeze(0)\n return image.to(device, torch.float)\n\n\nstyle_img = image_loader(\"/dancing.jpg\")\ncontent_img = image_loader(\"/picasso.jpg\")\n\nassert style_img.size() == content_img.size(), \\\n \"we need to import style and content images of the same size\"",
"_____no_output_____"
],
[
"unloader = transforms.ToPILImage() # reconvert into PIL image\n\nplt.ion()\n\ndef imshow(tensor, title=None):\n image = tensor.cpu().clone() # we clone the tensor to not do changes on it\n image = image.squeeze(0) # remove the fake batch dimension\n image = unloader(image)\n plt.imshow(image)\n if title is not None:\n plt.title(title)\n plt.pause(0.001) # pause a bit so that plots are updated\n\n\nplt.figure()\nimshow(style_img, title='Style Image')\n\nplt.figure()\nimshow(content_img, title='Content Image')",
"_____no_output_____"
],
[
"class ContentLoss(nn.Module):\n\n def __init__(self, target,):\n super(ContentLoss, self).__init__()\n # we 'detach' the target content from the tree used\n # to dynamically compute the gradient: this is a stated value,\n # not a variable. Otherwise the forward method of the criterion\n # will throw an error.\n self.target = target.detach()\n\n def forward(self, input):\n self.loss = F.mse_loss(input, self.target)\n return input",
"_____no_output_____"
],
[
"def gram_matrix(input):\n a, b, c, d = input.size() # a=batch size(=1)\n # b=number of feature maps\n # (c,d)=dimensions of a f. map (N=c*d)\n\n features = input.view(a * b, c * d) # resise F_XL into \\hat F_XL\n\n G = torch.mm(features, features.t()) # compute the gram product\n\n # we 'normalize' the values of the gram matrix\n # by dividing by the number of element in each feature maps.\n return G.div(a * b * c * d)",
"_____no_output_____"
],
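[
"# Minimal sanity check (sketch with arbitrary sizes): the Gram matrix of a\n# feature map with b channels should be b x b regardless of spatial size.\n_fm = torch.randn(1, 3, 8, 8)\nprint(gram_matrix(_fm).shape)  # expected: torch.Size([3, 3])",
"_____no_output_____"
],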
[
"class StyleLoss(nn.Module):\n\n def __init__(self, target_feature):\n super(StyleLoss, self).__init__()\n self.target = gram_matrix(target_feature).detach()\n\n def forward(self, input):\n G = gram_matrix(input)\n self.loss = F.mse_loss(G, self.target)\n return input",
"_____no_output_____"
],
[
"# importing vgg-16 pre-trained model\ncnn = models.vgg19(pretrained=True).features.to(device).eval()",
"Downloading: \"https://download.pytorch.org/models/vgg19-dcbb9e9d.pth\" to /root/.cache/torch/hub/checkpoints/vgg19-dcbb9e9d.pth\n"
],
[
"cnn_normalization_mean = torch.tensor([0.485, 0.456, 0.406]).to(device)\ncnn_normalization_std = torch.tensor([0.229, 0.224, 0.225]).to(device)\n\n# create a module to normalize input image so we can easily put it in a\n# nn.Sequential\nclass Normalization(nn.Module):\n def __init__(self, mean, std):\n super(Normalization, self).__init__()\n # .view the mean and std to make them [C x 1 x 1] so that they can\n # directly work with image Tensor of shape [B x C x H x W].\n # B is batch size. C is number of channels. H is height and W is width.\n self.mean = torch.tensor(mean).view(-1, 1, 1)\n self.std = torch.tensor(std).view(-1, 1, 1)\n\n def forward(self, img):\n # normalize img\n return (img - self.mean) / self.std",
"_____no_output_____"
],
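[
"# Sketch: applying the normalization module to the style image should roughly\n# centre the ImageNet-mean pixel values around zero.\n_norm = Normalization(cnn_normalization_mean, cnn_normalization_std).to(device)\nprint(_norm(style_img).mean().item())",
"_____no_output_____"
],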
[
"# desired depth layers to compute style/content losses :\ncontent_layers_default = ['conv_4']\nstyle_layers_default = ['conv_1', 'conv_2', 'conv_3', 'conv_4', 'conv_5']\n\ndef get_style_model_and_losses(cnn, normalization_mean, normalization_std,\n style_img, content_img,\n content_layers=content_layers_default,\n style_layers=style_layers_default):\n # normalization module\n normalization = Normalization(normalization_mean, normalization_std).to(device)\n\n # just in order to have an iterable access to or list of content/syle\n # losses\n content_losses = []\n style_losses = []\n\n # assuming that cnn is a nn.Sequential, so we make a new nn.Sequential\n # to put in modules that are supposed to be activated sequentially\n model = nn.Sequential(normalization)\n\n i = 0 # increment every time we see a conv\n for layer in cnn.children():\n if isinstance(layer, nn.Conv2d):\n i += 1\n name = 'conv_{}'.format(i)\n elif isinstance(layer, nn.ReLU):\n name = 'relu_{}'.format(i)\n # The in-place version doesn't play very nicely with the ContentLoss\n # and StyleLoss we insert below. So we replace with out-of-place\n # ones here.\n layer = nn.ReLU(inplace=False)\n elif isinstance(layer, nn.MaxPool2d):\n name = 'pool_{}'.format(i)\n elif isinstance(layer, nn.BatchNorm2d):\n name = 'bn_{}'.format(i)\n else:\n raise RuntimeError('Unrecognized layer: {}'.format(layer.__class__.__name__))\n\n model.add_module(name, layer)\n\n if name in content_layers:\n # add content loss:\n target = model(content_img).detach()\n content_loss = ContentLoss(target)\n model.add_module(\"content_loss_{}\".format(i), content_loss)\n content_losses.append(content_loss)\n\n if name in style_layers:\n # add style loss:\n target_feature = model(style_img).detach()\n style_loss = StyleLoss(target_feature)\n model.add_module(\"style_loss_{}\".format(i), style_loss)\n style_losses.append(style_loss)\n\n # now we trim off the layers after the last content and style losses\n for i in range(len(model) - 1, -1, -1):\n if isinstance(model[i], ContentLoss) or isinstance(model[i], StyleLoss):\n break\n\n model = model[:(i + 1)]\n\n return model, style_losses, content_losses",
"_____no_output_____"
],
[
"input_img = content_img.clone()\n# if you want to use white noise instead uncomment the below line:\n# input_img = torch.randn(content_img.data.size(), device=device)\n\n# add the original input image to the figure:\nplt.figure()\nimshow(input_img, title='Input Image')",
"_____no_output_____"
],
[
"def get_input_optimizer(input_img):\n # this line to show that input is a parameter that requires a gradient\n optimizer = optim.LBFGS([input_img])\n return optimizer",
"_____no_output_____"
],
[
"def run_style_transfer(cnn, normalization_mean, normalization_std,\n content_img, style_img, input_img, num_steps=300,\n style_weight=1000000, content_weight=1):\n \"\"\"Run the style transfer.\"\"\"\n print('Building the style transfer model..')\n model, style_losses, content_losses = get_style_model_and_losses(cnn,\n normalization_mean, normalization_std, style_img, content_img)\n\n # We want to optimize the input and not the model parameters so we\n # update all the requires_grad fields accordingly\n input_img.requires_grad_(True)\n model.requires_grad_(False)\n\n optimizer = get_input_optimizer(input_img)\n\n print('Optimizing..')\n run = [0]\n while run[0] <= num_steps:\n\n def closure():\n # correct the values of updated input image\n with torch.no_grad():\n input_img.clamp_(0, 1)\n\n optimizer.zero_grad()\n model(input_img)\n style_score = 0\n content_score = 0\n\n for sl in style_losses:\n style_score += sl.loss\n for cl in content_losses:\n content_score += cl.loss\n\n style_score *= style_weight\n content_score *= content_weight\n\n loss = style_score + content_score\n loss.backward()\n\n run[0] += 1\n if run[0] % 50 == 0:\n print(\"run {}:\".format(run))\n print('Style Loss : {:4f} Content Loss: {:4f}'.format(\n style_score.item(), content_score.item()))\n print()\n\n return style_score + content_score\n\n optimizer.step(closure)\n\n # a last correction...\n with torch.no_grad():\n input_img.clamp_(0, 1)\n\n return input_img",
"_____no_output_____"
]
],
[
[
"### Show your output image",
"_____no_output_____"
]
],
[
[
"output = run_style_transfer(cnn, cnn_normalization_mean, cnn_normalization_std,\n content_img, style_img, input_img)\n\nplt.figure()\nimshow(output, title='Output Image')\n\n# sphinx_gallery_thumbnail_number = 4\nplt.ioff()\nplt.show()",
"Building the style transfer model..\nOptimizing..\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
d0ecdcab1381e35d305b0531301e2202a0ae48f6 | 14,095 | ipynb | Jupyter Notebook | Assignment1.ipynb | rnbdsh/lstm_rnn | 5d181b382e8aa392ec949636fbe650b8fa9955c8 | [
"CC0-1.0"
] | 1 | 2020-12-07T23:22:59.000Z | 2020-12-07T23:22:59.000Z | Assignment1.ipynb | rnbdsh/lstm_rnn | 5d181b382e8aa392ec949636fbe650b8fa9955c8 | [
"CC0-1.0"
] | null | null | null | Assignment1.ipynb | rnbdsh/lstm_rnn | 5d181b382e8aa392ec949636fbe650b8fa9955c8 | [
"CC0-1.0"
] | null | null | null | 36.515544 | 138 | 0.505286 | [
[
[
"# Assignment 1: Numpy RNN\nImplement a RNN and run BPTT",
"_____no_output_____"
]
],
[
[
"from typing import Dict, Tuple\nimport numpy as np",
"_____no_output_____"
],
[
"class RNN(object):\n \"\"\"Numpy implementation of sequence-to-one recurrent neural network for regression tasks.\"\"\"\n \n def __init__(self, input_size: int, hidden_size: int, output_size: int):\n \"\"\"Initialization \n\n Parameters\n ----------\n input_size : int\n Number of input features per time step\n hidden_size : int\n Number of hidden units in the RNN\n output_size : int\n Number of output units.\n \"\"\"\n super(RNN, self).__init__()\n self.input_size = input_size # D in literature\n self.hidden_size = hidden_size # I in literature\n self.output_size = output_size # K in literature\n\n # create and initialize weights of the network\n # as 90% of the usages in the scriptum are W.T, R.T, V.T\n init = lambda shape: np.random.uniform(-0.2, 0.2, shape)\n self.W = init((hidden_size, input_size)) # I X D\n self.R = init((hidden_size, hidden_size)) # I x I\n self.bs = np.zeros((hidden_size))\n self.V = init((output_size, hidden_size)) # K x I\n self.by = np.zeros((output_size))\n\n # place holder to store intermediates for backprop\n self.a = None\n self.y_hat = None\n self.grads = None\n self.x = None\n\n \n def forward(self, x: np.ndarray) -> np.ndarray:\n \"\"\"Forward pass through the RNN.\n\n Parameters\n ----------\n x : np.ndarray\n Input sequence(s) of shape [sequence length, number of features]\n\n Returns\n -------\n NumPy array containing the network prediction for the input sample.\n \"\"\"\n self.x = x\n # as we have no activation function (f(t) is linear)\n # a(t) = f(s(t)) = s(t) = W^T . x(t) + R^T . a(t-1) + bs\n # = tanh( W^T . x(t) + R^T . a(t-1) + bs )\n self.a = np.zeros((self.input_size, self.hidden_size)) # to make accessing t = -1 possible\n for t in range(len(x)):\n self.a[t] = np.tanh(self.W @ x[t] + self.R @ self.a[t-1] + self.bs)\n self.y_hat = self.V @ self.a[t] + self.by\n return self.y_hat # sequence-to-1 model, so we only return the last \n\n \n def forward_fast(self, x: np.ndarray) -> np.ndarray:\n \"\"\" optimized method without saving to self.a \"\"\"\n a = np.tanh(self.W @ x[0] + self.bs)\n for t in range(1, len(x)):\n a = np.tanh(self.W @ x[t] + self.R @ a + self.bs)\n return self.V @ a + self.by\n\n \n def backward(self, d_loss: np.ndarray) -> Dict:\n \"\"\"Calculate the backward pass through the RNN.\n \n Parameters\n ----------\n d_loss : np.ndarray\n The gradient of the loss w.r.t the network output in the shape [output_size,]\n\n Returns\n -------\n Dictionary containing the gradients for each network weight as key-value pair.\n \"\"\"\n # create view, so that we don't have to reshape every time we call it\n a = self.a.reshape(self.a.shape[0], 1, self.a.shape[1])\n x = self.x.reshape(self.x.shape[0], 1, self.x.shape[1])\n \n # needs to be calculated only once\n d_V = d_loss @ a[-1]\n d_by = d_loss\n \n # init with 0 and sum it up\n d_W = np.zeros_like(self.W)\n d_R = np.zeros_like(self.R)\n d_bs = np.zeros_like(self.bs)\n \n # instead of using * diag, we use elementwise multiplication\n delta = d_loss.T @ self.V * (1 - a[-1] ** 2)\n \n for t in reversed(range(self.input_size)):\n d_bs += delta.reshape(self.bs.shape)\n d_W += delta.T @ x[t]\n if t > 0:\n d_R += delta.T @ a[t-1]\n # a[t] = tanh(..) 
-> derivation = 1-tanh² -> reuse already calculated tanh\n # calculate delta for the next step at t-1\n delta = delta @ self.R * (1 - a[t-1] ** 2)\n \n self.grads = {'W': d_W, 'R': d_R, 'V': d_V, 'bs': d_bs, 'by': d_by}\n return self.grads\n \n \n def update(self, lr: float):\n # update weights, aggregation is already done in backward\n w = self.get_weights()\n for name in w.keys():\n w[name] -= lr * self.grads[name]\n \n # reset internal class attributes\n self.grads = {}\n self.y_hat, self.a = None, None\n \n def get_weights(self) -> Dict:\n return {'W': self.W, 'R': self.R, 'V': self.V, 'bs': self.bs, 'by': self.by}\n \n def set_weights(self, weights: Dict):\n if not all(name in weights.keys() for name in ['W', 'R', 'V']):\n raise ValueError(\"Missing one of 'W', 'R', 'V' keys in the weight dictionary\")\n for name, w in weights.items():\n self.__dir__[\"name\"] = w",
"_____no_output_____"
]
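,
[
"# Usage sketch (arbitrary sizes): a forward pass returns one prediction of\n# shape [output_size] for the whole input sequence.\n_rnn = RNN(input_size=3, hidden_size=4, output_size=2)\n_x = np.random.rand(3, 3)  # [sequence length, number of features]\nprint(_rnn.forward(_x).shape)",
"_____no_output_____"
]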
],
[
[
"<h2 style=\"color:rgb(0,120,170)\">Numerical gradient check</h2>\n\nTo validate your implementation, especially the backward pass, use the two-sided gradient approximation given by the equation below.",
"_____no_output_____"
]
],
[
[
"def get_numerical_gradient(model: RNN, x: np.ndarray, eps: float=1e-7) -> Dict:\n \"\"\"Implementation of the two-sided numerical gradient approximation\n \n Parameters\n ----------\n model : RNN\n The RNN model object\n x : np.ndarray\n Input sequence(s) of shape [sequence length, number of features]\n eps : float\n The epsilon used for numerical gradient approximation\n \n Returns\n -------\n A dictionary containing the numerical gradients for each weight of the RNN. Make sure\n to name the dictionary keys like the names of the RNN gradients dictionary (e.g. \n 'd_R' for the weight 'R')\n \"\"\"\n g = {}\n # iterate all weight-matrices w and all positions i, and calculate the num. grad.\n for name, w in model.get_weights().items():\n # initialize weight gradients with zero\n wg = np.zeros_like(w)\n # this makes a backup copy of original weights\n for i, orig in np.ndenumerate(w): # can be 1d or 2d\n # caculate for +eps\n w[i] += eps\n plus = model.forward_fast(x)\n\n # calculate for -eps\n w[i] = orig - eps\n minus = model.forward_fast(x)\n\n w[i] = orig # reset\n # set weight gradient for this weight and this index\n wg[i] = np.sum(plus - minus) / (2*eps)\n # add calculated weights into return-weights\n g[name] = wg\n return g\n\n\ndef get_analytical_gradient(model: RNN, x: np.ndarray) -> Dict:\n \"\"\"Helper function to get the analytical gradient.\n \n Parameters\n ----------\n model : RNN\n The RNN model object\n x : np.ndarray\n Input sequence(s) of shape [sequence length, number of features]\n \n Returns\n -------\n A dictionary containing the analytical gradients for each weight of the RNN.\n \"\"\"\n loss = model.forward(x)\n return model.backward(np.ones((model.output_size, 1)))\n\n \ndef gradient_check(model: RNN, x: np.ndarray, treshold: float = 1e-7):\n \"\"\"Perform gradient checking.\n \n You don't have to do anything in this function.\n \n Parameters\n ----------\n model : RNN\n The RNN model object\n x : np.ndarray\n Input sequence(s) of shape [sequence length, number of features]\n eps : float\n The epsilon used for numerical gradient approximation \n \"\"\"\n numerical_grads = get_numerical_gradient(model, x)\n analytical_grads = get_analytical_gradient(model, x)\n \n for key, num_grad in numerical_grads.items():\n difference = np.linalg.norm(num_grad - analytical_grads[key])\n # assert num_grad.shape == analytical_grads[key].shape\n \n if difference < treshold:\n print(f\"Gradient check for {key} passed (difference {difference:.3e})\")\n else:\n print(f\"Gradient check for {key} failed (difference {difference:.3e})\")",
"_____no_output_____"
]
],
[
[
"<h2 style=\"color:rgb(0,120,170)\">Compare the time for gradient computation</h2>\nFinally, use the code below to investigate the benefit of being able to calculate the exact analytical gradient.",
"_____no_output_____"
]
],
[
[
"print(\"Gradient check with a single output neuron:\")\nmodel = RNN(input_size=5, hidden_size=10, output_size=1)\nx = np.random.rand(5, 5)\ngradient_check(model, x)\n\nprint(\"\\nGradient check with multiple output neurons:\")\nmodel = RNN(input_size=5, hidden_size=10, output_size=5)\nx = np.random.rand(5, 5)\ngradient_check(model, x)",
"Gradient check with a single output neuron:\nGradient check for W passed (difference 4.371e-10)\nGradient check for R passed (difference 4.748e-10)\nGradient check for V passed (difference 2.930e-11)\nGradient check for bs passed (difference 1.721e-10)\nGradient check for by passed (difference 5.939e-12)\n\nGradient check with multiple output neurons:\nGradient check for W passed (difference 5.764e-10)\nGradient check for R passed (difference 7.208e-10)\nGradient check for V passed (difference 1.329e-10)\nGradient check for bs passed (difference 2.681e-10)\nGradient check for by passed (difference 1.123e-10)\n"
],
[
"analytical_time = %timeit -o get_analytical_gradient(model, x)",
"126 µs ± 3.91 µs per loop (mean ± std. dev. of 7 runs, 10000 loops each)\n"
],
[
"numerical_time = %timeit -o get_numerical_gradient(model, x)",
"12.1 ms ± 328 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n"
],
[
"if analytical_time.average < numerical_time.average:\n fraction = numerical_time.average / analytical_time.average\n print(f\"The analytical gradient computation was {fraction:.0f} times faster\")\nelse:\n fraction = analytical_time.average / numerical_time.average\n print(f\"The numerical gradient computation was {fraction:.0f} times faster\")",
"The analytical gradient computation was 96 times faster\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
d0eced785aa5fc8f1153c5ada9f43ceba6435e60 | 35,489 | ipynb | Jupyter Notebook | The_Basics_of_Supervised_Learning_For_Astronomers.ipynb | marixko/tutorial_classifiers | 3cd57a06edf7d4215bdad50c551eb2092a41d4f5 | [
"MIT"
] | 9 | 2019-04-24T03:24:51.000Z | 2019-10-25T18:49:40.000Z | The_Basics_of_Supervised_Learning_For_Astronomers.ipynb | marixko/tutorial_classifiers | 3cd57a06edf7d4215bdad50c551eb2092a41d4f5 | [
"MIT"
] | null | null | null | The_Basics_of_Supervised_Learning_For_Astronomers.ipynb | marixko/tutorial_classifiers | 3cd57a06edf7d4215bdad50c551eb2092a41d4f5 | [
"MIT"
] | 2 | 2019-04-24T20:44:40.000Z | 2021-01-29T18:45:48.000Z | 35.207341 | 426 | 0.522697 | [
[
[
"<a href=\"https://colab.research.google.com/github/marixko/Supervised_Learning_Tutorial/blob/master/The_Basics_of_Supervised_Learning_For_Astronomers.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"\n###**About Google's Colaboratory: **\n\nThis is a free Jupyter environment that runs in Google's cloud, which means you can run codes in your computer without having to install anything. You can create a copy of this tutorial in your own Google's Drive and make your own changes. Colaboratory also allows you to easily share your code with others! [Read more](https://colab.research.google.com/notebooks/basic_features_overview.ipynb)\n\n\n\n---",
"_____no_output_____"
],
[
"# Introduction\n\n\n> **Author**: Lilianne M. I. Nakazono (email: [email protected]) \n\n> PhD student at Instituto de Astronomia, Geofísica e Ciências Atmosféricas -- Universidade de São Paulo (IAG-USP). Bachelor's degree in Statistics (IME-USP) and in Astronomy (IAG-USP). \n\n> **April 2019**\n\n---\n\n\n\n\n\n\n",
"_____no_output_____"
],
[
"\n\n###**What is Machine Learning?**\n\nFrom SAS: \n\n>> *\"Machine learning is a method of data analysis that automates analytical model building. It is a branch of artificial intelligence based on the idea that systems can learn from data, identify patterns and make decisions with minimal human intervention.\"*\n\n###**What is Supervised Learning?**#\n\nFrom S.B. Kotsiantis (2007): \n\n>> *\"Every instance in any dataset used by machine learning algorithms is represented using the same set of features. The features may be continuous, categorical or binary. If instances are given with known labels (the corresponding correct outputs) then the learning is called *supervised*, in contrast to *unsupervised learning*, where instances are unlabeled.\"*\n\n\n\n---\n\n\n###**STAR/GALAXY separation**#\n\nIn this tutorial we will perform a STAR/GALAXY separation using a real dataset from [S-PLUS](http://www.splus.iag.usp.br/). This data were already matched with [SDSS](https://www.sdss.org/) (DR15) spectroscopical data and it will be used to train and test the supervised classifiers. The final step (not included in this tutorial) is to use the trained model to predict the classification of your unknown objects.\n \n This tutorial will be entirely in Python 3 and we will go through the following topics:\n- Introduction to `Pandas` ([Documentation](https://pandas.pydata.org/))\n- Data visualization with `seaborn` ([Documentation](https://seaborn.pydata.org/))\n- Classification methods with `sklearn` ([Documentation](https://scikit-learn.org/stable/index.html))\n\n---\n\n",
"_____no_output_____"
],
[
"**Additional information about the data**\n\n\n\nID - Object ID Number\n\nRA - Right Ascension in decimal degrees [J2000]\n\nDec - Declination in decimal degrees [J2000]\n\nFWHM_n - Normalized Full width at half maximum to detection image seeing (pixels)\n\n A - Profile RMS along major axis (pixels)\n \nB - Profile RMS along minor axis (pixels)\n\nKrRadDet - Kron apertures in units of A or B (pixels)\n\nuJAVA_auto, F378_auto, F395_auto, F410_auto, F430_auto, g_auto, F515_auto, r_auto, F660_auto, i_auto, F861_auto, z_auto - Total-restricted magnitudes (AB) in corresponding filters\n\nclass - Spectroscopic classification from SDSS\n",
"_____no_output_____"
],
[
"#**1. Libraries and Functions**\n",
"_____no_output_____"
]
],
[
[
"import seaborn as sns\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nimport matplotlib.pyplot as plt\nfrom sklearn.svm import SVC\nfrom sklearn.metrics import confusion_matrix\nimport itertools\nfrom mlxtend.plotting import plot_decision_regions\nimport matplotlib as mpl \nimport matplotlib.gridspec as gridspec\nfrom sklearn import metrics\n\npd.set_option(\"display.max_rows\", None)\npd.set_option(\"display.max_columns\", None)",
"_____no_output_____"
],
[
"# Modified from: https://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html\ndef plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.3f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()",
"_____no_output_____"
]
],
[
[
"#**2. Read Data**\n\nFor statistical/machine learning purposes it is **always** better to read the data in a dataframe (data structured in labels for rows and columns) format.\n",
"_____no_output_____"
]
],
[
[
"#Reading dataset from github and saving as dataframe\nurl = 'https://raw.githubusercontent.com/marixko/'\nfile = 'tutorial_classifiers/master/tutorial_data.txt'\ndf = pd.read_csv(url+file, delim_whitespace=True, low_memory=False)",
"_____no_output_____"
],
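[
"# Optional quick look (sketch): dimensions and column types of the dataframe.\nprint(df.shape)\nprint(df.dtypes.value_counts())",
"_____no_output_____"
],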
[
"# Run this cell to quickly check your dataset\ndf",
"_____no_output_____"
],
[
"# Check header\nlist(df)",
"_____no_output_____"
]
],
[
[
"#**3. Pre-analysis**",
"_____no_output_____"
],
[
"\n\nBefore applying any kind of analysis, you need to be aware of any problem in your dataset that can affect your training (e.g. missing values and outliers). Sometimes it will require pre-processing your dataset beforehand (e.g. for missing values, interpolating values or removing them from data may be necessary). ",
"_____no_output_____"
]
],
[
[
"# You can check your dataset by using describe(). \n# It will return the total count, mean, standard deviation,\n# minimum, Q1, Q2 (median), Q3 and maximum\n\ndf.describe()\n\n# If you want to check a specific feature use for instance:\n\n# df.FWHM_n.describe()",
"_____no_output_____"
]
],
[
[
"Another good practice is to check high correlations in your dataset, which can allow you to identify which features are redundant. Thus, you can also be able to reduce dimensionality of your dataset.\n\n\n>> *\"The fact that many features depend on one another often unduly influences the accuracy of supervised ML classification models. This problem can be addressed by construction new features from the basic feature set.\"* -- S.B. Kotsiantis (2007)\n\n(One way to deal with multicollinearity -- when 2 or more features are moderately or highly correlated -- is creating a new feature set using [Principal Component Analysis](https://en.wikipedia.org/wiki/Principal_component_analysis).)",
"_____no_output_____"
]
],
[
[
"plt.close()\nf, ax = plt.subplots(figsize=(8, 8))\nvar = ['FWHM_n', 'A', 'B', 'KrRadDet', 'uJAVA_auto', \n 'F378_auto', 'F395_auto', 'F410_auto', 'g_auto', 'F515_auto',\n 'r_auto', 'F660_auto', 'i_auto', 'F861_auto', 'z_auto']\ncorr = df[var].corr()\nsns.heatmap(corr, mask=np.zeros_like(corr, dtype=np.bool), \n cmap=sns.diverging_palette(220, 10, as_cmap=True),\n square=True, ax=ax, center=0, vmin=-1, vmax=1)\nplt.title('Correlation Matrix')\nplt.show()\n\n#It would also be interesting to check the correlation plot for each class",
"_____no_output_____"
]
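,
[
"# Sketch (not part of the original tutorial; n_components=5 is an arbitrary\n# choice): PCA builds decorrelated combinations of the correlated features.\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import StandardScaler\npca = PCA(n_components=5)\npcs = pca.fit_transform(StandardScaler().fit_transform(df[var]))\nprint(pca.explained_variance_ratio_)",
"_____no_output_____"
]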
],
[
[
"\n\nQualitative variables can also be included. In this case, however, there are no qualitative features that came from S-PLUS observations.\nBut let's check the classification label counts:",
"_____no_output_____"
]
],
[
[
"# For qualitative variables, use value_counts()\ndf['class'].value_counts()",
"_____no_output_____"
]
],
[
[
"Note that for this example the classes are balanced. It represents a best case scenario, which rarely happens in the real world. \n\nBe very careful with imbalanced datasets! Some methods and metrics are not good for imbalanced cases, some manipulation in your sampling method (e.g. over/under-sampling) or in your algorithm (e.g. penalized classification) may be necessary. \n\n\n",
"_____no_output_____"
],
[
"> **Note:** Supervised Learning is not suitable for problems like \"I want to find very rare objects that we have never found before!\". The learning process is based on your ground-truth samples, so you need to ask yourself \"Is my ground-truth sample representative of what I want to find?\"",
"_____no_output_____"
],
[
"#** 4. Feature Selection**",
"_____no_output_____"
],
[
"A very important step of the analysis is choosing your input features. Sometimes you already know which features you need to use to achieve your goals, which comes from your previous knowledge about the topic. However, you can also evaluate which features will give you the best performance. We will discuss more about it on the following sections. \n\nFor didactic purposes, let's consider two feature spaces:\n\n> `dim15` = {all useful information from the catalog}\n\n> `dim2` = {normalized FWHM, Profile RMS along major axis}",
"_____no_output_____"
]
],
[
[
"dim15 = ['FWHM_n', 'A', 'B', 'KrRadDet', 'uJAVA_auto', \n 'F378_auto', 'F395_auto', 'F410_auto', 'g_auto', 'F515_auto',\n 'r_auto', 'F660_auto', 'i_auto', 'F861_auto', 'z_auto']\n\ndim2 = ['FWHM_n','A']",
"_____no_output_____"
]
],
[
[
"#** 5. Sampling training and testing sets **",
"_____no_output_____"
],
[
"Regardless of the classification method you choose, you will want to estimate how accurately your predictive model will perform. This is called **cross-validation** and there are several ways to do it. Some examples are:\n\n* **Holdout method**: randomly separate your original dataset into the training and the testing set. It's very common to adopt 1:3 ratio for the size of test/training sets, although you can choose another ratio. Very simple and fast computationally, but you need to be cautious as it is a single run method. Thus, it may be subject to large variabilities\n\n* **Leave-p-out cross-validation**:\nUses p observations as the testing set and the remaining observations as the training set. Repeat to cover any sampling possibility\n\n* **k-fold cross-validation**: the original dataset is randomly partitioned into k equal sized subsamples. One subsample will be used as testing set and the other k-1 as training set. Repeat k times, until each subsample is used exactly once as the testing set.\n\n\nI strongly recommend that you also check the other methods before choosing one. For this tutorial we will use the **Holdout method**, for simplicity.",
"_____no_output_____"
]
],
[
[
"label = pd.DataFrame(df['class'])\n\n# Transform strings into numbered labels\nlabel.loc[label['class'] == 'STAR', 'class'] = 0\nlabel.loc[label['class'] == 'GALAXY', 'class'] = 1\n\n# Use train_test_split() to sample your training and testing sets\n# Let's fix a random_state=42 in order to have the same sets \n# on each run. Stratify parameter guarantees that the original \n# proportion of the classes is maintained \n\nX_train, X_test, y_train, y_test = train_test_split(df[dim15], label, \n test_size=0.3, \n random_state=42,\n stratify = label)",
"_____no_output_____"
]
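,
[
"# Sketch of an alternative to the holdout split above (cv=5 is an arbitrary\n# choice): k-fold cross-validation with cross_val_score on the dim2 features.\nfrom sklearn.model_selection import cross_val_score\ncv_scores = cross_val_score(SVC(kernel='linear'), df[dim2],\n                            label.values.ravel().astype(int), cv=5)\nprint(cv_scores, cv_scores.mean())",
"_____no_output_____"
]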
],
[
[
"#** 6. Classification method: Support Vector Machine (SVM)**",
"_____no_output_____"
],
[
"We finally reached the point where we are going to run a classification algorithm. It is common to think, at first, that this would be the most complicated part, but a well-done job will require you to spend most of your time on the other steps. \n\nThere are several classification methods you can use, each of them has its own pros and cons, depending on your science goals and on your dataset. I will give you an example using Support Vector Machine (SVM) with linear kernel, but I recommend you to also check other methods (e.g. Random Forest, Logistic Regression, K-NN, ...)\n\n**DON'T FORGET TO:**\n \n - Learn the basic idea of the method. You don't need to know all the math behind it, but you need to know how it works intuitively\n - Check what are the assumptions of the method and if your dataset is in agreement with it\n - Learn what the parameters of your model (a.k.a. hyperparameters) do. Choosing them wisely can be crucial to have good results in the end. Note: the hyperparameters space can also be part of your validation tests",
"_____no_output_____"
],
[
"## 6.1. Basic idea",
"_____no_output_____"
],
[
"The SVM finds the hyperplane that best separates your data, based on maximizing the margin between each class. For instance, in one dimension SVM will find a point. For two dimensions, it will be a line. For three dimensions, it will be a plane.\n\nTo use a linear kernel, we assume that the data is linearly separable. Otherwise, we should use another kernel (e.g. polynomial).\n\n\nRead more about SVM [here](https://scikit-learn.org/stable/modules/svm.html#scores-probabilities)\n\n",
"_____no_output_____"
],
[
"## 6.2. Feature space: dim2",
"_____no_output_____"
]
],
[
[
"# Train your model:\nclf2 = SVC(kernel= 'linear')\nclf2.fit(X_train[dim2], y_train.values.ravel()) \n\n# Make the predictions: \ny_pred2 = clf2.predict(X_test[dim2])\n\n# Plot confusion matrix:\nmatrix = confusion_matrix(y_test['class'], y_pred2)\nfig = plot_confusion_matrix(matrix, classes=['STAR','GALAXY'])\nplt.show()",
"_____no_output_____"
]
],
[
[
"From the confusion matrix above we can already see how good the results are: most of our stars (galaxies) are being assigned as stars (galaxies) and just a few percent were misclassified.\n\nNow let's check the plot and how the separation looks like:",
"_____no_output_____"
]
],
[
[
"plt.style.use('seaborn-pastel')\nfig = plt.figure(figsize=(18,6))\ngs = gridspec.GridSpec(1, 2)\nax = plt.subplot(gs[0,0])\nsns.scatterplot(x=X_train.FWHM_n, y=X_train.A, \n hue=y_train['class'])\n\n#Calculate margin (from https://scikit-learn.org/stable/auto_examples/svm/plot_svm_margin.html)\nw = clf2.coef_[0]\na = -w[0] / w[1]\nxx = np.linspace(-5, 5)\nyy = a * xx - (clf2.intercept_[0]) / w[1]\nmargin = 1 / np.sqrt(np.sum(clf2.coef_ ** 2))\nyy_down = yy - np.sqrt(1 + a ** 2) * margin\nyy_up = yy + np.sqrt(1 + a ** 2) * margin\n\n#Plot margin\nplt.plot(xx, yy, 'k-')\nplt.plot(xx, yy_down, 'k--')\nplt.plot(xx, yy_up, 'k--')\nplt.xlabel('FWHM_n')\nplt.ylabel('A')\nplt.xlim(0,8)\nplt.ylim(0.8, 10)\n\nplt.title('Training set')\n\n\nax = plt.subplot(gs[0,1])\nsns.scatterplot(x=X_test.FWHM_n , y=X_test.A, hue=y_test['class'])\nplt.plot(xx, yy, 'k-')\nplt.plot(xx, yy_down, 'k--')\nplt.plot(xx, yy_up, 'k--')\nplt.xlim(0,8)\nplt.ylim(0.8, 10)\nplt.title('Testing set')\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"The solid line corresponds to the optimal threshold found by SVM. The dashed lines in the plots above correspond to the maximized margin that I mentioned in Section 6.1. \n\nThese are calculated using only a small part of the data: the objects around where the separation may occur, they are called the Support Vectors. Let's check which ones were considered for this classification:",
"_____no_output_____"
]
],
[
[
"fig = plt.figure(figsize=(9,7))\nsns.scatterplot(x=X_train[dim2].FWHM_n, y=X_train[dim2].A, \n hue=y_train['class'])\n\nplt.scatter(clf2.support_vectors_[:, 0], \nclf2.support_vectors_[:, 1], s=8, \nzorder=10,color='red', marker='+')\n\nplt.xlim(0.9,2)\nplt.ylim(0.8,5)\nplt.plot(xx, yy, 'k-')\nplt.plot(xx, yy_down, 'k--')\nplt.plot(xx, yy_up, 'k--')\nplt.title('Support vectors (Training set)')",
"_____no_output_____"
]
],
[
[
"## 6.3. Feature space: dim15",
"_____no_output_____"
],
[
"In the last section we saw how SVM works in a 2D space. In that case, it is possible to visually check the separation. However, we have much more information available. if we analyse them altogether, it can improve our results. Although, it is impossible to visually check the results, so we need to rely on performance metrics that we will discuss further on the next section. \n",
"_____no_output_____"
]
],
[
[
"# Train your model:\nclf15 = SVC(kernel= 'linear')\nclf15.fit(X_train, y_train.values.ravel())\n\n# Make predictions:\ny_pred = clf15.predict(X_test)\n\n# Plot confusion matrix:\nmatrix = confusion_matrix(y_test['class'], y_pred)\nfig = plot_confusion_matrix(matrix, classes=['STAR','GALAXY'])\nplt.show()\n\n# Yeah, as simple as that! :) ",
"_____no_output_____"
]
],
[
[
"#** 7. Validation and Model Selection**",
"_____no_output_____"
],
[
"How can we choose between two (or more) different models? \n\nFor that, we have several performance metrics that we can consider when selecting the best model and I will show a few of them.\n\nThe way you are going to analyze the metrics depends on your science goals. For instance: \n\n* In a STAR/GALAXY separation you are probably not interested in a specific class, but in the overall classification. You can evaluate your model using, for example, Accuracy or F-measure\n\n* Suppose you had a STAR/QSO problem instead, where your main goal is to find new QSOs. You can evaluate your model using, for example, Precision, Recall or F-measure. \n\n\n",
"_____no_output_____"
],
[
"## 7.1 Accuracy",
"_____no_output_____"
],
[
"Defined as the fraction of correct predictions.\n\n(Note: accuracy will be biased towards the class with higher frequency, don't rely on this measurement if you have an imbalanced dataset)",
"_____no_output_____"
]
],
[
[
"print(\"Accuracy\")\nprint(\" First model (dim2):\", \n np.round(100*metrics.accuracy_score(y_test, y_pred2),2), '%')\nprint(\" Second model (dim15):\", \n np.round(100*metrics.accuracy_score(y_test, y_pred),2), '%')",
"_____no_output_____"
]
],
[
[
"## 7.2. Precision",
"_____no_output_____"
],
[
"Defined as:\n\n> Precision $\\equiv \\frac{TP}{(TP+FP)}$\n\nTP - True Positive ; FP - False Positive\n\nNote that you need to define which class will be your \"positive\". For example:\n\n \n| STAR (predicted) | GALAXY (predicted)\n--- | ---\n**STAR** (true label) | True Negative | False Positive\n**GALAXY** (true label)| False Negative | True Positive\n\n\nIn Astronomy, it's called **purity**.",
"_____no_output_____"
]
],
[
[
"P2 = metrics.precision_score(y_test, y_pred2, pos_label=1)\nP = metrics.precision_score(y_test, y_pred, pos_label=1)\n\nprint(\"Galaxy Precision\")\nprint(\" First model (dim2):\", np.round(100*P2,2), '%')\nprint(\" Second model (dim15):\", np.round(100*P,2), '%')\n\n# Exercise: Calculate star precision for each model",
"_____no_output_____"
]
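,
[
"# Sketch of the exercise above: star precision uses pos_label=0, since STAR\n# was encoded as 0 when the label dataframe was created.\nP2_star = metrics.precision_score(y_test, y_pred2, pos_label=0)\nP_star = metrics.precision_score(y_test, y_pred, pos_label=0)\nprint(\"Star Precision\")\nprint(\"  First model (dim2):\", np.round(100*P2_star,2), '%')\nprint(\"  Second model (dim15):\", np.round(100*P_star,2), '%')",
"_____no_output_____"
]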
],
[
[
"## 7.3. Recall",
"_____no_output_____"
],
[
"Defined as:\n\n> Recall $\\equiv \\frac{TP}{(TP+FN)}$\n\nTP - True Positive ; FN - False Negative\n\nIn Astronomy, it's called **completeness**.",
"_____no_output_____"
]
],
[
[
"R2 = metrics.recall_score(y_test, y_pred2, pos_label=1)\nR = metrics.recall_score(y_test, y_pred, pos_label=1)\n\n\nprint(\"Galaxy Recall\")\nprint(\" First model (dim2):\", np.round(100*R2,2), '%')\nprint(\" Second model (dim15):\", np.round(100*R,2), '%')\n\n# Exercise: Calculate star recall for each model",
"_____no_output_____"
]
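,
[
"# Sketch of the exercise above: star recall uses pos_label=0 (STAR).\nR2_star = metrics.recall_score(y_test, y_pred2, pos_label=0)\nR_star = metrics.recall_score(y_test, y_pred, pos_label=0)\nprint(\"Star Recall\")\nprint(\"  First model (dim2):\", np.round(100*R2_star,2), '%')\nprint(\"  Second model (dim15):\", np.round(100*R_star,2), '%')",
"_____no_output_____"
]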
],
[
[
"## 7.4. F-measure",
"_____no_output_____"
],
[
"It's the harmonic mean of Precision and Recall:\n\n$F = \\frac{1}{2}\\Big(P_i^{-1}+R_i^{-1}\\Big)^{-1} = 2 \\times \\frac{P_iR_i}{P_i+R_i}, F \\in [0,1]$\n\n",
"_____no_output_____"
]
],
[
[
"print(\"F-measure\")\nprint(\" First model (dim2):\", np.round(metrics.f1_score(y_test, y_pred2),3))\nprint(\" Second model (dim15):\", np.round(metrics.f1_score(y_test, y_pred),3))",
"_____no_output_____"
]
],
[
[
"## Final message",
"_____no_output_____"
],
[
"We came to the end of this tutorial, yay! :)\n\nAlthough it is called \"Machine Learning\", you are still the one who is going to make crucial decisions. And that is hard work! I hope I was able to give you at least a brief idea of all the steps involved in the process. \n\nNow, play around with the code:\n* Try other algorithms with the same feature selection and compare your results using the performance metrics\n* Test changing the parameters of your model\n* Try it with your own dataset!\n\n\n\n\n\n",
"_____no_output_____"
],
[
"## Read more:\n\n[Supervised Machine Learning: A Review of Classification Techniques](https://books.google.com/books?hl=en&lr=&id=vLiTXDHr_sYC&oi=fnd&pg=PA3&dq=review+supervised+learning&ots=CYpwxt2Bnn&sig=Y79PK3w3Q8CefKaTh03keRFEwyg#v=onepage&q=review%20supervised%20learning&f=false) (S.B. Kotsiantis, 2007)\n\nAn Empirical Comparison of Supervised Learning Algorithms Rich (Rich Caruana and Alexandru Niculescu-Mizil, 2006)\n\nClassification of Imbalanced Data: a Review (Yanmin Sun, Andrew K. C. Wong and Mohamed S. Kamel, 2009)\n\n\n[Cross-validation](https://en.wikipedia.org/wiki/Cross-validation_(statistics)\n\n [A Practical Guide to Support Vector Classification](https://www.csie.ntu.edu.tw/~cjlin/papers/guide/guide.pdf) (Chih-Wei Hsu, Chih-Chung Chang, and Chih-Jen Lin, 2016)\n\n\n\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
]
] |
d0ecf445bece6998befc231609a37dda4fe11e8a | 412,093 | ipynb | Jupyter Notebook | notebooks/Regrid.ipynb | kallisons/kaggle_ds4g_emissions_factor | 0ac88939fee2db8e940870751270fe2dad650449 | [
"MIT"
] | null | null | null | notebooks/Regrid.ipynb | kallisons/kaggle_ds4g_emissions_factor | 0ac88939fee2db8e940870751270fe2dad650449 | [
"MIT"
] | null | null | null | notebooks/Regrid.ipynb | kallisons/kaggle_ds4g_emissions_factor | 0ac88939fee2db8e940870751270fe2dad650449 | [
"MIT"
] | null | null | null | 326.022943 | 44,472 | 0.928769 | [
[
[
"import numpy as np\nimport pandas as pd\nimport xarray as xr\nimport xesmf as xe\nimport json\nimport matplotlib.pyplot as plt\nimport cartopy.crs as ccrs",
"_____no_output_____"
],
[
"def make_regridder(ds, ds_base, variable, algorithm='bilinear'): \n if 'latitude' in ds[variable].dims:\n ds = ds.rename({'latitude': 'lat', 'longitude': 'lon'}).set_coords(['lon', 'lat'])\n ds_regrid = xr.Dataset({'lat': (['lat'], np.arange(np.floor(ds_base['lat'].min().values*10)/10, np.ceil(ds_base['lat'].max().values*10)/10, 0.01)),\n 'lon': (['lon'], np.arange(np.floor(ds_base['lon'].min().values*10)/10, np.ceil(ds_base['lon'].max().values*10)/10, 0.01)),\n }\n )\n\n regridder = xe.Regridder(ds, ds_regrid, algorithm)\n regridder.clean_weight_file()\n return regridder",
"_____no_output_____"
]
],
[
[
"### Create base grid using NO2 dataset",
"_____no_output_____"
]
],
[
[
"ds_s5p = xr.open_dataset('/Users/kasmith/Code/kaggle_ds4g/data/starter_pack/s5p_no2/no2_1year.nc')\nds_no2_clouds = ds_s5p[['NO2_column_number_density', 'cloud_fraction']]\nno2_regridder = make_regridder(ds_no2_clouds, ds_no2_clouds, 'NO2_column_number_density')\nds_base_regrid = no2_regridder(ds_no2_clouds)\nds_base_regrid = ds_base_regrid.where(ds_base_regrid['NO2_column_number_density']!=0.)",
"Create weight file: bilinear_148x475_71x231.nc\nRemove file bilinear_148x475_71x231.nc\nusing dimensions ('lat', 'lon') from data variable NO2_column_number_density as the horizontal dimensions for this dataset.\n"
],
[
"ax = plt.axes(projection=ccrs.LambertConformal(central_longitude=-65, central_latitude=18))\nds_base_regrid.NO2_column_number_density.isel(time=20).plot(ax=ax, transform=ccrs.PlateCarree());\nax.coastlines()\nax.set_extent([-67.5, -65, 17.5, 19])\nax.set_aspect(\"equal\")",
"_____no_output_____"
],
[
"def find_boundaries(ds):\n print('Min Lat', ds.lat.min().values)\n print('Max Lat', ds.lat.max().values)\n print('Min Lon', ds.lon.min().values)\n print('Max Lon', ds.lon.max().values) ",
"_____no_output_____"
]
],
[
[
"### Create land mask for base grid",
"_____no_output_____"
]
],
[
[
"#Download Super High Resolution SST file (0.01 degree grid)\n#https://coastwatch.pfeg.noaa.gov/erddap/griddap/jplG1SST.nc?SST[(2017-09-13T00:00:00Z):1:(2017-09-13T00:00:00Z)][(17.005):1:(19.005)][(-69.995):1:(-64.005)],mask[(2017-09-13T00:00:00Z):1:(2017-09-13T00:00:00Z)][(17.005):1:(19.005)][(-69.995):1:(-64.005)],analysis_error[(2017-09-13T00:00:00Z):1:(2017-09-13T00:00:00Z)][(17.005):1:(19.005)][(-69.995):1:(-64.005)]",
"_____no_output_____"
],
[
"ds_sea = xr.open_dataset('/Users/kasmith/Code/kaggle_ds4g/data/jplG1SST_e435_8209_9395.nc')",
"_____no_output_____"
],
[
"ax = plt.axes(projection=ccrs.LambertConformal(central_longitude=-65, central_latitude=18))\nds_sea.SST.isel(time=0).plot(ax=ax, transform=ccrs.PlateCarree());\nax.coastlines()\nax.set_extent([-67.5, -65, 17.5, 19])\nax.set_aspect(\"equal\")",
"_____no_output_____"
],
[
"sea_regridder = make_regridder(ds_sea, ds_base_regrid, 'SST')\nds_sea_regrid = sea_regridder(ds_sea)\nds_sea_regrid = ds_sea_regrid.where(ds_sea_regrid['SST']!=0.)",
"Create weight file: bilinear_201x600_81x241.nc\nRemove file bilinear_201x600_81x241.nc\nusing dimensions ('latitude', 'longitude') from data variable SST as the horizontal dimensions for this dataset.\n"
],
[
"ax = plt.axes(projection=ccrs.LambertConformal(central_longitude=-65, central_latitude=18))\nds_sea_regrid.SST.isel(time=0).plot(ax=ax, transform=ccrs.PlateCarree());\nax.set_extent([-67.5, -65, 17.5, 19])\nax.set_aspect(\"equal\")",
"_____no_output_____"
],
[
"land_ones = ds_sea_regrid.SST.isel(time=0).fillna(1)\nland_mask = land_ones.where(land_ones ==1.)\nland_mask = land_mask.where(land_mask.lat<18.5)\nland_mask = land_mask.drop('time')\nds_base_regrid.coords['land_mask'] = land_mask",
"_____no_output_____"
],
[
"ax = plt.axes(projection=ccrs.LambertConformal(central_longitude=-65, central_latitude=18))\nland_mask.plot(ax=ax, transform=ccrs.PlateCarree());\nax.set_extent([-67.5, -65, 17.5, 19])\nax.set_aspect(\"equal\")",
"_____no_output_____"
],
[
"ax = plt.axes(projection=ccrs.LambertConformal(central_longitude=-65, central_latitude=18))\nds_base_regrid['NO2_column_number_density'].isel(time=103).where(ds_base_regrid.land_mask == 1).plot(ax=ax, transform=ccrs.PlateCarree())\nax.set_extent([-67.5, -65, 17.5, 19])\nax.set_aspect(\"equal\")",
"_____no_output_____"
]
],
[
[
"### Compute daily averages",
"_____no_output_____"
]
],
[
[
"ds_base = ds_base_regrid.resample(time='1D').mean()",
"//anaconda/envs/xesmf_env/lib/python3.7/site-packages/xarray/core/nanops.py:142: RuntimeWarning: Mean of empty slice\n return np.nanmean(a, axis=axis, dtype=dtype)\n"
],
[
"ax = plt.axes(projection=ccrs.LambertConformal(central_longitude=-65, central_latitude=18))\nds_base['NO2_column_number_density'].isel(time=26).where(ds_base.land_mask == 1).plot(ax=ax, transform=ccrs.PlateCarree())\nax.set_extent([-67.5, -65, 17.5, 19])\nax.set_aspect(\"equal\")",
"_____no_output_____"
]
],
[
[
"### Add wind speed",
"_____no_output_____"
]
],
[
[
"ds_gfs = xr.open_dataset('/Users/kasmith/Code/kaggle_ds4g/data/starter_pack/gfs/gfs_1year.nc')\nds_gfs = ds_gfs.drop('crs')\ngfs_regridder = make_regridder(ds_gfs, ds_base_regrid, 'temperature_2m_above_ground')\nds_gfs_regrid = gfs_regridder(ds_gfs)\nds_gfs_regrid = ds_gfs_regrid.where(ds_gfs_regrid['temperature_2m_above_ground']!=0.)\nds_gfs_regrid.coords['land_mask'] = land_mask\nds_gfs_regrid['wind_speed'] = np.sqrt(np.square(ds_gfs_regrid.u_component_of_wind_10m_above_ground) + np.square(ds_gfs_regrid.v_component_of_wind_10m_above_ground))",
"Create weight file: bilinear_148x475_81x241.nc\nRemove file bilinear_148x475_81x241.nc\nusing dimensions ('lat', 'lon') from data variable temperature_2m_above_ground as the horizontal dimensions for this dataset.\n"
],
[
"ax = plt.axes(projection=ccrs.LambertConformal(central_longitude=-65, central_latitude=18))\nds_gfs_regrid['wind_speed'].isel(time=6).where(ds_gfs_regrid.land_mask == 1).plot(ax=ax, transform=ccrs.PlateCarree())\nax.set_extent([-67.5, -65, 17.5, 19])\nax.set_aspect(\"equal\")",
"_____no_output_____"
],
[
"ds_gfs_daily_mean = ds_gfs_regrid.resample(time='1D').mean()\nds_gfs_daily_max = ds_gfs_regrid.resample(time='1D').max()\nds_gfs_daily_min = ds_gfs_regrid.resample(time='1D').min()\n",
"//anaconda/envs/xesmf_env/lib/python3.7/site-packages/xarray/core/nanops.py:142: RuntimeWarning: Mean of empty slice\n return np.nanmean(a, axis=axis, dtype=dtype)\n"
],
[
"ds_gfs_regrid",
"_____no_output_____"
],
[
"ds_base['wind_speed_mean'] = ds_gfs_daily_mean['wind_speed']\nds_base['gfs_temp_mean'] = ds_gfs_daily_mean['temperature_2m_above_ground']\nds_base['gfs_temp_max'] = ds_gfs_daily_max['temperature_2m_above_ground']\nds_base['gfs_temp_min'] = ds_gfs_daily_min['temperature_2m_above_ground']\nds_base['gfs_humidity_mean'] = ds_gfs_daily_mean['specific_humidity_2m_above_ground']\nds_base['gfs_rain_max'] = ds_gfs_daily_max['precipitable_water_entire_atmosphere']",
"_____no_output_____"
]
],
[
[
"### Add in weather",
"_____no_output_____"
]
],
[
[
"ds_gldas = xr.open_dataset('/Users/kasmith/Code/kaggle_ds4g/data/starter_pack/gldas/gldas_1year.nc')\nds_gldas = ds_gldas.drop('crs')\ngldas_regridder = make_regridder(ds_gldas, ds_base_regrid, 'Tair_f_inst', 'nearest_s2d')\nds_gldas_regrid = gldas_regridder(ds_gldas)\nds_gldas_regrid = ds_gldas_regrid.where(ds_gldas_regrid['Tair_f_inst']!=0.)\nds_gldas_regrid.coords['land_mask'] = land_mask",
"Create weight file: nearest_s2d_148x475_81x241.nc\nRemove file nearest_s2d_148x475_81x241.nc\nusing dimensions ('lat', 'lon') from data variable LWdown_f_tavg as the horizontal dimensions for this dataset.\n"
],
[
"ds_gldas_regrid_fill = ds_gldas_regrid.ffill(dim='lat')\nds_gldas_regrid_fill = ds_gldas_regrid_fill.bfill(dim='lat')\nds_gldas_regrid_fill = ds_gldas_regrid_fill.ffill(dim='lon')\nds_gldas_regrid_fill = ds_gldas_regrid_fill.bfill(dim='lon')",
"//anaconda/envs/xesmf_env/lib/python3.7/site-packages/xarray/core/missing.py:410: FutureWarning: This DataArray contains multi-dimensional coordinates. In the future, these coordinates will be transposed as well unless you specify transpose_coords=False.\n ).transpose(*arr.dims)\n//anaconda/envs/xesmf_env/lib/python3.7/site-packages/xarray/core/missing.py:427: FutureWarning: This DataArray contains multi-dimensional coordinates. In the future, these coordinates will be transposed as well unless you specify transpose_coords=False.\n ).transpose(*arr.dims)\n"
],
[
"ax = plt.axes(projection=ccrs.LambertConformal(central_longitude=-65, central_latitude=18))\nds_gldas_regrid['Tair_f_inst'].isel(time=4).plot(ax=ax, transform=ccrs.PlateCarree())\nax.set_extent([-67.5, -65, 17.5, 19])\nax.set_aspect(\"equal\")",
"_____no_output_____"
],
[
"ds_gldas_daily_mean = ds_gldas_regrid_fill.resample(time='1D').mean()\nds_gldas_daily_max = ds_gldas_regrid_fill.resample(time='1D').max()\nds_gldas_daily_min = ds_gldas_regrid_fill.resample(time='1D').min()",
"_____no_output_____"
],
[
"ds_base['gldas_wind_mean'] = ds_gldas_daily_mean['Wind_f_inst']\nds_base['gldas_airT_mean'] = ds_gldas_daily_mean['Tair_f_inst']\nds_base['gldas_airT_max'] = ds_gldas_daily_max['Tair_f_inst']\nds_base['gldas_airT_min'] = ds_gldas_daily_min['Tair_f_inst']\nds_base['gldas_lwdown_mean'] = ds_gldas_daily_mean['LWdown_f_tavg']\nds_base['gldas_pres_mean'] = ds_gldas_daily_mean['Psurf_f_inst']\nds_base['gldas_humidity_mean'] = ds_gldas_daily_mean['Qair_f_inst']\nds_base['gldas_heatflux_mean'] = ds_gldas_daily_mean['Qg_tavg']\nds_base['gldas_rain_max'] = ds_gldas_daily_max['Rainf_f_tavg']\nds_base['gldas_SWdown_max'] = ds_gldas_daily_max['SWdown_f_tavg']\n",
"_____no_output_____"
]
],
[
[
"### Add in night time lights",
"_____no_output_____"
]
],
[
[
"ds_nightlights = xr.open_dataset('/Users/kasmith/Code/kaggle_ds4g/data/supplementary_data/nc/VIIRS_nighttime_lights.nc')",
"_____no_output_____"
],
[
"ds_nightlights2 = ds_nightlights.drop('crs')",
"_____no_output_____"
],
[
"nl_regridder = make_regridder(ds_nightlights2, ds_base_regrid, 'avg_rad')\n\nds_nl_regrid = nl_regridder(ds_nightlights2)\nds_nl_regrid = ds_nl_regrid.where(ds_nl_regrid['avg_rad']!=0.)",
"Create weight file: bilinear_79x257_81x241.nc\nRemove file bilinear_79x257_81x241.nc\nusing dimensions ('lat', 'lon') from data variable avg_rad as the horizontal dimensions for this dataset.\n"
],
[
"ds_nl_regrid.coords['land_mask'] = land_mask",
"_____no_output_____"
],
[
"ax = plt.axes(projection=ccrs.LambertConformal(central_longitude=-65, central_latitude=18))\nds_nl_regrid['avg_rad'].where(ds_nl_regrid.land_mask == 1).plot(ax=ax, transform=ccrs.PlateCarree())\nax.set_extent([-67.5, -65, 17.5, 19])\nax.set_aspect(\"equal\")",
"_____no_output_____"
],
[
"ds_base['night_avg_rad'] = ds_nl_regrid['avg_rad']",
"_____no_output_____"
],
[
"ds_base",
"_____no_output_____"
]
],
[
[
"### Add in population",
"_____no_output_____"
]
],
[
[
"ds_population = xr.open_dataset('/Users/kasmith/Code/kaggle_ds4g/data/supplementary_data/nc/GPWv411_populationdensity.nc')\nds_population = ds_population.drop('crs')\npop_regridder = make_regridder(ds_population, ds_base_regrid, 'population_density')\nds_pop_regrid = pop_regridder(ds_population)\nds_pop_regrid = ds_pop_regrid.where(ds_pop_regrid['population_density']!=0.)\nds_pop_regrid.coords['land_mask'] = land_mask",
"Create weight file: bilinear_79x257_81x241.nc\nRemove file bilinear_79x257_81x241.nc\nusing dimensions ('lat', 'lon') from data variable population_density as the horizontal dimensions for this dataset.\n"
],
[
"ax = plt.axes(projection=ccrs.LambertConformal(central_longitude=-65, central_latitude=18))\nds_pop_regrid['population_density'].where(ds_pop_regrid.land_mask == 1).plot(ax=ax, transform=ccrs.PlateCarree())\nax.set_extent([-67.5, -65, 17.5, 19])\nax.set_aspect(\"equal\")",
"_____no_output_____"
],
[
"ds_base['population_density'] = ds_pop_regrid['population_density']",
"_____no_output_____"
]
],
[
[
"### Add in landcover",
"_____no_output_____"
]
],
[
[
"ds_landcover = xr.open_dataset('/Users/kasmith/Code/kaggle_ds4g/data/supplementary_data/nc/GFSAD1000_landcover.nc')\nds_landcover = ds_landcover.drop('crs')\nland_regridder = make_regridder(ds_landcover, ds_base_regrid, 'landcover_category', 'nearest_s2d')\nds_land_regrid = land_regridder(ds_landcover)\nds_land_regrid.coords['land_mask'] = land_mask",
"Create weight file: nearest_s2d_79x257_81x241.nc\nRemove file nearest_s2d_79x257_81x241.nc\nusing dimensions ('lat', 'lon') from data variable landcover_category as the horizontal dimensions for this dataset.\n"
],
[
"ax = plt.axes(projection=ccrs.LambertConformal(central_longitude=-65, central_latitude=18))\nds_land_regrid['landcover_category'].where(ds_land_regrid.land_mask == 1).plot(ax=ax, transform=ccrs.PlateCarree())\nax.set_extent([-67.5, -65, 17.5, 19])\nax.set_aspect(\"equal\")",
"_____no_output_____"
],
[
"ds_base['landcover_category'] = ds_land_regrid['landcover_category']",
"_____no_output_____"
]
],
[
[
"### Add power plants layer",
"_____no_output_____"
]
],
[
[
"plants = pd.read_csv('../data/starter_pack/gppd/gppd_120_pr.csv')\nplants = plants[['capacity_mw', 'estimated_generation_gwh', 'primary_fuel', '.geo']]\ncoordinates = pd.json_normalize(plants['.geo'].apply(json.loads))['coordinates']\nplants[['longitude', 'latitude']] = pd.DataFrame(coordinates.values.tolist(), index= coordinates.index)\nplants.drop('.geo', axis=1, inplace=True)",
"_____no_output_____"
],
[
"plants_fossil = plants[plants['primary_fuel'].isin(['Oil', 'Gas', 'Coal'])].copy()",
"_____no_output_____"
],
[
"plants_fossil.reset_index(drop=True, inplace=True)\nplants_fossil['grid_lon'] = np.nan\nplants_fossil['position_lon'] = np.ones\nplants_fossil['grid_lat'] = np.nan\nplants_fossil['position_lat'] = np.ones\nlons = ds_base.lon.values\na=0 \nfor lon in plants_fossil.longitude:\n lon_diff = abs(lon-lons) \n plants_fossil.at[a,'grid_lon'] = lons[np.argmin(lon_diff)]\n plants_fossil.at[a,'position_lon'] = np.argmin(lon_diff)\n a=a+1\n\nlats = ds_base.lat.values\na=0 \nfor lat in plants_fossil.latitude:\n lat_diff = abs(lat-lats) \n plants_fossil.at[a,'grid_lat'] = lats[np.argmin(lat_diff)]\n plants_fossil.at[a,'position_lat'] = np.argmin(lat_diff)\n a=a+1",
"_____no_output_____"
],
[
"plants_fossil['num_plants'] = 1\nplants_fossil_grid = plants_fossil[['grid_lon', 'grid_lat', 'position_lat', 'position_lon', 'num_plants']].groupby(['grid_lon', 'grid_lat', 'position_lat', 'position_lon'], as_index=False).sum()\n",
"_____no_output_____"
],
[
"plants_fossil.to_csv('plants_fossil.csv', index=False)",
"_____no_output_____"
],
[
"plants_mask = 0 * np.ones((ds_base.dims['lat'], ds_base.dims['lon'])) * np.isnan(ds_base.NO2_column_number_density.isel(time=0)) \nposition_lat_id = 0 * np.ones((ds_base.dims['lat'], ds_base.dims['lon'])) * np.isnan(ds_base.NO2_column_number_density.isel(time=0))\nposition_lon_id = 0 * np.ones((ds_base.dims['lat'], ds_base.dims['lon'])) * np.isnan(ds_base.NO2_column_number_density.isel(time=0))\nplants_mask = plants_mask.drop('time')\ncount=0\nfor x in plants_fossil_grid.index:\n plants_mask[(plants_fossil_grid.at[x,'position_lat']-2):(plants_fossil_grid.at[x,'position_lat']+2),(plants_fossil_grid.at[x,'position_lon']-2):(plants_fossil_grid.at[x,'position_lon']+2)]=1\n position_lat_id[(plants_fossil_grid.at[x,'position_lat']-2):(plants_fossil_grid.at[x,'position_lat']+2),(plants_fossil_grid.at[x,'position_lon']-2):(plants_fossil_grid.at[x,'position_lon']+2)]=plants_fossil_grid.at[x,'position_lat']\n position_lon_id[(plants_fossil_grid.at[x,'position_lat']-2):(plants_fossil_grid.at[x,'position_lat']+2),(plants_fossil_grid.at[x,'position_lon']-2):(plants_fossil_grid.at[x,'position_lon']+2)]=plants_fossil_grid.at[x,'position_lon']\n\n \nplants_mask = plants_mask.where(plants_mask == 1.)\nposition_lat_id = position_lat_id.where(position_lat_id >= 1.)\nposition_lon_id = position_lon_id.where(position_lon_id >= 1.)\nds_base.coords['plants_mask'] = (('lat', 'lon'), plants_mask)\n#ds_base.coords['plants_mask'] = ds_base.plants_mask.where(ds_base.land_mask == 1)\nds_base.coords['no_plants_mask'] = ds_base.plants_mask.fillna(0).where((ds_base.plants_mask != 1) & (ds_base.land_mask == 1))\nds_base.coords['no_plants_mask'] = ds_base.no_plants_mask + 1\nds_base.coords['position_lat_id'] = (('lat', 'lon'), position_lat_id)\nds_base.coords['position_lat_id'] = ds_base.position_lat_id.where(ds_base.position_lat_id >= 1)\n\nds_base.coords['position_lon_id'] = (('lat', 'lon'), position_lon_id)\nds_base.coords['position_lon_id'] = ds_base.position_lon_id.where(ds_base.position_lon_id >= 1)\n",
"_____no_output_____"
],
[
"ax = plt.axes(projection=ccrs.LambertConformal(central_longitude=-65, central_latitude=18))\nds_base['NO2_column_number_density'].isel(time=0).where((land_mask==1) & (plants_mask==1)).plot(ax=ax, transform=ccrs.PlateCarree())\nax.set_extent([-67.5, -65, 17.5, 19])\nax.set_aspect(\"equal\")",
"_____no_output_____"
],
[
"ax = plt.axes(projection=ccrs.LambertConformal(central_longitude=-65, central_latitude=18))\nds_base['NO2_column_number_density'].isel(time=0).where(ds_base.no_plants_mask == 1).plot(ax=ax, transform=ccrs.PlateCarree())\nax.set_extent([-67.5, -65, 17.5, 19])\nax.set_aspect(\"equal\")",
"_____no_output_____"
]
],
[
[
"### Calculate Annual Average NO2",
"_____no_output_____"
]
],
[
[
"ds_base_annual = ds_base.where((ds_base.wind_speed_mean <= 2)).mean(dim=['time'])\nds_base_annual_n = ds_base.where((ds_base.wind_speed_mean <= 2)).count(dim=['time'])",
"//anaconda/envs/xesmf_env/lib/python3.7/site-packages/xarray/core/nanops.py:142: RuntimeWarning: Mean of empty slice\n return np.nanmean(a, axis=axis, dtype=dtype)\n"
],
[
"ax = plt.axes(projection=ccrs.LambertConformal(central_longitude=-65, central_latitude=18))\nds_base_annual['NO2_column_number_density'].where((ds_base_annual.land_mask == 1) & (ds_base_annual.no_plants_mask ==1)).plot(ax=ax, transform=ccrs.PlateCarree())\nax.set_extent([-67.5, -65, 17.5, 19])\nax.set_aspect(\"equal\")",
"_____no_output_____"
],
[
"ds_base_annual['n'] = ds_base_annual_n['NO2_column_number_density']",
"_____no_output_____"
],
[
"ax = plt.axes(projection=ccrs.LambertConformal(central_longitude=-65, central_latitude=18))\nds_base_annual_n['NO2_column_number_density'].where((ds_base_annual.land_mask == 1) & (ds_base_annual.no_plants_mask ==1)).plot(ax=ax, transform=ccrs.PlateCarree())\nax.set_extent([-67.5, -65, 17.5, 19])\nax.set_aspect(\"equal\")",
"_____no_output_____"
],
[
"ds_base_annual_allwind = ds_base.mean(dim=['time'])",
"//anaconda/envs/xesmf_env/lib/python3.7/site-packages/xarray/core/nanops.py:142: RuntimeWarning: Mean of empty slice\n return np.nanmean(a, axis=axis, dtype=dtype)\n"
],
[
"ax = plt.axes(projection=ccrs.LambertConformal(central_longitude=-65, central_latitude=18))\nds_base_annual_allwind['NO2_column_number_density'].where(ds_base_annual_allwind.land_mask == 1).plot(ax=ax, transform=ccrs.PlateCarree())\nax.set_extent([-67.5, -65, 17.5, 19])\nax.set_aspect(\"equal\")",
"_____no_output_____"
],
[
"ds_base_annual.to_netcdf('annual_low_wind.nc')",
"_____no_output_____"
],
[
"ds_base_annual_allwind.to_netcdf('annual_all_wind.nc')",
"_____no_output_____"
]
],
[
[
"### Calculate monthly average NO2",
"_____no_output_____"
]
],
[
[
"ds_base_monthly = ds_base.where((ds_base.wind_speed_mean <= 5)).resample(time='1M').mean()",
"//anaconda/envs/xesmf_env/lib/python3.7/site-packages/xarray/core/nanops.py:142: RuntimeWarning: Mean of empty slice\n return np.nanmean(a, axis=axis, dtype=dtype)\n"
],
[
"ax = plt.axes(projection=ccrs.LambertConformal(central_longitude=-65, central_latitude=18))\nds_base_monthly['NO2_column_number_density'].isel(time=6).where(ds_base_monthly.land_mask == 1).plot(ax=ax, transform=ccrs.PlateCarree())\nax.set_extent([-67.5, -65, 17.5, 19])\nax.set_aspect(\"equal\")",
"_____no_output_____"
],
[
"ds_base_monthly.to_netcdf('monthly_all_wind.nc')",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
d0ecfed56b4973b047ffe6bb08926bf41373a9ca | 57,699 | ipynb | Jupyter Notebook | notebooks/raiderStats/raiderStats_tutorial.ipynb | jlmaurer/RAiDER-docs | d2ac5b00a007eb651e6113fec7fc37268ddc65b7 | [
"Apache-2.0"
] | null | null | null | notebooks/raiderStats/raiderStats_tutorial.ipynb | jlmaurer/RAiDER-docs | d2ac5b00a007eb651e6113fec7fc37268ddc65b7 | [
"Apache-2.0"
] | null | null | null | notebooks/raiderStats/raiderStats_tutorial.ipynb | jlmaurer/RAiDER-docs | d2ac5b00a007eb651e6113fec7fc37268ddc65b7 | [
"Apache-2.0"
] | null | null | null | 30.464097 | 499 | 0.623269 | [
[
[
"# Peform statistical analyses of GNSS station locations and tropospheric zenith delays\n\n**Author**: Simran Sangha, David Bekaert - Jet Propulsion Laboratory\n\nThis notebook provides an overview of the functionality included in the **`raiderStats.py`** program. Specifically, we outline examples on how to perform basic statistical analyses of GNSS station location and tropospheric zenith delay information over a user defined area of interest, span of time, and seasonal interval. In this notebook, we query GNSS stations spanning northern California between 2016 and 2020. \n\nWe will outline the following statistical analysis and filtering options:\n- Restrict analyses to range of years\n- Restrict analyses to range of months (i.e. seasonal interval)\n- Illustrate station distribution and tropospheric zenith delay mean/standard deviation\n- Illustrate gridded distribution and tropospheric zenith delay mean/standard deviation\n- Generate variogram plots across specified time periods\n- Perform basic seasonal amplitude/phase analyses\n- Examine residuals between weather-models and collocated GNSS stations\n\n<div class=\"alert alert-info\">\n <b>Terminology:</b>\n \n- *GNSS*: Stands for Global Navigation Satellite System. Describes any satellite constellation providing global or regional positioning, navigation, and timing services.\n- *tropospheric zenith delay*: The precise atmospheric delay satellite signals experience when propagating through the troposphere.\n- *variogram*: Characterization of the difference between field values at two locations.\n- *empirical variogram*: Provides a description of how the data are correlated with distance.\n- *experimental variogram*: A discrete function calculated using a measure of variability between pairs of points at various distances\n- *sill*: Limit of the variogram, tending to infinity lag distances.\n- *range*: The distance in which the difference of the variogram from the sill becomes negligible, such that the data arre no longer autocorrelated.\n \n </div>\n ",
"_____no_output_____"
],
[
"## Table of Contents:\n<a id='example_TOC'></a>",
"_____no_output_____"
],
[
"[**Overview of the raiderStats.py program**](#overview)\n- [1. Basic user input options](#overview_1)\n- [2. Run parameters](#overview_2)\n- [3. Optional controls for spatiotemporal subsetting](#overview_3)\n- [4. Supported types of individual station scatter-plots](#overview_4)\n- [5. Supported types of gridded station plots](#overview_5)\n- [6. Supported types of variogram plots](#overview_6)\n- [7. Optional controls for plotting](#overview_7)\n\n[**Download prerequisite GNSS station location and tropospheric zenith delay information with the raiderDownloadGNSS.py program**](#downloads)\n\n[**Examples of the raiderStats.py program**](#examples)\n- [Example 1. Generate all individual station scatter-plots, as listed under section #4](#example_1)\n- [Example 2. Generate all basic gridded station plots, as listed under section #5](#example_2)\n - [Example 2a. Redo plots efficiently using generated grid raster files](#example_2a)\n- [Example 3. Generate gridded mean tropospheric zenith delay plot, with stations superimposed](#example_3)\n- [Example 4. Generate variogram plots](#example_4)\n- [Example 5. Generate seasonal phase/amplitude plots](#example_5)\n- [Example 6. Generate weather model/GNSS residual plots](#example_6)",
"_____no_output_____"
],
[
"## Prep: Initial setup of the notebook",
"_____no_output_____"
],
[
"Below we set up the directory structure for this notebook exercise. In addition, we load the required modules into our python environment using the **`import`** command.",
"_____no_output_____"
]
],
[
[
"import os\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n## Defining the home and data directories\ntutorial_home_dir = os.path.abspath(os.getcwd())\nwork_dir = os.path.abspath(os.getcwd())\nprint(\"Tutorial directory: \", tutorial_home_dir)\nprint(\"Work directory: \", work_dir)\n\n# Verifying if RAiDER is installed correctly\ntry:\n from RAiDER import statsPlot\nexcept:\n raise Exception('RAiDER is missing from your PYTHONPATH')\n\nos.chdir(work_dir)",
"_____no_output_____"
]
],
[
[
"## Overview of the raiderStats.py program\n<a id='overview'></a>",
"_____no_output_____"
],
[
"The **`raiderStats.py`** program provides a suite of convinient statistical analyses of GNSS station locations and tropospheric zenith delays.\n\nRunning **`raiderStats.py`** with the **`-h`** option will show the parameter options and outline several basic, practical examples. \n\nLet us explore these options:",
"_____no_output_____"
]
],
[
[
"!raiderStats.py -h",
"_____no_output_____"
]
],
[
[
"### 1. Basic user input options\n<a id='overview_1'></a>",
"_____no_output_____"
],
[
"#### Input CSV file (**`--file FNAME`**)",
"_____no_output_____"
],
[
"**REQUIRED** argument. Provide a valid CSV file as input through **`--file`** which lists the GNSS station IDs (ID), lat/lon coordinates (Lat,Lon), dates in YYYY-MM-DD format (Date), and the desired data field in units of meters.\n\nNote that the complementary **`raiderDownloadGNSS.py`** format generates such a primary CSV file named **`UNRcombinedGPS_ztd.csv`** that contains all such fields and is already formatted as expected by **`raiderStats.py`**. Please refer to the accompanying **`raiderDownloadGNSS/raiderDownloadGNSS_tutorial.ipynb `** for more details and practical examples.",
"_____no_output_____"
],
[
"#### Data column name (**`--column_name COL_NAME`**)",
"_____no_output_____"
],
[
"Specify name of data column in input CSV file through **`--column_name `** that you wish to perform statistical analyses on. Input assumed to be in units of meters.\n\nDefault input column name set to **`ZTD`**, the name assigned to tropospheric zenith delays populated under the **`CombinedGPS_ztd.csv`** file generated through **`raiderDownloadGNSS.py`**\n\nThe column name is always prepended to output products (e.g. `ZTD_grid_heatmap.tif` and `ZTD_grid_heatmap.png`)",
"_____no_output_____"
],
[
"#### Data column unit (**`--unit UNIT`**)",
"_____no_output_____"
],
[
"Specify unit for output rasters/graphics through **`--unit`**. Again, input assumed to be in units of meters so it will be converted into meters if not already in meters.",
"_____no_output_____"
],
[
"### 2. Run parameters\n<a id='overview_2'></a>",
"_____no_output_____"
],
[
"#### Output directory (**`--workdir WORKDIR`**)",
"_____no_output_____"
],
[
"Specify directory to deposit all outputs into with **`--workdir`**. Absolute and relative paths are both supported.\n\nBy default, outputs will be deposited into the current working directory where the program is launched.",
"_____no_output_____"
],
[
"#### Number of CPUs to be used (**`--cpus NUMCPUS`**)",
"_____no_output_____"
],
[
"Specify number of cpus to be used for multiprocessing with **`--cpus`**. For most cases, multiprocessing is essential in order to access data and perform statistical analyses within a reasonable amount of time.\n\nMay specify **`--cpus all`** at your own discretion in order to leverage all available CPUs on your system.\n\nBy default 8 CPUs will be used.",
"_____no_output_____"
],
[
"#### Verbose mode (**`--verbose`**)",
"_____no_output_____"
],
[
"Specify **`--verbose`** to print all statements through entire routine.",
"_____no_output_____"
],
[
"### 3. Optional controls for spatiotemporal subsetting\n<a id='overview_3'></a>",
"_____no_output_____"
],
[
"#### Geographic bounding box (**`--bounding_box BOUNDING_BOX`**)",
"_____no_output_____"
],
[
"An area of interest may be specified as `SNWE` coordinates using the **`--bounding_box`** option. Coordinates should be specified as a space delimited string surrounded by quotes. The common intersection between the user-specified spatial bounds and the spatial bounds computed from the station locations in the input file is then passed. This example below would restrict the analysis to stations over northern California:\n**`--bounding_box '36 40 -124 -119'`**\n\nIf no area of interest is specified, by default the spatial bounds computed from the station locations in the input file as passed.",
"_____no_output_____"
],
[
"#### Gridcell spacing (**`--spacing SPACING`**)",
"_____no_output_____"
],
[
"Specify degree spacing of grid-cells for statistical analyses through **`--spacing`**\n\nBy default grid-cell spacing is set to 1°. If the specified grid-cell spacing is not a multiple of the spatial bounds of the dataset, the grid-cell spacing again defaults back to 1°.",
"_____no_output_____"
],
[
"#### Subset in time (**`--timeinterval TIMEINTERVAL`**)",
"_____no_output_____"
],
[
"Define temporal bounds with **`--timeinterval TIMEINTERVAL`** by specifying earliest YYYY-MM-DD date followed by latest date YYYY-MM-DD. For example: **`--timeinterval 2018-01-01 2019-01-01`**\n\nBy default, bounds set to earliest and latest time found in input file.",
"_____no_output_____"
],
[
"#### Seasonal interval (**`--seasonalinterval SEASONALINTERVAL`**)",
"_____no_output_____"
],
[
"Define subset in time by a specific interval for each year (i.e. seasonal interval) with **`--seasonalinterval SEASONALINTERVAL`** by specifying earliest MM-DD time followed by latest MM-DD time. For example: **`--seasonalinterval '03-21 06-21'`**",
"_____no_output_____"
],
[
"### 4. Supported types of individual station scatter-plots\n<a id='overview_4'></a>",
"_____no_output_____"
],
[
"#### Plot station distribution (**`--station_distribution`**)",
"_____no_output_____"
],
[
"Illustrate each individual station with black markers.",
"_____no_output_____"
],
[
"#### Plot mean tropospheric zenith delay by station (**`--station_delay_mean`**)",
"_____no_output_____"
],
[
"Illustrate the tropospheric zenith delay mean for each station with a **`hot`** colorbar.",
"_____no_output_____"
],
[
"#### Plot standard deviation of tropospheric zenith delay by station (**`--station_delay_stdev`**)",
"_____no_output_____"
],
[
"Illustrate the tropospheric zenith delay standard deviation for each station with a **`hot`** colorbar.",
"_____no_output_____"
],
[
"#### Plot phase/amplitude of tropospheric zenith delay by station (**`--station_seasonal_phase`**)",
"_____no_output_____"
],
[
"Illustrate the phase/amplitude of tropospheric zenith delay for each station with a **`hot`** colorbar.",
"_____no_output_____"
],
[
"### 5. Supported types of gridded station plots\n<a id='overview_5'></a>",
"_____no_output_____"
],
[
"#### Plot gridded station heatmap (**`--grid_heatmap`**)",
"_____no_output_____"
],
[
"Illustrate heatmap of gridded station array with a **`hot`** colorbar.",
"_____no_output_____"
],
[
"#### Plot gridded mean tropospheric zenith delay (**`--grid_delay_mean`**)",
"_____no_output_____"
],
[
"Illustrate gridded tropospheric zenith delay mean with a **`hot`** colorbar.\nAlternatively plot absolute gridded delay mean with the option `--grid_delay_absolute_mean`",
"_____no_output_____"
],
[
"#### Plot gridded median tropospheric zenith delay (**`--grid_delay_median`**)",
"_____no_output_____"
],
[
"Illustrate gridded tropospheric zenith delay median with a **`hot`** colorbar.\nAlternatively plot absolute gridded delay median with the option `--grid_delay_absolute_median`",
"_____no_output_____"
],
[
"#### Plot gridded standard deviation of tropospheric zenith delay (**`--grid_delay_stdev`**)",
"_____no_output_____"
],
[
"Illustrate gridded tropospheric zenith delay standard deviation with a **`hot`** colorbar.\nAlternatively plot absolute gridded delay standard deviation with the option `--grid_delay_absolute_stdev`",
"_____no_output_____"
],
[
"#### Plot gridded station-wise delay phase/amplitude (**`--grid_seasonal_phase`**)",
"_____no_output_____"
],
[
"Illustrate gridded station-wise zenith delay phase/amplitude with a **`hot`** colorbar.\nAlternatively plot absolute gridded delay phase/amplitude with the option `--grid_seasonal_absolute_phase`",
"_____no_output_____"
],
[
"### 6. Supported types of variogram plots\n<a id='overview_6'></a>",
"_____no_output_____"
],
[
"#### Plot variogram (**`--variogramplot`**)",
"_____no_output_____"
],
[
"Passing **`--variogramplot`** toggles plotting of gridded station variogram, where gridded sill and range values for the experimental variogram fits are illustrated.",
"_____no_output_____"
],
[
"#### Apply experimental fit to binned variogram (**`--binnedvariogram`**)",
"_____no_output_____"
],
[
"Pass **`--binnedvariogram`** to apply experimental variogram fit to total binned empirical variograms for each time slice. \n\nDefault is to pass total unbinned empiricial variogram.",
"_____no_output_____"
],
[
"#### Save variogram figures per time-slice (**`--variogram_per_timeslice`**)",
"_____no_output_____"
],
[
"Specify **`--variogram_per_timeslice`** to generate variogram plots per gridded station AND time-slice.\n\nIf option not toggled, then variogram plots are only generated per gridded station and spanning entire time-span.",
"_____no_output_____"
],
[
"### 7. Optional controls for plotting\n<a id='overview_7'></a>",
"_____no_output_____"
],
[
"#### Save gridded array(s) as raster(s) (**`--grid_to_raster`**)",
"_____no_output_____"
],
[
"Save specified gridded array(s) as raster(s). \nMay directly load/plot in successive script call by passing output grid as argument for **`--file`**.\nE.g. if specified with **`--grid_delay_mean`**, then a raster file named `ZTD_grid_delay_mean.tif` containing the gridded mean delay will be generated.",
"_____no_output_____"
],
[
"#### Save debug figures of station-wise seasonal fit (**`--phaseamp_per_station`**)",
"_____no_output_____"
],
[
"Save debug figures of curve-fit vs data per station for seasonal amplitude/phase analaysis options (i.e. **`--grid_seasonal_phase`** and/or **`--station_seasonal_phase`**).",
"_____no_output_____"
],
[
"#### Minimum TS span and minimum fractional observations for seasonal amplitude/phase analyses (**`--min_span`**)",
"_____no_output_____"
],
[
"Minimum TS span (years) and minimum fractional observations in span (fraction) imposed for seasonal amplitude/phase analyses to be performed for a given station.\nBy default set to 2 years and 0.6 respectively (i.e. **`--min_span 2 0.6`**)",
"_____no_output_____"
],
[
"#### Period limit for seasonal amplitude/phase analyses (**`--period_limit`**)",
"_____no_output_____"
],
[
"Period limit (years) imposed for seasonal amplitude/phase analyses to be performed for a given station.",
"_____no_output_____"
],
[
"#### Variogram density threshold (**`--densitythreshold DENSITYTHRESHOLD`**)",
"_____no_output_____"
],
[
"For variogram plots, a given grid-cell is only valid if it contains this specified threshold of stations. \n\nBy default set to 10 stations.",
"_____no_output_____"
],
[
"#### Figure DPI (**`--figdpi FIGDPI`**)",
"_____no_output_____"
],
[
"DPI to use for saving figures.",
"_____no_output_____"
],
[
"#### Plot title (**`--user_title USER_TITLE`**)",
"_____no_output_____"
],
[
"Specify custom title for plots.",
"_____no_output_____"
],
[
"#### Plot format (**`--plot_format PLOT_FMT`**)",
"_____no_output_____"
],
[
"File format for saving plots. Default is PNG.",
"_____no_output_____"
],
[
"#### Colorbar bounds (**`--color_bounds CBOUNDS`**)",
"_____no_output_____"
],
[
"Set lower and upper-bounds for plot colorbars. For example: **`--color_bounds '0 100'`**\n\nBy default set to the dynamic range of the data.",
"_____no_output_____"
],
[
"#### Colorbar percentile limits (**`--colorpercentile COLORPERCENTILE COLORPERCENTILE`**)",
"_____no_output_____"
],
[
"Set lower and upper percentile for plot colorbars. For example: **`--colorpercentile 30 100`**\n\nBy default set to 25% and 95%, respectively.",
"_____no_output_____"
],
[
"#### Superimpose individual stations over gridded array (**`--stationsongrids`**)",
"_____no_output_____"
],
[
"In gridded plots, superimpose your gridded array with a scatterplot of station locations.",
"_____no_output_____"
],
[
"#### Draw gridlines (**`--drawgridlines`**)",
"_____no_output_____"
],
[
"In gridded plots, draw gridlines.",
"_____no_output_____"
],
[
"#### Generate all supported plots (**`--plotall`**)",
"_____no_output_____"
],
[
"Generate all supported plots, as outlined under sections #4, #5, and #6 above.",
"_____no_output_____"
],
[
"## Download prerequisite GNSS station location and tropospheric zenith delay information with the **`raiderDownloadGNSS.py`** program\n<a id='downloads'></a>",
"_____no_output_____"
],
[
"Virtually access GNSS station location and zenith delay information for the years '2016,2019', for every day, at a UTC time of day 'HH:MM:SS' of '00:00:00', and across a geographic bounding box '36 40 -124 -119' spanning over Northern California.\n\nThe footprint of the specified geographic bounding box is again depicted in **Fig. 1**.\n\nIn addition to querying for multiple years, we will also experiment with using the maximum number of allowed CPUs to save some time! Recall again that the default number of CPUs used for parallelization is 8.\n\nNote these features and similar examples are outlined in more detail in the companion notebook **`raiderDownloadGNSS/raiderDownloadGNSS_tutorial.ipynb`**",
"_____no_output_____"
]
],
[
[
"!raiderDownloadGNSS.py --out products --date 20160101 20191231 --returntime '00:00:00' --bounding_box '36 40 -124 -119' --cpus all",
"_____no_output_____"
]
],
[
[
"All of the extracted tropospheric zenith delay information stored under **`GPS_delays`** is concatenated with the GNSS station location information stored under **`gnssStationList_overbbox.csv`** into a primary comprehensive file **`UNRcombinedGPS_ztd.csv`**\n\n**`UNRcombinedGPS_ztd.csv`** may in turn be directly used to perform basic statistical analyses using **`raiderStats.py`**.",
"_____no_output_____"
],
[
"<img src=\"support_docs/bbox_footprint.png\" alt=\"footprint\" width=\"700\">\n<center><b>Fig. 1</b> Footprint of geopraphic bounding box used in examples 1 and 2. </center>",
"_____no_output_____"
],
[
"## Examples of the **`raiderStats.py`** program\n<a id='examples'></a>",
"_____no_output_____"
],
[
"### Example 1. Generate all individual station scatter-plots, as listed under [section #4](#overview_4) <a id='example_1'></a>",
"_____no_output_____"
],
[
"Using the file **`UNRcombinedGPS_ztd.csv`** generated by **`raiderDownloadGNSS.py`** as input, produce plots illustrating station distribution, mean tropospheric zenith delay by station, and standard deviation of tropospheric zenith delay by station.\n\nRestrict the temporal span of the analyses to all data acquired between 2016-01-01 and 2020-12-31, and restrict the spatial extent to a geographic bounding box '36 40 -124 -119' spanning over Northern California.\n\nThe footprint of the specified geographic bounding box is depicted in **Fig. 1**.\n\nThese basic spatiotemporal constraints will be inherited by all successive examples.",
"_____no_output_____"
]
],
[
[
"!raiderStats.py --file products/UNRcombinedGPS_ztd.csv --workdir maps_ex1 --bounding_box '36 40 -124 -119' --timeinterval '2016-01-01 2020-12-31' --station_distribution --station_delay_mean --station_delay_stdev",
"_____no_output_____"
]
],
[
[
"Now we can take a look at the generated products:",
"_____no_output_____"
]
],
[
[
"!ls maps_ex1/figures",
"_____no_output_____"
]
],
[
[
"Here we visualize the spatial distribution of stations (*ZTD_station_distribution.png*) as black markers.\n\n<img src=\"support_docs/maps/maps_ex1/figures/ZTD_station_distribution.png\" alt=\"ZTD_station_distribution\" width=\"700\">\n\nTo generate this figure alone, run:\n```\n!raiderStats.py --file products/UNRcombinedGPS_ztd.csv --workdir maps_ex1 --bounding_box '36 40 -124 -119' --timeinterval '2016-01-01 2020-12-31' --station_distribution\n```",
"_____no_output_____"
],
[
"Here we visualize the mean tropospheric zenith delay by station (*ZTD_station_delay_mean.png*) with a **`hot`** colorbar. \n\n<img src=\"support_docs/maps/maps_ex1/figures/ZTD_station_delay_mean.png\" alt=\"ZTD_station_delay_mean\" width=\"700\">\n\nTo generate this figure alone, run:\n```\n!raiderStats.py --file products/UNRcombinedGPS_ztd.csv --workdir maps_ex1 --bounding_box '36 40 -124 -119' --timeinterval '2016-01-01 2020-12-31' --station_delay_mean\n```",
"_____no_output_____"
],
[
"Here we visualize the standard deviation of tropospheric zenith delay by station (*ZTD_station_delay_stdev.png*) with a **`hot`** colorbar. \n\n<img src=\"support_docs/maps/maps_ex1/figures/ZTD_station_delay_stdev.png\" alt=\"ZTD_station_delay_stdev\" width=\"700\">\n\nTo generate this figure alone, run:\n```\n!raiderStats.py --file products/UNRcombinedGPS_ztd.csv --workdir maps_ex1 --bounding_box '36 40 -124 -119' --timeinterval '2016-01-01 2020-12-31' --station_delay_stdev\n```",
"_____no_output_____"
],
[
"### Example 2. Generate all basic gridded station plots, as listed under [section #5](#overview_5) <a id='example_2'></a>",
"_____no_output_____"
],
[
"Produce plots illustrating gridded station distribution, gridded mean tropospheric zenith delay, and gridded standard deviation of tropospheric zenith delay.\n\nAlso save gridded arrays as raster files with **`--grid_to_raster`** so as to more conveniently replot in successive script calls (recommended).\n\nFinally, use the maximum number of allowed CPUs to save some time.",
"_____no_output_____"
]
],
[
[
"!raiderStats.py --file products/UNRcombinedGPS_ztd.csv --workdir maps_ex2 --bounding_box '36 40 -124 -119' --timeinterval '2016-01-01 2020-12-31' --grid_heatmap --grid_delay_mean --grid_delay_stdev --grid_to_raster --cpus all",
"_____no_output_____"
]
],
[
[
"Now we can take a look at the generated rasters (i.e. the TIF files in the specified output directory):",
"_____no_output_____"
]
],
[
[
"!ls maps_ex2",
"_____no_output_____"
]
],
[
[
"Now we can take a look at the generated plots:",
"_____no_output_____"
]
],
[
[
"!ls maps_ex2/figures",
"_____no_output_____"
]
],
[
[
"Here we visualize the heatmap of gridded station array (*ZTD_grid_heatmap.png*) with a **`hot`** colorbar.\n\nNote that the colorbar bounds are saturated, which demonstrates the utility of plotting options outlined under section #7 such as **`--color_bounds`** and **`--colorpercentile`**\n\n<img src=\"support_docs/maps/maps_ex2/figures/ZTD_grid_heatmap.png\" alt=\"ZTD_grid_heatmap\" width=\"700\">\n\nTo generate this figure alone, run:\n```\n!raiderStats.py --file products/UNRcombinedGPS_ztd.csv --workdir maps_ex2 --bounding_box '36 40 -124 -119' --timeinterval '2016-01-01 2020-12-31' --grid_heatmap --grid_to_raster\n```",
"_____no_output_____"
],
[
"Here we visualize the gridded mean tropospheric zenith delay (*ZTD_grid_delay_mean.png*) with a **`hot`** colorbar.\n\n<img src=\"support_docs/maps/maps_ex2/figures/ZTD_grid_delay_mean.png\" alt=\"ZTD_grid_delay_mean\" width=\"700\">\n\nTo generate this figure alone, run:\n```\n!raiderStats.py --file products/UNRcombinedGPS_ztd.csv --workdir maps_ex2 --bounding_box '36 40 -124 -119' --timeinterval '2016-01-01 2020-12-31' --grid_delay_mean --grid_to_raster\n```",
"_____no_output_____"
],
[
"Here we visualize the gridded standard deviation of tropospheric zenith delay (*ZTD_grid_delay_stdev.png*) with a **`hot`** colorbar.\n\n<img src=\"support_docs/maps/maps_ex2/figures/ZTD_grid_delay_stdev.png\" alt=\"ZTD_grid_delay_stdev\" width=\"700\">\n\nTo generate this figure alone, run:\n```\n!raiderStats.py --file products/UNRcombinedGPS_ztd.csv --workdir maps_ex2 --bounding_box '36 40 -124 -119' --timeinterval '2016-01-01 2020-12-31' --grid_delay_stdev --grid_to_raster\n```",
"_____no_output_____"
],
[
"#### Example 2a. Redo plots efficiently using generated grid raster files <a id='example_2a'></a>",
"_____no_output_____"
],
[
"Since we haved the saved gridded arrays as raster files by specifying the **`--grid_to_raster`** option, we may directly replot these graphics by passing a given output raster file as input (e.g. `--file ZTD_grid_heatmap.tif`).\n\nThis is a practical, efficient means to adjust/replot graphics and save a great deal of time by avoiding the gridding/prep steps involved with processing the initial input CSV file, especially for cases which span continental/global scales.\n\nNote though that since the output rasters are static with respect to the original specified spatiotemporal constraints (e.g. `--bounding_box` and `--timeinterval`), you cannot adjust such options with the rasters as input arguments. These rasters must be re-computed for any adjusted spatiotemporal parameters (if necessary) before replotting.",
"_____no_output_____"
],
[
"For this replotting command, let us also adjust the colorbar-bounds (using the `--color_bounds` option).",
"_____no_output_____"
]
],
[
[
"!raiderStats.py --file maps_ex2/ZTD_grid_heatmap.tif --workdir maps_ex2a --color_bounds '10 40'",
"_____no_output_____"
]
],
[
[
"Here we visualize the replotted heatmap of gridded station array (*ZTD_grid_heatmap.png*) with a **`hot`** colorbar.\n\n<img src=\"support_docs/maps/maps_ex2a/figures/ZTD_grid_heatmap.png\" alt=\"ZTD_grid_heatmap\" width=\"700\">",
"_____no_output_____"
],
[
"### Example 3. Generate gridded mean tropospheric zenith delay plot, with stations superimposed <a id='example_3'></a>",
"_____no_output_____"
],
[
"Produce plot illustrating gridded mean tropospheric zenith delay, superimposed with individual station locations (`--stationsongrids`).\n\nAdditionally, subset data in time for spring. I.e. **`'03-21 06-21'`**\n\nFinally, use the maximum number of allowed CPUs to save some time.",
"_____no_output_____"
]
],
[
[
"!raiderStats.py --file products/UNRcombinedGPS_ztd.csv --workdir maps_ex3 --bounding_box '36 40 -124 -119' --timeinterval '2016-01-01 2020-12-31' --seasonalinterval '03-21 06-21' --grid_delay_mean --stationsongrids --cpus all",
"_____no_output_____"
]
],
[
[
"Now we can take a look at the generated plot:",
"_____no_output_____"
]
],
[
[
"!ls maps_ex3/figures",
"_____no_output_____"
]
],
[
[
"Here we visualize the gridded mean tropospheric zenith delay (*ZTD_grid_delay_mean.png*) with a **`hot`** colorbar, with superimposed station locations denoted by blue markers.\n\n<img src=\"support_docs/maps/maps_ex3/figures/ZTD_grid_delay_mean.png\" alt=\"ZTD_grid_delay_mean\" width=\"700\">",
"_____no_output_____"
],
[
"### Example 4. Generate variogram plots <a id='example_4'></a>",
"_____no_output_____"
],
[
"Produce plots illustrating empirical/experimental variogram fits per gridded station and time-slice (**`--variogram_per_timeslice`**) and also spanning the entire time-span. Plots of gridded station experimental variogram-derived sill and range values also generated.\n\nAdditionally, subset data in time for spring. I.e. **`'03-21 06-21'`**\n\nAlso save gridded arrays as raster files with **`--grid_to_raster`** so as to more conveniently replot in successive script calls (recommended).\n\nFinally, use the maximum number of allowed CPUs to save some time.",
"_____no_output_____"
]
],
[
[
"!raiderStats.py --file products/UNRcombinedGPS_ztd.csv --workdir maps_ex4 --bounding_box '36 40 -124 -119' --timeinterval '2016-01-01 2020-12-31' --seasonalinterval '03-21 06-21' --variogramplot --variogram_per_timeslice --grid_to_raster --cpus all",
"_____no_output_____"
]
],
[
[
"Now we can take a look at the generated variograms:",
"_____no_output_____"
]
],
[
[
"!ls maps_ex4/variograms",
"_____no_output_____"
]
],
[
[
"There are several subdirectories corresponding to each grid-cell that each contain empirical and experimental variograms generated for each time-slice (e.g. **`grid6_timeslice20160321_justEMPvariogram.eps `** and **`grid6_timeslice20160321_justEXPvariogram.eps `**, respectively) and across the entire sampled time period (**`grid6_timeslice20160321–20200621_justEMPvariogram.eps `** and **`grid6_timeslice20160321–20200621_justEXPvariogram.eps `**, respectively).\n\nRecall that the former pair of empirical/experimental variograms per time-slice are generated only if the **`---variogram_per_timeslice`** option is toggled. By default only the latter two pair of empirical/experimental variograms spanning the entire time-span are generated.",
"_____no_output_____"
],
[
"Here we visualize the total empirical variogram corresponding to the entire sampled time period for grid-cell 6 in the array (*grid6_timeslice20160321–20200621_justEMPvariogram.eps*). \n\n<img src=\"support_docs/maps/maps_ex4/variograms/grid6/grid6_timeslice20160321–20190621_justEMPvariogram.png\" alt=\"justEMPvariogram\" width=\"700\">",
"_____no_output_____"
],
[
"Here we visualize the total experimental variogram corresponding to the entire sampled time period for grid-cell 6 in the array (*grid6_timeslice20160321–20200621_justEXPvariogram.eps*). \n\n<img src=\"support_docs/maps/maps_ex4/variograms/grid6/grid6_timeslice20160321–20190621_justEXPvariogram.png\" alt=\"justEXPvariogram\" width=\"700\">",
"_____no_output_____"
],
[
"The central coordinates for all grid-nodes that satisfy the specified station density threshold (**`--densitythreshold`**, by default 10 stations per grid-cell) for variogram plots are stored in a lookup table:",
"_____no_output_____"
]
],
[
[
"!head maps_ex4/variograms/gridlocation_lookup.txt",
"_____no_output_____"
]
],
[
[
"Now we can take a look at the other generated figures:",
"_____no_output_____"
]
],
[
[
"!ls maps_ex4/figures",
"_____no_output_____"
]
],
[
[
"Here we visualize the gridded experimental variogram range (*ZTD_grid_range.png*) with a **`hot`** colorbar.\n\n<img src=\"support_docs/maps/maps_ex4/figures/ZTD_grid_range.png\" alt=\"ZTD_grid_range\" width=\"700\">",
"_____no_output_____"
],
[
"Here we visualize the gridded experimental variogram variance (*ZTD_grid_variance.png*) with a **`hot`** colorbar.\n\n<img src=\"support_docs/maps/maps_ex4/figures/ZTD_grid_variance.png\" alt=\"ZTD_grid_variance\" width=\"700\">",
"_____no_output_____"
],
[
"### Example 5. Generate seasonal phase/amplitude plots <a id='example_5'></a>",
"_____no_output_____"
],
[
"Produce plots illustrating seasonal phase/amplitude/period fits for each individual station (**`--station_seasonal_phase`**) and averaged across each grid-cell (**`--grid_seasonal_phase`**). The standard deviation is also plotted across each grid-cell.\n\nControl the prerequisite minimum time-series span (years) a given station TS must span, and the prerequisite minimum fractional observations in span (fraction across specified `--timeinterval TIMEINTERVAL`, by default the entire span of input dataset). Here, we will specify a minimum time-series span of 3 years, and minimum fraction observation of 0.6 (i.e. **`--min_span 3 0.6`**).\n\nSave figures of curve-fits vs data per station with **`--phaseamp_per_station`** for debugging purposes. Not recommended for large-scale runs in the interest of practical speed/\n\nAlso save gridded arrays as raster files with **`--grid_to_raster`** so as to more conveniently replot in successive script calls (recommended).\n\nFinally, use the maximum number of allowed CPUs to save some time.",
"_____no_output_____"
]
],
[
[
"!raiderStats.py --file products/UNRcombinedGPS_ztd.csv --workdir maps_ex5 --bounding_box '36 40 -124 -119' --grid_seasonal_phase --station_seasonal_phase --min_span 3 0.6 --phaseamp_per_station --grid_to_raster --cpus all",
"_____no_output_____"
]
],
[
[
"Now we can take a look at the generated rasters (i.e. the TIF files in the specified output directory):",
"_____no_output_____"
]
],
[
[
"!ls maps_ex5",
"_____no_output_____"
]
],
[
[
"Now we can take a look at the generated debug figures illustrating the curve-fits vs data (**`--phaseamp_per_station`**):",
"_____no_output_____"
]
],
[
[
"!ls maps_ex5/phaseamp_per_station",
"_____no_output_____"
]
],
[
[
"Here we visualize the time-series and curve-fit corresponding for one of the stations.\n\n<img src=\"maps_ex5/phaseamp_per_station/stationP335.png\" alt=\"stationCACH\" width=\"700\">",
"_____no_output_____"
]
],
[
[
"!ls maps_ex5/figures",
"_____no_output_____"
]
],
[
[
"Now we can take a look at the generated plots:",
"_____no_output_____"
],
[
"Here we visualize the seasonal phase of tropospheric zenith delay by station (*ZTD_station_seasonal_phase.png*) with a **`hot`** colorbar.\n\n<img src=\"support_docs/maps/maps_ex5/figures/ZTD_station_seasonal_phase.png\" alt=\"ZTD_station_seasonal_phase\" width=\"700\">",
"_____no_output_____"
],
[
"Here we visualize the gridded mean of the station-wise seasonal phase of tropospheric zenith delay (*ZTD_grid_seasonal_phase.png*) with a **`hot`** colorbar.\n\nNote that the colorbar bounds are saturated, which demonstrates the utility of plotting options outlined under section #7 such as **`--color_bounds`** and **`--colorpercentile`**\n\n<img src=\"support_docs/maps/maps_ex5/figures/ZTD_grid_seasonal_phase.png\" alt=\"ZTD_grid_seasonal_phase\" width=\"700\">",
"_____no_output_____"
],
[
"Here we visualize the seasonal amplitude of tropospheric zenith delay by station (*ZTD_station_seasonal_amplitude.png*) with a **`hot`** colorbar.\n\n<img src=\"support_docs/maps/maps_ex5/figures/ZTD_station_seasonal_amplitude.png\" alt=\"ZTD_station_seasonal_amplitude\" width=\"700\">",
"_____no_output_____"
],
[
"Here we visualize the gridded mean of the station-wise seasonal amplitude of tropospheric zenith delay (*ZTD_grid_seasonal_amplitude.png*) with a **`hot`** colorbar.\n\nNote that the colorbar bounds are saturated, which demonstrates the utility of plotting options outlined under section #7 such as **`--color_bounds`** and **`--colorpercentile`**\n\n<img src=\"support_docs/maps/maps_ex5/figures/ZTD_grid_seasonal_amplitude.png\" alt=\"ZTD_grid_seasonal_amplitude\" width=\"700\">",
"_____no_output_____"
],
[
"Here we visualize the seasonal period of tropospheric zenith delay by station (*ZTD_station_delay_period.png*) with a **`hot`** colorbar.\n\n<img src=\"support_docs/maps/maps_ex5/figures/ZTD_station_delay_period.png\" alt=\"ZTD_station_delay_period\" width=\"700\">",
"_____no_output_____"
],
[
"Here we visualize the gridded period of the station-wise seasonal period of tropospheric zenith delay (*ZTD_grid_seasonal_period.png*) with a **`hot`** colorbar.\n\nNote that the colorbar bounds are saturated, which demonstrates the utility of plotting options outlined under section #7 such as **`--color_bounds`** and **`--colorpercentile`**\n\n<img src=\"support_docs/maps/maps_ex5/figures/ZTD_grid_seasonal_period.png\" alt=\"ZTD_grid_seasonal_period\" width=\"700\">",
"_____no_output_____"
],
[
"Here we visualize the gridded period standard deviation of the station-wise seasonal phase of tropospheric zenith delay (*ZTD_grid_seasonal_period_stdev.png*) with a **`hot`** colorbar.\n\nNote that the colorbar bounds are saturated, which demonstrates the utility of plotting options outlined under section #7 such as **`--color_bounds`** and **`--colorpercentile`**\n\n<img src=\"support_docs/maps/maps_ex5/figures/ZTD_grid_seasonal_period_stdev.png\" alt=\"ZTD_grid_seasonal_period_stdev\" width=\"700\">",
"_____no_output_____"
],
[
"### Example 6. Generate weather model/GNSS residual plots <a id='example_6'></a>",
"_____no_output_____"
],
[
"Produce plots illustrating the residual between tropospheric zenith delay at specified GNSS stations and collocated weather-model delay nodes.\n\nGNSS data will again be downloaded with **`raiderDownloadGNSS.py`**, and GMAO weather-model derived delay will be computed with **`raiderDelay.py`**",
"_____no_output_____"
],
[
"Virtually access GNSS station location and zenith delay information for the year '2019', for every 12 days, at a UTC time of day 'HH:MM:SS' of '00:00:00', and across a geographic bounding box '36 40 -124 -119' spanning over Northern California.\n\nThe footprint of the specified geographic bounding box is again depicted in **Fig. 1**.\n\nIn addition to querying for multiple years, we will also experiment with using the maximum number of allowed CPUs to save some time! Recall again that the default number of CPUs used for parallelization is 8.\n\nNote these features and similar examples are outlined in more detail in the companion notebook **`raiderDownloadGNSS/raiderDownloadGNSS_tutorial.ipynb`**",
"_____no_output_____"
]
],
[
[
"!raiderDownloadGNSS.py --out GNSS_2019 --date 20190101 20191231 12 --returntime '00:00:00' --bounding_box '36 40 -124 -119' --cpus 12",
"_____no_output_____"
]
],
[
[
"Compute tropospheric zenith delay from the GMAO weather-model for the year '2019', for every 12 days, at a UTC time of day 'HH:MM:SS' of '00:00:00', at stations located across a geographic bounding box '36 40 -124 -119' spanning over Northern California and captured in the `GNSS_2019/gnssStationList_overbbox.csv` list generated by the `raiderDownloadGNSS.py` above (i.e. `--station_file GNSS_2019/gnssStationList_overbbox.csv`), and with an integration height limit `--zref` of 30,000 m. \n\nThe footprint of the specified geographic bounding box is again depicted in **Fig. 1**.",
"_____no_output_____"
]
],
[
[
"!mkdir GMAO_2019\n!cd GMAO_2019\n!raiderDelay.py --model GMAO --date 20190101 20191231 12 --time 00:00 --station_file ../GNSS_2019/gnssStationList_overbbox.csv --zref 30000 -v\n!cd ../",
"_____no_output_____"
]
],
[
[
"Combine delay files derived above from the GMAO weather-model (`--raider 'GMAO_2019/GMAO_Delay_*.csv' --raiderDir GMAO_2019 --raider_column totalDelay`) and GNSS stations (`GNSS_2019/UNRcombinedGPS_ztd.csv --column ZTD`) respectively, passing only data which are collocated in space and time.",
"_____no_output_____"
]
],
[
[
"!raiderCombine.py --gnss GNSS_2019/UNRcombinedGPS_ztd.csv --column ZTD --raider 'GMAO_2019/GMAO_Delay_*.csv' --raiderDir GMAO_2019 --raider_column totalDelay --out Combined_delays_GNSSandGMAO_2019.csv",
"_____no_output_____"
]
],
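[
[
"# Hedged sketch (not in the original tutorial): summarize the weather-model/\n# GNSS residual column produced by raiderCombine.py above. The file name and\n# the ZTD_minus_RAiDER column come from the surrounding cells; anything else\n# is an assumption about the CSV layout.\nimport pandas as pd\ncombined = pd.read_csv('Combined_delays_GNSSandGMAO_2019.csv')\nprint(combined['ZTD_minus_RAiDER'].describe())",
"_____no_output_____"
]
],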
[
[
"Using the file **`Combined_delays_GNSSandGMAO_2019.csv`** generated by **`raiderCombine.py`** as input and passing the weather-model/GNSS residual values (`--column_name ZTD_minus_RAiDER`), produce plots illustrating mean tropospheric zenith delay by station + across each grid-cell, and standard deviation of tropospheric zenith delay by station + across each grid-cell.",
"_____no_output_____"
]
],
[
[
"!raiderStats.py --file Combined_delays_GNSSandGMAO_2019.csv --column_name ZTD_minus_RAiDER --workdir maps_ex6 --bounding_box '36 40 -124 -119' --station_delay_mean --station_delay_stdev --grid_delay_mean --grid_delay_stdev --grid_to_raster --cpus all",
"_____no_output_____"
]
],
[
[
"Here we visualize the mean tropospheric zenith delay by station (*ZTD_station_delay_mean.png*) with a **`hot`** colorbar. \n\n<img src=\"support_docs/maps/maps_ex6/figures/ZTD_minus_RAiDER_station_delay_mean.png\" alt=\"ZTD_minus_RAiDER_station_delay_mean\" width=\"700\">",
"_____no_output_____"
],
[
"Here we visualize the standard deviation of tropospheric zenith delay by station (*ZTD_minus_RAiDER_station_delay_stdev.png*) with a **`hot`** colorbar. \n\n<img src=\"support_docs/maps/maps_ex6/figures/ZTD_minus_RAiDER_station_delay_stdev.png\" alt=\"ZTD_minus_RAiDER_station_delay_stdev\" width=\"700\">",
"_____no_output_____"
],
[
"Here we visualize the gridded mean tropospheric zenith delay (*ZTD_minus_RAiDER_grid_delay_mean.png*) with a **`hot`** colorbar.\n\n<img src=\"support_docs/maps/maps_ex6/figures/ZTD_minus_RAiDER_grid_delay_mean.png\" alt=\"ZTD_minus_RAiDER_grid_delay_mean\" width=\"700\">",
"_____no_output_____"
],
[
"Here we visualize the gridded standard deviation of tropospheric zenith delay (*ZTD_minus_RAiDER_grid_delay_.png*) with a **`hot`** colorbar.\n\n<img src=\"support_docs/maps/maps_ex6/figures/ZTD_minus_RAiDER_grid_delay_stdev.png\" alt=\"ZTD_minus_RAiDER_grid_delay_stdev\" width=\"700\">",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
d0ed03954d0588fb22d5533ef9e90b2d0ec0259b | 219,894 | ipynb | Jupyter Notebook | notebooks/smc_logreg_tempering.ipynb | susnato/probml-notebooks | 95a1a1045ed96ce8ca9f59b8664b1356098d427f | [
"MIT"
] | 166 | 2021-07-16T17:33:09.000Z | 2022-03-30T03:35:34.000Z | notebooks/smc_logreg_tempering.ipynb | susnato/probml-notebooks | 95a1a1045ed96ce8ca9f59b8664b1356098d427f | [
"MIT"
] | 29 | 2021-07-21T16:31:51.000Z | 2022-03-31T19:50:13.000Z | notebooks/smc_logreg_tempering.ipynb | susnato/probml-notebooks | 95a1a1045ed96ce8ca9f59b8664b1356098d427f | [
"MIT"
] | 48 | 2021-07-17T08:26:18.000Z | 2022-03-31T03:36:18.000Z | 227.63354 | 39,102 | 0.873116 | [
[
[
"<a href=\"https://colab.research.google.com/github/probml/probml-notebooks/blob/main/notebooks/smc_logreg_tempering.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"#SMC for logistic regression\n\nWe compare data tempering (IBIS) with temperature tempering.\n\nCode is from \n\nhttps://github.com/nchopin/particles/blob/master/book/smc_samplers/logistic_reg.py\n\n\n",
"_____no_output_____"
]
],
[
[
"!git clone https://github.com/nchopin/particles.git",
"Cloning into 'particles'...\nremote: Enumerating objects: 1506, done.\u001b[K\nremote: Counting objects: 100% (690/690), done.\u001b[K\nremote: Compressing objects: 100% (416/416), done.\u001b[K\nremote: Total 1506 (delta 445), reused 472 (delta 257), pack-reused 816\u001b[K\nReceiving objects: 100% (1506/1506), 4.48 MiB | 13.93 MiB/s, done.\nResolving deltas: 100% (968/968), done.\n"
],
[
"%cd /content/particles",
"/content/particles\n"
],
[
"!pip install --user .",
"Processing /content/particles\n\u001b[33m DEPRECATION: A future pip version will change local packages to be built in-place without first copying to a temporary directory. We recommend you use --use-feature=in-tree-build to test your packages with this new behavior before it becomes the default.\n pip 21.3 will remove support for this functionality. You can find discussion regarding this at https://github.com/pypa/pip/issues/7555.\u001b[0m\nRequirement already satisfied: numpy>=1.18 in /usr/local/lib/python3.7/dist-packages (from particles==0.2) (1.19.5)\nCollecting scipy>=1.7\n Downloading scipy-1.7.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl (28.5 MB)\n\u001b[K |████████████████████████████████| 28.5 MB 106 kB/s \n\u001b[?25hRequirement already satisfied: numba in /usr/local/lib/python3.7/dist-packages (from particles==0.2) (0.51.2)\nRequirement already satisfied: joblib in /usr/local/lib/python3.7/dist-packages (from particles==0.2) (1.0.1)\nRequirement already satisfied: llvmlite<0.35,>=0.34.0.dev0 in /usr/local/lib/python3.7/dist-packages (from numba->particles==0.2) (0.34.0)\nRequirement already satisfied: setuptools in /usr/local/lib/python3.7/dist-packages (from numba->particles==0.2) (57.4.0)\nBuilding wheels for collected packages: particles\n Building wheel for particles (setup.py) ... \u001b[?25l\u001b[?25hdone\n Created wheel for particles: filename=particles-0.2-py3-none-any.whl size=573163 sha256=8c1ba4a552ad649ea25b8b27167304323c3b05bd28dd4b6844e5c252f8042588\n Stored in directory: /tmp/pip-ephem-wheel-cache-klz7twnq/wheels/c4/ec/4d/9651be18bff1d8c3beaff376421029d3d43569a79306f8a862\nSuccessfully built particles\nInstalling collected packages: scipy, particles\n\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\nalbumentations 0.1.12 requires imgaug<0.2.7,>=0.2.5, but you have imgaug 0.2.9 which is incompatible.\u001b[0m\nSuccessfully installed particles-0.2 scipy-1.7.1\n"
],
[
"import particles\nimport particles.state_space_models as ssm\nimport particles.distributions as dists",
"_____no_output_____"
],
[
"\"\"\"\nNumerical experiment of Chapter 17 (SMC samplers).\nCompare IBIS and SMC tempering for approximating:\n* the normalising constant (marginal likelihood)\n* the posterior expectation of the p coefficients\nfor a logistic regression model.\nSee below for how to select the data-set.\nNote: the SMC samplers implemented in module smc_samplers are now \"waste-free\"\nby default, see Dau & Chopin (2021), and the documentation of `smc_samplers`\n(plus the corresponding jupyter notebook). This script still performs exactly\nthe same numerical experiments as in the book, based on standard (non\nwaste-free) SMC samplers. To do so, we added ``wastefree=False`` to the\ndefinition of the corresponding `Feynman-Kac` object. Again, see the\ndocumentation of `smc_samplers` for more details.\n\"\"\"\n\nfrom matplotlib import pyplot as plt\nimport numpy as np\nfrom numpy import random\nimport seaborn as sb\n\nimport particles\nfrom particles import datasets as dts\nfrom particles import distributions as dists\nfrom particles import resampling as rs\nfrom particles import smc_samplers as ssps\nfrom particles.collectors import Moments\n\ndatasets = {'pima': dts.Pima, 'eeg': dts.Eeg, 'sonar': dts.Sonar}\ndataset_name = 'eeg' # choose one of the three\ndata = datasets[dataset_name]().data\nT, p = data.shape\n\n# for each dataset, we adapt:\n# * N: number of particles\n# * Ks = list of Ks (nr MCMC steps)\n# * typK: value of M used for plots on \"typical\" run\n\nif dataset_name == 'sonar':\n N = 10 ** 4\n Ks = [10, 20, 30, 40, 50, 60]\n typK = 50\nelif dataset_name == 'pima':\n N = 10 ** 3\n Ks = [1, 3, 5]\n typK = 3\nelif dataset_name == 'eeg':\n N = 10 ** 3\n #Ks = [1, 3, 5, 7, 10, 15, 20]\n Ks = [1, 3, 5]\n typK = 5\n\n# prior & model\nprior = dists.StructDist({'beta':dists.MvNormal(scale=5.,\n cov=np.eye(p))})\n\n\nclass LogisticRegression(ssps.StaticModel):\n def logpyt(self, theta, t):\n # log-likelihood factor t, for given theta\n lin = np.matmul(theta['beta'], data[t, :])\n return - np.logaddexp(0., -lin)\n",
"_____no_output_____"
],
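[
"# Hedged sanity check (added): logpyt above is the Bernoulli log-likelihood of\n# logistic regression written stably as -log(1 + exp(-lin)) = log sigmoid(lin).\n# Quick numerical check on toy values:\nlin = np.array([-2., 0., 3.])\nassert np.allclose(-np.logaddexp(0., -lin), np.log(1. / (1. + np.exp(-lin))))\nprint(-np.logaddexp(0., -lin))",
"_____no_output_____"
],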
[
"\n\n# algorithms\n# N and values of K set above according to dataset\nESSrmin = 0.5\nnruns = 2 # 16\nresults = []\n\n# runs\nprint('Dataset: %s' % dataset_name)\nfor K in Ks:\n for i in range(nruns):\n # need to shuffle the data for IBIS\n random.shuffle(data)\n model = LogisticRegression(data=data, prior=prior)\n for alg_type in ['tempering', 'ibis']:\n if alg_type=='ibis':\n fk = ssps.IBIS(model=model, wastefree=False, len_chain=K + 1)\n pf = particles.SMC(N=N, fk=fk, ESSrmin=ESSrmin,\n collect=[Moments], verbose=False)\n else:\n fk = ssps.AdaptiveTempering(model=model, ESSrmin=ESSrmin,\n wastefree=False, len_chain = K + 1)\n pf = particles.SMC(N=N, fk=fk, ESSrmin=1., collect=[Moments],\n verbose=True)\n # must resample at every time step when doing adaptive\n # tempering\n print('%s, K=%i, run %i' % (alg_type, K, i))\n pf.run()\n print('CPU time (min): %.2f' % (pf.cpu_time / 60))\n print('loglik: %f' % pf.logLt)\n res = {'K': K, 'type': alg_type, 'out': pf.summaries,\n 'cpu': pf.cpu_time}\n if alg_type=='ibis':\n n_eval = N * (T + K * sum([t for t in range(T) if\n pf.summaries.rs_flags[t]]))\n else:\n n_eval = N * T * (1. + K * (len(pf.summaries.ESSs) - 1))\n res['path_sampling'] = pf.X.shared['path_sampling'][-1]\n res['exponents'] = pf.X.shared['exponents']\n res['n_eval'] = n_eval\n results.append(res)\n",
"Dataset: eeg\ntempering, K=1, run 0\nt=0, ESS=500.00, tempering exponent=9.31e-05\nt=1, Metropolis acc. rate (over 1 steps): 0.257, ESS=500.00, tempering exponent=0.00029\nt=2, Metropolis acc. rate (over 1 steps): 0.275, ESS=500.00, tempering exponent=0.000662\nt=3, Metropolis acc. rate (over 1 steps): 0.265, ESS=500.00, tempering exponent=0.00142\nt=4, Metropolis acc. rate (over 1 steps): 0.298, ESS=500.00, tempering exponent=0.00251\nt=5, Metropolis acc. rate (over 1 steps): 0.304, ESS=500.00, tempering exponent=0.00385\nt=6, Metropolis acc. rate (over 1 steps): 0.323, ESS=500.00, tempering exponent=0.00571\nt=7, Metropolis acc. rate (over 1 steps): 0.320, ESS=500.00, tempering exponent=0.0086\nt=8, Metropolis acc. rate (over 1 steps): 0.353, ESS=500.00, tempering exponent=0.0139\nt=9, Metropolis acc. rate (over 1 steps): 0.337, ESS=500.00, tempering exponent=0.0222\nt=10, Metropolis acc. rate (over 1 steps): 0.318, ESS=500.00, tempering exponent=0.0332\nt=11, Metropolis acc. rate (over 1 steps): 0.283, ESS=500.00, tempering exponent=0.0517\nt=12, Metropolis acc. rate (over 1 steps): 0.358, ESS=500.00, tempering exponent=0.0835\nt=13, Metropolis acc. rate (over 1 steps): 0.336, ESS=500.00, tempering exponent=0.122\nt=14, Metropolis acc. rate (over 1 steps): 0.334, ESS=500.00, tempering exponent=0.174\nt=15, Metropolis acc. rate (over 1 steps): 0.307, ESS=500.00, tempering exponent=0.233\nt=16, Metropolis acc. rate (over 1 steps): 0.322, ESS=500.00, tempering exponent=0.289\nt=17, Metropolis acc. rate (over 1 steps): 0.317, ESS=500.00, tempering exponent=0.35\nt=18, Metropolis acc. rate (over 1 steps): 0.393, ESS=500.00, tempering exponent=0.443\nt=19, Metropolis acc. rate (over 1 steps): 0.404, ESS=500.00, tempering exponent=0.571\nt=20, Metropolis acc. rate (over 1 steps): 0.405, ESS=500.00, tempering exponent=0.699\nt=21, Metropolis acc. rate (over 1 steps): 0.446, ESS=500.00, tempering exponent=0.832\nt=22, Metropolis acc. rate (over 1 steps): 0.408, ESS=500.00, tempering exponent=0.995\nt=23, Metropolis acc. rate (over 1 steps): 0.429, ESS=999.31, tempering exponent=1\nCPU time (min): 0.59\nloglik: -9894.531888\nibis, K=1, run 0\nCPU time (min): 0.45\nloglik: -9877.739537\ntempering, K=1, run 1\nt=0, ESS=500.00, tempering exponent=9.36e-05\nt=1, Metropolis acc. rate (over 1 steps): 0.256, ESS=500.00, tempering exponent=0.000288\nt=2, Metropolis acc. rate (over 1 steps): 0.263, ESS=500.00, tempering exponent=0.000657\nt=3, Metropolis acc. rate (over 1 steps): 0.285, ESS=500.00, tempering exponent=0.00138\nt=4, Metropolis acc. rate (over 1 steps): 0.298, ESS=500.00, tempering exponent=0.00246\nt=5, Metropolis acc. rate (over 1 steps): 0.346, ESS=500.00, tempering exponent=0.00386\nt=6, Metropolis acc. rate (over 1 steps): 0.303, ESS=500.00, tempering exponent=0.00593\nt=7, Metropolis acc. rate (over 1 steps): 0.306, ESS=500.00, tempering exponent=0.0086\nt=8, Metropolis acc. rate (over 1 steps): 0.316, ESS=500.00, tempering exponent=0.0126\nt=9, Metropolis acc. rate (over 1 steps): 0.376, ESS=500.00, tempering exponent=0.0204\nt=10, Metropolis acc. rate (over 1 steps): 0.459, ESS=500.00, tempering exponent=0.0327\nt=11, Metropolis acc. rate (over 1 steps): 0.435, ESS=500.00, tempering exponent=0.0529\nt=12, Metropolis acc. rate (over 1 steps): 0.355, ESS=500.00, tempering exponent=0.0838\nt=13, Metropolis acc. rate (over 1 steps): 0.357, ESS=500.00, tempering exponent=0.128\nt=14, Metropolis acc. rate (over 1 steps): 0.362, ESS=500.00, tempering exponent=0.189\nt=15, Metropolis acc. 
rate (over 1 steps): 0.361, ESS=500.00, tempering exponent=0.265\nt=16, Metropolis acc. rate (over 1 steps): 0.388, ESS=500.00, tempering exponent=0.372\nt=17, Metropolis acc. rate (over 1 steps): 0.450, ESS=500.00, tempering exponent=0.519\nt=18, Metropolis acc. rate (over 1 steps): 0.484, ESS=500.00, tempering exponent=0.669\nt=19, Metropolis acc. rate (over 1 steps): 0.442, ESS=500.00, tempering exponent=0.825\nt=20, Metropolis acc. rate (over 1 steps): 0.395, ESS=500.00, tempering exponent=0.991\nt=21, Metropolis acc. rate (over 1 steps): 0.366, ESS=997.96, tempering exponent=1\nCPU time (min): 0.54\nloglik: -9913.230136\nibis, K=1, run 1\nCPU time (min): 0.48\nloglik: -9909.165287\ntempering, K=3, run 0\nt=0, ESS=500.00, tempering exponent=9.27e-05\nt=1, Metropolis acc. rate (over 3 steps): 0.255, ESS=500.00, tempering exponent=0.000277\nt=2, Metropolis acc. rate (over 3 steps): 0.249, ESS=500.00, tempering exponent=0.000627\nt=3, Metropolis acc. rate (over 3 steps): 0.263, ESS=500.00, tempering exponent=0.00123\nt=4, Metropolis acc. rate (over 3 steps): 0.288, ESS=500.00, tempering exponent=0.00226\nt=5, Metropolis acc. rate (over 3 steps): 0.293, ESS=500.00, tempering exponent=0.00381\nt=6, Metropolis acc. rate (over 3 steps): 0.275, ESS=500.00, tempering exponent=0.00621\nt=7, Metropolis acc. rate (over 3 steps): 0.276, ESS=500.00, tempering exponent=0.00967\nt=8, Metropolis acc. rate (over 3 steps): 0.275, ESS=500.00, tempering exponent=0.0152\nt=9, Metropolis acc. rate (over 3 steps): 0.310, ESS=500.00, tempering exponent=0.0261\nt=10, Metropolis acc. rate (over 3 steps): 0.306, ESS=500.00, tempering exponent=0.0463\nt=11, Metropolis acc. rate (over 3 steps): 0.302, ESS=500.00, tempering exponent=0.0762\nt=12, Metropolis acc. rate (over 3 steps): 0.278, ESS=500.00, tempering exponent=0.117\nt=13, Metropolis acc. rate (over 3 steps): 0.279, ESS=500.00, tempering exponent=0.172\nt=14, Metropolis acc. rate (over 3 steps): 0.275, ESS=500.00, tempering exponent=0.233\nt=15, Metropolis acc. rate (over 3 steps): 0.287, ESS=500.00, tempering exponent=0.3\nt=16, Metropolis acc. rate (over 3 steps): 0.292, ESS=500.00, tempering exponent=0.374\nt=17, Metropolis acc. rate (over 3 steps): 0.303, ESS=500.00, tempering exponent=0.459\nt=18, Metropolis acc. rate (over 3 steps): 0.300, ESS=500.00, tempering exponent=0.548\nt=19, Metropolis acc. rate (over 3 steps): 0.279, ESS=500.00, tempering exponent=0.648\nt=20, Metropolis acc. rate (over 3 steps): 0.283, ESS=500.00, tempering exponent=0.763\nt=21, Metropolis acc. rate (over 3 steps): 0.279, ESS=500.00, tempering exponent=0.871\nt=22, Metropolis acc. rate (over 3 steps): 0.289, ESS=500.00, tempering exponent=0.965\nt=23, Metropolis acc. rate (over 3 steps): 0.274, ESS=894.72, tempering exponent=1\nCPU time (min): 1.71\nloglik: -9872.572721\nibis, K=3, run 0\nCPU time (min): 1.22\nloglik: -9854.823827\ntempering, K=3, run 1\nt=0, ESS=500.00, tempering exponent=9.23e-05\nt=1, Metropolis acc. rate (over 3 steps): 0.258, ESS=500.00, tempering exponent=0.000275\nt=2, Metropolis acc. rate (over 3 steps): 0.261, ESS=500.00, tempering exponent=0.000592\nt=3, Metropolis acc. rate (over 3 steps): 0.275, ESS=500.00, tempering exponent=0.00117\nt=4, Metropolis acc. rate (over 3 steps): 0.286, ESS=500.00, tempering exponent=0.00222\nt=5, Metropolis acc. rate (over 3 steps): 0.301, ESS=500.00, tempering exponent=0.00392\nt=6, Metropolis acc. rate (over 3 steps): 0.292, ESS=500.00, tempering exponent=0.00641\nt=7, Metropolis acc. 
rate (over 3 steps): 0.284, ESS=500.00, tempering exponent=0.0106\nt=8, Metropolis acc. rate (over 3 steps): 0.285, ESS=500.00, tempering exponent=0.0176\nt=9, Metropolis acc. rate (over 3 steps): 0.285, ESS=500.00, tempering exponent=0.0278\nt=10, Metropolis acc. rate (over 3 steps): 0.267, ESS=500.00, tempering exponent=0.043\nt=11, Metropolis acc. rate (over 3 steps): 0.276, ESS=500.00, tempering exponent=0.0676\nt=12, Metropolis acc. rate (over 3 steps): 0.272, ESS=500.00, tempering exponent=0.103\nt=13, Metropolis acc. rate (over 3 steps): 0.282, ESS=500.00, tempering exponent=0.148\nt=14, Metropolis acc. rate (over 3 steps): 0.286, ESS=500.00, tempering exponent=0.205\nt=15, Metropolis acc. rate (over 3 steps): 0.296, ESS=500.00, tempering exponent=0.278\nt=16, Metropolis acc. rate (over 3 steps): 0.300, ESS=500.00, tempering exponent=0.37\nt=17, Metropolis acc. rate (over 3 steps): 0.305, ESS=500.00, tempering exponent=0.467\nt=18, Metropolis acc. rate (over 3 steps): 0.303, ESS=500.00, tempering exponent=0.567\nt=19, Metropolis acc. rate (over 3 steps): 0.288, ESS=500.00, tempering exponent=0.665\nt=20, Metropolis acc. rate (over 3 steps): 0.291, ESS=500.00, tempering exponent=0.759\nt=21, Metropolis acc. rate (over 3 steps): 0.283, ESS=500.00, tempering exponent=0.848\nt=22, Metropolis acc. rate (over 3 steps): 0.265, ESS=500.00, tempering exponent=0.94\nt=23, Metropolis acc. rate (over 3 steps): 0.260, ESS=764.59, tempering exponent=1\nCPU time (min): 1.73\nloglik: -9863.238013\nibis, K=3, run 1\nCPU time (min): 1.18\nloglik: -9852.332666\ntempering, K=5, run 0\nt=0, ESS=500.00, tempering exponent=8.89e-05\nt=1, Metropolis acc. rate (over 5 steps): 0.255, ESS=500.00, tempering exponent=0.000279\nt=2, Metropolis acc. rate (over 5 steps): 0.251, ESS=500.00, tempering exponent=0.00061\nt=3, Metropolis acc. rate (over 5 steps): 0.252, ESS=500.00, tempering exponent=0.00117\nt=4, Metropolis acc. rate (over 5 steps): 0.256, ESS=500.00, tempering exponent=0.00206\nt=5, Metropolis acc. rate (over 5 steps): 0.268, ESS=500.00, tempering exponent=0.00342\nt=6, Metropolis acc. rate (over 5 steps): 0.270, ESS=500.00, tempering exponent=0.00556\nt=7, Metropolis acc. rate (over 5 steps): 0.269, ESS=500.00, tempering exponent=0.00937\nt=8, Metropolis acc. rate (over 5 steps): 0.259, ESS=500.00, tempering exponent=0.016\nt=9, Metropolis acc. rate (over 5 steps): 0.252, ESS=500.00, tempering exponent=0.027\nt=10, Metropolis acc. rate (over 5 steps): 0.232, ESS=500.00, tempering exponent=0.0446\nt=11, Metropolis acc. rate (over 5 steps): 0.219, ESS=500.00, tempering exponent=0.0686\nt=12, Metropolis acc. rate (over 5 steps): 0.222, ESS=500.00, tempering exponent=0.0983\nt=13, Metropolis acc. rate (over 5 steps): 0.222, ESS=500.00, tempering exponent=0.135\nt=14, Metropolis acc. rate (over 5 steps): 0.234, ESS=500.00, tempering exponent=0.185\nt=15, Metropolis acc. rate (over 5 steps): 0.237, ESS=500.00, tempering exponent=0.25\nt=16, Metropolis acc. rate (over 5 steps): 0.247, ESS=500.00, tempering exponent=0.32\nt=17, Metropolis acc. rate (over 5 steps): 0.246, ESS=500.00, tempering exponent=0.393\nt=18, Metropolis acc. rate (over 5 steps): 0.250, ESS=500.00, tempering exponent=0.477\nt=19, Metropolis acc. rate (over 5 steps): 0.260, ESS=500.00, tempering exponent=0.571\nt=20, Metropolis acc. rate (over 5 steps): 0.257, ESS=500.00, tempering exponent=0.673\nt=21, Metropolis acc. rate (over 5 steps): 0.262, ESS=500.00, tempering exponent=0.783\nt=22, Metropolis acc. 
rate (over 5 steps): 0.254, ESS=500.00, tempering exponent=0.881\nt=23, Metropolis acc. rate (over 5 steps): 0.263, ESS=500.00, tempering exponent=0.968\nt=24, Metropolis acc. rate (over 5 steps): 0.262, ESS=912.40, tempering exponent=1\nCPU time (min): 2.95\nloglik: -9854.048048\nibis, K=5, run 0\nCPU time (min): 1.85\nloglik: -9847.005143\ntempering, K=5, run 1\nt=0, ESS=500.00, tempering exponent=0.000104\nt=1, Metropolis acc. rate (over 5 steps): 0.255, ESS=500.00, tempering exponent=0.000321\nt=2, Metropolis acc. rate (over 5 steps): 0.256, ESS=500.00, tempering exponent=0.000686\nt=3, Metropolis acc. rate (over 5 steps): 0.245, ESS=500.00, tempering exponent=0.0013\nt=4, Metropolis acc. rate (over 5 steps): 0.256, ESS=500.00, tempering exponent=0.00224\nt=5, Metropolis acc. rate (over 5 steps): 0.261, ESS=500.00, tempering exponent=0.0037\nt=6, Metropolis acc. rate (over 5 steps): 0.260, ESS=500.00, tempering exponent=0.00586\nt=7, Metropolis acc. rate (over 5 steps): 0.258, ESS=500.00, tempering exponent=0.00952\nt=8, Metropolis acc. rate (over 5 steps): 0.255, ESS=500.00, tempering exponent=0.0155\nt=9, Metropolis acc. rate (over 5 steps): 0.251, ESS=500.00, tempering exponent=0.0259\nt=10, Metropolis acc. rate (over 5 steps): 0.241, ESS=500.00, tempering exponent=0.0427\nt=11, Metropolis acc. rate (over 5 steps): 0.233, ESS=500.00, tempering exponent=0.0676\nt=12, Metropolis acc. rate (over 5 steps): 0.228, ESS=500.00, tempering exponent=0.101\nt=13, Metropolis acc. rate (over 5 steps): 0.232, ESS=500.00, tempering exponent=0.144\nt=14, Metropolis acc. rate (over 5 steps): 0.238, ESS=500.00, tempering exponent=0.196\nt=15, Metropolis acc. rate (over 5 steps): 0.246, ESS=500.00, tempering exponent=0.253\nt=16, Metropolis acc. rate (over 5 steps): 0.249, ESS=500.00, tempering exponent=0.318\nt=17, Metropolis acc. rate (over 5 steps): 0.263, ESS=500.00, tempering exponent=0.388\nt=18, Metropolis acc. rate (over 5 steps): 0.254, ESS=500.00, tempering exponent=0.467\nt=19, Metropolis acc. rate (over 5 steps): 0.262, ESS=500.00, tempering exponent=0.556\nt=20, Metropolis acc. rate (over 5 steps): 0.264, ESS=500.00, tempering exponent=0.653\nt=21, Metropolis acc. rate (over 5 steps): 0.252, ESS=500.00, tempering exponent=0.755\nt=22, Metropolis acc. rate (over 5 steps): 0.266, ESS=500.00, tempering exponent=0.871\nt=23, Metropolis acc. rate (over 5 steps): 0.275, ESS=500.00, tempering exponent=0.989\nt=24, Metropolis acc. rate (over 5 steps): 0.276, ESS=993.44, tempering exponent=1\nCPU time (min): 2.93\nloglik: -9854.453127\nibis, K=5, run 1\nCPU time (min): 1.69\nloglik: -9849.459786\ntempering, K=7, run 0\nt=0, ESS=500.00, tempering exponent=9.08e-05\nt=1, Metropolis acc. rate (over 7 steps): 0.252, ESS=500.00, tempering exponent=0.000277\nt=2, Metropolis acc. rate (over 7 steps): 0.258, ESS=500.00, tempering exponent=0.000608\nt=3, Metropolis acc. rate (over 7 steps): 0.251, ESS=500.00, tempering exponent=0.00116\nt=4, Metropolis acc. rate (over 7 steps): 0.265, ESS=500.00, tempering exponent=0.00206\nt=5, Metropolis acc. rate (over 7 steps): 0.277, ESS=500.00, tempering exponent=0.00354\nt=6, Metropolis acc. rate (over 7 steps): 0.266, ESS=500.00, tempering exponent=0.00598\nt=7, Metropolis acc. rate (over 7 steps): 0.262, ESS=500.00, tempering exponent=0.00971\nt=8, Metropolis acc. rate (over 7 steps): 0.257, ESS=500.00, tempering exponent=0.0161\nt=9, Metropolis acc. rate (over 7 steps): 0.248, ESS=500.00, tempering exponent=0.0264\nt=10, Metropolis acc. 
rate (over 7 steps): 0.235, ESS=500.00, tempering exponent=0.0437\nt=11, Metropolis acc. rate (over 7 steps): 0.236, ESS=500.00, tempering exponent=0.0684\nt=12, Metropolis acc. rate (over 7 steps): 0.225, ESS=500.00, tempering exponent=0.1\nt=13, Metropolis acc. rate (over 7 steps): 0.232, ESS=500.00, tempering exponent=0.138\nt=14, Metropolis acc. rate (over 7 steps): 0.239, ESS=500.00, tempering exponent=0.183\nt=15, Metropolis acc. rate (over 7 steps): 0.231, ESS=500.00, tempering exponent=0.241\nt=16, Metropolis acc. rate (over 7 steps): 0.244, ESS=500.00, tempering exponent=0.311\nt=17, Metropolis acc. rate (over 7 steps): 0.252, ESS=500.00, tempering exponent=0.386\nt=18, Metropolis acc. rate (over 7 steps): 0.252, ESS=500.00, tempering exponent=0.468\nt=19, Metropolis acc. rate (over 7 steps): 0.262, ESS=500.00, tempering exponent=0.557\nt=20, Metropolis acc. rate (over 7 steps): 0.269, ESS=500.00, tempering exponent=0.649\nt=21, Metropolis acc. rate (over 7 steps): 0.256, ESS=500.00, tempering exponent=0.738\nt=22, Metropolis acc. rate (over 7 steps): 0.259, ESS=500.00, tempering exponent=0.831\nt=23, Metropolis acc. rate (over 7 steps): 0.257, ESS=500.00, tempering exponent=0.934\nt=24, Metropolis acc. rate (over 7 steps): 0.255, ESS=748.44, tempering exponent=1\nCPU time (min): 4.13\nloglik: -9848.850450\nibis, K=7, run 0\nCPU time (min): 3.02\nloglik: -9847.692150\ntempering, K=7, run 1\nt=0, ESS=500.00, tempering exponent=9.13e-05\nt=1, Metropolis acc. rate (over 7 steps): 0.243, ESS=500.00, tempering exponent=0.000287\nt=2, Metropolis acc. rate (over 7 steps): 0.246, ESS=500.00, tempering exponent=0.000652\nt=3, Metropolis acc. rate (over 7 steps): 0.258, ESS=500.00, tempering exponent=0.00124\nt=4, Metropolis acc. rate (over 7 steps): 0.257, ESS=500.00, tempering exponent=0.00214\nt=5, Metropolis acc. rate (over 7 steps): 0.252, ESS=500.00, tempering exponent=0.00362\nt=6, Metropolis acc. rate (over 7 steps): 0.251, ESS=500.00, tempering exponent=0.00597\nt=7, Metropolis acc. rate (over 7 steps): 0.252, ESS=500.00, tempering exponent=0.00939\nt=8, Metropolis acc. rate (over 7 steps): 0.241, ESS=500.00, tempering exponent=0.0148\nt=9, Metropolis acc. rate (over 7 steps): 0.238, ESS=500.00, tempering exponent=0.0234\nt=10, Metropolis acc. rate (over 7 steps): 0.229, ESS=500.00, tempering exponent=0.0379\nt=11, Metropolis acc. rate (over 7 steps): 0.235, ESS=500.00, tempering exponent=0.0614\nt=12, Metropolis acc. rate (over 7 steps): 0.237, ESS=500.00, tempering exponent=0.0945\nt=13, Metropolis acc. rate (over 7 steps): 0.228, ESS=500.00, tempering exponent=0.137\nt=14, Metropolis acc. rate (over 7 steps): 0.240, ESS=500.00, tempering exponent=0.19\nt=15, Metropolis acc. rate (over 7 steps): 0.240, ESS=500.00, tempering exponent=0.248\nt=16, Metropolis acc. rate (over 7 steps): 0.244, ESS=500.00, tempering exponent=0.311\nt=17, Metropolis acc. rate (over 7 steps): 0.239, ESS=500.00, tempering exponent=0.379\nt=18, Metropolis acc. rate (over 7 steps): 0.249, ESS=500.00, tempering exponent=0.455\nt=19, Metropolis acc. rate (over 7 steps): 0.253, ESS=500.00, tempering exponent=0.538\nt=20, Metropolis acc. rate (over 7 steps): 0.261, ESS=500.00, tempering exponent=0.631\nt=21, Metropolis acc. rate (over 7 steps): 0.260, ESS=500.00, tempering exponent=0.726\nt=22, Metropolis acc. rate (over 7 steps): 0.251, ESS=500.00, tempering exponent=0.817\nt=23, Metropolis acc. rate (over 7 steps): 0.251, ESS=500.00, tempering exponent=0.907\nt=24, Metropolis acc. 
rate (over 7 steps): 0.266, ESS=563.15, tempering exponent=1\nCPU time (min): 4.11\nloglik: -9850.407733\nibis, K=7, run 1\nCPU time (min): 2.58\nloglik: -9847.138432\n"
],
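[
"# Hedged sketch (added): collect the per-run summaries stored in `results`\n# above into a DataFrame for easier inspection; the keys used here are the\n# ones written into `res` in the previous cell.\nimport pandas as pd\nsummary = pd.DataFrame([{k: r[k] for k in ('type', 'K', 'cpu', 'n_eval')} for r in results])\nprint(summary)",
"_____no_output_____"
],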
[
"\n# plots\n#######\nsavefigs = True # do you want to save figures as pdfs\nplt.style.use('ggplot')\npal = sb.dark_palette('white', n_colors=2)\n\n# Compare standard and path sampling estimates of the log-normalising cst\nplt.figure()\ndiff_est = [(r['out'].logLts[-1] - r['path_sampling'])\n for r in results if r['type']=='tempering']\nsb.histplot(diff_est)",
"_____no_output_____"
],
[
"# Figure 17.1: typical behaviour of IBIS\ntyp_ibis = [r for r in results if r['type']=='ibis' and r['K'] == typK][0]\ntyp_ess = typ_ibis['out'].ESSs\ntyp_rs_times = np.nonzero(typ_ibis['out'].rs_flags)[0]\n\n# Left panel: evolution of ESS\nfig, ax = plt.subplots()\nax.plot(typ_ess, 'k')\nax.set(xlabel=r'$t$', ylabel='ESS')\nif savefigs:\n plt.savefig(dataset_name + '_typical_ibis_ess.pdf')\n plt.savefig(dataset_name + '_typical_ibis_ess.png')\n\n# Right panel: evolution of resampling times\nfig, ax = plt.subplots()\nax.plot(typ_rs_times[:-1], np.diff(typ_rs_times), 'ko-')\nax.set(xlabel=r'$t$', ylabel='duration between successive rs')\nif savefigs:\n plt.savefig(dataset_name + '_typical_ibis_rs_times.pdf')\n plt.savefig(dataset_name + '_typical_ibis_rs_times.png')",
"_____no_output_____"
],
[
"# Figure 17.2: evolution of temperature in a typical tempering run\ntyp_temp = [r for r in results if r['type']=='tempering' and r['K'] == typK][0]\nexpnts = typ_temp['exponents']\nplt.figure()\nplt.plot(expnts, 'k')\nplt.xlabel(r'$t$')\nplt.ylabel('tempering exponent')\nif savefigs:\n plt.savefig(dataset_name + '_typical_tempering_temperatures.pdf')\n plt.savefig(dataset_name + '_typical_tempering_temperatures.png')",
"_____no_output_____"
],
[
"# nr evals vs K for both algorithms\nplt.figure()\nsb.boxplot(x=[r['K'] for r in results],\n y=[r['n_eval'] for r in results],\n hue=[r['type'] for r in results])\nplt.xlabel('number MCMC steps')\nplt.ylabel('number likelihood evaluations')\nif savefigs:\n plt.savefig(dataset_name + '_boxplots_nevals_vs_K.pdf')\n plt.savefig(dataset_name + '_boxplots_nevals_vs_K.png')",
"_____no_output_____"
],
[
"print(type(results))\nprint(results[0])\nfor r in results:\n print(r['type'], 'K=', r['K'], 'time=', r['cpu'])",
"<class 'list'>\n{'K': 1, 'type': 'tempering', 'out': <particles.collectors.Summaries object at 0x7f307c297a90>, 'cpu': 35.458105918, 'path_sampling': -9894.552441367015, 'exponents': [0.0, 9.306707012648636e-05, 0.0002898627563112856, 0.0006618455648883144, 0.0014183980691296664, 0.002507870437781154, 0.0038479550089156626, 0.005705163274481007, 0.008603119112205248, 0.013935392698910011, 0.022230662894991725, 0.033194739869915085, 0.051692383341063665, 0.08354491448844609, 0.12227938118802113, 0.17406891245698036, 0.23286324675513, 0.289010223871243, 0.35035677692909667, 0.443419593914814, 0.5712134103654529, 0.6994814843236252, 0.8324421117283232, 0.9946749866112512, 1.0], 'n_eval': 359520000.0}\ntempering K= 1 time= 35.458105918\nibis K= 1 time= 27.205261016999998\ntempering K= 1 time= 32.33377925000002\nibis K= 1 time= 28.813738553000007\ntempering K= 3 time= 102.83632723400001\nibis K= 3 time= 73.07628615200008\ntempering K= 3 time= 103.81470425999998\nibis K= 3 time= 70.56549695900003\ntempering K= 5 time= 177.10674820500003\nibis K= 5 time= 111.04784905800011\ntempering K= 5 time= 175.84440240699996\nibis K= 5 time= 101.23418587399988\ntempering K= 7 time= 247.79403795000007\nibis K= 7 time= 181.4744050459999\ntempering K= 7 time= 246.65038172999994\nibis K= 7 time= 154.86092913099992\n"
],
[
"# Figure 17.3: Box-plots estimate versus number of MCMC steps\n# Left panel: marginal likelihood\nplt.figure()\nsb.boxplot(x=[r['K'] for r in results],\n y=[r['out'].logLts[-1] for r in results],\n hue=[r['type'] for r in results])\nplt.xlabel('number MCMC steps')\nplt.ylabel('marginal likelihood')\nif savefigs:\n plt.savefig(dataset_name + '_boxplots_marglik_vs_K.pdf')\n plt.savefig(dataset_name + '_boxplots_marglik_vs_K.png')\n\n# Right panel: post expectation 1st pred\nplt.figure()\nsb.boxplot(x=[r['K'] for r in results],\n y=[r['out'].moments[-1]['mean']['beta'][1] for r in results],\n hue=[r['type'] for r in results])\nplt.xlabel('number MCMC steps')\nplt.ylabel('posterior expectation first predictor')\nif savefigs:\n plt.savefig(dataset_name + '_boxplots_postexp1_vs_K.pdf')\n plt.savefig(dataset_name + '_boxplots_postexp1_vs_K.png')",
"_____no_output_____"
],
[
"# Figure 17.4: variance vs CPU trade-off\n# variance times K, as a function of K\nplt.figure()\n#cols = {'ibis': 'gray', 'tempering':'black'}\ncols = {'ibis': 'blue', 'tempering':'red'}\nlsts = {'ibis': '--', 'tempering': '-'}\nfor i in range(p):\n for alg_type in ['ibis', 'tempering']:\n adj_var = []\n for K in Ks:\n mts = [r['out'].moments[-1]\n for r in results if r['K']==K and r['type']==alg_type]\n av = (K * np.var([m['mean']['beta'][i] for m in mts]) /\n np.mean([m['var']['beta'][i] for m in mts]))\n adj_var.append(av)\n if i==0:\n plt.plot(Ks, adj_var, color=cols[alg_type], label=alg_type,\n alpha=.8, linewidth=2, linestyle=lsts[alg_type])\n else:\n plt.plot(Ks, adj_var, color=cols[alg_type], alpha=.8, linewidth=2,\n linestyle=lsts[alg_type])\nplt.legend()\nplt.xticks(Ks, ['%i' % K for K in Ks]) # force int ticks\nplt.xlabel('number MCMC steps')\nplt.ylabel(r'variance times number MCMC steps')\nif savefigs:\n plt.savefig(dataset_name + '_postexp_var_vs_K.pdf')\n plt.savefig(dataset_name + '_postexp_var_vs_K.png')",
"_____no_output_____"
],
[
"!ls *.png\n",
"eeg_boxplots_marglik_vs_K.png\teeg_typical_ibis_ess.png\neeg_boxplots_nevals_vs_K.png\teeg_typical_ibis_rs_times.png\neeg_boxplots_postexp1_vs_K.png\teeg_typical_tempering_temperatures.png\neeg_postexp_var_vs_K.png\n"
],
[
"!mkdir figures\n!mv *.png figures\n!mv *.pdf figures\n!ls",
"book\t CONTRIBUTING.md INSTALL particles\t setup.py\nCHANGELOG docs\t LICENSE README.md\n_config.yml figures\t papers requirements.txt\n"
],
[
"!zip -r figures figures",
" adding: figures/ (stored 0%)\n adding: figures/eeg_postexp_var_vs_K.pdf (deflated 30%)\n adding: figures/eeg_boxplots_marglik_vs_K.pdf (deflated 29%)\n adding: figures/eeg_boxplots_postexp1_vs_K.pdf (deflated 29%)\n adding: figures/eeg_postexp_var_vs_K.png (deflated 3%)\n adding: figures/eeg_typical_ibis_ess.pdf (deflated 6%)\n adding: figures/eeg_typical_ibis_ess.png (deflated 6%)\n adding: figures/eeg_typical_tempering_temperatures.pdf (deflated 41%)\n adding: figures/eeg_boxplots_postexp1_vs_K.png (deflated 10%)\n adding: figures/eeg_typical_ibis_rs_times.png (deflated 7%)\n adding: figures/eeg_typical_ibis_rs_times.pdf (deflated 35%)\n adding: figures/eeg_boxplots_marglik_vs_K.png (deflated 11%)\n adding: figures/eeg_boxplots_nevals_vs_K.pdf (deflated 30%)\n adding: figures/eeg_boxplots_nevals_vs_K.png (deflated 8%)\n adding: figures/eeg_typical_tempering_temperatures.png (deflated 10%)\n"
],
[
"",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d0ed058a979d1b139eb0ad3cdcef7262d1790a91 | 143,115 | ipynb | Jupyter Notebook | semeval2021/bert-multitask-reproduce.ipynb | skoltech-nlp/toxic-span-detection | 07004db49214d697e48a81fa2ff39b23a17f30ea | [
"Apache-2.0"
] | 1 | 2021-11-14T09:33:56.000Z | 2021-11-14T09:33:56.000Z | semeval2021/bert-multitask-reproduce.ipynb | skoltech-nlp/toxic-span-detection | 07004db49214d697e48a81fa2ff39b23a17f30ea | [
"Apache-2.0"
] | null | null | null | semeval2021/bert-multitask-reproduce.ipynb | skoltech-nlp/toxic-span-detection | 07004db49214d697e48a81fa2ff39b23a17f30ea | [
"Apache-2.0"
] | null | null | null | 38.617107 | 33,508 | 0.583566 | [
[
[
"* basic roberta ft: 0.6589791487657798 (thr 0.3)\n* basic roberta ft (head first): 0.6768011808573329 (thr 0.42)\n* fine tune roberta on weird clf, then only head on spans, then whole: 0.6853127403287083 (thr 0.32)\n* ",
"_____no_output_____"
]
],
[
[
"from transformers import RobertaTokenizer, RobertaForTokenClassification\nfrom transformers import BertTokenizer, BertForTokenClassification\nfrom transformers import AutoTokenizer, AutoModelForTokenClassification\nimport torch",
"_____no_output_____"
],
[
"import numpy as np\nimport pandas as pd",
"_____no_output_____"
],
[
"import os\nos.environ['CUDA_VISIBLE_DEVICES'] = '4'\ndevice = torch.device('cuda:0')",
"_____no_output_____"
],
[
"model_name = 'roberta-base' #roberta-base",
"_____no_output_____"
],
[
"tokenizer = AutoTokenizer.from_pretrained(model_name)",
"_____no_output_____"
],
[
"# model = AutoModelForTokenClassification.from_pretrained(model_name)",
"_____no_output_____"
]
],
[
[
"```\ninputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"pt\")\nlabels = torch.tensor([1] * inputs[\"input_ids\"].size(1)).unsqueeze(0) # Batch size 1\noutputs = model(**inputs, labels=labels)\n```",
"_____no_output_____"
],
[
"# Create labels for tagging",
"_____no_output_____"
]
],
[
[
"import os\nimport numpy as np\nimport pandas as pd\nfrom ast import literal_eval\nimport re\nimport nltk\nimport matplotlib.pyplot as plt\nfrom nltk.tokenize import word_tokenize\n\npath = 'data/'",
"_____no_output_____"
],
[
"trial = pd.read_csv(path + 'tsd_trial.csv')\ntrain = pd.read_csv(path + 'tsd_train.csv')\n# final_test = pd.read_csv(path + 'tsd_test.csv')\nfinal_test = pd.read_csv(path + 'tsd_test_gt.csv')\n\ntrain['spans'] = train.spans.apply(literal_eval)\ntrial['spans'] = trial.spans.apply(literal_eval)\nfinal_test['spans'] = final_test.spans.apply(literal_eval)\ntrial.shape, train.shape, final_test.shape",
"_____no_output_____"
],
[
"print(len(set(trial.text).intersection(set(train.text))))\nprint(len(set(final_test.text).intersection(set(train.text))))",
"8\n0\n"
],
[
"print((train.spans.apply(len) == 0).mean())\nprint((trial.spans.apply(len) == 0).mean())",
"0.06109081748331024\n0.06231884057971015\n"
],
[
"import spans_utils\nfrom importlib import reload\nreload(spans_utils)\nfrom spans_utils import display_spans, spans2labels, labels2spans\n\ndisplay_spans(trial.spans[0], trial.text[0])\ndisplay_spans(trial.spans[0], trial.text[0])",
"_____no_output_____"
],
[
"from tqdm.auto import tqdm, trange",
"_____no_output_____"
],
[
"n = 0\nfor row in tqdm([row for i, row in trial.iterrows()]):\n break\n labels = spans2labels(row.text, row.spans, tokenizer)\n spans2 = labels2spans(row.text, labels, tokenizer)\n if row.spans != spans2:\n t = row.text.replace(' ', '+')\n display_spans(row.spans, t)\n display_spans(spans2, t)\n n += 1\nprint(n)",
"_____no_output_____"
],
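[
"# Hedged sketch (added): the local spans2labels is roughly the logic below --\n# mark a token toxic iff any of its characters falls in the gold span set.\n# This assumes a fast tokenizer (return_offsets_mapping); it is an\n# illustration, not the exact spans_utils implementation.\ndef spans2labels_sketch(text, spans, tok):\n    span_set = set(spans)\n    enc = tok(text, return_offsets_mapping=True, add_special_tokens=False)\n    return [int(any(c in span_set for c in range(b, e)))\n            for b, e in enc['offset_mapping']]\n\nprint(spans2labels_sketch(trial.text[0], trial.spans[0], tokenizer)[:20])",
"_____no_output_____"
],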
[
"train_labels = [spans2labels(row.text, row.spans, tokenizer) for i, row in tqdm(train.iterrows())]",
"_____no_output_____"
],
[
"trial_labels = [spans2labels(row.text, row.spans, tokenizer) for i, row in tqdm(trial.iterrows())]",
"_____no_output_____"
],
[
"train['labels'] = train_labels\ntrial['labels'] = trial_labels",
"_____no_output_____"
],
[
"class SpansDataset(torch.utils.data.Dataset):\n def __init__(self, encodings, labels=None):\n self.encodings = encodings\n self.labels = labels\n\n def __getitem__(self, idx):\n item = {key: val[idx] for key, val in self.encodings.items()}\n if self.labels is not None:\n item['labels'] = self.labels[idx]\n return item\n\n def __len__(self):\n return len(self.encodings['input_ids'])",
"_____no_output_____"
],
[
"train_dataset = SpansDataset(tokenizer(train.text.tolist()), train_labels)\neval_dataset = SpansDataset(tokenizer(trial.text.tolist()), trial_labels)",
"_____no_output_____"
],
[
"final_test_dataset = SpansDataset(tokenizer(final_test.text.tolist()))",
"_____no_output_____"
],
[
"from transformers import DataCollatorForTokenClassification\ndata_collator = DataCollatorForTokenClassification(tokenizer, padding=True)",
"_____no_output_____"
],
[
"import numpy as np\nfrom semeval2021 import f1",
"_____no_output_____"
]
],
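[
[
"# Hedged sketch (added): semeval2021.f1 scores each example as character-offset\n# F1 between predicted and gold span sets (empty vs empty counts as a perfect\n# match). This re-implementation is for illustration and may differ from the\n# official scorer in edge cases.\ndef f1_sketch(pred, gold):\n    p, g = set(pred), set(gold)\n    if len(g) == 0:\n        return 1.0 if len(p) == 0 else 0.0\n    if len(p) == 0:\n        return 0.0\n    prec, rec = len(p & g) / len(p), len(p & g) / len(g)\n    return 0.0 if prec + rec == 0 else 2 * prec * rec / (prec + rec)\n\nprint(f1_sketch([1, 2, 3], [2, 3, 4]))",
"_____no_output_____"
]
],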
[
[
"### Dataset for classification",
"_____no_output_____"
]
],
[
[
"import pandas as pd",
"_____no_output_____"
],
[
"df1 = pd.read_csv('../data/train/train.1.tsv', sep='\\t')\ndf0 = pd.read_csv('../data/train/train_small.0.tsv', sep='\\t')\ndf01 = pd.concat([df1, df0], ignore_index=True)\ndf01.label = df01.label.astype(int)\nprint(df01.shape)\ndf01.sample(3)",
"(358984, 2)\n"
],
[
"from sklearn.model_selection import train_test_split\ndf_train, df_test = train_test_split(df01, test_size=0.1, random_state=1)",
"_____no_output_____"
],
[
"df_train.head(10)",
"_____no_output_____"
],
[
"class SpansDataset(torch.utils.data.Dataset):\n def __init__(self, encodings, labels=None):\n self.encodings = encodings\n self.labels = labels\n\n def __getitem__(self, idx):\n item = {key: val[idx] for key, val in self.encodings.items()}\n if self.labels is not None:\n item['labels'] = self.labels[idx]\n return item\n\n def __len__(self):\n return len(self.encodings['input_ids'])",
"_____no_output_____"
],
[
"clf_train_dataset = SpansDataset(\n tokenizer(df_train.comment_text.tolist(), truncation=True), \n df_train.label.tolist()\n)",
"_____no_output_____"
],
[
"clf_test_dataset = SpansDataset(\n tokenizer(df_test.comment_text.tolist(), truncation=True), \n df_test.label.tolist()\n)",
"_____no_output_____"
],
[
"clf_test_small_dataset = SpansDataset(\n tokenizer(df_test.comment_text.iloc[:3000].tolist(), truncation=True), \n df_test.label[:3000].tolist()\n)",
"_____no_output_____"
]
],
[
[
"# Train a single-task model",
"_____no_output_____"
],
[
"https://github.com/huggingface/notebooks/blob/master/examples/token_classification.ipynb\nhttps://huggingface.co/transformers/custom_datasets.html",
"_____no_output_____"
]
],
[
[
"from transformers import Trainer, TrainingArguments, EarlyStoppingCallback\nfrom transformers.file_utils import cached_property\nfrom typing import Tuple\n\nclass TrAr(TrainingArguments):\n @cached_property\n def _setup_devices(self):\n return device",
"_____no_output_____"
],
[
"torch.cuda.set_device(device)",
"_____no_output_____"
],
[
"model = AutoModelForTokenClassification.from_pretrained(model_name)\nmodel.to(device);",
"Some weights of the model checkpoint at roberta-base were not used when initializing RobertaForTokenClassification: ['lm_head.bias', 'lm_head.dense.weight', 'lm_head.dense.bias', 'lm_head.layer_norm.weight', 'lm_head.layer_norm.bias', 'lm_head.decoder.weight']\n- This IS expected if you are initializing RobertaForTokenClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n- This IS NOT expected if you are initializing RobertaForTokenClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\nSome weights of RobertaForTokenClassification were not initialized from the model checkpoint at roberta-base and are newly initialized: ['classifier.weight', 'classifier.bias']\nYou should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n"
],
[
"for param in model.roberta.parameters():\n param.requires_grad = False",
"_____no_output_____"
],
[
"training_args = TrAr(\n output_dir='./models2/roberta_single', # output directory\n overwrite_output_dir=True,\n num_train_epochs=10, # total # of training epochs\n per_device_train_batch_size=8, # batch size per device during training\n per_device_eval_batch_size=8, # batch size for evaluation\n warmup_steps=3000, # number of warmup steps for learning rate scheduler\n weight_decay=1e-8, # strength of weight decay\n learning_rate=1e-3,\n logging_dir='./logs', # directory for storing logs\n logging_steps=100,\n eval_steps=100,\n evaluation_strategy='steps',\n save_total_limit=1,\n load_best_model_at_end=True,\n)",
"_____no_output_____"
],
[
"trainer = Trainer(\n model=model, # the instantiated 🤗 Transformers model to be trained\n args=training_args, # training arguments, defined above\n train_dataset=train_dataset, # training dataset\n eval_dataset=eval_dataset, # evaluation dataset\n data_collator=data_collator,\n tokenizer=tokenizer,\n callbacks=[EarlyStoppingCallback(early_stopping_patience=3, early_stopping_threshold=0)]\n)",
"_____no_output_____"
],
[
"trainer.train()",
"_____no_output_____"
],
[
"for param in model.parameters():\n param.requires_grad = True",
"_____no_output_____"
],
[
"training_args = TrAr(\n output_dir='./models2/roberta_single', # output directory\n overwrite_output_dir=True,\n num_train_epochs=10, # total # of training epochs\n per_device_train_batch_size=8, # batch size per device during training\n per_device_eval_batch_size=8, # batch size for evaluation\n warmup_steps=3000, # number of warmup steps for learning rate scheduler\n weight_decay=1e-8, # strength of weight decay\n learning_rate=1e-5,\n logging_dir='./logs', # directory for storing logs\n logging_steps=500,\n eval_steps=500,\n evaluation_strategy='steps',\n save_total_limit=1,\n load_best_model_at_end=True,\n)",
"_____no_output_____"
],
[
"trainer = Trainer(\n model=model, # the instantiated 🤗 Transformers model to be trained\n args=training_args, # training arguments, defined above\n train_dataset=train_dataset, # training dataset\n eval_dataset=eval_dataset, # evaluation dataset\n data_collator=data_collator,\n tokenizer=tokenizer,\n callbacks=[EarlyStoppingCallback(early_stopping_patience=3, early_stopping_threshold=0)]\n)",
"_____no_output_____"
]
],
[
[
"* The minimal loss of a single-task model (full) was about 28% on validation with 0.04 on train. \n* If we first train only head (batch 8, lr 1e-3 with 3K warmup and 1e-8 decline), we get minimal loss of 0.185 on validation with 0.23 on train\n* Training then the whole model (batch 8, lr 1e-5 with 3K warmup and 1e-8 decline) we get minimal loss of 0.175 on validation with 0.21 on train",
"_____no_output_____"
]
],
[
[
"trainer.train()",
"_____no_output_____"
],
[
"model.save_pretrained('./models2/roberta_single')",
"_____no_output_____"
],
[
"trainer.evaluate()",
"_____no_output_____"
]
],
[
[
"### evaluate",
"_____no_output_____"
]
],
[
[
"pred = trainer.predict(eval_dataset)",
"_____no_output_____"
],
[
"for threshold in [0, 0.01, 0.03, 0.1, 0.3, 0.4, 0.5, 0.6, 0.7, 1]:\n preds = []\n for text, pr in zip(trial.text, pred.predictions):\n proba = np.exp(pr[pr[:, 0]!=-100])\n proba /= proba.sum(axis=1, keepdims=True)\n labels = (proba[:, 1] >= threshold).astype(int).tolist()\n preds.append(labels2spans(text, labels, tokenizer))\n print(threshold, np.mean([f1(p, y) for p, y in zip(preds, trial.spans)]))",
"0 0.21345926240649193\n0.01 0.2522426276657023\n0.03 0.39910998627052807\n0.1 0.5959594514495652\n0.3 0.6691954194658056\n0.4 0.6734060063813712\n0.5 0.6728307956631072\n0.6 0.652604453261445\n0.7 0.6215603361690663\n1 0.06231884057971015\n"
],
[
"for threshold in [0.3, 0.32, 0.35, 0.38, 0.4, 0.42, 0.45, 0.5, 0.55, 0.6]:\n preds = []\n for text, pr in zip(trial.text, pred.predictions):\n proba = np.exp(pr[pr[:, 0]!=-100])\n proba /= proba.sum(axis=1, keepdims=True)\n labels = (proba[:, 1] >= threshold).astype(int).tolist()\n preds.append(labels2spans(text, labels, tokenizer))\n print(threshold, np.mean([f1(p, y) for p, y in zip(preds, trial.spans)]))",
"0.3 0.6691954194658056\n0.32 0.6719575974133731\n0.35 0.6718814105545576\n0.38 0.6704494596966453\n0.4 0.6734060063813712\n0.42 0.6733924519991911\n0.45 0.675073109186953\n0.5 0.6728307956631072\n0.55 0.6635405099600963\n0.6 0.652604453261445\n"
]
],
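[
[
"# Hedged sketch (added): pick the best threshold on the trial set\n# programmatically instead of reading it off the printouts above.\ndef score_at(threshold):\n    scores = []\n    for text, pr, gold in zip(trial.text, pred.predictions, trial.spans):\n        proba = np.exp(pr[pr[:, 0] != -100])\n        proba /= proba.sum(axis=1, keepdims=True)\n        labels = (proba[:, 1] >= threshold).astype(int).tolist()\n        scores.append(f1(labels2spans(text, labels, tokenizer), gold))\n    return np.mean(scores)\n\ngrid = np.arange(0.30, 0.61, 0.02)\nbest = max(grid, key=score_at)\nprint('best threshold: %.2f, F1: %.4f' % (best, score_at(best)))",
"_____no_output_____"
]
],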
[
[
"## Prepare a submission",
"_____no_output_____"
]
],
[
[
"pred = trainer.predict(final_test_dataset)",
"_____no_output_____"
],
[
"threshold = 0.4\npreds = []\nfor text, pr in zip(final_test.text, pred.predictions):\n proba = np.exp(pr[pr[:, 0]!=-100])\n proba /= proba.sum(axis=1, keepdims=True)\n labels = (proba[:, 1] >= threshold).astype(int).tolist()\n preds.append(labels2spans(text, labels, tokenizer))",
"_____no_output_____"
],
[
"row = final_test.sample(1).iloc[0]\ndisplay_spans(preds[row.name], row.text)",
"_____no_output_____"
]
],
[
[
"65.31% ",
"_____no_output_____"
]
],
[
[
"print(np.mean([f1(p, y) for p, y in zip(preds, final_test.spans)]))",
"0.6631491659944739\n"
]
],
[
[
"# WM Classifier + tagging",
"_____no_output_____"
]
],
[
[
"from transformers import RobertaTokenizer, RobertaForTokenClassification, RobertaForSequenceClassification\nfrom transformers import BertTokenizer, BertForTokenClassification\nfrom transformers import AutoTokenizer, AutoModelForTokenClassification\nimport torch",
"_____no_output_____"
],
[
"from transformers.models.roberta.modeling_roberta import RobertaModel\nfrom transformers.modeling_outputs import SequenceClassifierOutput\nimport torch.nn as nn\nfrom torch.nn import CrossEntropyLoss, MSELoss\n\nclass WMean(nn.Module):\n def __init__(self, dim=-2):\n super(WMean, self).__init__()\n self.pow = torch.nn.Parameter(data=torch.Tensor([1.0]), requires_grad=True)\n self.coef = torch.nn.Parameter(data=torch.Tensor([0.0, 1.0]), requires_grad=True)\n self.dim = dim\n \n def forward(self, x, mask=None):\n result = x ** self.pow[0]\n if mask is None:\n mp = result.mean(dim=-1)\n else:\n mp = (result * mask).sum(dim=self.dim) / mask.sum(dim=self.dim)\n return torch.log(mp) * self.coef[1] + self.coef[0]\n\n\nclass RobertaTaggerClassifier(RobertaForTokenClassification):\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.roberta = RobertaModel(config, add_pooling_layer=False)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n self.wmean = WMean()\n\n self.init_weights()\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.roberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n\n sequence_output = self.dropout(sequence_output)\n token_logits = self.classifier(sequence_output)\n\n if attention_mask is not None:\n masks = attention_mask.unsqueeze(-1).repeat(1, 1, 2)\n else:\n masks = None\n\n logits = self.wmean(torch.softmax(token_logits, dim=-1), mask=masks)\n\n loss = None\n if labels is not None:\n if self.num_labels == 1:\n # We are doing regression\n loss_fct = MSELoss()\n loss = loss_fct(logits.view(-1), labels.view(-1))\n else:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return SequenceClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )",
"_____no_output_____"
],
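[
"# Hedged demo (added): WMean aggregates per-token class probabilities with a\n# learnable power mean (mean of x**p over the mask), then applies log and an\n# affine map, so with p > 1 the sequence-level logit is dominated by the most\n# confidently-toxic tokens.\nwm = WMean()\nx = torch.rand(2, 7, 2)     # (batch, tokens, classes)\nmask = torch.ones(2, 7, 1)  # attention mask broadcast over classes\nprint(wm(x, mask).shape)    # expected: torch.Size([2, 2])",
"_____no_output_____"
],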
[
"model = RobertaTaggerClassifier.from_pretrained('roberta-base')",
"Some weights of the model checkpoint at roberta-base were not used when initializing RobertaTaggerClassifier: ['lm_head.bias', 'lm_head.dense.weight', 'lm_head.dense.bias', 'lm_head.layer_norm.weight', 'lm_head.layer_norm.bias', 'lm_head.decoder.weight']\n- This IS expected if you are initializing RobertaTaggerClassifier from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n- This IS NOT expected if you are initializing RobertaTaggerClassifier from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\nSome weights of RobertaTaggerClassifier were not initialized from the model checkpoint at roberta-base and are newly initialized: ['classifier.weight', 'classifier.bias', 'wmean.pow', 'wmean.coef']\nYou should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n"
],
[
"tokenizer = AutoTokenizer.from_pretrained('roberta-base')",
"_____no_output_____"
],
[
"inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"pt\")",
"_____no_output_____"
],
[
"with torch.no_grad():\n o = model(**inputs)\no",
"_____no_output_____"
],
[
"#device = torch.device('cuda:3')\n\nfrom transformers import Trainer, TrainingArguments\nfrom transformers.file_utils import cached_property\nfrom typing import Tuple\n\nclass TrAr(TrainingArguments):\n @cached_property\n def _setup_devices(self):\n return device",
"_____no_output_____"
]
],
[
[
"The strategy: first tune the head only with large batches and LR, then tune the whole model. \n\nHead-only stops at loss 0.4185, full model - at loss 0.302685",
"_____no_output_____"
]
],
[
[
"for param in model.roberta.parameters():\n param.requires_grad = False",
"_____no_output_____"
],
[
"NEW_MODEL_NAME = './models2/roberta_clf_wm'",
"_____no_output_____"
],
[
"training_args = TrAr(\n output_dir=NEW_MODEL_NAME, # output directory\n overwrite_output_dir=True,\n num_train_epochs=10, # total # of training epochs\n per_device_train_batch_size=8, # batch size per device during training\n per_device_eval_batch_size=64, # batch size for evaluation\n warmup_steps=3000, # number of warmup steps for learning rate scheduler\n weight_decay=1e-8, # strength of weight decay\n learning_rate=1e-3,\n logging_dir='./logs', # directory for storing logs\n logging_steps=100,\n eval_steps=500,\n evaluation_strategy='steps',\n save_total_limit=1,\n load_best_model_at_end=True,\n)",
"_____no_output_____"
],
[
"trainer = Trainer(\n model=model, # the instantiated 🤗 Transformers model to be trained\n args=training_args, # training arguments, defined above\n train_dataset=clf_train_dataset, # training dataset\n eval_dataset=clf_test_small_dataset, # evaluation dataset\n #data_collator=data_collator,\n tokenizer=tokenizer,\n callbacks=[EarlyStoppingCallback(early_stopping_patience=3, early_stopping_threshold=0)]\n)",
"_____no_output_____"
],
[
"trainer.train();",
"_____no_output_____"
],
[
"for param in model.parameters():\n param.requires_grad = True",
"_____no_output_____"
],
[
"training_args = TrAr(\n output_dir=NEW_MODEL_NAME, # output directory\n overwrite_output_dir=True,\n num_train_epochs=10, # total # of training epochs\n per_device_train_batch_size=8, # batch size per device during training\n per_device_eval_batch_size=64, # batch size for evaluation\n warmup_steps=3000, # number of warmup steps for learning rate scheduler\n weight_decay=1e-8, # strength of weight decay\n learning_rate=1e-5,\n logging_dir='./logs', # directory for storing logs\n logging_steps=500,\n eval_steps=500,\n evaluation_strategy='steps',\n save_total_limit=1,\n load_best_model_at_end=True,\n)",
"_____no_output_____"
],
[
"trainer = Trainer(\n model=model, # the instantiated 🤗 Transformers model to be trained\n args=training_args, # training arguments, defined above\n train_dataset=clf_train_dataset, # training dataset\n eval_dataset=clf_test_small_dataset, # evaluation dataset\n #data_collator=data_collator,\n tokenizer=tokenizer,\n callbacks=[EarlyStoppingCallback(early_stopping_patience=10, early_stopping_threshold=0)]\n)",
"_____no_output_____"
],
[
"import gc\ngc.collect()\ntorch.cuda.empty_cache()",
"_____no_output_____"
],
[
"trainer.train()",
"_____no_output_____"
],
[
"print(model.wmean.pow)\nprint(model.wmean.coef)",
"Parameter containing:\ntensor([1.9293], device='cuda:0', requires_grad=True)\nParameter containing:\ntensor([1.0333, 1.9060], device='cuda:0', requires_grad=True)\n"
],
[
"model.save_pretrained(NEW_MODEL_NAME)",
"_____no_output_____"
]
],
[
[
"# Fine tune the averager classifier",
"_____no_output_____"
]
],
[
[
"model = AutoModelForTokenClassification.from_pretrained('./models2/roberta_clf_wm')",
"Some weights of the model checkpoint at ./models2/roberta_clf_wm were not used when initializing RobertaForTokenClassification: ['wmean.pow', 'wmean.coef']\n- This IS expected if you are initializing RobertaForTokenClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n- This IS NOT expected if you are initializing RobertaForTokenClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n"
],
[
"NEW_MODEL_NAME = './models2/roberta_clf_wm_ft'",
"_____no_output_____"
],
[
"for param in model.roberta.parameters():\n param.requires_grad = False",
"_____no_output_____"
],
[
"training_args = TrAr(\n output_dir=NEW_MODEL_NAME, # output directory\n overwrite_output_dir=True,\n num_train_epochs=10, # total # of training epochs\n per_device_train_batch_size=8, # batch size per device during training\n per_device_eval_batch_size=64, # batch size for evaluation\n warmup_steps=3000, # number of warmup steps for learning rate scheduler\n weight_decay=1e-8, # strength of weight decay\n learning_rate=1e-3,\n logging_dir='./logs', # directory for storing logs\n logging_steps=100,\n eval_steps=500,\n evaluation_strategy='steps',\n save_total_limit=1,\n load_best_model_at_end=True,\n)",
"_____no_output_____"
],
[
"trainer = Trainer(\n model=model, # the instantiated 🤗 Transformers model to be trained\n args=training_args, # training arguments, defined above\n train_dataset=train_dataset, # training dataset\n eval_dataset=eval_dataset, # evaluation dataset\n data_collator=data_collator,\n tokenizer=tokenizer,\n callbacks=[EarlyStoppingCallback(early_stopping_patience=3, early_stopping_threshold=0)]\n)",
"_____no_output_____"
],
[
"trainer.train()",
"_____no_output_____"
]
],
[
[
"* the raw quasi-classifier: no use in the model at all\n* fine tuned head: still no use, the best score is 0.2138\n* fine tune whole model: 0.3 0.6849391042415774",
"_____no_output_____"
]
],
[
[
"for param in model.parameters():\n param.requires_grad = True",
"_____no_output_____"
],
[
"training_args = TrAr(\n output_dir=NEW_MODEL_NAME, # output directory\n overwrite_output_dir=True,\n num_train_epochs=10, # total # of training epochs\n per_device_train_batch_size=8, # batch size per device during training\n per_device_eval_batch_size=8, # batch size for evaluation\n warmup_steps=3000, # number of warmup steps for learning rate scheduler\n weight_decay=1e-8, # strength of weight decay\n learning_rate=1e-5,\n logging_dir='./logs', # directory for storing logs\n logging_steps=500,\n eval_steps=500,\n evaluation_strategy='steps',\n save_total_limit=1,\n load_best_model_at_end=True,\n)",
"_____no_output_____"
],
[
"trainer = Trainer(\n model=model, # the instantiated 🤗 Transformers model to be trained\n args=training_args, # training arguments, defined above\n train_dataset=train_dataset, # training dataset\n eval_dataset=eval_dataset, # evaluation dataset\n data_collator=data_collator,\n tokenizer=tokenizer,\n callbacks=[EarlyStoppingCallback(early_stopping_patience=3, early_stopping_threshold=0)]\n)",
"_____no_output_____"
],
[
"trainer.train()",
"_____no_output_____"
]
],
[
[
"* The minimal loss of a single-task model (full) was about 28% on validation with 0.04 on train. \n* If we first train only head (batch 8, lr 1e-3 with 3K warmup and 1e-8 decline), we get minimal loss of 0.185 on validation with 0.23 on train\n* Training then the whole model (batch 8, lr 1e-5 with 3K warmup and 1e-8 decline) we get minimal loss of 0.175 on validation with 0.21 on train",
"_____no_output_____"
]
],
[
[
"trainer.train()",
"_____no_output_____"
],
[
"NEW_MODEL_NAME",
"_____no_output_____"
],
[
"model.save_pretrained(NEW_MODEL_NAME)",
"_____no_output_____"
],
[
"pred = trainer.predict(eval_dataset)\nfor threshold in [0, 0.01, 0.03, 0.1, 0.25, 0.3, 0.35, 0.4, 0.5, 0.6, 0.7, 1]:\n preds = []\n for text, pr in zip(trial.text, pred.predictions):\n proba = np.exp(pr[pr[:, 0]!=-100])\n proba /= proba.sum(axis=1, keepdims=True)\n labels = (proba[:, 1] >= threshold).astype(int).tolist()\n preds.append(labels2spans(text, labels, tokenizer))\n print(threshold, np.mean([f1(p, y) for p, y in zip(preds, trial.spans)]))",
"_____no_output_____"
],
[
"for threshold in [ 0.25, 0.28, 0.3, 0.32, 0.35]:\n preds = []\n for text, pr in zip(trial.text, pred.predictions):\n proba = np.exp(pr[pr[:, 0]!=-100])\n proba /= proba.sum(axis=1, keepdims=True)\n labels = (proba[:, 1] >= threshold).astype(int).tolist()\n preds.append(labels2spans(text, labels, tokenizer))\n print(threshold, np.mean([f1(p, y) for p, y in zip(preds, trial.spans)]))",
"0.25 0.66657885004928\n0.28 0.6746374794679898\n0.3 0.6764404567060631\n0.32 0.6784818373039875\n0.35 0.6782236752482207\n"
],
[
"pred = trainer.predict(final_test_dataset)",
"_____no_output_____"
],
[
"threshold = 0.4\npreds = []\nfor text, pr in zip(final_test.text, pred.predictions):\n proba = np.exp(pr[pr[:, 0]!=-100])\n proba /= proba.sum(axis=1, keepdims=True)\n labels = (proba[:, 1] >= threshold).astype(int).tolist()\n preds.append(labels2spans(text, labels, tokenizer))\nprint(len(preds))",
"2000\n"
],
[
"print(np.mean([f1(p, y) for p, y in zip(preds, final_test.spans)]))",
"0.6503622193673821\n"
]
],
[
[
"# Try to reproduce the score of an ordinary classifier fine tuned as tagger",
"_____no_output_____"
],
[
"```\n* roberta_clf_proba - roberta classifier with wm head\n* roberta_clf_ft_plus_pseudolabels - roberta_clf_ft + pseudolabels fine-tuning on data/train/train.1.tsv\n* roberta_clf - preliminary form of roberta_clf_proba\n* roberta_clf_ft - roberta_clf_proba + tagger fine-tuning\n* roberta_selflabel - preliminary form of roberta_clf_ft_plus_pseudolabels\n* roberta_selflabel_final - preliminary form of roberta_clf_ft_plus_pseudolabels\n* roberta_single_v2 - just roberta tagger \n* roberta_single - just roberta tagger, first version\n* roberta_clf_2 - roberta classic classifier\n* roberta_ft_v2 - roberta_clf_2 + tagger fine-tuning\n```\n\n",
"_____no_output_____"
],
[
"#### roberta_ft_v2",
"_____no_output_____"
]
],
[
[
"model = RobertaForTokenClassification.from_pretrained('models/roberta_ft_v2')\nmodel.to(device);",
"_____no_output_____"
],
[
"training_args = TrAr(\n output_dir='tmp',\n per_device_eval_batch_size=8,\n)",
"_____no_output_____"
],
[
"trainer = Trainer(\n model=model, \n args=training_args, \n data_collator=data_collator,\n tokenizer=tokenizer,\n)",
"_____no_output_____"
],
[
"pred = trainer.predict(eval_dataset)",
"_____no_output_____"
],
[
"\nfor threshold in [0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9]:\n preds = []\n for text, pr in zip(trial.text, pred.predictions):\n proba = np.exp(pr[pr[:, 0]!=-100])\n proba /= proba.sum(axis=1, keepdims=True)\n labels = (proba[:, 1] >= threshold).astype(int).tolist()\n preds.append(labels2spans(text, labels, tokenizer))\n score = np.mean([f1(p, y) for p, y in zip(preds, trial.spans)])\n print(threshold, score)\n ",
"0.3 0.6646029995260339\n0.35 0.6715131454982759\n0.4 0.6744209810749072\n0.45 0.6716742100903217\n0.5 0.673925228987182\n0.55 0.6710741889893577\n0.6 0.6717334585591548\n0.65 0.6738372257764363\n0.7 0.6682677734353392\n0.75 0.6617100031176605\n0.8 0.6475396800952727\n0.85 0.6278304231112732\n0.9 0.59010012748674\n"
],
[
"pred = trainer.predict(final_test_dataset)",
"_____no_output_____"
],
[
"\n\nscores = []\nfor threshold in [0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9]:\n preds = []\n for text, pr in zip(final_test.text, pred.predictions):\n proba = np.exp(pr[pr[:, 0]!=-100])\n proba /= proba.sum(axis=1, keepdims=True)\n labels = (proba[:, 1] >= threshold).astype(int).tolist()\n preds.append(labels2spans(text, labels, tokenizer))\n score = np.mean([f1(p, y) for p, y in zip(preds, final_test.spans)])\n print(threshold, score)\n scores.append(score)\nscores_standard_clf = scores",
"0.1 0.5545336665297276\n0.15 0.5882344110328829\n0.2 0.6077757811791716\n0.25 0.622973609210159\n0.3 0.633958561050074\n0.35 0.6443207294927615\n0.4 0.6519269346134431\n0.45 0.6589416029476445\n0.5 0.6639145447542181\n0.55 0.6676402819948017\n0.6 0.6695941122804318\n0.65 0.6756992752387653\n0.7 0.6785258499047722\n0.75 0.6851208918524581\n0.8 0.6864238573971339\n0.85 0.6771599110310472\n0.9 0.6545350848098808\n"
]
],
[
[
"#### roberta_clf_ft",
"_____no_output_____"
]
],
[
[
"model = RobertaForTokenClassification.from_pretrained('models/roberta_clf_ft')\nmodel.to(device);",
"_____no_output_____"
],
[
"training_args = TrAr(\n output_dir='tmp',\n per_device_eval_batch_size=8,\n)",
"_____no_output_____"
],
[
"trainer = Trainer(\n model=model, \n args=training_args, \n data_collator=data_collator,\n tokenizer=tokenizer,\n)",
"_____no_output_____"
],
[
"pred = trainer.predict(eval_dataset)",
"_____no_output_____"
],
[
"\nfor threshold in [0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9]:\n preds = []\n for text, pr in zip(trial.text, pred.predictions):\n proba = np.exp(pr[pr[:, 0]!=-100])\n proba /= proba.sum(axis=1, keepdims=True)\n labels = (proba[:, 1] >= threshold).astype(int).tolist()\n preds.append(labels2spans(text, labels, tokenizer))\n score = np.mean([f1(p, y) for p, y in zip(preds, trial.spans)])\n print(threshold, score)",
"0.3 0.6849391042415774\n0.35 0.684180917165571\n0.4 0.6787335180780203\n0.45 0.6735204378403357\n0.5 0.6640287939316257\n0.55 0.6541785553598181\n0.6 0.6514593408593448\n0.65 0.6397508521212191\n0.7 0.6172986482655661\n0.75 0.5866965505118897\n0.8 0.548500481404008\n0.85 0.5094082267816179\n0.9 0.44324794411502716\n"
],
[
"pred = trainer.predict(final_test_dataset)",
"_____no_output_____"
],
[
"\n\nscores = []\nfor threshold in [0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9]:\n preds = []\n for text, pr in zip(final_test.text, pred.predictions):\n proba = np.exp(pr[pr[:, 0]!=-100])\n proba /= proba.sum(axis=1, keepdims=True)\n labels = (proba[:, 1] >= threshold).astype(int).tolist()\n preds.append(labels2spans(text, labels, tokenizer))\n score = np.mean([f1(p, y) for p, y in zip(preds, final_test.spans)])\n print(threshold, score)\n scores.append(score)\nscores_tagging_clf = scores",
"0.1 0.5752439183819529\n0.15 0.6090082887908743\n0.2 0.6286550876155881\n0.25 0.6431153729750709\n0.3 0.6569361916626016\n0.35 0.6647910078957441\n0.4 0.6682952103234534\n0.45 0.6754628567024429\n0.5 0.680619717437095\n0.55 0.6809661485870909\n0.6 0.6832488454134622\n0.65 0.6797076042739688\n0.7 0.6773883276721572\n0.75 0.6683696147429589\n0.8 0.6469023025439109\n0.85 0.6102817741854818\n0.9 0.5628998440674843\n"
]
],
[
[
"#### roberta_clf_ft_plus_pseudolabels",
"_____no_output_____"
]
],
[
[
"model = RobertaForTokenClassification.from_pretrained('models/roberta_clf_ft_plus_pseudolabels')\nmodel.to(device);",
"_____no_output_____"
],
[
"training_args = TrAr(\n output_dir='tmp',\n per_device_eval_batch_size=8,\n)",
"_____no_output_____"
],
[
"trainer = Trainer(\n model=model, \n args=training_args, \n data_collator=data_collator,\n tokenizer=tokenizer,\n)",
"_____no_output_____"
],
[
"pred = trainer.predict(eval_dataset)",
"_____no_output_____"
],
[
"\nfor threshold in [0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9]:\n preds = []\n for text, pr in zip(trial.text, pred.predictions):\n proba = np.exp(pr[pr[:, 0]!=-100])\n proba /= proba.sum(axis=1, keepdims=True)\n labels = (proba[:, 1] >= threshold).astype(int).tolist()\n preds.append(labels2spans(text, labels, tokenizer))\n score = np.mean([f1(p, y) for p, y in zip(preds, trial.spans)])\n print(threshold, score)",
"0.3 0.6827629205595998\n0.35 0.6855501867465028\n0.4 0.6880259983225367\n0.45 0.6834992017957995\n0.5 0.6797056139144931\n0.55 0.6784005272126371\n0.6 0.6688018820701156\n0.65 0.6623198122783942\n0.7 0.6511178167749042\n0.75 0.6296963400207056\n0.8 0.6064892688014509\n0.85 0.564881928012032\n0.9 0.5030782060202328\n"
],
[
"pred = trainer.predict(final_test_dataset)",
"_____no_output_____"
],
[
"\nscores = []\nfor threshold in [0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9]:\n preds = []\n for text, pr in zip(final_test.text, pred.predictions):\n proba = np.exp(pr[pr[:, 0]!=-100])\n proba /= proba.sum(axis=1, keepdims=True)\n labels = (proba[:, 1] >= threshold).astype(int).tolist()\n preds.append(labels2spans(text, labels, tokenizer))\n score = np.mean([f1(p, y) for p, y in zip(preds, final_test.spans)])\n print(threshold, score)\n scores.append(score)\nscores_pseudolabel = scores",
"0.1 0.5602049029090154\n0.15 0.5965378234529369\n0.2 0.6161116736335367\n0.25 0.6329899472598364\n0.3 0.6458124347386611\n0.35 0.6555907203988778\n0.4 0.6620672594740299\n0.45 0.6692636447522394\n0.5 0.674174837442134\n0.55 0.6786709542304435\n0.6 0.6819797727432348\n0.65 0.6816887377159155\n0.7 0.6820785225830176\n0.75 0.6807040933415471\n0.8 0.6784134903922955\n0.85 0.6601609890118179\n0.9 0.6186889746671457\n"
]
],
[
[
"#### roberta_single_v2",
"_____no_output_____"
]
],
[
[
"model = RobertaForTokenClassification.from_pretrained('models/roberta_single_v2')\nmodel.to(device);",
"_____no_output_____"
],
[
"training_args = TrAr(\n output_dir='tmp',\n per_device_eval_batch_size=8,\n)",
"_____no_output_____"
],
[
"trainer = Trainer(\n model=model, \n args=training_args, \n data_collator=data_collator,\n tokenizer=tokenizer,\n)",
"_____no_output_____"
],
[
"pred = trainer.predict(eval_dataset)",
"_____no_output_____"
],
[
"\nfor threshold in [0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9]:\n preds = []\n for text, pr in zip(trial.text, pred.predictions):\n proba = np.exp(pr[pr[:, 0]!=-100])\n proba /= proba.sum(axis=1, keepdims=True)\n labels = (proba[:, 1] >= threshold).astype(int).tolist()\n preds.append(labels2spans(text, labels, tokenizer))\n score = np.mean([f1(p, y) for p, y in zip(preds, trial.spans)])\n print(threshold, score)",
"0.3 0.66359735406299\n0.35 0.6700635517847642\n0.4 0.6731999549591532\n0.45 0.6729071697382535\n0.5 0.6702516698368454\n0.55 0.6634497922801099\n0.6 0.6618129018990354\n0.65 0.6534142826263554\n0.7 0.6474321946754974\n0.75 0.6306972882994829\n0.8 0.6064613727890047\n0.85 0.5650635904385258\n0.9 0.4991748439461978\n"
],
[
"pred = trainer.predict(final_test_dataset)",
"_____no_output_____"
],
[
"\nscores = []\nfor threshold in [0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9]:\n preds = []\n for text, pr in zip(final_test.text, pred.predictions):\n proba = np.exp(pr[pr[:, 0]!=-100])\n proba /= proba.sum(axis=1, keepdims=True)\n labels = (proba[:, 1] >= threshold).astype(int).tolist()\n preds.append(labels2spans(text, labels, tokenizer))\n score = np.mean([f1(p, y) for p, y in zip(preds, final_test.spans)])\n print(threshold, score)\n scores.append(score)\nscores_standard = scores",
"0.1 0.4950510627108955\n0.15 0.5492469372720504\n0.2 0.5870032860644548\n0.25 0.6115084907939428\n0.3 0.6296202613629943\n0.35 0.6435584198597509\n0.4 0.6550788084151723\n0.45 0.660826323136532\n0.5 0.6682946286098522\n0.55 0.6709144466014042\n0.6 0.677576863437034\n0.65 0.6812185597173275\n0.7 0.6818291704532222\n0.75 0.6769056066936087\n0.8 0.667081025778574\n0.85 0.6527564321439464\n0.9 0.6191291845142385\n"
],
[
"xx = [0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9]\nplt.plot(xx, scores_standard)\nplt.plot(xx, scores_standard_clf)\nplt.plot(xx, scores_tagging_clf)\nplt.plot(xx, scores_pseudolabel)\nplt.legend(['standard', 'clf', 'tagging clf', 'pseudo labels'])",
"_____no_output_____"
],
[
"ss = [scores_standard, scores_pseudolabel, scores_standard_clf, scores_tagging_clf]\nfor sss in ss:\n print(f'{np.max(sss):.3f}, {xx[np.argmax(sss)]}, {sss[8]:.3f}, {sss[10]:.3f}')",
"0.682, 0.7, 0.668, 0.678\n0.682, 0.7, 0.674, 0.682\n0.686, 0.8, 0.664, 0.670\n0.683, 0.6, 0.681, 0.683\n"
]
],
[
[
"#### standard deviation of score",
"_____no_output_____"
]
],
[
[
"threshold = 0.5\npreds = []\nfor text, pr in zip(final_test.text, pred.predictions):\n proba = np.exp(pr[pr[:, 0]!=-100])\n proba /= proba.sum(axis=1, keepdims=True)\n labels = (proba[:, 1] >= threshold).astype(int).tolist()\n preds.append(labels2spans(text, labels, tokenizer))\nff = [f1(p, y) for p, y in zip(preds, final_test.spans)]\nscore = np.mean(ff)\nprint(score)",
"0.6682946286098522\n"
],
[
"se = np.std(ff) / np.sqrt(len(ff)) * 1.96\nprint(score - se, score + se)",
"0.6506454922132865 0.685943765006418\n"
],
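    [
        "# Added aside (sketch): the interval above is a normal-approximation 95% CI for the mean F1\n# (mean +/- 1.96 * standard error). A bootstrap estimate doubles as a sanity check:\nboot = [np.mean(np.random.choice(ff, size=len(ff), replace=True)) for _ in range(2000)]\nprint(np.percentile(boot, [2.5, 97.5]))",
        "_____no_output_____"
    ],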
[
"np.std(ff) / np.sqrt(len(ff))",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
d0ed0a01c45e5ba4b21dae4589cef7316122bad8 | 1,150 | ipynb | Jupyter Notebook | Python/Exercicios_Curso_em_Videos/ex019.ipynb | rubensrabelo/Exercicios | af8e399a013e1b357b0cef2506c99dae8b8dd7b6 | [
"MIT"
] | null | null | null | Python/Exercicios_Curso_em_Videos/ex019.ipynb | rubensrabelo/Exercicios | af8e399a013e1b357b0cef2506c99dae8b8dd7b6 | [
"MIT"
] | null | null | null | Python/Exercicios_Curso_em_Videos/ex019.ipynb | rubensrabelo/Exercicios | af8e399a013e1b357b0cef2506c99dae8b8dd7b6 | [
"MIT"
] | null | null | null | 21.296296 | 44 | 0.525217 | [
[
[
"from random import choice\na1 = str(input('Primeiro Aluno: '))\na2 = str(input('Segundo Aluno: '))\na3 = str(input('Terceiro Aluno: '))\na4 = str(input('Quarto Aluno: '))\nlista_alunos = [a1, a2, a3, a4]\na5 = choice(lista_alunos)\nprint(f'O aluno escohido foi {a5}.')",
"Primeiro Aluno: Paulo\nSegundo Aluno: Rebeca\nTerceiro Aluno: Fernanda\nQuarto Aluno: Rubens\nO aluno escohido foi Paulo.\n"
]
]
] | [
"code"
] | [
[
"code"
]
] |
d0ed1dbbe6679cc08dfaaffa304c624f89a16ea4 | 34,160 | ipynb | Jupyter Notebook | Chapter6/ch6-2-classification-with-neural-networks.ipynb | PacktPublishing/-Hands-On-Predictive-Analytics-with-Python | 90ba92f75f5014bb6b77f99dc7997d218d493e4a | [
"MIT"
] | 13 | 2018-10-03T04:15:07.000Z | 2022-02-22T02:45:55.000Z | Chapter06/ch6-2-classification-with-neural-networks.ipynb | Ygsunshine007/Hands-On-Predictive-Analytics-with-Python | 9cae12fdea7238cd53a39fae6fbcd2d0ece13a48 | [
"MIT"
] | null | null | null | Chapter06/ch6-2-classification-with-neural-networks.ipynb | Ygsunshine007/Hands-On-Predictive-Analytics-with-Python | 9cae12fdea7238cd53a39fae6fbcd2d0ece13a48 | [
"MIT"
] | 7 | 2018-10-03T19:20:23.000Z | 2021-08-11T20:33:12.000Z | 40.666667 | 274 | 0.440105 | [
[
[
"# Predicting Credit Card Default with Neural Networks",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport os\n%matplotlib inline",
"_____no_output_____"
]
],
[
[
"### Back with the credit card default dataset",
"_____no_output_____"
]
],
[
[
"# Loading the dataset\nDATA_DIR = '../data'\nFILE_NAME = 'credit_card_default.csv'\ndata_path = os.path.join(DATA_DIR, FILE_NAME)\nccd = pd.read_csv(data_path, index_col=\"ID\")\nccd.rename(columns=lambda x: x.lower(), inplace=True)\nccd.rename(columns={'default payment next month':'default'}, inplace=True)\n\n# getting the groups of features\nbill_amt_features = ['bill_amt'+ str(i) for i in range(1,7)]\npay_amt_features = ['pay_amt'+ str(i) for i in range(1,7)]\nnumerical_features = ['limit_bal','age'] + bill_amt_features + pay_amt_features\n\n# Creating creating binary features\nccd['male'] = (ccd['sex'] == 1).astype('int')\nccd['grad_school'] = (ccd['education'] == 1).astype('int')\nccd['university'] = (ccd['education'] == 2).astype('int')\n#ccd['high_school'] = (ccd['education'] == 3).astype('int')\nccd['married'] = (ccd['marriage'] == 1).astype('int')\n\n# simplifying pay features \npay_features= ['pay_' + str(i) for i in range(1,7)]\nfor x in pay_features:\n ccd.loc[ccd[x] <= 0, x] = 0\n\n# simplifying delayed features\ndelayed_features = ['delayed_' + str(i) for i in range(1,7)]\nfor pay, delayed in zip(pay_features, delayed_features):\n ccd[delayed] = (ccd[pay] > 0).astype(int)\n \n# creating a new feature: months delayed\nccd['months_delayed'] = ccd[delayed_features].sum(axis=1)",
"_____no_output_____"
]
],
[
[
"## Split and standarize the dataset",
"_____no_output_____"
]
],
[
[
"numerical_features = numerical_features + ['months_delayed']\nbinary_features = ['male','married','grad_school','university']\nX = ccd[numerical_features + binary_features]\ny = ccd['default'].astype(int)\n\n## Split\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=5/30, random_state=101)\n\n## Standarize\nfrom sklearn.preprocessing import StandardScaler\nscaler = StandardScaler()\nscaler.fit(X_train[numerical_features])\nX_train.loc[:, numerical_features] = scaler.transform(X_train[numerical_features])\n# Standarize also the testing set\nX_test.loc[:, numerical_features] = scaler.transform(X_test[numerical_features])",
"C:\\Users\\direc\\Anaconda3\\envs\\mybook\\lib\\site-packages\\pandas\\core\\indexing.py:543: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n self.obj[item] = s\n"
]
],
[
[
"### Building the neural network for classification",
"_____no_output_____"
]
],
[
[
"from keras.models import Sequential\nnn_classifier = Sequential()",
"C:\\Users\\direc\\Anaconda3\\envs\\mybook\\lib\\site-packages\\h5py\\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n from ._conv import register_converters as _register_converters\nUsing TensorFlow backend.\n"
],
[
"from keras.layers import Dense\nn_input = X_train.shape[1]\nn_units_hidden = 64\nnn_classifier.add(Dense(units=n_units_hidden, activation='relu', input_shape=(n_input,)))",
"_____no_output_____"
],
[
"# add 2nd hidden layer\nnn_classifier.add(Dense(units=n_units_hidden, activation='relu'))\n# add 3th hidden layer\nnn_classifier.add(Dense(units=n_units_hidden, activation='relu'))\n# add 4th hidden layer\nnn_classifier.add(Dense(units=n_units_hidden, activation='relu'))\n# add 5th hidden layer\nnn_classifier.add(Dense(units=n_units_hidden, activation='relu'))",
"_____no_output_____"
],
[
"# output layer\nnn_classifier.add(Dense(1, activation='sigmoid'))",
"_____no_output_____"
]
],
[
[
"### Training the network",
"_____no_output_____"
]
],
[
[
"## compiling step\nnn_classifier.compile(loss='binary_crossentropy', optimizer='adam')",
"_____no_output_____"
],
[
"nn_classifier.summary()",
"_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense_1 (Dense) (None, 64) 1280 \n_________________________________________________________________\ndense_2 (Dense) (None, 64) 4160 \n_________________________________________________________________\ndense_3 (Dense) (None, 64) 4160 \n_________________________________________________________________\ndense_4 (Dense) (None, 64) 4160 \n_________________________________________________________________\ndense_5 (Dense) (None, 64) 4160 \n_________________________________________________________________\ndense_6 (Dense) (None, 1) 65 \n=================================================================\nTotal params: 17,985\nTrainable params: 17,985\nNon-trainable params: 0\n_________________________________________________________________\n"
],
[
"nn_classifier.save_weights('class_initial_w.h5')",
"_____no_output_____"
],
[
"batch_size = 64\nn_epochs = 150\nnn_classifier.fit(X_train, y_train, epochs=n_epochs, batch_size=batch_size)",
"Epoch 1/150\n25000/25000 [==============================] - 1s 30us/step - loss: 0.4635\nEpoch 2/150\n25000/25000 [==============================] - 0s 17us/step - loss: 0.4467\nEpoch 3/150\n25000/25000 [==============================] - 0s 15us/step - loss: 0.4432\nEpoch 4/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.4414\nEpoch 5/150\n25000/25000 [==============================] - 0s 15us/step - loss: 0.4399\nEpoch 6/150\n25000/25000 [==============================] - 0s 15us/step - loss: 0.4389\nEpoch 7/150\n25000/25000 [==============================] - 0s 15us/step - loss: 0.4373\nEpoch 8/150\n25000/25000 [==============================] - 0s 15us/step - loss: 0.4370\nEpoch 9/150\n25000/25000 [==============================] - 0s 15us/step - loss: 0.4344\nEpoch 10/150\n25000/25000 [==============================] - 0s 15us/step - loss: 0.4332\nEpoch 11/150\n25000/25000 [==============================] - 0s 15us/step - loss: 0.4330\nEpoch 12/150\n25000/25000 [==============================] - 0s 15us/step - loss: 0.4306\nEpoch 13/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.4293\nEpoch 14/150\n25000/25000 [==============================] - 0s 17us/step - loss: 0.4279\nEpoch 15/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.4262\nEpoch 16/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.4257\nEpoch 17/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.4219\nEpoch 18/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.4206\nEpoch 19/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.4191\nEpoch 20/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.4163\nEpoch 21/150\n25000/25000 [==============================] - 0s 15us/step - loss: 0.4153\nEpoch 22/150\n25000/25000 [==============================] - 0s 15us/step - loss: 0.4118\nEpoch 23/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.4090\nEpoch 24/150\n25000/25000 [==============================] - 0s 15us/step - loss: 0.4072\nEpoch 25/150\n25000/25000 [==============================] - 0s 15us/step - loss: 0.4050\nEpoch 26/150\n25000/25000 [==============================] - 0s 15us/step - loss: 0.4026\nEpoch 27/150\n25000/25000 [==============================] - 0s 15us/step - loss: 0.3995\nEpoch 28/150\n25000/25000 [==============================] - 0s 15us/step - loss: 0.3963\nEpoch 29/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.3927\nEpoch 30/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.3916\nEpoch 31/150\n25000/25000 [==============================] - 0s 15us/step - loss: 0.3883\nEpoch 32/150\n25000/25000 [==============================] - 0s 15us/step - loss: 0.3858\nEpoch 33/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.3830\nEpoch 34/150\n25000/25000 [==============================] - 0s 15us/step - loss: 0.3829\nEpoch 35/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.3805\nEpoch 36/150\n25000/25000 [==============================] - 0s 15us/step - loss: 0.3753\nEpoch 37/150\n25000/25000 [==============================] - 0s 15us/step - loss: 0.3723\nEpoch 38/150\n25000/25000 [==============================] - 0s 15us/step - loss: 0.3684\nEpoch 39/150\n25000/25000 [==============================] - 0s 15us/step - loss: 0.3646\nEpoch 40/150\n25000/25000 
[==============================] - 0s 16us/step - loss: 0.3658\nEpoch 41/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.3627\nEpoch 42/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.3577\nEpoch 43/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.3554\nEpoch 44/150\n25000/25000 [==============================] - 0s 19us/step - loss: 0.3542\nEpoch 45/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.3538\nEpoch 46/150\n25000/25000 [==============================] - 0s 17us/step - loss: 0.3472\nEpoch 47/150\n25000/25000 [==============================] - 0s 17us/step - loss: 0.3471\nEpoch 48/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.3450\nEpoch 49/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.3411\nEpoch 50/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.3423\nEpoch 51/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.3362\nEpoch 52/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.3334\nEpoch 53/150\n25000/25000 [==============================] - 0s 15us/step - loss: 0.3335\nEpoch 54/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.3421\nEpoch 55/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.3315\nEpoch 56/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.3274\nEpoch 57/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.3269\nEpoch 58/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.3198\nEpoch 59/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.3187\nEpoch 60/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.3167\nEpoch 61/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.3180\nEpoch 62/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.3138\nEpoch 63/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.3081\nEpoch 64/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.3099\nEpoch 65/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.3079\nEpoch 66/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.3007\nEpoch 67/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.3055\nEpoch 68/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.3032\nEpoch 69/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.3010\nEpoch 70/150\n25000/25000 [==============================] - 0s 19us/step - loss: 0.2987\nEpoch 71/150\n25000/25000 [==============================] - 0s 19us/step - loss: 0.2966\nEpoch 72/150\n25000/25000 [==============================] - 0s 17us/step - loss: 0.3032\nEpoch 73/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.2959\nEpoch 74/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.2914\nEpoch 75/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.2896\nEpoch 76/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.2837\nEpoch 77/150\n25000/25000 [==============================] - 0s 18us/step - loss: 0.2860\nEpoch 78/150\n25000/25000 [==============================] - 0s 17us/step - loss: 0.2820\nEpoch 79/150\n25000/25000 [==============================] - 0s 
18us/step - loss: 0.2796\nEpoch 80/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.2791\nEpoch 81/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.2847\nEpoch 82/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.2748\nEpoch 83/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.2721\nEpoch 84/150\n25000/25000 [==============================] - 0s 17us/step - loss: 0.2678\nEpoch 85/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.2724\nEpoch 86/150\n25000/25000 [==============================] - 0s 17us/step - loss: 0.2755\nEpoch 87/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.2641\nEpoch 88/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.2626\nEpoch 89/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.2685\nEpoch 90/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.2739\nEpoch 91/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.2659\nEpoch 92/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.2687\nEpoch 93/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.2620\nEpoch 94/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.2636\nEpoch 95/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.2624\nEpoch 96/150\n25000/25000 [==============================] - 0s 15us/step - loss: 0.2553\nEpoch 97/150\n25000/25000 [==============================] - 0s 15us/step - loss: 0.2489\nEpoch 98/150\n25000/25000 [==============================] - 0s 17us/step - loss: 0.2500\nEpoch 99/150\n25000/25000 [==============================] - 0s 15us/step - loss: 0.2571\nEpoch 100/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.2530\nEpoch 101/150\n25000/25000 [==============================] - 0s 15us/step - loss: 0.2588\nEpoch 102/150\n25000/25000 [==============================] - 0s 15us/step - loss: 0.2534\nEpoch 103/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.2447\nEpoch 104/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.2508\nEpoch 105/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.2433\nEpoch 106/150\n25000/25000 [==============================] - 0s 15us/step - loss: 0.2381\nEpoch 107/150\n25000/25000 [==============================] - 0s 17us/step - loss: 0.2510\nEpoch 108/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.2528\nEpoch 109/150\n25000/25000 [==============================] - 0s 18us/step - loss: 0.2399\nEpoch 110/150\n25000/25000 [==============================] - 0s 18us/step - loss: 0.2359\nEpoch 111/150\n25000/25000 [==============================] - 0s 17us/step - loss: 0.2324\nEpoch 112/150\n25000/25000 [==============================] - 0s 17us/step - loss: 0.2493\nEpoch 113/150\n25000/25000 [==============================] - 0s 15us/step - loss: 0.2407\nEpoch 114/150\n25000/25000 [==============================] - 0s 17us/step - loss: 0.2408\nEpoch 115/150\n25000/25000 [==============================] - 0s 15us/step - loss: 0.2343\nEpoch 116/150\n25000/25000 [==============================] - 0s 15us/step - loss: 0.2322\nEpoch 117/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.2314\nEpoch 118/150\n25000/25000 [==============================] - 0s 17us/step - loss: 
0.2292\nEpoch 119/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.2354\nEpoch 120/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.2353\nEpoch 121/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.2303\nEpoch 122/150\n25000/25000 [==============================] - 0s 17us/step - loss: 0.2238\nEpoch 123/150\n25000/25000 [==============================] - 0s 18us/step - loss: 0.2218\nEpoch 124/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.2299\nEpoch 125/150\n25000/25000 [==============================] - 0s 15us/step - loss: 0.2224\nEpoch 126/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.2244\nEpoch 127/150\n25000/25000 [==============================] - 0s 17us/step - loss: 0.2223\nEpoch 128/150\n25000/25000 [==============================] - 0s 15us/step - loss: 0.2198\nEpoch 129/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.2242\nEpoch 130/150\n25000/25000 [==============================] - 0s 15us/step - loss: 0.2378\nEpoch 131/150\n25000/25000 [==============================] - 0s 15us/step - loss: 0.2157\nEpoch 132/150\n25000/25000 [==============================] - 0s 15us/step - loss: 0.2157\nEpoch 133/150\n25000/25000 [==============================] - 0s 15us/step - loss: 0.2113\nEpoch 134/150\n25000/25000 [==============================] - 0s 15us/step - loss: 0.2159\nEpoch 135/150\n25000/25000 [==============================] - 0s 15us/step - loss: 0.2152\nEpoch 136/150\n25000/25000 [==============================] - 0s 15us/step - loss: 0.2129\nEpoch 137/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.2200\nEpoch 138/150\n25000/25000 [==============================] - 0s 18us/step - loss: 0.2137\nEpoch 139/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.2096\nEpoch 140/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.2089\nEpoch 141/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.2136\nEpoch 142/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.2095\nEpoch 143/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.2011\nEpoch 144/150\n25000/25000 [==============================] - 0s 15us/step - loss: 0.2098\nEpoch 145/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.2227\nEpoch 146/150\n25000/25000 [==============================] - 0s 15us/step - loss: 0.2123\nEpoch 147/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.2051\nEpoch 148/150\n25000/25000 [==============================] - 0s 15us/step - loss: 0.2085\nEpoch 149/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.1986\nEpoch 150/150\n25000/25000 [==============================] - 0s 16us/step - loss: 0.1929\n"
]
],
[
[
"## Evaluating predictions",
"_____no_output_____"
]
],
[
[
"## Getting the probabilities\ny_pred_train_prob = nn_classifier.predict(X_train)\ny_pred_test_prob = nn_classifier.predict(X_test)\n\n## Classifications from predictions\ny_pred_train = (y_pred_train_prob > 0.5).astype(int)\ny_pred_test = (y_pred_test_prob > 0.5).astype(int)",
"_____no_output_____"
],
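    [
        "# Added aside (sketch, not in the original notebook): with imbalanced default data,\n# accuracy alone can be misleading, so a confusion matrix on the test set is a useful complement.\nfrom sklearn.metrics import confusion_matrix\nprint(confusion_matrix(y_true=y_test, y_pred=y_pred_test))",
        "_____no_output_____"
    ],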
[
"from sklearn.metrics import accuracy_score\ntrain_acc = accuracy_score(y_true=y_train, y_pred=y_pred_train)\ntest_acc = accuracy_score(y_true=y_test, y_pred=y_pred_test)\nprint(\"Train Accuracy: {:0.3f} \\nTest Accuracy: {:0.3f}\".format(train_acc, test_acc))",
"Train Accuracy: 0.923 \nTest Accuracy: 0.740\n"
]
],
[
[
"## Re-training the network with less epochs",
"_____no_output_____"
]
],
[
[
"## load the initial weights\nnn_classifier.load_weights('class_initial_w.h5')",
"_____no_output_____"
],
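    [
        "# Added aside (illustrative sketch, not part of the original run): instead of hand-picking\n# 50 epochs, Keras can stop training once a held-out loss stops improving. Assuming a 20%\n# validation split is acceptable here, it would look like this (left commented out):\nfrom keras.callbacks import EarlyStopping\nearly_stop = EarlyStopping(monitor='val_loss', patience=5)\n# nn_classifier.fit(X_train, y_train, epochs=150, batch_size=64,\n#                   validation_split=0.2, callbacks=[early_stop])",
        "_____no_output_____"
    ],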
[
"batch_size = 64\nn_epochs = 50\nnn_classifier.compile(loss='binary_crossentropy', optimizer='adam')\nnn_classifier.fit(X_train, y_train, epochs=n_epochs, batch_size=batch_size)",
"Epoch 1/50\n25000/25000 [==============================] - 1s 32us/step - loss: 0.4642\nEpoch 2/50\n25000/25000 [==============================] - 0s 16us/step - loss: 0.4468\nEpoch 3/50\n25000/25000 [==============================] - 0s 16us/step - loss: 0.4441\nEpoch 4/50\n25000/25000 [==============================] - 0s 16us/step - loss: 0.4416\nEpoch 5/50\n25000/25000 [==============================] - 0s 16us/step - loss: 0.4396\nEpoch 6/50\n25000/25000 [==============================] - 0s 16us/step - loss: 0.4391\nEpoch 7/50\n25000/25000 [==============================] - 0s 16us/step - loss: 0.4377\nEpoch 8/50\n25000/25000 [==============================] - 0s 16us/step - loss: 0.4363\nEpoch 9/50\n25000/25000 [==============================] - 0s 16us/step - loss: 0.4352\nEpoch 10/50\n25000/25000 [==============================] - 0s 16us/step - loss: 0.4337\nEpoch 11/50\n25000/25000 [==============================] - 0s 16us/step - loss: 0.4322\nEpoch 12/50\n25000/25000 [==============================] - 0s 16us/step - loss: 0.4327\nEpoch 13/50\n25000/25000 [==============================] - 0s 16us/step - loss: 0.4295\nEpoch 14/50\n25000/25000 [==============================] - 0s 16us/step - loss: 0.4284\nEpoch 15/50\n25000/25000 [==============================] - 0s 16us/step - loss: 0.4271\nEpoch 16/50\n25000/25000 [==============================] - 0s 16us/step - loss: 0.4247\nEpoch 17/50\n25000/25000 [==============================] - 0s 15us/step - loss: 0.4238\nEpoch 18/50\n25000/25000 [==============================] - 0s 16us/step - loss: 0.4208\nEpoch 19/50\n25000/25000 [==============================] - 0s 16us/step - loss: 0.4200\nEpoch 20/50\n25000/25000 [==============================] - 0s 15us/step - loss: 0.4178\nEpoch 21/50\n25000/25000 [==============================] - 0s 16us/step - loss: 0.4166\nEpoch 22/50\n25000/25000 [==============================] - 0s 16us/step - loss: 0.4129\nEpoch 23/50\n25000/25000 [==============================] - 0s 16us/step - loss: 0.4111\nEpoch 24/50\n25000/25000 [==============================] - 0s 16us/step - loss: 0.4085\nEpoch 25/50\n25000/25000 [==============================] - 0s 15us/step - loss: 0.4077\nEpoch 26/50\n25000/25000 [==============================] - 0s 16us/step - loss: 0.4041\nEpoch 27/50\n25000/25000 [==============================] - 0s 16us/step - loss: 0.4037\nEpoch 28/50\n25000/25000 [==============================] - 0s 16us/step - loss: 0.3999\nEpoch 29/50\n25000/25000 [==============================] - 0s 16us/step - loss: 0.3980\nEpoch 30/50\n25000/25000 [==============================] - 0s 15us/step - loss: 0.3955\nEpoch 31/50\n25000/25000 [==============================] - 0s 16us/step - loss: 0.3925\nEpoch 32/50\n25000/25000 [==============================] - 0s 16us/step - loss: 0.3903\nEpoch 33/50\n25000/25000 [==============================] - 0s 16us/step - loss: 0.3879\nEpoch 34/50\n25000/25000 [==============================] - 0s 16us/step - loss: 0.3856\nEpoch 35/50\n25000/25000 [==============================] - 0s 16us/step - loss: 0.3829\nEpoch 36/50\n25000/25000 [==============================] - 0s 16us/step - loss: 0.3802\nEpoch 37/50\n25000/25000 [==============================] - 0s 16us/step - loss: 0.3779\nEpoch 38/50\n25000/25000 [==============================] - 0s 16us/step - loss: 0.3778\nEpoch 39/50\n25000/25000 [==============================] - 0s 16us/step - loss: 0.3712\nEpoch 40/50\n25000/25000 [==============================] - 0s 16us/step - loss: 
0.3695\nEpoch 41/50\n25000/25000 [==============================] - 0s 16us/step - loss: 0.3679\nEpoch 42/50\n25000/25000 [==============================] - 0s 16us/step - loss: 0.3639\nEpoch 43/50\n25000/25000 [==============================] - 0s 16us/step - loss: 0.3601\nEpoch 44/50\n25000/25000 [==============================] - 0s 16us/step - loss: 0.3573\nEpoch 45/50\n25000/25000 [==============================] - 0s 18us/step - loss: 0.3545\nEpoch 46/50\n25000/25000 [==============================] - 0s 17us/step - loss: 0.3575\nEpoch 47/50\n25000/25000 [==============================] - 0s 17us/step - loss: 0.3519\nEpoch 48/50\n25000/25000 [==============================] - 0s 16us/step - loss: 0.3470\nEpoch 49/50\n25000/25000 [==============================] - 0s 16us/step - loss: 0.3459\nEpoch 50/50\n25000/25000 [==============================] - 0s 16us/step - loss: 0.3488\n"
],
[
"## Getting the probabilities\ny_pred_train_prob = nn_classifier.predict(X_train)\ny_pred_test_prob = nn_classifier.predict(X_test)\n\n## Classifications from predictions\ny_pred_train = (y_pred_train_prob > 0.5).astype(int)\ny_pred_test = (y_pred_test_prob > 0.5).astype(int)\n\n## Calculating accuracy\ntrain_acc = accuracy_score(y_true=y_train, y_pred=y_pred_train)\ntest_acc = accuracy_score(y_true=y_test, y_pred=y_pred_test)\nprint(\"Train Accuracy: {:0.3f} \\nTest Accuracy: {:0.3f}\".format(train_acc, test_acc))",
"Train Accuracy: 0.852 \nTest Accuracy: 0.786\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
d0ed1eeb420db6ce6c9d89b620ea035f69627891 | 42,792 | ipynb | Jupyter Notebook | Filtering.ipynb | tejasvi541/Pandas-Tutorial | 0a89e7aa3b9cf11d2eb1460c8e646b4324f79635 | [
"MIT"
] | null | null | null | Filtering.ipynb | tejasvi541/Pandas-Tutorial | 0a89e7aa3b9cf11d2eb1460c8e646b4324f79635 | [
"MIT"
] | null | null | null | Filtering.ipynb | tejasvi541/Pandas-Tutorial | 0a89e7aa3b9cf11d2eb1460c8e646b4324f79635 | [
"MIT"
] | null | null | null | 39.769517 | 281 | 0.397831 | [
[
[
"import pandas as pd",
"_____no_output_____"
],
[
"# Let us just create a dictioinary to understand about the DataFrame.\n\npeople = {\n \"First\": [\"Me\", \"Myself\", \"I\"],\n \"Last\" : [\"He\", 'She', \"It\"],\n \"Email\" : [\"[email protected]\", \"[email protected]\", \"[email protected]\"]\n}\n\n\n# In this dict We can visualise the keys as the column's descripton and the values as the data of those column , then we can visualise that each row of values is meant for a single person in this case. We can Make this dict be represented as rows and columns by using Pandas.",
"_____no_output_____"
],
[
"df_example1 = pd.DataFrame(people)",
"_____no_output_____"
],
[
"# Filter is python keyword so avoid using that.\n# We cant use python default and or , we will use | and &.\nfilt = (df_example1['Last'] == 'He') & (df_example1['First'] == 'Me')",
"_____no_output_____"
],
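    [
        "# Added aside (illustrative sketch): DataFrame.query is an equivalent, often more readable\n# way to filter, and the plain keywords and/or ARE allowed inside its expression string.\ndf_example1.query(\"Last == 'He' and First == 'Me'\")",
        "_____no_output_____"
    ],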
[
"df_example1.loc[filt, 'Email']\n# We get emails that matches those last name.",
"_____no_output_____"
],
[
"df_example1['Last'] == 'She'\n# This returns True with the data its matched correctly and false where not matched.",
"_____no_output_____"
],
[
"df_example1[filt]\n# It returns all the rows which have last name He",
"_____no_output_____"
],
[
"df_example1.loc[filt]\n# Same result as we passed just filt.(it passes series of booleans.)",
"_____no_output_____"
],
[
"df_example1.loc[~filt, 'Email']\n# ~ this negates the answer means those rows which doesnt match with our desired surname , now only their emails will showup.",
"_____no_output_____"
]
],
[
[
"# Now we will load our stcakoverflow survery data",
"_____no_output_____"
]
],
[
[
"df = pd.read_csv('data/survey_results_public.csv', index_col = 'Respondent')\nschema_df = pd.read_csv('data/survey_results_schema.csv', index_col = 'Column')",
"_____no_output_____"
],
[
"# Filtering data for people's salaries above/below a certain amount.\n# If you dont know which column in the data frame gives the salary, you can find that using schema_df\n#Creating a filter for our desired result.\nhigh_salary = (df['ConvertedComp'] > 70000)",
"_____no_output_____"
],
[
"df.loc[high_salary]",
"_____no_output_____"
],
[
"# Now we want only certain columms , we can pass that in df.loc\ndf.loc[high_salary, ['Country', 'LanguageWorkedWith', 'ConvertedComp']]",
"_____no_output_____"
],
[
"# Now as such we want data of the countries of our choice, we will create a list of those first.\n# and then we will create a new filter to filter out the stuff.\ncountries = ['United States', 'India', 'United Kingdom', 'Germany', 'Canada']\nfiltc = df['Country'].isin(countries)",
"_____no_output_____"
],
[
"df.loc[filtc, 'Country']",
"_____no_output_____"
],
[
"# We only want look at people who answered they knew python.\n# We will grab first the languageworedwith column for the desired data.\ndf['LanguageWorkedWith']",
"_____no_output_____"
],
[
"# We will use string method in pandas to get our results. or we can use (Regex).\n# for this column here , the string in my column should contain python.\n# We have NaN , so we will set na to false so it doesnt cause any error\nfiltl = df['LanguageWorkedWith'].str.contains('Python', na = False)",
"_____no_output_____"
],
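    [
        "# Added aside (illustrative sketch): str.contains also accepts a regular expression, so a\n# single hypothetical pattern can match several languages at once.\nfilt_multi = df['LanguageWorkedWith'].str.contains('Python|JavaScript', na=False, regex=True)\ndf.loc[filt_multi, 'LanguageWorkedWith']",
        "_____no_output_____"
    ],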
[
"df.loc[filtl,'LanguageWorkedWith']\n# all of these rows should have python.",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d0ed20f2f0b4458b7d7407176b7f19a9ce03dba6 | 64,948 | ipynb | Jupyter Notebook | Africa.ipynb | tgadf/charts | b2c78ec8467b8837c1d773dd55a4b1cfeecb564a | [
"MIT"
] | null | null | null | Africa.ipynb | tgadf/charts | b2c78ec8467b8837c1d773dd55a4b1cfeecb564a | [
"MIT"
] | null | null | null | Africa.ipynb | tgadf/charts | b2c78ec8467b8837c1d773dd55a4b1cfeecb564a | [
"MIT"
] | null | null | null | 33.721703 | 127 | 0.568439 | [
[
[
"## Basic stuff\n%load_ext autoreload\n%autoreload\n\nfrom IPython.core.display import display, HTML\ndisplay(HTML(\"<style>.container { width:100% !important; }</style>\"))\ndisplay(HTML(\"\"\"<style>div.output_area{max-height:10000px;overflow:scroll;}</style>\"\"\"))\n#IPython.Cell.options_default.cm_config.lineNumbers = true;\n\n################################################################################\n## Python Version\n################################################################################\nimport sys\nfrom io import StringIO\nfrom pandas import DataFrame, read_csv\nimport urllib\nfrom time import sleep\nfrom fsUtils import isFile\nfrom ioUtils import getFile, saveFile\nfrom webUtils import getHTML",
"The autoreload extension is already loaded. To reload it, use:\n %reload_ext autoreload\n"
],
[
"def downloadURL(url):\n user_agent = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7'\n headers={'User-Agent':user_agent,} \n\n print(\"Now Downloading {0}\".format(url))\n\n request=urllib.request.Request(url,None,headers) #The assembled request\n response = urllib.request.urlopen(request)\n data = response.read() # The data u need\n \n return data, response.getcode()",
"_____no_output_____"
],
[
"txt=\"\"\"Angola\nNgola Ritmos\nWaldemar Bastos\nBonga\nTeta Lando\nSam Mangwana\nLourdes Van-Dúnem\nMatadidi Mario\nPaulo Flores\nNeide Van-Dúnem\nNeblina\nTitica\nDon Kikas\nNeide Van-Dúnem\nArmy Squad\nDiamondog\nKeyLiza\nAnselmo Ralph\nNeide Van-Dúnem\nDon Kikas\nBuraka Som Sistema\nTitica\nDog Murras\n\nBenin\nAngelique Kidjo\nWally Badarou\n\nNigeria\nEvelyn summer\n\nBotswana\nBanjo Mosele\nFranco and Afro Musica\nMatsieng\nZeus\nBurkina Faso\nBalaké\nCheikh Lô\nDramane Kone\nFarafina\nBurundi\nKhadja Nin\nKebby Boy\nSat-B\n\nCameroon\nNjacko Backo\nFrancis Bebey\nMoni Bilé\nDiboué Black\nRichard Bona\nLes Têtes Brulées\nManu Dibango\nCharlotte Dipanda\nDyllann\nStanley Enow\nJovi\nMichael Kiessou\nCoco Mbassi\nYannick Noah\nKristo Numpuby\nSally Nyolo\nPetit Pays\nSam Fan Thomas\nLady Ponce\nMagasco\nWes Madiko\nDaphné\nSalatiel\nMr. Leo\nBlanche Bailly\nReniss\nSublymme\nKing B Swag\n\nCape Verde\nCesaria Evora\nGil Semedo\nCôte d'Ivoire\nAlpha Blondy\nMagic System\nErnesto Djédjé\nTiken Jah Fakoly\nDJ Arafat\nSerge Beynaud\nFoliba trio\n\nRepublic of the Congo (Congo-Brazzaville)\nYoulou Mabiala\nPierre Moutouari\nWerrason\nPapa Wemba\nFerre Gola\nFally Ipupa\nMbilia Bel\nAbeti Masikini\nMadilu System\nYoulou Mabiala\nFranco Luambo Makiadi\nFranklin Boukaka\nKoffi Olomide\n\nDemocratic Republic of the Congo (former Zaire)\nAbeti Masikini\nAfrican Fiesta\nAvelino\nAwilo Longomba\nBimi Ombale\nBisso Na Bisso\nBouro Mpela\nBozi Boziana\nCindy Le Coeur\nDadju\nDamso\nDany Engobo\nEvoloko Jocker\nDiblo Dibala\nDindo Yogo\nFabregas\nFally Ipupa\nFerré Gola\nGaz Mawete\nGeo Bilongo\nGibson Butukondolo\nGrand Kalle\nHéritier Watanabe\nIcha Kavons\nINNOSS'B\nJean Bosco Mwenda\nJessy Matador\nJimmy Omonga\nJosky Kiambukuta Londa\nKalash Criminel\nKanda Bongo Man\nKasai Allstars\nKaysha\nKeblack\nKékélé\nKing Kester Emeneya\nKoffi Olomide\nKonono Nº1\nKasaloo Kyanga\nLU KALA\nLanga Langa Stars\nLe Grand Kalle\nLokua Kanza\nMadilu Système\nMaître Gims\nMarie Daulne\nMarie Misamu\nMayaula Mayoni\nMbongwana Star\nM'bilia Bel\nMichel Boyibanda\nMohombi\nMose Fan Fan\nM'Pongo Love\nNaza\nNdombe Opetum\nNico Kasanda\nNinho\nPapa Wemba\nPepe Kalle and Empire Bakuba\nRay Lema\nSam Mangwana\nSinguila\nTabu Ley Rochereau\nWerrason\nYoulou Mabiala\nYxng Bane\n\nEgypt\nAmal Maher\nAmira Selim\nAmr Diab\nAngham\nAnoushka\nCarmen Suleiman\nDina El Wedidi\nHisham Abbas\nLeila Mourad\nMayam Mahmoud\nMohamed Mounir\nMohammed Abdel Wahab\nTamer Hosny\nEzz Eddin Hosni (1927-2013)\nMounira El Mahdeya\nNesma Mahgoub\nRatiba El-Hefny\nRuby\nSayed Darwish\nShadiya\nSherine\nUmm Kulthum\nYasmine Niazy\nYousra\nZizi Adel\n\nEritrea\nAbraham Afewerki\n\nEthiopia\nAminé\nMulugeta Abate\nTeddy Afro\nAlemu Aga\nMahmoud Ahmed\nTadesse Alemu\nMulatu Astatke\nAster Aweke\nAbatte Barihun\nAragaw Bedaso\nEyasu Berhe\nGirma Bèyènè\nAli Birra\nTamrat Desta\nAlemayehu Eshete\nTilahun Gessesse\nGigi\nThomas Gobena\nHachalu Hundessa\nKenna\nGetatchew Mekurya\nMunit Mesfin\nLoLa Monroe\nEmilia Rydberg\nKuku Sebsebe\nKiros Alemayehu\nTigist Shibabaw\nShantam Shubissa\nAbdu Kiar\nWalias Band\nWayna\nAsnaketch Worku\nDawit Yifru\nGildo Kassa\nYared Negu\n \nGabon\nOliver N'Goma\nPatience Dabany\nAnnie-Flore Batchiellilys\n\nGambia\nSona Maya Jobarteh\nFoday Musa Suso\n\nGhana\nGuy Warren\nRebop Kwaku Baah\nBecca\nDopeNation\nFuse ODG\nJay Ghartey\nOsibisa\nWendy Shay\nDarkovibes\nMugeez\nKiDi\nKuami Eugene\nEbony Reigns\nIwan\nKaakie\nSamini\nShatta Wale\nStonebwoy\nBernice Ofei\nDanny Nettey\nHelen Yawson\nJoe Beecham\nJoe 
Mettle\nKofi Owusu Dua Anto\nNayaah\nNii Okai\nOhemaa Mercy\nPreachers\nQwameGaby\nStella Aba Seal\nTagoe Sisters\nDiana Hamilton\nJoyce Blessing\nEfya\nA. B. Crentsil\nAlex Konadu\nAmakye Dede\nBen Brako\nBisa Kdei\nC.K. Mann\nDaddy Lumba\nE. T. Mensah\nEbo Taylor\nK. Frimpong\nKing Bruce\nKojo Antwi\nKoo Nimo\nKwabena Kwabena\nJerry Hansen\nAyesem\nAyigbe Edem\nBall J\nBice Osei Kuffour\nBuk Bak\nC-Real\nCastro\nCorp Sayvee\nD-Black\nEfya\nEL\nEno Barony\nGasmilla\nKesse\nM.anifest\nMedikal\nNero X\nOkyeame Kwame\nReggie Rockstone\nRuff n Smooth\nSarkodie\nSherifa Gunu\nSway\nTinny\nTrigmatic\nJoey B\nPappy Kojo\nGurunkz\nR2Bees\nKofi Kinaata\nKwesi Arthur\nKiDi\nKuami Eugene\nAdam Ro\nBobo Shanti\nRascalimu\nRita Marley\nRocky Dawuni\nSamini\nSheriff Ghale\nStonebwoy\nFancy Gadam\nAbubakari Lunna\nEphraim Amu\nKen Kafui\nPhilip Gbeho\n \nGuinea\nSona Tata Condé\nSekouba Bambino\nDaddi Cool\nLes Ballets Africains\nBalla et ses Balladins\nBembeya Jazz\nDjeli Moussa Diawara\nFamoudou Konaté\nMory Kanté\nMamady Keita\nBallet Nimba\nGuinea-Bissau\nJosé Carlos Schwarz\nEneida Marta\n\nKenya\nAkothee\nAvril\nAyub Ogada\nCece Sagini\nDaddy Owen\nDavid Mathenge\nDaudi Kabaka\nDJ Fita\nEric Wainaina\nE-Sir\nFadhili William\nFundi Konde\nGeorge Ramogi\nGloria Muliro\nHarry Kimani\nJabali Afrika\nJason Dunford\nJua Cali\nKavirondo\nKing Kaka\nKleptomaniax\nMighty King Kong\nMonski\nMusa Juma\nNaiboi\nNecessary Noize\nOkatch Biggy\nOtile Brown\nPrincess Jully\nRedsan\nRoger Whittaker\nSanaipei Tande\nSauti Sol\nSize 8\nStella Mwangi\nSuzzana Owiyo\nTony Nyadundo\nWahu\nWanyika bands\nSimba Wanyika\nWilly Paul\nWyre\n \nLiberia\nSundaygar Dearboy\nKnero\nTakun-J\n\nMadagascar\nAmbondronA\nVaiavy Chila\nMily Clément\nNinie Doniah\nRakoto Frah\nD'Gary\nRégis Gizavo\nEusèbe Jaojoby\nLego\nMahaleo\nErick Manana\nJerry Marcoss\nToto Mwandjani\nOladad\nRabaza\nNaka Rabemanantsoa\nAndrianary Ratianarivo\nOlombelona Ricky\nRossy\nMama Sana\nSenge\nMadagascar Slim\nTarika\nTearano\nJustin Vali\nNicolas Vatomanga\n\nMali\nBoubacar Traoré\nMory Kanté\nSalif Keita\nToumani Diabaté\nKandia Kouyaté\nHabib Koité\nIssa Bagayogo\nRokia Traoré\nTinariwen\nAli Farka Touré\nAmadou et Mariam\nOumou Sangaré\nAfel Bocoum\nLobi Traoré\nFatoumata Diawara\nDjelimady Tounkara\nRail Band\nMauritania\nDimi Mint Abba\nMalouma\nNoura Mint Seymali\n\nMorocco\nSaad Lamjarred\nElam Jay\nAnoGhan\nOussama Belhcen\nRajae El Mouhandiz\nMr Sufian\nManal\nTwo Tone\nMuslim\nDizzy DROS\nL7a9d\nCut Killer\nCanardo\nFrench Montana\nILY\nLarbi Batma\nAbdessadeq Cheqara\nMohamed Rouicha\nWorld music\nDriss El Maloumi\nHenry Azra\n \nMozambique\nWazimbo\nGhorwane\nFany Pfumo\nStewart Sukuma\nMoreira Chonguica\nLizha James\nNeyma\nMingas\nAl Bowlly\nWazimbo\n340ml\nAfric Simone\nNiger\nMamar Kassey\nMdou Moctar\n\nNigeria\n2face Idibia - hip hop and R&B singer\n9ice - hip hop and afropop singer\nA\nA-Q - hip hop artist\nAbiodun Koya (born 1980), gospel singer, opera singer\nAdé Bantu - Nigerian-German musician, producer, front man of the 13 piece band BANTU\nAdekunle Gold - singer, songwriter\nAdewale Ayuba - fuji music singer\nAfrikan Boy - rapper\nAfro Candy - pop singer\nAlamu Atatalo - sekere singer, a type of traditional Yoruba music\nAli Jita - Hausa singer and song writer\nAmarachi - singer, dancer, violinist\nAndre Blaze - rapper\nAramide - Afro-Jazz singer\nAṣa - R&B, country and pop singer-songwriter\nAyinde Bakare - Yoruba jùjú and highlife musician\nAyinla Kollington - Fuji musician\nB\nBabatunde Olatunji - drummer\nBanky W - pop and 
R&B singer-songwriter\nBlackface Naija - reggae musician\nBlaqbonez - rapper\nBrymo - singer\nBurna Boy - reggae-dancehall musician\nC\nCDQ - rapper, songwriter\nCelestine Ukwu - highlife musician\nChidinma - pop singer\nChike - singer, songwriter and actor\nChinko Ekun – rapper, songwriter\nCobhams Asuquo - soul singer\nCynthia Morgan - pop, hip hop and dancehall singer\nD\nD'banj - pop singer\nDa Emperor - indigenous rapper\nDa Grin - rapper\nDammy Krane - singer, songwriter\nDarey - R&B singer-songwriter\nDauda Epo-Akara - Yoruba musician\nDavido - pop singer\nDekumzy - R&B and highlife singer\nDele Ojo - juju music singer and performer\nDice Ailes - pop singer\nDi'Ja - singer\nDon Jazzy - recording artist and record producer\nD'Prince - Afro-pop singer\nDr. Alban - Nigerian-Swedish recording artist and producer\nDr SID - pop singer\nDuncan Mighty - reggae singer\nE\nEbenezer Obey - jùjú musician\nEchezonachukwu Nduka - pianist and musicologist\nEddy Wata - Eurodance singer\nEedris Abdulkareem\nEgo Ogbaro\neLDee – rapper, singer, producer\nEmeka Nwokedi – conductor and music director\nEmma Nyra – R&B singer\nEmmy Gee – rapper\nEva Alordiah-rapper and singer\nEvi Edna Ogholi-Reggae singer\nF\nFalz - rapper, songwriter\nFaze - R&B singer\nFela Kuti - afrobeat, jazz singer-songwriter and instrumentalist\nFela Sowande\nFemi Kuti - afrobeat, jazz singer-songwriter and instrumentalist\nFireboy DML - singer\nFlavour N'abania - highlife and hip hop singer\nFrank Edwards – gospel singer\nG\nGenevieve Nnaji - pop singer\nH\nHelen Parker-Jayne Isibor - opera singer and composer\nHarrysong - singer and songwriter\nHaruna Ishola\nHumblesmith - afropop singer\nI\nI.K. Dairo\nIce Prince - rapper\nIdahams - Singer and song writer\nIyanya - pop singer\nJ\nJ. Martins - highlife singer-songwriter and record producer\nJesse Jagz - rapper\nJasën Blu - R&B singer-songwriter and record producer\nJoeboy - singer\nJohnny Drille - singer\nK\nKcee\nKing Wadada - reggae singer\nKizz Daniel\nKoker\nKorede Bello\nL\nLadipoe\nLagbaja\nLara George\nLaycon\nLil Kesh\nLyta\nM\nM.I - rapper\nM Trill - rapper\nMajek Fashek - singer-songwriter\nMay7ven\nMaud Meyer - jazz singer\nMike Ejeagha - Highlife musician\nMo'Cheddah - hip hop singer\nMode 9 - rapper\nMonica Ogah - pop singer-songwriter\nMr 2Kay\nMr Eazi - singer-songwriter\nMr Raw\nMr Real - house singer\nMuma Gee - pop singer-songwriter\nMuna - rapper\nN\nNaeto C\nNaira Marley – singer and songwriter\nNiniola - Afro-house artist\nNiyola - soul and jazz singer\nNneka - hip hop and soul singer\nNonso Amadi\nNosa - gospel artist\nO\nObesere - fuji musician\nObiwon - R&B and gospel singer\nOlamide - rapper and hip hop artist\nOliver De Coque\nOmawumi - soul singer\nOmotola Jalade Ekeinde – R&B and pop singer\nOnyeka Onwenu - pop singer\nOrezi - reggae singer\nOriental Brothers\nOritse Femi\nOrlando Julius\nOsita Osadebe\nOrlando Owoh\nP\nPatience Ozokwor - highlife singer\nPatoranking - reggae and dancehall singer\nPepenazi - rapper, hip hop artist and record producer\nPericoma Okoye\nPeruzzi\nPeter King\nPhyno - rapper and record producer\nPraiz - R&B singer and songwriter\nPrince Nico Mbarga\nR\nReekado Banks - hip hop artist\nRema - Afrobeats and Trap\nRex Lawson\nRic Hassani\nRuby Gyang\nRuggedman - rapper and hip hop artist\nRuntown - songwriter and hip hop artist\nS\nSade Adu\nSafin De Coque - rapper and hip hop artist\nSalawa Abeni - Waka singer\nSamsong - gospel singer\nSasha P - rapper and singer\nSean Tizzle - Afropop\nSeun Kuti - afrobeat, Jazz 
singer-songwriter and instrumentalist\nSeyi Shay - pop singer and songwriter\nShina Peters - juju singer\nSimi\nSinach - gospel singer\nSkales - rapper and singer\nShola Allynson - Gospel Singer\nSonny Okosuns\nSound Sultan\nStella Damasus - R&B and soul singer\nSunny Ade - jùjú singer\nTamara Jones\nTekno Miles\nTems\nTeni\nTerry G\nTimaya\nTiwa Savage\nTimi Dakolo\nToby Foyeh\nTonto Dikeh\nTony Allen\nTony Tetuila\nTonye Garrick\nTope Alabi\nTunde King\nTunde Nightingale\nTY Bello\nVictor Olaiya\nVictor Uwaifo\nWaconzy\nWaje\nWasiu Alabi Pasuma\nWeird MC\nWilliam Onyeabor\nWizkid\nYcee\nYemi Alade\nYinka Ayefele\nYinka Davies\nYung6ix\nYusuf Olatunji\nZlatan\nZayn Africa\nZoro African\n\nRwanda\nAlpha Rwirangira\nTom Close\nRiderman\nKing James\nKnolwess Butera\nBenjami Mugisha\nUrban Boyz\nKate Bashabe\nSimon Bikindi\nCorneille\nMiss Jojo\n\nSenegal\nAkon\nBaaba Maal\nÉtoile de Dakar\nIsmaël Lô\nMansour Seck\nOrchestra Baobab\nPositive Black Soul\nThione Seck and Raam Daan\nStar Band\nTouré Kunda\nYoussou N'Dour and Étoile de Dakar\nXalam (band)\n\nSierra Leone\nBai Kamara\nS. E. Rogie\nSteady Bongo\nK-Man\nEmmerson\nAnis Halloway\nSupa Laj\n\nSomalia\nXiddigaha Geeska\nMohamed Mooge Liibaan\nAbdullahi Qarshe\nWaayaha Cusub\nAli Feiruz\nHasan Adan Samatar\nAar Maanta\nMohamed Sulayman Tubeec\nMaryam Mursal\nK'naan\nGuduuda 'Arwo\nMagool\n\nSouth Africa\nAfrican Children's Choir\nAfrotraction\nAKA, hip-hop artist and record producer\nAkustika Chamber Singers, chamber choir from Pretoria\naKing, South African acoustic rock band\nAmanda Black, Multi-award winning and platinum-selling Afro-soul singer-songwriter\nAmampondo, award-winning traditional Xhosa percussion group from Cape Town\nAnatii (born 1993), hip-hop artist and record producer\nA-Reece (born 1997), hip-hop artist and lyricist\nLeigh Ashton (born 1956), singer-songwriter from Johannesburg\nAssagai, Afro-rock band\nThe Awakening, gothic rock\nB\nBabes Wodumo, gqom musician\nBallyhoo, 1980s pop band best known for the hit \"Man on the Moon\"\nThe Bang\nLeonel Bastos (born 1956), Mozambiquan adult contemporary musician and producer working in South Africa\nBattery 9\nBlackByrd\nBusiswa, house musician\nBLK JKS\nElvis Blue, musician and songwriter\nBoo!\nBles Bridges (1947–2000), singer\nStef Bos\nCristina Boshoff\nJonathan Butler, singer-songwriter and guitarist\nThe Brother Moves On\nBrasse Vannie Kaap\nBright Blue, 1980s pop band, best known for the hit song \"Weeping\"\nBuckfever Underground\nBeatenberg\nBongo Maffin, kwaito music group\nBoom Shaka\nBucie (born 1987), R&B and soul singer\nGuy Buttery\nC\nAdrienne Camp, singer-songwriter\nCaptain Stu, ska, funk, punk, and soul fusion band\nArno Carstens, former lead singer of Springbok Nude Girls\nCassette\nCassper Nyovest, rapper and record producer\nTony Cedras (born 1952), musician\nChad, (born 1993), rapper\nYvonne Chaka Chaka, singer\nChris Chameleon, solo artist, lead singer and bass guitarist for Boo\nBlondie Chaplin, singer and guitarist\nJesse Clegg (born 1988)\nJohnny Clegg (born 1953)\nClout, 1970s rock group\nBasil Coetzee (1944–1998), saxophonist\nMimi Coertse (born 1932), musician\nTony Cox (born 1954), guitarist\nCrashcarburn\nCrossingpoint, Christian progressive hardcore band\nCutting Jade\nCivil Twilight\nCrow Black Sky\nD\nDa L.E.S (born 1985), hip-hop artist\nSimphiwe Dana (born 1980)\nDanny K (Daniel Koppel), R&B singer-songwriter\nKurt Darren, singer\nPierre de Charmoy\nSteven De Groote (1953–1989), classical pianist and winner of the Van Cliburn 
International Piano Competition\nFanie de Jager (born 1949), operatic tenor\nDie Antwoord\nDie Heuwels Fantasties\nBonginkosi Dlamini (born 1977), poet, actor and singer, also known as Zola\nDollar Brand (born 1934)\nDonald, singer\nDorp\nDownfall\nDr Victor and the Rasta Rebels, reggae\nDreamteam, hip-hop group from Durban\nJabulani Dubazana, singer, Ladysmith Black Mambazo\nLucky Dube (1964–2007)\nDuck and Cover, hard rock band\nAmpie du Preez, singer and guitarist\nJohnny Dyani (1945–1986), jazz double bassist\nDJ Speedsta , Hip Hop Dj\nE\nDennis East, singer\nShane Eagle (b. 1996), hip-hop artist\nAlton Edwards, singer\nEden, pop band\nElaine, singer and songwriter\nEndorphine\nEmtee (b. 1992), hip-hop artist\nDawid Engela (1931–1967), composer and musicologist\néVoid, 1980s new wave\nErica Eloff, soprano\nF\nThe Fake Leather Blues Band\nFalling Mirror\nBrenda Fassie (1964–2004)\nRicky Fataar (born 1952), drummer\nDuncan Faure, singer-songwriter formerly with the band Rabbitt\nMongezi Feza (1945–1975), trumpet player and flautist\nAnton Fig, drummer\nJosh Fix\nFokofpolisiekar, Afrikaans rock band\nFoto na Dans, Afrikaans rock band\nFour Jacks and a Jill\nJohnny Fourie (1937–2007), jazz guitarist\nFreshlyground\nFuzigish\nFifi Cooper\nG\nHotep Idris Galeta (born 1941), jazz pianist\nGoldfish\nAnton Goosen (born 1946), singer\nDie Grafsteensangers\nGoodluck\nH\nHalf Price (band)\nPaul Hanmer, composer, pianist, and jazz musician\nThe Helicopters\nKen E Henson (born 1947), musician\nHenry Ate\nSonja Herholdt\nHog Hoggidy Hog\nSteve Hofmeyr (born 1964), singer and actor\nDie Heuwels Fantasties\nI\nAbdullah Ibrahim (born 1934)\niFANi\nIsochronous\nJ\nJabu Khanyile (1957–2006)\nJack Parow\nRobbie Jansen (1949–2010)\nJeremy Loops (born 1986), modern folk, singer\nJesse Jordan Band\nTheuns Jordaan (born 1971), singer and songwriter\nClaire Johnston (born 1967), lead singer of Mango Groove\nTrevor Jones (born 1949), composer\nArmand Joubert\nJoy, a vocal group\nJohn Edmond (born 1936), singer\nJohn Ireland (born 1954), singer and songwriter\nJulian Bahula, jazz drummer*Juluka\nJust Jinjer (previously Just Jinger)\nJR, rapper\nJunkyard Lipstick\nL-Tido (born 1982), hip-hop artist, aka 16V\nK\nKabelo Mabalane (born 1976), kwaito artist, former member of TKZee\nKalahari Surfers\nWouter Kellerman, South African flautist\nJohannes Kerkorrel (1960–2002)\nSibongile Khumalo (born 1957), singer\nKOBUS!\nKoos Kombuis (born 1954)\nJohn Kongos (born 1945)\nKongos\nGé Korsten (1927–1999)\nDavid Kramer (born 1951)\nKwesta, hip-hop artist and poet\nK.O, hip-hop artist and record producer\nKabza De Small , King of Amapiano\nL\nFelix Laband, electronic musician\nRiku Lätti, songwriter, composer, music producer\nLadysmith Black Mambazo (born 1960), isicathamiya group\nDon Laka, jazz musician, pianist, producer\nRobert Lange (born 1948), music producer\nLani Groves\nLark\nJack Lerole (c.1940–2003), tin whistle player; singer\nSolomon Linda, songwriter\nLira\nLocnville\nRoger Lucey, singer and guitarist\nLucky Dube, singer and keyboard player\nM\nMark Haze, Rock singer\nSipho Mabuse (born 1951), singer\nArthur Mafokate, kwaito singer and composer\nMahlathini and the Mahotella Queens, a mbaqanga band\nVusi Mahlasela (born 1965)\nMakgona Tsohle Band (1964–1999), a mbaqanga instrumental band\nBongi Makeba (1950–1985), singer-songwriter\nMiriam Makeba (1932–2008)\nMalaika (group)\nPetronel Malan (1974–), concert pianist\nMan As Machine\nMandoza (born 1978), kwaito singer\nMango Groove\nMildred Mangxola (born 1944), 
singer in Mahlathini and the Mahotella Queens and member of the Mahotella Queens\nManfred Mann\nMarcAlex, group known for the hit \"Quick Quick\"\nJosef Marais (1905–1978)\nMartin PK\nHugh Masekela (born 1939)\nDorothy Masuka (born 1935), jazz singer\nNeels Mattheus (1935-2003), traditional musician\nDave Matthews (born 1967), lead singer and founding member of Dave Matthews Band\nIrene Mawela (born 1940), veteran singer and composer\nIllana May\nAbednego Mazibuko, singer with Ladysmith Black Mambazo\nAlbert Mazibuko (born 1948), singer with Ladysmith Black Mambazo\nThandiswa Mazwai (born 1976)\nChris McGregor (1936–1990), jazz pianist and composer\nBusi Mhlongo (1947–2010), singer, dancer and composer\nMind Assault\nMoreira Chonguica (born 1077), jazz saxophonist and producer\nKippie Moeketsi (1925–1983), saxophonist\nPops Mohamed (born 1949), jazz musician\nLouis Moholo (born 1940), drummer\nMatthew Mole\nLebo Morake (aka Lebo M)\nShaun Morgan (born 1980), singer also known as Shaun Morgan Welgemoed\nIke Moriz (born 1972), singer, composer and lyricist\nJean Morrison\nMshengu White Mambazo (1976–2003), junior choir of Ladysmith Black Mambazo\nRussel Mthembu, singer with Ladysmith Black Mambazo\nMoozlie (born 1992), hip-hop artist and television presenter\nMuzi (born 1991), electronic musician\nMoonchild Sanelly Musician and Dancer\nN\nNádine (born 1982), singer-songwriter\nThe Narrow\nNasty C (born 1997), hip-hop artist and record producer\nBongani Ndodana-Breen, composer\nJim Neversink, alternative country singer-songwriter and guitarist\nNew Academics\nSteve Newman\nBernoldus Niemand (1959–1995)\nSimon \"Mahlathini\" Nkabinde (1937–1999), Mbaqanga singer\nWest Nkosi (1940–1998), mbaqanga musician\nNo Friends of Harry\nNobesuthu Mbadu (born 1945), singer in Mahlathini and the Mahotella Queens and member of the Mahotella Queens\nSiphiwo Ntshebe (1974–2010), operatic tenor from New Brighton, Port Elizabeth\nAshton Nyte, solo artist as well as lead singer and producer of The Awakening\nThys Nywerheid\nNadia Nakai (born 1990), hip-hop artist\nO\nSarah Oates, violinist and associate leader Philharmonia orchestra\nWendy Oldfield, rock singer-songwriter\nOskido, record producer and songwriter\nP\nJack Parow, hip-hop artist\nThe Parlotones\nAl Paton, singer-songwriter, producer, and percussionist\nPeter Toussaint\nPetit Cheval\nJames Phillips, singer-songwriter also known as Bernoldus Niemand\nAnke Pietrangeli (born 1982), winner of the second series of Idols\nDizu Plaatjies, founder and former lead singer of Amampondo\nPlush\nPJ Powers (born 1960)\nPrime Circle\nProfessor (born 1978), Kwaito musician\nDudu Pukwana (1938–1990), saxophonist, pianist, and composer\nPurified, Christian hip-hop artist\nPatricia Majalisa, bubblegum artist\nQ\nQkumba Zoo\nR\nRabbitt\nRouge (rapper)\nTrevor Rabin (born 1954), musician\nDolly Rathebe (1928–2004)\nLaurika Rauch, Afrikaans singer\nRiddare av Koden\nSurendran Reddy (1962–2010) pianist and composer\nRiky Rick (born 1987), hip-hop artist and record producer\nRobin Auld\nRay Phiri (1947-2017), Jazz, jazz fusion, reggae and mbaqanga musician\nS\nSandy B\nSavuka\nRobert Schneider of The Apples in Stereo\nLeon Schuster\nSeether, formerly called Saron Gas, hard rock and alternative metal band\nGerard Sekoto (1913–1993)\nJudith Sephuma\nJockey Shabalala (1943–2006), singer with Ladysmith Black Mambazo\nJoseph Shabalala (born 1941), lead singer and founder of Ladysmith Black Mambazo\nMsizi Shabalala (born 1975), singer with Ladysmith Black Mambazo\nSibongiseni 
Shabalala (born 1973), singer with Ladysmith Black Mambazo\nTroye Sivan (born 1995), South African-born\nThamsanqa Shabalala (born 1977), singer with Ladysmith Black Mambazo\nThulani Shabalala (born 1968), singer with Ladysmith Black Mambazo\nShane Eagle (born 1996), hip-hop artist and lyricist\nShiraz, band active between 1984 - 1984\nMargaret Singana (1938–2000)\nRobert Sithole, pennywhistle player\nSkylight (band)\nKyla-Rose Smith (born 1982), violinist and dancer\nSonja Herholdt\nEnoch Sontonga, teacher, lay-preacher and composer who wrote \"Nkosi Sikelel' iAfrika\"\nSouth African National Youth Orchestra\nSpringbok Nude Girls\nZanne Stapelberg (born 1977), opera soprano\nDale Stewart (born 1979)\nSterling EQ\nStimela band formed in 1982\nStraatligkinders\nSugardrive\nValiant Swart\nOkmalumkoolkat (born 1983), hip-hop artist\nStogie T , Hip Hop Artist\nT\nTananas\nTaxi Violence\nPeta Teanet, singer\nTKZee, kwaito group\nHilda Tloubatla (born 1942), lead singer of Mahotella Queens, and singer in Mahlathini and the Mahotella Queens\nTokollo Tshabalala, kwaito singer also known as Magesh\nPeter Toussaint, singer-songwriter and guitar player\nToya Delazy, pop singer and pianist\nTribe After Tribe\nTuks, hip-hop artist\nTumi and the Volume\nTweak\nU\nUhuru— Kwaito and afropop music group\nUrban Creep\nV\nBobby van Jaarsveld (born 1987), singer-songwriter and actor\nBok van Blerk (born 1978)\nJason van Wyk (born 1990), composer, producer\nVan Coke Kartel\nAmor Vittone (born 1972)\nValiant Swart (born 1965)\nW\nWatershed\nWargrave\nShaun Welgemoed (born 1978)\nHeinz Winckler (born 1978), singer who won the first series of Idols\nWinston's Jive Mixup\nWonderboom\nMarkus Wormstorm, electronic musician and composer\nY\nPretty Yende (born 1985), operatic soprano from Piet Retief, Mpumalanga\nYorxe (born 1998), singer and songwriter\nYoungstaCPT (born 1991), rapper and songwriter\nZ\nZahara, singer-songwriter and poet\nZebra & Giraffe\nKaren Zoid (born 1978)\nZola (born 1977)\nZonke (born 1979)\nAuth3ntiC\n\nSouth Sudan\nYaba Angelosi\nMary Boyoi\nEmmanuel Jal\nSilver X\n\nSudan\nAbdel Aziz El Mubarak\nAbdel Gadir Salim\nAlKabli\nEmmanuel Jal\nMohammed Wardi\nMohamed Gubara\n\nSwaziland\nDusty & Stones\nKambi\nTendaness\n\nTanzania\nAli Kiba\nBill Nass\nJoseph Lusungu\nMnenge Ramadhani\nMuhiddin Maalim\nHassani Bitchuka\nSaidi Mabera\nWilson Kinyonga\nRemmy Ongala\nKasaloo Kyanga\nMr. Nice\nSaida Karoli\nDiamond Platnumz\nLady Jaydee\nProfessor Jay\nTID\nRose Mhando\nVanessa Mdee\nA.Y.\nRuby\nRayvanny\nBi Kidude\nCarola Kinasha\nImani Sanga\nTudd Thomas\nHarmonize\nJoel lwaga\nPaul Clement\nGoodluck Gozbert\nBella Kombo\nSara Nyongole\nAngel Benard\nZoravo\nKibonge Wa Yesu\nCalvin John\nMirriam Mbepera\nDerick Marton\nBeda Andrew\nDr. 
Ipyana\nAshley Nassary\nJessica Honore\nChristina Shusho\nWalter Chilambo\nBoaz Danken\nMartha Mwaipaja\nJohn Lisu\n\nTogo\nBella Bellow\nKing Mensah\n\nUganda\nHoly Keane Amooti\nAziz Azion\nA Pass\nAfrigo Band\nBabaluku\nBataka Squad\nBebe Cool\nBobi Wine\nBosmic Otim\nFresh Kid Uganda\nJose Chameleone\nMac Elvis\nExodus\nDavid Lutalo\nEddy Kenzo\nFik Fameica\nGabriel K\nGoodlyfe Crew\nSam Gombya\nSophie Gombya\nGiovanni Kiyingi\nJackie Akello\nJackie Chandiru\nJanzi Band\nJemimah Sanyu\nJimmy Katumba\nJudith Babirye\nJuliana Kanyomozi\nPaulo Kafeero\nMichael Ross Kakooza\nAngella Katatumba\nIsaiah Katumwa\nJoanita Kawalya\nLeila Kayondo\nKeko\nSuzan Kerunen\nMaurice Kirya\nKlear Kut\nSylver Kyagulanyi\nPhilly Lutaaya\nLevixone\nLydia Jazmine\nLumix Da Don\nMad Ice\nMaster Blaster\nRachael Magoola\nFred Masagazi\nMoses Matovu\nMariam Ndagire\nLilian Mbabazi\nFrank Mbalire\nMilege\nPeter Miles\nPhina Mugerwa\nBenon Mugumbya\nFille Mutoni\nGrace Nakimera\nHalima Namakula\nRema Namakula\nIryn Namubiru\nNavio\nNick Nola\nIrene Ntale\nGravity Omutujju\nGeoffrey Oryema\nPapa Cidy\nProducer Hannz\nRabadaba\nRachel K\nRagga Dee\nRadio and Weasle\nRuyonga\nSaba Saba aka Krazy Native\nCinderella Sanyu\nSsewa Ssewa\nSera\nSheebah Karungi\nSister Charity\nSpice Diana\nMadoxx Ssemanda Sematimba\nSt. Nelly-Sade\nThe Mith\nHenry Tigan\nAllan Toniks\nTshila\nTrix Lane\nUndercover Brothers Ug\nVampino\nViboyo\nElly Wamala\nWilson Bugembe\nBobi Wine\nGNL Zamba\n\nZambia\nAlick Nkhata\nB Flow\nBallad Zulu\nChef 187\nJordan Katembula\nJust Slim\nLarry Maluma\nLazarus Tembo\nLeo \"K'millian\" Moyo\nLily Tembo\nMacky 2\nMaiko Zulu\nMampi\nMoonga K.\nNashil Pichen\nOC Osilliation\nPaul Ngozi\nShom-C\nVictor Kachaka\nYvonne Mwale\nPetersen Zagaze\nBobby East\nAmayenge\nDistro Kuomboka\nMashome Blue Jeans\nWitch\nZone Fam\n\nZimbabwe\nBarura Express – band\nBhundu Boys – jit and chimurenga music band\nHohodza – band\nMbira dzeNharira – mbira band\nMechanic Manyeruke and the Puritans – gospel music group\nR.U.N.N. 
family – mbira-inspired reggae and rhumba group\nSiyaya – music and dance group\nFlint Bedrock (born 1985) – pop singer-songwriter\nMkhululi Bhebhe (born 1984) - contemporary gospel vocalist\nCharles Charamba (born 1971) – gospel singer[1]\nOlivia Charamba (1999–1999) – gospel singer\nBrian Chikwava (born 1971) – writer and musician\nSimon Chimbetu (1955–2005) – singer-songwriter and guitarist[2]\nJames Chimombe (1951–1990) – singer and guitarist[2]\nMusekiwa Chingodza (born 1970) – mbira and marimba player\nChirikure Chirikure (born 1962) – musician and songwriter\nStella Chiweshe (born 1946) – mbira player and singer-songwriter\nDizzy Dee (1999–1999) – Australia-based reggae artist\nLeonard Dembo (1959–1996) – guitarist and singer-songwriter; member of the band Barura Express[2]\nTehn Diamond (born 1985) – Zimbabwean hip hop musician and rapper\nChartwell Dutiro (born 1957) – mbira player and singer-songwriter[3]\nMbuya Dyoko (1944–2013) – mbira player\nJohn Edmond (born 1936) – Rhodesian folk singer\nTendayi Gahamadze (born 1959) – mbira player and singer-songwriter; member of Mbira dzeNharira\nMichael Gibbs (born 1937) – England-based jazz composer\nDerek Hudson (1934–2005) – English-born conductor and composer\nNgonidzashe Kambarami (born 1983) – urban grooves artist\nVictor Kunonga (born 1974) – Afrojazz singer-songwriter\nForward Kwenda (born 1963) – mbira player\nJah Prayzah (born 1987) – Afropop and Afrojazz musician\nHope Masike mbira player and percussionist and singer\nIgnatius Mabasa (born 1971) – writer and musician\nAlick Macheso (born 1968) – singer-songwriter and guitarist\nSafirio Madzikatire (1932–1996) – actor and musician[2]\nMadzitatiguru (born 1989) – poet and musician\nTakunda Mafika (1983–2011) – mbira player\nCosmas Magaya (born 1953) – mbira player\nTkay Maidza (born 1996) – Australia-based singer-songwriter and rapper\nLovemore Majaivana (born 1954) – Ndebele music singer-songwriter\nZeke Manyika (born 1955) – England-based rock and roll singer-songwriter and drummer\nLeonard Mapfumo (born 1983) – urban grooves and hip hop artist\nThomas Mapfumo (born 1945) – chimurenga music artist\nChiwoniso Maraire (1976–2013) – mbira player and singer-songwriter[2]\nDumisani Maraire (1944–1999) – mbira payer and singer-songwriter\nMashasha (born 1982) – guitarist and singer-songwriter\nMaskiri (born 1980) – hip hop artist and rapper\nDorothy Masuka (born 1935) – South Africa-based jazz singer\nPaul Matavire (1961–2005) – blind jit musician[2]\nLouis Mhlanga (born 1956) – South Africa-based Afrojazz singer-songwriter and guitarist\nObi Mhondera (born 1980) – England-based pop songwriter\nEric Moyo (born 1982) – singer\nTongai Moyo (1968–2011) – sungura singer-songwriter[2]\nAugust Msarurgwa (1920–1968) – composer\nAudius Mtawarira (born 1977) – Australia-based urban grooves artist\nOliver Mtukudzi (1952–2019) – Afrojazz singer-songwriter and guitarist\nSam Mtukudzi (1988–2010) – Afrojazz musician[2]\nAnna Mudeka – England-based musician\nCarol Mujokoro – gospel music artist\nEphat Mujuru (1950–2001) – mbira player[2]\nPrince Kudakwashe Musarurwa (born 1988) – Afrojazz musician\nIsaac Musekiwa – DR Congo-based soukous artist and saxophonist\nBusi Ncube (born 1963) – mbira player and singer\nAlbert Nyathi (born 1962) – poet and singer-songwriter\nJah Prayzah, musician\nRamadu (born 1975) – singer-songwriter\nRoki (born 1985) – Madagascar-born urban grooves artist\nKingsley Sambo (1936–1977) – jazz guitarist\nHerbert Schwamborn (born 1973) – Germany-based hip hop and electronic 
music artist; member of the band Söhne Mannheims\nJonah Sithole (1952–1997) – chimurenga music artist and guitarist[2]\nSolomon Skuza (1956–1995) – pop singer-songwriter[2]\nBuffalo Souljah (born 1980) – Zimdancehall and reggae artist\nShingisai Suluma (born 1971) – gospel music artist\nTakura (born 1991) – house music and hip hop artist\nTocky Vibes (born 1993) - Singer Lyricist Songwriter\nSystem Tazvida (born 1968) – singer-songwriter\nBiggie Tembo Jr. (born 1988) – jit musician\nClem Tholet (1948–2004) – Rhodesian guitarist and folk singer\nGarikayi Tirikoti (born 1961) – mbira player\nDiego Tryno (born 1998) - urban contemporary and hip-hop musician\nViomak – protest musician and activist\nPatrick Mukwamba (born 1951) – pop singer\nTarisai Vushe (born 1987) – Australia-based singer who appeared on Australian Idol\nEdith WeUtonga (born 1979) – Afrojazz singer-songwriter and bass guitarist\nWinky D (born 1983) – dancehall and reggae artist\nJonathan Wutawunashe – gospel artist\nLeonard Zhakata (born 1968) – sungura and adult contemporary music artist\nZinjaziyamluma- maskandi singer\nCharity Zisengwe – contemporary Christian music artist\n\nSoukous\nAntoine Kolosoy, a.k.a. Papa Wendo\nAurlus Mabele\nAwilo Longomba\nBozi Boziana\nDiblo Dibala\nDindo Yogo\nDr Nico Kasanda\nEmpire Bakuba\nEvoloko Jocker\nFally Ipupa\nFerre Gola\nFrançois Luambo Makiadi, band leader of OK Jazz\nGrand Kalle, band leader of Grand Kalle et l'African Jazz\nKanda Bongo Man\nKasaloo Kyanga\nKing Kester Emeneya\nKoffi Olomide\nLes Quatre Étoiles 4 Etoiles\nLoketo\nM'bilia Bel\nMeiway\nMose Fan Fan\nMonique Séka\nNyboma\nOliver N'Goma\nPapa Wemba\nPepe Kalle\nQuartier Latin International\nLes Quatre Étoiles\nRemmy Ongala\nRigo Star\nSam Fan Thomas\nSam Mangwana\nSamba Mapangala, band leader of Orchestra Virunga\nTabu Ley Rochereau, band leader of African Fiesta\nTshala Muana\nWerrason\nYondo Sister\nZaiko Langa Langa\"\"\".split(\"\\n\")\n\nfrom string import ascii_uppercase\ndata = {}\ncountry = None\nfor line in txt:\n if country is None:\n country = line\n data[country] = []\n continue\n if len(line) == 0:\n country = None\n continue\n artist = line\n artist = artist.split(\" (born\")[0]\n artist = artist.split(\" (1\")[0]\n artist = artist.split(\" (b\")[0]\n artist = artist.split(\" (c\")[0]\n artist = artist.split(\" (p\")[0]\n artist = artist.split(\", \")[0]\n artist = artist.split(\" – \")[0] \n artist = artist.split(\" - \")[0]\n artist = artist.replace(\"(band)\", \"\").strip()\n artist = artist.replace(\"(group)\", \"\").strip()\n artist = artist.replace(\"(rapper)\", \"\").strip()\n if artist in ascii_uppercase:\n continue\n \n data[country].append(artist)\n\nfrom pandas import Series\nafrican_artists = DataFrame(Series(data))\nafrican_artists.columns = [\"Artists\"]\nafrican_artists.head()\n\nsaveFile(ifile=\"/Volumes/Piggy/Charts/data/africa/categories/Artists.p\", idata=african_artists, debug=True)",
"Saving data to /Volumes/Piggy/Charts/data/africa/categories/Artists.p\n --> This file is 16.7kB.\nSaved data to /Volumes/Piggy/Charts/data/africa/categories/Artists.p\n --> This file is 16.7kB.\n"
],
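[
"# A small, hypothetical helper (not in the original code) that mirrors the name-cleaning\n# rules applied above, factored into one reusable function for clarity.\ndef clean_artist(raw):\n    artist = raw\n    for tag in (\" (born\", \" (1\", \" (b\", \" (c\", \" (p\"):\n        artist = artist.split(tag)[0]\n    artist = artist.split(\", \")[0]\n    artist = artist.split(\" – \")[0]\n    artist = artist.split(\" - \")[0]\n    for suffix in (\"(band)\", \"(group)\", \"(rapper)\"):\n        artist = artist.replace(suffix, \"\")\n    return artist.strip()\n\n# quick check against a few entries from the raw list above\nfor raw in [\"Zola (born 1977)\", \"Rouge (rapper)\", \"Flint Bedrock (born 1985) – pop singer-songwriter\"]:\n    print(raw, \"->\", clean_artist(raw))",
"_____no_output_____"
],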
[
"%autoreload\nfrom Africa import africaData\nafrica = africaData()\nafrica.parse()",
"Found 1 files.\nAngola [Ngola Ritmos, Waldemar Bastos, Bonga, Teta La...\nBenin [Angelique Kidjo, Wally Badarou]\nNigeria [2face Idibia, 9ice, A-Q, Abiodun Koya, Adé Ba...\nBotswana [Banjo Mosele, Franco and Afro Musica, Matsien...\nCameroon [Njacko Backo, Francis Bebey, Moni Bilé, Dibou...\nCape Verde [Cesaria Evora, Gil Semedo, Côte d'Ivoire, Alp...\nRepublic of the Congo (Congo-Brazzaville) [Youlou Mabiala, Pierre Moutouari, Werrason, P...\nDemocratic Republic of the Congo (former Zaire) [Abeti Masikini, African Fiesta, Avelino, Awil...\nEgypt [Amal Maher, Amira Selim, Amr Diab, Angham, An...\nEritrea [Abraham Afewerki]\nEthiopia [Aminé, Mulugeta Abate, Teddy Afro, Alemu Aga,...\nGambia [Sona Maya Jobarteh, Foday Musa Suso]\nGhana [Guy Warren, Rebop Kwaku Baah, Becca, DopeNati...\nKenya [Akothee, Avril, Ayub Ogada, Cece Sagini, Dadd...\nMadagascar [AmbondronA, Vaiavy Chila, Mily Clément, Ninie...\nMali [Boubacar Traoré, Mory Kanté, Salif Keita, Tou...\nMorocco [Saad Lamjarred, Elam Jay, AnoGhan, Oussama Be...\nRwanda [Alpha Rwirangira, Tom Close, Riderman, King J...\nSenegal [Akon, Baaba Maal, Étoile de Dakar, Ismaël Lô,...\nSierra Leone [Bai Kamara, S. E. Rogie, Steady Bongo, K-Man,...\nSomalia [Xiddigaha Geeska, Mohamed Mooge Liibaan, Abdu...\nSouth Africa [African Children's Choir, Afrotraction, AKA, ...\nSouth Sudan [Yaba Angelosi, Mary Boyoi, Emmanuel Jal, Silv...\nSudan [Abdel Aziz El Mubarak, Abdel Gadir Salim, AlK...\nSwaziland [Dusty & Stones, Kambi, Tendaness]\nTanzania [Ali Kiba, Bill Nass, Joseph Lusungu, Mnenge R...\nTogo [Bella Bellow, King Mensah]\nUganda [Holy Keane Amooti, Aziz Azion, A Pass, Afrigo...\nZambia [Alick Nkhata, B Flow, Ballad Zulu, Chef 187, ...\nZimbabwe [Barura Express, Bhundu Boys, Hohodza, Mbira d...\nSoukous [Antoine Kolosoy, Aurlus Mabele, Awilo Longomb...\nName: Artists, dtype: object\nSaving data to /Volumes/Piggy/Charts/data/africa/results/Angola.p\n --> This file is 277B.\nSaved data to /Volumes/Piggy/Charts/data/africa/results/Angola.p\n --> This file is 277B.\nSaving data to /Volumes/Piggy/Charts/data/africa/results/Benin.p\n --> This file is 57B.\nSaved data to /Volumes/Piggy/Charts/data/africa/results/Benin.p\n --> This file is 57B.\nSaving data to /Volumes/Piggy/Charts/data/africa/results/Nigeria.p\n --> This file is 2.0kB.\nSaved data to /Volumes/Piggy/Charts/data/africa/results/Nigeria.p\n --> This file is 2.0kB.\nSaving data to /Volumes/Piggy/Charts/data/africa/results/Botswana.p\n --> This file is 189B.\nSaved data to /Volumes/Piggy/Charts/data/africa/results/Botswana.p\n --> This file is 189B.\nSaving data to /Volumes/Piggy/Charts/data/africa/results/Cameroon.p\n --> This file is 369B.\nSaved data to /Volumes/Piggy/Charts/data/africa/results/Cameroon.p\n --> This file is 369B.\nSaving data to /Volumes/Piggy/Charts/data/africa/results/Cape_Verde.p\n --> This file is 182B.\nSaved data to /Volumes/Piggy/Charts/data/africa/results/Cape_Verde.p\n --> This file is 182B.\nSaving data to /Volumes/Piggy/Charts/data/africa/results/Republic_of_the_Congo_(Congo-Brazzaville).p\n --> This file is 204B.\nSaved data to /Volumes/Piggy/Charts/data/africa/results/Republic_of_the_Congo_(Congo-Brazzaville).p\n --> This file is 204B.\nSaving data to /Volumes/Piggy/Charts/data/africa/results/Democratic_Republic_of_the_Congo_(former_Zaire).p\n --> This file is 832B.\nSaved data to /Volumes/Piggy/Charts/data/africa/results/Democratic_Republic_of_the_Congo_(former_Zaire).p\n --> This file is 832B.\nSaving data to 
/Volumes/Piggy/Charts/data/africa/results/Egypt.p\n --> This file is 326B.\nSaved data to /Volumes/Piggy/Charts/data/africa/results/Egypt.p\n --> This file is 326B.\nSaving data to /Volumes/Piggy/Charts/data/africa/results/Eritrea.p\n --> This file is 38B.\nSaved data to /Volumes/Piggy/Charts/data/africa/results/Eritrea.p\n --> This file is 38B.\nSaving data to /Volumes/Piggy/Charts/data/africa/results/Ethiopia.p\n --> This file is 515B.\nSaved data to /Volumes/Piggy/Charts/data/africa/results/Ethiopia.p\n --> This file is 515B.\nSaving data to /Volumes/Piggy/Charts/data/africa/results/Gambia.p\n --> This file is 62B.\nSaved data to /Volumes/Piggy/Charts/data/africa/results/Gambia.p\n --> This file is 62B.\nSaving data to /Volumes/Piggy/Charts/data/africa/results/Ghana.p\n --> This file is 1.2kB.\nSaved data to /Volumes/Piggy/Charts/data/africa/results/Ghana.p\n --> This file is 1.2kB.\nSaving data to /Volumes/Piggy/Charts/data/africa/results/Kenya.p\n --> This file is 541B.\nSaved data to /Volumes/Piggy/Charts/data/africa/results/Kenya.p\n --> This file is 541B.\nSaving data to /Volumes/Piggy/Charts/data/africa/results/Madagascar.p\n --> This file is 347B.\nSaved data to /Volumes/Piggy/Charts/data/africa/results/Madagascar.p\n --> This file is 347B.\nSaving data to /Volumes/Piggy/Charts/data/africa/results/Mali.p\n --> This file is 300B.\nSaved data to /Volumes/Piggy/Charts/data/africa/results/Mali.p\n --> This file is 300B.\nSaving data to /Volumes/Piggy/Charts/data/africa/results/Morocco.p\n --> This file is 447B.\nSaved data to /Volumes/Piggy/Charts/data/africa/results/Morocco.p\n --> This file is 447B.\nSaving data to /Volumes/Piggy/Charts/data/africa/results/Rwanda.p\n --> This file is 178B.\nSaved data to /Volumes/Piggy/Charts/data/africa/results/Rwanda.p\n --> This file is 178B.\nSaving data to /Volumes/Piggy/Charts/data/africa/results/Senegal.p\n --> This file is 220B.\nSaved data to /Volumes/Piggy/Charts/data/africa/results/Senegal.p\n --> This file is 220B.\nSaving data to /Volumes/Piggy/Charts/data/africa/results/Sierra_Leone.p\n --> This file is 122B.\nSaved data to /Volumes/Piggy/Charts/data/africa/results/Sierra_Leone.p\n --> This file is 122B.\nSaving data to /Volumes/Piggy/Charts/data/africa/results/Somalia.p\n --> This file is 208B.\nSaved data to /Volumes/Piggy/Charts/data/africa/results/Somalia.p\n --> This file is 208B.\nSaving data to /Volumes/Piggy/Charts/data/africa/results/South_Africa.p\n --> This file is 3.8kB.\nSaved data to /Volumes/Piggy/Charts/data/africa/results/South_Africa.p\n --> This file is 3.8kB.\nSaving data to /Volumes/Piggy/Charts/data/africa/results/South_Sudan.p\n --> This file is 83B.\nSaved data to /Volumes/Piggy/Charts/data/africa/results/South_Sudan.p\n --> This file is 83B.\nSaving data to /Volumes/Piggy/Charts/data/africa/results/Sudan.p\n --> This file is 118B.\nSaved data to /Volumes/Piggy/Charts/data/africa/results/Sudan.p\n --> This file is 118B.\nSaving data to /Volumes/Piggy/Charts/data/africa/results/Swaziland.p\n --> This file is 61B.\nSaved data to /Volumes/Piggy/Charts/data/africa/results/Swaziland.p\n --> This file is 61B.\nSaving data to /Volumes/Piggy/Charts/data/africa/results/Tanzania.p\n --> This file is 575B.\nSaved data to /Volumes/Piggy/Charts/data/africa/results/Tanzania.p\n --> This file is 575B.\nSaving data to /Volumes/Piggy/Charts/data/africa/results/Togo.p\n --> This file is 49B.\nSaved data to /Volumes/Piggy/Charts/data/africa/results/Togo.p\n --> This file is 49B.\nSaving data to 
/Volumes/Piggy/Charts/data/africa/results/Uganda.p\n --> This file is 1.1kB.\nSaved data to /Volumes/Piggy/Charts/data/africa/results/Uganda.p\n --> This file is 1.1kB.\nSaving data to /Volumes/Piggy/Charts/data/africa/results/Zambia.p\n --> This file is 365B.\nSaved data to /Volumes/Piggy/Charts/data/africa/results/Zambia.p\n --> This file is 365B.\nSaving data to /Volumes/Piggy/Charts/data/africa/results/Zimbabwe.p\n"
],
[
"%autoreload\nfrom Africa import africaData\nafrica = africaData()\n#africa.setDBRenames(manDB)\n#africa.setMultiDBRenames(multimanDB)\nafrica.setChartUsage(rank=[0,1,2,3])\nafrica.setFullChartData()\nafrica.setArtistAlbumData()\nafrica.saveArtistAlbumData()\nafrica.saveFullChartData()",
"Using Charts For Rank 0\n Categories: ['south_africa']\n\tChart: south_africa\n Using 1 Charts\nUsing 1 Charts For Rank 0\nUsing Charts For Rank 1\n Categories: ['egypt']\n\tChart: egypt\n Using 1 Charts\nUsing 1 Charts For Rank 1\nUsing Charts For Rank 2\n Categories: ['kenya']\n\tChart: kenya\n Using 1 Charts\nUsing 1 Charts For Rank 2\nUsing Charts For Rank 3\n Categories: ['ethiopia', 'eritrea']\n\tChart: ethiopia\n Using 1 Charts\n\tChart: eritrea\n Using 1 Charts\nUsing 2 Charts For Rank 3\n=== ChartUsage ===\n Using Charts (Rank=[0, 1, 2, 3]): ['South_Africa', 'Egypt', 'Kenya', 'Ethiopia', 'Eritrea']\nFound 31 summary files\n Using Egypt\n Using Eritrea\n Using Ethiopia\n Using Kenya\n Using South_Africa\nRenamed 0 single artists\nSaving 426 Artist Album Data to currentafricaArtistAlbumData.p\nSaving data to currentafricaArtistAlbumData.p\n --> This file is 6.4kB.\nSaved data to currentafricaArtistAlbumData.p\n --> This file is 6.4kB.\nSaving 426 Full Artist Data\nSaving data to currentafricaFullChartArtistAlbumData.p\n --> This file is 8.9kB.\nSaved data to currentafricaFullChartArtistAlbumData.p\n --> This file is 8.9kB.\n"
],
[
"from searchUtils import findExt\nfrom fileUtils import getBaseFilename\nnames = [getBaseFilename(ifile) for ifile in findExt(\"/Volumes/Piggy/Charts/data/africa/results/\", \".p\")]\nfor name in names:\n key = name.replace(\"(\", \"\")\n key = key.replace(\")\", \"\")\n key = key.replace(\"-\", \"_\")\n print(\"self.{0: <50} = ['{1}']\".format(key.lower(), name))",
"self.angola = ['Angola']\nself.benin = ['Benin']\nself.nigeria = ['Nigeria']\nself.botswana = ['Botswana']\nself.cameroon = ['Cameroon']\nself.cape_verde = ['Cape_Verde']\nself.republic_of_the_congo_congo_brazzaville = ['Republic_of_the_Congo_(Congo-Brazzaville)']\nself.democratic_republic_of_the_congo_former_zaire = ['Democratic_Republic_of_the_Congo_(former_Zaire)']\nself.egypt = ['Egypt']\nself.eritrea = ['Eritrea']\nself.ethiopia = ['Ethiopia']\nself.gambia = ['Gambia']\nself.ghana = ['Ghana']\nself.kenya = ['Kenya']\nself.madagascar = ['Madagascar']\nself.mali = ['Mali']\nself.morocco = ['Morocco']\nself.rwanda = ['Rwanda']\nself.senegal = ['Senegal']\nself.sierra_leone = ['Sierra_Leone']\nself.somalia = ['Somalia']\nself.south_africa = ['South_Africa']\nself.south_sudan = ['South_Sudan']\nself.sudan = ['Sudan']\nself.swaziland = ['Swaziland']\nself.tanzania = ['Tanzania']\nself.togo = ['Togo']\nself.uganda = ['Uganda']\nself.zambia = ['Zambia']\nself.zimbabwe = ['Zimbabwe']\nself.soukous = ['Soukous']\n"
],
[
"#uDisc.setDBRenames(manDB)\n#uDisc.setMultiDBRenames(multimanDB)\nuDisc.setChartUsage(rank=[0])\nuDisc.setFullChartData()\nuDisc.setArtistAlbumData()\nuDisc.saveArtistAlbumData()\nuDisc.saveFullChartData()",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d0ed2b6e046ff5e6ff86ce124299051cb46e59b4 | 160,258 | ipynb | Jupyter Notebook | baseline/SEN/.ipynb_checkpoints/sen_loss-checkpoint.ipynb | ISCASTEAM/Sequencing-the-musical-sections | 0d02e1260b83fde8b9b18fa6fd8ebe29ce3bc15f | [
"MIT"
] | 21 | 2018-12-06T08:42:33.000Z | 2021-12-11T05:52:53.000Z | baseline/SEN/.ipynb_checkpoints/sen_loss-checkpoint.ipynb | ISCASTEAM/Sequencing-the-musical-sections | 0d02e1260b83fde8b9b18fa6fd8ebe29ce3bc15f | [
"MIT"
] | 1 | 2018-12-06T08:42:57.000Z | 2018-12-06T10:03:11.000Z | baseline/SEN/.ipynb_checkpoints/sen_loss-checkpoint.ipynb | ISCASTEAM/Sequencing-the-musical-sections | 0d02e1260b83fde8b9b18fa6fd8ebe29ce3bc15f | [
"MIT"
] | null | null | null | 696.773913 | 154,680 | 0.940989 | [
[
[
"import numpy as np\nimport librosa\nimport glob\nimport os\nfrom random import randint\nimport torch\nimport torch.nn as nn\nfrom torch.utils import data\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data import sampler\nimport matplotlib.pyplot as plt\nimport torch.nn.functional as F\n%matplotlib inline\n\n\nimport import_ipynb\nfrom sen_dataloader import *\nfrom sen_net import *",
"importing Jupyter notebook from sen_dataloader.ipynb\nimporting Jupyter notebook from sen_net.ipynb\n"
],
[
"#Loss is every data_batch loss\n#meanLoss is Loss[:-100]/100\n\ndef plot(loss,mean_loss):\n from IPython import display\n display.clear_output(wait=True)\n display.display(plt.gcf())\n plt.figure(figsize=(20,10))\n plt.clf()\n\n plt.title('Training...')\n plt.xlabel('iterator')\n plt.ylabel('Loss')\n plt.plot(loss)\n plt.plot(mean_loss)\n plt.show()",
"_____no_output_____"
],
[
"torch.cuda.set_device(3)\nprint(torch.cuda.current_device())\n\nSENmodel = SEN_classify().double().cuda()",
"3\n"
],
[
"max_epoch = 1\noptimizer = optim.Adam(SENmodel.parameters(), lr=1e-3)\n\nweight = [1,0.2]\nclass_weights = torch.DoubleTensor(weight).cuda()\nlossFunc = nn.CrossEntropyLoss(weight=class_weights)\n\ncal_loss_list = []\ncal_mean_loss_list = []\n\n# 大概3.6w pair训练数据\nfor epoch in range(max_epoch):\n running_loss = 0.0\n for i, data in enumerate(training_generator, 0):\n inputs, labels = data\n labels = labels.cuda()\n x1 = torch.transpose((inputs[...,0]),2,3).cuda()\n x2 = torch.transpose((inputs[...,1]),2,3).cuda()\n \n optimizer.zero_grad()\n outputs = SENmodel(x1,x2)\n loss = lossFunc(outputs, labels)\n loss.backward()\n optimizer.step()\n\n running_loss = loss.item()\n cal_loss_list.append(running_loss)\n if(i<100):\n cal_mean_loss_list.append(sum(cal_loss_list)/len(cal_loss_list))\n else:\n cal_mean_loss_list.append(sum(cal_loss_list[-100:])/100)\n\n if(i%10 == 0):\n plot(cal_loss_list,cal_mean_loss_list)\n",
"_____no_output_____"
],
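[
"# Added sketch (not in the original notebook): a rough accuracy estimate for the trained\n# SEN model. This reuses `training_generator` from above, so it measures training-set\n# accuracy only; a held-out generator would be needed for a real evaluation.\nSENmodel.eval()\ncorrect, total = 0, 0\nwith torch.no_grad():\n    for i, data in enumerate(training_generator, 0):\n        inputs, labels = data\n        labels = labels.cuda()\n        x1 = torch.transpose((inputs[...,0]), 2, 3).cuda()\n        x2 = torch.transpose((inputs[...,1]), 2, 3).cuda()\n        outputs = SENmodel(x1, x2)\n        preds = outputs.argmax(dim=1)  # predicted class per pair\n        correct += (preds == labels).sum().item()\n        total += labels.size(0)\n        if i >= 100:  # a few batches give a rough estimate\n            break\nprint('approx. accuracy: {:.3f}'.format(correct / total))",
"_____no_output_____"
],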
[
"torch.save(SENmodel.state_dict(), '../../model/baseline_sen.pkl')\nprint('Finished Training')",
"_____no_output_____"
],
[
"#### every 10 batch to plot one picture\nwith open('./baseline_tsn_loss.txt', 'w') as the_file:\n for token in cal_loss_list:\n the_file.write(str(token)+\"\\n\")\n \nwith open('./baseline_tsn_meanloss.txt', 'w') as the_file:\n for token in cal_mean_loss_list:\n the_file.write(str(token)+\"\\n\")\n\nprint(len(cal_loss_list),len(cal_mean_loss_list))",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d0ed40e1a60efb828c724ebc2e6ee4e909ec4552 | 165,949 | ipynb | Jupyter Notebook | examples/welllogs/1-basics/2-groups_wells.ipynb | scuervo91/reservoirpy | a4db620baf3ff66a85c7f61b1919713a8642e6fc | [
"MIT"
] | 16 | 2020-05-07T01:57:04.000Z | 2021-11-27T12:45:59.000Z | examples/welllogs/1-basics/2-groups_wells.ipynb | scuervo91/reservoirpy | a4db620baf3ff66a85c7f61b1919713a8642e6fc | [
"MIT"
] | null | null | null | examples/welllogs/1-basics/2-groups_wells.ipynb | scuervo91/reservoirpy | a4db620baf3ff66a85c7f61b1919713a8642e6fc | [
"MIT"
] | 5 | 2020-05-12T07:28:24.000Z | 2021-12-10T21:24:59.000Z | 70.496602 | 26,416 | 0.719643 | [
[
[
"# WELL NOTEBOOK\n## Well logs visualization & petrophysics",
"_____no_output_____"
],
[
"Install the the repository reservoirpy from github and import the required packages",
"_____no_output_____"
]
],
[
[
"import os\npath = os.path.join('/home/santiago/Documents/dev/reservoirpy')\nimport sys\nsys.path.insert(0,path)\nimport pandas as pd\nimport geopandas as gpd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom shapely.geometry import Point\nimport folium\nfrom pyproj import Proj, transform, CRS, Transformer\nimport pyvista as pv\nfrom reservoirpy.wellpy import path as ph\n",
"_____no_output_____"
]
],
[
[
"### Well atributes\n\nWell atributes, name, rte, coordinates, survey",
"_____no_output_____"
]
],
[
[
"deviation = pd.read_csv('survey.csv', header=[0])\ndeviation.head()",
"_____no_output_____"
],
[
"tops1 = ph.tops({'formation':['fm1','fm2'],'md_top':[5000,5100],'md_bottom':[5099,5145]})\ntops1",
"_____no_output_____"
]
],
[
[
"## Create some wells",
"_____no_output_____"
]
],
[
[
"#Create the well object\nname1 = 'well-1'\nrte1 = 1515.78 # Rotary table Elevation\nsurf_coord1 = [1000000,1000000]#Point(1000100,1000000,520)\ncrs1 = 'EPSG:3117'\ntops1 = ph.tops({'formation':['fm1','fm2'],'md_top':[12000,12100],'md_bottom':[12099,12145]})\ndeviation1 = deviation.copy()\ndeviation1['azi'] = deviation1['azi'] + 0\n\nw1 = ph.well(name=name1, \n rte=rte1, \n surf_coord=surf_coord1, \n survey = deviation1,\n tops=tops1,\n crs=crs1)\n\n\n#Create the well object\nname2 = 'well-2'\nrte2 = 515 # Rotary table Elevation\nsurf_coord2 = Point(1000100,1000000)\ncrs2 = 'EPSG:3117'\ntops2 = ph.tops({'formation':['fm1','fm2'],'md_top':[12000,12100],'md_bottom':[12099,12145]})\ndeviation2 = deviation.copy()\ndeviation2['azi'] = deviation1['azi'] + 0\n\nw2 = ph.well(name=name2, \n rte=rte2, \n surf_coord=surf_coord2,\n survey = deviation2,\n tops=tops2,\n crs=crs2)\n\n\n#Create the well object\nname3 = 'well-3'\nrte3 = 515 # Rotary table Elevation\nsurf_coord3 = Point(1000500,1000000)\ncrs3 = 'EPSG:3117'\ntops3 = ph.tops({'formation':['fm1','fm2'],'md_top':[12000,12100],'md_bottom':[12099,12145]})\ndeviation3 = deviation.copy()\ndeviation3['azi'] = deviation1['azi'] + 30\n\nw3 = ph.well(name=name3, \n rte=rte3, \n surf_coord=surf_coord3,\n survey = deviation3,\n tops=tops3,\n crs=crs3)\n\n#Create the well object\nname4 = 'well-4'\nrte4 = 515 # Rotary table Elevation\nsurf_coord4 = Point(1100500,1200000)\ncrs4 = 'EPSG:3117'\ntops4 = ph.tops({'formation':['fm1','fm2'],'md_top':[12000,12100],'md_bottom':[12099,12145]})\n\nw4 = ph.well(name=name4, \n rte=rte4, \n surf_coord=surf_coord4,\n tops=tops4,\n crs=crs4)\n\n#Create the well object\nname5 = 'well-5'\nrte5 = 515 # Rotary table Elevation\nsurf_coord5 = Point(1170500,1200000)\ncrs5 = 'EPSG:3117'\ntops5 = ph.tops({'formation':['fm1','fm2'],'md_top':[12000,12100],'md_bottom':[12099,12145]})\n\nw5 = ph.well(name=name5, \n rte=rte5, \n surf_coord=surf_coord5,\n tops=tops5,\n crs=crs5,\n td=8452)",
"divide by zero encountered in true_divide\ninvalid value encountered in multiply\n"
],
[
"w4.survey",
"_____no_output_____"
]
],
[
[
"## Create an empty wells group\n\nYou can create a `wells_group` object either empty or not. It only receives `well` object.",
"_____no_output_____"
]
],
[
[
"g1 = ph.wells_group(w1)",
"_____no_output_____"
]
],
[
[
"To see the list of wells call the method `wells_group.wells`. It contains a dictionary with the name of each well as the key and the `well` object as the item",
"_____no_output_____"
]
],
[
[
"g1.wells",
"_____no_output_____"
]
],
[
[
"### Add more wells to existing list\n\nby calling the method `wells_group.add_well()` you can add more wells to an existing group",
"_____no_output_____"
]
],
[
[
"g1.add_well(w2,w3)\ng1.wells",
"_____no_output_____"
]
],
[
[
"### Get attributes from a `wells_group`",
"_____no_output_____"
]
],
[
[
"g1.wells['well-3'].surf_coord.wkt",
"_____no_output_____"
]
],
[
[
"### Describe each well with its attributes",
"_____no_output_____"
]
],
[
[
"g1.describe()",
"_____no_output_____"
]
],
[
[
"#### Wells tops\n\nGet the wells formations tops. If no parameters passed, it returns all wells and formations. You can pass `wells` and `formations` parameter to get the selected wells and formations",
"_____no_output_____"
]
],
[
[
"g1.wells_tops()",
"_____no_output_____"
],
[
"g1.wells_tops(wells=['well-1','well-2'], formations=['fm1'])",
"_____no_output_____"
]
],
[
[
"#### Wells survey",
"_____no_output_____"
]
],
[
[
"g1.wells_surveys().head()",
"_____no_output_____"
],
[
"g1.wells_surveys(wells=['well-1','well-2'])",
"_____no_output_____"
],
[
"g1.wells_distance(dims=['z'])",
"_____no_output_____"
],
[
"dist = g1.wells_distance(wells=['well-1','well-2'],dims=['y','z','x'])\ndist",
"_____no_output_____"
],
[
"m = g1.wells_map(zoom=13)\nm",
"_____no_output_____"
],
[
"g1.wells_coordinates()",
"_____no_output_____"
],
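[
"# Added cross-check (not part of the original notebook): recompute the pairwise horizontal\n# distances between well surface locations with plain numpy. It assumes the frame returned\n# by `wells_coordinates()` has a 'geometry' column of shapely Points (as the survey frames\n# above do) and is indexed by well name.\ncoords = g1.wells_coordinates()\nxy = np.array([[p.x, p.y] for p in coords['geometry']])\ndiff = xy[:, None, :] - xy[None, :, :]           # pairwise coordinate differences\ndist_matrix = np.sqrt((diff ** 2).sum(axis=-1))  # euclidean distance matrix\npd.DataFrame(dist_matrix, index=coords.index, columns=coords.index)",
"_____no_output_____"
],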
[
"g1.wells_tops().head()",
"_____no_output_____"
],
[
"g1.formation_distance(formation='fm2')",
"_____no_output_____"
],
[
"g1.formation_distance(wells=['well-1','well-2','well-3'],formation='fm2', dims=['tvdss_top'])",
"_____no_output_____"
],
[
"fig, ax = plt.subplots()\n\nfor i in g1.wells:\n _t = g1.wells[i].tops\n _s = g1.wells[i].survey\n ax.scatter(_t['easting']-1000000,_t['northing']-1000000)\n ax.plot(_s['easting']-1000000,_s['northing']-1000000)",
"_____no_output_____"
],
[
"df, c = g1.wells_tops(projection1d=True, azi=45)\nprint(c)\nprint(df)",
"[1000101.15424695 999579.60576028]\n formation md_top md_bottom tvd_top tvd_bottom tvd_tick \\\n0 fm1 12000 12099 11769.039476 11862.764382 93.724906 \n1 fm2 12100 12145 11863.712479 11906.390781 42.678303 \n2 fm1 12000 12099 11769.039476 11862.764382 93.724906 \n3 fm2 12100 12145 11863.712479 11906.390781 42.678303 \n4 fm1 12000 12099 11769.039476 11862.764382 93.724906 \n5 fm2 12100 12145 11863.712479 11906.390781 42.678303 \n\n tvdss_top tvdss_bottom northing easting \\\n0 -10253.259476 -10346.984382 999560.336081 9.999743e+05 \n1 -10347.932479 -10390.610781 999550.532263 9.999739e+05 \n2 -11254.039476 -11347.764382 999560.336081 1.000074e+06 \n3 -11348.712479 -11391.390781 999550.532263 1.000074e+06 \n4 -11254.039476 -11347.764382 999632.083642 1.000258e+06 \n5 -11348.712479 -11391.390781 999623.814231 1.000253e+06 \n\n geometry well projection \n0 POINT (999974.312 999560.336) well-1 -103.316402 \n1 POINT (999973.871 999550.532) well-1 -110.561210 \n2 POINT (1000074.312 999560.336) well-2 -32.605723 \n3 POINT (1000073.871 999550.532) well-2 -39.850532 \n4 POINT (1000257.922 999632.084) well-3 147.958998 \n5 POINT (1000252.637 999623.814) well-3 138.374868 \n"
],
[
"surv,ce = g1.wells_surveys(projection1d=True, azi=45, center=c)\nprint(surv)",
" md inc azi tvd tvdss north_offset \\\n0 0.0 0.00 0.00 0.000000 1515.780000 0.000000 \n1 193.0 0.06 0.00 192.999965 1322.780035 0.101055 \n2 375.0 0.12 0.00 374.999732 1140.780268 0.386939 \n3 559.0 0.33 347.60 558.998219 956.781781 1.097142 \n4 651.0 0.36 340.50 650.996554 864.783446 1.628348 \n.. ... ... ... ... ... ... \n132 12222.0 18.48 213.46 11979.483886 -11464.483886 -1266.497287 \n133 12312.0 18.32 213.53 12064.882700 -11549.882700 -1290.188049 \n134 12401.0 18.66 213.58 12149.288309 -11634.288309 -1313.709700 \n135 12488.0 19.28 212.83 12231.562968 -11716.562968 -1337.374054 \n136 12520.0 19.00 212.50 12261.793994 -11746.793994 -1346.206546 \n\n east_offset northing easting dleg \\\n0 0.000000 1.000000e+06 1.000000e+06 0.000000 \n1 0.000000 1.000000e+06 1.000000e+06 0.031088 \n2 0.000000 1.000000e+06 1.000000e+06 0.032967 \n3 -0.113784 1.000000e+06 1.000000e+06 0.116497 \n4 -0.267154 1.000000e+06 9.999999e+05 0.056709 \n.. ... ... ... ... \n132 -832.486431 9.996140e+05 1.000246e+06 0.422310 \n133 -848.163944 9.996068e+05 1.000241e+06 0.179465 \n134 -863.765207 9.995996e+05 1.000237e+06 0.382438 \n135 -879.250193 9.995924e+05 1.000232e+06 0.765749 \n136 -884.913189 9.995897e+05 1.000230e+06 0.938055 \n\n geometry well projection \n0 POINT (1000000.000 1000000.000) well-1 225.736764 \n1 POINT (1000000.000 1000000.031) well-1 225.758544 \n2 POINT (1000000.000 1000000.118) well-1 225.820159 \n3 POINT (999999.965 1000000.334) well-1 225.948703 \n4 POINT (999999.919 1000000.496) well-1 226.030137 \n.. ... ... ... \n132 POINT (1000246.258 999613.972) well-3 126.904281 \n133 POINT (1000241.480 999606.751) well-3 118.419389 \n134 POINT (1000236.724 999599.581) well-3 109.987378 \n135 POINT (1000232.005 999592.368) well-3 101.549671 \n136 POINT (1000230.278 999589.676) well-3 98.425514 \n\n[411 rows x 13 columns]\n"
],
[
"azi= 0\n\ntops, center = g1.wells_tops(projection1d=True, azi=azi)\nsurv,ce = g1.wells_surveys(projection1d=True, azi=azi, center=center)",
"_____no_output_____"
],
[
"fig, ax = plt.subplots()\nsns.lineplot(x='projection',y='tvdss_top', data=tops, \n hue='formation', style='formation',markers=True, ax=ax, palette='Set1')\n\nsns.lineplot(x='projection',y='tvdss', data=surv, \n hue='well', style='well', ax=ax,palette='GnBu_d')",
"_____no_output_____"
],
[
"\ng1.structural_view(azi=45,ylims=[-4000,-12000],formations=['fm2'])",
"_____no_output_____"
],
[
"g1.structural_view(azi=45,formations=['fm1'], wells=['well-1','well-2'])",
"_____no_output_____"
]
],
[
[
"## Export wells survey to PyVista object vtk",
"_____no_output_____"
]
],
[
[
"w1_vtk = g1.wells['well-1'].get_vtk()\nw1_vtk",
"_____no_output_____"
],
[
"w1_vtk.plot(notebook=False)",
"_____no_output_____"
],
[
"ss=g1.wells_surveys_vtk()\n\nss.plot(notebook=False)",
"_____no_output_____"
],
[
"p=pv.Plotter(notebook=False)\np.add_mesh(ss['well-1'], scalars='azi')\np.add_mesh(ss['well-2'], scalars='tvdss')\np.show() ",
"_____no_output_____"
],
[
"tops_vtk = g1.tops_vtk()\ntops_vtk.plot(notebook=False)",
"_____no_output_____"
],
[
"str_vtk = g1.structural_view_vtk()\nstr_vtk.plot(notebook=False)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d0ed51f1e4bb909315c287ac64063bf5f746c0bb | 8,707 | ipynb | Jupyter Notebook | examples/paddle.ipynb | qjyn/awesome-DeepLearning | 2235ccb2b6317d239d8a0df1ab4e20384f8d0b1a | [
"Apache-2.0"
] | null | null | null | examples/paddle.ipynb | qjyn/awesome-DeepLearning | 2235ccb2b6317d239d8a0df1ab4e20384f8d0b1a | [
"Apache-2.0"
] | null | null | null | examples/paddle.ipynb | qjyn/awesome-DeepLearning | 2235ccb2b6317d239d8a0df1ab4e20384f8d0b1a | [
"Apache-2.0"
] | null | null | null | 30.766784 | 111 | 0.510738 | [
[
[
"import paddle\r\nfrom paddle.nn import Linear\r\nimport paddle.nn.functional as F\r\nimport numpy as np\r\nimport os\r\nimport random",
"_____no_output_____"
],
[
"def load_data():\r\n # 从文件导入数据\r\n datafile = 'external-libraries/housing.data'\r\n data = np.fromfile(datafile, sep=' ', dtype=np.float32)\r\n\r\n # 每条数据包括14项,其中前面13项是影响因素,第14项是相应的房屋价格中位数\r\n feature_names = [ 'CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', \\\r\n 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV' ]\r\n feature_num = len(feature_names)\r\n\r\n # 将原始数据进行Reshape,变成[N, 14]这样的形状\r\n data = data.reshape([data.shape[0] // feature_num, feature_num])\r\n\r\n # 将原数据集拆分成训练集和测试集\r\n # 这里使用80%的数据做训练,20%的数据做测试\r\n # 测试集和训练集必须是没有交集的\r\n ratio = 0.8\r\n offset = int(data.shape[0] * ratio)\r\n training_data = data[:offset]\r\n\r\n # 计算train数据集的最大值,最小值,平均值\r\n maximums, minimums, avgs = training_data.max(axis=0), training_data.min(axis=0), \\\r\n training_data.sum(axis=0) / training_data.shape[0]\r\n \r\n # 记录数据的归一化参数,在预测时对数据做归一化\r\n global max_values\r\n global min_values\r\n global avg_values\r\n max_values = maximums\r\n min_values = minimums\r\n avg_values = avgs\r\n\r\n # 对数据进行归一化处理\r\n for i in range(feature_num):\r\n data[:, i] = (data[:, i] - avgs[i]) / (maximums[i] - minimums[i])\r\n\r\n # 训练集和测试集的划分比例\r\n training_data = data[:offset]\r\n test_data = data[offset:]\r\n return training_data, test_data",
"_____no_output_____"
],
[
"class Regressor(paddle.nn.Layer):\r\n\r\n # self代表类的实例自身\r\n def __init__(self):\r\n # 初始化父类中的一些参数\r\n super(Regressor, self).__init__()\r\n \r\n # 定义两层全连接层,输入维度是13,输出维度是1\r\n self.fc1=Linear(in_features=13, out_features=20)\r\n self.fc2=Linear(in_features=20, out_features=1)\r\n\r\n # 网络的前向计算\r\n def forward(self, inputs):\r\n outputs1 = self.fc1(inputs)\r\n outputs2 = F.relu(outputs1)\r\n x = self.fc2(outputs2)\r\n return x",
"_____no_output_____"
],
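[
"# Quick sanity check (added sketch, not in the original notebook): push a random batch\r\n# through an untrained Regressor to confirm the expected output shape [batch, 1].\r\nsanity_model = Regressor()\r\nfake_batch = paddle.to_tensor(np.random.rand(4, 13).astype('float32'))\r\nprint(sanity_model(fake_batch).shape)  # expect [4, 1]",
"_____no_output_____"
],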
[
"\r\n# 声明定义好的线性回归模型\r\nmodel = Regressor()\r\n# 开启模型训练模式\r\nmodel.train()\r\n# 加载数据\r\ntraining_data, test_data = load_data()\r\n# 定义优化算法,使用随机梯度下降SGD\r\n# 学习率设置为0.01\r\nopt = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters())",
"_____no_output_____"
],
[
"EPOCH_NUM = 10 # 设置外层循环次数\r\nBATCH_SIZE = 10 # 设置batch大小\r\n\r\n# 定义外层循环\r\nfor epoch_id in range(EPOCH_NUM):\r\n # 在每轮迭代开始之前,将训练数据的顺序随机的打乱\r\n np.random.shuffle(training_data)\r\n # 将训练数据进行拆分,每个batch包含10条数据\r\n mini_batches = [training_data[k:k+BATCH_SIZE] for k in range(0, len(training_data), BATCH_SIZE)]\r\n # 定义内层循环\r\n for iter_id, mini_batch in enumerate(mini_batches):\r\n x = np.array(mini_batch[:, :-1]) # 获得当前批次训练数据\r\n y = np.array(mini_batch[:, -1:]) # 获得当前批次训练标签(真实房价)\r\n # 将numpy数据转为飞桨动态图tensor形式\r\n house_features = paddle.to_tensor(x)\r\n prices = paddle.to_tensor(y)\r\n \r\n # 前向计算\r\n predicts = model(house_features)\r\n \r\n # 计算损失\r\n loss = F.square_error_cost(predicts, label=prices)\r\n avg_loss = paddle.mean(loss)\r\n if iter_id%20==0:\r\n print(\"epoch: {}, iter: {}, loss: {}\".format(epoch_id, iter_id, avg_loss.numpy()))\r\n \r\n # 反向传播\r\n avg_loss.backward()\r\n # 最小化loss,更新参数\r\n opt.step()\r\n # 清除梯度\r\n opt.clear_grad()",
"epoch: 0, iter: 0, loss: [0.09724949]\nepoch: 0, iter: 20, loss: [0.02470883]\nepoch: 0, iter: 40, loss: [0.1317286]\nepoch: 1, iter: 0, loss: [0.04004578]\nepoch: 1, iter: 20, loss: [0.04134566]\nepoch: 1, iter: 40, loss: [0.06713556]\nepoch: 2, iter: 0, loss: [0.03014165]\nepoch: 2, iter: 20, loss: [0.02819235]\nepoch: 2, iter: 40, loss: [0.01499602]\nepoch: 3, iter: 0, loss: [0.04065119]\nepoch: 3, iter: 20, loss: [0.02119301]\nepoch: 3, iter: 40, loss: [0.1602248]\nepoch: 4, iter: 0, loss: [0.02666318]\nepoch: 4, iter: 20, loss: [0.04125451]\nepoch: 4, iter: 40, loss: [0.02176096]\nepoch: 5, iter: 0, loss: [0.03553397]\nepoch: 5, iter: 20, loss: [0.02891409]\nepoch: 5, iter: 40, loss: [0.0525896]\nepoch: 6, iter: 0, loss: [0.03100935]\nepoch: 6, iter: 20, loss: [0.05877483]\nepoch: 6, iter: 40, loss: [0.11256783]\nepoch: 7, iter: 0, loss: [0.0504374]\nepoch: 7, iter: 20, loss: [0.01870409]\nepoch: 7, iter: 40, loss: [0.06765319]\nepoch: 8, iter: 0, loss: [0.04343303]\nepoch: 8, iter: 20, loss: [0.02473232]\nepoch: 8, iter: 40, loss: [0.02147216]\nepoch: 9, iter: 0, loss: [0.02831091]\nepoch: 9, iter: 20, loss: [0.00857646]\nepoch: 9, iter: 40, loss: [0.01226023]\n"
],
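[
"# Added sketch (not in the original notebook): evaluate the trained model on the whole\r\n# test split at once and report the mean absolute error in de-normalized prices.\r\nmodel.eval()\r\ntest_x = paddle.to_tensor(test_data[:, :-1])\r\npred = model(test_x).numpy()\r\n\r\n# reverse the normalization applied in load_data()\r\nscale = max_values[-1] - min_values[-1]\r\npred_prices = pred * scale + avg_values[-1]\r\ntrue_prices = test_data[:, -1:] * scale + avg_values[-1]\r\nprint('test MAE: {:.3f}'.format(np.abs(pred_prices - true_prices).mean()))",
"_____no_output_____"
],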
[
"# 保存模型参数,文件名为LR_model.pdparams\r\npaddle.save(model.state_dict(), 'LR_model.pdparams')",
"_____no_output_____"
],
[
"def load_one_example():\r\n # 从上边已加载的测试集中,随机选择一条作为测试数据\r\n idx = np.random.randint(0, test_data.shape[0])\r\n idx = -10 #测试倒数第10个数据\r\n one_data, label = test_data[idx, :-1], test_data[idx, -1]\r\n # 修改该条数据shape为[1,13]\r\n one_data = one_data.reshape([1,-1])\r\n return one_data, label",
"_____no_output_____"
],
[
"# 参数为保存模型参数的文件地址\r\nmodel_dict = paddle.load('LR_model.pdparams')\r\nmodel.load_dict(model_dict)\r\nmodel.eval()\r\n\r\n# 参数为数据集的文件地址\r\none_data, label = load_one_example()\r\n# 将数据转为动态图的variable格式 \r\none_data = paddle.to_tensor(one_data)\r\npredict = model(one_data)\r\n\r\n# 对结果做反归一化处理\r\npredict = predict * (max_values[-1] - min_values[-1]) + avg_values[-1]\r\n# 对label数据做反归一化处理\r\nlabel = label * (max_values[-1] - min_values[-1]) + avg_values[-1]\r\n\r\nprint(\"predicted {}, real {}\".format(predict.numpy(), label))",
"predicted [[18.519562]], real 19.700000762939453\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d0ed5e1f9f2ecf468c0454dd6665ad2d8f59822e | 8,899 | ipynb | Jupyter Notebook | Workshop_Part_1.ipynb | dolbyio-samples/Workshop-Media-APIs-Getting-Started | 8b7ac545647c77b4ce7385f3b65c082cdbeb3463 | [
"CC0-1.0"
] | 2 | 2022-01-27T19:05:57.000Z | 2022-03-24T18:14:55.000Z | Workshop_Part_1.ipynb | dolbyio-samples/Workshop-Media-APIs-Getting-Started | 8b7ac545647c77b4ce7385f3b65c082cdbeb3463 | [
"CC0-1.0"
] | null | null | null | Workshop_Part_1.ipynb | dolbyio-samples/Workshop-Media-APIs-Getting-Started | 8b7ac545647c77b4ce7385f3b65c082cdbeb3463 | [
"CC0-1.0"
] | null | null | null | 27.551084 | 185 | 0.560063 | [
[
[
"# **<div align=\"center\"> Dolby.io Developer Days Media APIs 101 - Getting Started </div>**\n\n### **<div align=\"center\"> Notebook #1: Getting Started</div>**\n",
"_____no_output_____"
],
[
"### Starting with a Raw Audio File\n\nWe can run code blocks like this in Binder by pressing \"Control+Enter\". Try it now after clicking the below code block!",
"_____no_output_____"
]
],
[
[
"import IPython # Helper library to play audio files in Python natively.\n\n# Set this link to any publically accessible media file you would like!\noriginal_audio_file = \"https://dolbyio.s3-us-west-1.amazonaws.com/public/shelby/airplane.original.mp4\"\n\nIPython.display.Audio(original_audio_file) # Display the audio embedded within python",
"_____no_output_____"
]
],
[
[
"This installed IPython to our workspace, to let us play media files natively within Python, and set a variable to this public media file we will use for the rest of this notebook.",
"_____no_output_____"
],
[
"### **Step #1:** Gathering Credentials\n\n- Go to http://dashboard.dolby.io/signup/ to sign up for a Dolby.io account.\n- At the bottom of the \"Applications\" widget on the dashboard, click \"_my first app_\"\n- Scroll down to the box labeled **'Media APIs'**.\n- Copy the key text under \"API Key:\" and replace the string below, then run the cell.\n- Also enter in your name to customize the output URL later.\n- _Press Control+Enter to run the cell._\n \n",
"_____no_output_____"
]
],
[
[
"# Enter your Dolby.io Media API Key here.\napi_key = \"<YOUR_API_KEY_HERE>\"\n# Enter your name here to customize the output URL later.\nname = \"<YOUR_NAME_HERE>\"\n\nprint(\"API Key and Name set!\")",
"_____no_output_____"
]
],
[
[
"Now we have two key variables set:\n\n1. The link to the original media file we want to process.\n2. Our API key so we can properly call the REST API endpoints.\n\nAs well as your name, just so we can differentiate output later on.\n",
"_____no_output_____"
],
[
"### **Step #2:** Calling the Enhance Job\n> Note: all of the following code is adapted from the Enhance quickstart found here: https://docs.dolby.io/media-apis/docs/quick-start-to-enhancing-media\n\n- Run the cell below to start the enhance job, this should output a JSON response with only a `job_id` in the body if no errors occur.",
"_____no_output_____"
]
],
[
[
"import requests # Python library to make HTTP requests\n\noutput_url = f\"dlb://out/workshop-{name}.mp4\" # Setting the output URL to have a different location based on your name!\n\n# Building the body of the request\nbody = {\n \"input\" : original_audio_file,\n \"output\" : output_url,\n}\n\n# Building the headers and url of the request\nurl = \"https://api.dolby.com/media/enhance\"\nheaders = {\n \"x-api-key\": api_key,\n \"Content-Type\": \"application/json\",\n \"Accept\": \"application/json\"\n}\n\n# Call the API request!\nresponse = requests.post(url, json=body, headers=headers)\nresponse.raise_for_status()\nprint(response.json()) # Prints out the output of the request",
"_____no_output_____"
]
],
[
[
"### **Step #3:** Checking Job Status\n\n- Now that we have created a job, we should check its status.\n- Run the cell below to check the status, this file is small so it should take only a couple of seconds.",
"_____no_output_____"
]
],
[
[
"url = \"https://api.dolby.com/media/enhance\"\nheaders = {\n \"x-api-key\": api_key,\n \"Content-Type\": \"application/json\",\n \"Accept\": \"application/json\"\n}\n\nparams = {\n \"job_id\": response.json()[\"job_id\"]\n}\n\nresponse = requests.get(url, params=params, headers=headers)\nresponse.raise_for_status()\nprint(response.json())",
"_____no_output_____"
]
],
[
[
"This should look like the following when done:\n\n```json\n{'path': '/media/enhance', 'status': 'Success', 'progress': 100, 'api_version': 'v1.1.2', 'result': {}}\n```",
"_____no_output_____"
],
[
"### **Step #4:** Download the Processed File\n\n- Now we want to download the file!\n- We can do this with another request.",
"_____no_output_____"
]
],
[
[
"import shutil\n\n# The name of the file that will be downloaded locally!\noutput_path = f\"workshop-{name}.mp4\"\n\nurl = \"https://api.dolby.com/media/output\"\nheaders = {\n \"x-api-key\": api_key,\n \"Content-Type\": \"application/json\",\n \"Accept\": \"application/json\",\n}\n\nargs = {\n \"url\": output_url\n}\n\n# Take the response and download it locally\nwith requests.get(url, params=args, headers=headers, stream=True) as response:\n response.raise_for_status()\n response.raw.decode_content = True\n print(\"Downloading from {0} into {1}\".format(response.url, output_path))\n with open(output_path, \"wb\") as output_file:\n shutil.copyfileobj(response.raw, output_file)",
"_____no_output_____"
]
],
[
[
"When it is done downloading, you'll see it pop up on the left side bar.\n\nNow that the file is downloaded lets give it a listen. Does it sound better?",
"_____no_output_____"
]
],
[
[
"IPython.display.Audio(output_path)",
"_____no_output_____"
]
],
[
[
"### **Congratulations you made your first call with the Dolby.io Enhance API!**\n\nWe can now move onto Workshop Part 2 on the left sidebar!\n\n\n\nReferences:\n\nhttps://docs.python-requests.org/en/latest/\n\nhttps://ipython.org/",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
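"code",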
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
d0ed603ae4cd3712470a8a4df2f2ec7fed144b9a | 196,113 | ipynb | Jupyter Notebook | Notebooks/SWMat.ipynb | PuneetGrov3r/SWMat | 8411fc04658d5d64788eb40454f777dce00830bd | [
"Apache-2.0"
] | 13 | 2019-04-03T00:54:30.000Z | 2022-01-08T22:38:21.000Z | Notebooks/SWMat.ipynb | PuneetGrov3r/SWMat | 8411fc04658d5d64788eb40454f777dce00830bd | [
"Apache-2.0"
] | null | null | null | Notebooks/SWMat.ipynb | PuneetGrov3r/SWMat | 8411fc04658d5d64788eb40454f777dce00830bd | [
"Apache-2.0"
] | 1 | 2019-04-03T05:48:31.000Z | 2019-04-03T05:48:31.000Z | 341.066087 | 47,268 | 0.926267 | [
[
[
"# Import",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"import numpy as np\nimport pandas as pd",
"_____no_output_____"
],
[
"import matplotlib\nmatplotlib.__version__",
"_____no_output_____"
],
[
"np.__version__, pd.__version__",
"_____no_output_____"
]
],
[
[
"# Dataset:",
"_____no_output_____"
]
],
[
[
"from sklearn.datasets import california_housing\n\ndata = california_housing.fetch_california_housing()\n\nX = data['data']\ny = data['target']\ncolumns = data['feature_names']\n\ntrain_df = pd.DataFrame(X, index=np.arange(len(X)), columns=columns)\ntrain_df['target'] = y\ntrain_df.head()",
"_____no_output_____"
]
],
[
[
"# 1) Initialize:",
"_____no_output_____"
]
],
[
[
"import sys\n\nsys.path.append('../SWMat/')\nfrom SWMat import SWMat",
"_____no_output_____"
],
[
"from matplotlib.patches import Wedge, Polygon\nfrom matplotlib.collections import PatchCollection",
"_____no_output_____"
],
[
"fig = plt.figure(figsize=(10, 7))\n\nax = plt.gca()\nfor pos in [\"right\", \"left\", \"top\", \"bottom\"]:\n ax.spines[pos].set_visible(False)\nax.xaxis.set_visible(False)\nax.yaxis.set_visible(False)\npatches = []\n\npatches += [Wedge((.2, .6), .1, 45, 270, width=0.05),\n Wedge((.2, .45), .1, 225, 450, width=0.05),\n Polygon(np.array([[.22, .23], [.26, .23], [.33, .48], [.39, .40], [.43, .47], [.49, .22], \n [.52, .22], [.45, .53], [.42, .54], [.39, .46], [.34, .53], [.32, .54]]) + np.array([0.25, 0.3])),\n Polygon(np.array([[.32, .70], [.27, .32], [.32, .31], [.36, .44], [.40, .44], [.43, .30], [.45, .30],\n [.53, .66], [.50, .67], [.45, .39], [.43, .39], [.42, .48], [.38, .50], [.32, .37], [.29, .37], [.35, .70]]))\n ]\n\ncolors = 100*np.random.rand(len(patches))\n\np = PatchCollection(patches, alpha=0.85)\np.set_array(np.array(colors))\nax.add_collection(p);\n\nplt.text(0.1, 0.09, \"Storytelling With Matplotlib\", fontsize=30, color=\"#3b5998\")\n\nplt.annotate(\"Cluttered Data...\", xy=(.8, .5), xytext=(1.1, .75), color=\"black\",\n arrowprops={'arrowstyle':'->', 'color': 'black', \n \"connectionstyle\":\"arc3,rad=-0.2\"},\n bbox={'pad':6, 'edgecolor':'orange', 'facecolor': \n 'orange', 'alpha':0.4}, fontsize=17)\n#plt.text(x=1.3, y=.1, s=\"Communicating Data\\nEffectively.\", fontsize=20, ha=\"center\")\nswm = SWMat(plt, ax=ax)\nswm.text(\"\\> Communicating <prop color='#3b5998' fontsize='30'>Data</prop>Effectively.\", fontsize=20, \n position=\"out-lower-right\");",
"_____no_output_____"
],
[
"# Simple Text\n\nswm = SWMat(plt) # And... base beautifications will be added.\ny = np.arange(500) + np.random.random(500)*50 + np.random.random(500)*40 + np.random.random(500)*50 + np.random.random(500)*10\nx = np.arange(500)\nplt.scatter(x, y)\nswm.text(\"Here goes your text!\\nAnother Text!!\");",
"_____no_output_____"
],
[
"swm = SWMat(plt)\nls = swm.line_plot(np.array([[1, 2, 3, 4], [1, 2, 3, 4]]).T, np.array([[1, 4, 2, 6], [4, 2, 6, 5]]).T, line_labels=[\"A\", \"B\"],\n highlight=0, lw=3)",
"_____no_output_____"
],
[
"swm = SWMat(plt)\nhist = swm.hist(train_df['target'], highlight=3, bins=[0, 0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5], ec='w', hide_y=True)\n#t = swm.text(\"My first text!<prop>Possible Outliers</prop><prop>haleluya\\nyo lib ipsum dipsum</prop>\\nipsum\", \n# fontsize=18)",
"_____no_output_____"
],
[
"swm = SWMat(plt)\nswm.bar(np.array([[1, 2, 3], [1, 2, 3]]), np.array([[2, 5, 3], [4, 1, 3]]), data_labels=[\"Alpha\", \"Beta\"], highlight={\"data\":1, \"cat\":1},\n cat_labels=[\"One\", \"Two\", \"Three\"], plot_type=\"stacked100%\", width=0.8);",
"_____no_output_____"
],
[
"swm = SWMat(plt)\nv = swm.violinplot(train_df['target'], show=\"top\", highlight={\"0\":[(0.7, 2.3), (4.7, 6)]})",
"_____no_output_____"
],
[
"swm = SWMat(plt)\nswm.bar(np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3]]), np.array([[2, 5, 3], [4, 3, 6], [2, 4, 2], [2, 4, 1]]), data_labels=[\"A\", \"B\", \"C\", \"D\"], cat_labels=[\"One\", \"Two\", \"Three\"], highlight={\"data\":1});",
"_____no_output_____"
],
[
"swm = SWMat(plt)\nswm.bar(np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]]), np.array([[2, 5, 3], [4, 3, 6], [2, 4, 2]]), data_labels=[\"A\", \"B\", \"C\"], cat_labels=[\"One\", \"Two\", \"Three\"]);",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d0ed7110951f2da5c2413728661ca5650003e44c | 3,928 | ipynb | Jupyter Notebook | p2_continuous-control_v20/Test_Continuous_Control_v20.ipynb | parkjin-nim/Deep_reinforcement_learning | 87149467ec825a2369f4a84112ac2b4622f3bc45 | [
"MIT"
] | null | null | null | p2_continuous-control_v20/Test_Continuous_Control_v20.ipynb | parkjin-nim/Deep_reinforcement_learning | 87149467ec825a2369f4a84112ac2b4622f3bc45 | [
"MIT"
] | null | null | null | p2_continuous-control_v20/Test_Continuous_Control_v20.ipynb | parkjin-nim/Deep_reinforcement_learning | 87149467ec825a2369f4a84112ac2b4622f3bc45 | [
"MIT"
] | null | null | null | 26.013245 | 112 | 0.547098 | [
[
[
"from unityagents import UnityEnvironment\nimport numpy as np\nimport random\n\nfrom collections import deque\nimport matplotlib.pyplot as plt\nfrom ddpg_agent3 import Agent\nfrom model3 import Actor, Critic\n\nimport torch\n\n%matplotlib inline\n",
"_____no_output_____"
],
[
"env = UnityEnvironment(file_name='../../Reacher.app')",
"_____no_output_____"
],
[
"# get the default brain\nbrain_name = env.brain_names[0]\nbrain = env.brains[brain_name]",
"_____no_output_____"
],
[
"# reset the environment\nenv_info = env.reset(train_mode=False)[brain_name]\n\n# number of agents\nnum_agents = len(env_info.agents)\nprint('Number of agents:', num_agents)\n\n# size of each action\naction_size = brain.vector_action_space_size\nprint('Size of each action:', action_size)\n\n# examine the state space \nstates = env_info.vector_observations\nstate_size = states.shape[1]\nprint('There are {} agents. Each observes a state with length: {}'.format(states.shape[0], state_size))\nprint('The state for the first agent looks like:', states[0])",
"_____no_output_____"
],
[
"\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\nrandom_seed=0\nAgent.actor_local = Actor(state_size, action_size, random_seed).to(device)\nAgent.actor_local.load_state_dict(torch.load('checkpoint_actor.pth'))\n\nagents =[] \n\nfor i in range(num_agents):\n agents.append(Agent(state_size, action_size, random_seed=0))",
"_____no_output_____"
],
[
"while True:\n actions = np.array([agents[i].act(states[i]) for i in range(num_agents)])\n\n env_info = env.step(actions)[brain_name] # send the action to the environment\n next_states = env_info.vector_observations # get the next state\n rewards = env_info.rewards # get the reward\n dones = env_info.local_done \n\n states = next_states\n scores += rewards\n\n print('\\rScore: {:.2f}\\tmin: {:.2f}\\tmax: {:.2f}'\n .format(np.mean(scores), np.min(scores), np.max(scores)), end=\"\") \n \n if np.any(dones):\n break\n \nprint(\"\\nScores: {}\".format(scores))",
"_____no_output_____"
],
[
"env.reset()",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d0ed8348be3324c03494efa75daf156689eefd19 | 966,359 | ipynb | Jupyter Notebook | deep-learning/fastai-docs/fastai_docs-master/dev_nb/snapshot/002_images.ipynb | AadityaGupta/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials | 352dd6d9a785e22fde0ce53a6b0c2e56f4964950 | [
"Apache-2.0"
] | 3,266 | 2017-08-06T16:51:46.000Z | 2022-03-30T07:34:24.000Z | deep-learning/fastai-docs/fastai_docs-master/dev_nb/snapshot/002_images.ipynb | AadityaGupta/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials | 352dd6d9a785e22fde0ce53a6b0c2e56f4964950 | [
"Apache-2.0"
] | 162 | 2019-08-16T17:24:47.000Z | 2021-09-27T21:41:00.000Z | deep-learning/fastai-docs/fastai_docs-master/dev_nb/snapshot/002_images.ipynb | AadityaGupta/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials | 352dd6d9a785e22fde0ce53a6b0c2e56f4964950 | [
"Apache-2.0"
] | 1,449 | 2017-08-06T17:40:59.000Z | 2022-03-31T12:03:24.000Z | 363.292857 | 161,864 | 0.92986 | [
[
[
"%load_ext autoreload\n%autoreload 2\n\nimport sys\nsys.path.append('../docs')\nfrom gen_doc.nbdoc import show_doc as sd",
"The autoreload extension is already loaded. To reload it, use:\n %reload_ext autoreload\n"
],
[
"#export\nfrom nb_001b import *\nimport sys, PIL, matplotlib.pyplot as plt, itertools, math, random, collections, torch\nimport scipy.stats, scipy.special\n\nfrom enum import Enum, IntEnum\nfrom torch import tensor, Tensor, FloatTensor, LongTensor, ByteTensor, DoubleTensor, HalfTensor, ShortTensor\nfrom operator import itemgetter, attrgetter\nfrom numpy import cos, sin, tan, tanh, log, exp\nfrom dataclasses import field\nfrom functools import reduce\nfrom collections import defaultdict, abc, namedtuple, Iterable\nfrom typing import Tuple, Hashable, Mapping, Dict\n\nimport mimetypes\nimport abc\nfrom abc import abstractmethod, abstractproperty",
"_____no_output_____"
]
],
[
[
"# CIFAR subset data",
"_____no_output_____"
],
[
"First we want to view our data to check if everything is how we expect it to be.",
"_____no_output_____"
],
[
"## Setup",
"_____no_output_____"
]
],
[
[
"DATA_PATH = Path('data')\nPATH = DATA_PATH/'cifar10_dog_air'\nTRAIN_PATH = PATH/'train'",
"_____no_output_____"
],
[
"dog_fn = list((TRAIN_PATH/'dog').iterdir())[0]\ndog_image = PIL.Image.open(dog_fn)\ndog_image.resize((256,256))",
"_____no_output_____"
],
[
"air_fn = list((TRAIN_PATH/'airplane').iterdir())[1]\nair_image = PIL.Image.open(air_fn)\nair_image.resize((256,256))",
"_____no_output_____"
]
],
[
[
"## Simple Dataset/Dataloader",
"_____no_output_____"
],
[
"We will build a Dataset class for our image files. A Dataset class needs to have two functions: `__len__` and `__getitem__`. Our `ImageDataset` class additionally gets image files from their respective directories and transforms them to tensors.",
"_____no_output_____"
]
],
[
[
"#export\ndef image2np(image:Tensor)->np.ndarray:\n \"convert from torch style `image` to numpy/matplot style\"\n res = image.cpu().permute(1,2,0).numpy()\n return res[...,0] if res.shape[2]==1 else res\n\ndef show_image(img:Tensor, ax:plt.Axes=None, figsize:tuple=(3,3), hide_axis:bool=True, \n title:Optional[str]=None, cmap:str='binary', alpha:Optional[float]=None)->plt.Axes:\n \"plot tensor `img` using matplotlib axis `ax`. `figsize`,`axis`,`title`,`cmap` and `alpha` pass to `ax.imshow`\"\n if ax is None: fig,ax = plt.subplots(figsize=figsize)\n ax.imshow(image2np(img), cmap=cmap, alpha=alpha)\n if hide_axis: ax.axis('off')\n if title: ax.set_title(title)\n return ax",
"_____no_output_____"
],
[
"class Image():\n def __init__(self, px): self.px = px\n def show(self, ax=None, **kwargs): return show_image(self.px, ax=ax, **kwargs)\n @property\n def data(self): return self.px",
"_____no_output_____"
],
[
"#export\nFilePathList = Collection[Path]\nTensorImage = Tensor\nNPImage = np.ndarray\n\ndef find_classes(folder:Path)->FilePathList:\n \"return class subdirectories in imagenet style train `folder`\"\n classes = [d for d in folder.iterdir()\n if d.is_dir() and not d.name.startswith('.')]\n assert(len(classes)>0)\n return sorted(classes, key=lambda d: d.name)\n\nimage_extensions = set(k for k,v in mimetypes.types_map.items() if v.startswith('image/'))\n\ndef get_image_files(c:Path, check_ext:bool=True)->FilePathList:\n \"return list of files in `c` that are images. `check_ext` will filter to `image_extensions`.\"\n return [o for o in list(c.iterdir())\n if not o.name.startswith('.') and not o.is_dir()\n and (not check_ext or (o.suffix in image_extensions))]\n\ndef pil2tensor(image:NPImage)->TensorImage:\n \"convert PIL style `image` array to torch style image tensor `get_image_files`\"\n arr = torch.ByteTensor(torch.ByteStorage.from_buffer(image.tobytes()))\n arr = arr.view(image.size[1], image.size[0], -1)\n return arr.permute(2,0,1)\n\nPathOrStr = Union[Path,str]\ndef open_image(fn:PathOrStr):\n \"return `Image` object created from image in file `fn`\"\n x = PIL.Image.open(fn).convert('RGB')\n return Image(pil2tensor(x).float().div_(255))",
"_____no_output_____"
],
[
"#export\nNPArrayableList = Collection[Union[np.ndarray, list]]\nNPArrayMask = np.ndarray\nSplitArrayList = List[Tuple[np.ndarray,np.ndarray]]\n\ndef arrays_split(mask:NPArrayMask, *arrs:NPArrayableList)->SplitArrayList:\n \"given `arrs` is [a,b,...] and `mask`index - return[(a[mask],a[~mask]),(b[mask],b[~mask]),...]\"\n mask = array(mask)\n return list(zip(*[(a[mask],a[~mask]) for a in map(np.array, arrs)]))\n\ndef random_split(valid_pct:float, *arrs:NPArrayableList)->SplitArrayList:\n \"randomly `array_split` with `valid_pct` ratio. good for creating validation set.\"\n is_train = np.random.uniform(size=(len(arrs[0]),)) > valid_pct\n return arrays_split(is_train, *arrs)\n\nclass DatasetBase(Dataset):\n \"base class for all fastai datasets\"\n def __len__(self): return len(self.x)\n @property\n def c(self): \n \"number of classes expressed by dataset y variable\"\n return self.y.shape[-1] if len(self.y.shape)>1 else 1\n def __repr__(self): return f'{type(self).__name__} of len {len(self)}'\n\nclass LabelDataset(DatasetBase):\n \"base class for fastai datasets that do classification\"\n @property\n def c(self): \n \"number of classes expressed by dataset y variable\"\n return len(self.classes)",
"_____no_output_____"
],
[
"#export\nImgLabel = str\nImgLabels = Collection[ImgLabel]\nClasses = Collection[Any]\n\nclass ImageDataset(LabelDataset):\n \"Dataset for folders of images in style {folder}/{class}/{images}\"\n def __init__(self, fns:FilePathList, labels:ImgLabels, classes:Optional[Classes]=None):\n self.classes = ifnone(classes, list(set(labels)))\n self.class2idx = {v:k for k,v in enumerate(self.classes)}\n self.x = np.array(fns)\n self.y = np.array([self.class2idx[o] for o in labels], dtype=np.int64)\n \n def __getitem__(self,i): return open_image(self.x[i]),self.y[i]\n \n @staticmethod\n def _folder_files(folder:Path, label:ImgLabel, check_ext=True)->Tuple[FilePathList,ImgLabels]:\n \"from `folder` return image files and labels. The labels are all `label`. `check_ext` means only image files\"\n fnames = get_image_files(folder, check_ext=check_ext)\n return fnames,[label]*len(fnames)\n \n @classmethod\n def from_single_folder(cls, folder:PathOrStr, classes:Classes, check_ext=True):\n \"typically used for test set. label all images in `folder` with `classes[0]`\"\n fns,labels = cls._folder_files(folder, classes[0], check_ext=check_ext)\n return cls(fns, labels, classes=classes)\n\n @classmethod\n def from_folder(cls, folder:Path, classes:Optional[Classes]=None, \n valid_pct:float=0., check_ext:bool=True) -> Union['ImageDataset', List['ImageDataset']]:\n \"\"\"dataset of `classes` labeled images in `folder`. Optional `valid_pct` split validation set.\"\"\"\n if classes is None: classes = [cls.name for cls in find_classes(folder)]\n \n fns,labels = [],[]\n for cl in classes:\n f,l = cls._folder_files(folder/cl, cl, check_ext=check_ext)\n fns+=f; labels+=l\n \n if valid_pct==0.: return cls(fns, labels, classes=classes)\n return [cls(*a, classes=classes) for a in random_split(valid_pct, fns, labels)]",
"_____no_output_____"
],
[
"sd(ImageDataset.from_folder)",
"_____no_output_____"
]
],
[
[
"# Data augmentation",
"_____no_output_____"
],
[
"We are going to augment our data to increase the size of our training set with artificial images. These new images are basically \"free\" data that we can use in our training to help our model generalize better (reduce overfitting).",
"_____no_output_____"
],
[
"## Lighting",
"_____no_output_____"
],
[
"We will start by changing the **brightness** and **contrast** of our images.",
"_____no_output_____"
],
[
"### Method",
"_____no_output_____"
],
[
"**Brightness**\n\nBrightness refers to where does our image stand on the dark-light spectrum. Brightness is applied by adding a positive constant to each of the image's channels. This works because each of the channels in an image goes from 0 (darkest) to 255 (brightest) in a dark-light continum. (0, 0, 0) is black (total abscence of light) and (255, 255, 255) is white (pure light). You can check how this works by experimenting by yourself [here](https://www.w3schools.com/colors/colors_rgb.asp).\n\n_Parameters_\n\n1. **Change** How much brightness do we want to add to (or take from) the image.\n\n Domain: Real numbers\n \n**Contrast**\n\nContrast refers to how sharp a distinction there is between brighter and darker sections of our image. To increase contrast we need darker pixels to be darker and lighter pixels to be lighter. In other words, we would like channels with a value smaller than 128 to decrease and channels with a value of greater than 128 to increase.\n\n_Parameters_\n\n1. **Scale** How much contrast do we want to add to (or remove from) the image.\n\n Domain: [0, +inf]\n \n***On logit and sigmoid***\n\nNotice that for both transformations we first apply the logit to our tensor, then apply the transformation and finally take the sigmoid. This is important for two reasons. \n\nFirst, we don't want to overflow our tensor values. In other words, we need our final tensor values to be between [0,1]. Imagine, for instance, a tensor value at 0.99. We want to increase its brightness, but we can’t go over 1.0. By doing logit first, which first moves our space to -inf to +inf, this works fine. The same applies to contrast if we have a scale S > 1 (might make some of our tensor values greater than one).\n\nSecond, when we apply contrast, we need to affect the dispersion of values around the middle value. Say we want to increase contrast. Then we need the bright values (>0.5) to get brighter and dark values (<0.5) to get darker. We must first transform our tensor values so our values which were originally <0.5 are now negative and our values which were originally >0.5 are now positive. This way, when we multiply by a constant, the dispersion around 0 will increase. The logit function does exactly this and allows us to increase or decrease dispersion around a mid value.",
"_____no_output_____"
],
[
"### Implementation",
"_____no_output_____"
]
],
[
[
"#export\ndef logit(x:Tensor)->Tensor: return -(1/x-1).log()\ndef logit_(x:Tensor)->Tensor: return (x.reciprocal_().sub_(1)).log_().neg_()",
"_____no_output_____"
],
[
"def contrast(x:Tensor, scale:float)->Tensor: return x.mul_(scale)",
"_____no_output_____"
],
[
"#export\nFlowField = Tensor\nLogitTensorImage = TensorImage\nAffineMatrix = Tensor\nKWArgs = Dict[str,Any]\nArgStar = Collection[Any]\nCoordSize = Tuple[int,int,int]\n\nLightingFunc = Callable[[LogitTensorImage, ArgStar, KWArgs], LogitTensorImage]\nPixelFunc = Callable[[TensorImage, ArgStar, KWArgs], TensorImage]\nCoordFunc = Callable[[FlowField, CoordSize, ArgStar, KWArgs], LogitTensorImage]\nAffineFunc = Callable[[KWArgs], AffineMatrix]\n\n\nclass ItemBase():\n \"All tranformable dataset items use this type\"\n @property\n @abstractmethod\n def device(self): pass\n @property\n @abstractmethod\n def data(self): pass\n\nclass ImageBase(ItemBase):\n \"Img based `Dataset` items dervie from this. Subclass to handle lighting, pixel, etc\"\n def lighting(self, func:LightingFunc, *args, **kwargs)->'ImageBase': return self\n def pixel(self, func:PixelFunc, *args, **kwargs)->'ImageBase': return self\n def coord(self, func:CoordFunc, *args, **kwargs)->'ImageBase': return self\n def affine(self, func:AffineFunc, *args, **kwargs)->'ImageBase': return self\n\n def set_sample(self, **kwargs)->'ImageBase':\n \"set parameters that control how we `grid_sample` the image after transforms are applied\"\n self.sample_kwargs = kwargs\n return self\n \n def clone(self)->'ImageBase': \n \"clones this item and its `data`\"\n return self.__class__(self.data.clone())",
"_____no_output_____"
],
[
"#export\nclass Image(ImageBase):\n \"supports appying transforms to image data\"\n def __init__(self, px)->'Image':\n \"create from raw tensor image data `px`\"\n self._px = px\n self._logit_px=None\n self._flow=None\n self._affine_mat=None\n self.sample_kwargs = {}\n\n @property\n def shape(self)->Tuple[int,int,int]: \n \"returns (ch, h, w) for this image\"\n return self._px.shape\n @property\n def size(self)->Tuple[int,int,int]: \n \"returns (h, w) for this image\"\n return self.shape[-2:]\n @property\n def device(self)->torch.device: return self._px.device\n \n def __repr__(self): return f'{self.__class__.__name__} ({self.shape})'\n\n def refresh(self)->None:\n \"applies any logit or affine transfers that have been \"\n if self._logit_px is not None:\n self._px = self._logit_px.sigmoid_()\n self._logit_px = None\n if self._affine_mat is not None or self._flow is not None:\n self._px = grid_sample(self._px, self.flow, **self.sample_kwargs)\n self.sample_kwargs = {}\n self._flow = None\n return self\n\n @property\n def px(self)->TensorImage:\n \"get the tensor pixel buffer\"\n self.refresh()\n return self._px\n @px.setter\n def px(self,v:TensorImage)->None: \n \"set the pixel buffer to `v`\"\n self._px=v\n\n @property\n def flow(self)->FlowField:\n \"access the flow-field grid after applying queued affine transforms\"\n if self._flow is None:\n self._flow = affine_grid(self.shape)\n if self._affine_mat is not None:\n self._flow = affine_mult(self._flow,self._affine_mat)\n self._affine_mat = None\n return self._flow\n \n @flow.setter\n def flow(self,v:FlowField): self._flow=v\n\n def lighting(self, func:LightingFunc, *args:Any, **kwargs:Any)->'Image':\n \"equivalent to `image = sigmoid(func(logit(image)))`\"\n self.logit_px = func(self.logit_px, *args, **kwargs)\n return self\n\n def pixel(self, func:PixelFunc, *args, **kwargs)->'Image':\n \"equivalent to `image.px = func(image.px)`\"\n self.px = func(self.px, *args, **kwargs)\n return self\n\n def coord(self, func:CoordFunc, *args, **kwargs)->'Image':\n \"equivalent to `image.flow = func(image.flow, image.size)`\" \n self.flow = func(self.flow, self.shape, *args, **kwargs)\n return self\n\n def affine(self, func:AffineFunc, *args, **kwargs)->'Image':\n \"equivalent to `image.affine_mat = image.affine_mat @ func()`\" \n m = tensor(func(*args, **kwargs)).to(self.device)\n self.affine_mat = self.affine_mat @ m\n return self\n\n def resize(self, size:Union[int,CoordSize])->'Image':\n \"resize the image to `size`, size can be a single int\"\n assert self._flow is None\n if isinstance(size, int): size=(self.shape[0], size, size)\n self.flow = affine_grid(size)\n return self\n\n @property\n def affine_mat(self)->AffineMatrix:\n \"get the affine matrix that will be applied by `refresh`\"\n if self._affine_mat is None:\n self._affine_mat = torch.eye(3).to(self.device)\n return self._affine_mat\n @affine_mat.setter\n def affine_mat(self,v)->None: self._affine_mat=v\n\n @property\n def logit_px(self)->LogitTensorImage:\n \"get logit(image.px)\"\n if self._logit_px is None: self._logit_px = logit_(self.px)\n return self._logit_px\n @logit_px.setter\n def logit_px(self,v:LogitTensorImage)->None: self._logit_px=v\n \n def show(self, ax:plt.Axes=None, **kwargs:Any)->None: \n \"plots the image into `ax`\"\n show_image(self.px, ax=ax, **kwargs)\n \n @property\n def data(self)->TensorImage: \n \"returns this images pixels as a tensor\"\n return self.px",
"_____no_output_____"
],
[
"train_ds = ImageDataset.from_folder(PATH/'train')\nvalid_ds = ImageDataset.from_folder(PATH/'test')",
"_____no_output_____"
],
[
"x = lambda: train_ds[1][0]",
"_____no_output_____"
],
[
"img = x()\nimg.logit_px = contrast(img.logit_px, 0.5)\nimg.show()",
"_____no_output_____"
],
[
"x().lighting(contrast, 0.5).show()",
"_____no_output_____"
]
],
[
[
"## Transform class",
"_____no_output_____"
]
],
[
[
"class Transform():\n _wrap=None\n def __init__(self, func): self.func=func\n def __call__(self, x, *args, **kwargs):\n if self._wrap: return getattr(x, self._wrap)(self.func, *args, **kwargs)\n else: return self.func(x, *args, **kwargs)\n \nclass TfmLighting(Transform): _wrap='lighting'",
"_____no_output_____"
],
[
"@TfmLighting\ndef brightness(x, change): return x.add_(scipy.special.logit(change))\n@TfmLighting\ndef contrast(x, scale): return x.mul_(scale)",
"_____no_output_____"
],
[
"_,axes = plt.subplots(1,4, figsize=(12,3))\n\nx().show(axes[0])\ncontrast(x(), 1.0).show(axes[1])\ncontrast(x(), 0.5).show(axes[2])\ncontrast(x(), 2.0).show(axes[3])",
"_____no_output_____"
],
[
"_,axes = plt.subplots(1,4, figsize=(12,3))\n\nx().show(axes[0])\nbrightness(x(), 0.8).show(axes[1])\nbrightness(x(), 0.5).show(axes[2])\nbrightness(x(), 0.2).show(axes[3])",
"_____no_output_____"
],
[
"def brightness_contrast(x, scale_contrast, change_brightness):\n return brightness(contrast(x, scale=scale_contrast), change=change_brightness)",
"_____no_output_____"
],
[
"_,axes = plt.subplots(1,4, figsize=(12,3))\n\nbrightness_contrast(x(), 0.75, 0.7).show(axes[0])\nbrightness_contrast(x(), 2.0, 0.3).show(axes[1])\nbrightness_contrast(x(), 2.0, 0.7).show(axes[2])\nbrightness_contrast(x(), 0.75, 0.3).show(axes[3])",
"_____no_output_____"
]
],
[
[
"## Random lighting",
"_____no_output_____"
],
[
"Next, we will make our previous transforms random since we are interested in automatizing the pipeline. We will achieve this by making our parameters stochastic with a specific distribution. \n\nWe will use a <a href=\"https://en.wikipedia.org/wiki/Uniform_distribution_(continuous)\">uniform</a> distribution for brightness change since its domain is the real numbers and the impact varies linearly with the scale. For contrast change we use [log_uniform](https://www.vosesoftware.com/riskwiki/LogUniformdistribution.php) for two reasons. First, contrast scale has a domain of [0, inf]. Second, the impact of the scale in the transformation is non-linear (i.e. 0.5 is as extreme as 2.0, 0.2 is as extreme as 5). The log_uniform function is appropriate because it has the same domain and correctly represents the non-linearity of the transform, P(0.5) = P(2).",
"_____no_output_____"
]
],
[
[
"#export\ndef uniform(low:Number, high:Number, size:List[int]=None)->float:\n \"draw 1 or shape=`size` random floats from uniform dist: min=`low`, max=`high`\"\n return random.uniform(low,high) if size is None else torch.FloatTensor(*listify(size)).uniform_(low,high)\n\ndef log_uniform(low, high, size=None):\n \"draw 1 or shape=`size` random floats from uniform dist: min=log(`low`), max=log(`high`)\"\n res = uniform(log(low), log(high), size)\n return exp(res) if size is None else res.exp_()\n\ndef rand_bool(p:float, size=None): \n \"draw 1 or shape=`size` random booleans (True occuring probability p)\"\n return uniform(0,1,size)<p",
"_____no_output_____"
],
[
"scipy.stats.gmean([log_uniform(0.5,2.0) for _ in range(1000)])",
"_____no_output_____"
],
[
"#export\nimport inspect\nfrom copy import copy,deepcopy\n\ndef get_default_args(func):\n return {k: v.default\n for k, v in inspect.signature(func).parameters.items()\n if v.default is not inspect.Parameter.empty}\n\ndef listify(p=None, q=None):\n \"Makes `p` same length as `q`\"\n if p is None: p=[]\n elif not isinstance(p, Iterable): p=[p]\n n = q if type(q)==int else len(p) if q is None else len(q)\n if len(p)==1: p = p * n\n assert len(p)==n, f'List len mismatch ({len(p)} vs {n})'\n return list(p)",
"_____no_output_____"
],
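[
"# Added illustrative check -- not part of the original notebook.\n# `listify` broadcasts a scalar (or length-1 list) to the requested length, which\n# is how a transform argument like scale=(0.3,3.) gets matched to its annotated\n# random function; `rand_bool` draws booleans that are True with probability p.\nlistify(0.5, 2), listify([0.3, 3.0]), rand_bool(0.5, 4)",
"_____no_output_____"
],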
[
"#export\nclass Transform():\n _wrap=None\n order=0\n def __init__(self, func, order=None):\n if order is not None: self.order=order\n self.func=func\n self.params = copy(func.__annotations__)\n self.def_args = get_default_args(func)\n setattr(Image, func.__name__,\n lambda x, *args, **kwargs: self.calc(x, *args, **kwargs))\n \n def __call__(self, *args, p=1., is_random=True, **kwargs):\n if args: return self.calc(*args, **kwargs)\n else: return RandTransform(self, kwargs=kwargs, is_random=is_random, p=p)\n \n def calc(tfm, x, *args, **kwargs):\n if tfm._wrap: return getattr(x, tfm._wrap)(tfm.func, *args, **kwargs)\n else: return tfm.func(x, *args, **kwargs)\n\n @property\n def name(self): return self.__class__.__name__\n \n def __repr__(self): return f'{self.name} ({self.func.__name__})'\n\nclass TfmLighting(Transform): order,_wrap = 8,'lighting'",
"_____no_output_____"
],
[
"#export\n@dataclass\nclass RandTransform():\n tfm:Transform\n kwargs:dict\n p:int=1.0\n resolved:dict = field(default_factory=dict)\n do_run:bool = True\n is_random:bool = True\n \n def resolve(self):\n if not self.is_random:\n self.resolved = {**self.tfm.def_args, **self.kwargs}\n return\n\n self.resolved = {}\n # for each param passed to tfm...\n for k,v in self.kwargs.items():\n # ...if it's annotated, call that fn...\n if k in self.tfm.params:\n rand_func = self.tfm.params[k]\n self.resolved[k] = rand_func(*listify(v))\n # ...otherwise use the value directly\n else: self.resolved[k] = v\n # use defaults for any args not filled in yet\n for k,v in self.tfm.def_args.items():\n if k not in self.resolved: self.resolved[k]=v\n # anything left over must be callable without params\n for k,v in self.tfm.params.items():\n if k not in self.resolved: self.resolved[k]=v()\n\n self.do_run = rand_bool(self.p)\n\n @property\n def order(self): return self.tfm.order\n\n def __call__(self, x, *args, **kwargs):\n return self.tfm(x, *args, **{**self.resolved, **kwargs}) if self.do_run else x",
"_____no_output_____"
],
[
"#export\n@TfmLighting\ndef brightness(x, change:uniform): return x.add_(scipy.special.logit(change))\n\n@TfmLighting\ndef contrast(x, scale:log_uniform): return x.mul_(scale)",
"_____no_output_____"
],
[
"x().contrast(scale=2).show()",
"_____no_output_____"
],
[
"x().contrast(scale=2).brightness(0.8).show()",
"_____no_output_____"
],
[
"tfm = contrast(scale=(0.3,3))\ntfm.resolve()\ntfm,tfm.resolved,tfm.do_run",
"_____no_output_____"
],
[
"# all the same\ntfm.resolve()\n\n_,axes = plt.subplots(1,4, figsize=(12,3))\nfor ax in axes: tfm(x()).show(ax)",
"_____no_output_____"
],
[
"tfm = contrast(scale=(0.3,3))\n\n# different\n_,axes = plt.subplots(1,4, figsize=(12,3))\nfor ax in axes:\n tfm.resolve()\n tfm(x()).show(ax)",
"_____no_output_____"
],
[
"tfm = contrast(scale=2, is_random=False)\ntfm.resolve()\ntfm(x()).show()",
"_____no_output_____"
]
],
[
[
"## Composition",
"_____no_output_____"
],
[
"We are interested in composing the transform functions so as to apply them all at once. We will try to feed a list of transforms to our pipeline for it to apply all of them.\n\nApplying a function to our transforms before calling them in Python is easiest if we use a decorator. You can find more about decorators [here](https://www.thecodeship.com/patterns/guide-to-python-function-decorators/).",
"_____no_output_____"
]
],
[
[
"#export\ndef resolve_tfms(tfms):\n for f in listify(tfms): f.resolve()\n\ndef apply_tfms(tfms, x, do_resolve=True):\n if not tfms: return x\n tfms = listify(tfms)\n if do_resolve: resolve_tfms(tfms)\n x = x.clone()\n for tfm in tfms: x = tfm(x)\n return x",
"_____no_output_____"
],
[
"x = train_ds[1][0]",
"_____no_output_____"
],
[
"tfms = [contrast(scale=(0.3,3.0), p=0.9),\n brightness(change=(0.35,0.65), p=0.9)]\n\n_,axes = plt.subplots(1,4, figsize=(12,3))\nfor ax in axes: apply_tfms(tfms,x).show(ax)",
"_____no_output_____"
],
[
"_,axes = plt.subplots(2,4, figsize=(12,6))\nfor i in range(4):\n apply_tfms(tfms,x).show(axes[0,i])\n apply_tfms(tfms,x,do_resolve=False).show(axes[1,i])",
"_____no_output_____"
],
[
"apply_tfms([],x).show()",
"_____no_output_____"
]
],
[
[
"## DatasetTfm",
"_____no_output_____"
]
],
[
[
"#export\nclass DatasetTfm(Dataset):\n def __init__(self, ds:Dataset, tfms:Collection[Callable]=None, **kwargs):\n self.ds,self.tfms,self.kwargs = ds,tfms,kwargs\n \n def __len__(self): return len(self.ds)\n \n def __getitem__(self,idx):\n x,y = self.ds[idx]\n return apply_tfms(self.tfms, x, **self.kwargs), y\n \n def __getattr__(self,k): return getattr(self.ds, k)\n\nimport nb_001b\nnb_001b.DatasetTfm = DatasetTfm",
"_____no_output_____"
],
[
"bs=64",
"_____no_output_____"
],
[
"#export\ndef to_data(b):\n if is_listy(b): return [to_data(o) for o in b]\n return b.data if isinstance(b,ItemBase) else b\n\ndef data_collate(batch):\n return torch.utils.data.dataloader.default_collate(to_data(batch))\n\n@dataclass\nclass DeviceDataLoader():\n dl: DataLoader\n device: torch.device\n def __post_init__(self): self.dl.collate_fn=data_collate\n\n def __len__(self): return len(self.dl)\n def __getattr__(self,k): return getattr(self.dl, k)\n def proc_batch(self,b): return to_device(b, self.device)\n\n def __iter__(self):\n self.gen = map(self.proc_batch, self.dl)\n return iter(self.gen)\n\n @classmethod\n def create(cls, *args, device=default_device, **kwargs):\n return cls(DataLoader(*args, **kwargs), device=device)\n \nnb_001b.DeviceDataLoader = DeviceDataLoader",
"_____no_output_____"
],
[
"data = DataBunch.create(train_ds, valid_ds, bs=bs, num_workers=4)\nlen(data.train_dl), len(data.valid_dl), data.train_dl.dataset.c",
"_____no_output_____"
],
[
"#export\ndef show_image_batch(dl, classes, rows=None, figsize=(12,15)):\n x,y = next(iter(dl))\n if rows is None: rows = int(math.sqrt(len(x)))\n show_images(x[:rows*rows],y[:rows*rows],rows, classes)\n\ndef show_images(x,y,rows, classes, figsize=(9,9)):\n fig, axs = plt.subplots(rows,rows,figsize=figsize)\n for i, ax in enumerate(axs.flatten()):\n show_image(x[i], ax)\n ax.set_title(classes[y[i]])\n plt.tight_layout()",
"_____no_output_____"
],
[
"show_image_batch(data.train_dl, train_ds.classes, 6)",
"_____no_output_____"
],
[
"data = DataBunch.create(train_ds, valid_ds, bs=bs, train_tfm=tfms)",
"_____no_output_____"
],
[
"show_image_batch(data.train_dl, train_ds.classes, 6)",
"_____no_output_____"
]
],
[
[
"# Affine",
"_____no_output_____"
],
[
"We will now add affine transforms that operate on the coordinates instead of pixels like the lighting transforms we just saw. An [affine transformation](https://en.wikipedia.org/wiki/Affine_transformation) is a function \"(...) between affine spaces which preserves points, straight lines and planes.\" ",
"_____no_output_____"
],
[
"## Details",
"_____no_output_____"
],
[
"Our implementation first creates a grid of coordinates for the original image. The grid is normalized to a [-1, 1] range with (-1, -1) representing the top left corner, (1, 1) the bottom right corner and (0, 0) the center. Next, we build an affine matrix representing our desired transform and we multiply it by our original grid coordinates. The result will be a set of x, y coordinates which references where in the input image will each of the pixels in the output image be mapped. It has a size of w \\* h \\* 2 since it needs two coordinates for each of the h * w pixels of the output image. \n\nThis is clearest if we see it graphically. We will build an affine matrix of the following form:\n\n`[[a, b, e],\n [c, d, f]]`\n\n\nwith which we will transform each pair of x, y coordinates in our original grid into our transformation grid:\n\n\n`[[a, b], [[x], [[e], [[x'],\n [c, d]] x [y]] + [f]] = [y']]` \n\nSo after the transform we will get a new grid with which to map our input image into our output image. This will be our **map of where from exactly does our transformation source each pixel in the output image**.\n\n**Enter problems**\n\nAffine transforms face two problems that must be solved independently:\n1. **The interpolation problem**: The result of our transformation gives us float coordinates, and we need to decide, for each (i,j), how to assign these coordinates to pixels in the input image.\n2. **The missing pixel problem**: The result of our transformation may have coordinates which exceed the [-1, 1] range of our original grid and thus fall outside of our original grid.\n\n**Solutions to problems**\n\n1. **The interpolation problem**: We will perform a [bilinear interpolation](https://en.wikipedia.org/wiki/Bilinear_interpolation). This takes an average of the values of the pixels corresponding to the four points in the grid surrounding the result of our transformation, with weights depending on how close we are to each of those points. \n2. **The missing pixel problem**: For these values we need padding, and we face a few options:\n\n 1. Adding zeros on the side (so the pixels that fall out will be black)\n 2. Replacing them by the value at the border\n 3. Mirroring the content of the picture on the other side (reflect padding).\n \n \n### Transformation Method\n\n**Zoom**\n\nZoom changes the focus of the image according to a scale. If a scale of >1 is applied, grid pixels will be mapped to coordinates that are more central than the pixel's coordinates (closer to 0,0) while if a scale of <1 is applied, grid pixels will be mapped to more perispheric coordinates (closer to the borders) in the input image.\n\nWe can also translate our transform to zoom into a non-centrical area of the image. For this we use $col_c$ which displaces the x axis and $row_c$ which displaces the y axis.\n\n_Parameters_\n\n1. **Scale** How much do we want to zoom in or out to our image.\n\n Domain: Real numbers\n \n2. **Col_pct** How much do we want to displace our zoom along the x axis.\n\n Domain: Real numbers between 0 and 1\n \n \n3. **Row_pct** How much do we want to displace our zoom along the y axis.\n\n Domain: Real numbers between 0 and 1\n \n\n<u>Affine matrix</u>\n\n`[[1/scale, 0, col_c],\n [0, 1/scale, row_c]]`\n\n\n**Rotate**\n\nRotate shifts the image around its center in a given angle theta. The rotation is counterclockwise if theta is positive and clockwise if theta is negative. 
If you are curious about the derivation of the rotation matrix you can find it [here](https://matthew-brett.github.io/teaching/rotation_2d.html).\n\n_Parameters_\n\n1. **Degrees** By which angle do we want to rotate our image.\n\n Domain: Real numbers\n \n<u>Affine matrix</u>\n\n`[[cos(theta), -sin(theta), 0],\n [sin(theta), cos(theta), 0]]`",
"_____no_output_____"
],
[
"## Deterministic affine",
"_____no_output_____"
]
],
[
[
"#export\ndef grid_sample_nearest(input, coords, padding_mode='zeros'):\n if padding_mode=='border': coords.clamp(-1,1)\n bs,ch,h,w = input.size()\n sz = tensor([w,h]).float()[None,None]\n coords.add_(1).mul_(sz/2)\n coords = coords[0].round_().long()\n if padding_mode=='zeros':\n mask = (coords[...,0] < 0) + (coords[...,1] < 0) + (coords[...,0] >= w) + (coords[...,1] >= h)\n mask.clamp_(0,1)\n coords[...,0].clamp_(0,w-1)\n coords[...,1].clamp_(0,h-1)\n result = input[...,coords[...,1],coords[...,0]]\n if padding_mode=='zeros': result[...,mask] = result[...,mask].zero_()\n return result",
"_____no_output_____"
],
[
"#export\ndef grid_sample(x, coords, mode='bilinear', padding_mode='reflect'):\n if padding_mode=='reflect': padding_mode='reflection'\n if mode=='nearest': return grid_sample_nearest(x[None], coords, padding_mode)[0]\n return F.grid_sample(x[None], coords, mode=mode, padding_mode=padding_mode)[0]\n\ndef affine_grid(size):\n size = ((1,)+size)\n N, C, H, W = size\n grid = FloatTensor(N, H, W, 2)\n linear_points = torch.linspace(-1, 1, W) if W > 1 else tensor([-1])\n grid[:, :, :, 0] = torch.ger(torch.ones(H), linear_points).expand_as(grid[:, :, :, 0])\n linear_points = torch.linspace(-1, 1, H) if H > 1 else tensor([-1])\n grid[:, :, :, 1] = torch.ger(linear_points, torch.ones(W)).expand_as(grid[:, :, :, 1])\n return grid\n\ndef affine_mult(c,m):\n if m is None: return c\n size = c.size()\n c = c.view(-1,2)\n c = torch.addmm(m[:2,2], c, m[:2,:2].t()) \n return c.view(size)",
"_____no_output_____"
],
[
"def rotate(degrees):\n angle = degrees * math.pi / 180\n return [[cos(angle), -sin(angle), 0.],\n [sin(angle), cos(angle), 0.],\n [0. , 0. , 1.]]",
"_____no_output_____"
],
[
"def xi(): return train_ds[1][0]\nx = xi().data",
"_____no_output_____"
],
[
"c = affine_grid(x.shape)",
"_____no_output_____"
],
[
"m = rotate(30)\nm = x.new_tensor(m)\nm",
"_____no_output_____"
],
[
"c[0,...,0]",
"_____no_output_____"
],
[
"c[0,...,1]",
"_____no_output_____"
],
[
"m",
"_____no_output_____"
],
[
"c = affine_mult(c,m)",
"_____no_output_____"
],
[
"c[0,...,0]",
"_____no_output_____"
],
[
"c[0,...,1]",
"_____no_output_____"
],
[
"img2 = grid_sample(x, c, padding_mode='zeros')\nshow_image(img2);",
"_____no_output_____"
],
[
"xi().affine(rotate, 30).show()",
"_____no_output_____"
]
],
[
[
"## Affine transform",
"_____no_output_____"
]
],
[
[
"#export\nclass TfmAffine(Transform): order,_wrap = 5,'affine'\nclass TfmPixel(Transform): order,_wrap = 10,'pixel'\n\n@TfmAffine\ndef rotate(degrees:uniform):\n angle = degrees * math.pi / 180\n return [[cos(angle), -sin(angle), 0.],\n [sin(angle), cos(angle), 0.],\n [0. , 0. , 1.]]\n\ndef get_zoom_mat(sw, sh, c, r):\n return [[sw, 0, c],\n [0, sh, r],\n [0, 0, 1.]]\n\n@TfmAffine\ndef zoom(scale:uniform=1.0, row_pct:uniform=0.5, col_pct:uniform=0.5):\n s = 1-1/scale\n col_c = s * (2*col_pct - 1)\n row_c = s * (2*row_pct - 1)\n return get_zoom_mat(1/scale, 1/scale, col_c, row_c)\n\n@TfmAffine\ndef squish(scale:uniform=1.0, row_pct:uniform=0.5, col_pct:uniform=0.5):\n if scale <= 1: \n col_c = (1-scale) * (2*col_pct - 1)\n return get_zoom_mat(scale, 1, col_c, 0.)\n else: \n row_c = (1-1/scale) * (2*row_pct - 1)\n return get_zoom_mat(1, 1/scale, 0., row_c)",
"_____no_output_____"
],
[
"rotate(xi(), 30).show()",
"_____no_output_____"
],
[
"zoom(xi(), 0.6).show()",
"_____no_output_____"
],
[
"zoom(xi(), 0.6).set_sample(padding_mode='zeros').show()",
"_____no_output_____"
],
[
"zoom(xi(), 2, 0.2, 0.2).show()",
"_____no_output_____"
],
[
"scales = [0.75,0.9,1.1,1.33]\n\n_,axes = plt.subplots(1,4, figsize=(12,3))\nfor i, ax in enumerate(axes): squish(xi(), scales[i]).show(ax)",
"_____no_output_____"
],
[
"_,axes=plt.subplots(1,3,figsize=(9,3))\nxi().show(axes[0])\n\nimg2 = rotate(xi(), 30).refresh()\nimg2 = zoom(img2, 1.6)\nimg2.show(axes[1])\n\nzoom(rotate(xi(), 30), 1.6).show(axes[2])",
"_____no_output_____"
],
[
"xi().resize(48).show()",
"_____no_output_____"
],
[
"img2 = zoom(xi().resize(48), 1.6, 0.8, 0.2)\nrotate(img2, 30).show()",
"_____no_output_____"
],
[
"img2 = zoom(xi().resize(24), 1.6, 0.8, 0.2)\nrotate(img2, 30).show(hide_axis=False)",
"_____no_output_____"
],
[
"img2 = zoom(xi().resize(48), 1.6, 0.8, 0.2)\nrotate(img2, 30).set_sample(mode='nearest').show()",
"_____no_output_____"
]
],
[
[
"## Random affine",
"_____no_output_____"
],
[
"As we did with the Lighting transform, we now want to build randomness into our pipeline so we can increase the automatization of the transform process. \n\nWe will use a uniform distribution for both our transforms since their impact is linear and their domain is the real numbers.",
"_____no_output_____"
],
[
"**Apply all transforms**\n\nWe will make all transforms try to do as little calculations as possible.\n\nWe do only one affine transformation by multiplying all the affine matrices of the transforms, then we apply to the coords any non-affine transformation we might want (jitter, elastic distorsion). Next, we crop the coordinates we want to keep and, by doing it before the interpolation, we don't need to compute pixel values that won't be used afterwards. Finally we perform the interpolation and we apply all the transforms that operate pixelwise (brightness, contrast).",
"_____no_output_____"
]
],
[
[
"tfm = rotate(degrees=(-45,45.), p=0.75); tfm",
"_____no_output_____"
],
[
"tfm.resolve(); tfm",
"_____no_output_____"
],
[
"x = xi()",
"_____no_output_____"
],
[
"_,axes = plt.subplots(1,4, figsize=(12,3))\nfor ax in axes: apply_tfms(tfm, x).show(ax)",
"_____no_output_____"
],
[
"tfms = [rotate(degrees=(-45,45.), p=0.75),\n zoom(scale=(0.5,2.0), p=0.75)]\n\n_,axes = plt.subplots(1,4, figsize=(12,3))\nfor ax in axes: apply_tfms(tfms,x).show(ax)",
"_____no_output_____"
],
[
"#export\ndef apply_tfms(tfms, x, do_resolve=True, xtra=None, size=None, **kwargs):\n if not (tfms or size): return x\n if not xtra: xtra={}\n tfms = sorted(listify(tfms), key=lambda o: o.tfm.order)\n if do_resolve: resolve_tfms(tfms)\n x = x.clone()\n if kwargs: x.set_sample(**kwargs)\n if size: x.resize(size)\n for tfm in tfms:\n if tfm.tfm in xtra: x = tfm(x, **xtra[tfm.tfm])\n else: x = tfm(x)\n return x",
"_____no_output_____"
],
[
"tfms = [rotate(degrees=(-45,45.), p=0.75),\n zoom(scale=(1.0,2.0), row_pct=(0,1.), col_pct=(0,1.))]\n\n_,axes = plt.subplots(1,4, figsize=(12,3))\nfor ax in axes: apply_tfms(tfms,x, padding_mode='zeros', size=64).show(ax)",
"_____no_output_____"
],
[
"tfms = [squish(scale=(0.5,2), row_pct=(0,1.), col_pct=(0,1.))]\n\n_,axes = plt.subplots(1,4, figsize=(12,3))\nfor ax in axes: apply_tfms(tfms,x).show(ax)",
"_____no_output_____"
]
],
[
[
"# Coord and pixel",
"_____no_output_____"
],
[
"## Jitter / flip",
"_____no_output_____"
],
[
"The last two transforms we will use are **jitter** and **flip**. \n\n**Jitter**\n\nJitter is a transform which adds a random value to each of the pixels to make them somewhat different than the original ones. In our implementation we first get a random number between (-1, 1) and we multiply it by a constant M which scales it.\n\n_Parameters_\n\n1. **Magnitude** How much random noise do we want to add to each of the pixels in our image.\n\n Domain: Real numbers between 0 and 1.\n \n**Flip**\n\nFlip is a transform that reflects the image on a given axis.\n\n_Parameters_\n\n1. **P** Probability of applying the transformation to an input.\n\n Domain: Real numbers between 0 and 1.",
"_____no_output_____"
]
],
[
[
"#export\nclass TfmCoord(Transform): order,_wrap = 4,'coord'\n\n@TfmCoord\ndef jitter(c, size, magnitude:uniform):\n return c.add_((torch.rand_like(c)-0.5)*magnitude*2)\n\n@TfmPixel\ndef flip_lr(x): return x.flip(2)",
"_____no_output_____"
],
[
"tfm = jitter(magnitude=(0,0.1))\n\n_,axes = plt.subplots(1,4, figsize=(12,3))\nfor ax in axes:\n tfm.resolve()\n tfm(xi()).show(ax)",
"_____no_output_____"
],
[
"tfm = flip_lr(p=0.5)\n\n_,axes = plt.subplots(1,4, figsize=(12,3))\nfor ax in axes:\n tfm.resolve()\n tfm(xi()).show(ax)",
"_____no_output_____"
]
],
[
[
"## Crop/pad",
"_____no_output_____"
],
[
"**Crop**\n\nCrop is a transform that cuts a series of pixels from an image. It does this by removing rows and columns from the input image.\n\n_Parameters_\n\n1. **Size** What is the target size of each side in pixels. If only one number *s* is specified, image is made square with dimensions *s* \\* *s*.\n\n Domain: Positive integers.\n \n2. **Row_pct** Determines where to cut our image vertically on the bottom and top (which rows are left out). If <0.5, more rows will be cut in the top than in the bottom and viceversa (varies linearly).\n\n Domain: Real numbers between 0 and 1.\n \n3. **Col_pct** Determines where to cut our image horizontally on the left and right (which columns are left out). If <0.5, more rows will be cut in the left than in the right and viceversa (varies linearly).\n\n Domain: Real numbers between 0 and 1.\n \nOur three parameters are related with the following equations:\n\n1. output_rows = [**row_pct***(input_rows-**size**):**size**+**row_pct***(input_rows-**size**)]\n\n2. output_cols = [**col_pct***(input_cols-**size**):**size**+**col_pct***(input_cols-**size**)]\n\n**Pad**\n\n\nPads each of the four borders of our image with a certain amount of pixels. Can pad with reflection (reflects border pixels to fill new pixels) or zero (adds black pixels). \n\n_Parameters_\n\n1. **Padding** Amount of pixels to add to each border. [More details](https://pytorch.org/docs/stable/nn.html#torch.nn.functional.pad)\n\n Domain: Positive integers.\n \n2. **Mode** How to fill new pixels. For more detail see the Pytorch subfunctions for padding.\n\n Domain: \n - Reflect (default): reflects opposite pixels to fill new pixels. [More details](https://pytorch.org/docs/stable/nn.html#torch.nn.ReflectionPad2d)\n - Constant: adds pixels with specified value (default is 0, black pixels) [More details](https://pytorch.org/docs/stable/nn.html#torch.nn.ConstantPad2d)\n - Replicate: replicates border row or column pixels to fill new pixels [More details](https://pytorch.org/docs/stable/nn.html#torch.nn.ReplicationPad2d)\n \n \n***On using padding and crop***\n\nA nice way to use these two functions is to combine them into one transform. We can add padding to the image and then crop some of it out. This way, we can create a new image to augment our training set without losing image information by cropping. Furthermore, this can be done in several ways (modifying the amount and type of padding and the crop style) so it gives us great flexibility to add images to our training set. You can find an example of this in the code below.",
"_____no_output_____"
]
],
[
[
"[(o.__name__,o.order) for o in\n sorted((Transform,TfmAffine,TfmCoord,TfmLighting,TfmPixel),key=attrgetter('order'))]",
"_____no_output_____"
],
[
"#export\n@partial(TfmPixel, order=-10)\ndef pad(x, padding, mode='reflect'):\n return F.pad(x[None], (padding,)*4, mode=mode)[0]\n\n@TfmPixel\ndef crop(x, size, row_pct:uniform=0.5, col_pct:uniform=0.5):\n size = listify(size,2)\n rows,cols = size\n row = int((x.size(1)-rows+1) * row_pct)\n col = int((x.size(2)-cols+1) * col_pct)\n return x[:, row:row+rows, col:col+cols].contiguous()",
"_____no_output_____"
],
[
"pad(xi(), 4, 'constant').show()",
"_____no_output_____"
],
[
"crop(pad(xi(), 4, 'constant'), 32, 0.25, 0.75).show(hide_axis=False)",
"_____no_output_____"
],
[
"crop(pad(xi(), 4), 32, 0.25, 0.75).show()",
"_____no_output_____"
]
],
[
[
"## Combine",
"_____no_output_____"
]
],
[
[
"tfms = [flip_lr(p=0.5),\n pad(padding=4, mode='constant'),\n crop(size=32, row_pct=(0,1.), col_pct=(0,1.))]",
"_____no_output_____"
],
[
"_,axes = plt.subplots(1,4, figsize=(12,3))\nfor ax in axes: apply_tfms(tfms, x).show(ax)",
"_____no_output_____"
],
[
"tfms = [\n flip_lr(p=0.5),\n contrast(scale=(0.5,2.0)),\n brightness(change=(0.3,0.7)),\n rotate(degrees=(-45,45.), p=0.5),\n zoom(scale=(0.5,1.2), p=0.8)\n]",
"_____no_output_____"
],
[
"_,axes = plt.subplots(1,4, figsize=(12,3))\nfor ax in axes: apply_tfms(tfms, x).show(ax)",
"_____no_output_____"
],
[
"_,axes = plt.subplots(2,4, figsize=(12,6))\n\nfor i in range(4):\n apply_tfms(tfms, x, padding_mode='zeros', size=48).show(axes[0][i], hide_axis=False)\n apply_tfms(tfms, x, mode='nearest', do_resolve=False).show(axes[1][i], hide_axis=False)",
"_____no_output_____"
]
],
[
[
"## RandomResizedCrop (Torchvision version)",
"_____no_output_____"
]
],
[
[
"#export\ndef compute_zs_mat(sz, scale, squish, invert, row_pct, col_pct):\n orig_ratio = math.sqrt(sz[2]/sz[1])\n for s,r,i in zip(scale,squish, invert):\n s,r = math.sqrt(s),math.sqrt(r)\n if s * r <= 1 and s / r <= 1: #Test if we are completely inside the picture\n w,h = (s/r, s*r) if i else (s*r,s/r)\n w /= orig_ratio\n h *= orig_ratio\n col_c = (1-w) * (2*col_pct - 1)\n row_c = (1-h) * (2*row_pct - 1)\n return get_zoom_mat(w, h, col_c, row_c)\n \n #Fallback, hack to emulate a center crop without cropping anything yet.\n if orig_ratio > 1: return get_zoom_mat(1/orig_ratio**2, 1, 0, 0.)\n else: return get_zoom_mat(1, orig_ratio**2, 0, 0.)\n\n@TfmCoord\ndef zoom_squish(c, size, scale:uniform=1.0, squish:uniform=1.0, invert:rand_bool=False, \n row_pct:uniform=0.5, col_pct:uniform=0.5):\n #This is intended for scale, squish and invert to be of size 10 (or whatever) so that the transform\n #can try a few zoom/squishes before falling back to center crop (like torchvision.RandomResizedCrop)\n m = compute_zs_mat(size, scale, squish, invert, row_pct, col_pct)\n return affine_mult(c, FloatTensor(m))",
"_____no_output_____"
],
[
"rrc = zoom_squish(scale=(0.25,1.0,10), squish=(0.5,1.0,10), invert=(0.5,10),\n row_pct=(0,1.), col_pct=(0,1.))",
"_____no_output_____"
],
[
"_,axes = plt.subplots(2,4, figsize=(12,6))\nfor i in range(4):\n apply_tfms(rrc, x, size=48).show(axes[0][i])\n apply_tfms(rrc, x, do_resolve=False, mode='nearest').show(axes[1][i])",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
d0ed876f39cb61d19bf5b092a6e69848fd137dc5 | 120,708 | ipynb | Jupyter Notebook | predictions/Prediction_Offense_Final.ipynb | JimKing100/nfl-test | e35f517be8b347d076f91101fc18887c064902fa | [
"MIT"
] | null | null | null | predictions/Prediction_Offense_Final.ipynb | JimKing100/nfl-test | e35f517be8b347d076f91101fc18887c064902fa | [
"MIT"
] | null | null | null | predictions/Prediction_Offense_Final.ipynb | JimKing100/nfl-test | e35f517be8b347d076f91101fc18887c064902fa | [
"MIT"
] | null | null | null | 38.054224 | 254 | 0.328603 | [
[
[
"<a href=\"https://colab.research.google.com/github/JimKing100/nfl-test/blob/master/predictions/Prediction_Offense_Final.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"# Installs\n!pip install pmdarima",
"Collecting pmdarima\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/83/aa/feb76414043592c3149059ab772a51de03fbf7544a8e19237f229a50a949/pmdarima-1.5.3-cp36-cp36m-manylinux1_x86_64.whl (1.5MB)\n\u001b[K |████████████████████████████████| 1.5MB 1.4MB/s \n\u001b[?25hRequirement already satisfied: urllib3 in /usr/local/lib/python3.6/dist-packages (from pmdarima) (1.24.3)\nRequirement already satisfied: Cython>=0.29 in /usr/local/lib/python3.6/dist-packages (from pmdarima) (0.29.16)\nRequirement already satisfied: numpy>=1.17.3 in /usr/local/lib/python3.6/dist-packages (from pmdarima) (1.18.2)\nRequirement already satisfied: scipy>=1.3.2 in /usr/local/lib/python3.6/dist-packages (from pmdarima) (1.4.1)\nRequirement already satisfied: pandas>=0.19 in /usr/local/lib/python3.6/dist-packages (from pmdarima) (1.0.3)\nRequirement already satisfied: statsmodels>=0.10.2 in /usr/local/lib/python3.6/dist-packages (from pmdarima) (0.10.2)\nRequirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.6/dist-packages (from pmdarima) (0.14.1)\nRequirement already satisfied: scikit-learn>=0.22 in /usr/local/lib/python3.6/dist-packages (from pmdarima) (0.22.2.post1)\nRequirement already satisfied: python-dateutil>=2.6.1 in /usr/local/lib/python3.6/dist-packages (from pandas>=0.19->pmdarima) (2.8.1)\nRequirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.6/dist-packages (from pandas>=0.19->pmdarima) (2018.9)\nRequirement already satisfied: patsy>=0.4.0 in /usr/local/lib/python3.6/dist-packages (from statsmodels>=0.10.2->pmdarima) (0.5.1)\nRequirement already satisfied: six>=1.5 in /usr/local/lib/python3.6/dist-packages (from python-dateutil>=2.6.1->pandas>=0.19->pmdarima) (1.12.0)\nInstalling collected packages: pmdarima\nSuccessfully installed pmdarima-1.5.3\n"
],
[
"# Imports\nimport numpy as np\nimport pandas as pd\nfrom statsmodels.tsa.arima_model import ARIMA\nimport pmdarima as pm\nfrom sklearn import preprocessing",
"/usr/local/lib/python3.6/dist-packages/statsmodels/tools/_testing.py:19: FutureWarning: pandas.util.testing is deprecated. Use the functions in the public API at pandas.testing instead.\n import pandas.util.testing as tm\n"
],
[
"# Import data\noriginal_df = pd.read_csv('https://raw.githubusercontent.com/JimKing100/nfl-test/master/data-actuals/actuals_offense.csv')\nkickers_df = pd.read_csv('https://raw.githubusercontent.com/JimKing100/nfl-test/master/data-revised/rookies_non_kicker.csv')\noffense_df = pd.read_csv('https://raw.githubusercontent.com/JimKing100/nfl-test/master/data-revised/rookies_non_offense.csv')\nplayer_df = pd.concat([kickers_df, offense_df], ignore_index=True)",
"_____no_output_____"
],
[
"# The dataframe of actual offensive points for each game from 2000-2019\noriginal_df.head()",
"_____no_output_____"
],
[
"# The dataframe of all 2019 offensive players (kickers and offense)\nplayer_df.head(50)",
"_____no_output_____"
],
[
"# Add a row to the final_df dataframe\n# Each row represents the predicted points for each team\ndef add_row(df, p, f, l, n, pos, pred, act):\n\n df = df.append({'player': p,\n 'first': f,\n 'last': l,\n 'name': n,\n 'position': pos,\n 'week1-pred': pred,\n 'week1-act': act\n }, ignore_index=True)\n \n return df",
"_____no_output_____"
],
[
"# The main code for iterating through the player(offense and kicker) list, calculating the points and adding the rows\n# to the final_df dataframe.\ncolumn_names = ['player',\n 'first',\n 'last',\n 'name',\n 'position', \n 'week1-pred',\n 'week1-act'\n ]\n\nplayer_list = offense_df['player'].tolist()\n\nfinal_df = pd.DataFrame(columns = column_names)\n\nfor player in player_list:\n\n first = player_df['first'].loc[(player_df['player']==player)].iloc[0]\n last = player_df['last'].loc[(player_df['player']==player)].iloc[0]\n name = player_df['name'].loc[(player_df['player']==player)].iloc[0]\n position1 = player_df['position1'].loc[(player_df['player']==player)].iloc[0]\n start_year = player_df['start'].loc[(player_df['player']==player)].iloc[0]\n row = original_df.index[(original_df['player']==player)][0]\n\n if start_year < 2000:\n start_year = 2000\n col = ((start_year - 2000) * 16) + 5\n train_data = original_df.iloc[row, col:309]\n actuals = original_df.iloc[row, 309:325]\n act_points = actuals.sum()\n\n print(player)\n\n if (player != 'GG-0310') & (player != 'KA-0737') & (player != 'JM-6775') & \\\n (player != 'AL-0387') & (player != 'JW-5475'):\n # ARIMA model\n model = pm.auto_arima(train_data, start_p=1, start_q=1,\n test='adf', # use adftest to find optimal 'd'\n max_p=3, max_q=3, # maximum p and q\n m=1, # frequency of series\n d=None, # let model determine 'd'\n seasonal=False, # No Seasonality\n start_P=0, \n D=0, \n trace=False,\n error_action='ignore', \n suppress_warnings=True, \n stepwise=True)\n \n # Forecast\n n_periods = 16\n fc = model.predict(n_periods=n_periods, return_conf_int=False)\n index_of_fc = np.arange(len(train_data), len(train_data)+n_periods)\n fc_series = pd.Series(fc, index=index_of_fc)\n pred_points = fc_series.sum()\n else:\n pred_points = 0\n\n final_df = add_row(final_df, player, first, last, name, position1, pred_points, act_points)",
"TB-2300\nDB-3800\nJM-2900\nBR-1100\nEM-0200\nMS-0200\nPR-0300\nAR-1300\nRF-0500\nMM-4700\nJF-1900\nMR-2500\nBH-1900\nCD-0300\nMS-4100\nCM-1500\nAD-0100\nCN-0500\nTT-0500\nAT-0160\nCK-0250\nKC-2350\nNF-0250\nRG-1850\nRT-0150\nRW-3850\nMB-0450\nMG-0850\nRG-1885\nAM-1150\n"
],
[
"# The final_df dataframe\nfinal_df['week1-diff'] = final_df['week1-pred'] - final_df['week1-act']\nfinal_df['week1-pct'] = final_df['week1-diff']/final_df['week1-pred']",
"_____no_output_____"
],
[
"# Calculate the metrics\npred_median_error = final_df['week1-pct'].median()\n\nprint('Median Error - %.4f%%' % (pred_median_error * 100))",
"Median Error - 53.6584%\n"
],
[
"final_df.head(50)",
"_____no_output_____"
],
[
"# Save the results to .csv file\nfinal_df.to_csv('/content/week1-pred-offense-norookies.csv', index=False)",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d0ed87e01826b707791deb0ee5d1675a40eb8d29 | 25,093 | ipynb | Jupyter Notebook | Machine Learning Basics/DSA Preparatory Notebook 1.ipynb | Jarvin-M/DataScience-Africa-2018 | 80579fe46793ad49d4149e9dda702ef506d1b683 | [
"MIT"
] | null | null | null | Machine Learning Basics/DSA Preparatory Notebook 1.ipynb | Jarvin-M/DataScience-Africa-2018 | 80579fe46793ad49d4149e9dda702ef506d1b683 | [
"MIT"
] | null | null | null | Machine Learning Basics/DSA Preparatory Notebook 1.ipynb | Jarvin-M/DataScience-Africa-2018 | 80579fe46793ad49d4149e9dda702ef506d1b683 | [
"MIT"
] | null | null | null | 44.100176 | 9,732 | 0.727255 | [
[
[
"",
"_____no_output_____"
],
[
"### Instructions",
"_____no_output_____"
],
[
"1. Make sure you are using a version of notebook greater than v.3. If you installed Anaconda with python 3 - this is likely to be fine. The next piece of code will check if you have the right version.\n2. The notebook has both some open test cases that you can use to test the functionality of your code - however it will be run on another set of test cases that you can't from which marks will be awarded. So passing all the tests in this notebook is not a guarantee that you have done things correctly - though its highly probable.\n3. Also make sure you submit a notebook that doesn't return any errors. One way to ensure this is to run all the cells before you submit the notebook.\n4. When you are done create a zip file of your notebook and upload that\n5. For each cell where you see \"YOUR CODE HERE\" delete the return notImplemented statement when you write your code there - don't leave it in the notebook.\n6. Once you are done, you are done.",
"_____no_output_____"
],
[
"# DSA 2018 Nyeri Preparatory Notebook\nBy Ciira Maina\n\n\nIn preparation for DSA 2018 Nyeri, we would like potential participants to complete a number of exercises in probability, machine learning and programming to ensure that they have the necessary prerequisite knowledge to attend the summer school. You will be required to submit notebooks with solutions to these exercises during the application process.\n\nIn this first exercise we will require you to download a dataset and perform computations on the data. These data are from a paper in 1966 by Cox and Lewis and report the time difference between nerve pulses on a nerve fibre. 799 observations are reported. These data are used for some examples in the text [\"All of Statistics\"](http://www.stat.cmu.edu/~larry/all-of-statistics/) by Larry Wasserman.\n\nThe data are available [here](http://www.stat.cmu.edu/~larry/all-of-statistics/=data/nerve.dat)",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\n%matplotlib inline\nimport urllib.request\nimport numpy as np",
"_____no_output_____"
]
],
[
[
"## Obtain the data\n\nWrite code to obtain the data from the website above and store it in a one dimensional array of floating point numbers.",
"_____no_output_____"
]
],
[
[
"nerve_data_url='http://www.stat.cmu.edu/~larry/all-of-statistics/=data/nerve.dat'\ndef read_data(url):\n # Read in data from url and return 1-D array\n fromUrl = urllib.request.urlopen(url).read().decode('utf-8')\n fromUrl = fromUrl.split('\\r\\n')\n \n newlist=[]\n for x in fromUrl:\n newlist.append(x.split('\\t'))\n \n finalist=[] \n for i in newlist:\n for x in i:\n if x != '':\n finalist.append(float(x))\n \n return np.array(finalist)",
"_____no_output_____"
],
[
"nerve_data = read_data(nerve_data_url)\nassert len(nerve_data) == 799",
"_____no_output_____"
]
],
[
[
"## Preliminary Visualisation\n\nPlot a histogram of the data. Ensure you label your axes.",
"_____no_output_____"
]
],
[
[
"plt.hist(nerve_data)\nplt.xlabel('Nerve Pulses')\nplt.ylabel('Frequency of occurence')\nplt.show()\n",
"_____no_output_____"
]
],
[
[
"## Preliminary analysis\n\nThe cumulative distribution function of a random variable $\\mathbf{X}$ is given by\n\n$\n\\begin{equation}\nF_X(x)=P(\\mathbf{X}\\leq x)\n\\end{equation}$\n\nIf we obtain $n$ observations $X_1,\\ldots,X_n$ from this distribution, the empirical distibution function is given by\n\n$\n\\begin{equation}\n\\hat{F}_n(x)=\\frac{\\sum_{i=1}^n\\mathbf{I}(X_i\\leq x)}{n}\n\\end{equation}$\n\nwhere\n$\n \\begin{equation}\n \\mathbf{I}(X_i\\leq x) = \n \\begin{cases}\n 1 & \\text{if $X_i\\leq x $} \\\\\n 0 & \\text{if $X_i> x$} \n \\end{cases}\n \\end{equation}\n$\n* Plot the empirical distribution function of the nerve pulse data\n* Estimate the probability that the wait time between nerve pulses is less than $0.3$ - We will call this P1\n* Estimate the probability that the wait time between nerve pulses is between $0.1$ and $0.3$ - We will call this P2\n\n",
"_____no_output_____"
],
[
"Given a random variable X obtain the empirical distribution of a given set of data",
"_____no_output_____"
]
],
[
[
"def cdf(X, data):\n # Return a vector the size of X representing the CDF\n # YOUR CODE HERE\n \n raise NotImplementedError()",
"_____no_output_____"
]
],
[
[
"Plot the empirical distribution function of the nerve pulse data",
"_____no_output_____"
]
],
[
[
"X=np.linspace(0,np.max(nerve_data),100)\n# YOUR CODE HERE\nraise NotImplementedError()",
"_____no_output_____"
]
],
[
[
"Estimate the probability that the wait time between nerve pulses is less than 0.3. Hint: refer to the previous fomula for the cummulative distribution",
"_____no_output_____"
]
],
[
[
"def prob_x(x, data):\n # YOUR CODE HERE\n raise NotImplementedError()",
"_____no_output_____"
],
[
"P1 = prob_x(0.3, nerve_data)\nassert abs(P1-0.760951188986) < 1e-6",
"_____no_output_____"
]
],
[
[
"Estimate the probability that the wait time between nerve pulses is between 0.1 and 0.3",
"_____no_output_____"
]
],
[
[
"def prob_xy(x,y,data):\n # Return probability of wait time between x, and y\n # YOUR CODE HERE\n raise NotImplementedError()",
"_____no_output_____"
],
[
"P2 = prob_xy(0.1,0.3,nerve_data)\nassert abs(P2-0.377972465582) < 1e-6",
"_____no_output_____"
]
],
[
[
"## Estimating properties of the distribution\n\nWe can estimate properties of the true distribution of the data $F_X(x)$ using the empirical distribution function $\\hat{F}_n(x)$. To do this we can use \"plug in\" estimators. Here we will estimate the mean, variance and skewness. The expressions for the \"plug in\" estimators for these quantities are\n* Mean: $\\hat{\\mu}=\\frac{1}{n}\\sum_{i=1}^nX_i$\n* Variance: $\\hat{\\sigma}^2=\\frac{1}{n}\\sum_{i=1}^n(X_i-\\hat{\\mu})^2$\n* Skewness: $\\hat{\\kappa}=\\frac{\\frac{1}{n}\\sum_{i=1}^n(X_i-\\hat{\\mu})^3}{\\hat{\\sigma}^3}$\n\nCompute the plug in estimators of the mean, variance and skewness for the nerve pulse wait time data.",
"_____no_output_____"
]
],
[
[
"def dist_properties(data):\n # Return the mean, variance, skewness of the distribution\n # YOUR CODE HERE\n raise NotImplementedError()",
"_____no_output_____"
],
[
"mu, var, kappa = dist_properties(nerve_data)\nassert np.round(mu,3) == 0.219\nassert np.round(var,3) == 0.044\nassert np.round(kappa,3) == 1.761",
"_____no_output_____"
]
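,
[
"# Editor's sketch: cross-check the plug-in estimators against library routines.\n# np.var defaults to ddof=0 and scipy.stats.skew to bias=True, which match the\n# plug-in definitions above (assumes scipy is installed).\nfrom scipy.stats import skew\nprint(np.mean(nerve_data), np.var(nerve_data), skew(nerve_data))",
"_____no_output_____"
]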
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
d0ed916229a46214cc29b185fca4aa1af8845fe2 | 1,011 | ipynb | Jupyter Notebook | Project_Euler-Problem_7.ipynb | SmirnovAnton/project-euler-solutions | 31d69fa2356b81395bf9267025da9d3d09e313cf | [
"Apache-2.0"
] | null | null | null | Project_Euler-Problem_7.ipynb | SmirnovAnton/project-euler-solutions | 31d69fa2356b81395bf9267025da9d3d09e313cf | [
"Apache-2.0"
] | null | null | null | Project_Euler-Problem_7.ipynb | SmirnovAnton/project-euler-solutions | 31d69fa2356b81395bf9267025da9d3d09e313cf | [
"Apache-2.0"
] | null | null | null | 18.381818 | 109 | 0.499505 | [
[
[
"\"\"\"\nBy listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can see that the 6th prime is 13.\n\nWhat is the 10 001st prime number?\"\"\"\n\nfrom sympy import prime\n\nprime(10001)",
"_____no_output_____"
]
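,
[
"# Editor's sketch (assumes you prefer not to depend on sympy): the same answer\n# computed with simple trial division.\ndef nth_prime(n):\n    count, candidate = 0, 1\n    while count < n:\n        candidate += 1\n        if all(candidate % p for p in range(2, int(candidate**0.5) + 1)):\n            count += 1\n    return candidate\n\nnth_prime(10001)",
"_____no_output_____"
]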
]
] | [
"code"
] | [
[
"code"
]
] |
d0ed9bd6a5807bf393e5333f5c1dd6912211418a | 13,290 | ipynb | Jupyter Notebook | itertool.ipynb | tourloukisg/Python_Tutorial | a5a05e38aa88a0d3878fd6530f9952002755e0ae | [
"MIT"
] | null | null | null | itertool.ipynb | tourloukisg/Python_Tutorial | a5a05e38aa88a0d3878fd6530f9952002755e0ae | [
"MIT"
] | null | null | null | itertool.ipynb | tourloukisg/Python_Tutorial | a5a05e38aa88a0d3878fd6530f9952002755e0ae | [
"MIT"
] | null | null | null | 30.204545 | 683 | 0.500828 | [
[
[
"# Itertools\n# product --> Returns the Cartesian product of iterables such as lists\n\nfrom itertools import product\n\nlst_a=[2,4]\nlst_b=[3,6]\nprint('List a -->',lst_a)\nprint('List b -->',lst_b)\nab=product(lst_a,lst_b)\nprint('Returns a product object -->',type(ab))\nlst_ab=list(ab)\nprint('Returns a list -->',type(lst_ab))\nprint('Product of the two lists -->',lst_ab)",
"List a --> [2, 4]\nList b --> [3, 6]\nReturns a product object --> <class 'itertools.product'>\nReturns a list --> <class 'list'>\nProduct of the two lists --> [(2, 3), (2, 6), (4, 3), (4, 6)]\n"
],
[
"# Itertools\n# product(repeat=x) --> Returns the product of an iterable with itself x times\n\nfrom itertools import product\n\nlst_a=['a']\nlst_b=['b','c']\nprint('List a -->',lst_a)\nprint('List b -->',lst_b)\nprint('\\r')\n# repeat=1\nab=product(lst_a,lst_b,repeat=1)\nlst_ab=list(ab)\nprint('Product (repeat=1) -->',lst_ab)\n\n# repeat=2\nab=product(lst_a,lst_b,repeat=2)\nlst_ab=list(ab)\nprint('Product (repeat=2) -->',lst_ab)\n\n#repeat=3\nprint('\\r')\na=[0,1]\nprint('List a -->',a)\np=product(a,repeat=3)\nlst_p=list(p)\nprint('Product (repeat=3) -->',lst_p)",
"List a --> ['a']\nList b --> ['b', 'c']\n\nProduct (repeat=1) --> [('a', 'b'), ('a', 'c')]\nProduct (repeat=2) --> [('a', 'b', 'a', 'b'), ('a', 'b', 'a', 'c'), ('a', 'c', 'a', 'b'), ('a', 'c', 'a', 'c')]\n\nList a --> [0, 1]\nProduct (repeat=3) --> [(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1)]\n"
],
[
"# Itertools Infinite Iterators\n# count(n) --> Returns consecutive values (start= number n, end = infinite if stop condition is not specified)\n\nfrom itertools import count\n\nfor x in count(1):\n print(x,end=' ')\n if x==10:\n break\n",
"1 2 3 4 5 6 7 8 9 10 "
],
[
"# Itertools Infinite Iterators\n# cycle(iterable) --> Cycling through an iterable (i.e.list). Infinite loops if stop condition is not specified)\n\nfrom itertools import cycle\nc=[0,1,2,3,4]\nlst=[]\nfor x in cycle(c):\n print(x,end=' ')\n lst.append(x)\n if len(lst)==12:\n break",
"0 1 2 3 4 0 1 2 3 4 0 1 "
],
[
"# Itertools Infinite Iterators\n# repeat (object/iterable,times) --> Repeats the elements of an iterable/object/value\n# repeat() runs indefinitely unless a times argument value is selected\n\nfrom itertools import repeat\nc=[0,1,2,3,4]\n\nfor x in repeat(c,times=3):\n print(x)\n ",
"[0, 1, 2, 3, 4]\n[0, 1, 2, 3, 4]\n[0, 1, 2, 3, 4]\n"
],
[
"# Itertools\n# permutations(iterable,r) --> Returns all possible combinations (length=r) of the elements in an iterable\n\nfrom itertools import permutations\n\nlst=[0,1,2]\nprint('List -->',lst)\nprint('\\r')\n# length r=None\nres=list(permutations(lst))\nprint('List elements permutations (length r= None) -->',res)\n\n# length r=2\nres2=list(permutations(lst,r=2))\nprint('List elements permutations (length r= 2) -->',res2)",
"List --> [0, 1, 2]\n\nList elements permutations (length r= None) --> [(0, 1, 2), (0, 2, 1), (1, 0, 2), (1, 2, 0), (2, 0, 1), (2, 1, 0)]\nList elements permutations (length r= 2) --> [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)]\n"
],
[
"# Itertools\n# combinations(iterable,r) --> Returns iterable elements subsequences of length r (sorted order based on index)\n# where the iterable elements are not repeated in the subsequences\nfrom itertools import combinations\n\nlst=[0,1,2,3]\nprint('List -->',lst)\nprint('\\r')\n# length r=4\nres=list(combinations(lst,4))\nprint('List elements combinations (length r= 4) -->',res)\n\n# length r=3\nres1=list(combinations(lst,3))\nprint('List elements combinations (length r= 3) -->',res1)\n\n# length r=2\nres2=list(combinations(lst,2))\nprint('List elements combinations (length r= 2) -->',res2)\nprint('\\r')\n\n# string - length r=2\nstring='car'\nprint('String -->',string)\nprint('\\r')\n# length r=4\nres_s=tuple(combinations(string,2))\nprint('String elements combinations (length r= 2) -->',res_s)",
"List --> [0, 1, 2, 3]\n\nList elements combinations (length r= 4) --> [(0, 1, 2, 3)]\nList elements combinations (length r= 3) --> [(0, 1, 2), (0, 1, 3), (0, 2, 3), (1, 2, 3)]\nList elements combinations (length r= 2) --> [(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]\n\nString --> car\n\nString elements combinations (length r= 2) --> (('c', 'a'), ('c', 'r'), ('a', 'r'))\n"
],
[
"# Itertools\n# combinations_with_replacement(iterable,r) --> Returns iterable elements subsequences of length r (sorted \n# order based on index) where thr iterable elements are repeated in the subsequences\n\nfrom itertools import combinations,combinations_with_replacement\n\nlst=[0,1,2]\nprint('List -->',lst)\nprint('\\r')\n\n# combinations \nres=list(combinations(lst,2))\nprint('List elements combinations (length r= 2) -->',res)\n\n# combinations with replacement\nres1=list(combinations_with_replacement(lst,2))\nprint('List elements combinations with replacement (length r= 2) -->',res1)",
"List --> [0, 1, 2]\n\nList elements combinations (length r= 2) --> [(0, 1), (0, 2), (1, 2)]\nList elements combinations with replacement (length r= 2) --> [(0, 0), (0, 1), (0, 2), (1, 1), (1, 2), (2, 2)]\n"
],
[
"# Itertools\n# accumulate(iterable) --> Returns accumulated sums (running totals) \n\nfrom itertools import accumulate\n\nlst=[0,1,2,3]\nprint('List -->',lst)\nprint('\\r')\n\n# accumulate \nres=list(accumulate(lst))\nprint('Accumulated Sums -->',res)",
"List --> [0, 1, 2, 3]\n\nAccumulated Sums --> [0, 1, 3, 6]\n"
],
[
"# Itertools\n# accumulate(iterable,func=operator.x) -->where x =(add,sub,mul,truediv,floordiv etc.),default value = add\n\nfrom itertools import accumulate\nimport operator\nlst=[6,4,2,1]\nprint('List -->',lst)\nprint('\\r')\n\n# operator.add --> default\nres=list(accumulate(lst,func=operator.add))\nprint('Operator.add -->',res)\n\n# operator.sub \nres1=list(accumulate(lst,func=operator.sub))\nprint('Operator.sub -->',res1)\n\n# operator.mul \nres2=list(accumulate(lst,func=operator.mul))\nprint('Operator.mul -->',res2)\n\n# operator.truediv \nres3=list(accumulate(lst,func=operator.truediv))\nprint('Operator.truediv -->',res3)\n\n# operator.floordiv \nres4=list(accumulate(lst,func=operator.floordiv))\nprint('Operator.floordiv -->',res4)\n",
"List --> [6, 4, 2, 1]\n\nOperator.add --> [6, 10, 12, 13]\nOperator.sub --> [6, 2, 0, -1]\nOperator.mul --> [6, 24, 48, 48]\nOperator.truediv --> [6, 1.5, 0.75, 0.75]\nOperator.floordiv --> [6, 1, 0, 0]\n"
],
[
"# Itertools\n# accumulate(iterable,func=x) -->where x =(min,max)\n\nfrom itertools import accumulate\nimport operator\nlst=[2,5,1,8,4]\nprint('List -->',lst)\nprint('\\r')\n\n# func=min\nres=list(accumulate(lst,func=min))\nprint('func=min -->',res)\n\n# func=max \nres1=list(accumulate(lst,func=max))\nprint('func=max -->',res1)",
"List --> [2, 5, 1, 8, 4]\n\nfunc=min --> [2, 2, 1, 1, 1]\nfunc=max --> [2, 5, 5, 8, 8]\n"
],
[
"# Itertools\n# groupby(iterable,key) --> Takes a) an iterable (list,dict) and b) a key that is a function that determines # the keys for each iterable element. Groupby returns consecutive keys & groups from the selected iterable.\n\nfrom itertools import groupby\n\nlst=[1,2,3,4,5,6,7,8,9,10]\nprint('List -->',lst)\nprint('\\r')\n \n# groupby --> 1st Example\nprint('List elements greater than 5:\\n')\nres=groupby(lst,key=lambda x:x>5)\nfor k,v in res:\n print(k,list(v))\n\n# groupby -->2nd Example\nprint('\\r')\ncars=[{'make':'Chevrolet','year':2017},{'make':'Honda','year':2017},{'make':'GMC','year':2018}\n,{'make':'Honda','year':2019},{'make':'BMW','year':2020},{'make':'Nissan','year':2020}]\nprint('---------------------------------------------------------------------------')\nprint('Car Make & Year:\\n')\nprint(cars)\nprint('\\r')\nprint('Group cars by year:\\n')\nres1=groupby(cars,key=lambda x:x['year'])\nfor k,v in res1:\n print(k,list(v))\n",
"List --> [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n\nList elements greater than 5:\n\nFalse [1, 2, 3, 4, 5]\nTrue [6, 7, 8, 9, 10]\n\n---------------------------------------------------------------------------\nCar Make & Year:\n\n[{'make': 'Chevrolet', 'year': 2017}, {'make': 'Honda', 'year': 2017}, {'make': 'GMC', 'year': 2018}, {'make': 'Honda', 'year': 2019}, {'make': 'BMW', 'year': 2020}, {'make': 'Nissan', 'year': 2020}]\n\nGroup cars by year:\n\n2017 [{'make': 'Chevrolet', 'year': 2017}, {'make': 'Honda', 'year': 2017}]\n2018 [{'make': 'GMC', 'year': 2018}]\n2019 [{'make': 'Honda', 'year': 2019}]\n2020 [{'make': 'BMW', 'year': 2020}, {'make': 'Nissan', 'year': 2020}]\n"
]
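,
[
"# Itertools\n# Editor's note: groupby only groups *consecutive* elements, so an unsorted\n# iterable can yield the same key more than once. Sort by the key first if you\n# want a single group per key.\nfrom itertools import groupby\n\ndata=[1,2,1,2]\nprint('unsorted -->',[(k,list(v)) for k,v in groupby(data)])\nprint('sorted -->',[(k,list(v)) for k,v in groupby(sorted(data))])",
"_____no_output_____"
]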
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d0ed9c4864014bc618430fd52afb796c51a138d5 | 131 | ipynb | Jupyter Notebook | .ipynb_checkpoints/local-checkpoint.ipynb | rgilman33/obs-tower | 895faff27f0bfcd7beb3f0f53047467c567106c5 | [
"Apache-2.0"
] | null | null | null | .ipynb_checkpoints/local-checkpoint.ipynb | rgilman33/obs-tower | 895faff27f0bfcd7beb3f0f53047467c567106c5 | [
"Apache-2.0"
] | 2 | 2021-10-12T22:04:31.000Z | 2021-10-12T22:50:08.000Z | local.ipynb | rgilman33/obs-tower | 895faff27f0bfcd7beb3f0f53047467c567106c5 | [
"Apache-2.0"
] | null | null | null | 32.75 | 75 | 0.885496 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
d0edb58528bf87c411e46b8d182ec8a9e35584d9 | 75,567 | ipynb | Jupyter Notebook | data-submission-2020-05-19/Moonshot submission 5-19.ipynb | vvoelz/covid-FAH-CPU | 1b22f0ac046d37fdcbf7c2b1b476abd35eb162c5 | [
"MIT"
] | 1 | 2020-04-16T05:10:33.000Z | 2020-04-16T05:10:33.000Z | data-submission-2020-05-19/Moonshot submission 5-19.ipynb | vvoelz/covid-FAH-CPU | 1b22f0ac046d37fdcbf7c2b1b476abd35eb162c5 | [
"MIT"
] | 7 | 2020-03-16T16:14:28.000Z | 2020-05-16T16:05:18.000Z | data-submission-2020-05-19/Moonshot submission 5-19.ipynb | vvoelz/covid-FAH-CPU | 1b22f0ac046d37fdcbf7c2b1b476abd35eb162c5 | [
"MIT"
] | null | null | null | 42.960205 | 123 | 0.388 | [
[
[
"### As before, let's find the set of compounds for which both simulations and experimental measurements exist\n\nMatt Robinson posted a `moonshot_initial_activity_data.csv` file of the initial activity data:",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\ndf_activity = pd.read_csv('../data-release-2020-05-10/moonshot_initial_activity_data.csv')\n\n# Find all that have IC50 data\nIC50_measured = pd.notnull(df_activity[\"IC50 (µM)\"]) \ndf_activity[IC50_measured]",
"_____no_output_____"
],
[
"# Translate the new IDs back to the old IDs so we can find them in our results\n\n## make a translation table\nall_df = pd.read_csv(\"https://covid.postera.ai/covid/submissions.csv\")\nnew_CID_list = list(all_df.CID)\nold_CID_list = list(all_df.old_CID)\nnew2old_CID = {}\nold2new_CID = {}\n\nfor i in range(len(new_CID_list)):\n new2old_CID[new_CID_list[i]] = old_CID_list[i]\n old2new_CID[old_CID_list[i]] = new_CID_list[i]\n \nfor s in df_activity[IC50_measured].CID:\n print(s, '-->', new2old_CID[s])",
"TRY-UNI-714a760b-6 --> TRY-UNI-714-6\nTRY-UNI-714a760b-22 --> TRY-UNI-714-22\nTRY-UNI-714a760b-20 --> TRY-UNI-714-20\nJAN-GHE-83b26c96-4 --> None-4\nTRY-UNI-714a760b-12 --> TRY-UNI-714-12\nMAT-POS-916a2c5a-3 --> MAT-POS-916-3\nMAT-POS-916a2c5a-2 --> MAT-POS-916-2\nMAT-POS-916a2c5a-1 --> MAT-POS-916-1\nLON-WEI-b8d98729-39 --> LON-WEI-b8d-39\nLON-WEI-b8d98729-38 --> LON-WEI-b8d-38\nLON-WEI-b8d98729-27 --> LON-WEI-b8d-27\nLON-WEI-b8d98729-23 --> LON-WEI-b8d-23\nLON-WEI-b8d98729-17 --> LON-WEI-b8d-17\nAAR-POS-d2a4d1df-23 --> None-23\nAAR-POS-d2a4d1df-22 --> None-22\nAAR-POS-d2a4d1df-32 --> None-32\nDAR-DIA-23aa0b97-20 --> DAR-DIA-23a-20\nDAR-DIA-23aa0b97-19 --> DAR-DIA-23a-19\n"
],
[
"## Are THESE in the latest results pkl???\n\n# df_results = pd.read_pickle('master_results_WL0.12_051820.pkl') # these have covalent warheads in them\ndf_results = pd.read_pickle('master_results_WL0.12_051920.pkl')\n\nfor s in df_activity[IC50_measured].CID:\n df_hits = df_results[df_results.identity.str.contains(new2old_CID[s])]\n if len(df_hits) > 0:\n print(s, '<--', new2old_CID[s])\n print(df_hits)\n print('\\n##########\\n\\n')\n ",
"DAR-DIA-23aa0b97-20 <-- DAR-DIA-23a-20\n dataset fah identity receptor \\\n139 MS0326_v3 PROJ14824/RUN1748 DAR-DIA-23a-20 Mpro-x1249-protein.pdb \n\n score febkT error ns_RL ns_L wl_RL \\\n139 -5.55803 -7.556216 2.262881 [24] [440, 490, 400, 430, 450] [0.11806] \n\n L_error RL_error \n139 0.125419 3.469447e-17 \n\n##########\n\n\nDAR-DIA-23aa0b97-19 <-- DAR-DIA-23a-19\n dataset fah identity receptor \\\n201 MS0326_v3 PROJ14824/RUN1737 DAR-DIA-23a-19 Mpro-x0104-protein.pdb \n\n score febkT error ns_RL ns_L wl_RL \\\n201 -8.744043 -9.805734 2.365252 [68] [410, 460, 470, 450, 440] [0.11806] \n\n L_error RL_error \n201 0.198033 1.387779e-17 \n\n##########\n\n\n"
],
[
"# Let's look at our current ranking:\n\ndf_results",
"_____no_output_____"
],
[
"top10_indices = df_results.index[0:10]\nfor i in range(len(top10_indices)):\n index = top10_indices[i]\n oldID = df_results.loc[index].identity\n if oldID.count('ÁLV') > 0:\n oldID = oldID.replace('ÁLV','ALV')\n try:\n newID = old2new_CID[oldID]\n except:\n newID = ''\n print('rank:', i+1, 'oldID:', oldID, 'newID:', newID, df_results.loc[index].dataset, df_results.loc[index].fah)\n",
"rank: 1 oldID: NIM-UNI-36e-3 newID: NIM-UNI-36e12f95-3 MS0323_v3 PROJ14822/RUN127\nrank: 2 oldID: JON-UIO-066-14 newID: JON-UIO-066ce08b-14 MS0326_v3 PROJ14824/RUN2448\nrank: 3 oldID: CHR-SOS-709-10 newID: CHR-SOS-7098f804-10 MS0406-2_v3 PROJ14827/RUN360\nrank: 4 oldID: LIZ-THE-f11-1 newID: LIZ-THE-f118233e-1 MS0326_v2 PROJ14723/RUN404\nrank: 5 oldID: ALV-UNI-7ff-36 newID: MS0326_v2 PROJ14723/RUN2963\nrank: 6 oldID: TRY-UNI-714-16 newID: TRY-UNI-714a760b-16 MS0326_v3 PROJ14824/RUN189\nrank: 7 oldID: ALV-UNI-7ff-43 newID: MS0326_v3 PROJ14824/RUN19\nrank: 8 oldID: BEN-VAN-d8f-12 newID: BEN-VAN-d8fd1356-12 MS0326_v3 PROJ14823/RUN713\nrank: 9 oldID: ALE-HEI-f28-17 newID: ALE-HEI-f28a35b5-17 MS0326_v3 PROJ14823/RUN403\nrank: 10 oldID: CHR-SOS-709-6 newID: CHR-SOS-7098f804-6 MS0323_v3 PROJ14822/RUN454\n"
]
],
[
[
"## Top 10 profiles\n\n### \\# 1 NIM-UNI-36e-3 NIM-UNI-36e12f95-3\n\nhttps://covid.postera.ai/covid/submissions/36e12f95-0811-4857-8bc6-a4aee0788f1c/3\n<img src=\"https://covid.postera.ai/synthesize/CC(=O)c1ccc(Br)c2%5BnH%5Dc(=O)n(-c3cccnc3)c12\">\n<img src=\"http://yabmtm.hopto.org:31415/MS0323/plots/MS0323_v3_1-500_p14822_127_19May2020.png\">\n\n### \\# 2 JON-UIO-066-14 JON-UIO-066ce08b-14 MS0326_v3 PROJ14824/RUN2448\n\nhttps://covid.postera.ai/covid/submissions/066ce08b-1104-439d-946f-d7c319de995c/14\n\n<img src=\"https://covid.postera.ai/synthesize/C%5BC@H%5D(NC(=O)C(F)F)c1cccc(F)c1\">\n\n<img src=\"http://yabmtm.hopto.org:31415/MS0326/plots/MS0326_v3_3000-5538_p14824_2448_19May2020.png\">\n\n\n### \\# 3 CHR-SOS-709-10 CHR-SOS-7098f804-10\n\n\nhttps://covid.postera.ai/covid/submissions/7098f804-b66c-4fb6-89f4-8e4e0c78a7cb/10\n<img src=\"https://covid.postera.ai/synthesize/O=C(Nc1cnccc1Cl)c1cc(Cl)ccc1O\">\n<img src=\"http://yabmtm.hopto.org:31415/MS0406-2/plots/MS0406-2_v3_0-2999_p14827_360_19May2020.png\">\n\n\n### \\# 4 LIZ-THE-f11-1 newID: LIZ-THE-f118233e-1 MS0326_v2 PROJ14723/RUN404\n\nhttps://covid.postera.ai/covid/submissions/7023c732-4bbd-4499-a930-9b1b18b131ec/1\n\n<img src=\"https://covid.postera.ai/synthesize/CNc1ncc(C%23N)cc1Oc1ccccc1\">\n\n\n### \\# 5 ALV-UNI-7ff-36 newID: MS0326_v2 PROJ14723/RUN2963\n\nhttps://covid.postera.ai/covid/submissions/7ff1a6f9-745f-4b82-81e0-c1d353ea5dfe/36\n\n<img src=\"https://covid.postera.ai/synthesize/Cc1cc(-c2c(-c3ccc(F)cc3)nn3nc(C)ccc23)%5BnH%5Dn1\">\n<img src=\"http://yabmtm.hopto.org:31415/MS0326/plots/MS0326_v2_1-3000_p14723_2963_19May2020.png\">\n\n### \\# 6 TRY-UNI-714-16 newID: TRY-UNI-714a760b-16 MS0326_v3 PROJ14824/RUN189\n\nhttps://covid.postera.ai/covid/submissions/714a760b-0e02-4b09-8736-f27f854f8c22/16\n<img src=\"https://covid.postera.ai/synthesize/Cc1ccncc1NC(=O)C(C)C1CCCCC1\">\n<img src=\"http://yabmtm.hopto.org:31415/MS0326/plots/MS0326_v3_3000-5538_p14824_189_19May2020.png\">\n\n### \\#7 ALV-UNI-7ff-43 newID: MS0326_v3 PROJ14824/RUN19\n\nhttps://covid.postera.ai/covid/submissions/7ff1a6f9-745f-4b82-81e0-c1d353ea5dfe/43\n<img src=\"https://covid.postera.ai/synthesize/Cc1cn2c(-c3cccnc3)c(-c3ccc(F)cc3)nc2s1\">\n<img src=\"http://yabmtm.hopto.org:31415/MS0326/plots/MS0326_v3_300\">\n\n### \\#8 BEN-VAN-d8f-12 BEN-VAN-d8fd1356-12 MS0326_v3 PROJ14823/RUN713\n\nhttps://covid.postera.ai/covid/submissions/d8fd1356-48a3-47db-b12f-ee2f1a630081/12\n<img src=\"https://covid.postera.ai/synthesize/CNc1c%5BnH%5Dc2c(Oc3cc(C)c(Br)cn3)c(Cl)c(F)cc12\">\n<img src=\"http://yabmtm.hopto.org:31415/MS0326/plots/MS0326_v3_1-3000_p14823_713_19May2020.png\">\n\n### \\#9 ALE-HEI-f28-17 ALE-HEI-f28a35b5-17 MS0326_v3 PROJ14823/RUN403\n\nhttps://covid.postera.ai/covid/submissions/f28a35b5-9f3e-4135-a6b4-7ce39ba4980a/17\n<img src=\"https://covid.postera.ai/synthesize/Cc1ccncc1NC(=O)N1CCN(C)CC1\">\n<img src=\"http://yabmtm.hopto.org:31415/MS0326/plots/MS0326_v3_1-3000_p14823_403_19May2020.png\">\n\n### \\#10 CHR-SOS-709-6 CHR-SOS-7098f804-6 MS0323_v3 PROJ14822/RUN454\n\nhttps://covid.postera.ai/covid/submissions/7098f804-b66c-4fb6-89f4-8e4e0c78a7cb/6\n<img src=\"https://covid.postera.ai/synthesize/O=C(Nc1ccc(%5BN+%5D(=O)%5BO-%5D)cc1)c1ccccc1\">\n<img src=\"http://yabmtm.hopto.org:31415/MS0323/plots/MS0323_v3_1-500_p14822_454_19May2020.png\">\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
d0edc7dc5a44d94393173409d8059efb1d725f4b | 35,961 | ipynb | Jupyter Notebook | dataprep/model_one_policy_variables.ipynb | braadbaart/macroeconomics | 4ab019b2bbb78c337a1ab940c3412293097a2a1e | [
"MIT"
] | 1 | 2021-09-07T22:42:38.000Z | 2021-09-07T22:42:38.000Z | dataprep/model_one_policy_variables.ipynb | braadbaart/macroeconomics | 4ab019b2bbb78c337a1ab940c3412293097a2a1e | [
"MIT"
] | null | null | null | dataprep/model_one_policy_variables.ipynb | braadbaart/macroeconomics | 4ab019b2bbb78c337a1ab940c3412293097a2a1e | [
"MIT"
] | null | null | null | 35.78209 | 289 | 0.4105 | [
[
[
"## Model one policy variables\n\nThis notebook extracts the selected policy variables in the `indicator_list` from IMF and World Bank (wb) data sources, and writes them to a csv file.",
"_____no_output_____"
]
],
[
[
"import warnings\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n%matplotlib inline",
"_____no_output_____"
],
[
"warnings.filterwarnings('ignore')\npd.options.display.float_format = '{:20,.2f}'.format",
"_____no_output_____"
]
],
[
[
"| variable | origin | source |granularity|countries| description | composition |\n| --------------------------|-------------------|-------------|-----------|---------|-------------------------------------------------------------|-------------------------------------------------------------------|\n| total debt service | - | wb econ | yearly | 217 | Total debt service (% of GNI) | - |\n| interest payments | - | wb econ | yearly | 217 | Interest payments on external debt (% of GNI) | - |\n| lending interest rate | - | wb econ | yearly | 217 | Lending interest rate (%) | - |\n| firms using banks | - | wb econ | yearly | 217 | Firms using banks to finance investment (% of firms) | - |\n| bank capital ratio | - | wb econ | yearly | 217 | Bank capital to assets ratio (%) | - |\n| tax revenue gdp share | - | wb econ | yearly | 217 | Tax revenue (% of GDP) | - |\n| short term debt | - | wb econ | yearly | 217 | Short-term debt (% of total external debt) | - |\n| inflation | - | wb econ | yearly | 217 | Inflation, GDP deflator (annual %) | - |\n| GDP growth | - | wb econ | yearly | 217 | GDP growth (annual %) | - |\n| real interest rate | - | wb econ | yearly | 217 | Real interest rate (%) | - |\n| firm market cap | - | wb econ | yearly | 217 | Market capitalization of listed domestic companies (% of GDP) | - |\n| GDP per capita growth | - | wb econ | yearly | 217 | GDP per capita growth (annual %) | - |\n| GDP | - | wb econ | yearly | 217 | GDP (constant 2010 USD) | - |\n| GNI growth | - | wb econ | yearly | 217 | GNI growth (annual %) | - |\n| interest payments | - | wb econ | yearly | 217 | Interest payments (% of expense) | - |\n| nonperforming bank loans | - | wb econ | yearly | 217 | Bank nonperforming loans to total gross loans (%) | - |\n| savings | - | wb econ | yearly | 217 | Gross domestic savings (% of GDP) | - |\n| gross savings | - | wb econ | yearly | 217 | Gross savings (% of GNI) | - |\n| GNI per capita growth | - | wb econ | yearly | 217 | GNI per capita growth (annual %) | - |\n| employee compensation | - | wb econ | yearly | 217 | Compensation of employees (% of expense) | - |\n| reserves | - | wb econ | yearly | 217 | Total reserves (% of total external debt) | - |\n| broad money | - | wb econ | yearly | 217 | Broad money (% of GDP) | - |\n| GNI | - | wb econ | yearly | 217 | GNI (constant 2010 USD) | - |\n| government debt | - | wb econ | yearly | 217 | Central government debt, total (% of GDP) | - |",
"_____no_output_____"
]
],
[
[
"indicator_list = ['Total debt service (% of GNI)', 'Interest payments on external debt (% of GNI)',\n 'Lending interest rate (%)', 'Firms using banks to finance investment (% of firms)',\n 'Bank capital to assets ratio (%)', 'Tax revenue (% of GDP)', 'Short-term debt (% of total external debt)',\n 'Inflation, GDP deflator (annual %)', 'GDP growth (annual %)', 'Real interest rate (%)',\n 'Market capitalization of listed domestic companies (% of GDP)', 'GDP per capita growth (annual %)',\n 'GDP (constant 2010 US$)', 'GNI growth (annual %)', 'Interest payments (% of expense)',\n 'Bank nonperforming loans to total gross loans (%)', 'Gross domestic savings (% of GDP)',\n 'Gross savings (% of GNI)', 'GNI per capita growth (annual %)', 'Compensation of employees (% of expense)',\n 'Total reserves (% of total external debt)', 'Broad money (% of GDP)', 'GNI (constant 2010 US$)',\n 'Central government debt, total (% of GDP)']",
"_____no_output_____"
],
[
"len(indicator_list)",
"_____no_output_____"
]
],
[
[
"## Load imf monthly data",
"_____no_output_____"
]
],
[
[
"%%bash\nwc -l imf/*.csv",
" 365536 data/imf/BOP_11-25-2018 19-15-19-60_timeSeries.csv\n 64 data/imf/COMMP_11-25-2018 19-13-52-15_timeSeries.csv\n 14430 data/imf/CPI_11-25-2018 19-14-47-26_timeSeries.csv\n 1693 data/imf/FDI_11-20-2018 21-39-31-89_timeSeries.csv\n 1247714 data/imf/GFSR_11-25-2018 19-23-39-70_timeSeries.csv\n 16732 data/imf/IRFCL_11-25-2018 19-13-18-05_timeSeries.csv\n 7846 data/imf/ITS_11-14-2018 15-14-06-02_timeSeries.csv\n 7425 data/imf/PPLT_11-25-2018 19-25-01-32_timeSeries.csv\n 1661440 total\n"
],
[
"time_values = [str('%sM%s' % (y, m)) for m in list(range(1, 13)) for y in list(range(1960, 2018))]\nimf_columns = ['Country Name', 'Indicator Name'] + time_values",
"_____no_output_____"
],
[
"imf_country_aggregates = ['Euro Area']",
"_____no_output_____"
],
[
"def load_imf_monthly(file_name, indicators, imf_columns, country_aggregates):\n csv_df = pd.read_csv('data/imf/%s' % file_name).fillna(0)\n base_df = csv_df.loc[csv_df['Attribute'] == 'Value'].drop(columns=['Attribute'])\n monthly_df = base_df.loc[(base_df['Indicator Name'].isin(indicators))]\n imf_df = monthly_df[imf_columns].fillna(0)\n df = pd.melt(imf_df, id_vars=['Country Name', 'Indicator Name'], var_name='date', value_name='value')\n df['date'] = pd.to_datetime(df['date'], format='%YM%m')\n df.columns = ['country', 'indicator', 'date', 'value']\n return df.loc[~df['country'].isin(country_aggregates)]",
"_____no_output_____"
],
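[
"# Editor's sketch (hypothetical toy frame) of the wide-to-long reshape performed by\n# load_imf_monthly above: melt turns one row per (country, indicator) into one row\n# per (country, indicator, month), then the 'YYYYMmm' labels are parsed as dates.\ntoy = pd.DataFrame({'Country Name': ['A'], 'Indicator Name': ['x'],\n                    '1960M1': [1.0], '1960M2': [2.0]})\nlong_df = pd.melt(toy, id_vars=['Country Name', 'Indicator Name'],\n                  var_name='date', value_name='value')\nlong_df['date'] = pd.to_datetime(long_df['date'], format='%YM%m')\nlong_df",
"_____no_output_____"
],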
[
"imf_pplt_df = load_imf_monthly('PPLT_11-25-2018 19-25-01-32_timeSeries.csv', indicator_list, imf_columns, imf_country_aggregates)",
"_____no_output_____"
],
[
"imf_cpi_df = load_imf_monthly('CPI_11-25-2018 19-14-47-26_timeSeries.csv', indicator_list, imf_columns, imf_country_aggregates)",
"_____no_output_____"
],
[
"imf_df = pd.concat([imf_cpi_df, imf_pplt_df], join='outer')",
"_____no_output_____"
],
[
"imf_df.size",
"_____no_output_____"
],
[
"imf_df.head(15)",
"_____no_output_____"
],
[
"len(imf_df['country'].unique())",
"_____no_output_____"
],
[
"imf_countries = sorted(list(imf_df['country'].unique()))",
"_____no_output_____"
]
],
[
[
"### Load world bank yearly data",
"_____no_output_____"
]
],
[
[
"%%bash\nwc -l world_bank/*.csv",
" 33534 data/world_bank/ECON.csv\n 9589 data/world_bank/HNP.csv\n 38 data/world_bank/HNP_indicator_definitions.csv\n 36174 data/world_bank/POP.csv\n 79335 total\n"
],
[
"wb_country_aggregates = ['nan', 'Lower middle income', 'Post-demographic dividend', 'High income',\n 'Pre-demographic dividend', 'East Asia & Pacific (IDA & IBRD countries)',\n 'Europe & Central Asia (excluding high income)', 'Heavily indebted poor countries (HIPC)',\n 'Caribbean small states', 'Pacific island small states', 'Middle income',\n 'Late-demographic dividend', 'OECD members', 'IDA & IBRD total', 'Not classified', \n 'East Asia & Pacific (excluding high income)',\n 'Latin America & the Caribbean (IDA & IBRD countries)', 'Low income', 'Low & middle income',\n 'IDA blend', 'IBRD only', 'Sub-Saharan Africa (excluding high income)', \n 'Fragile and conflict affected situations', 'Europe & Central Asia (IDA & IBRD countries)',\n 'Euro area', 'Other small states', 'Europe & Central Asia', 'Arab World',\n 'Latin America & Caribbean (excluding high income)', \n 'Sub-Saharan Africa (IDA & IBRD countries)', 'Early-demographic dividend', 'IDA only',\n 'Small states', 'Middle East & North Africa (excluding high income)', 'East Asia & Pacific',\n 'South Asia', 'European Union', 'Least developed countries: UN classification',\n 'Middle East & North Africa (IDA & IBRD countries)', 'Upper middle income',\n 'South Asia (IDA & IBRD)', 'Central Europe and the Baltics', 'Sub-Saharan Africa', \n 'Latin America & Caribbean', 'Middle East & North Africa', 'IDA total', 'North America',\n 'Last Updated: 11/14/2018', 'Data from database: World Development Indicators', 'World']",
"_____no_output_____"
],
[
"wb_cols = ['Country Name', 'Series Name'] + [str('%s [YR%s]' % (y, y)) for y in list(range(1960, 2018))]",
"_____no_output_____"
],
[
"def load_wb_yearly(file_name, indicators, wb_columns, country_aggregates):\n csv_df = pd.read_csv('world_bank/%s' % file_name).fillna(0)\n base_df = csv_df.loc[(csv_df['Series Name'].isin(indicators))]\n wb_df = base_df[wb_columns].fillna(0)\n df = pd.melt(wb_df, id_vars=['Country Name', 'Series Name'], var_name='date', value_name='value')\n df['date'] = pd.to_datetime(df['date'].map(lambda x: int(x.split(' ')[0])), format='%Y')\n df.columns = ['country', 'indicator', 'date', 'value']\n return df.loc[~df['country'].isin(country_aggregates)]",
"_____no_output_____"
],
[
"wb_econ_df = load_wb_yearly('ECON.csv', indicator_list, wb_cols, wb_country_aggregates)",
"_____no_output_____"
],
[
"wb_hnp_df = load_wb_yearly('HNP.csv', indicator_list, wb_cols, wb_country_aggregates)",
"_____no_output_____"
],
[
"wb_pop_df = load_wb_yearly('POP.csv', indicator_list, wb_cols, wb_country_aggregates)",
"_____no_output_____"
],
[
"wb_df = pd.concat([wb_econ_df, wb_hnp_df, wb_pop_df], join='outer')",
"_____no_output_____"
],
[
"wb_df.size",
"_____no_output_____"
],
[
"wb_df.head(15)",
"_____no_output_____"
],
[
"len(wb_df['country'].unique())",
"_____no_output_____"
],
[
"wb_countries = sorted(list(wb_df['country'].unique()))",
"_____no_output_____"
]
],
[
[
"### Combine the two datasets",
"_____no_output_____"
]
],
[
[
"imf_specific = [country for country in imf_countries if country not in wb_countries]",
"_____no_output_____"
],
[
"len(imf_specific)",
"_____no_output_____"
],
[
"imf_to_wb_country_map = {\n 'Afghanistan, Islamic Republic of': 'Afghanistan',\n 'Armenia, Republic of': 'Armenia',\n 'Azerbaijan, Republic of': 'Azerbaijan',\n 'Bahrain, Kingdom of': 'Bahrain',\n 'China, P.R.: Hong Kong': 'Hong Kong SAR, China',\n 'China, P.R.: Macao': 'Macao SAR, China',\n 'China, P.R.: Mainland': 'China',\n 'Congo, Democratic Republic of': 'Congo, Dem. Rep.',\n 'Congo, Republic of': 'Congo, Rep.',\n 'Egypt': 'Egypt, Arab Rep.',\n 'French Territories: New Caledonia': 'New Caledonia',\n 'Iran, Islamic Republic of': 'Iran',\n 'Korea, Republic of': 'Korea, Rep.',\n 'Kosovo, Republic of': 'Kosovo',\n \"Lao People's Democratic Republic\": 'Lao PDR',\n 'Serbia, Republic of': 'Serbia',\n 'Sint Maarten': 'Sint Maarten (Dutch part)',\n 'Timor-Leste, Dem. Rep. of': 'Timor-Leste',\n 'Venezuela, Republica Bolivariana de': 'Venezuela, RB',\n 'Venezuela, República Bolivariana de': 'Venezuela, RB',\n 'Yemen, Republic of': 'Yemen'\n}",
"_____no_output_____"
],
[
"imf_df = imf_df.replace({'country': imf_to_wb_country_map})",
"_____no_output_____"
],
[
"policy_df = pd.concat([wb_df, imf_df], join='outer')",
"_____no_output_____"
],
[
"policy_df.size",
"_____no_output_____"
],
[
"policy_df.head(15)",
"_____no_output_____"
],
[
"indicators = sorted(list(policy_df['indicator'].unique()))",
"_____no_output_____"
],
[
"assert len(indicators) == len(indicator_list), 'The number of retrieved variables (%s) does not match the number of specified variables (%s).\\nThe following variables are missing:\\n\\n %s' % (len(indicators), len(indicator_list), [i for i in indicator_list if i not in indicators])",
"_____no_output_____"
],
[
"policy_df.to_csv('model_one/policy.csv', sep=';', index=False)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d0edd303d0c8449a0e003f0436ef35ebea51ec2b | 6,657 | ipynb | Jupyter Notebook | examples/notebook/contrib/blending.ipynb | MaximilianAzendorf/wasm-or-tools | f16c3efc13ad5d41c7a65338434ea88ed908c398 | [
"Apache-2.0"
] | null | null | null | examples/notebook/contrib/blending.ipynb | MaximilianAzendorf/wasm-or-tools | f16c3efc13ad5d41c7a65338434ea88ed908c398 | [
"Apache-2.0"
] | null | null | null | examples/notebook/contrib/blending.ipynb | MaximilianAzendorf/wasm-or-tools | f16c3efc13ad5d41c7a65338434ea88ed908c398 | [
"Apache-2.0"
] | null | null | null | 31.107477 | 246 | 0.528616 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
d0edd81ebd277080527e53da3f5a2dcb13c60b88 | 33,967 | ipynb | Jupyter Notebook | notebooks/S3/modulec_testing.ipynb | luissian/opentrons_covid19 | d5b0332954ea0446f311d19eb905c39b665ed72d | [
"MIT"
] | null | null | null | notebooks/S3/modulec_testing.ipynb | luissian/opentrons_covid19 | d5b0332954ea0446f311d19eb905c39b665ed72d | [
"MIT"
] | null | null | null | notebooks/S3/modulec_testing.ipynb | luissian/opentrons_covid19 | d5b0332954ea0446f311d19eb905c39b665ed72d | [
"MIT"
] | null | null | null | 59.695958 | 2,182 | 0.648688 | [
[
[
"from opentrons import simulate\nctx = simulate.get_protocol_api('2.1')\n\nNUM_SAMPLES = 48\nVOLUME_MMIX = 20\nELUTION_LABWARE = '2ml tubes'\nPREPARE_MASTERMIX = True\nMM_TYPE = 'MM1'\n\nEL_LW_DICT = {\n 'large strips': 'opentrons_96_aluminumblock_generic_pcr_strip_200ul',\n 'short strips': 'opentrons_96_aluminumblock_generic_pcr_strip_200ul',\n '2ml tubes': 'opentrons_24_tuberack_generic_2ml_screwcap',\n '1.5ml tubes': 'opentrons_24_tuberack_nest_1.5ml_screwcap'\n}\n",
"C:\\Users\\Adm\\.opentrons\\deck_calibration.json not found. Loading defaults\nC:\\Users\\Adm\\.opentrons\\robot_settings.json not found. Loading defaults\n"
],
[
" source_racks = [\n ctx.load_labware(EL_LW_DICT[ELUTION_LABWARE], slot,\n 'RNA elution labware ' + str(i+1))\n for i, slot in enumerate(['4', '5', '1', '2'])\n ]\n\n tips20 = [\n ctx.load_labware('opentrons_96_filtertiprack_20ul', slot)\n for slot in ['6', '9', '8', '7']\n ]\n tips300 = [ctx.load_labware('opentrons_96_filtertiprack_200ul', '3')]\n tempdeck = ctx.load_module('tempdeck', '10')\n pcr_plate = tempdeck.load_labware(\n 'biorad_96_wellplate_200ul_pcr', 'PCR plate')\n tempdeck.set_temperature(4)\n mm_rack = ctx.load_labware(\n 'opentrons_24_tuberack_generic_2ml_screwcap', '11',\n '2ml screw tube aluminum block for mastermix')\n\n # pipette\n p20 = ctx.load_instrument('p20_single_gen2', 'right', tip_racks=tips20)\n p300 = ctx.load_instrument('p300_single_gen2', 'left', tip_racks=tips300)\n",
"_____no_output_____"
],
[
"print(source_racks)\nprint(tips20)\nprint(tips300)\nprint(tempdeck)\nprint(pcr_plate)\nprint(mm_rack)",
"[RNA elution labware 1 on 4, RNA elution labware 2 on 5, RNA elution labware 3 on 1, RNA elution labware 4 on 2]\n[Opentrons 96 Filter Tip Rack 20 µL on 6, Opentrons 96 Filter Tip Rack 20 µL on 9, Opentrons 96 Filter Tip Rack 20 µL on 8, Opentrons 96 Filter Tip Rack 20 µL on 7]\n[Opentrons 96 Filter Tip Rack 200 µL on 3]\nTemperatureModuleContext at Temperature Module on 10 lw PCR plate on Temperature Module on 10\nPCR plate on Temperature Module on 10\n2ml screw tube aluminum block for mastermix on 11\n"
],
[
"\n# Know which class cames the object from.\nmm_rack.__class__\n# Know which methods are available for the object.\ndir(mm_rack)",
"_____no_output_____"
],
[
"# Example, access wells in rack object.\nmm_rack.wells()",
"_____no_output_____"
],
[
"sources = [\n tube\n for rack in source_racks for tube in rack.wells()][:NUM_SAMPLES]\nprint(sources)",
"[A1 of RNA elution labware 1 on 4, B1 of RNA elution labware 1 on 4, C1 of RNA elution labware 1 on 4, D1 of RNA elution labware 1 on 4, A2 of RNA elution labware 1 on 4, B2 of RNA elution labware 1 on 4, C2 of RNA elution labware 1 on 4, D2 of RNA elution labware 1 on 4, A3 of RNA elution labware 1 on 4, B3 of RNA elution labware 1 on 4, C3 of RNA elution labware 1 on 4, D3 of RNA elution labware 1 on 4, A4 of RNA elution labware 1 on 4, B4 of RNA elution labware 1 on 4, C4 of RNA elution labware 1 on 4, D4 of RNA elution labware 1 on 4, A5 of RNA elution labware 1 on 4, B5 of RNA elution labware 1 on 4, C5 of RNA elution labware 1 on 4, D5 of RNA elution labware 1 on 4, A6 of RNA elution labware 1 on 4, B6 of RNA elution labware 1 on 4, C6 of RNA elution labware 1 on 4, D6 of RNA elution labware 1 on 4, A1 of RNA elution labware 2 on 5, B1 of RNA elution labware 2 on 5, C1 of RNA elution labware 2 on 5, D1 of RNA elution labware 2 on 5, A2 of RNA elution labware 2 on 5, B2 of RNA elution labware 2 on 5, C2 of RNA elution labware 2 on 5, D2 of RNA elution labware 2 on 5, A3 of RNA elution labware 2 on 5, B3 of RNA elution labware 2 on 5, C3 of RNA elution labware 2 on 5, D3 of RNA elution labware 2 on 5, A4 of RNA elution labware 2 on 5, B4 of RNA elution labware 2 on 5, C4 of RNA elution labware 2 on 5, D4 of RNA elution labware 2 on 5, A5 of RNA elution labware 2 on 5, B5 of RNA elution labware 2 on 5, C5 of RNA elution labware 2 on 5, D5 of RNA elution labware 2 on 5, A6 of RNA elution labware 2 on 5, B6 of RNA elution labware 2 on 5, C6 of RNA elution labware 2 on 5, D6 of RNA elution labware 2 on 5]\n"
],
[
"sources=list()\nfor rack in source_racks:\n for tube in rack.wells():\n sources.append(tube)\nprint(sources[:NUM_SAMPLES])",
"[A1 of RNA elution labware 1 on 4, B1 of RNA elution labware 1 on 4, C1 of RNA elution labware 1 on 4, D1 of RNA elution labware 1 on 4, A2 of RNA elution labware 1 on 4, B2 of RNA elution labware 1 on 4, C2 of RNA elution labware 1 on 4, D2 of RNA elution labware 1 on 4, A3 of RNA elution labware 1 on 4, B3 of RNA elution labware 1 on 4, C3 of RNA elution labware 1 on 4, D3 of RNA elution labware 1 on 4, A4 of RNA elution labware 1 on 4, B4 of RNA elution labware 1 on 4, C4 of RNA elution labware 1 on 4, D4 of RNA elution labware 1 on 4, A5 of RNA elution labware 1 on 4, B5 of RNA elution labware 1 on 4, C5 of RNA elution labware 1 on 4, D5 of RNA elution labware 1 on 4, A6 of RNA elution labware 1 on 4, B6 of RNA elution labware 1 on 4, C6 of RNA elution labware 1 on 4, D6 of RNA elution labware 1 on 4, A1 of RNA elution labware 2 on 5, B1 of RNA elution labware 2 on 5, C1 of RNA elution labware 2 on 5, D1 of RNA elution labware 2 on 5, A2 of RNA elution labware 2 on 5, B2 of RNA elution labware 2 on 5, C2 of RNA elution labware 2 on 5, D2 of RNA elution labware 2 on 5, A3 of RNA elution labware 2 on 5, B3 of RNA elution labware 2 on 5, C3 of RNA elution labware 2 on 5, D3 of RNA elution labware 2 on 5, A4 of RNA elution labware 2 on 5, B4 of RNA elution labware 2 on 5, C4 of RNA elution labware 2 on 5, D4 of RNA elution labware 2 on 5, A5 of RNA elution labware 2 on 5, B5 of RNA elution labware 2 on 5, C5 of RNA elution labware 2 on 5, D5 of RNA elution labware 2 on 5, A6 of RNA elution labware 2 on 5, B6 of RNA elution labware 2 on 5, C6 of RNA elution labware 2 on 5, D6 of RNA elution labware 2 on 5]\n"
],
[
"dests = [\n well\n for h_block in range(2)\n for v_block in range(2)\n for col in pcr_plate.columns()[6*v_block:6*(v_block+1)]\n for well in col[4*h_block:4*(h_block+1)]][:NUM_SAMPLES]\nprint(dests)",
"[A1 of PCR plate on Temperature Module on 10, B1 of PCR plate on Temperature Module on 10, C1 of PCR plate on Temperature Module on 10, D1 of PCR plate on Temperature Module on 10, A2 of PCR plate on Temperature Module on 10, B2 of PCR plate on Temperature Module on 10, C2 of PCR plate on Temperature Module on 10, D2 of PCR plate on Temperature Module on 10, A3 of PCR plate on Temperature Module on 10, B3 of PCR plate on Temperature Module on 10, C3 of PCR plate on Temperature Module on 10, D3 of PCR plate on Temperature Module on 10, A4 of PCR plate on Temperature Module on 10, B4 of PCR plate on Temperature Module on 10, C4 of PCR plate on Temperature Module on 10, D4 of PCR plate on Temperature Module on 10, A5 of PCR plate on Temperature Module on 10, B5 of PCR plate on Temperature Module on 10, C5 of PCR plate on Temperature Module on 10, D5 of PCR plate on Temperature Module on 10, A6 of PCR plate on Temperature Module on 10, B6 of PCR plate on Temperature Module on 10, C6 of PCR plate on Temperature Module on 10, D6 of PCR plate on Temperature Module on 10, A7 of PCR plate on Temperature Module on 10, B7 of PCR plate on Temperature Module on 10, C7 of PCR plate on Temperature Module on 10, D7 of PCR plate on Temperature Module on 10, A8 of PCR plate on Temperature Module on 10, B8 of PCR plate on Temperature Module on 10, C8 of PCR plate on Temperature Module on 10, D8 of PCR plate on Temperature Module on 10, A9 of PCR plate on Temperature Module on 10, B9 of PCR plate on Temperature Module on 10, C9 of PCR plate on Temperature Module on 10, D9 of PCR plate on Temperature Module on 10, A10 of PCR plate on Temperature Module on 10, B10 of PCR plate on Temperature Module on 10, C10 of PCR plate on Temperature Module on 10, D10 of PCR plate on Temperature Module on 10, A11 of PCR plate on Temperature Module on 10, B11 of PCR plate on Temperature Module on 10, C11 of PCR plate on Temperature Module on 10, D11 of PCR plate on Temperature Module on 10, A12 of PCR plate on Temperature Module on 10, B12 of PCR plate on Temperature Module on 10, C12 of PCR plate on Temperature Module on 10, D12 of PCR plate on Temperature Module on 10]\n"
],
[
"dests = list()\nfor h_block in range(2):\n print(\"hblock = \" + str(h_block))\n for v_block in range(2):\n print(\"vblock = \" + str(v_block))\n for col in pcr_plate.columns()[6*v_block:6*(v_block+1)]:\n print(\"col = \" + str(col))\n for well in col[4*h_block:4*(h_block+1)]:\n print(well)\n dests.append(well)\ndests = dests[:NUM_SAMPLES] ",
"hblock = 0\nvblock = 0\ncol = [A1 of PCR plate on Temperature Module on 10, B1 of PCR plate on Temperature Module on 10, C1 of PCR plate on Temperature Module on 10, D1 of PCR plate on Temperature Module on 10, E1 of PCR plate on Temperature Module on 10, F1 of PCR plate on Temperature Module on 10, G1 of PCR plate on Temperature Module on 10, H1 of PCR plate on Temperature Module on 10]\nA1 of PCR plate on Temperature Module on 10\nB1 of PCR plate on Temperature Module on 10\nC1 of PCR plate on Temperature Module on 10\nD1 of PCR plate on Temperature Module on 10\ncol = [A2 of PCR plate on Temperature Module on 10, B2 of PCR plate on Temperature Module on 10, C2 of PCR plate on Temperature Module on 10, D2 of PCR plate on Temperature Module on 10, E2 of PCR plate on Temperature Module on 10, F2 of PCR plate on Temperature Module on 10, G2 of PCR plate on Temperature Module on 10, H2 of PCR plate on Temperature Module on 10]\nA2 of PCR plate on Temperature Module on 10\nB2 of PCR plate on Temperature Module on 10\nC2 of PCR plate on Temperature Module on 10\nD2 of PCR plate on Temperature Module on 10\ncol = [A3 of PCR plate on Temperature Module on 10, B3 of PCR plate on Temperature Module on 10, C3 of PCR plate on Temperature Module on 10, D3 of PCR plate on Temperature Module on 10, E3 of PCR plate on Temperature Module on 10, F3 of PCR plate on Temperature Module on 10, G3 of PCR plate on Temperature Module on 10, H3 of PCR plate on Temperature Module on 10]\nA3 of PCR plate on Temperature Module on 10\nB3 of PCR plate on Temperature Module on 10\nC3 of PCR plate on Temperature Module on 10\nD3 of PCR plate on Temperature Module on 10\ncol = [A4 of PCR plate on Temperature Module on 10, B4 of PCR plate on Temperature Module on 10, C4 of PCR plate on Temperature Module on 10, D4 of PCR plate on Temperature Module on 10, E4 of PCR plate on Temperature Module on 10, F4 of PCR plate on Temperature Module on 10, G4 of PCR plate on Temperature Module on 10, H4 of PCR plate on Temperature Module on 10]\nA4 of PCR plate on Temperature Module on 10\nB4 of PCR plate on Temperature Module on 10\nC4 of PCR plate on Temperature Module on 10\nD4 of PCR plate on Temperature Module on 10\ncol = [A5 of PCR plate on Temperature Module on 10, B5 of PCR plate on Temperature Module on 10, C5 of PCR plate on Temperature Module on 10, D5 of PCR plate on Temperature Module on 10, E5 of PCR plate on Temperature Module on 10, F5 of PCR plate on Temperature Module on 10, G5 of PCR plate on Temperature Module on 10, H5 of PCR plate on Temperature Module on 10]\nA5 of PCR plate on Temperature Module on 10\nB5 of PCR plate on Temperature Module on 10\nC5 of PCR plate on Temperature Module on 10\nD5 of PCR plate on Temperature Module on 10\ncol = [A6 of PCR plate on Temperature Module on 10, B6 of PCR plate on Temperature Module on 10, C6 of PCR plate on Temperature Module on 10, D6 of PCR plate on Temperature Module on 10, E6 of PCR plate on Temperature Module on 10, F6 of PCR plate on Temperature Module on 10, G6 of PCR plate on Temperature Module on 10, H6 of PCR plate on Temperature Module on 10]\nA6 of PCR plate on Temperature Module on 10\nB6 of PCR plate on Temperature Module on 10\nC6 of PCR plate on Temperature Module on 10\nD6 of PCR plate on Temperature Module on 10\nvblock = 1\ncol = [A7 of PCR plate on Temperature Module on 10, B7 of PCR plate on Temperature Module on 10, C7 of PCR plate on Temperature Module on 10, D7 of PCR plate on Temperature Module on 10, E7 of PCR plate on Temperature Module on 
10, F7 of PCR plate on Temperature Module on 10, G7 of PCR plate on Temperature Module on 10, H7 of PCR plate on Temperature Module on 10]\nA7 of PCR plate on Temperature Module on 10\nB7 of PCR plate on Temperature Module on 10\nC7 of PCR plate on Temperature Module on 10\nD7 of PCR plate on Temperature Module on 10\ncol = [A8 of PCR plate on Temperature Module on 10, B8 of PCR plate on Temperature Module on 10, C8 of PCR plate on Temperature Module on 10, D8 of PCR plate on Temperature Module on 10, E8 of PCR plate on Temperature Module on 10, F8 of PCR plate on Temperature Module on 10, G8 of PCR plate on Temperature Module on 10, H8 of PCR plate on Temperature Module on 10]\nA8 of PCR plate on Temperature Module on 10\nB8 of PCR plate on Temperature Module on 10\nC8 of PCR plate on Temperature Module on 10\nD8 of PCR plate on Temperature Module on 10\ncol = [A9 of PCR plate on Temperature Module on 10, B9 of PCR plate on Temperature Module on 10, C9 of PCR plate on Temperature Module on 10, D9 of PCR plate on Temperature Module on 10, E9 of PCR plate on Temperature Module on 10, F9 of PCR plate on Temperature Module on 10, G9 of PCR plate on Temperature Module on 10, H9 of PCR plate on Temperature Module on 10]\nA9 of PCR plate on Temperature Module on 10\nB9 of PCR plate on Temperature Module on 10\nC9 of PCR plate on Temperature Module on 10\nD9 of PCR plate on Temperature Module on 10\ncol = [A10 of PCR plate on Temperature Module on 10, B10 of PCR plate on Temperature Module on 10, C10 of PCR plate on Temperature Module on 10, D10 of PCR plate on Temperature Module on 10, E10 of PCR plate on Temperature Module on 10, F10 of PCR plate on Temperature Module on 10, G10 of PCR plate on Temperature Module on 10, H10 of PCR plate on Temperature Module on 10]\nA10 of PCR plate on Temperature Module on 10\nB10 of PCR plate on Temperature Module on 10\nC10 of PCR plate on Temperature Module on 10\nD10 of PCR plate on Temperature Module on 10\ncol = [A11 of PCR plate on Temperature Module on 10, B11 of PCR plate on Temperature Module on 10, C11 of PCR plate on Temperature Module on 10, D11 of PCR plate on Temperature Module on 10, E11 of PCR plate on Temperature Module on 10, F11 of PCR plate on Temperature Module on 10, G11 of PCR plate on Temperature Module on 10, H11 of PCR plate on Temperature Module on 10]\nA11 of PCR plate on Temperature Module on 10\nB11 of PCR plate on Temperature Module on 10\nC11 of PCR plate on Temperature Module on 10\nD11 of PCR plate on Temperature Module on 10\ncol = [A12 of PCR plate on Temperature Module on 10, B12 of PCR plate on Temperature Module on 10, C12 of PCR plate on Temperature Module on 10, D12 of PCR plate on Temperature Module on 10, E12 of PCR plate on Temperature Module on 10, F12 of PCR plate on Temperature Module on 10, G12 of PCR plate on Temperature Module on 10, H12 of PCR plate on Temperature Module on 10]\nA12 of PCR plate on Temperature Module on 10\nB12 of PCR plate on Temperature Module on 10\nC12 of PCR plate on Temperature Module on 10\nD12 of PCR plate on Temperature Module on 10\nhblock = 1\nvblock = 0\ncol = [A1 of PCR plate on Temperature Module on 10, B1 of PCR plate on Temperature Module on 10, C1 of PCR plate on Temperature Module on 10, D1 of PCR plate on Temperature Module on 10, E1 of PCR plate on Temperature Module on 10, F1 of PCR plate on Temperature Module on 10, G1 of PCR plate on Temperature Module on 10, H1 of PCR plate on Temperature Module on 10]\nE1 of PCR plate on Temperature Module on 10\nF1 of PCR plate on 
Temperature Module on 10\nG1 of PCR plate on Temperature Module on 10\nH1 of PCR plate on Temperature Module on 10\ncol = [A2 of PCR plate on Temperature Module on 10, B2 of PCR plate on Temperature Module on 10, C2 of PCR plate on Temperature Module on 10, D2 of PCR plate on Temperature Module on 10, E2 of PCR plate on Temperature Module on 10, F2 of PCR plate on Temperature Module on 10, G2 of PCR plate on Temperature Module on 10, H2 of PCR plate on Temperature Module on 10]\nE2 of PCR plate on Temperature Module on 10\nF2 of PCR plate on Temperature Module on 10\nG2 of PCR plate on Temperature Module on 10\nH2 of PCR plate on Temperature Module on 10\ncol = [A3 of PCR plate on Temperature Module on 10, B3 of PCR plate on Temperature Module on 10, C3 of PCR plate on Temperature Module on 10, D3 of PCR plate on Temperature Module on 10, E3 of PCR plate on Temperature Module on 10, F3 of PCR plate on Temperature Module on 10, G3 of PCR plate on Temperature Module on 10, H3 of PCR plate on Temperature Module on 10]\nE3 of PCR plate on Temperature Module on 10\nF3 of PCR plate on Temperature Module on 10\nG3 of PCR plate on Temperature Module on 10\nH3 of PCR plate on Temperature Module on 10\ncol = [A4 of PCR plate on Temperature Module on 10, B4 of PCR plate on Temperature Module on 10, C4 of PCR plate on Temperature Module on 10, D4 of PCR plate on Temperature Module on 10, E4 of PCR plate on Temperature Module on 10, F4 of PCR plate on Temperature Module on 10, G4 of PCR plate on Temperature Module on 10, H4 of PCR plate on Temperature Module on 10]\nE4 of PCR plate on Temperature Module on 10\nF4 of PCR plate on Temperature Module on 10\nG4 of PCR plate on Temperature Module on 10\nH4 of PCR plate on Temperature Module on 10\ncol = [A5 of PCR plate on Temperature Module on 10, B5 of PCR plate on Temperature Module on 10, C5 of PCR plate on Temperature Module on 10, D5 of PCR plate on Temperature Module on 10, E5 of PCR plate on Temperature Module on 10, F5 of PCR plate on Temperature Module on 10, G5 of PCR plate on Temperature Module on 10, H5 of PCR plate on Temperature Module on 10]\nE5 of PCR plate on Temperature Module on 10\nF5 of PCR plate on Temperature Module on 10\nG5 of PCR plate on Temperature Module on 10\nH5 of PCR plate on Temperature Module on 10\ncol = [A6 of PCR plate on Temperature Module on 10, B6 of PCR plate on Temperature Module on 10, C6 of PCR plate on Temperature Module on 10, D6 of PCR plate on Temperature Module on 10, E6 of PCR plate on Temperature Module on 10, F6 of PCR plate on Temperature Module on 10, G6 of PCR plate on Temperature Module on 10, H6 of PCR plate on Temperature Module on 10]\nE6 of PCR plate on Temperature Module on 10\nF6 of PCR plate on Temperature Module on 10\nG6 of PCR plate on Temperature Module on 10\nH6 of PCR plate on Temperature Module on 10\nvblock = 1\ncol = [A7 of PCR plate on Temperature Module on 10, B7 of PCR plate on Temperature Module on 10, C7 of PCR plate on Temperature Module on 10, D7 of PCR plate on Temperature Module on 10, E7 of PCR plate on Temperature Module on 10, F7 of PCR plate on Temperature Module on 10, G7 of PCR plate on Temperature Module on 10, H7 of PCR plate on Temperature Module on 10]\nE7 of PCR plate on Temperature Module on 10\nF7 of PCR plate on Temperature Module on 10\nG7 of PCR plate on Temperature Module on 10\nH7 of PCR plate on Temperature Module on 10\ncol = [A8 of PCR plate on Temperature Module on 10, B8 of PCR plate on Temperature Module on 10, C8 of PCR plate on Temperature Module on 
10, D8 of PCR plate on Temperature Module on 10, E8 of PCR plate on Temperature Module on 10, F8 of PCR plate on Temperature Module on 10, G8 of PCR plate on Temperature Module on 10, H8 of PCR plate on Temperature Module on 10]\nE8 of PCR plate on Temperature Module on 10\nF8 of PCR plate on Temperature Module on 10\nG8 of PCR plate on Temperature Module on 10\nH8 of PCR plate on Temperature Module on 10\ncol = [A9 of PCR plate on Temperature Module on 10, B9 of PCR plate on Temperature Module on 10, C9 of PCR plate on Temperature Module on 10, D9 of PCR plate on Temperature Module on 10, E9 of PCR plate on Temperature Module on 10, F9 of PCR plate on Temperature Module on 10, G9 of PCR plate on Temperature Module on 10, H9 of PCR plate on Temperature Module on 10]\nE9 of PCR plate on Temperature Module on 10\nF9 of PCR plate on Temperature Module on 10\nG9 of PCR plate on Temperature Module on 10\nH9 of PCR plate on Temperature Module on 10\ncol = [A10 of PCR plate on Temperature Module on 10, B10 of PCR plate on Temperature Module on 10, C10 of PCR plate on Temperature Module on 10, D10 of PCR plate on Temperature Module on 10, E10 of PCR plate on Temperature Module on 10, F10 of PCR plate on Temperature Module on 10, G10 of PCR plate on Temperature Module on 10, H10 of PCR plate on Temperature Module on 10]\nE10 of PCR plate on Temperature Module on 10\nF10 of PCR plate on Temperature Module on 10\nG10 of PCR plate on Temperature Module on 10\nH10 of PCR plate on Temperature Module on 10\ncol = [A11 of PCR plate on Temperature Module on 10, B11 of PCR plate on Temperature Module on 10, C11 of PCR plate on Temperature Module on 10, D11 of PCR plate on Temperature Module on 10, E11 of PCR plate on Temperature Module on 10, F11 of PCR plate on Temperature Module on 10, G11 of PCR plate on Temperature Module on 10, H11 of PCR plate on Temperature Module on 10]\nE11 of PCR plate on Temperature Module on 10\nF11 of PCR plate on Temperature Module on 10\nG11 of PCR plate on Temperature Module on 10\nH11 of PCR plate on Temperature Module on 10\ncol = [A12 of PCR plate on Temperature Module on 10, B12 of PCR plate on Temperature Module on 10, C12 of PCR plate on Temperature Module on 10, D12 of PCR plate on Temperature Module on 10, E12 of PCR plate on Temperature Module on 10, F12 of PCR plate on Temperature Module on 10, G12 of PCR plate on Temperature Module on 10, H12 of PCR plate on Temperature Module on 10]\nE12 of PCR plate on Temperature Module on 10\nF12 of PCR plate on Temperature Module on 10\nG12 of PCR plate on Temperature Module on 10\nH12 of PCR plate on Temperature Module on 10\n"
],
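[
"# Sketch (not in the original protocol): the expanded loop above and the list\n# comprehension two cells earlier should produce the identical well ordering.\n# Assumes pcr_plate and NUM_SAMPLES from earlier cells in this notebook.\ndests_comprehension = [\n    well\n    for h_block in range(2)\n    for v_block in range(2)\n    for col in pcr_plate.columns()[6*v_block:6*(v_block+1)]\n    for well in col[4*h_block:4*(h_block+1)]][:NUM_SAMPLES]\nassert dests == dests_comprehension",
"_____no_output_____"
],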
[
"max_trans_per_asp = 8\n#print(max_trans_per_asp)\nsplit_ind = [ind for ind in range(0, NUM_SAMPLES, max_trans_per_asp)]\ndest_sets = [dests[split_ind[i]:split_ind[i+1]]\n for i in range(len(split_ind)-1)] + [dests[split_ind[-1]:]]\n\ndest_sets",
"_____no_output_____"
]
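,
[
"# Sketch (not in the original protocol): sanity-check the chunking.\n# Every destination well should appear exactly once, and no group should\n# exceed max_trans_per_asp dispenses per aspiration. Assumes dests,\n# dest_sets, NUM_SAMPLES and max_trans_per_asp from the cells above.\nassert sum(len(s) for s in dest_sets) == NUM_SAMPLES\nassert all(len(s) <= max_trans_per_asp for s in dest_sets)\nprint(len(dest_sets), \"aspiration groups\")",
"_____no_output_____"
]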
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d0eddb026b86d6c59156da6ee676d68b8131a71e | 5,809 | ipynb | Jupyter Notebook | OOP_Concepts_2.ipynb | wearlianbaguio/OOP-1-1 | 714d51ba9c8ce1f7454eba0efbdb3abc1eef6206 | [
"Apache-2.0"
] | null | null | null | OOP_Concepts_2.ipynb | wearlianbaguio/OOP-1-1 | 714d51ba9c8ce1f7454eba0efbdb3abc1eef6206 | [
"Apache-2.0"
] | null | null | null | OOP_Concepts_2.ipynb | wearlianbaguio/OOP-1-1 | 714d51ba9c8ce1f7454eba0efbdb3abc1eef6206 | [
"Apache-2.0"
] | null | null | null | 35.638037 | 233 | 0.531761 | [
[
[
"<a href=\"https://colab.research.google.com/github/wearlianbaguio/OOP-1-1/blob/main/OOP_Concepts_2.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"##Application 1\n1. Create a Python program that displays the name of the students (Student 1, Student 2, Student 3) and their term grades\n2. Create a class name Person and attributes - std1, std2, std3, pre, mid, fin\n3. Compute the average of each term grade using Grade() method \n4. Information about student's grades must be hidden from others \n5. Save your python program named as \"OOP Concepts 2\" to your GitHub repository \"OOP 1-1\"\n6. Share your GitHub link attached with this manuscript.",
"_____no_output_____"
]
],
[
[
"class Person:\n def __init__(self,std1_fullname,std1_PreMidFin,std2_fullname,std2_PreMidFin,std3_fullname,std3_PreMidFin):\n self.std1_fullname = std1_fullname\n self.__std1_PreMidFin = std1_PreMidFin\n self.std2_fullname = std2_fullname\n self.__std2_PreMidFin = std2_PreMidFin\n self.std3_fullname = std3_fullname\n self.__std3_PreMidFin = std3_PreMidFin\n def set_std1_PreMidFin(self, std1_PreMidFin):\n self.__std1_PreMidFin = std1_PreMidFin\n def get_std1_PreMidFin(self):\n return self.__std1_PreMidFin\n def set_std1_PreMidFin(self, std1_PreMidFin):\n self.__std2_PreMidFin = std2_PreMidFin\n def get_std1_PreMidFin(self):\n return self.__std2_PreMidFin\n def set_std1_PreMidFin(self, std1_PreMidFin):\n self.__std3_PreMidFin = std3_PreMidFin\n def get_std1_PreMidFin(self):\n return self.__std3_PreMidFin \n\n def Info(self):\n print(self.std1_fullname)\n print(self.__std1_PreMidFin)\n print(self.std2_fullname)\n print(self.__std2_PreMidFin)\n print(self.std3_fullname)\n print(self.__std3_PreMidFin)\n\nstudent = Person(\"Barabasz Viscenzo\",\" Prelim:86 Midterms:90 Final:89\",\"Isabella Sebastian\",\" Prelim:92 Midterms:90 Final:93\",\"Laszlo Samaniego\",\" Prelim:95 Midterms:98 Final:97\")\nstudent.Info()",
"Barabasz Viscenzo\n Prelim:86 Midterms:90 Final:89\nIsabella Sebastian\n Prelim:92 Midterms:90 Final:93\nLaszlo Samaniego\n Prelim:95 Midterms:98 Final:97\n"
]
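,
[
"# Sketch (not part of the original submission): the double-underscore\n# attributes are name-mangled, so the term grades stay hidden unless the\n# public getters are used. Assumes the `student` object from the cell above.\ntry:\n    print(student.__std1_PreMidFin)  # direct access raises AttributeError\nexcept AttributeError as err:\n    print(\"Hidden:\", err)\nprint(student.get_std1_PreMidFin())  # access through the public getter",
"_____no_output_____"
]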
],
[
[
"##The average of three students grade based on each of their term",
"_____no_output_____"
]
],
[
[
"class grades:\n def __init__(self, prelim, midterms, finals):\n self.__prelim = prelim\n self.__midterms = midterms\n self.__finals = finals\n def set_prelim(self, prelim):\n self.__prelim = prelim\n def get_prelim(self):\n return self.__prelim\n def set_prelim(self, midterms):\n self.__midterms = midterms\n def get_midterms(self):\n return self.__midterms\n def set_finals(self, finals):\n self.__finals = finals\n def get_finals(self):\n return self.__finals\n def Grade(self):\n return (self.__prelim + self.__midterms + self.__finals)// 3\nstd1 = grades(86, 90, 89)\nstd2 = grades(92, 90, 93)\nstd3 = grades(95, 98, 97)\n\nprint(\"Barabasz Viscenzo's average is\", std1.Grade()) \nprint(\"Isablla Sebastian's average is\", std2.Grade()) \nprint(\"Laszlo Samaniego's average is\", std3.Grade()) ",
"Barabasz Viscenzo's average is 88\nIsablla Sebastian's average is 91\nLaszlo Samaniego's average is 96\n"
]
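,
[
"# Sketch (not part of the original submission): updating a hidden grade\n# through its setter changes the computed average. Assumes std1 from the\n# cell above.\nstd1.set_finals(95)\nprint(\"Updated final:\", std1.get_finals())\nprint(\"New average:\", std1.Grade())",
"_____no_output_____"
]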
]
] | [
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
d0ede726aca0321bbcab96a28e6d71d2ee93c2a8 | 4,532 | ipynb | Jupyter Notebook | test.ipynb | Anjanababue/python-basics | 2a0ca929f329269acb8ef1382e2f4fb093dfaa84 | [
"MIT"
] | null | null | null | test.ipynb | Anjanababue/python-basics | 2a0ca929f329269acb8ef1382e2f4fb093dfaa84 | [
"MIT"
] | null | null | null | test.ipynb | Anjanababue/python-basics | 2a0ca929f329269acb8ef1382e2f4fb093dfaa84 | [
"MIT"
] | null | null | null | 21.684211 | 370 | 0.483672 | [
[
[
"print(\"hello\")",
"hello\n"
],
[
"a=1\nb=2\nc=a+b\nprint(c)",
"3\n"
],
[
"a=1\nb=2\nc=a+b\nprint(c)\na=2\nprint(c)",
"3\n3\n"
],
[
"a=1\nb=2\nc=a+b\nprint(c)\na=2\nc=a+b\nprint(c)\n",
"3\n4\n"
],
[
"if True:\n print (\"True\")\nelse:\n print (\"False\")",
"True\n"
],
[
"if True:\nprint \"Answer\"\nprint \"True\"\nelse:\nprint \"Answer\"\nprint \"False\"",
"_____no_output_____"
],
[
"import sys\n\ntry:\n # open file stream\n file = open(file_name, \"w\")\nexcept IOError:\n print (\"There was an error writing to\"), file_name\n sys.exit()\nprint \"Enter '\", file_finish,\nprint \"' When finished\"\nwhile file_text != file_finish:\n file_text = raw_input(\"Enter text: \")\n if file_text == file_finish:\n # close the file\n file.close\n break\n file.write(file_text)\n file.write(\"\\n\")\nfile.close()\nfile_name = raw_input(\"Enter filename: \")\nif len(file_name) == 0:\n print (\"Next time please enter something\")\n sys.exit()\ntry:\n file = open(file_name, \"r\")\nexcept IOError:\n print (\"There was an error reading file\")\n sys.exit()\nfile_text = file.read()\nfile.close()\nprint file_textimport sys\n\n",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |