hexsha
stringlengths 40
40
| size
int64 6
14.9M
| ext
stringclasses 1
value | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 6
260
| max_stars_repo_name
stringlengths 6
119
| max_stars_repo_head_hexsha
stringlengths 40
41
| max_stars_repo_licenses
list | max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
โ | max_issues_repo_path
stringlengths 6
260
| max_issues_repo_name
stringlengths 6
119
| max_issues_repo_head_hexsha
stringlengths 40
41
| max_issues_repo_licenses
list | max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
โ | max_forks_repo_path
stringlengths 6
260
| max_forks_repo_name
stringlengths 6
119
| max_forks_repo_head_hexsha
stringlengths 40
41
| max_forks_repo_licenses
list | max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
โ | avg_line_length
float64 2
1.04M
| max_line_length
int64 2
11.2M
| alphanum_fraction
float64 0
1
| cells
list | cell_types
list | cell_type_groups
list |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
eca4370db116a89162d0a08980f0a0a46ff954b2 | 123,533 | ipynb | Jupyter Notebook | 9-3.ipynb | yonghunlee1/honza_gonbu | 3ee4b0a7650636dbc71d6844548fdcc3e8ac466e | [
"MIT"
] | 1 | 2021-07-26T01:04:38.000Z | 2021-07-26T01:04:38.000Z | 9-3.ipynb | yonghunlee1/honza_gonbu | 3ee4b0a7650636dbc71d6844548fdcc3e8ac466e | [
"MIT"
] | null | null | null | 9-3.ipynb | yonghunlee1/honza_gonbu | 3ee4b0a7650636dbc71d6844548fdcc3e8ac466e | [
"MIT"
] | 1 | 2021-06-28T01:53:19.000Z | 2021-06-28T01:53:19.000Z | 119.470986 | 17,746 | 0.745477 | [
[
[
"# LSTM과 GRU 셀",
"_____no_output_____"
],
[
"<table align=\"left\">\n  <td>\n    <a target=\"_blank\" href=\"https://colab.research.google.com/github/rickiepark/hg-mldl/blob/master/9-3.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />구글 코랩에서 실행하기</a>\n  </td>\n</table>",
"_____no_output_____"
],
[
"## LSTM 신경망 훈련하기",
"_____no_output_____"
]
],
[
[
"from tensorflow.keras.datasets import imdb\nfrom sklearn.model_selection import train_test_split\n\n(train_input, train_target), (test_input, test_target) = imdb.load_data(\n num_words=500)\n\ntrain_input, val_input, train_target, val_target = train_test_split(\n train_input, train_target, test_size=0.2, random_state=42)",
"Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/imdb.npz\n17465344/17464789 [==============================] - 0s 0us/step\n"
],
[
"from tensorflow.keras.preprocessing.sequence import pad_sequences\n\ntrain_seq = pad_sequences(train_input, maxlen=100)\nval_seq = pad_sequences(val_input, maxlen=100)",
"_____no_output_____"
],
[
"from tensorflow import keras\n\nmodel = keras.Sequential()\n\nmodel.add(keras.layers.Embedding(500, 16, input_length=100))\nmodel.add(keras.layers.LSTM(8))\nmodel.add(keras.layers.Dense(1, activation='sigmoid'))\n\nmodel.summary()",
"Model: \"sequential\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nembedding (Embedding) (None, 100, 16) 8000 \n_________________________________________________________________\nlstm (LSTM) (None, 8) 800 \n_________________________________________________________________\ndense (Dense) (None, 1) 9 \n=================================================================\nTotal params: 8,809\nTrainable params: 8,809\nNon-trainable params: 0\n_________________________________________________________________\n"
],
[
"rmsprop = keras.optimizers.RMSprop(learning_rate=1e-4)\nmodel.compile(optimizer=rmsprop, loss='binary_crossentropy', \n metrics=['accuracy'])\n\ncheckpoint_cb = keras.callbacks.ModelCheckpoint('best-lstm-model.h5', \n save_best_only=True)\nearly_stopping_cb = keras.callbacks.EarlyStopping(patience=3,\n restore_best_weights=True)\n\nhistory = model.fit(train_seq, train_target, epochs=100, batch_size=64,\n validation_data=(val_seq, val_target),\n callbacks=[checkpoint_cb, early_stopping_cb])",
"Epoch 1/100\n313/313 [==============================] - 11s 9ms/step - loss: 0.6923 - accuracy: 0.5354 - val_loss: 0.6917 - val_accuracy: 0.5468\nEpoch 2/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.6893 - accuracy: 0.6054 - val_loss: 0.6874 - val_accuracy: 0.6250\nEpoch 3/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.6806 - accuracy: 0.6608 - val_loss: 0.6725 - val_accuracy: 0.6814\nEpoch 4/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.6354 - accuracy: 0.7053 - val_loss: 0.5830 - val_accuracy: 0.7200\nEpoch 5/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.5546 - accuracy: 0.7398 - val_loss: 0.5398 - val_accuracy: 0.7456\nEpoch 6/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.5201 - accuracy: 0.7614 - val_loss: 0.5112 - val_accuracy: 0.7666\nEpoch 7/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4943 - accuracy: 0.7747 - val_loss: 0.4908 - val_accuracy: 0.7744\nEpoch 8/100\n313/313 [==============================] - 2s 7ms/step - loss: 0.4758 - accuracy: 0.7857 - val_loss: 0.4765 - val_accuracy: 0.7824\nEpoch 9/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4632 - accuracy: 0.7929 - val_loss: 0.4689 - val_accuracy: 0.7868\nEpoch 10/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4546 - accuracy: 0.7964 - val_loss: 0.4624 - val_accuracy: 0.7908\nEpoch 11/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4476 - accuracy: 0.7980 - val_loss: 0.4567 - val_accuracy: 0.7900\nEpoch 12/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4416 - accuracy: 0.8024 - val_loss: 0.4535 - val_accuracy: 0.7920\nEpoch 13/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4374 - accuracy: 0.8044 - val_loss: 0.4508 - val_accuracy: 0.7940\nEpoch 14/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4341 - accuracy: 0.8071 - 
val_loss: 0.4512 - val_accuracy: 0.7884\nEpoch 15/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4305 - accuracy: 0.8069 - val_loss: 0.4507 - val_accuracy: 0.7882\nEpoch 16/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4281 - accuracy: 0.8073 - val_loss: 0.4439 - val_accuracy: 0.7960\nEpoch 17/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4259 - accuracy: 0.8087 - val_loss: 0.4426 - val_accuracy: 0.7956\nEpoch 18/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4233 - accuracy: 0.8105 - val_loss: 0.4441 - val_accuracy: 0.7908\nEpoch 19/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4218 - accuracy: 0.8109 - val_loss: 0.4411 - val_accuracy: 0.7932\nEpoch 20/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4196 - accuracy: 0.8113 - val_loss: 0.4382 - val_accuracy: 0.7994\nEpoch 21/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4185 - accuracy: 0.8130 - val_loss: 0.4404 - val_accuracy: 0.7902\nEpoch 22/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4171 - accuracy: 0.8129 - val_loss: 0.4356 - val_accuracy: 0.7998\nEpoch 23/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4156 - accuracy: 0.8130 - val_loss: 0.4352 - val_accuracy: 0.8008\nEpoch 24/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4144 - accuracy: 0.8141 - val_loss: 0.4347 - val_accuracy: 0.8026\nEpoch 25/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4135 - accuracy: 0.8137 - val_loss: 0.4344 - val_accuracy: 0.7966\nEpoch 26/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4122 - accuracy: 0.8149 - val_loss: 0.4350 - val_accuracy: 0.7972\nEpoch 27/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4111 - accuracy: 0.8149 - val_loss: 0.4416 - val_accuracy: 0.7896\nEpoch 28/100\n313/313 [==============================] - 2s 
8ms/step - loss: 0.4101 - accuracy: 0.8158 - val_loss: 0.4321 - val_accuracy: 0.8048\nEpoch 29/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4091 - accuracy: 0.8162 - val_loss: 0.4359 - val_accuracy: 0.7946\nEpoch 30/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4079 - accuracy: 0.8177 - val_loss: 0.4319 - val_accuracy: 0.7994\nEpoch 31/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4072 - accuracy: 0.8169 - val_loss: 0.4298 - val_accuracy: 0.8024\nEpoch 32/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4059 - accuracy: 0.8168 - val_loss: 0.4291 - val_accuracy: 0.8046\nEpoch 33/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4051 - accuracy: 0.8181 - val_loss: 0.4318 - val_accuracy: 0.7992\nEpoch 34/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4042 - accuracy: 0.8176 - val_loss: 0.4354 - val_accuracy: 0.7942\nEpoch 35/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4037 - accuracy: 0.8173 - val_loss: 0.4283 - val_accuracy: 0.8012\nEpoch 36/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4027 - accuracy: 0.8202 - val_loss: 0.4319 - val_accuracy: 0.7990\nEpoch 37/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4021 - accuracy: 0.8187 - val_loss: 0.4282 - val_accuracy: 0.8068\nEpoch 38/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4013 - accuracy: 0.8206 - val_loss: 0.4270 - val_accuracy: 0.8062\nEpoch 39/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4001 - accuracy: 0.8203 - val_loss: 0.4273 - val_accuracy: 0.8014\nEpoch 40/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.3997 - accuracy: 0.8220 - val_loss: 0.4276 - val_accuracy: 0.8056\nEpoch 41/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.3985 - accuracy: 0.8223 - val_loss: 0.4250 - val_accuracy: 0.8074\nEpoch 
42/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.3978 - accuracy: 0.8228 - val_loss: 0.4257 - val_accuracy: 0.8054\nEpoch 43/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.3968 - accuracy: 0.8214 - val_loss: 0.4282 - val_accuracy: 0.8102\nEpoch 44/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.3963 - accuracy: 0.8213 - val_loss: 0.4243 - val_accuracy: 0.8056\nEpoch 45/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.3954 - accuracy: 0.8236 - val_loss: 0.4291 - val_accuracy: 0.8012\nEpoch 46/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.3943 - accuracy: 0.8231 - val_loss: 0.4243 - val_accuracy: 0.8078\nEpoch 47/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.3937 - accuracy: 0.8224 - val_loss: 0.4232 - val_accuracy: 0.8060\nEpoch 48/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.3929 - accuracy: 0.8241 - val_loss: 0.4293 - val_accuracy: 0.8044\nEpoch 49/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.3918 - accuracy: 0.8242 - val_loss: 0.4308 - val_accuracy: 0.8022\nEpoch 50/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.3915 - accuracy: 0.8242 - val_loss: 0.4226 - val_accuracy: 0.8062\nEpoch 51/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.3907 - accuracy: 0.8261 - val_loss: 0.4229 - val_accuracy: 0.8056\nEpoch 52/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.3899 - accuracy: 0.8245 - val_loss: 0.4244 - val_accuracy: 0.8030\nEpoch 53/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.3893 - accuracy: 0.8256 - val_loss: 0.4236 - val_accuracy: 0.8052\n"
],
[
"import matplotlib.pyplot as plt\n\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.xlabel('epoch')\nplt.ylabel('loss')\nplt.legend(['train', 'val'])\nplt.show()",
"_____no_output_____"
]
],
[
[
"## 순환 층에 드롭아웃 적용하기",
"_____no_output_____"
]
],
[
[
"model2 = keras.Sequential()\n\nmodel2.add(keras.layers.Embedding(500, 16, input_length=100))\nmodel2.add(keras.layers.LSTM(8, dropout=0.3))\nmodel2.add(keras.layers.Dense(1, activation='sigmoid'))",
"_____no_output_____"
],
[
"rmsprop = keras.optimizers.RMSprop(learning_rate=1e-4)\nmodel2.compile(optimizer=rmsprop, loss='binary_crossentropy', \n metrics=['accuracy'])\n\ncheckpoint_cb = keras.callbacks.ModelCheckpoint('best-dropout-model.h5', \n save_best_only=True)\nearly_stopping_cb = keras.callbacks.EarlyStopping(patience=3,\n restore_best_weights=True)\n\nhistory = model2.fit(train_seq, train_target, epochs=100, batch_size=64,\n validation_data=(val_seq, val_target),\n callbacks=[checkpoint_cb, early_stopping_cb])",
"Epoch 1/100\n313/313 [==============================] - 5s 9ms/step - loss: 0.6923 - accuracy: 0.5348 - val_loss: 0.6911 - val_accuracy: 0.5792\nEpoch 2/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.6897 - accuracy: 0.5994 - val_loss: 0.6872 - val_accuracy: 0.6398\nEpoch 3/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.6813 - accuracy: 0.6522 - val_loss: 0.6703 - val_accuracy: 0.6906\nEpoch 4/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.6297 - accuracy: 0.6992 - val_loss: 0.5941 - val_accuracy: 0.7140\nEpoch 5/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.5815 - accuracy: 0.7207 - val_loss: 0.5675 - val_accuracy: 0.7374\nEpoch 6/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.5612 - accuracy: 0.7374 - val_loss: 0.5486 - val_accuracy: 0.7550\nEpoch 7/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.5452 - accuracy: 0.7498 - val_loss: 0.5345 - val_accuracy: 0.7590\nEpoch 8/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.5294 - accuracy: 0.7577 - val_loss: 0.5212 - val_accuracy: 0.7664\nEpoch 9/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.5155 - accuracy: 0.7670 - val_loss: 0.5094 - val_accuracy: 0.7706\nEpoch 10/100\n313/313 [==============================] - 3s 8ms/step - loss: 0.5031 - accuracy: 0.7750 - val_loss: 0.5010 - val_accuracy: 0.7712\nEpoch 11/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4917 - accuracy: 0.7796 - val_loss: 0.4892 - val_accuracy: 0.7782\nEpoch 12/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4846 - accuracy: 0.7831 - val_loss: 0.4822 - val_accuracy: 0.7846\nEpoch 13/100\n313/313 [==============================] - 3s 8ms/step - loss: 0.4758 - accuracy: 0.7865 - val_loss: 0.4733 - val_accuracy: 0.7884\nEpoch 14/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4665 - accuracy: 0.7910 - 
val_loss: 0.4671 - val_accuracy: 0.7842\nEpoch 15/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4603 - accuracy: 0.7924 - val_loss: 0.4619 - val_accuracy: 0.7894\nEpoch 16/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4533 - accuracy: 0.7971 - val_loss: 0.4560 - val_accuracy: 0.7904\nEpoch 17/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4493 - accuracy: 0.7946 - val_loss: 0.4532 - val_accuracy: 0.7928\nEpoch 18/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4433 - accuracy: 0.8002 - val_loss: 0.4501 - val_accuracy: 0.7928\nEpoch 19/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4413 - accuracy: 0.8016 - val_loss: 0.4453 - val_accuracy: 0.7936\nEpoch 20/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4362 - accuracy: 0.8029 - val_loss: 0.4438 - val_accuracy: 0.7950\nEpoch 21/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4333 - accuracy: 0.8063 - val_loss: 0.4409 - val_accuracy: 0.7958\nEpoch 22/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4320 - accuracy: 0.8058 - val_loss: 0.4412 - val_accuracy: 0.7948\nEpoch 23/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4298 - accuracy: 0.8041 - val_loss: 0.4402 - val_accuracy: 0.7924\nEpoch 24/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4288 - accuracy: 0.8035 - val_loss: 0.4375 - val_accuracy: 0.7976\nEpoch 25/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4274 - accuracy: 0.8041 - val_loss: 0.4358 - val_accuracy: 0.7992\nEpoch 26/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4246 - accuracy: 0.8059 - val_loss: 0.4359 - val_accuracy: 0.7998\nEpoch 27/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4235 - accuracy: 0.8077 - val_loss: 0.4386 - val_accuracy: 0.8008\nEpoch 28/100\n313/313 [==============================] - 2s 
8ms/step - loss: 0.4227 - accuracy: 0.8077 - val_loss: 0.4353 - val_accuracy: 0.8018\nEpoch 29/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4211 - accuracy: 0.8074 - val_loss: 0.4347 - val_accuracy: 0.7984\nEpoch 30/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4222 - accuracy: 0.8067 - val_loss: 0.4338 - val_accuracy: 0.7986\nEpoch 31/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4215 - accuracy: 0.8065 - val_loss: 0.4336 - val_accuracy: 0.8008\nEpoch 32/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4200 - accuracy: 0.8080 - val_loss: 0.4326 - val_accuracy: 0.7998\nEpoch 33/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4195 - accuracy: 0.8084 - val_loss: 0.4324 - val_accuracy: 0.7966\nEpoch 34/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4192 - accuracy: 0.8101 - val_loss: 0.4321 - val_accuracy: 0.7970\nEpoch 35/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4176 - accuracy: 0.8097 - val_loss: 0.4336 - val_accuracy: 0.8036\nEpoch 36/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4185 - accuracy: 0.8096 - val_loss: 0.4330 - val_accuracy: 0.8018\nEpoch 37/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4163 - accuracy: 0.8087 - val_loss: 0.4335 - val_accuracy: 0.8030\n"
],
[
"plt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.xlabel('epoch')\nplt.ylabel('loss')\nplt.legend(['train', 'val'])\nplt.show()",
"_____no_output_____"
]
],
[
[
"## 2개의 층을 연결하기",
"_____no_output_____"
]
],
[
[
"model3 = keras.Sequential()\n\nmodel3.add(keras.layers.Embedding(500, 16, input_length=100))\nmodel3.add(keras.layers.LSTM(8, dropout=0.3, return_sequences=True))\nmodel3.add(keras.layers.LSTM(8, dropout=0.3))\nmodel3.add(keras.layers.Dense(1, activation='sigmoid'))\n\nmodel3.summary()",
"Model: \"sequential_2\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nembedding_2 (Embedding) (None, 100, 16) 8000 \n_________________________________________________________________\nlstm_2 (LSTM) (None, 100, 8) 800 \n_________________________________________________________________\nlstm_3 (LSTM) (None, 8) 544 \n_________________________________________________________________\ndense_2 (Dense) (None, 1) 9 \n=================================================================\nTotal params: 9,353\nTrainable params: 9,353\nNon-trainable params: 0\n_________________________________________________________________\n"
],
[
"rmsprop = keras.optimizers.RMSprop(learning_rate=1e-4)\nmodel3.compile(optimizer=rmsprop, loss='binary_crossentropy', \n metrics=['accuracy'])\n\ncheckpoint_cb = keras.callbacks.ModelCheckpoint('best-2rnn-model.h5', \n save_best_only=True)\nearly_stopping_cb = keras.callbacks.EarlyStopping(patience=3,\n restore_best_weights=True)\n\nhistory = model3.fit(train_seq, train_target, epochs=100, batch_size=64,\n validation_data=(val_seq, val_target),\n callbacks=[checkpoint_cb, early_stopping_cb])",
"Epoch 1/100\n313/313 [==============================] - 7s 14ms/step - loss: 0.6918 - accuracy: 0.5480 - val_loss: 0.6904 - val_accuracy: 0.5902\nEpoch 2/100\n313/313 [==============================] - 4s 12ms/step - loss: 0.6833 - accuracy: 0.6177 - val_loss: 0.6735 - val_accuracy: 0.5924\nEpoch 3/100\n313/313 [==============================] - 4s 12ms/step - loss: 0.6318 - accuracy: 0.6682 - val_loss: 0.5963 - val_accuracy: 0.7110\nEpoch 4/100\n313/313 [==============================] - 4s 12ms/step - loss: 0.5710 - accuracy: 0.7281 - val_loss: 0.5554 - val_accuracy: 0.7390\nEpoch 5/100\n313/313 [==============================] - 4s 12ms/step - loss: 0.5418 - accuracy: 0.7456 - val_loss: 0.5355 - val_accuracy: 0.7420\nEpoch 6/100\n313/313 [==============================] - 4s 12ms/step - loss: 0.5222 - accuracy: 0.7577 - val_loss: 0.5129 - val_accuracy: 0.7610\nEpoch 7/100\n313/313 [==============================] - 4s 12ms/step - loss: 0.5055 - accuracy: 0.7648 - val_loss: 0.4997 - val_accuracy: 0.7646\nEpoch 8/100\n313/313 [==============================] - 4s 12ms/step - loss: 0.4947 - accuracy: 0.7703 - val_loss: 0.4932 - val_accuracy: 0.7748\nEpoch 9/100\n313/313 [==============================] - 4s 12ms/step - loss: 0.4865 - accuracy: 0.7775 - val_loss: 0.4798 - val_accuracy: 0.7774\nEpoch 10/100\n313/313 [==============================] - 4s 12ms/step - loss: 0.4761 - accuracy: 0.7818 - val_loss: 0.4741 - val_accuracy: 0.7800\nEpoch 11/100\n313/313 [==============================] - 4s 12ms/step - loss: 0.4707 - accuracy: 0.7847 - val_loss: 0.4716 - val_accuracy: 0.7808\nEpoch 12/100\n313/313 [==============================] - 4s 12ms/step - loss: 0.4653 - accuracy: 0.7885 - val_loss: 0.4646 - val_accuracy: 0.7884\nEpoch 13/100\n313/313 [==============================] - 4s 12ms/step - loss: 0.4594 - accuracy: 0.7917 - val_loss: 0.4596 - val_accuracy: 0.7854\nEpoch 14/100\n313/313 [==============================] - 4s 12ms/step - loss: 0.4570 - accuracy: 
0.7908 - val_loss: 0.4574 - val_accuracy: 0.7896\nEpoch 15/100\n313/313 [==============================] - 4s 12ms/step - loss: 0.4534 - accuracy: 0.7919 - val_loss: 0.4541 - val_accuracy: 0.7894\nEpoch 16/100\n313/313 [==============================] - 4s 13ms/step - loss: 0.4496 - accuracy: 0.7942 - val_loss: 0.4514 - val_accuracy: 0.7898\nEpoch 17/100\n313/313 [==============================] - 4s 12ms/step - loss: 0.4463 - accuracy: 0.7945 - val_loss: 0.4488 - val_accuracy: 0.7908\nEpoch 18/100\n313/313 [==============================] - 4s 12ms/step - loss: 0.4437 - accuracy: 0.7980 - val_loss: 0.4470 - val_accuracy: 0.7926\nEpoch 19/100\n313/313 [==============================] - 4s 12ms/step - loss: 0.4404 - accuracy: 0.7998 - val_loss: 0.4461 - val_accuracy: 0.7906\nEpoch 20/100\n313/313 [==============================] - 4s 12ms/step - loss: 0.4379 - accuracy: 0.8023 - val_loss: 0.4440 - val_accuracy: 0.7958\nEpoch 21/100\n313/313 [==============================] - 4s 12ms/step - loss: 0.4364 - accuracy: 0.7994 - val_loss: 0.4442 - val_accuracy: 0.7886\nEpoch 22/100\n313/313 [==============================] - 4s 12ms/step - loss: 0.4356 - accuracy: 0.8007 - val_loss: 0.4431 - val_accuracy: 0.7966\nEpoch 23/100\n313/313 [==============================] - 4s 12ms/step - loss: 0.4344 - accuracy: 0.8008 - val_loss: 0.4411 - val_accuracy: 0.7946\nEpoch 24/100\n313/313 [==============================] - 4s 12ms/step - loss: 0.4327 - accuracy: 0.8042 - val_loss: 0.4408 - val_accuracy: 0.7912\nEpoch 25/100\n313/313 [==============================] - 4s 12ms/step - loss: 0.4317 - accuracy: 0.8013 - val_loss: 0.4396 - val_accuracy: 0.7916\nEpoch 26/100\n313/313 [==============================] - 4s 12ms/step - loss: 0.4290 - accuracy: 0.8016 - val_loss: 0.4389 - val_accuracy: 0.7928\nEpoch 27/100\n313/313 [==============================] - 4s 12ms/step - loss: 0.4293 - accuracy: 0.8034 - val_loss: 0.4390 - val_accuracy: 0.7916\nEpoch 28/100\n313/313 
[==============================] - 4s 12ms/step - loss: 0.4298 - accuracy: 0.8039 - val_loss: 0.4397 - val_accuracy: 0.8010\nEpoch 29/100\n313/313 [==============================] - 4s 12ms/step - loss: 0.4265 - accuracy: 0.8054 - val_loss: 0.4387 - val_accuracy: 0.7998\nEpoch 30/100\n313/313 [==============================] - 4s 12ms/step - loss: 0.4276 - accuracy: 0.8056 - val_loss: 0.4374 - val_accuracy: 0.7946\nEpoch 31/100\n313/313 [==============================] - 4s 12ms/step - loss: 0.4262 - accuracy: 0.8047 - val_loss: 0.4369 - val_accuracy: 0.7918\nEpoch 32/100\n313/313 [==============================] - 4s 12ms/step - loss: 0.4230 - accuracy: 0.8054 - val_loss: 0.4361 - val_accuracy: 0.7970\nEpoch 33/100\n313/313 [==============================] - 4s 12ms/step - loss: 0.4239 - accuracy: 0.8064 - val_loss: 0.4355 - val_accuracy: 0.7970\nEpoch 34/100\n313/313 [==============================] - 4s 12ms/step - loss: 0.4237 - accuracy: 0.8069 - val_loss: 0.4359 - val_accuracy: 0.8000\nEpoch 35/100\n313/313 [==============================] - 4s 12ms/step - loss: 0.4207 - accuracy: 0.8084 - val_loss: 0.4357 - val_accuracy: 0.7990\nEpoch 36/100\n313/313 [==============================] - 4s 12ms/step - loss: 0.4219 - accuracy: 0.8079 - val_loss: 0.4348 - val_accuracy: 0.7994\nEpoch 37/100\n313/313 [==============================] - 4s 12ms/step - loss: 0.4203 - accuracy: 0.8108 - val_loss: 0.4338 - val_accuracy: 0.7960\nEpoch 38/100\n313/313 [==============================] - 4s 12ms/step - loss: 0.4196 - accuracy: 0.8087 - val_loss: 0.4333 - val_accuracy: 0.7994\nEpoch 39/100\n313/313 [==============================] - 4s 12ms/step - loss: 0.4194 - accuracy: 0.8099 - val_loss: 0.4340 - val_accuracy: 0.8006\nEpoch 40/100\n313/313 [==============================] - 4s 12ms/step - loss: 0.4203 - accuracy: 0.8076 - val_loss: 0.4325 - val_accuracy: 0.8002\nEpoch 41/100\n313/313 [==============================] - 4s 12ms/step - loss: 0.4179 - accuracy: 0.8105 - 
val_loss: 0.4326 - val_accuracy: 0.8000\nEpoch 42/100\n313/313 [==============================] - 4s 12ms/step - loss: 0.4173 - accuracy: 0.8106 - val_loss: 0.4333 - val_accuracy: 0.8014\nEpoch 43/100\n313/313 [==============================] - 4s 12ms/step - loss: 0.4169 - accuracy: 0.8105 - val_loss: 0.4326 - val_accuracy: 0.8026\n"
],
[
"plt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.xlabel('epoch')\nplt.ylabel('loss')\nplt.legend(['train', 'val'])\nplt.show()",
"_____no_output_____"
]
],
[
[
"## GRU 신경망 훈련하기",
"_____no_output_____"
]
],
[
[
"model4 = keras.Sequential()\n\nmodel4.add(keras.layers.Embedding(500, 16, input_length=100))\nmodel4.add(keras.layers.GRU(8))\nmodel4.add(keras.layers.Dense(1, activation='sigmoid'))\n\nmodel4.summary()",
"Model: \"sequential_3\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nembedding_3 (Embedding) (None, 100, 16) 8000 \n_________________________________________________________________\ngru (GRU) (None, 8) 624 \n_________________________________________________________________\ndense_3 (Dense) (None, 1) 9 \n=================================================================\nTotal params: 8,633\nTrainable params: 8,633\nNon-trainable params: 0\n_________________________________________________________________\n"
],
[
"rmsprop = keras.optimizers.RMSprop(learning_rate=1e-4)\nmodel4.compile(optimizer=rmsprop, loss='binary_crossentropy', \n metrics=['accuracy'])\n\ncheckpoint_cb = keras.callbacks.ModelCheckpoint('best-gru-model.h5', \n save_best_only=True)\nearly_stopping_cb = keras.callbacks.EarlyStopping(patience=3,\n restore_best_weights=True)\n\nhistory = model4.fit(train_seq, train_target, epochs=100, batch_size=64,\n validation_data=(val_seq, val_target),\n callbacks=[checkpoint_cb, early_stopping_cb])",
"Epoch 1/100\n313/313 [==============================] - 4s 9ms/step - loss: 0.6922 - accuracy: 0.5236 - val_loss: 0.6912 - val_accuracy: 0.5532\nEpoch 2/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.6902 - accuracy: 0.5547 - val_loss: 0.6891 - val_accuracy: 0.5754\nEpoch 3/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.6871 - accuracy: 0.5788 - val_loss: 0.6855 - val_accuracy: 0.5992\nEpoch 4/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.6820 - accuracy: 0.6043 - val_loss: 0.6796 - val_accuracy: 0.6104\nEpoch 5/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.6740 - accuracy: 0.6217 - val_loss: 0.6708 - val_accuracy: 0.6196\nEpoch 6/100\n313/313 [==============================] - 2s 7ms/step - loss: 0.6617 - accuracy: 0.6380 - val_loss: 0.6570 - val_accuracy: 0.6344\nEpoch 7/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.6423 - accuracy: 0.6572 - val_loss: 0.6344 - val_accuracy: 0.6552\nEpoch 8/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.6090 - accuracy: 0.6819 - val_loss: 0.5934 - val_accuracy: 0.6918\nEpoch 9/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.5553 - accuracy: 0.7205 - val_loss: 0.5455 - val_accuracy: 0.7290\nEpoch 10/100\n313/313 [==============================] - 2s 7ms/step - loss: 0.5232 - accuracy: 0.7462 - val_loss: 0.5282 - val_accuracy: 0.7426\nEpoch 11/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.5080 - accuracy: 0.7571 - val_loss: 0.5203 - val_accuracy: 0.7430\nEpoch 12/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4968 - accuracy: 0.7640 - val_loss: 0.5078 - val_accuracy: 0.7602\nEpoch 13/100\n313/313 [==============================] - 2s 7ms/step - loss: 0.4870 - accuracy: 0.7696 - val_loss: 0.5005 - val_accuracy: 0.7590\nEpoch 14/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4787 - accuracy: 0.7752 - 
val_loss: 0.4932 - val_accuracy: 0.7664\nEpoch 15/100\n313/313 [==============================] - 2s 7ms/step - loss: 0.4716 - accuracy: 0.7801 - val_loss: 0.4870 - val_accuracy: 0.7714\nEpoch 16/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4652 - accuracy: 0.7837 - val_loss: 0.4836 - val_accuracy: 0.7680\nEpoch 17/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4595 - accuracy: 0.7893 - val_loss: 0.4762 - val_accuracy: 0.7760\nEpoch 18/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4543 - accuracy: 0.7927 - val_loss: 0.4733 - val_accuracy: 0.7726\nEpoch 19/100\n313/313 [==============================] - 2s 7ms/step - loss: 0.4488 - accuracy: 0.7954 - val_loss: 0.4670 - val_accuracy: 0.7776\nEpoch 20/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4449 - accuracy: 0.7964 - val_loss: 0.4619 - val_accuracy: 0.7828\nEpoch 21/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4411 - accuracy: 0.7980 - val_loss: 0.4589 - val_accuracy: 0.7826\nEpoch 22/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4379 - accuracy: 0.8001 - val_loss: 0.4558 - val_accuracy: 0.7880\nEpoch 23/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4352 - accuracy: 0.8021 - val_loss: 0.4538 - val_accuracy: 0.7858\nEpoch 24/100\n313/313 [==============================] - 2s 7ms/step - loss: 0.4320 - accuracy: 0.8047 - val_loss: 0.4523 - val_accuracy: 0.7894\nEpoch 25/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4299 - accuracy: 0.8074 - val_loss: 0.4489 - val_accuracy: 0.7912\nEpoch 26/100\n313/313 [==============================] - 2s 7ms/step - loss: 0.4274 - accuracy: 0.8066 - val_loss: 0.4499 - val_accuracy: 0.7898\nEpoch 27/100\n313/313 [==============================] - 2s 7ms/step - loss: 0.4254 - accuracy: 0.8087 - val_loss: 0.4465 - val_accuracy: 0.7930\nEpoch 28/100\n313/313 [==============================] - 2s 
7ms/step - loss: 0.4233 - accuracy: 0.8095 - val_loss: 0.4456 - val_accuracy: 0.7950\nEpoch 29/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4220 - accuracy: 0.8107 - val_loss: 0.4483 - val_accuracy: 0.7958\nEpoch 30/100\n313/313 [==============================] - 2s 7ms/step - loss: 0.4204 - accuracy: 0.8120 - val_loss: 0.4436 - val_accuracy: 0.7928\nEpoch 31/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4196 - accuracy: 0.8109 - val_loss: 0.4432 - val_accuracy: 0.7918\nEpoch 32/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4182 - accuracy: 0.8120 - val_loss: 0.4422 - val_accuracy: 0.7930\nEpoch 33/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4174 - accuracy: 0.8123 - val_loss: 0.4416 - val_accuracy: 0.7938\nEpoch 34/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4166 - accuracy: 0.8146 - val_loss: 0.4422 - val_accuracy: 0.7984\nEpoch 35/100\n313/313 [==============================] - 2s 7ms/step - loss: 0.4160 - accuracy: 0.8145 - val_loss: 0.4440 - val_accuracy: 0.7974\nEpoch 36/100\n313/313 [==============================] - 2s 8ms/step - loss: 0.4153 - accuracy: 0.8156 - val_loss: 0.4418 - val_accuracy: 0.7982\n"
],
[
"plt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.xlabel('epoch')\nplt.ylabel('loss')\nplt.legend(['train', 'val'])\nplt.show()",
"_____no_output_____"
]
],
[
[
"## ๋ง๋ฌด๋ฆฌ",
"_____no_output_____"
]
],
[
[
"test_seq = pad_sequences(test_input, maxlen=100)\n\nrnn_model = keras.models.load_model('best-2rnn-model.h5')\n\nrnn_model.evaluate(test_seq, test_target)",
"782/782 [==============================] - 5s 5ms/step - loss: 0.4261 - accuracy: 0.8018\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
eca43adbbd50607b0a04a636646947e409fd62c9 | 1,747 | ipynb | Jupyter Notebook | preprocessing/RobustScaler.ipynb | keyianpai/tiny-sklearn | 8571fc8dee2a08822b22c540375255dbf19106fa | [
"MIT"
] | 19 | 2019-05-08T14:50:24.000Z | 2022-01-18T07:40:55.000Z | preprocessing/RobustScaler.ipynb | keyianpai/tiny-sklearn | 8571fc8dee2a08822b22c540375255dbf19106fa | [
"MIT"
] | 1 | 2019-12-05T18:08:49.000Z | 2019-12-06T04:46:55.000Z | preprocessing/RobustScaler.ipynb | keyianpai/tiny-sklearn | 8571fc8dee2a08822b22c540375255dbf19106fa | [
"MIT"
] | 2 | 2019-05-08T21:38:37.000Z | 2020-01-21T15:33:09.000Z | 23.931507 | 74 | 0.540927 | [
[
[
"import numpy as np\nfrom sklearn.datasets import load_iris\nfrom sklearn.preprocessing import RobustScaler as skRobustScaler",
"_____no_output_____"
],
[
"class RobustScaler:\n def __init__(self, quantile_range=(25, 75)):\n self.quantile_range = quantile_range\n\n def fit(self, X):\n self.center_ = np.median(X, axis=0)\n quantiles = np.percentile(X, self.quantile_range, axis=0)\n self.scale_ = quantiles[1] - quantiles[0]\n return self\n\n def transform(self, X):\n return (X - self.center_) / self.scale_",
"_____no_output_____"
],
[
"X, _ = load_iris(return_X_y=True)\nsc1 = RobustScaler().fit(X)\nsc2 = skRobustScaler().fit(X)\nassert np.allclose(sc1.center_, sc2.center_)\nassert np.allclose(sc1.scale_, sc2.scale_)\nXt1 = sc1.transform(X)\nXt2 = sc2.transform(X)\nassert np.allclose(Xt1, Xt2)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code"
]
] |
eca4486c3149f3c2bd318edd541d2408d2aea562 | 10,142 | ipynb | Jupyter Notebook | eval.ipynb | cboettig/rl-ray-demo | 96affbbc1b2cb94af9a060fb8ff15e98e59cb678 | [
"MIT"
] | null | null | null | eval.ipynb | cboettig/rl-ray-demo | 96affbbc1b2cb94af9a060fb8ff15e98e59cb678 | [
"MIT"
] | null | null | null | eval.ipynb | cboettig/rl-ray-demo | 96affbbc1b2cb94af9a060fb8ff15e98e59cb678 | [
"MIT"
] | 1 | 2022-02-22T20:31:05.000Z | 2022-02-22T20:31:05.000Z | 46.737327 | 1,170 | 0.645336 | [
[
[
"import os\nimport cloudpickle\nimport ray\nfrom ray import tune\nfrom ray.rllib import agents\nimport torch\nfrom ray.tune.registry import get_trainable_cls\n\ncheckpoint = \"saved_checkpoint/checkpoint/checkpoint\"\nrun = \"PPO\"\nenv = \"gym_fishing.envs.FishingCtsEnv\"\n\n\n# Based on rllib evaluate,\n# https://github.com/ray-project/ray/blob/master/rllib/evaluate.py\nconfig_dir = os.path.dirname(checkpoint)\nconfig_path = os.path.join(config_dir, \"../params.pkl\")\nconfig_path\nwith open(config_path, \"rb\") as f:\n config = cloudpickle.load(f)\nconfig",
"_____no_output_____"
],
[
"\n# Based on rllib evaluate,\n# https://github.com/ray-project/ray/blob/master/rllib/evaluate.py\n# Make sure worker 0 has an Env.\nconfig[\"create_env_on_driver\"] = True\n# Make sure we have evaluation workers.\nif not config.get(\"evaluation_num_workers\"):\n config[\"evaluation_num_workers\"] = config.get(\"num_workers\", 0)\nif not config.get(\"evaluation_duration\"):\n config[\"evaluation_duration\"] = 1\n# Hard-override this as it raises a warning by Trainer otherwise.\n# Makes no sense anyways, to have it set to None as we don't call\n# `Trainer.train()` here.\nconfig[\"evaluation_interval\"] = 1\n\n\n\n\n# Create the Trainer from config.\ncls = get_trainable_cls(run)\nagent = cls(env=env, config=config)\n\nagent.restore(checkpoint)",
"2022-03-06 04:50:48,075\tWARNING deprecation.py:45 -- DeprecationWarning: `simple_optimizer` has been deprecated. This will raise an error in the future!\n2022-03-06 04:50:48,210\tINFO trainable.py:472 -- Restored on 172.18.0.8 from checkpoint: saved_checkpoint/checkpoint/checkpoint\n2022-03-06 04:50:48,211\tINFO trainable.py:480 -- Current state after restoring: {'_iteration': 250, '_timesteps_total': 1000000, '_time_total': 2036.3581442832947, '_episodes_total': 19192}\n\u001b[2m\u001b[36m(RolloutWorker pid=222871)\u001b[0m 2022-03-06 04:50:50,673\tWARNING deprecation.py:45 -- DeprecationWarning: `rllib.env.remote_vector_env.RemoteVectorEnv` has been deprecated. Use `ray.rllib.env.remote_base_env.RemoteBaseEnv` instead. This will raise an error in the future!\n\u001b[2m\u001b[36m(RolloutWorker pid=222875)\u001b[0m 2022-03-06 04:50:50,701\tWARNING deprecation.py:45 -- DeprecationWarning: `rllib.env.remote_vector_env.RemoteVectorEnv` has been deprecated. Use `ray.rllib.env.remote_base_env.RemoteBaseEnv` instead. This will raise an error in the future!\n\u001b[2m\u001b[36m(RolloutWorker pid=222878)\u001b[0m 2022-03-06 04:50:50,776\tWARNING deprecation.py:45 -- DeprecationWarning: `rllib.env.remote_vector_env.RemoteVectorEnv` has been deprecated. Use `ray.rllib.env.remote_base_env.RemoteBaseEnv` instead. This will raise an error in the future!\n\u001b[2m\u001b[36m(RolloutWorker pid=222872)\u001b[0m 2022-03-06 04:50:50,807\tWARNING deprecation.py:45 -- DeprecationWarning: `rllib.env.remote_vector_env.RemoteVectorEnv` has been deprecated. Use `ray.rllib.env.remote_base_env.RemoteBaseEnv` instead. This will raise an error in the future!\n\u001b[2m\u001b[36m(RolloutWorker pid=222866)\u001b[0m 2022-03-06 04:50:50,824\tWARNING deprecation.py:45 -- DeprecationWarning: `rllib.env.remote_vector_env.RemoteVectorEnv` has been deprecated. Use `ray.rllib.env.remote_base_env.RemoteBaseEnv` instead. 
This will raise an error in the future!\n\u001b[2m\u001b[36m(RolloutWorker pid=222864)\u001b[0m 2022-03-06 04:50:50,906\tWARNING deprecation.py:45 -- DeprecationWarning: `rllib.env.remote_vector_env.RemoteVectorEnv` has been deprecated. Use `ray.rllib.env.remote_base_env.RemoteBaseEnv` instead. This will raise an error in the future!\n\u001b[2m\u001b[36m(RolloutWorker pid=222881)\u001b[0m 2022-03-06 04:50:50,893\tWARNING deprecation.py:45 -- DeprecationWarning: `rllib.env.remote_vector_env.RemoteVectorEnv` has been deprecated. Use `ray.rllib.env.remote_base_env.RemoteBaseEnv` instead. This will raise an error in the future!\n\u001b[2m\u001b[36m(RolloutWorker pid=222882)\u001b[0m 2022-03-06 04:50:50,850\tWARNING deprecation.py:45 -- DeprecationWarning: `rllib.env.remote_vector_env.RemoteVectorEnv` has been deprecated. Use `ray.rllib.env.remote_base_env.RemoteBaseEnv` instead. This will raise an error in the future!\n"
],
[
"agent.evaluate()",
"_____no_output_____"
],
[
"# Read in an ExperimentAnalysis from a json state file\n#results_path = \"~/ray_results/ARS/\"\nresults_path = \"~/ray_results/fishing-ppo/\"\n\nanalysis = tune.ExperimentAnalysis(experiment_checkpoint_path=results_path)\n#best_trial = analysis.get_best_trial(metric=\"episode_reward_mean\", mode=\"max\")\nbest_trial = \"PPO_gym_fishing.envs.FishingCtsEnv_deefe_00000\"\n#best_checkpoint = analysis.get_best_checkpoint(trial=best_trial, metric=\"episode_reward_mean\", mode=\"max\")\n",
"_____no_output_____"
],
[
"\n#config = analysis.get_best_trial(metric=\"episode_reward_mean\", mode=\"max\").last_result[\"config\"]\n",
"Could not find best trial. Did you pass the correct `metric` parameter?\n"
],
[
"# extract the hyper-parameter config\nconfig = analysis.get_best_trial(metric=\"episode_reward_mean\", mode=\"max\").last_result[\"config\"]\nconfig.pop(\"in_evaluation\", None)\n",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
eca462f9b6dd21087429e457bbc4b6e323aed1d3 | 6,733 | ipynb | Jupyter Notebook | tensorflow/lite/examples/experimental_new_converter/keras_lstm.ipynb | anigasan/tensorflow | 5b780b4983007661ca479bf4d7ed9a260d8ce43f | [
"Apache-2.0"
] | 1 | 2020-01-06T12:33:30.000Z | 2020-01-06T12:33:30.000Z | tensorflow/lite/examples/experimental_new_converter/keras_lstm.ipynb | anigasan/tensorflow | 5b780b4983007661ca479bf4d7ed9a260d8ce43f | [
"Apache-2.0"
] | null | null | null | tensorflow/lite/examples/experimental_new_converter/keras_lstm.ipynb | anigasan/tensorflow | 5b780b4983007661ca479bf4d7ed9a260d8ce43f | [
"Apache-2.0"
] | null | null | null | 28.054167 | 312 | 0.590227 | [
[
[
"# Overview\n\nThis CodeLab demonstrates how to build a LSTM model for MNIST recognition using Keras, and how to convert it to TensorFlow Lite.\n\nThe CodeLab is very similar to the `tf.lite.experimental.nn.TFLiteLSTMCell`\n[CodeLab](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/experimental/examples/lstm/TensorFlowLite_LSTM_Keras_Tutorial.ipynb). However, with the control flow support in the experimental new converter, we can define the model with control flow directly without refactoring the code.\n\nAlso note: We're not trying to build the model to be a real world application, but only demonstrate how to use TensorFlow Lite. You can a build a much better model using CNN models. For a more canonical lstm codelab, please see [here](https://github.com/keras-team/keras/blob/master/examples/imdb_lstm.py).",
"_____no_output_____"
],
[
"# Step 0: Prerequisites\n\nIt's recommended to try this feature with the newest TensorFlow nightly pip build.",
"_____no_output_____"
]
],
[
[
"!pip install tf-nightly --upgrade",
"_____no_output_____"
]
],
[
[
"\n## Step 1: Build the MNIST LSTM model.\n\n\n",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport tensorflow as tf",
"_____no_output_____"
],
[
"model = tf.keras.models.Sequential([\n tf.keras.layers.Input(shape=(28, 28), name='input'),\n tf.keras.layers.LSTM(20),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(10, activation=tf.nn.softmax, name='output')\n])\nmodel.compile(optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\nmodel.summary()",
"_____no_output_____"
]
],
[
[
"## Step 2: Train & Evaluate the model.\nWe will train the model using MNIST data.",
"_____no_output_____"
]
],
[
[
"# Load MNIST dataset.\n(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()\nx_train, x_test = x_train / 255.0, x_test / 255.0\nx_train = x_train.astype(np.float32)\nx_test = x_test.astype(np.float32)\n\n# Change this to True if you want to test the flow rapidly.\n# Train with a small dataset and only 1 epoch. The model will work poorly\n# but this provides a fast way to test if the conversion works end to end.\n_FAST_TRAINING = False\n_EPOCHS = 5\nif _FAST_TRAINING:\n _EPOCHS = 1\n _TRAINING_DATA_COUNT = 1000\n x_train = x_train[:_TRAINING_DATA_COUNT]\n y_train = y_train[:_TRAINING_DATA_COUNT]\n\nmodel.fit(x_train, y_train, epochs=_EPOCHS)\nmodel.evaluate(x_test, y_test, verbose=0)\n",
"_____no_output_____"
]
],
[
[
"## Step 3: Convert the Keras model to TensorFlow Lite model.\n\nNote here: we just convert to TensorFlow Lite model as usual.",
"_____no_output_____"
]
],
[
[
"converter = tf.lite.TFLiteConverter.from_keras_model(model)\n# Note: It will NOT work without enabling the experimental converter!\n# `experimental_new_converter` flag. The new code wasn't deployed to the\n# \"TensorFlow public guest runtime\" so we're setting both flags in Colab\n# for now.\nconverter.experimental_new_converter = True\ntflite_model = converter.convert()",
"_____no_output_____"
]
],
[
[
"## Step 4: Check the converted TensorFlow Lite model.\n\nNow load the TensorFlow Lite model and use the TensorFlow Lite python interpreter to verify the results.",
"_____no_output_____"
]
],
[
[
"# Run the model with TensorFlow to get expected results.\nexpected = model.predict(x_test[0:1])\n\n# Run the model with TensorFlow Lite\ninterpreter = tf.lite.Interpreter(model_content=tflite_model)\ninterpreter.allocate_tensors()\ninput_details = interpreter.get_input_details()\noutput_details = interpreter.get_output_details()\ninterpreter.set_tensor(input_details[0][\"index\"], x_test[0:1, :, :])\ninterpreter.invoke()\nresult = interpreter.get_tensor(output_details[0][\"index\"])\n\n# Assert if the result of TFLite model is consistent with the TF model.\nnp.testing.assert_almost_equal(expected, result)\nprint(\"Done. The result of TensorFlow matches the result of TensorFlow Lite.\")",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
eca471f9e64f0797094906a773cbe4cf65e33bee | 8,048 | ipynb | Jupyter Notebook | notebooks/baseline_svm_model.ipynb | ParivedaChiFinesse/trends-neuroimaging | 449ea419dba4358faa44adde52dcef470ee81254 | [
"MIT"
] | null | null | null | notebooks/baseline_svm_model.ipynb | ParivedaChiFinesse/trends-neuroimaging | 449ea419dba4358faa44adde52dcef470ee81254 | [
"MIT"
] | 1 | 2021-08-23T20:43:02.000Z | 2021-08-23T20:43:02.000Z | notebooks/baseline_svm_model.ipynb | ParivedaChiFinesse/trends-neuroimaging | 449ea419dba4358faa44adde52dcef470ee81254 | [
"MIT"
] | null | null | null | 29.265455 | 179 | 0.469061 | [
[
[
"import numpy as np\nimport pandas as pd\nfrom sklearn.svm import LinearSVR\nfrom sklearn.model_selection import KFold\n\n\ndef metric(y_true, y_pred):\n return np.mean(np.sum(np.abs(y_true - y_pred), axis=0)/np.sum(y_true, axis=0))",
"_____no_output_____"
],
[
"fnc_df = pd.read_csv(\"../input/trends-assessment-prediction/fnc.csv\")\nloading_df = pd.read_csv(\"../input/trends-assessment-prediction/loading.csv\")\n\nfnc_features, loading_features = list(fnc_df.columns[1:]), list(loading_df.columns[1:])\ndf = fnc_df.merge(loading_df, on=\"Id\")\n\n\nlabels_df = pd.read_csv(\"../input/trends-assessment-prediction/train_scores.csv\")\nlabels_df[\"is_train\"] = True\n\ndf = df.merge(labels_df, on=\"Id\", how=\"left\")\n\ntest_df = df[df[\"is_train\"] != True].copy()\ndf = df[df[\"is_train\"] == True].copy()\n\ndf.shape, test_df.shape",
"_____no_output_____"
],
[
"# Giving less importance to FNC features since they are easier to overfit due to high dimensionality.\nFNC_SCALE = 1/500\n\ndf[fnc_features] *= FNC_SCALE\ntest_df[fnc_features] *= FNC_SCALE",
"_____no_output_____"
],
[
"%%time\n\nNUM_FOLDS = 7\nkf = KFold(n_splits=NUM_FOLDS, shuffle=True, random_state=0)\n\n\nfeatures = loading_features + fnc_features\n\noverall_score = 0\nfor target, c, w in [(\"age\", 100, 0.3), (\"domain1_var1\", 10, 0.175), (\"domain1_var2\", 10, 0.175), (\"domain2_var1\", 10, 0.175), (\"domain2_var2\", 10, 0.175)]: \n y_oof = np.zeros(df.shape[0])\n y_test = np.zeros((test_df.shape[0], NUM_FOLDS))\n \n for f, (train_ind, val_ind) in enumerate(kf.split(df, df)):\n train_df, val_df = df.iloc[train_ind], df.iloc[val_ind]\n train_df = train_df[train_df[target].notnull()]\n\n model = LinearSVR(C=c)\n model.fit(train_df[features], train_df[target])\n\n y_oof[val_ind] = model.predict(val_df[features])\n y_test[:, f] = model.predict(test_df[features])\n \n df[\"pred_{}\".format(target)] = y_oof\n test_df[target] = y_test.mean(axis=1)\n \n score = metric(df[df[target].notnull()][target].values, df[df[target].notnull()][\"pred_{}\".format(target)].values)\n overall_score += w*score\n print(target, np.round(score, 4))\n print()\n \nprint(\"Overall score:\", np.round(overall_score, 4))",
"age 0.1651\n\ndomain1_var1 0.1565\n\ndomain1_var2 0.151\n\ndomain2_var1 0.1855\n\ndomain2_var2 0.1785\n\nOverall score: 0.167\nCPU times: user 38.9 s, sys: 14.6 s, total: 53.6 s\nWall time: 36.8 s\n"
],
[
"sub_df = pd.melt(test_df[[\"Id\", \"age\", \"domain1_var1\", \"domain1_var2\", \"domain2_var1\", \"domain2_var2\"]], id_vars=[\"Id\"], value_name=\"Predicted\")\nsub_df[\"Id\"] = sub_df[\"Id\"].astype(\"str\") + \"_\" + sub_df[\"variable\"].astype(\"str\")\n\nsub_df = sub_df.drop(\"variable\", axis=1).sort_values(\"Id\")\nassert sub_df.shape[0] == test_df.shape[0]*5\nsub_df.head(10)",
"_____no_output_____"
],
[
"sub_df.to_csv(\"submission.csv\", index=False)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
eca49f2a9888e729f8aeaf4ac3024ef47c3053f5 | 238,882 | ipynb | Jupyter Notebook | src/Object dectction/Retinanet_Training_Foot_model_final_version_06_06_2020.ipynb | NC717/DeepRA | 409b33e5447e116d0ff2eff59548506a055180e4 | [
"Apache-2.0"
] | null | null | null | src/Object dectction/Retinanet_Training_Foot_model_final_version_06_06_2020.ipynb | NC717/DeepRA | 409b33e5447e116d0ff2eff59548506a055180e4 | [
"Apache-2.0"
] | null | null | null | src/Object dectction/Retinanet_Training_Foot_model_final_version_06_06_2020.ipynb | NC717/DeepRA | 409b33e5447e116d0ff2eff59548506a055180e4 | [
"Apache-2.0"
] | null | null | null | 58.37781 | 1,491 | 0.536692 | [
[
[
"# Installing retinanet library",
"_____no_output_____"
]
],
[
[
"# a = []\n# while(1):\n# a.append(1)",
"_____no_output_____"
],
[
"!git clone --recursive https://github.com/fizyr/keras-retinanet.git\n!ls\n!cd keras-retinanet && pip install . --user\n!pip install Cython\n!pip install --user git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI\n!cd keras-retinanet/snapshots && \\\nwget https://github.com/fizyr/keras-retinanet/releases/download/0.2/resnet50_coco_best_v2.0.3.h5\n\n!pip install --upgrade git+https://github.com/fizyr/keras-retinanet\n!pip install --upgrade git+https://github.com/broadinstitute/keras-resnet\n\nimport keras\nimport keras_resnet",
"Cloning into 'keras-retinanet'...\nremote: Enumerating objects: 5, done.\u001b[K\nremote: Counting objects: 20% (1/5)\u001b[K\rremote: Counting objects: 40% (2/5)\u001b[K\rremote: Counting objects: 60% (3/5)\u001b[K\rremote: Counting objects: 80% (4/5)\u001b[K\rremote: Counting objects: 100% (5/5)\u001b[K\rremote: Counting objects: 100% (5/5), done.\u001b[K\nremote: Compressing objects: 100% (5/5), done.\u001b[K\nremote: Total 5762 (delta 0), reused 1 (delta 0), pack-reused 5757\u001b[K\nReceiving objects: 100% (5762/5762), 13.38 MiB | 32.39 MiB/s, done.\nResolving deltas: 100% (3865/3865), done.\nSubmodule 'tests/test-data' (https://github.com/fizyr/keras-retinanet-test-data.git) registered for path 'tests/test-data'\nCloning into '/content/keras-retinanet/tests/test-data'...\nremote: Enumerating objects: 45, done. \nremote: Total 45 (delta 0), reused 0 (delta 0), pack-reused 45 \nSubmodule path 'tests/test-data': checked out '98404379fbf1ff1273d01db835c10cc83a4f8007'\ndrive keras-retinanet\tsample_data\nProcessing /content/keras-retinanet\nRequirement already satisfied: keras in /usr/local/lib/python3.6/dist-packages (from keras-retinanet==0.5.1) (2.3.1)\nCollecting keras-resnet==0.1.0\n Downloading https://files.pythonhosted.org/packages/05/46/ad0b2d1a05d9497bd80c98a2c3f4d8be38a4601ace69af72814f5fafd851/keras-resnet-0.1.0.tar.gz\nRequirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from keras-retinanet==0.5.1) (1.12.0)\nRequirement already satisfied: scipy in /usr/local/lib/python3.6/dist-packages (from keras-retinanet==0.5.1) (1.4.1)\nRequirement already satisfied: cython in /usr/local/lib/python3.6/dist-packages (from keras-retinanet==0.5.1) (0.29.19)\nRequirement already satisfied: Pillow in /usr/local/lib/python3.6/dist-packages (from keras-retinanet==0.5.1) (7.0.0)\nRequirement already satisfied: opencv-python in /usr/local/lib/python3.6/dist-packages (from keras-retinanet==0.5.1) (4.1.2.30)\nRequirement already satisfied: 
progressbar2 in /usr/local/lib/python3.6/dist-packages (from keras-retinanet==0.5.1) (3.38.0)\nRequirement already satisfied: keras-applications>=1.0.6 in /usr/local/lib/python3.6/dist-packages (from keras->keras-retinanet==0.5.1) (1.0.8)\nRequirement already satisfied: keras-preprocessing>=1.0.5 in /usr/local/lib/python3.6/dist-packages (from keras->keras-retinanet==0.5.1) (1.1.2)\nRequirement already satisfied: pyyaml in /usr/local/lib/python3.6/dist-packages (from keras->keras-retinanet==0.5.1) (3.13)\nRequirement already satisfied: numpy>=1.9.1 in /usr/local/lib/python3.6/dist-packages (from keras->keras-retinanet==0.5.1) (1.18.5)\nRequirement already satisfied: h5py in /usr/local/lib/python3.6/dist-packages (from keras->keras-retinanet==0.5.1) (2.10.0)\nRequirement already satisfied: python-utils>=2.3.0 in /usr/local/lib/python3.6/dist-packages (from progressbar2->keras-retinanet==0.5.1) (2.4.0)\nBuilding wheels for collected packages: keras-retinanet, keras-resnet\n Building wheel for keras-retinanet (setup.py) ... \u001b[?25l\u001b[?25hdone\n Created wheel for keras-retinanet: filename=keras_retinanet-0.5.1-cp36-cp36m-linux_x86_64.whl size=169702 sha256=cbc7680823ab6d57839cb3c8ab30853278255dabc2fcdc06f5d5fa77ede5f54e\n Stored in directory: /root/.cache/pip/wheels/b2/9f/57/cb0305f6f5a41fc3c11ad67b8cedfbe9127775b563337827ba\n Building wheel for keras-resnet (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n Created wheel for keras-resnet: filename=keras_resnet-0.1.0-py2.py3-none-any.whl size=13346 sha256=1827af4e60e94c812e32c9e15f6f9f84c8f7a3c763ba481256726a477412c098\n Stored in directory: /root/.cache/pip/wheels/80/dd/ac/842235b63dddac12faa4b48ebe58b8944e8c2e57c2e38dddb6\nSuccessfully built keras-retinanet keras-resnet\nInstalling collected packages: keras-resnet, keras-retinanet\n\u001b[33m WARNING: The scripts retinanet-convert-model, retinanet-debug, retinanet-evaluate and retinanet-train are installed in '/root/.local/bin' which is not on PATH.\n Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.\u001b[0m\nSuccessfully installed keras-resnet-0.1.0 keras-retinanet-0.5.1\nRequirement already satisfied: Cython in /usr/local/lib/python3.6/dist-packages (0.29.19)\nCollecting git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI\n Cloning https://github.com/cocodataset/cocoapi.git to /tmp/pip-req-build-bblg_ad1\n Running command git clone -q https://github.com/cocodataset/cocoapi.git /tmp/pip-req-build-bblg_ad1\nRequirement already satisfied (use --upgrade to upgrade): pycocotools==2.0 from git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI in /usr/local/lib/python3.6/dist-packages\nRequirement already satisfied: setuptools>=18.0 in /usr/local/lib/python3.6/dist-packages (from pycocotools==2.0) (47.1.1)\nRequirement already satisfied: cython>=0.27.3 in /usr/local/lib/python3.6/dist-packages (from pycocotools==2.0) (0.29.19)\nRequirement already satisfied: matplotlib>=2.1.0 in /usr/local/lib/python3.6/dist-packages (from pycocotools==2.0) (3.2.1)\nRequirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.6/dist-packages (from matplotlib>=2.1.0->pycocotools==2.0) (0.10.0)\nRequirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib>=2.1.0->pycocotools==2.0) 
(2.4.7)\nRequirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib>=2.1.0->pycocotools==2.0) (1.2.0)\nRequirement already satisfied: numpy>=1.11 in /usr/local/lib/python3.6/dist-packages (from matplotlib>=2.1.0->pycocotools==2.0) (1.18.5)\nRequirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib>=2.1.0->pycocotools==2.0) (2.8.1)\nRequirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from cycler>=0.10->matplotlib>=2.1.0->pycocotools==2.0) (1.12.0)\nBuilding wheels for collected packages: pycocotools\n Building wheel for pycocotools (setup.py) ... \u001b[?25l\u001b[?25hdone\n Created wheel for pycocotools: filename=pycocotools-2.0-cp36-cp36m-linux_x86_64.whl size=267013 sha256=4e28bb6af69248408ae3483f6169f8c3dc35337eb13c4b0dc82c5c9bbaae6c0a\n Stored in directory: /tmp/pip-ephem-wheel-cache-c_g0aaz5/wheels/90/51/41/646daf401c3bc408ff10de34ec76587a9b3ebfac8d21ca5c3a\nSuccessfully built pycocotools\n--2020-06-14 17:23:14-- https://github.com/fizyr/keras-retinanet/releases/download/0.2/resnet50_coco_best_v2.0.3.h5\nResolving github.com (github.com)... 140.82.114.3\nConnecting to github.com (github.com)|140.82.114.3|:443... connected.\nHTTP request sent, awaiting response... 
302 Found\nLocation: https://github-production-release-asset-2e65be.s3.amazonaws.com/100249425/190de828-40ad-11e8-8d21-51a2a173a26c?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIWNJYAX4CSVEH53A%2F20200614%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20200614T172314Z&X-Amz-Expires=300&X-Amz-Signature=69b8569d0e5d3c1367f074d42da8d0011a0fc4e710e18aef7035ef99d66bb829&X-Amz-SignedHeaders=host&actor_id=0&repo_id=100249425&response-content-disposition=attachment%3B%20filename%3Dresnet50_coco_best_v2.0.3.h5&response-content-type=application%2Foctet-stream [following]\n--2020-06-14 17:23:14-- https://github-production-release-asset-2e65be.s3.amazonaws.com/100249425/190de828-40ad-11e8-8d21-51a2a173a26c?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIWNJYAX4CSVEH53A%2F20200614%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20200614T172314Z&X-Amz-Expires=300&X-Amz-Signature=69b8569d0e5d3c1367f074d42da8d0011a0fc4e710e18aef7035ef99d66bb829&X-Amz-SignedHeaders=host&actor_id=0&repo_id=100249425&response-content-disposition=attachment%3B%20filename%3Dresnet50_coco_best_v2.0.3.h5&response-content-type=application%2Foctet-stream\nResolving github-production-release-asset-2e65be.s3.amazonaws.com (github-production-release-asset-2e65be.s3.amazonaws.com)... 52.217.10.52\nConnecting to github-production-release-asset-2e65be.s3.amazonaws.com (github-production-release-asset-2e65be.s3.amazonaws.com)|52.217.10.52|:443... connected.\nHTTP request sent, awaiting response... 
200 OK\nLength: 152662144 (146M) [application/octet-stream]\nSaving to: โresnet50_coco_best_v2.0.3.h5โ\n\nresnet50_coco_best_ 100%[===================>] 145.59M 89.4MB/s in 1.6s \n\n2020-06-14 17:23:16 (89.4 MB/s) - โresnet50_coco_best_v2.0.3.h5โ saved [152662144/152662144]\n\nCollecting git+https://github.com/fizyr/keras-retinanet\n Cloning https://github.com/fizyr/keras-retinanet to /tmp/pip-req-build-3wpg9oew\n Running command git clone -q https://github.com/fizyr/keras-retinanet /tmp/pip-req-build-3wpg9oew\n Running command git submodule update --init --recursive -q\nRequirement already satisfied, skipping upgrade: keras in /usr/local/lib/python3.6/dist-packages (from keras-retinanet==0.5.1) (2.3.1)\nRequirement already satisfied, skipping upgrade: keras-resnet==0.1.0 in /root/.local/lib/python3.6/site-packages (from keras-retinanet==0.5.1) (0.1.0)\nRequirement already satisfied, skipping upgrade: six in /usr/local/lib/python3.6/dist-packages (from keras-retinanet==0.5.1) (1.12.0)\nRequirement already satisfied, skipping upgrade: scipy in /usr/local/lib/python3.6/dist-packages (from keras-retinanet==0.5.1) (1.4.1)\nRequirement already satisfied, skipping upgrade: cython in /usr/local/lib/python3.6/dist-packages (from keras-retinanet==0.5.1) (0.29.19)\nRequirement already satisfied, skipping upgrade: Pillow in /usr/local/lib/python3.6/dist-packages (from keras-retinanet==0.5.1) (7.0.0)\nRequirement already satisfied, skipping upgrade: opencv-python in /usr/local/lib/python3.6/dist-packages (from keras-retinanet==0.5.1) (4.1.2.30)\nRequirement already satisfied, skipping upgrade: progressbar2 in /usr/local/lib/python3.6/dist-packages (from keras-retinanet==0.5.1) (3.38.0)\nRequirement already satisfied, skipping upgrade: pyyaml in /usr/local/lib/python3.6/dist-packages (from keras->keras-retinanet==0.5.1) (3.13)\nRequirement already satisfied, skipping upgrade: keras-preprocessing>=1.0.5 in /usr/local/lib/python3.6/dist-packages (from 
keras->keras-retinanet==0.5.1) (1.1.2)\nRequirement already satisfied, skipping upgrade: h5py in /usr/local/lib/python3.6/dist-packages (from keras->keras-retinanet==0.5.1) (2.10.0)\nRequirement already satisfied, skipping upgrade: numpy>=1.9.1 in /usr/local/lib/python3.6/dist-packages (from keras->keras-retinanet==0.5.1) (1.18.5)\nRequirement already satisfied, skipping upgrade: keras-applications>=1.0.6 in /usr/local/lib/python3.6/dist-packages (from keras->keras-retinanet==0.5.1) (1.0.8)\nRequirement already satisfied, skipping upgrade: python-utils>=2.3.0 in /usr/local/lib/python3.6/dist-packages (from progressbar2->keras-retinanet==0.5.1) (2.4.0)\nBuilding wheels for collected packages: keras-retinanet\n Building wheel for keras-retinanet (setup.py) ... \u001b[?25l\u001b[?25hdone\n Created wheel for keras-retinanet: filename=keras_retinanet-0.5.1-cp36-cp36m-linux_x86_64.whl size=169714 sha256=b341128bfb8d13c78c914e49e6add553b1176263f96fed28c740e113796a8cfa\n Stored in directory: /tmp/pip-ephem-wheel-cache-w79g6rr5/wheels/3e/f1/75/4b42a59887b48ec1022cc76889b1e48da866f1482fd7a0f3df\nSuccessfully built keras-retinanet\nInstalling collected packages: keras-retinanet\n Found existing installation: keras-retinanet 0.5.1\n Uninstalling keras-retinanet-0.5.1:\n Successfully uninstalled keras-retinanet-0.5.1\nSuccessfully installed keras-retinanet-0.5.1\nCollecting git+https://github.com/broadinstitute/keras-resnet\n Cloning https://github.com/broadinstitute/keras-resnet to /tmp/pip-req-build-wu6l9llb\n Running command git clone -q https://github.com/broadinstitute/keras-resnet /tmp/pip-req-build-wu6l9llb\nRequirement already satisfied, skipping upgrade: keras>=2.2.4 in /usr/local/lib/python3.6/dist-packages (from keras-resnet==0.2.0) (2.3.1)\nRequirement already satisfied, skipping upgrade: pyyaml in /usr/local/lib/python3.6/dist-packages (from keras>=2.2.4->keras-resnet==0.2.0) (3.13)\nRequirement already satisfied, skipping upgrade: keras-applications>=1.0.6 in 
/usr/local/lib/python3.6/dist-packages (from keras>=2.2.4->keras-resnet==0.2.0) (1.0.8)\nRequirement already satisfied, skipping upgrade: six>=1.9.0 in /usr/local/lib/python3.6/dist-packages (from keras>=2.2.4->keras-resnet==0.2.0) (1.12.0)\nRequirement already satisfied, skipping upgrade: keras-preprocessing>=1.0.5 in /usr/local/lib/python3.6/dist-packages (from keras>=2.2.4->keras-resnet==0.2.0) (1.1.2)\nRequirement already satisfied, skipping upgrade: numpy>=1.9.1 in /usr/local/lib/python3.6/dist-packages (from keras>=2.2.4->keras-resnet==0.2.0) (1.18.5)\nRequirement already satisfied, skipping upgrade: h5py in /usr/local/lib/python3.6/dist-packages (from keras>=2.2.4->keras-resnet==0.2.0) (2.10.0)\nRequirement already satisfied, skipping upgrade: scipy>=0.14 in /usr/local/lib/python3.6/dist-packages (from keras>=2.2.4->keras-resnet==0.2.0) (1.4.1)\nBuilding wheels for collected packages: keras-resnet\n Building wheel for keras-resnet (setup.py) ... \u001b[?25l\u001b[?25hdone\n Created wheel for keras-resnet: filename=keras_resnet-0.2.0-py2.py3-none-any.whl size=22144 sha256=03646051c463980fa0773321ccd342b39aed46103086cca365e1297ab944952a\n Stored in directory: /tmp/pip-ephem-wheel-cache-_woalzau/wheels/10/52/f3/6a1fdbfb022ce9abfdf00a1ca7e90cef71dea99976edbcb53f\nSuccessfully built keras-resnet\n\u001b[31mERROR: keras-retinanet 0.5.1 has requirement keras-resnet==0.1.0, but you'll have keras-resnet 0.2.0 which is incompatible.\u001b[0m\nInstalling collected packages: keras-resnet\n Found existing installation: keras-resnet 0.1.0\n Uninstalling keras-resnet-0.1.0:\n Successfully uninstalled keras-resnet-0.1.0\nSuccessfully installed keras-resnet-0.2.0\n"
],
[
"\nimport xml.etree.ElementTree as ET\nimport os\nimport numpy as np\nimport keras\nimport math\nimport tensorflow as tf\nimport cv2\nfrom os import listdir, walk\nfrom os.path import join\nfrom keras_retinanet.bin.train import create_generators,create_models,create_callbacks\nfrom keras_retinanet.models import backbone,load_model,convert_model\nfrom keras_retinanet.utils.config import read_config_file,parse_anchor_parameters\nfrom keras_retinanet.utils.visualization import draw_boxes\nfrom sklearn.model_selection import train_test_split\nfrom imgaug import augmenters as iaa\n",
"_____no_output_____"
]
],
[
[
"# Training Left Foot model",
"_____no_output_____"
]
],
[
[
"\n# tf.set_random_seed(31) # SEEDS MAKE RESULTS MORE REPRODUCABLE\nnp.random.seed(17)\nclasses = np.arange(0, 6, 1).tolist()\n\nimport pandas as pd\ntr_annots = pd.read_csv('/content/drive/My Drive/RA2/Retinanet/Upload for Kaggle/training_data/retinanet train val files new/all_foot_train_03_05.csv', header = None)\ntr_annots['ids'] = tr_annots[0].apply(lambda x: x.split(\"/\")[-1].split(\".\")[0])\ntrain_ids = tr_annots['ids'].unique().tolist()\n\nimport pandas as pd\ntr_annots = pd.read_csv('/content/drive/My Drive/RA2/Retinanet/Upload for Kaggle/training_data/retinanet train val files new/all_foot_val_03_06.csv', header = None)\ntr_annots['ids'] = tr_annots[0].apply(lambda x: x.split(\"/\")[-1].split(\".\")[0])\nval_ids = tr_annots['ids'].unique().tolist()\n\nprint(len(train_ids), len(val_ids))",
"355 61\n"
]
],
[
[
"# Previous config files",
"_____no_output_____"
]
],
[
[
"# # with open('config.ini','w') as f:\n# # f.write('[anchor_parameters]\\nsizes = 32 64 128 256 512\\nstrides = 8 16 32 64 128\\nratios = 1 1.5 2 2.5 \\nscales = 1 2\\n')\n\n# #Updating the anchor parameters\n# with open('config.ini','w') as f:\n# f.write('[anchor_parameters]\\nsizes = 32 64 128 256 512\\nstrides = 8 16 32 64 128\\nratios = 1.5 1.8 2 2.2 2.5 2.8 3.0\\nscales = 1 1.2 1.6 2 3\\n')\n",
"_____no_output_____"
]
],
[
[
"## New config parameters for anchors",
"_____no_output_____"
]
],
[
[
"#Updating the anchor parameters\n# with open('config.ini','w') as f:\n# f.write('[anchor_parameters]\\nsizes = 32 64 128 256 512\\nstrides = 8 16 32 64 128\\nratios = 0.8 1 1.3 1.5 1.8 2 2.2 2.5 2.8 3.0 3.2\\nscales = 1 1.2 1.6\\n')",
"_____no_output_____"
],
[
"# # New ratios parameters for training as previous didn't help\n# with open('config.ini','w') as f:\n# f.write('[anchor_parameters]\\nsizes = 32 64 128 256 512\\nstrides = 8 16 32 64 128\\nratios = 0.8 1 1.3 1.5 1.8 2 2.2 2.5 2.8 3.0 3.2\\nscales = 1 1.2 1.6\\n')\n",
"_____no_output_____"
],
[
"# Updating the parameters\n# New ratios parameters for training as previous didn't help\nwith open('config.ini','w') as f:\n f.write('[anchor_parameters]\\nsizes = 32 64 128 256 512 1024\\nstrides = 8 16 32 64 128 256\\nratios = 1 1.5 2 2.5 3\\nscales = 1 1.2 1.6\\n')\n",
"_____no_output_____"
],
[
"\nb = backbone('resnet50')\n\n# Increasing batch size from 4 -> 8\nclass args:\n batch_size = 8\n config = read_config_file('config.ini')\n random_transform = True # Image augmentation\n annotations = '/content/drive/My Drive/RA2/Retinanet/Upload for Kaggle/training_data/retinanet train val files new/all_foot_train_03_05.csv'\n val_annotations = '/content/drive/My Drive/RA2/Retinanet/Upload for Kaggle/training_data/retinanet train val files new/all_foot_val_03_06.csv'\n classes = '/content/drive/My Drive/RA2/Retinanet/Upload for Kaggle/training_data/retinanet train val files new/Foot class names.csv'\n image_min_side = 1000\n image_max_side = 1400\n no_resize = False\n dataset_type = 'csv'\n tensorboard_dir = ''\n evaluation = False\n snapshots = True\n snapshot_path = \"/content/drive/My Drive/RA2/Retinanet/Upload for Kaggle/Foot model with new annotations/Model v2 continued\"\n backbone = 'resnet50'\n epochs = 200\n steps = len(train_ids)//(batch_size)\n weighted_average = True\n reduce_lr_factor = 0.1\n reduce_lr_patience = 4\n compute_val_loss = True\n iou_threshold = 0.6\n nms_threshold = 0.5\n score_threshold = 0.15\n anchors = True\n resize = True\n display_name = 'Anchors'\n no_gui = False",
"_____no_output_____"
],
[
"train_gen, valid_gen = create_generators(args, b.preprocess_image)",
"_____no_output_____"
],
[
"\nfrom keras_retinanet.preprocessing.generator import Generator\n# from ..utils.image import read_image_bgr\nfrom keras_retinanet.utils.transform import random_transform_generator\nfrom keras_retinanet.utils.image import random_visual_effect_generator\n\nimport numpy as np\nfrom PIL import Image\nfrom six import raise_from\n\nimport csv\nimport sys\nimport os.path\nfrom collections import OrderedDict\n\nimport PIL.Image\nimport PIL.ImageOps\nimport numpy as np\nfrom os import makedirs \n\n\ndef exif_transpose(img):\n if not img:\n return img\n\n exif_orientation_tag = 274\n\n # Check for EXIF data (only present on some files)\n if hasattr(img, \"_getexif\") and isinstance(img._getexif(), dict) and exif_orientation_tag in img._getexif():\n exif_data = img._getexif()\n orientation = exif_data[exif_orientation_tag]\n\n # Handle EXIF Orientation\n if orientation == 1:\n # Normal image - nothing to do!\n pass\n elif orientation == 2:\n # Mirrored left to right\n img = img.transpose(PIL.Image.FLIP_LEFT_RIGHT)\n elif orientation == 3:\n # Rotated 180 degrees\n img = img.rotate(180)\n elif orientation == 4:\n # Mirrored top to bottom\n img = img.rotate(180).transpose(PIL.Image.FLIP_LEFT_RIGHT)\n elif orientation == 5:\n # Mirrored along top-left diagonal\n img = img.rotate(-90, expand=True).transpose(PIL.Image.FLIP_LEFT_RIGHT)\n elif orientation == 6:\n # Rotated 90 degrees\n img = img.rotate(-90, expand=True)\n elif orientation == 7:\n # Mirrored along top-right diagonal\n img = img.rotate(90, expand=True).transpose(PIL.Image.FLIP_LEFT_RIGHT)\n elif orientation == 8:\n # Rotated 270 degrees\n img = img.rotate(90, expand=True)\n\n return img\n\n\ndef load_image_file(file, mode='RGB'):\n # Load the image with PIL\n img = PIL.Image.open(file)\n\n if hasattr(PIL.ImageOps, 'exif_transpose'):\n # Very recent versions of PIL can do exit transpose internally\n img = PIL.ImageOps.exif_transpose(img)\n else:\n # Otherwise, do the exif transpose ourselves\n img = 
exif_transpose(img)\n\n img = img.convert(mode)\n\n return np.array(img)\n\ndef _parse(value, function, fmt):\n \"\"\"\n Parse a string into a value, and format a nice ValueError if it fails.\n\n Returns `function(value)`.\n Any `ValueError` raised is catched and a new `ValueError` is raised\n with message `fmt.format(e)`, where `e` is the caught `ValueError`.\n \"\"\"\n try:\n return function(value)\n except ValueError as e:\n raise_from(ValueError(fmt.format(e)), None)\n\n\ndef _read_classes(csv_reader):\n \"\"\" Parse the classes file given by csv_reader.\n \"\"\"\n result = OrderedDict()\n for line, row in enumerate(csv_reader):\n line += 1\n\n try:\n class_name, class_id = row\n except ValueError:\n raise_from(ValueError('line {}: format should be \\'class_name,class_id\\''.format(line)), None)\n class_id = _parse(class_id, int, 'line {}: malformed class ID: {{}}'.format(line))\n\n if class_name in result:\n raise ValueError('line {}: duplicate class name: \\'{}\\''.format(line, class_name))\n result[class_name] = class_id\n return result\n\n\ndef _read_annotations(csv_reader, classes):\n \"\"\" Read annotations from the csv_reader.\n \"\"\"\n result = OrderedDict()\n for line, row in enumerate(csv_reader):\n line += 1\n\n try:\n img_file, x1, y1, x2, y2, class_name = row[:6]\n except ValueError:\n raise_from(ValueError('line {}: format should be \\'img_file,x1,y1,x2,y2,class_name\\' or \\'img_file,,,,,\\''.format(line)), None)\n\n if img_file not in result:\n result[img_file] = []\n\n # If a row contains only an image path, it's an image without annotations.\n if (x1, y1, x2, y2, class_name) == ('', '', '', '', ''):\n continue\n\n x1 = _parse(x1, int, 'line {}: malformed x1: {{}}'.format(line))\n y1 = _parse(y1, int, 'line {}: malformed y1: {{}}'.format(line))\n x2 = _parse(x2, int, 'line {}: malformed x2: {{}}'.format(line))\n y2 = _parse(y2, int, 'line {}: malformed y2: {{}}'.format(line))\n\n # Check that the bounding box is valid.\n if x2 <= x1:\n raise 
ValueError('line {}: x2 ({}) must be higher than x1 ({})'.format(line, x2, x1))\n if y2 <= y1:\n raise ValueError('line {}: y2 ({}) must be higher than y1 ({})'.format(line, y2, y1))\n\n # check if the current class name is correctly present\n if class_name not in classes:\n raise ValueError('line {}: unknown class name: \\'{}\\' (classes: {})'.format(line, class_name, classes))\n\n result[img_file].append({'x1': x1, 'x2': x2, 'y1': y1, 'y2': y2, 'class': class_name})\n return result\n\n\ndef _open_for_csv(path):\n \"\"\" Open a file with flags suitable for csv.reader.\n\n This is different for python2 it means with mode 'rb',\n for python3 this means 'r' with \"universal newlines\".\n \"\"\"\n if sys.version_info[0] < 3:\n return open(path, 'rb')\n else:\n return open(path, 'r', newline='')\n\n\nclass CSVGenerator(Generator):\n \"\"\" Generate data for a custom CSV dataset.\n\n See https://github.com/fizyr/keras-retinanet#csv-datasets for more information.\n \"\"\"\n\n def __init__(\n self,\n csv_data_file,\n csv_class_file,\n base_dir=None,\n **kwargs\n ):\n \"\"\" Initialize a CSV data generator.\n\n Args\n csv_data_file: Path to the CSV annotations file.\n csv_class_file: Path to the CSV classes file.\n base_dir: Directory w.r.t. 
where the files are to be searched (defaults to the directory containing the csv_data_file).\n \"\"\"\n self.image_names = []\n self.image_data = {}\n self.base_dir = base_dir\n\n # Take base_dir from annotations file if not explicitly specified.\n if self.base_dir is None:\n self.base_dir = os.path.dirname(csv_data_file)\n\n # parse the provided class file\n try:\n with _open_for_csv(csv_class_file) as file:\n self.classes = _read_classes(csv.reader(file, delimiter=','))\n except ValueError as e:\n raise_from(ValueError('invalid CSV class file: {}: {}'.format(csv_class_file, e)), None)\n\n self.labels = {}\n for key, value in self.classes.items():\n self.labels[value] = key\n\n # csv with img_path, x1, y1, x2, y2, class_name\n try:\n with _open_for_csv(csv_data_file) as file:\n self.image_data = _read_annotations(csv.reader(file, delimiter=','), self.classes)\n except ValueError as e:\n raise_from(ValueError('invalid CSV annotations file: {}: {}'.format(csv_data_file, e)), None)\n self.image_names = list(self.image_data.keys())\n\n super(CSVGenerator, self).__init__(**kwargs)\n\n def size(self):\n \"\"\" Size of the dataset.\n \"\"\"\n return len(self.image_names)\n\n def num_classes(self):\n \"\"\" Number of classes in the dataset.\n \"\"\"\n return max(self.classes.values()) + 1\n\n def has_label(self, label):\n \"\"\" Return True if label is a known label.\n \"\"\"\n return label in self.labels\n\n def has_name(self, name):\n \"\"\" Returns True if name is a known class.\n \"\"\"\n return name in self.classes\n\n def name_to_label(self, name):\n \"\"\" Map name to label.\n \"\"\"\n return self.classes[name]\n\n def label_to_name(self, label):\n \"\"\" Map label to name.\n \"\"\"\n return self.labels[label]\n\n def image_path(self, image_index):\n \"\"\" Returns the image path for image_index.\n \"\"\"\n return os.path.join(self.base_dir, self.image_names[image_index])\n\n def image_aspect_ratio(self, image_index):\n \"\"\" Compute the aspect ratio for an image 
with image_index.\n \"\"\"\n # PIL is fast for metadata\n image = Image.open(self.image_path(image_index))\n return float(image.width) / float(image.height)\n\n def load_image(self, image_index):\n \"\"\" Load an image at the image_index.\n \"\"\"\n return load_image_file(self.image_path(image_index))\n\n def load_annotations(self, image_index):\n \"\"\" Load annotations for an image_index.\n \"\"\"\n path = self.image_names[image_index]\n annotations = {'labels': np.empty((0,)), 'bboxes': np.empty((0, 4))}\n\n for idx, annot in enumerate(self.image_data[path]):\n annotations['labels'] = np.concatenate((annotations['labels'], [self.name_to_label(annot['class'])]))\n annotations['bboxes'] = np.concatenate((annotations['bboxes'], [[\n float(annot['x1']),\n float(annot['y1']),\n float(annot['x2']),\n float(annot['y2']),\n ]]))\n\n return annotations\n\ndef mAP(y_true, y_pred):\n num_classes = y_true.shape[1]\n average_precisions = []\n relevant = K.sum(K.round(K.clip(y_true, 0, 1)))\n tp_whole = K.round(K.clip(y_true * y_pred, 0, 1))\n for index in range(num_classes):\n temp = K.sum(tp_whole[:,:index+1],axis=1)\n average_precisions.append(temp * (1/(index + 1)))\n AP = Add()(average_precisions) / relevant\n mAP = K.mean(AP,axis=0)\n return mAP\n\ndef create_callbacks(model, training_model, prediction_model, validation_generator, args):\n \"\"\" Creates the callbacks to use during training.\n\n Args\n model: The base model.\n training_model: The model that is used for training.\n prediction_model: The model that should be used for validation.\n validation_generator: The generator for creating validation data.\n args: parseargs args object.\n\n Returns:\n A list of callbacks used for training.\n \"\"\"\n callbacks = []\n\n tensorboard_callback = None\n\n if args.tensorboard_dir:\n makedirs(args.tensorboard_dir)\n tensorboard_callback = keras.callbacks.TensorBoard(\n log_dir = args.tensorboard_dir,\n histogram_freq = 0,\n batch_size = args.batch_size,\n write_graph = 
True,\n write_grads = False,\n write_images = False,\n embeddings_freq = 0,\n embeddings_layer_names = None,\n embeddings_metadata = None\n )\n\n if args.evaluation and validation_generator:\n if args.dataset_type == 'coco':\n from ..callbacks.coco import CocoEval\n\n # use prediction model for evaluation\n evaluation = CocoEval(validation_generator, tensorboard=tensorboard_callback)\n else:\n evaluation = Evaluate(validation_generator, tensorboard=tensorboard_callback, weighted_average=args.weighted_average)\n evaluation = RedirectModel(evaluation, prediction_model)\n callbacks.append(evaluation)\n\n # save the model\n if args.snapshots:\n # ensure directory created first; otherwise h5py will error after epoch.\n makedirs(args.snapshot_path)\n checkpoint = keras.callbacks.ModelCheckpoint(\n os.path.join(\n args.snapshot_path,\n '{backbone}_{dataset_type}_{{epoch:02d}}.h5'.format(backbone=args.backbone, dataset_type=args.dataset_type)\n ),\n verbose=1,\n # save_best_only=True,\n # monitor=\"mAP\",\n # mode='max'\n )\n checkpoint = RedirectModel(checkpoint, model)\n callbacks.append(checkpoint)\n\n callbacks.append(keras.callbacks.ReduceLROnPlateau(\n monitor = 'loss',\n factor = args.reduce_lr_factor,\n patience = args.reduce_lr_patience,\n verbose = 1,\n mode = 'auto',\n min_delta = 0.0001,\n cooldown = 0,\n min_lr = 0\n ))\n\n callbacks.append(keras.callbacks.EarlyStopping(\n monitor = 'classification_loss',\n patience = 20,\n mode = 'max',\n min_delta = 0.01\n ))\n\n if args.tensorboard_dir:\n callbacks.append(tensorboard_callback)\n\n return callbacks\n\ndef create_generators(args, preprocess_image):\n \"\"\" Create generators for training and validation.\n\n Args\n args : parseargs object containing configuration for generators.\n preprocess_image : Function that preprocesses an image for the network.\n \"\"\"\n common_args = {\n 'batch_size' : args.batch_size,\n 'config' : args.config,\n 'image_min_side' : args.image_min_side,\n 'image_max_side' : 
args.image_max_side,\n 'no_resize' : args.no_resize,\n 'preprocess_image' : preprocess_image,\n }\n\n # create random transform generator for augmenting training data\n if args.random_transform:\n transform_generator = random_transform_generator(\n min_rotation=-0.1,\n max_rotation=0.1,\n min_translation=(-0.1, -0.1),\n max_translation=(0.1, 0.1),\n min_shear=-0.1,\n max_shear=0.1,\n min_scaling=(0.9, 0.9),\n max_scaling=(1.1, 1.1),\n flip_x_chance=0.5,\n flip_y_chance=0.5,\n )\n visual_effect_generator = random_visual_effect_generator(\n contrast_range=(0.9, 1.1),\n brightness_range=(-.1, .1),\n hue_range=(-0.05, 0.05),\n saturation_range=(0.95, 1.05)\n )\n else:\n transform_generator = random_transform_generator(flip_x_chance=0.5)\n visual_effect_generator = None\n\n if args.dataset_type == 'coco':\n # import here to prevent unnecessary dependency on cocoapi\n from ..preprocessing.coco import CocoGenerator\n\n train_generator = CocoGenerator(\n args.coco_path,\n 'train2017',\n transform_generator=transform_generator,\n visual_effect_generator=visual_effect_generator,\n **common_args\n )\n\n validation_generator = CocoGenerator(\n args.coco_path,\n 'val2017',\n shuffle_groups=False,\n **common_args\n )\n elif args.dataset_type == 'pascal':\n train_generator = PascalVocGenerator(\n args.pascal_path,\n 'train',\n image_extension=args.image_extension,\n transform_generator=transform_generator,\n visual_effect_generator=visual_effect_generator,\n **common_args\n )\n\n validation_generator = PascalVocGenerator(\n args.pascal_path,\n 'val',\n image_extension=args.image_extension,\n shuffle_groups=False,\n **common_args\n )\n elif args.dataset_type == 'csv':\n train_generator = CSVGenerator(\n args.annotations,\n args.classes,\n transform_generator=transform_generator,\n visual_effect_generator=visual_effect_generator,\n **common_args\n )\n\n if args.val_annotations:\n validation_generator = CSVGenerator(\n args.val_annotations,\n args.classes,\n shuffle_groups=False,\n 
**common_args\n )\n else:\n validation_generator = None\n elif args.dataset_type == 'oid':\n train_generator = OpenImagesGenerator(\n args.main_dir,\n subset='train',\n version=args.version,\n labels_filter=args.labels_filter,\n annotation_cache_dir=args.annotation_cache_dir,\n parent_label=args.parent_label,\n transform_generator=transform_generator,\n visual_effect_generator=visual_effect_generator,\n **common_args\n )\n\n validation_generator = OpenImagesGenerator(\n args.main_dir,\n subset='validation',\n version=args.version,\n labels_filter=args.labels_filter,\n annotation_cache_dir=args.annotation_cache_dir,\n parent_label=args.parent_label,\n shuffle_groups=False,\n **common_args\n )\n elif args.dataset_type == 'kitti':\n train_generator = KittiGenerator(\n args.kitti_path,\n subset='train',\n transform_generator=transform_generator,\n visual_effect_generator=visual_effect_generator,\n **common_args\n )\n\n validation_generator = KittiGenerator(\n args.kitti_path,\n subset='val',\n shuffle_groups=False,\n **common_args\n )\n else:\n raise ValueError('Invalid data type received: {}'.format(args.dataset_type))\n\n return train_generator, validation_generator\n\ntrain_gen, valid_gen = create_generators(args, b.preprocess_image)",
"_____no_output_____"
],
[
"\n# # from keras_retinanet.bin.train import create_generators\n# # from keras_retinanet.models import backbone\n# from keras_retinanet.utils.config import read_config_file,parse_anchor_parameters\n# # # from keras_retinanet.bin.debug import run\n# from google.colab.patches import cv2_imshow\n# from keras_retinanet.utils.anchors import anchors_for_shape, compute_gt_annotations\n# from keras_retinanet.utils.visualization import draw_annotations, draw_boxes, draw_caption\n\n\n# def run(generator, args, anchor_params):\n# \"\"\" Main loop.\n\n# Args\n# generator: The generator to debug.\n# args: parseargs args object.\n# \"\"\"\n# # display images, one at a time\n# i = 0\n# while True:\n# # load the data\n# image = generator.load_image(i)\n# annotations = generator.load_annotations(i)\n# if len(annotations['labels']) > 0 :\n# # apply random transformations\n# if args.random_transform:\n# image, annotations = generator.random_transform_group_entry(image, annotations)\n# image, annotations = generator.random_visual_effect_group_entry(image, annotations)\n\n# # resize the image and annotations\n# if args.resize:\n# image, image_scale = generator.resize_image(image)\n# annotations['bboxes'] *= image_scale\n\n# anchors = anchors_for_shape(image.shape, anchor_params=anchor_params)\n# positive_indices, _, max_indices = compute_gt_annotations(anchors, annotations['bboxes'])\n\n# # draw anchors on the image\n# if args.anchors:\n# draw_boxes(image, anchors[positive_indices], (255, 255, 0), thickness=1)\n\n# # draw annotations on the image\n# if args.annotations:\n# # draw annotations in red\n# draw_annotations(image, annotations, color=(0, 0, 255), label_to_name=generator.label_to_name)\n\n# # draw regressed anchors in green to override most red annotations\n# # result is that annotations without anchors are red, with anchors are green\n# draw_boxes(image, annotations['bboxes'][max_indices[positive_indices], :], (0, 255, 0))\n\n# # display name on the image\n# if 
args.display_name:\n# draw_caption(image, [0, image.shape[0]], os.path.basename(generator.image_path(i)))\n\n# # write to file and advance if no-gui selected\n# if args.no_gui:\n# output_path = make_output_path(args.output_dir, generator.image_path(i), flatten=args.flatten_output)\n# os.makedirs(os.path.dirname(output_path), exist_ok=True)\n# cv2.imwrite(output_path, image)\n# i += 1\n# if i == generator.size(): # have written all images\n# break\n# else:\n# continue\n\n# # if we are using the GUI, then show an image\n# cv2_imshow(image)\n# key = cv2.waitKeyEx()\n\n# # press right for next image and left for previous (linux or windows, doesn't work for macOS)\n# # if you run macOS, press \"n\" or \"m\" (will also work on linux and windows)\n\n# if key in rightkeys:\n# i = (i + 1) % generator.size()\n# if key in leftkeys:\n# i -= 1\n# if i < 0:\n# i = generator.size() - 1\n\n# # press q or Esc to quit\n# if (key == ord('q')) or (key == 27):\n# return False\n\n# return True\n\n\n# # train_gen, valid_gen = create_generators(args,b.preprocess_image)\n# while run(train_gen, args, parse_anchor_parameters(args.config)):\n# pass",
"_____no_output_____"
],
[
"import imgaug as ia\nsometimes = lambda aug: iaa.Sometimes(0.5, aug)\n# Define our sequence of augmentation steps that will be applied to every image.\nseq = iaa.Sequential(\n[ \n # sometimes(iaa.Crop(percent=(0, 0.2))),\n \n # sometimes(iaa.Affine(\n # scale={\"x\": (0.9, 1.2), \"y\": (0.9, 1.2)},\n # translate_percent={\"x\": (-0.2, 0.2), \"y\": (-0.2, 0.2)},\n # rotate=(-45, 45),\n # shear=(-16, 16),\n # order=[0, 1],\n # cval=(0, 255),\n # mode=ia.ALL\n # )),\n\n # # Change brightness of images (85-115% of original value).\n # iaa.Multiply((0.90, 1.15), per_channel=0.5),\n\n # # # Improve or worsen the contrast of images.\n # iaa.ContrastNormalization((0.75, 1.25), per_channel=0.5),\n\n # Convert each image to grayscale and then overlay the\n # result with the original with random alpha. I.e. remove\n # colors with varying strengths.\n # iaa.Grayscale(alpha=(0.0, 0.25)),\n#\n # Execute 1 to 9 of the following (less important) augmenters per\n # image. Don't execute all of them, as that would often be way too\n # strong.\n #\n iaa.SomeOf((1, 9),\n [\n\n # Blur each image with varying strength using\n # gaussian blur (sigma between 0 and .5),\n # average/uniform blur (kernel size 1x1)\n # median blur (kernel size 1x1).\n iaa.OneOf([\n iaa.GaussianBlur((0,1)),\n iaa.AverageBlur(k=(2,2)),\n iaa.MedianBlur(k=(1,1)),\n ]),\n\n # Sharpen each image, overlay the result with the original\n # image using an alpha between 0 (no sharpening) and 1\n # (full sharpening effect).\n iaa.Sharpen(alpha=(0, 0.25), lightness=(0.75, 1.5)),\n\n # Add gaussian noise to some images.\n # In 50% of these cases, the noise is randomly sampled per\n # channel and pixel.\n # In the other 50% of all cases it is sampled once per\n # pixel (i.e. brightness change).\n iaa.AdditiveGaussianNoise(\n loc=0, scale=(0.0, 0.01*255), per_channel=0.5\n ),\n\n # Either drop randomly 1 to 10% of all pixels (i.e. 
set\n # them to black) or drop them on an image with 2-5% percent\n # of the original size, leading to large dropped\n # rectangles.\n # iaa.OneOf([\n # iaa.Dropout((0.01, 0.1), per_channel=0.5),\n # iaa.CoarseDropout(\n # (0.03, 0.15), size_percent=(0.02, 0.05),\n # per_channel=0.2\n # ),\n # ]),\n \n \n # Invert each image's channel with 5% probability.\n # This sets each pixel value v to 255-v.\n iaa.Invert(0.05, per_channel=True), # invert color channels\n\n # Add a value of -5 to 5 to each pixel.\n iaa.Add((-5, 5), per_channel=0.5),\n\n # # Change brightness of images (85-115% of original value).\n iaa.Multiply((0.85, 1.15), per_channel=0.5),\n\n # # Improve or worsen the contrast of images.\n iaa.ContrastNormalization((0.75, 1.25), per_channel=0.5),\n\n # Convert each image to grayscale and then overlay the\n # result with the original with random alpha. I.e. remove\n # colors with varying strengths.\n # iaa.Grayscale(alpha=(0.0, 0.25)),\n\n # In some images distort local areas with varying strength.\n sometimes(iaa.PiecewiseAffine(scale=(0.001, 0.01)))\n ],\n # do all of the above augmentations in random order\n random_order=True\n )\n ],\n # do all of the above augmentations in random order\n random_order=True\n)\n\ndef augment_train_gen(train_gen, visualize=False):\n '''\n Creates a generator using another generator with applied image augmentation.\n Args\n train_gen : keras-retinanet generator object.\n visualize : Boolean; False will convert bounding boxes to their anchor box targets for the model.\n '''\n imgs = []\n boxes = []\n targets = []\n size = train_gen.size()\n idx = 0\n while True:\n while len(imgs) < args.batch_size:\n image = train_gen.load_image(idx % size)\n annotations = train_gen.load_annotations(idx % size)\n image,annotations = train_gen.random_transform_group_entry(image,annotations)\n imgs.append(image) \n boxes.append(annotations['bboxes'])\n targets.append(annotations)\n idx += 1\n if visualize:\n imgs = seq.augment_images(imgs)\n 
imgs = np.array(imgs)\n boxes = np.array(boxes)\n yield imgs,boxes\n else:\n imgs = seq.augment_images(imgs)\n imgs,targets = train_gen.preprocess_group(imgs,targets)\n imgs = train_gen.compute_inputs(imgs)\n targets = train_gen.compute_targets(imgs,targets)\n imgs = np.array(imgs)\n yield imgs,targets\n imgs = []\n boxes = []\n targets = []\n\t\t\n\t\t\n# import matplotlib.pyplot as plt\n\n# skip_batches = 5\n# i = 0\n# for imgs,boxes in augment_train_gen(train_gen,visualize=True):\n# if i > skip_batches:\n# fig=plt.figure(figsize=(24,96))\n# columns = 2\n# rows = 8\n# for i in range(1, columns*rows + 1):\n# draw_boxes(imgs[i], boxes[i], (0, 255, 0), thickness=1)\n# fig.add_subplot(rows, columns, i)\n# plt.imshow(cv2.cvtColor(imgs[i],cv2.COLOR_BGR2RGB))\n# plt.show()\n# break\n# else:\n# i += 1\n\nmodel, training_model, prediction_model = create_models(\n backbone_retinanet=b.retinanet,\n num_classes=train_gen.num_classes(),\n weights=None,\n multi_gpu=False,\n freeze_backbone=True,\n lr=1e-3,\n config=args.config\n )",
"tracking <tf.Variable 'Variable:0' shape=(15, 4) dtype=float32, numpy=\narray([[-16. , -16. , 16. , 16. ],\n [-19.2 , -19.2 , 19.2 , 19.2 ],\n [-25.6 , -25.6 , 25.6 , 25.6 ],\n [-13.063946 , -19.595919 , 13.063946 , 19.595919 ],\n [-15.676735 , -23.515102 , 15.676735 , 23.515102 ],\n [-20.902313 , -31.35347 , 20.902313 , 31.35347 ],\n [-11.313708 , -22.627417 , 11.313708 , 22.627417 ],\n [-13.57645 , -27.1529 , 13.57645 , 27.1529 ],\n [-18.101934 , -36.20387 , 18.101934 , 36.20387 ],\n [-10.119288 , -25.298222 , 10.119288 , 25.298222 ],\n [-12.1431465, -30.357866 , 12.1431465, 30.357866 ],\n [-16.190863 , -40.477154 , 16.190863 , 40.477154 ],\n [ -9.237604 , -27.712812 , 9.237604 , 27.712812 ],\n [-11.085126 , -33.25538 , 11.085126 , 33.25538 ],\n [-14.780168 , -44.3405 , 14.780168 , 44.3405 ]],\n dtype=float32)> anchors\ntracking <tf.Variable 'Variable:0' shape=(15, 4) dtype=float32, numpy=\narray([[-32. , -32. , 32. , 32. ],\n [-38.4 , -38.4 , 38.4 , 38.4 ],\n [-51.2 , -51.2 , 51.2 , 51.2 ],\n [-26.127892, -39.191837, 26.127892, 39.191837],\n [-31.35347 , -47.030205, 31.35347 , 47.030205],\n [-41.804626, -62.70694 , 41.804626, 62.70694 ],\n [-22.627417, -45.254833, 22.627417, 45.254833],\n [-27.1529 , -54.3058 , 27.1529 , 54.3058 ],\n [-36.20387 , -72.40774 , 36.20387 , 72.40774 ],\n [-20.238577, -50.596443, 20.238577, 50.596443],\n [-24.286293, -60.715733, 24.286293, 60.715733],\n [-32.381725, -80.95431 , 32.381725, 80.95431 ],\n [-18.475208, -55.425625, 18.475208, 55.425625],\n [-22.170252, -66.51076 , 22.170252, 66.51076 ],\n [-29.560335, -88.681 , 29.560335, 88.681 ]], dtype=float32)> anchors\ntracking <tf.Variable 'Variable:0' shape=(15, 4) dtype=float32, numpy=\narray([[ -64. , -64. , 64. , 64. 
],\n [ -76.8 , -76.8 , 76.8 , 76.8 ],\n [-102.4 , -102.4 , 102.4 , 102.4 ],\n [ -52.255783, -78.383675, 52.255783, 78.383675],\n [ -62.70694 , -94.06041 , 62.70694 , 94.06041 ],\n [ -83.60925 , -125.41388 , 83.60925 , 125.41388 ],\n [ -45.254833, -90.50967 , 45.254833, 90.50967 ],\n [ -54.3058 , -108.6116 , 54.3058 , 108.6116 ],\n [ -72.40774 , -144.81548 , 72.40774 , 144.81548 ],\n [ -40.477154, -101.19289 , 40.477154, 101.19289 ],\n [ -48.572586, -121.431465, 48.572586, 121.431465],\n [ -64.76345 , -161.90862 , 64.76345 , 161.90862 ],\n [ -36.950417, -110.85125 , 36.950417, 110.85125 ],\n [ -44.340504, -133.02151 , 44.340504, 133.02151 ],\n [ -59.12067 , -177.362 , 59.12067 , 177.362 ]],\n dtype=float32)> anchors\ntracking <tf.Variable 'Variable:0' shape=(15, 4) dtype=float32, numpy=\narray([[-128. , -128. , 128. , 128. ],\n [-153.6 , -153.6 , 153.6 , 153.6 ],\n [-204.8 , -204.8 , 204.8 , 204.8 ],\n [-104.511566, -156.76735 , 104.511566, 156.76735 ],\n [-125.41388 , -188.12082 , 125.41388 , 188.12082 ],\n [-167.2185 , -250.82776 , 167.2185 , 250.82776 ],\n [ -90.50967 , -181.01933 , 90.50967 , 181.01933 ],\n [-108.6116 , -217.2232 , 108.6116 , 217.2232 ],\n [-144.81548 , -289.63095 , 144.81548 , 289.63095 ],\n [ -80.95431 , -202.38577 , 80.95431 , 202.38577 ],\n [ -97.14517 , -242.86293 , 97.14517 , 242.86293 ],\n [-129.5269 , -323.81723 , 129.5269 , 323.81723 ],\n [ -73.90083 , -221.7025 , 73.90083 , 221.7025 ],\n [ -88.68101 , -266.04303 , 88.68101 , 266.04303 ],\n [-118.24134 , -354.724 , 118.24134 , 354.724 ]],\n dtype=float32)> anchors\ntracking <tf.Variable 'Variable:0' shape=(15, 4) dtype=float32, numpy=\narray([[-256. , -256. , 256. , 256. 
],\n [-307.2 , -307.2 , 307.2 , 307.2 ],\n [-409.6 , -409.6 , 409.6 , 409.6 ],\n [-209.02313, -313.5347 , 209.02313, 313.5347 ],\n [-250.82776, -376.24164, 250.82776, 376.24164],\n [-334.437 , -501.65552, 334.437 , 501.65552],\n [-181.01933, -362.03867, 181.01933, 362.03867],\n [-217.2232 , -434.4464 , 217.2232 , 434.4464 ],\n [-289.63095, -579.2619 , 289.63095, 579.2619 ],\n [-161.90862, -404.77155, 161.90862, 404.77155],\n [-194.29034, -485.72586, 194.29034, 485.72586],\n [-259.0538 , -647.63446, 259.0538 , 647.63446],\n [-147.80167, -443.405 , 147.80167, 443.405 ],\n [-177.36201, -532.08606, 177.36201, 532.08606],\n [-236.48268, -709.448 , 236.48268, 709.448 ]], dtype=float32)> anchors\n"
]
],
[
[
"# Retraining the foot model with better parameters\n- config file -> # Updating the parameters\n# New ratios parameters for training as previous didn't help\nwith open('config.ini','w') as f:\n f.write('[anchor_parameters]\\nsizes = 32 64 128 256 512 1024\\nstrides = 8 16 32 64 128 256\\nratios = 1 1.5 2 2.5 3\\nscales = 1 1.2 1.6\\n')\n- iou 0.6 NMS 0.5 and score threshold 0.15",
"_____no_output_____"
],
[
"## Continued training from 21 epoch",
"_____no_output_____"
]
],
[
[
"from keras_retinanet.callbacks import RedirectModel \ncallbacks = create_callbacks(\n model,\n training_model,\n prediction_model,\n valid_gen,\n args,\n)\ntraining_model.load_weights('/content/drive/My Drive/RA2/Retinanet/Upload for Kaggle/Foot model with new annotations/Model v2/resnet50_csv_21_class_loss_0.0973.h5', skip_mismatch = True, by_name = True)\n\ntraining_model.fit_generator(generator=augment_train_gen(train_gen),\n steps_per_epoch = args.steps,\n epochs=args.epochs,\n verbose=1,\n callbacks=callbacks, ) ",
"Epoch 1/200\n44/44 [==============================] - 258s 6s/step - loss: 1.4052 - regression_loss: 1.2905 - classification_loss: 0.1146\n\nEpoch 00001: saving model to /content/drive/My Drive/RA2/Retinanet/Upload for Kaggle/Foot model with new annotations/Model v2 continued/resnet50_csv_01.h5\nEpoch 2/200\n44/44 [==============================] - 227s 5s/step - loss: 1.4634 - regression_loss: 1.3478 - classification_loss: 0.1156\n\nEpoch 00002: saving model to /content/drive/My Drive/RA2/Retinanet/Upload for Kaggle/Foot model with new annotations/Model v2 continued/resnet50_csv_02.h5\nEpoch 3/200\n44/44 [==============================] - 242s 5s/step - loss: 1.4614 - regression_loss: 1.3458 - classification_loss: 0.1157\n\nEpoch 00003: saving model to /content/drive/My Drive/RA2/Retinanet/Upload for Kaggle/Foot model with new annotations/Model v2 continued/resnet50_csv_03.h5\nEpoch 4/200\n44/44 [==============================] - 234s 5s/step - loss: 1.4859 - regression_loss: 1.3659 - classification_loss: 0.1200\n\nEpoch 00004: saving model to /content/drive/My Drive/RA2/Retinanet/Upload for Kaggle/Foot model with new annotations/Model v2 continued/resnet50_csv_04.h5\nEpoch 5/200\n44/44 [==============================] - 244s 6s/step - loss: 1.4409 - regression_loss: 1.3247 - classification_loss: 0.1162\n\nEpoch 00005: saving model to /content/drive/My Drive/RA2/Retinanet/Upload for Kaggle/Foot model with new annotations/Model v2 continued/resnet50_csv_05.h5\n\nEpoch 00005: ReduceLROnPlateau reducing learning rate to 0.00010000000474974513.\nEpoch 6/200\n44/44 [==============================] - 245s 6s/step - loss: 1.3897 - regression_loss: 1.2784 - classification_loss: 0.1114\n\nEpoch 00006: saving model to /content/drive/My Drive/RA2/Retinanet/Upload for Kaggle/Foot model with new annotations/Model v2 continued/resnet50_csv_06.h5\nEpoch 7/200\n44/44 [==============================] - 208s 5s/step - loss: 1.3457 - regression_loss: 1.2432 - classification_loss: 
0.1025\n\nEpoch 00007: saving model to /content/drive/My Drive/RA2/Retinanet/Upload for Kaggle/Foot model with new annotations/Model v2 continued/resnet50_csv_07.h5\nEpoch 8/200\n44/44 [==============================] - 236s 5s/step - loss: 1.3330 - regression_loss: 1.2351 - classification_loss: 0.0979\n\nEpoch 00008: saving model to /content/drive/My Drive/RA2/Retinanet/Upload for Kaggle/Foot model with new annotations/Model v2 continued/resnet50_csv_08.h5\nEpoch 9/200\n44/44 [==============================] - 240s 5s/step - loss: 1.3067 - regression_loss: 1.2155 - classification_loss: 0.0911\n\nEpoch 00009: saving model to /content/drive/My Drive/RA2/Retinanet/Upload for Kaggle/Foot model with new annotations/Model v2 continued/resnet50_csv_09.h5\nEpoch 10/200\n44/44 [==============================] - 238s 5s/step - loss: 1.2904 - regression_loss: 1.1966 - classification_loss: 0.0938\n\nEpoch 00010: saving model to /content/drive/My Drive/RA2/Retinanet/Upload for Kaggle/Foot model with new annotations/Model v2 continued/resnet50_csv_10.h5\nEpoch 11/200\n44/44 [==============================] - 241s 5s/step - loss: 1.3014 - regression_loss: 1.2102 - classification_loss: 0.0911\n\nEpoch 00011: saving model to /content/drive/My Drive/RA2/Retinanet/Upload for Kaggle/Foot model with new annotations/Model v2 continued/resnet50_csv_11.h5\nEpoch 12/200\n44/44 [==============================] - 233s 5s/step - loss: 1.3038 - regression_loss: 1.2132 - classification_loss: 0.0906\n\nEpoch 00012: saving model to /content/drive/My Drive/RA2/Retinanet/Upload for Kaggle/Foot model with new annotations/Model v2 continued/resnet50_csv_12.h5\nEpoch 13/200\n44/44 [==============================] - 237s 5s/step - loss: 1.2689 - regression_loss: 1.1816 - classification_loss: 0.0873\n\nEpoch 00013: saving model to /content/drive/My Drive/RA2/Retinanet/Upload for Kaggle/Foot model with new annotations/Model v2 continued/resnet50_csv_13.h5\nEpoch 14/200\n44/44 
[==============================] - 230s 5s/step - loss: 1.2629 - regression_loss: 1.1742 - classification_loss: 0.0887\n\nEpoch 00014: saving model to /content/drive/My Drive/RA2/Retinanet/Upload for Kaggle/Foot model with new annotations/Model v2 continued/resnet50_csv_14.h5\nEpoch 15/200\n44/44 [==============================] - 247s 6s/step - loss: 1.2790 - regression_loss: 1.1863 - classification_loss: 0.0927\n\nEpoch 00015: saving model to /content/drive/My Drive/RA2/Retinanet/Upload for Kaggle/Foot model with new annotations/Model v2 continued/resnet50_csv_15.h5\nEpoch 16/200\n44/44 [==============================] - 226s 5s/step - loss: 1.2724 - regression_loss: 1.1834 - classification_loss: 0.0890\n\nEpoch 00016: saving model to /content/drive/My Drive/RA2/Retinanet/Upload for Kaggle/Foot model with new annotations/Model v2 continued/resnet50_csv_16.h5\nEpoch 17/200\n44/44 [==============================] - 224s 5s/step - loss: 1.2727 - regression_loss: 1.1843 - classification_loss: 0.0884\n\nEpoch 00017: saving model to /content/drive/My Drive/RA2/Retinanet/Upload for Kaggle/Foot model with new annotations/Model v2 continued/resnet50_csv_17.h5\nEpoch 18/200\n44/44 [==============================] - 229s 5s/step - loss: 1.2765 - regression_loss: 1.1861 - classification_loss: 0.0904\n\nEpoch 00018: saving model to /content/drive/My Drive/RA2/Retinanet/Upload for Kaggle/Foot model with new annotations/Model v2 continued/resnet50_csv_18.h5\n\nEpoch 00018: ReduceLROnPlateau reducing learning rate to 1.0000000474974514e-05.\nEpoch 19/200\n44/44 [==============================] - 229s 5s/step - loss: 1.2427 - regression_loss: 1.1542 - classification_loss: 0.0885\n\nEpoch 00019: saving model to /content/drive/My Drive/RA2/Retinanet/Upload for Kaggle/Foot model with new annotations/Model v2 continued/resnet50_csv_19.h5\nEpoch 20/200\n44/44 [==============================] - 245s 6s/step - loss: 1.2512 - regression_loss: 1.1658 - classification_loss: 0.0854\n\nEpoch 
00020: saving model to /content/drive/My Drive/RA2/Retinanet/Upload for Kaggle/Foot model with new annotations/Model v2 continued/resnet50_csv_20.h5\nEpoch 21/200\n44/44 [==============================] - 245s 6s/step - loss: 1.2303 - regression_loss: 1.1455 - classification_loss: 0.0848\n\nEpoch 00021: saving model to /content/drive/My Drive/RA2/Retinanet/Upload for Kaggle/Foot model with new annotations/Model v2 continued/resnet50_csv_21.h5\n"
],
[
"from keras_retinanet.callbacks import RedirectModel \ncallbacks = create_callbacks(\n model,\n training_model,\n prediction_model,\n valid_gen,\n args,\n)\ntraining_model.load_weights('/content/drive/My Drive/RA2/Retinanet/Upload for Kaggle/Foot model with new annotations/resnet50_csv_98_class_loss_0.2137.h5', skip_mismatch = True, by_name = True)\n\ntraining_model.fit_generator(generator=augment_train_gen(train_gen),\n steps_per_epoch = args.steps,\n epochs=args.epochs,\n verbose=1,\n callbacks=callbacks, ) ",
"/usr/local/lib/python3.6/dist-packages/keras/engine/saving.py:1319: UserWarning: Skipping loading of weights for layer regression_submodel due to mismatch in shape ((3, 3, 256, 60) vs (132, 256, 3, 3)).\n weight_values[i].shape))\n/usr/local/lib/python3.6/dist-packages/keras/engine/saving.py:1319: UserWarning: Skipping loading of weights for layer regression_submodel due to mismatch in shape ((60,) vs (132,)).\n weight_values[i].shape))\n/usr/local/lib/python3.6/dist-packages/keras/engine/saving.py:1319: UserWarning: Skipping loading of weights for layer classification_submodel due to mismatch in shape ((3, 3, 256, 75) vs (165, 256, 3, 3)).\n weight_values[i].shape))\n/usr/local/lib/python3.6/dist-packages/keras/engine/saving.py:1319: UserWarning: Skipping loading of weights for layer classification_submodel due to mismatch in shape ((75,) vs (165,)).\n weight_values[i].shape))\n"
]
],
[
[
"# Previous version -> picked up class loss 0.2137 model",
"_____no_output_____"
]
],
[
[
"callbacks = create_callbacks(\n model,\n training_model,\n prediction_model,\n valid_gen,\n args,\n)\ntraining_model.load_weights('/content/drive/My Drive/RA2/Retinanet/Upload for Kaggle/resnet50_coco_best_v2.0.1.h5', skip_mismatch = True, by_name = True)",
"/usr/local/lib/python3.6/dist-packages/keras/engine/saving.py:1319: UserWarning: Skipping loading of weights for layer regression_submodel due to mismatch in shape ((3, 3, 256, 132) vs (36, 256, 3, 3)).\n weight_values[i].shape))\n/usr/local/lib/python3.6/dist-packages/keras/engine/saving.py:1319: UserWarning: Skipping loading of weights for layer regression_submodel due to mismatch in shape ((132,) vs (36,)).\n weight_values[i].shape))\n/usr/local/lib/python3.6/dist-packages/keras/engine/saving.py:1319: UserWarning: Skipping loading of weights for layer classification_submodel due to mismatch in shape ((3, 3, 256, 165) vs (720, 256, 3, 3)).\n weight_values[i].shape))\n/usr/local/lib/python3.6/dist-packages/keras/engine/saving.py:1319: UserWarning: Skipping loading of weights for layer classification_submodel due to mismatch in shape ((165,) vs (720,)).\n weight_values[i].shape))\n"
]
],
[
[
"## Will train a new model for Foot joints",
"_____no_output_____"
]
],
[
[
"",
"_____no_output_____"
]
],
[
[
"# Training the final foot model with below parameters\n- Iou = 0.5\n- NMS = 0.5\n- score threshold = 0.05\n- Ratios = 0.8 1 1.3 1.5 1.8 2 2.2 2.5 2.8 3.0 3.2\n- scales = 1 1.2 1.6\n- IMgae min size = 1000\n- Image max side = 1400 \n- Evaluation = False",
"_____no_output_____"
]
],
[
[
"training_model.fit_generator(generator=augment_train_gen(train_gen),\n steps_per_epoch = args.steps,\n epochs=args.epochs,\n verbose=1,\n callbacks=callbacks, ) ",
"Epoch 1/200\n88/88 [==============================] - 270s 3s/step - loss: 2.9207 - regression_loss: 2.2971 - classification_loss: 0.6236\n\nEpoch 00001: saving model to /content/drive/My Drive/RA2/Retinanet/Upload for Kaggle/Foot model with new annotations/resnet50_csv_01.h5\n"
]
],
[
[
"## Visualizing anchor boxes",
"_____no_output_____"
]
],
[
[
"\nfrom keras_retinanet.bin.train import create_generators\nfrom keras_retinanet.models import backbone\nfrom keras_retinanet.utils.config import read_config_file,parse_anchor_parameters\nfrom keras_retinanet.bin.debug import run\n\n\nb = backbone('resnet50')\n\nclass args:\n batch_size = 4\n config = read_config_file('config.ini')\n random_transform = True # Image augmentation\n annotations = '/content/drive/My Drive/RA2/Retinanet/Upload for Kaggle/training_data/retinanet train val files new/all_hand_train_03_06.csv'\n val_annotations = '/content/drive/My Drive/RA2/Retinanet/Upload for Kaggle/training_data/retinanet train val files new/all_hand_val_03_06.csv'\n classes = '/content/drive/My Drive/RA2/Retinanet/Upload for Kaggle/training_data/retinanet train val files new/Hands class names.csv'\n image_min_side = 1000\n image_max_side = 1400\n no_resize = False\n dataset_type = 'csv'\n tensorboard_dir = ''\n evaluation = False\n snapshots = True\n snapshot_path = \"/content/drive/My Drive/RA2/Retinanet/Upload for Kaggle/Hand models New annotations\"\n backbone = 'resnet50'\n epochs = 200\n steps = len(train_ids)//(batch_size)\n weighted_average = True\n reduce_lr_factor = 0.1\n reduce_lr_patience = 4\n compute_val_loss = True\n iou_threshold = 0.5\n nms_threshold = 0.5\n score_threshold = 0.05\n\ntrain_gen, valid_gen = create_generators(args,b.preprocess_image)\nwhile run(valid_gen,args,parse_anchor_parameters(args.config)):\n pass",
"_____no_output_____"
]
],
[
[
"## Retraining from epoch 16 Freeze previousm model\n-- mAP -> 0.40 class loss 0.41",
"_____no_output_____"
]
],
[
[
"training_model.fit_generator(generator=augment_train_gen(train_gen),\n steps_per_epoch = args.steps,\n epochs=args.epochs,\n verbose=1,\n callbacks=callbacks, ) ",
"Epoch 1/150\n88/88 [==============================] - 276s 3s/step - loss: 2.7548 - regression_loss: 2.3195 - classification_loss: 0.4353\n"
],
[
"",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
],
[
[
"## Have reduced the ratios in this run and changed the anchors sizes",
"_____no_output_____"
]
],
[
[
"training_model.fit_generator(generator=augment_train_gen(train_gen),\n steps_per_epoch = args.steps,\n epochs=args.epochs,\n verbose=1,\n callbacks=callbacks, ) ",
"Epoch 1/150\n88/88 [==============================] - 299s 3s/step - loss: 3.0064 - regression_loss: 2.4081 - classification_loss: 0.5983\n"
]
],
[
[
"\n# Logs for few changes made\n- Ratios changed\n- IOU threshold, NMS threshold = 0.6 and score threshold = 0.1",
"_____no_output_____"
]
],
[
[
"training_model.fit_generator(generator=augment_train_gen(train_gen),\n steps_per_epoch = args.steps,\n epochs=args.epochs,\n verbose=1,\n callbacks=callbacks, ) ",
"Epoch 1/150\n88/88 [==============================] - 558s 6s/step - loss: 6.1760 - regression_loss: 2.4610 - classification_loss: 3.7149\n"
],
[
"",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
],
[
[
"# Right foot joint detection",
"_____no_output_____"
]
],
[
[
"\n# tf.set_random_seed(31) # SEEDS MAKE RESULTS MORE REPRODUCABLE\nnp.random.seed(17)\nclasses = np.arange(0, 11, 1).tolist()\n\n# def convert_annotation(image_id,filename):\n# in_file = open('training_data/labels/%s.xml'%(image_id))\n# out_file = open(filename, 'a')\n# tree=ET.parse(in_file)\n# root = tree.getroot()\n \n# if root.iter('object') is not None:\n# for obj in root.iter('object'):\n# cls = obj.find('name').text\n# if cls not in classes:\n# continue\n# cls_id = classes.index(cls)\n \n# xmlbox = obj.find('bndbox')\n# x1 = math.ceil(float(xmlbox.find('xmin').text))\n# y1 = math.ceil(float(xmlbox.find('ymin').text))\n# x2 = math.ceil(float(xmlbox.find('xmax').text))\n# y2 = math.ceil(float(xmlbox.find('ymax').text))\n# if x1 == x2 or y1 == y2:\n# continue\n \n# out_file.write(f'training_data/images/{image_id}.jpg,{x1},{y1},{x2},{y2},{cls}\\n')\n# else:\n# out_file.write(f'training_data/images/{image_id}.jpg,,,,,\\n')\n\n\n# _,_,image_ids = next(walk('training_data/images'))\n# image_ids = [i[:-4] for i in image_ids]\n# open('annotations.csv','w')\n# open('val_annotations.csv','w')\n\n# train_ids,val_ids = train_test_split(image_ids,random_state=31,test_size=0)\n\n# for image_id in train_ids:\n# convert_annotation(image_id,'annotations.csv')\n \n# for image_id in val_ids:\n# convert_annotation(image_id,'val_annotations.csv')\n \n# print(len(train_ids),len(val_ids))\n\nimport pandas as pd\ntr_annots = pd.read_csv('/content/drive/My Drive/RA2/Retinanet/Upload for Kaggle/training_data/Right foot train.csv', header = None)\ntr_annots['ids'] = tr_annots[0].apply(lambda x: x.split(\"/\")[-1].split(\".\")[0])\ntrain_ids = tr_annots['ids'].unique().tolist()\n\nimport pandas as pd\ntr_annots = pd.read_csv('/content/drive/My Drive/RA2/Retinanet/Upload for Kaggle/training_data/Right foot val.csv', header = None)\ntr_annots['ids'] = tr_annots[0].apply(lambda x: x.split(\"/\")[-1].split(\".\")[0])\nval_ids = tr_annots['ids'].unique().tolist()\n\nprint(len(train_ids), 
len(val_ids))\n\nwith open('config.ini','w') as f:\n f.write('[anchor_parameters]\\nsizes = 32 64 128 256 512\\nstrides = 8 16 32 64 128\\nratios = 1 1.2 1.5 2 \\nscales = 2 3 4 \\n')\n\t\nb = backbone('resnet50')\n\nclass args:\n batch_size = 2\n config = read_config_file('config.ini')\n random_transform = True # Image augmentation\n annotations = '/content/drive/My Drive/RA2/Retinanet/Upload for Kaggle/training_data/Right foot train.csv'\n val_annotations = '/content/drive/My Drive/RA2/Retinanet/Upload for Kaggle/training_data/Right foot val.csv'\n classes = '/content/drive/My Drive/RA2/Retinanet/Upload for Kaggle/foot_class_names.csv'\n image_min_side = 900\n image_max_side = 1200\n no_resize = False\n dataset_type = 'csv'\n tensorboard_dir = ''\n evaluation = False\n snapshots = True\n snapshot_path = \"/content/drive/My Drive/RA2/Retinanet/Upload for Kaggle/save right foot model/\"\n backbone = 'resnet50'\n epochs = 100\n steps = len(train_ids)//(batch_size)\n weighted_average = True\n\t\ntrain_gen, valid_gen = create_generators(args, b.preprocess_image)\n\n\nsometimes = lambda aug: iaa.Sometimes(0.5, aug)\n# Define our sequence of augmentation steps that will be applied to every image.\nseq = iaa.Sequential(\n [\n #\n # Execute 1 to 9 of the following (less important) augmenters per\n # image. 
Don't execute all of them, as that would often be way too\n # strong.\n #\n iaa.SomeOf((1, 9),\n [\n\n # Blur each image with varying strength using\n # gaussian blur (sigma between 0 and .5),\n # average/uniform blur (kernel size 1x1)\n # median blur (kernel size 1x1).\n iaa.OneOf([\n iaa.GaussianBlur((0,0.5)),\n iaa.AverageBlur(k=(1)),\n iaa.MedianBlur(k=(1)),\n ]),\n\n # Sharpen each image, overlay the result with the original\n # image using an alpha between 0 (no sharpening) and 1\n # (full sharpening effect).\n iaa.Sharpen(alpha=(0, 0.25), lightness=(0.75, 1.5)),\n\n # Add gaussian noise to some images.\n # In 50% of these cases, the noise is randomly sampled per\n # channel and pixel.\n # In the other 50% of all cases it is sampled once per\n # pixel (i.e. brightness change).\n iaa.AdditiveGaussianNoise(\n loc=0, scale=(0.0, 0.01*255), per_channel=0.5\n ),\n\n # Either drop randomly 1 to 10% of all pixels (i.e. set\n # them to black) or drop them on an image with 2-5% percent\n # of the original size, leading to large dropped\n # rectangles.\n iaa.OneOf([\n iaa.Dropout((0.01, 0.1), per_channel=0.5),\n iaa.CoarseDropout(\n (0.03, 0.15), size_percent=(0.02, 0.05),\n per_channel=0.2\n ),\n ]),\n\n # Add a value of -5 to 5 to each pixel.\n iaa.Add((-5, 5), per_channel=0.5),\n\n # Change brightness of images (85-115% of original value).\n iaa.Multiply((0.85, 1.15), per_channel=0.5),\n\n # Improve or worsen the contrast of images.\n iaa.ContrastNormalization((0.75, 1.25), per_channel=0.5),\n\n # Convert each image to grayscale and then overlay the\n # result with the original with random alpha. I.e. 
remove\n # colors with varying strengths.\n iaa.Grayscale(alpha=(0.0, 0.25)),\n\n # In some images distort local areas with varying strength.\n sometimes(iaa.PiecewiseAffine(scale=(0.001, 0.01)))\n ],\n # do all of the above augmentations in random order\n random_order=True\n )\n ],\n # do all of the above augmentations in random order\n random_order=True\n)\n\n\ndef augment_train_gen(train_gen, visualize=False):\n '''\n Creates a generator using another generator with applied image augmentation.\n Args\n train_gen : keras-retinanet generator object.\n visualize : Boolean; False will convert bounding boxes to their anchor box targets for the model.\n '''\n imgs = []\n boxes = []\n targets = []\n size = train_gen.size()\n idx = 0\n while True:\n while len(imgs) < args.batch_size:\n image = train_gen.load_image(idx % size)\n annotations = train_gen.load_annotations(idx % size)\n image,annotations = train_gen.random_transform_group_entry(image,annotations)\n imgs.append(image) \n boxes.append(annotations['bboxes'])\n targets.append(annotations)\n idx += 1\n if visualize:\n imgs = seq.augment_images(imgs)\n imgs = np.array(imgs)\n boxes = np.array(boxes)\n yield imgs,boxes\n else:\n imgs = seq.augment_images(imgs)\n imgs,targets = train_gen.preprocess_group(imgs,targets)\n imgs = train_gen.compute_inputs(imgs)\n targets = train_gen.compute_targets(imgs,targets)\n imgs = np.array(imgs)\n yield imgs,targets\n imgs = []\n boxes = []\n targets = []\n\t\t\n\t\t\n# import matplotlib.pyplot as plt\n\n# skip_batches = 5\n# i = 0\n# for imgs,boxes in augment_train_gen(train_gen,visualize=True):\n# if i > skip_batches:\n# fig=plt.figure(figsize=(24,96))\n# columns = 2\n# rows = 8\n# for i in range(1, columns*rows + 1):\n# draw_boxes(imgs[i], boxes[i], (0, 255, 0), thickness=1)\n# fig.add_subplot(rows, columns, i)\n# plt.imshow(cv2.cvtColor(imgs[i],cv2.COLOR_BGR2RGB))\n# plt.show()\n# break\n# else:\n# i += 1\n\nmodel, training_model, prediction_model = create_models(\n 
backbone_retinanet=b.retinanet,\n num_classes=train_gen.num_classes(),\n weights=None,\n multi_gpu=False,\n freeze_backbone=True,\n lr=1e-3,\n config=args.config\n )\n\n\t\t\ncallbacks = create_callbacks(\n model,\n training_model,\n prediction_model,\n valid_gen,\n args,\n)\ntraining_model.load_weights('/content/drive/My Drive/RA2/Retinanet/Upload for Kaggle/resnet50_csv_27.h5', skip_mismatch = True, by_name = True)\n\n##m Final model for Right hand joints detections\ntraining_model.fit_generator(generator=augment_train_gen(train_gen),\n steps_per_epoch = args.steps,\n epochs=args.epochs,\n verbose=1,\n callbacks=callbacks, ) ",
"\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r14/34 [===========>..................] - ETA: 32s - loss: 1.1881 - regression_loss: 1.0194 - classification_loss: 0.1686"
],
[
"",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
eca4ae3a1e522823c80984ebc5cc5d6992373ac1 | 5,322 | ipynb | Jupyter Notebook | 2020_notebooks/Labs/lab_5/solution.ipynb | blended-learning/compilers | 5869286f473183f00d27248eefaab94afb9297cf | [
"Apache-2.0"
] | null | null | null | 2020_notebooks/Labs/lab_5/solution.ipynb | blended-learning/compilers | 5869286f473183f00d27248eefaab94afb9297cf | [
"Apache-2.0"
] | null | null | null | 2020_notebooks/Labs/lab_5/solution.ipynb | blended-learning/compilers | 5869286f473183f00d27248eefaab94afb9297cf | [
"Apache-2.0"
] | null | null | null | 25.222749 | 118 | 0.513153 | [
[
[
"@file:DependsOn(\"/data/shared/antlr-4.9.1-complete.jar\")\n@file:DependsOn(\".\")",
"_____no_output_____"
],
[
"import org.antlr.v4.runtime.*\nimport calculator.*",
"_____no_output_____"
]
],
[
[
"## Work Unit 1",
"_____no_output_____"
],
[
"Complete the following visitor implementation",
"_____no_output_____"
]
],
[
[
"class Visitor: CalcBaseVisitor<Int>() {\n val scope = mutableMapOf<String, Int>()\n override fun visitProgram(ctx: CalcParser.ProgramContext): Int {\n ctx.statement().forEach {\n this.visit(it)\n }\n return 0\n }\n override fun visitAssignStatement(ctx: CalcParser.AssignStatementContext): Int = this.visit(ctx.assign())\n override fun visitExprStatement(ctx: CalcParser.ExprStatementContext): Int {\n val result = this.visit(ctx.expr())\n println(\"> \" + result)\n return result\n }\n override fun visitAssign(ctx: CalcParser.AssignContext): Int {\n val name = ctx.ID().text\n var value = this.visit(ctx.expr())\n scope[name] = value\n \n return value\n }\n override fun visitAdd(ctx: CalcParser.AddContext): Int = this.visit(ctx.e1) + this.visit(ctx.e2)\n override fun visitMul(ctx: CalcParser.MulContext): Int = this.visit(ctx.e1) * this.visit(ctx.e2)\n override fun visitParen(ctx: CalcParser.ParenContext): Int = this.visit(ctx.expr())\n override fun visitNum(ctx: CalcParser.NumContext): Int = ctx.NUM().text.toInt()\n override fun visitId(ctx: CalcParser.IdContext): Int {\n return scope.getOrDefault(ctx.ID().text, 0)\n }\n}",
"_____no_output_____"
],
[
"fun run(source: String) {\n val input = CharStreams.fromString(source)\n val lexer = CalcLexer(input)\n val tokens = CommonTokenStream(lexer)\n val parser = CalcParser(tokens)\n val tree = parser.program()\n \n Visitor().visit(tree)\n}",
"_____no_output_____"
],
[
"val source = \"\"\"\nx = 2;\ny = 4;\n1 + 2; // = 3\n(1+2)*3; // 3*3 = 9\nx + y; // 2+4 = 6\nx * (y + y); // 2 * (4 + 4) = 16\nx = (x * x); // x=2*2=4\nx = (x * x); // x=4*4=16\nx = (x * x); // x=16*16=256\nx\nx = (x * x); // x=256*256=65536\nx;\n\"\"\"\n\nrun(source)",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code"
] | [
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
]
] |
eca4bd2a257b761218f8dc618a81ca9360bf7deb | 215,306 | ipynb | Jupyter Notebook | demos/PCT5300-Reese-CoraClassification/python/part_1_neural_net.ipynb | brandonmreese/sas-global-forum-2020 | 14f969eb98626fc4342b032c8fe0413adbcf21f8 | [
"Apache-2.0"
] | 30 | 2020-01-17T19:46:09.000Z | 2022-03-16T08:01:59.000Z | demos/PCT5300-Reese-CoraClassification/python/part_1_neural_net.ipynb | brandonmreese/sas-global-forum-2020 | 14f969eb98626fc4342b032c8fe0413adbcf21f8 | [
"Apache-2.0"
] | 6 | 2020-02-11T17:04:40.000Z | 2020-11-03T17:04:37.000Z | demos/PCT5300-Reese-CoraClassification/python/part_1_neural_net.ipynb | brandonmreese/sas-global-forum-2020 | 14f969eb98626fc4342b032c8fe0413adbcf21f8 | [
"Apache-2.0"
] | 70 | 2020-01-16T15:06:56.000Z | 2022-03-22T21:54:37.000Z | 34.794118 | 479 | 0.400973 | [
[
[
"## CAS Connection\n\n### Connect to the Cas Server",
"_____no_output_____"
]
],
[
[
"import swat\ns = swat.CAS(host, port)",
"_____no_output_____"
]
],
[
[
"# Document Classification\n## Part 1: Network Analytics for Feature Engineering\nIn this notebook, you will build a neural network model to perform a classification task. The example presented illustrates the advantage of using network analysics to construct additional features from connected data.\n\nA different, but comparable method for incorporating network connectivity (using the same data set) is presented in the TensorFlow blog article [Graph regularization for document classification using natural graphs](https://www.tensorflow.org/neural_structured_learning/tutorials/graph_keras_mlp_cora). However, the feature engineering approach presented in this notebook achieves higher prediction accuracy than the graph regularization approach presented in that article.",
"_____no_output_____"
],
[
"# Load Data\n\nThe Cora data set is publicly available via [this hyperlink](https://linqs.soe.ucsc.edu/data).\n\nThe data set represents a set of papers, listed in the file cora.content along with 1433 flag columns. Each column indicates whether each paper contains a particular word.\n\nThe second file, cora.cites, contains pairs of papers, in which the first paper cites the second.",
"_____no_output_____"
]
],
[
[
"import document_classification_scripts as scripts\nimport importlib\nimportlib.reload(scripts)\nfrom document_classification_scripts import AttributeDict, nClasses, nWords, targetColumn, baseFeatureList\ndemo = scripts.Demo(s)",
"NOTE: Added action set 'sampling'.\nNOTE: Added action set 'pca'.\nNOTE: Added action set 'fedsql'.\nNOTE: Added action set 'deepLearn'.\nNOTE: Added action set 'network'.\nNOTE: Added action set 'transpose'.\nNOTE: Added action set 'table'.\nNOTE: Added action set 'builtins'.\nNOTE: Added action set 'neuralNet'.\nNOTE: Added action set 'autotune'.\nNOTE: Added action set 'session'.\nNOTE: Added action set 'decisionTree'.\nNOTE: Added action set 'aStore'.\nNOTE: Added action set 'aggregation'.\n"
],
[
"def loadRawData(pathCoraContent=\"../data/cora.content\",\n pathCoraCites=\"../data/cora.cites\"\n ):\n contentDf = pd.read_csv(pathCoraContent, sep=\"\\t\", header=None)\n contentDf.rename(columns=(lambda x: f\"w{x}\"), inplace=True)\n contentDf.rename(\n columns={\n \"w0\": \"node\",\n \"w1434\": \"target\"},\n inplace=True)\n s.upload(contentDf, casout={\"name\": \"content\", \"replace\": True})\n\n citesDf = pd.read_csv(pathCoraCites, sep=\"\\t\", header=None)\n citesDf.rename(columns={0: \"from\", 1: \"to\"}, inplace=True)\n s.upload(citesDf, casout={\"name\": \"cites\", \"replace\": True})",
"_____no_output_____"
],
[
"demo.loadRawData()",
"NOTE: Cloud Analytic Services made the uploaded file available as table CONTENT in caslib CASUSER(brrees).\nNOTE: The table CONTENT has been created in caslib CASUSER(brrees) from binary data uploaded to Cloud Analytic Services.\nNOTE: Cloud Analytic Services made the uploaded file available as table CITES in caslib CASUSER(brrees).\nNOTE: The table CITES has been created in caslib CASUSER(brrees) from binary data uploaded to Cloud Analytic Services.\n"
],
[
"def head(table, nRows=5):\n return s.table.fetch(\n table=table,\n format=True,\n to=nRows,\n maxRows=nRows)",
"_____no_output_____"
],
[
"demo.head(\"content\")",
"_____no_output_____"
],
[
"demo.head(\"cites\")",
"_____no_output_____"
]
],
[
[
"### Graph Visualization Code",
"_____no_output_____"
]
],
[
[
"def graph2dot(linksDf=None,\n nodesDf=None,\n linksFrom=\"from\",\n linksTo=\"to\",\n nodesNode=\"node\",\n nodesLabel=None,\n nodesSize=None,\n nodesSizeScale=1,\n nodesColor=None,\n linksLabel=None,\n linksColor=None,\n outFile=None,\n view=True,\n stdout=None,\n size=10,\n layout=None,\n directed=False,\n sort=True):\n dot = Digraph() if directed else Graph()\n dot.attr(rankdir='LR')\n dot.attr(size=f\"{size}\")\n dot.attr('node', shape='circle')\n if layout is not None:\n dot.attr(layout=f\"{layout}\")\n\n if(linksDf):\n for index, row in (linksDf.sort(\n [linksFrom, linksTo]).iterrows() if sort else linksDf.iterrows()):\n dot.edge(\n f\"{row[linksFrom]}\", f\"{row[linksTo]}\", label=None if (\n linksLabel is None) else f\"{row[linksLabel]}\", color=None if (\n linksColor is None) else row[linksColor])\n\n if(nodesDf):\n for index, row in (\n nodesDf.sort(\n [nodesNode]).iterrows() if sort else nodesDf.iterrows()):\n dot.node(\n f\"{row[nodesNode]}\",\n f\"{row[nodesNode]}\" if nodesLabel is None else f\"{row[nodesLabel]}\",\n width=None if (\n nodesSize is None) else f\"{1*nodesSizeScale*row[nodesSize]}\",\n color=None if (\n linksColor is None) else row[linksColor])\n if stdout is None:\n stdout = True if outFile is None else False\n if stdout:\n print(dot.source)\n if outFile is not None:\n dot.render(f\"../dot/{outFile}\", view=view)\n return dot\n\ndef showReachNeighborhood(session,\n tableLinks,\n tableNodes,\n node,\n hops,\n directed=False,\n size=5,\n layout=\"fdp\",\n nodesSizeScale=100\n ):\n nodeSub = {\n \"node\": [node],\n \"reach\": [1]\n }\n nodeSubDf = pd.DataFrame(nodeSub, columns=[\"node\", \"reach\"])\n session.upload(nodeSubDf, casout={\"name\": \"_nodeSub_\", \"replace\": True})\n session.network.reach(\n loglevel=\"NONE\",\n direction=\"directed\" if directed else \"undirected\",\n links=tableLinks,\n nodes=tableNodes,\n nodesVar={\"vars\": [\"target\"]},\n maxReach=hops,\n outReachLinks={\"name\": \"_reachLinks_\", \"replace\": True},\n 
outReachNodes={\"name\": \"_reachNodes_\", \"replace\": True},\n nodesSubset=\"_nodeSub_\"\n )\n session.datastep.runCode(\n code=f\"\"\"\n data _reachNodes_;\n set _reachNodes_;\n length label $50;\n label=target || \"\\nPaperId = \" || put(node, 7.);\n if put(node, 7.) = {node} then label = \"???\" || \"\\nPaperId = \" || put(node, 7.);\n run;\n \"\"\"\n )\n return graph2dot(linksDf=session.CASTable(\"_reachLinks_\"),\n nodesDf=session.CASTable(\"_reachNodes_\"),\n nodesLabel=\"label\",\n layout=layout,\n directed=directed,\n size=size,\n nodesSizeScale=nodesSizeScale,\n stdout=False)",
"_____no_output_____"
]
],
[
[
"### Graph Visualization",
"_____no_output_____"
]
],
[
[
"scripts.showReachNeighborhood(s,\n \"cites\",\n \"content\",\n 8617,\n 2,\n directed=True,\n size=10,\n nodesSizeScale=5\n )",
"NOTE: Cloud Analytic Services made the uploaded file available as table _NODESUB_ in caslib CASUSER(brrees).\nNOTE: The table _NODESUB_ has been created in caslib CASUSER(brrees) from binary data uploaded to Cloud Analytic Services.\nNOTE: Character values have been converted to numeric values at the places given by: (Line):(Column).\n 0:164\nNOTE: Duplicate messages output by DATA step:\nNOTE: Character values have been converted to numeric values at the places given by: (Line):(Column). (occurred 32 times)\n 0:164 (occurred 32 times)\n"
]
],
[
[
"# Data Preprocessing\n",
"_____no_output_____"
],
[
"## Create a custom format definition for target labels",
"_____no_output_____"
]
],
[
[
"def defineTargetVariableFormat():\n \"\"\"Custom Format Definition for Target Labels.\"\"\"\n s.sessionProp.addFmtLib(\n fmtLibName=\"myFmtLib\",\n caslib=\"mycas\",\n replace=True\n )\n s.sessionProp.addFormat(\n fmtLibName=\"myFmtLib\",\n fmtName=targetClassFmt,\n replace=True,\n ranges={\"'Case_Based'='1'\",\n \"'Genetic_Algorithms'='2'\",\n \"'Neural_Networks'='3'\",\n \"'Probabilistic_Methods'='4'\",\n \"'Reinforcement_Learning'='5'\",\n \"'Rule_Learning'='6'\",\n \"'Theory'='7'\",\n \"' '=' '\"\n })",
"_____no_output_____"
],
[
"demo.defineTargetVariableFormat()",
"NOTE: Format library MYFMTLIB added. Format search update using parameter APPEND completed.\n"
]
],
[
[
"## Data Partitioning\nPerforms an 80%/20% training/test split.",
"_____no_output_____"
]
],
[
[
"def addCaslibIfNeeded(caslib):\n r = s.table.queryCaslib(caslib=caslib)\n if not r[caslib]:\n s.table.addcaslib(\n activeOnAdd=False,\n caslib=\"cora\",\n datasource={\"srctype\": \"path\"},\n path=\"/bigdisk/lax/brrees/data/cora\"\n )\n\ndef saveTables(tables, caslib=\"cora\", replace=True):\n for table in tables:\n s.table.save(\n caslib=caslib,\n table=table,\n name=f\"{table}.sashdat\",\n replace=replace\n )\n\ndef loadTables(tables, caslib=\"cora\"):\n for table in tables:\n s.table.loadTable(\n caslib=caslib,\n casOut={\"name\": table, \"replace\": True},\n path=f\"{table}.sashdat\"\n )\n\ndef partitionData(tableIn=\"content\",\n tableOut=\"contentPartitioned\",\n tableTrainOut=\"contentTrain\",\n tableTestOut=\"contentTest\"):\n s.sampling.srs(\n table=tableIn,\n seed=randomSeed,\n samppct=trainPercentage,\n partInd=True,\n output={\n \"casout\": {\"name\": tableOut, \"replace\": True},\n \"copyVars\": \"all\",\n \"partIndName\": \"partition\"\n }\n )\n\n s.datastep.runCode(\n code=f\"\"\"\n data {tableTrainOut} {tableTestOut};\n set {tableOut};\n if partition=1 then output {tableTrainOut};\n else output {tableTestOut};\n run;\n \"\"\"\n )\n\ndef loadOrPartitionData(newRun=False):\n coraCaslib = \"cora\"\n addCaslibIfNeeded(coraCaslib)\n\n r = s.table.fileInfo(caslib=coraCaslib)\n if \"contentPartitioned.sashdat\" not in r.FileInfo[\"Name\"].unique():\n newRun = True\n\n if newRun:\n partitionData()\n saveTables(\n [\"contentPartitioned\", \"contentTrain\", \"contentTest\"])\n else:\n loadTables(\n [\"contentPartitioned\", \"contentTrain\", \"contentTest\"])",
"_____no_output_____"
],
[
"demo.loadOrPartitionData()\nprint(f\"contentTrain: (rows, cols) = {s.CASTable('contentTrain').shape}\")\nprint(f\"contentTest: (rows, cols) = {s.CASTable('contentTest').shape}\")",
"NOTE: Cloud Analytic Services added the caslib 'cora'.\nNOTE: Cloud Analytic Services made the file contentPartitioned.sashdat available as table CONTENTPARTITIONED in caslib CASUSER(brrees).\nNOTE: Cloud Analytic Services made the file contentTrain.sashdat available as table CONTENTTRAIN in caslib CASUSER(brrees).\nNOTE: Cloud Analytic Services made the file contentTest.sashdat available as table CONTENTTEST in caslib CASUSER(brrees).\ncontentTrain: (rows, cols) = (2166, 1436)\ncontentTest: (rows, cols) = (542, 1436)\n"
]
],
[
[
"## Principal Component Analysis (PCA)\nIt is often useful to perform Principal Component Analysis on wide data. The eigenvalue PCA method in the pca action set allows you to keep only the components that contain the largest eigenvalues. This means the limited number of selected PCA variables can compactly represent most of the variation observed in the original wide data set.",
"_____no_output_____"
]
],
[
[
"def performPca(nPca):\n nPca = nPca\n s.pca.eig(\n table=\"contentTrain\",\n n=nPca,\n prefix=\"pca\",\n inputs=baseFeatureList,\n code={\n \"casOut\": {\"name\": \"pcaTransformCode\", \"replace\": True},\n \"comment\": False,\n \"tabForm\": True\n },\n output={\"casOut\": {\"name\": \"contentTrainPca\", \"replace\": True},\n \"copyVars\": [\"node\", \"target\", \"partition\"],\n \"score\": \"pca\"}\n )\n s.datastep.runCodeTable(\n table=\"contentPartitioned\",\n codeTable=\"pcaTransformCode\",\n casout={\"name\": \"contentPartitionedPca\"},\n dropVars=baseFeatureList\n )\n s.datastep.runCode(\n code=\"data contentTestPca; set contentPartitionedPca(where=(partition=0)); run;\")",
"_____no_output_____"
],
[
"nPca = 40\ndemo.performPca(nPca)\npcaFeatureList = [f\"pca{i}\" for i in range(1,nPca)]",
"WARNING: The variable w445 in table CONTENTTRAIN is constant.\n"
]
],
[
[
"## Join Citations and Training Targets\nBy executing a join query, you can join the training target information to the citation links data. This allows you to partition the graph into subgraphs based on the type of paper being cited.",
"_____no_output_____"
]
],
[
[
"def joinTrainingTargets():\n s.fedsql.execDirect(\n query=\"\"\"\n create table citesTrain {options replace=true} as\n select a.*, b.target as from_target, c.target as to_target\n from cites as a\n inner join contentTrain as b\n on a.from = b.node\n inner join contentTrain as c\n on a.to = c.node;\n \"\"\"\n )\n s.fedsql.execDirect(\n query=\"\"\"\n create table citesCombined {options replace=true} as\n select a.*, b.target as from_target, c.target as to_target\n from cites as a\n left join contentTrain as b\n on a.from = b.node\n left join contentTrain as c\n on a.to = c.node;\n \"\"\"\n )",
"_____no_output_____"
],
[
"demo.joinTrainingTargets()",
"NOTE: Table CITESTRAIN was created in caslib CASUSER(brrees) with 3562 rows returned.\nNOTE: Table CITESCOMBINED was created in caslib CASUSER(brrees) with 5429 rows returned.\n"
]
],
[
[
"## Generate Network Features\n\nYou can perform feature engineering using the network action set. The following code defines functions that:\n\n- initialize the nodes table for your network by copying the content data table\n- load the nodes and links tables into an in-memory graph object\n- add features generated from each of the network actions considered:\n - centrality\n - community\n - core \n - nodeSimilarity\n- merge the new features into the nodes table as they are generated\n- unload the in-memory graph object\n\nOnce these network features are generated, they are available to augment the classification model building process.",
"_____no_output_____"
]
],
[
[
"def initNetwork(tableName):\n # Copies the base feature table and returns an empty network features\n # list\n s.datastep.runCode(\n code=f\"data {tableName}Network; set {tableName}; run;\"\n )\n featureList = []\n return featureList\n\ndef loadGraph(tableNodes, tableLinks):\n # Load an in-memory copy of the graph represented by the nodes and\n # links tables\n r = s.network.loadGraph(\n multilinks=False,\n links=tableLinks,\n nodes={\n \"name\": tableNodes,\n \"computedVars\": (\"initialCommunity\"),\n \"computedVarsProgram\": \"initialCommunity = input(put(target, $targetClass.), 1.);\"},\n nodesVar={\n \"vars\": (\"initialCommunity\")})\n return r.graph\n\ndef unloadGraph(graphId):\n r = s.network.unloadGraph(\n graph=graphId\n )\n return None\n\ndef mergeFeatures(tableNodes, tableNodesAdd, featureList, featureListAdd):\n s.datastep.runCode(\n code=f\"\"\"\n data {tableNodes};\n merge {tableNodes} {tableNodesAdd};\n by node;\n run;\n \"\"\"\n )\n featureList = featureList + featureListAdd\n return featureList\n\ndef mergeRatioFeature(\n tableNodes,\n tableNodesAdd,\n featureList,\n featureAdd,\n denominator):\n s.datastep.runCode(\n code=f\"\"\"\n data {tableNodes};\n merge {tableNodes} {tableNodesAdd};\n by node;\n if ({denominator} GT 0) then {featureAdd} = {featureAdd}/{denominator};\n else {featureAdd}=0;\n run;\n \"\"\"\n )\n featureList = featureList + [featureAdd]\n return featureList\n\ndef addFeaturesNodeSimilarity(graphId, tableNodes, featureList):\n nDimensions = 10\n outTableNodes = \"outNodesNodeSim\"\n s.network.nodesimilarity(\n loglevel=\"BASIC\",\n graph=graphId,\n jaccard=False,\n vector=True,\n proximityOrder=\"second\",\n nDimensions=nDimensions,\n nSamples=500000,\n convergenceThreshold=0.0001,\n outNodes={\"name\": outTableNodes, \"replace\": True}\n )\n newFeatures = [f\"vec_{i}\" for i in range(\n nDimensions)] + [f\"ctx_{i}\" for i in range(nDimensions)]\n featureList = mergeFeatures(\n tableNodes, outTableNodes, featureList, 
newFeatures)\n return featureList\n\ndef addFeaturesCore(graphId, tableNodes, tableLinks, featureList):\n nDimensions = 10\n outTableNodes = \"outNodesCore\"\n s.network.core(\n graph=graphId,\n outNodes={\"name\": outTableNodes, \"replace\": True}\n )\n newFeatures = [\"core_out\"]\n featureList = mergeFeatures(\n tableNodes, outTableNodes, featureList, newFeatures)\n\n subgraphs = [\"Case_Based\",\n \"Genetic_Algorithms\",\n \"Neural_Networks\",\n \"Probabilistic_Methods\",\n \"Reinforcement_Learning\",\n \"Rule_Learning\",\n \"Theory\"\n ]\n\n for subgraph in subgraphs:\n s.network.core(\n nodes=tableNodes,\n links={\n \"name\": tableLinks,\n \"where\": f\"from_target EQ '{subgraph}' OR to_target EQ '{subgraph}'\"},\n outNodes={\n \"name\": outTableNodes,\n \"replace\": True})\n newFeature = \"core_out_\" + subgraph\n featureList = mergeRatioFeature(\n tableNodes,\n outTableNodes +\n f\"(rename=(core_out={newFeature}))\",\n featureList,\n newFeature,\n \"core_out\")\n return featureList\n\ndef addFeaturesCentrality(tableNodes, tableLinks, featureList):\n outTableNodes = \"nodesCentrality\"\n outTableTransIn = \"nodesDegreeIn\"\n outTableTransOut = \"nodesDegreeOut\"\n\n s.network.centrality(\n direction=\"directed\",\n links={\n \"name\": tableLinks,\n \"where\": \"from_target NE ' '\",\n \"groupBy\": \"from_target\"},\n outNodes={\n \"name\": outTableNodes,\n \"replace\": True},\n degree=\"unweight\")\n s.transpose.transpose(\n table={\"name\": outTableNodes, \"groupBy\": \"node\"},\n transpose=(\"centr_degree_in\"),\n id=(\"from_target\"),\n prefix=\"deg_in_\",\n casOut={\"name\": outTableTransIn, \"replace\": True}\n )\n\n s.network.centrality(\n direction=\"directed\",\n links={\n \"name\": tableLinks,\n \"where\": \"to_target NE ' '\",\n \"groupBy\": \"to_target\"},\n outNodes={\n \"name\": outTableNodes,\n \"replace\": True},\n degree=\"unweight\")\n s.transpose.transpose(\n table={\"name\": outTableNodes, \"groupBy\": \"node\"},\n 
transpose=(\"centr_degree_out\"),\n id=(\"to_target\"),\n prefix=\"deg_out_\",\n casOut={\"name\": outTableTransOut, \"replace\": True}\n )\n tableNetworkDegree = \"networkDegree\"\n s.datastep.runCode(\n code=f\"\"\"\n data networkDegree;\n merge {tableNodes}(keep=node)\n {outTableTransIn}(drop=_NAME_)\n {outTableTransOut}(drop=_NAME_);\n by node;\n array x deg_: ;\n do over x;\n if x=. then x=0;\n end;\n run;\n \"\"\"\n )\n r = s.table.columnInfo(\n table=tableNetworkDegree\n )\n newFeatures = r[\"ColumnInfo\"]['Column'].tolist()[1:]\n featureList = mergeFeatures(\n tableNodes, tableNetworkDegree, featureList, newFeatures)\n return featureList\n\ndef addFeaturesCommunity(graphId, tableNodes, featureList):\n outTableNodes = \"outNodesCommunity\"\n outTableComm = \"outComm\"\n outTableOverlap = \"OutCommOverlap\"\n s.network.community(\n graph=graphId,\n warmstart=\"initialCommunity\",\n resolutionList=(1.0, 0.2),\n outNodes={\"name\": outTableNodes, \"replace\": True}\n )\n s.network.community(\n graph=graphId,\n warmstart=\"initialCommunity\",\n resolutionList=(1.0),\n outCommunity={\"name\": outTableComm, \"replace\": True},\n outOverlap={\"name\": outTableOverlap, \"replace\": True}\n )\n s.fedsql.execDirect(\n query=f\"\"\"\n create table {outTableNodes} {{options replace=true}} as\n select a.*, b.nodes as \"commNodes\",\n b.conductance as \"commConductance\",\n b.density as \"commDensity\",\n COALESCE(c.intensity, 0) as \"commIntensity\"\n from {outTableNodes} as a\n left join {outTableComm} as b\n on a.community_0 = b.community and b.level = 0\n left join {outTableOverlap} as c\n on a.node=c.node and a.community_0 = c.community\n ;\n \"\"\"\n )\n newFeatures = [\n \"commNodes\",\n \"commConductance\",\n \"commDensity\",\n \"commIntensity\"]\n featureList = mergeFeatures(\n tableNodes, outTableNodes, featureList, newFeatures)\n return featureList\n\ndef addNetworkFeatures(tableInitNodes, tableLinks, networkParam):\n tableNodes = f\"{tableInitNodes}Network\"\n 
featureList = initNetwork(tableInitNodes)\n    # Initialize to None so the in-memory graph is loaded exactly once below\n    # and always unloaded; without this, `loadedGraph` is read before any\n    # local assignment and raises UnboundLocalError.\n    loadedGraph = None\n    if loadedGraph is None:\n        loadedGraph = loadGraph(tableInitNodes, tableLinks)\n    if networkParam.useCommunity:\n        featureList = addFeaturesCommunity(\n            loadedGraph, tableNodes, featureList)\n    if networkParam.useCentrality:\n        featureList = addFeaturesCentrality(\n            tableNodes, tableLinks, featureList)\n    if networkParam.useNodeSimilarity:\n        featureList = addFeaturesNodeSimilarity(\n            loadedGraph, tableNodes, featureList)\n    if networkParam.useCore:\n        featureList = addFeaturesCore(\n            loadedGraph, tableNodes, tableLinks, featureList)\n    if loadedGraph is not None:\n        unloadGraph(loadedGraph)\n        loadedGraph = None\n    return (tableNodes, featureList)",
"_____no_output_____"
]
],
[
[
"Four tables are created {training, scoring} X {base, PCA}:\n- for training, the graph is limited to links where both endpoints belong to the training data set\n- for scoring, the whole graph is available (with missing target labels for the test data set)\n\nThe following cell adds Network features to each of these four tables.",
"_____no_output_____"
]
],
[
[
"%%capture\nnetworkParam=AttributeDict({\n \"useCentrality\":True,\n \"useNodeSimilarity\":True,\n \"useCommunity\":True,\n \"useCore\":True\n})\n\ntableContentNetwork, networkFeatureList = demo.addNetworkFeatures(\n \"contentTrain\", \"citesTrain\", networkParam)\ntableContentPartitionedNetwork, networkFeatureList = demo.addNetworkFeatures(\n \"contentPartitioned\", \"citesCombined\", networkParam)\n\ntableContentNetworkPca, networkFeatureList = demo.addNetworkFeatures(\n \"contentTrainPca\", \"citesTrain\", networkParam)\ntableContentPartitionedNetworkPca, networkFeatureList = demo.addNetworkFeatures(\n \"contentPartitionedPca\", \"citesCombined\", networkParam)",
"_____no_output_____"
],
[
"s.datastep.runCode(\n code = f\"data contentTestNetwork; set {tableContentPartitionedNetwork}(where=(partition=0)); run;\"\n)\nprint(f\"contentTestNetwork: (rows, cols) = {s.CASTable('contentTestNetwork').shape}\")\n\ns.datastep.runCode(\n code = f\"data contentTestPcaNetwork; set {tableContentPartitionedNetworkPca}(where=(partition=0)); run;\"\n)\nprint(f\"contentTestPcaNetwork: (rows, cols) = {s.CASTable('contentTestPcaNetwork').shape}\")",
"contentTestNetwork: (rows, cols) = (542, 1485)\ncontentTestPcaNetwork: (rows, cols) = (542, 92)\n"
]
],
[
[
"# Build a Neural Network Classifier\n## Define a Neural Net with Two Dense Hidden Layers\n\nThe neural network architecture to be trained has two dense hidden layers:\n",
"_____no_output_____"
],
[
"### deepLearn Hyperparameters",
"_____no_output_____"
]
],
[
[
"deepLearnParam = AttributeDict({\n \"randomSeed\": 456,\n \"dropout\": 0.5,\n \"activation\": \"RECTIFIER\",\n \"outputActivation\": \"SOFTMAX\",\n \"denseLayers\": [50,50],\n \"nOutputs\": nClasses,\n \"nEpochs\": 100,\n \"algoMethod\": \"ADAM\",\n \"useLocking\": False\n})",
"_____no_output_____"
]
],
[
[
"## Train a Neural Net Model with Baseline Features",
"_____no_output_____"
]
],
[
[
"def defineNnModel(modelName, deepLearnParam):\n s.deepLearn.buildModel(\n modelTable={\"name\": modelName, \"replace\": True},\n type=\"DNN\"\n )\n\n # Add the input layer\n s.deepLearn.addLayer(\n modelTable={\"name\": modelName},\n layer={\"type\": \"INPUT\"},\n name=\"inputLayer\"\n )\n\n # Add the dense (a.k.a. fully connected) layers\n prevLayer = \"inputLayer\"\n for i in range(len(deepLearnParam.denseLayers)):\n thisLayer = f\"denseLayer{i}\"\n s.deepLearn.addLayer(\n modelTable={\"name\": modelName},\n layer={\"type\": \"FC\",\n \"n\": deepLearnParam.denseLayers[i],\n \"act\": deepLearnParam.activation,\n \"dropout\": deepLearnParam.dropout},\n name=thisLayer,\n srcLayers=[prevLayer]\n )\n prevLayer = thisLayer\n\n # Add the output layer\n s.deepLearn.addLayer(\n modelTable={\"name\": modelName},\n layer={\"type\": \"OUTPUT\",\n \"n\": deepLearnParam.nOutputs,\n \"act\": deepLearnParam.outputActivation},\n name=\"outputLayer\",\n srcLayers=[prevLayer]\n )\n\ndef trainNnModel(modelName, tableTrain, featureList, deepLearnParam):\n return s.deepLearn.dlTrain(\n seed=deepLearnParam.randomSeed,\n inputs=featureList,\n target=targetColumn,\n table=tableTrain,\n modelTable=modelName,\n optimizer={\n \"algorithm\": {\n \"method\": deepLearnParam.algoMethod,\n \"useLocking\": deepLearnParam.useLocking},\n \"maxEpochs\": deepLearnParam.nEpochs},\n modelWeights={\n \"name\": f\"{modelName}Weights\",\n \"replace\": True})\n\ndef scoreNnModel(modelName, tableTest, silent=False):\n r = s.deepLearn.dlScore(\n initWeights=f\"{modelName}Weights\",\n table=tableTest,\n modelTable=modelName,\n copyVars=(\"node\", \"target\"),\n casOut={\"name\": f\"{modelName}Scored\", \"replace\": True}\n )\n accuracy = (100 - float(r.ScoreInfo[\"Value\"][2])) / 100\n if not silent:\n print(r.ScoreInfo)\n print(f\"Accuracy = {accuracy}\")\n return accuracy\n\ndef bootstrapNnModel(modelName, tableTrain, tableTest, featureList, deepLearnParam, n):\n accuracies = []\n for i in range(n):\n 
partitionData(tableIn=tableTest, tableOut=f\"{tableTest}Part_\", table1Out=f\"{tableTest}Boot_\", table2Out=None, frac1=90, randomSeed=(i+5678), partName=\"bootstrap\")\n trainNnModel(modelName, tableTrain, featureList, deepLearnParam)\n acc=scoreNnModel(modelName, f\"{tableTest}Boot_\", silent=True)\n print(f\"Accuracy = {acc}\")\n accuracies = accuracies + [acc]\n\n print(f\"Bootstrap Accuracy = {np.mean(accuracies)} +- {np.std(accuracies)}\")\n return accuracies",
"_____no_output_____"
],
[
"baseModel = \"baseModel\"\ndemo.defineNnModel(baseModel, deepLearnParam)",
"_____no_output_____"
],
[
"demo.trainNnModel(baseModel,\"contentTrain\", scripts.baseFeatureList, deepLearnParam)",
"The history saving thread hit an unexpected error (OperationalError('disk I/O error',)).History will not be written to the database.\n"
],
[
"demo.scoreNnModel(baseModel,\"contentTest\")",
" Descr Value\n0 Number of Observations Read 542\n1 Number of Observations Used 542\n2 Misclassification Error (%) 21.95572\n3 Loss Error 1.917275\nAccuracy = 0.7804428\n"
]
],
[
[
"### Bootstrap Runs",
"_____no_output_____"
]
],
[
[
"accuracies = demo.bootstrapNnModel(baseModel,\"contentTrain\",\"contentTest\",baseFeatureList, deepLearnParam, 25);",
"NOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5678 for sampling.\nAccuracy = 0.7643442999999999\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5679 for sampling.\nAccuracy = 0.7848361\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5680 for sampling.\nAccuracy = 0.7745902\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5681 for sampling.\nAccuracy = 0.7745902\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5682 for sampling.\nAccuracy = 0.7745902\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5683 for sampling.\nAccuracy = 0.7745902\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5684 for sampling.\nAccuracy = 0.7663934\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5685 for sampling.\nAccuracy = 0.7889344\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5686 for sampling.\nAccuracy = 0.7786884999999999\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5687 for sampling.\nAccuracy = 0.7807377\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5688 for sampling.\nAccuracy = 0.7827869\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5689 for sampling.\nAccuracy = 0.7848361\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5690 for sampling.\nAccuracy = 0.7786884999999999\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5691 for sampling.\nAccuracy = 0.7868852\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5692 for sampling.\nAccuracy = 0.7827869\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5693 for sampling.\nAccuracy = 0.7827869\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5694 for sampling.\nAccuracy = 0.7725409999999999\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5695 for sampling.\nAccuracy = 0.7786884999999999\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5696 for sampling.\nAccuracy = 
0.7786884999999999\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5697 for sampling.\nAccuracy = 0.7848361\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5698 for sampling.\nAccuracy = 0.7786884999999999\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5699 for sampling.\nAccuracy = 0.7704918000000001\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5700 for sampling.\nAccuracy = 0.7848361\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5701 for sampling.\nAccuracy = 0.7745902\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5702 for sampling.\nAccuracy = 0.7786884999999999\nBootstrap Accuracy = 0.778524596 +- 0.00607663747567552\n"
]
],
[
[
"## Train a Neural Net Model with PCA Features",
"_____no_output_____"
]
],
[
[
"pcaModel=\"pcaModel\"\ndemo.defineNnModel(pcaModel, deepLearnParam)",
"_____no_output_____"
],
[
"demo.trainNnModel(pcaModel,\"contentTrainPca\", pcaFeatureList, deepLearnParam)",
"_____no_output_____"
]
],
[
[
"### Score the PCA Features Model",
"_____no_output_____"
]
],
[
[
"demo.scoreNnModel(pcaModel,\"contentTestPca\")",
" Descr Value\n0 Number of Observations Read 542\n1 Number of Observations Used 542\n2 Misclassification Error (%) 26.01476\n3 Loss Error 0.765852\nAccuracy = 0.7398524000000001\n"
]
],
[
[
"### Bootstrap Runs",
"_____no_output_____"
]
],
[
[
"accuracies = demo.bootstrapNnModel(pcaModel,\"contentTrainPca\",\"contentTestPca\",pcaFeatureList, deepLearnParam, 25);",
"NOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5678 for sampling.\nAccuracy = 0.7336066\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5679 for sampling.\nAccuracy = 0.7356557000000001\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5680 for sampling.\nAccuracy = 0.7438525\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5681 for sampling.\nAccuracy = 0.7459015999999999\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5682 for sampling.\nAccuracy = 0.7459015999999999\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5683 for sampling.\nAccuracy = 0.7336066\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5684 for sampling.\nAccuracy = 0.7418032999999999\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5685 for sampling.\nAccuracy = 0.7315574000000001\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5686 for sampling.\nAccuracy = 0.7459015999999999\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5687 for sampling.\nAccuracy = 0.7336066\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5688 for sampling.\nAccuracy = 0.7356557000000001\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5689 for sampling.\nAccuracy = 0.7459015999999999\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5690 for sampling.\nAccuracy = 0.7479508\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5691 for sampling.\nAccuracy = 0.7336066\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5692 for sampling.\nAccuracy = 0.75\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5693 for sampling.\nAccuracy = 0.7520492\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5694 for sampling.\nAccuracy = 0.7438525\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5695 for sampling.\nAccuracy = 0.7336066\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5696 for 
sampling.\nAccuracy = 0.7418032999999999\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5697 for sampling.\nAccuracy = 0.7377049\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5698 for sampling.\nAccuracy = 0.7520492\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5699 for sampling.\nAccuracy = 0.7418032999999999\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5700 for sampling.\nAccuracy = 0.7520492\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5701 for sampling.\nAccuracy = 0.7561475\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5702 for sampling.\nAccuracy = 0.7622951\nBootstrap Accuracy = 0.7431147599999999 +- 0.00802859458022384\n"
]
],
[
[
"## Train a Model that includes Only Network Features",
"_____no_output_____"
]
],
[
[
"networkOnlyModel = \"networkOnlyModel\"\ndemo.defineNnModel(networkOnlyModel, deepLearnParam)",
"_____no_output_____"
],
[
"demo.trainNnModel(networkOnlyModel,\"contentTrainNetwork\",networkFeatureList, deepLearnParam)",
"_____no_output_____"
]
],
[
[
"### Score the Network-Only Model",
"_____no_output_____"
]
],
[
[
"demo.scoreNnModel(networkOnlyModel,\"contentTestNetwork\")",
" Descr Value\n0 Number of Observations Read 542\n1 Number of Observations Used 542\n2 Misclassification Error (%) 14.02214\n3 Loss Error 0.622495\nAccuracy = 0.8597786\n"
]
],
[
[
"### Bootstrap Runs",
"_____no_output_____"
]
],
[
[
"demo.bootstrapNnModel(networkOnlyModel,tableContentNetwork,\"contentTestNetwork\",networkFeatureList, deepLearnParam, 25);",
"NOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5678 for sampling.\nAccuracy = 0.8688525\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5679 for sampling.\nAccuracy = 0.8606556999999999\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5680 for sampling.\nAccuracy = 0.8606556999999999\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5681 for sampling.\nAccuracy = 0.8668032999999999\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5682 for sampling.\nAccuracy = 0.8668032999999999\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5683 for sampling.\nAccuracy = 0.8688525\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5684 for sampling.\nAccuracy = 0.8586066\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5685 for sampling.\nAccuracy = 0.8627049\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5686 for sampling.\nAccuracy = 0.875\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5687 for sampling.\nAccuracy = 0.8709015999999999\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5688 for sampling.\nAccuracy = 0.8668032999999999\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5689 for sampling.\nAccuracy = 0.8688525\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5690 for sampling.\nAccuracy = 0.8565573999999999\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5691 for sampling.\nAccuracy = 0.8729508\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5692 for sampling.\nAccuracy = 0.8709015999999999\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5693 for sampling.\nAccuracy = 0.8647541\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5694 for sampling.\nAccuracy = 0.8627049\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5695 for sampling.\nAccuracy = 0.8729508\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5696 for 
sampling.\nAccuracy = 0.8770492\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5697 for sampling.\nAccuracy = 0.8709015999999999\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5698 for sampling.\nAccuracy = 0.8668032999999999\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5699 for sampling.\nAccuracy = 0.8770492\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5700 for sampling.\nAccuracy = 0.8627049\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5701 for sampling.\nAccuracy = 0.8606556999999999\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5702 for sampling.\nAccuracy = 0.8647541\nBootstrap Accuracy = 0.86704918 +- 0.005508296169161583\n"
]
],
[
[
"## Train a Neural Net Model with Baseline + Network Features",
"_____no_output_____"
]
],
[
[
"networkModel = \"networkModel\"\ndemo.defineNnModel(networkModel, deepLearnParam)",
"_____no_output_____"
],
[
"demo.trainNnModel(networkModel,\"contentTrainNetwork\",baseFeatureList+networkFeatureList, deepLearnParam)",
"_____no_output_____"
]
],
[
[
"### Score the Baseline+Network Model",
"_____no_output_____"
]
],
[
[
"demo.scoreNnModel(networkModel,\"contentTestNetwork\")",
" Descr Value\n0 Number of Observations Read 542\n1 Number of Observations Used 542\n2 Misclassification Error (%) 11.43911\n3 Loss Error 0.937649\nAccuracy = 0.8856089\n"
]
],
[
[
"### Bootstrap Runs",
"_____no_output_____"
]
],
[
[
"demo.bootstrapNnModel(networkModel,\"contentTrainNetwork\",\"contentTestNetwork\",baseFeatureList+networkFeatureList, deepLearnParam, 25);",
"NOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5678 for sampling.\nAccuracy = 0.8688525\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5679 for sampling.\nAccuracy = 0.8299179999999999\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5680 for sampling.\nAccuracy = 0.8483607000000001\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5681 for sampling.\nAccuracy = 0.8586066\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5682 for sampling.\nAccuracy = 0.8647541\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5683 for sampling.\nAccuracy = 0.8442623\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5684 for sampling.\nAccuracy = 0.8627049\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5685 for sampling.\nAccuracy = 0.8790984000000001\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5686 for sampling.\nAccuracy = 0.8934426000000001\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5687 for sampling.\nAccuracy = 0.8852459\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5688 for sampling.\nAccuracy = 0.875\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5689 for sampling.\nAccuracy = 0.8668032999999999\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5690 for sampling.\nAccuracy = 0.8606556999999999\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5691 for sampling.\nAccuracy = 0.8401639\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5692 for sampling.\nAccuracy = 0.8647541\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5693 for sampling.\nAccuracy = 0.8811475\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5694 for sampling.\nAccuracy = 0.8709015999999999\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5695 for sampling.\nAccuracy = 0.8401639\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5696 for 
sampling.\nAccuracy = 0.8545081999999999\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5697 for sampling.\nAccuracy = 0.8668032999999999\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5698 for sampling.\nAccuracy = 0.8688525\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5699 for sampling.\nAccuracy = 0.8688525\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5700 for sampling.\nAccuracy = 0.8442623\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5701 for sampling.\nAccuracy = 0.8668032999999999\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5702 for sampling.\nAccuracy = 0.8852459\nBootstrap Accuracy = 0.86360656 +- 0.015498961208726233\n"
]
],
[
[
"## Train a Neural Net Model with PCA + Network Features",
"_____no_output_____"
]
],
[
[
"networkPcaModel = \"networkPcaModel\"\ndemo.defineNnModel(networkPcaModel, deepLearnParam)",
"_____no_output_____"
],
[
"demo.trainNnModel(networkPcaModel,\"contentTrainPcaNetwork\",pcaFeatureList+networkFeatureList, deepLearnParam)",
"_____no_output_____"
]
],
[
[
"### Score the PCA + Network Model",
"_____no_output_____"
]
],
[
[
"demo.scoreNnModel(networkPcaModel,\"contentTestPcaNetwork\")",
" Descr Value\n0 Number of Observations Read 542\n1 Number of Observations Used 542\n2 Misclassification Error (%) 11.43911\n3 Loss Error 0.493904\nAccuracy = 0.8856089\n"
]
],
[
[
"### Bootstrap Runs",
"_____no_output_____"
]
],
[
[
"demo.bootstrapNnModel(networkPcaModel,\"contentTrainPcaNetwork\",\"contentTestPcaNetwork\",pcaFeatureList+networkFeatureList, deepLearnParam, 25);",
"NOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5678 for sampling.\nAccuracy = 0.8872951\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5679 for sampling.\nAccuracy = 0.8688525\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5680 for sampling.\nAccuracy = 0.8852459\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5681 for sampling.\nAccuracy = 0.8729508\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5682 for sampling.\nAccuracy = 0.8790984000000001\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5683 for sampling.\nAccuracy = 0.8831967000000001\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5684 for sampling.\nAccuracy = 0.8770492\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5685 for sampling.\nAccuracy = 0.8995902\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5686 for sampling.\nAccuracy = 0.8831967000000001\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5687 for sampling.\nAccuracy = 0.8852459\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5688 for sampling.\nAccuracy = 0.8709015999999999\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5689 for sampling.\nAccuracy = 0.8872951\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5690 for sampling.\nAccuracy = 0.8770492\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5691 for sampling.\nAccuracy = 0.8975409999999999\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5692 for sampling.\nAccuracy = 0.8811475\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5693 for sampling.\nAccuracy = 0.8811475\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5694 for sampling.\nAccuracy = 0.8893443000000001\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5695 for sampling.\nAccuracy = 0.8811475\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5696 for sampling.\nAccuracy 
= 0.8647541\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5697 for sampling.\nAccuracy = 0.8872951\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5698 for sampling.\nAccuracy = 0.8811475\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5699 for sampling.\nAccuracy = 0.8872951\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5700 for sampling.\nAccuracy = 0.8852459\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5701 for sampling.\nAccuracy = 0.8790984000000001\nNOTE: Simple Random Sampling is in effect.\nNOTE: Using SEED=5702 for sampling.\nAccuracy = 0.8852459\nBootstrap Accuracy = 0.8822950840000001 +- 0.0077562012301734444\n"
]
],
[
[
"# Session Cleanup",
"_____no_output_____"
]
],
[
[
"s.terminate();",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
eca4bf70ae47fc5271aca0772b94ccbf9a4f4d15 | 386,816 | ipynb | Jupyter Notebook | poems_Disney.ipynb | marisbotero/poesIA | 5868bed658e3797d62777b42ff038e2ddc3d7fb3 | [
"Apache-2.0"
] | null | null | null | poems_Disney.ipynb | marisbotero/poesIA | 5868bed658e3797d62777b42ff038e2ddc3d7fb3 | [
"Apache-2.0"
] | null | null | null | poems_Disney.ipynb | marisbotero/poesIA | 5868bed658e3797d62777b42ff038e2ddc3d7fb3 | [
"Apache-2.0"
] | null | null | null | 43.084874 | 227 | 0.392716 | [
[
[
"<a href=\"https://colab.research.google.com/github/marisbotero/poesIA/blob/main/poems_Disney.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"## Poemitas๐",
"_____no_output_____"
]
],
[
[
"import tensorflow as tf\nimport string\nimport requests\nimport pandas as pd\n#path_to_file = '/content/texto.txt'\nresponse = requests.get('https://raw.githubusercontent.com/laxmimerit/poetry-data/master/disney.txt')\nprint(response.text)\ndata = response.text.splitlines()\nprint('Length of data: ', len(data))",
"When somebody loved me\nEverything was beautiful\nEvery hour we spent together\nLives within my heart And when she was sad\nI was there to dry her tears\nAnd when she was happy so was I\nWhen she loved me Through the summer and the fall\nWe had each other that was all\nJust she and I together\nLike it was meant to be And when she was lonely\nI was there to comfort her\nAnd I knew that she loved me So the years went by\nI stayed the same\nShe began to drift away\nI was left alone Still I waited for the day\nWhen she'd say\nI will always love you Lonely and forgotten\nNever thought she'd look my way\nShe smiled at me and held me Just like she used to do\nLike she loved me\nWhen she loved me When somebody loved me\nEverything was beautiful\nEvery hour we spent together\nLives within my heart When she loved me, hey Sentimentos sรฃo\nFรกceis de mudar\nMesmo entre quem\nNรฃo vรช que alguรฉm\nPode ser seu par Basta um olhar\nQue o outro nรฃo espera\nPara assustar e atรฉ perturbar\nMesmo a bela e a fera Sentimento assim\nSempre รฉ uma surpresa\nQuando ele vem\nNada o detรฉm\nร uma chama acesa Sentimentos vรชm\nPara nos trazer\nNovas sensaรงรตes\nDoces emoรงรตes\nE um novo prazer E numa estaรงรฃo\nComo a primavera\nSentimentos sรฃo\nComo uma canรงรฃo\nPara a bela e a fera Sentimentos sรฃo\nComo uma canรงรฃo\nPara a bela e a fera Para a bela e a fera Tale as old as time\nTrue as it can be\nBarely even friends\nThen somebody bends\nUnexpectedly. 
Just a little change\nSmall to say the least\nBoth a little scared\nNeither one prepared\nBeauty and the Beast Ever just the same\nEver a surprise\nEver as before\nEver just as sure\nAs the sun will rise Tale as old as time\nTune as old as song\nBittersweet and strange\nFinding you can change\nLearning you were wrong Certain as the sun\nRising in the east\nTale as old as time\nSong as old as rhyme\nBeauty and the Beast Tale as old as time\nSong as old as rhyme\nBeauty and the Beast Look at this stuff, isn't it neat?\nWouldn't you think my collection's complete?\nWouldn't you think I'm the girl\nThe girl who has everything?\nLook at this trove, treasures untold\nHow many wonders can one cavern hold?\nLookin' around here you'd think\nSure, she's got everything I've got gadgets and gizmos aplenty\nI've got whooz-its and whatz-its galore\nYou want thingamabobs?\nI got twenty\nBut who cares?\nNo big deal\nI want more I want to be where the people are\nI want to see\nWant to see 'em dancing\nWalking around on those\nWhadd'ya call 'em? Oh, feet\nFlipping your fins you don't get too far\nLegs are required for jumping, dancing\nStrolling along down a\nWhat's that word again? Street Up where they walk\nUp where they run\nUp where they stay all day in the sun\nWandering free\nWish I could be\nPart of that world What would I give\nIf I could live\nOut of these waters?\nWhat would I pay\nTo spend a day\nWarm on the sand?\nBetcha on land\nThey understand\nThat they don't reprimand their daughters\nBright young women\nSick of swimming\nReady to stand And I'm ready to know what the people know\nAsk 'em my questions\nAnd get some answers\nWhat's a fire and why does it\nWhat's the word? Burn? 
When's it my turn?\nWouldn't I love\nLove to explore that shore above?\nOut of the sea\nWish I could be\nPart of that world\nOut of the sea\nWish I could be\nPart of your world You think I'm an ignorant savage\nAnd you've been so many places\nI guess it must be so\nBut still I cannot see\nIf the savage one is me\nHow can there be so much that you don't know\nYou don't know You think you own whatever land you land on\nThe Earth is just a dead thing you can claim\nBut I know every rock and tree and creature\nHas a life, has a spirit, has a name You think the only people who are people\nAre the people who look and think like you\nBut if you walk the footsteps of a stranger\nYou'll learn things you never knew, you never knew Have you ever heard the wolf cry to the blue corn moon\nOr asked the grinning bobcat why he grinned\nCan you sing with all the voices of the mountains\nCan you paint with all the colors of the wind\nCan you paint with all the colors of the wind Come run the hidden pine trails of the forest\nCome taste the sun sweet berries of the Earth\nCome roll in all the riches all around you\nAnd for once, never wonder what they're worth The rainstorm and the river are my brothers\nThe heron and the otter are my friends\nAnd we are all connected to each other\nIn a circle, in a hoop that never ends How high will the sycamore grow\nIf you cut it down, then you'll never know\nAnd you'll never hear the wolf cry to the blue corn moon\nFor whether we are white or copper skinned\nWe need to sing with all the voices of the mountains\nWe need to paint with all the colors of the wind You can own the Earth and still\nAll you'll own is Earth until\nYou can paint with all the colors of the wind It's a small world, it's a small world\nIt's a small world, it's a small world It's a world of laughter, a world of tears\nIt's a world of hope and a world of fears\nThere's so much that we share, that it's time we're aware\nIt's a small world after all (Everybody now) it's a small 
world after all, it's a small world after all\n(Everybody now) it's a small world after all, it's a small, small, small, small world There is just one moon and one golden sun\nAnd a smile means friendship to everyone\nThough the mountains be wide and the oceans are wide\nIt's a small world after all (Come on, come on) (Everybody now) it's a small world after all (can you hear me now) it's a small world after all\n(Everybody now) it's a small world after all, it's a small, small, small, small world It's a small world after all, no matter if you're big or small\nCome on everybody let's bounce to this\nLet's play some love with a little twist\nIt's a small world, it's a small world It's a world of laughter, a world of tears\nIt's a world of hope and a world of fears\nThere's so much that we share, that it's time we're aware\nIt's a small world after all, it's a small world (Everybody now) it's a small world after all, it's a small world after all\n(Everybody now) it's a small world after all, it's a small, small, small, small world (can you hear me now?) It's a small world after all (come on, come on)\nIt's a small, small, small, small world (come on, come on)\nIt's a small world, after all (can you hear me now?) 
It's a small world after all (come on, come on)\nIt's a small world after all\nIt's a small, small, small, small world (pretty small eh) It's a small world after all, it's a small world after all\nIt's a small world after all, it's a small, small, small, small world\nIt's a small world after all, it's a small world after all\nIt's a small world after all, it's a small, small, small, small world A dream is a wish your heart makes\nWhen you're fast asleep\nIn dreams you will loose your heartache\nWhatever you wish for, you keep Have faith in your dreams and someday\nYour rainbow will come smiling through\nNo matter how your heart is grieving\nIf you keep on believing\nThe dream that you wish will come true Oh, that clock!\nOh, killjoy\nI hear you, come on, get up you say\nTime to start another day\nEven he orders me around\nWell, there's one thing\nThey can't order me to stop dreaming\nAnd perhaps someday\nThe dreams that I wish will come true No matter how your heart is grieving\nIf you keep on believing\nThe dream that you wish will come true I've been staring at the edge of the water\nLong as I can remember\nNever really knowing why\nI wish I could be the perfect daughter\nBut I come back to the water\nNo matter how hard I try Every turn I take\nEvery trail I track\nEvery path I make\nEvery road leads back to the place I know\nWhere I cannot go\nWhere I long to be See the line where the sky meets the sea\nIt calls me\nAnd no one knows how far it goes\nIf the wind in my sail on the sea stays behind me\nOne day I'll know\nIf I go there's just no telling how far I'll go I know everybody on this island\nSeems so happy on this island\nEverything is by design\nI know everybody on this island\nHas a role on this island\nSo maybe I can roll with mine I can lead with pride\nI can make us strong\nI'll be satisfied if I play along\nBut the voice inside sings a different song\nWhat is wrong with me See the light as it shines on the sea\nIt's blinding\nBut no one knows how 
deep it goes\nAnd it seems like it's calling out to me\nSo come find me\nAnd let me know\nWhat's beyond that line\nWill I cross that line The line where the sky meets the sea\nIt calls me\nAnd no one knows how far it goes\nIf the wind in my sail on the sea stays behind me\nOne day I'll know\nHow far I'll go Look at me\nI will never pass for a perfect bride\nOr a perfect daughter\nCan it be\nI'm not meant to play this part?\nNow I see\nThat if I were truly to be myself\nI would break my fam'ly's heart Who is that girl I see\nStaring straight\nBack at me?\nWhy is my reflection someone\nI don't know?\nSomehow I cannot hide\nWho I am\nThough I've tried\nWhen will my reflection show\nWho I am inside?\nWhen will my reflection show\nWho I am inside? When you wish upon a star\nMakes no difference who you are\nAnything your heart desires\nWill come to you\nIf your heart is in your dream\nNo request is too extreme\nWhen you wish upon a star\nAs dreamers do\nFate is kind\nShe brings to those to love\nThe sweet fulfillment of\nTheir secret longing\nLike a bolt out of the blue\nFate steps in and sees you through\nWhen you wish upon a star\nYour dreams come true Belle:\nThere's something sweet and almost kind\nBut he was mean and he was coarse and unrefined\nAnd now he's dear and so unsure\nI wonder why I didn't see it there before Beast:\nShe glanced this way, I thought I saw\nAnd when we touched she didn't shudder at my paw\nNo it can't be, I'll just ignore\nBut then she's never looked at me that way before Belle:\nNew and a bit alarming\nWho'd have ever thought that this could be?\nTrue that he's no Prince Charming\nBut there's something in him that I simply didn't see Lumiere:\nWell, who'd have thought? Mrs Potts:\nWell, bless my soul Cogsworth:\nWell, who'd have known? Mrs Potts:\nWell, who indeed? Lumiere:\nAnd who'd have guessed they'd come together on their own? Mrs Potts:\nIt's so peculiar,\nWait and see. 
All three:\nWe'll wait and see\nA few days more\nThere may be something there that wasn't there before Cogsworth:\nWell here's a thought, perhaps there's something there that wasn't there before Chip:\nWhat mama? Mrs Potts:\nThere may be something there that wasn't there before If there's a prize for rotten judgment\nI guess I've already won that\nNo man is worth the aggravation\nThat's ancient history, been there, done that\nWho d'you think you're kidding\nHe's the earth and heaven to you\nTry to keep it hidden\nHoney we can see right through you (Oh no)\nGirl, you can't conceal it\nWe know how you're feeling\nWho you thinking of?\nNo chance, no way, I won't say it, no, no\nYou swoon, you sigh, why deny it, oh, oh\nIt's too clichรฉ, I won't say I'm in love\nI thought my heart had learned its lesson\nIt feels so good when you start out\nMy head is screaming \"get a grip, girl\"\n\"Unless you're dying to cry your heart out\"\nGirl, you can't deny it\nWho you are and how you're feeling\nBaby we're not buying\nHon we saw you hit the ceiling\nFace it like a grown-up\nWhen you gonna own up that you got, got, got it bad?\nNo chance, no way, I won't say it, no no\nGive up, give in, check the grin, you're in love\nThis scene won't play, I won't say I'm in love\nWe'll do it until you admit you're in love\nYou're way off base, I won't say it\nGet off my case, I won't say it\nGirl don't be proud, it's okay you're in love\nAt least out loud I won't say I'm in love Tale as old as time\nTrue as it can be\nBarely even friends\nThen somebody bends\nUnexpectedly. 
Just a little change\nSmall to say the least\nBoth a little scared\nNeither one prepared\nBeauty and the Beast Ever just the same\nEver a surprise\nEver as before\nEver just as sure\nAs the sun will rise Tale as old as time\nTune as old as song\nBittersweet and strange\nFinding you can change\nLearning you were wrong Certain as the sun\nRising in the east\nTale as old as time\nSong as old as rhyme\nBeauty and the Beast Tale as old as time\nSong as old as rhyme\nBeauty and the Beast Little town\nIt's a quiet village\nEvery day\nLike the one before\nLittle town\nFull of little people\nWaking up to say Bonjour\nBonjour\nBonjour Bonjour Bonjour There goes the baker with his tray, like always\nThe same old bread and rolls to sell\nEvery morning just the same\nSince the morning that we came\nTo this poor provincial town\nGood Morning, Belle\n'Morning, Monsieur\nWhere are you off to\nThe bookshop. I just finished the most wonderful story\nabout a beanstalk and an ogre and a\nThat's nice. Marie, the baguettes hurry up Look there she goes that girl is strange, no question\nDazed and distracted, can't you tell\nNever part of any crowd\n'Cause her head's up on some cloud\nNo denying she's a funny girl that Belle Bonjour\nGood day\nHow is your family Bonjour\nGood day\nHow is your wife I need six eggs\nThat's too expensive There must be more than this provincial life Ah, Belle\nGood Morning. I've come to return the book I borrowed\nFinished already\nOh, I couldn't put it down. Have you got anything new\nHa Ha! Not since yesterday\nThat's all right. I'll borrow, this one\nThat one? But you've read it twice\nWell, it's my favorite! Far off places, daring sword fights\nmagic spells, a prince in disguise\nIf you like it all that much, it's yours\nBut sir\nI insist\nWell, thank you. 
Thank you very much Look there she goes that girl is so peculiar\nI wonder if she's feeling well\nWith a dreamy far-off look\nAnd her nose stuck in a book\nWhat a puzzle to the rest of us is Belle Oh, isn't this amazing?\nIt's my favorite part because you'll see\nHere's where she meets Prince Charming\nBut she won't discover that it's him 'til chapter three Now it's no wonder that her name means \"beauty\"\nHer looks have got no parallel\nBut behind that fair facade\nI'm afraid she's rather odd\nVery diff'rent from the rest of us\nShe's nothing like the rest of us\nYes, diff'rent from the rest of us is Belle Wow! You didn't miss a shot, Gaston, You're the greatest\nhunter in the whole world!\nI know.\nNo beast alive stands a chance against you. Ha ha ha and\nno girl, for that matter.\nIt's true, LeFou. And I've got my sights set on that one\nHmm the inventor's daughter\nShe's the one - the lucky girl I'm going to marry\nBut she's\nThe most beautiful girl in town.\nI know, but\nThat makes her the best. And don't I deserve the best\nWell, of course! 
I mean you do, but Right from the moment when I met her, saw her\nI said she's gorgeous and I fell\nHere in town there's only she\nWho is beautiful as me\nSo I'm making plans to woo and marry Belle Look there he goes\nIsn't he dreamy\nMonsieur Gaston\nOh he's so cute\nBe still my heart\nI'm hardly breathing\nHe's such a tall, dark, strong and handsome brute Bonjour\nPardon\nGood day\nMais oui\nYou call this bacon\nWhat lovely grapes\nSome cheese\nTen yards\nOne pound\n'Scuse me\nI'll get the knife\nPlease let me through\nThis bread\nThose fish\nIt's stale\nThey smell\nMadame's mistaken There must be more than this provincial life\nJust watch, I'm going to make Belle my wife Look there she goes a girl who's strange but special\nA most peculiar mademoiselle\nIt's a pity and a sin\nShe doesn't quite fit in\n'Cause she really is a funny girl\nA beauty but a funny girl\nShe really is a funny girl\nThat Belle Master, I don't think you quite realize what you got here!\nSo, why don't you just ruminate,\nWhilst I illuminate the possibilities! Well, Ali Baba had them forty thieves,\nScheherazade had a thousand tales.\nBut master you're in luck, 'cause up your sleeves\nYou've got a brand of magic never fails You've got some power in your corner now!\nSome heavy ammunition in your camp!\nYou got some punch, pizzaz, yahoo and how...\nSee, all you gotta do is rub that lamp, and I'll say: Chorus:\n\"Mr. Aladdin, sir, what will your pleasure be?\nLet me take your order, jot it down?\"\nYou ain't never had friend like me Life is your restaurant and I'm your maitre'd\nCome on whisper what it is you want;\nYou ain't never had friend like me! Yes sir, we pride ourselves on service.\nYou're the boss, the king, the shah.\nSay what you wish; it's yours, true dish.\nHow 'bout a little more baklava? Have some of column A try All of column B,\nI'm in the mood to help you, dude.\nYou ain't never had friend like me Oh my. No no. 
My my my.\nCan your friends do this?\nCan your friends do that?\nCan your friends pull this\nOut their little hat?\nCan your friends go poof?\nWell looky here!\nCan your friends go abracadabra, let her rip,\nAnd then make the sucker disappear? So don't just sit there slack-jawed, buggy-eyed,\nI'm here to answer all your midnight prayers,\nYou've got me bona fide certified;\nYou've got a genie for charge d'affaires. I've got a powerful urge to help you out,\nSo what you wish, I really wanna know?\nYou've got a list that's three miles long, no doubt,\nWell all you've gotta do is rub like so, and oh ยญ- Mr. Aladdin, sir, have a wish or two or three.\nI'm on the job you big nabob!\nYou ain't never had a friend, never had a friend,\nYou ain't never had a friend, never had a friend,\nYou ain't never had a friend like me.\nYou ain't never had a friend like me The seaweed is always greener\nIn somebody else's lake\nYou dream about going up there\nBut that is a big mistake\nJust look at the world around you\nRight here on the ocean floor\nSuch wonderful things surround you\nWhat more is you lookin' for? Under the sea\nUnder the sea\nDarling it's better\nDown where it's wetter\nTake it from me\nUp on the shore they work all day\nOut in the sun they slave away\nWhile we devotin'\nFull time to floatin'\nUnder the sea Down here all the fish is happy\nAs off through the waves they roll\nThe fish on the land ain't happy\nThey sad 'cause they in their bowl\nBut fish in the bowl is lucky\nThey in for a worser fate\nOne day when the boss get hungry\nGuess who's gon' be on the plate? 
Under the sea\nUnder the sea\nNobody beat us\nFry us and eat us\nIn fricassee\nWe what the land folks loves to cook\nUnder the sea we off the hook\nWe got no troubles\nLife is the bubbles\nUnder the sea (Under the sea)\nUnder the sea (Under the sea) Since life is sweet here\nWe got the beat here\nNaturally (Naturally)\nEven the sturgeon an' the ray\nThey get the urge 'n' start to play\nWe got the spirit\nYou got to hear it\nUnder the sea The newt play the flute\nThe carp play the harp\nThe plaice play the bass\nAnd they soundin' sharp\nThe bass play the brass\nThe chub play the tub\nThe fluke is the duke of soul\n(Yeah)\nThe ray he can play\nThe lings on the strings\nThe trout rockin' out\nThe blackfish she sings\nThe smelt and the sprat\nThey know where it's at\nAn' oh that blowfish blow Yeah, under the sea (Under the sea)\nUnder the sea (Under the sea)\nWhen the sardine\nBegin the beguine\nIt's music to me (It's music to me)\nWhat do they got? A lot of sand\nWe got a hot crustacean band\nEach little clam here\nKnow how to jam here\nUnder the sea\nEach little slug here\nCuttin' a rug here\nUnder the sea\nEach little snail here\nKnow how to wail here\nThat's why it's hotter\nUnder the water\nYa we in luck here\nDown in the muck here\nUnder the sea [Gramma Tala:]\nI know a girl from an island\nShe stands apart from the crowd\nShe loves the sea and her people\nShe makes her whole family proud\nSometimes the world seems against you\nThe journey may leave a scar\nBut scars can heal and reveal just\nWhere you are The people you love will change you\nThe things you have learned will guide you\nAnd nothing on earth can silence\nThe quiet voice still inside you\nAnd when that voice starts to whisper\nMoana you've come so far\nMoana listen\nDo you know who you are? 
[Moana:]\nWho am I?\nI am a girl who loves my island\nI'm the girl who loves the sea\nIt calls me\nI am the daughter of the village chief\nWe are descended from voyagers\nWho found their way across the world\nThey call me\nI've delivered us to where we are\nI have journeyed farther\nI am everything I've learned and more\nStill it calls me\nAnd the call isn't out there at all, it's inside me\nIt's like the tide; always falling and rising\nI will carry you here in my heart you'll remind me\nThat come what may\nI know the way\nI am Moana! The only way to get what you want is to become a human yourself\nCan you do that? My dear, sweet child, that's what I do\nIt's what I live for\nTo help unfortunate merfolk like yourself\nPoor souls with no one else to turn to I admit that in the past I've been a nasty\nThey weren't kidding when they called me, well, a witch\nBut you'll find that nowadays\nI've mended all my ways\nRepented, seen the light, and made a switch\nTo this\nAnd I fortunately know a little magic\nIt's a talent that I always have possessed\nAnd dear lady, please don't laugh\nI use it on behalf\nOf the miserable, the lonely, and depressed (Pathetic) Poor unfortunate souls\nIn pain, in need\nThis one longing to be thinner\nThat one wants to get the girl\nAnd do I help them?\nYes, indeed\nThose poor unfortunate souls\nSo sad, so true\nThey come flocking to my cauldron\nCrying, \"Spells, Ursula, please!\"\nAnd I help them\nYes I do Now it's happened once or twice\nSomeone couldn't pay the price\nAnd I'm afraid I had to rake 'em 'cross the coals\nYes I've had the odd complaint\nBut on the whole I've been a saint\nTo those poor unfortunate souls Have we got a deal?\nIf I become human, I'll never be with my father or sisters again\nBut you'll have your man\nLife's full of tough choices, isn't it?\nOh, and there is one more thing\nWe haven't discussed the subject of payment\nBut I don't have-\nI'm not asking much, just a token really, a trifle\nWhat I want from you is 
your voice\nBut without my voice, how can I-\nYou'll have your looks, your pretty face\nAnd don't underestimate the importance of body language, ha! The men up there don't like a lot of blabber\nThey think a girl who gossips is a bore\nYet on land it's much preferred for ladies not to say a word\nAnd after all dear, what is idle babble for?\nCome on, they're not all that impressed with conversation\nTrue gentlemen avoid it when they can\nBut they dote and swoon and fawn\nOn a lady who's withdrawn\nIt's she who holds her tongue who gets a man Come on you poor unfortunate soul\nGo ahead\nMake your choice\nI'm a very busy woman and I haven't got all day\nIt won't cost much\nJust your voice!\nYou poor unfortunate soul\nIt's sad but true\nIf you want to cross the bridge, my sweet\nYou've got the pay the toll\nTake a gulp and take a breath\nAnd go ahead and sign the scroll\nFlotsam, Jetsam, now I've got her, boys\nThe boss is on a roll\nThis poor unfortunate soul Beluga sevruga\nCome winds of the Caspian Sea\nLarengix glaucitis\nEt max laryngitis\nLa voce to me Now, sing\nAa-aa-aah, a-aa-aah\nKeep singing!\nAa-aa-aah, a-aa-aah (Spoken) Mama, I don't have time for dancing\n(Sung) That's just gonna have to wait a while\nAin't got time for messing around\nAnd it's not my style This old town can slow you down\nPeople taking the easy way\nBut I know exactly where I'm going\nI'm getting closer and closer everyday And I'm almost there\nI'm almost there\nPeople 'round here think I'm crazy\nBut I don't care Trials and tribulations\nI've had my share\nBut there ain't nothing gonna stop me now\nCause I'm almost there I remember daddy told me\n\"Fairy tales can come true\nBut you gotta make it happen\nIt all depends on you\" So I work real hard each and everyday\nNow things for sure are going my way\nJust doing what I do\nLook out boys I'm coming through And I'm almost there\nI'm almost there\nPeople gonna come here from everywhere\nAnd I'm almost there I'm almost there There's been 
trials and tribulations\nYou know I've had my share\nBut I've climbed a mountain, I've crossed a river\nAnd I'm almost there I'm almost there I'm almost there I know you\nI walked with you\nOnce Upon a dream\nI know you That gleam in your eyes\nIs so familiar a gleam\nYet I know it's true\nThat visions are seldom what they seem But if I know you\nI know what you do\nYou'll love me at once\nThe way you did once upon a dream La da la da la ahahahahah\nBut if I know you\nI know what you do\nYou'll love me at once The way you did once upon a dream\nI know you\nI walked with you\nOnce Upon a dream I know you\nThat gleam in your eyes\nIs so familiar a gleam\nYet I know it's true That visions are seldom what they seem\nBut if I know you\nI know what you do\nYou'll love me at once The way you did once upon a dream The King and his men\nstole the queen from her bed,\nand bound her in her bones.\nThe seas be ours, and by the powers;\nwhere we will, we'll roam. Yo ho, all hands,\nhoist the colors high.\nHeave ho, thieves and beggars;\nnever shall we die. Now, some have died and some are alive\nand others sail on the sea.\nWith the keys to the cage\nand the devil to pay,\nwe lay to the fiddler's green. Yo ho haul together,\nhoist the colors high.\nHeave ho, thieves and beggars;\nnever shall we die. The bell has been raised\nfrom its watery grave,\nhear its sepulchral tone.\nA call to all; pay heed the squall,\nand turn your sails to home. Yo ho, haul together,\nhoist the colors high.\nHeave ho, thieves and beggars;\nnever shall we die. Yo ho, haul together,\nhoist the colors high.\nHeave ho, thieves and beggars;\nnever shall we die. Yo ho, haul together,\nhoist the colors high.\nHeave ho, thieves and beggars;\nnever shall we die. The king and his men\nstole the queen from her bed,\nand bound her in her bones.\nThe seas be ours, and by the powers;\nwhere we will; we'll roam. 
So many times out there\nI've watched a happy pair of lovers walking in the night\nThey had a kind of glow around them\nIt almost looked like heaven's light I knew I'd never know that warm and loving glow\nThough I might wish with all my might\nNo face as hideous as my face\nWas ever meant for heaven's light But suddenly an angel has smiled at me\nAnd kissed my cheek without a trace of fright I dare to dream that she might even care for me\nAnd as I ring these bells tonight\nMy cold dark tower seems so bright\nI swear it must be Heaven's light Confiteor deo omnipotenti\n(I confess to God almighty)\nBeatae Mariae semper virgini\n(To blessed Mary ever virgin)\nBeato Michaeli archangelo\n(To the blessed Archangel Michael)\nSanctis apostolis omnibus sanctis\n(To the Holy Apostles, to all the saints) Beata Maria, you know I am a righteous man\nOf my virtue I am justly proud\nEt tibit Pater\n(And to you, Father) Beata Maria, you know I'm so much purer than\nThe common, vulgar, weak, licentious crowd\nQuia peccavi nimis\n(That I have sinned) Then tell me, Maria, why I see her dancing there?\nWhy her smold'ring eyes still scorch my soul\nCogitatione\n(In thought) I feel her, I see her, the sun caught in her raven hair\nIs blazing in me out of all control\nVerbo et opere\n(In word and deed) Like fire, Hellfire\nThis fire in my skin\nThis burning desire\nIs turning me to sin\nIt's not my fault Mea culpa\n(Through my fault)\nI'm not to blame\nMea culpa\n(Through my fault) It is the gypsy girl\nThe witch who sent this flame\nMea maxima culpa\n(Through my most grievous fault)\nIt's not my fault\nMea culpa\n(Through my fault) If in God's plan\nMea culpa\n(Through my fault)\nHe made the devil so much\nStronger than a man\nMea maxima culpa\n(Through my most grievous fault) Protect me, Maria\nDon't let this siren cast her spell\nDon't let her fire sear my flesh and bone\nDestroy Esmeralda\nAnd let her taste the fires of hell\nOr else let her be mine and mine alone Minister Frollo, 
the gypsy has escaped\nWhat? No longer in the cathedral, she's gone\nBut how? Never mind, get out, you idiot\nI'll find her, I'll find her if I have to burn down all of Paris Hellfire, dark fire\nNow gypsy, it's your turn\nChoose me or your pyre\nBe mine or you will burn Kyrie Eleison\n(Lord, have mercy)\nGod, have mercy on her\nKyrie Eleison\n(Lord, have mercy) God, have mercy on me\nKyrie Eleison\n(Lord, have mercy)\nBut she will be mine\nOr she will burn I'm gonna be a mighty king\nSo enemies beware!\nWell, I've never seen a king of beasts\nWith quite so little hair\nI'm gonna be the mane event\nLike no king was before I'm brushing up on looking down\nI'm working on my ROAR\nThus far, a rather uninspiring thing\nOh, I just can't wait to be king!\n(You've rather a long way to go, young master\nif you think) No one saying do this\n(Now when I said that, I)\nNo one saying be there\n(What I meant was)\nNo one saying stop that\n(Look, what you don't realize) No one saying see here\n(Now see here!)\nFree to run around all day\n(Well, that's definitely out)\nFree to do it all my way!\nI think it's time that you and I Arranged a heart to heart\nKings don't need advice\nFrom little hornbills for a start\nIf this is where the monarchy is headed\nCount me out!\nOut of service, out of Africa I wouldn't hang about\nThis child is getting wildly out of wing\nOh, I just can't wait to be king!\nEverybody look left\nEverybody look right\nEverywhere you look I'm Standing in spotlight!\nNot yet!\nLet every creature go for broke and sing\nLet's hear it in the herd and on the wing\nIt's gonna be King Simba's finest fling\nOh, I just can't wait to be king!\nOh, I just can't wait to be king!\nOh, I just can't wait to be king! 
A word's just a word\n'Til you mean what you say\nAnd love isn't love\n'Til you give it away We've all got a gift\nYeah, something to give\nTo make a change Send it on, on and on\nJust one hand can heal another\nBe a part, reach a heart\nJust one spark starts a fire With one little action\nThe chain reaction will never stop\nMake it strong\nShine a light, and send it on Just smile, and the world\nWill smile along with you\nThat small act of love\nIs meant for one who become two If we take the chances\nTo change circumstances\nImagine all we can do If we send it on, on and on\nJust one hand can heal another\nBe a part, reach a heart\nJust one spark starts a fire With one little action\nThe chain reaction will never stop\nMake it strong\nShine a light, and send it on\nSend it on, ooh, send it on There's power in all\nOf the choices we make\nSo I'm starting now\nThere's not a moment to waste A word's just word\n'Til you mean what you say\nAnd love isn't love\n'Til you give it away Send it on, on and on\nJust one hand can heal another\nBe a part, reach a heart\nJust one spark starts a fire With one little action\nThe chain reaction will never stop\nMake it strong\nShine a light, and send it on Send it on, on and on\nJust one hand can heal another\nBe a part, reach a heart\nJust one spark starts a fire With one little action\nThe chain reaction will help things start\nMake it strong\nShine a light, and send it on\nShine a light, and send it on\nShine a light, and send it on Every time she'd find a minute\nThat's the time that they begin it\nCinderelly, Cinderelly (Cinderella!) 
Cinderelly, Cinderelly\nNight and day it's Cinderelly\nMake the fire, fix the breakfast\nWash the dishes, do the mopping\nAnd the sweeping and the dusting\nThey always keep her hopping\nShe goes around in circles till she's very, very dizzy\nStill they holler \"Keep a-busy, Cinderelly!\" We can do it, we can do it\nWe can help our Cinderelly\nWe can make her dress so pretty\nThere's nothing to it, really\nWe'll tie a sash around it\nPut a ribbon through it\nWhen dancing at the ball she'll be more beautiful than all\nIn the lovely dress we'll make for Cinderelly! Hurry, hurry, hurry, hurry\nGonna help our Cinderelly\nGot no time to dilly-dally\nWe gotta get a-goin'\nI'll cut it with these scissors!\nAnd I can do the sewing!\nLeave the sewing to the women\nYou go get some trimmin'\nAnd we'll make a lovely dress for Cinderelly, whoo! We'll make a lovely dress for Cinderelly! Morning in Paris, the city awakes\nTo the bells of Notre Dame\nThe fisherman fishes, the bakerman bakes\nTo the bells of Notre Dame\nTo the big bells as loud as the thunder\nTo the little bells soft as a psalm\nAnd some say the soul of the city is\nThe toll of the bells\nThe bells of Notre Dame Dark was the night when our tale was begun\nOn the docks near Notre Dame Four frightened gypsies slid silently under\nThe docks near Notre Dame But a trap had been laid for the gypsies\nAnd they gazed up in fear and alarm\nAt a figure whose clutches\nWere iron as much as the bells The bells of Notre Dame (Kyrie Eleison)\nJudge Claude Frollo longed\nTo purge the world\nOf vice and sin\n(Kyrie Eleison)\nAnd he saw corruption\nEv'rywhere\nExcept within Dies irae, dies illa\n(Dies irae, dies illa)\nDies irae, dies illa\n(Dies irae, dies illa)\nSolvet saeclum in favilla\nTeste David cum sibylla\nQuantus tremor est futurus\nQuando Judex est venturus See there the innocent blood you have spilt\nOn the steps of Notre Dame Now you would add this child's blood to your guilt\nOn the steps of Notre Dame You can lie to 
yourself and your minions\nYou can claim that you haven't a qualm\nBut you never can run from\nNor hide what you've done from the eyes\nThe very eyes of Notre Dame (Kyrie Eleison)\nAnd for one time in his live\nOf power and control\n(Kyrie Eleison)\nFrollo felt a twinge of fear\nFor his immortal soul Just so he's kept locked away\nWhere no one else can see Even this foul creature may\nYet prove one day to be\nOf use to me Now here is a riddle to guess if you can\nSing the bells of Notre Dame\nWho is the monster and who is the man Sing the bells, bells, bells, bells\nBells, bells, bells, bells\nBells of Notre Dame I can show you the world\nShining, shimmering, splendid\nTell me, princess, now when did\nYou last let your heart decide? I can open your eyes\nTake you wonder by wonder\nOver, sideways and under\nOn a magic carpet ride A whole new world\nA new fantastic point of view\nNo one to tell us no or where to go\nOr say we're only dreaming A whole new world\nA dazzling place I never knew\nBut when I'm way up here, it's crystal clear\nThat now I'm in a whole new world with you\nNow I'm in a whole new world with you Unbelievable sights\nIndescribable feeling\nSoaring, tumbling, freewheeling\nThrough an endless diamond sky A whole new world\nDon't you dare close your eyes\nA hundred thousand things to see\n(Hold your breath, it gets better) I'm like a shooting star\nI've come so far\nI can't go back\nTo where I used to be A whole new world\nEvery turn a surprise\nWith new horizons to pursue\nEvery moment red-letter I'll chase them anywhere\nThere's time to spare\nLet me share this whole new world with you A whole new world\nThat's where we'll be\nA thrilling chase\nA wondrous place\nFor you and me Somewhere out there,\nBeneath the pale moonlight,\nSomeone's thinking of me,\nAnd loving me tonight. Somewhere out there,\nSomeone's saying a prayer,\nThat we'll find one another,\nIn that big somewhere out there. 
And even though I know how very far apart we are,\nIt helps to think we might be wishing on the same bright star,\nAnd when the night wind starts to sing a lonesome lullaby,\nIt helps to think we're sleeping underneath the same big sky! Somewhere out there,\nIf love can see us through,\nThen we'll be together,\nSomewhere out there,\nOut where dreams\nCome true... There you see her\nSitting there across the way\nShe don't got a lot to say\nBut there's something about her\nAnd you don't know why\nBut you're dying to try\nYou wanna kiss the girl Yes, you want her\nLook at her, you know you do\nPossible she wants you too\nThere is one way to ask her\nIt don't take a word\nNot a single word\nGo on and kiss the girl Sing with me now\nSha la la la la la\nMy oh my\nLook like the boy too shy\nAin't gonna kiss the girl\nSha la la la la la\nAin't that sad?\nAin't it a shame?\nToo bad, he gonna miss the girl Now's your moment\nFloating in a blue lagoon\nBoy, you better do it soon\nNo time will be better\nShe don't say a word\nAnd she won't say a word\nUntil you kiss the girl Sha la la la la la\nDon't be scared\nYou got the mood prepared\nGo on and kiss the girl\nSha la la la la la\nDon't stop now\nDon't try to hide it how\nYou want to kiss the girl\nSha la la la la la\nFloat along\nAnd listen to the song\nThe song say kiss the girl\nSha la la la la la\nThe music play\nDo what the music say\nYou got to kiss the girl\nYou've got to kiss the girl\nYou wanna kiss the girl\nYou've gotta kiss the girl\nGo on and kiss the girl Zip-a-dee-doo-dah, zip-a-dee-ay\nMy, oh, my, what a wonderful day\nPlenty of sunshine headin' my way\nZip-a-dee-doo-dah, zip-a-dee-ay! Mister Bluebird's on my shoulder\nIt's the truth, it's \"actch'll\"\nEverything is \"satisfactch'll\" Zip-a-dee-doo-dah, zip-a-dee-ay\nWonderful feeling, wonderful day!\nYes, sir Zip-a-dee-doo-dah, zip-a-dee-ay\nMy, oh, my, what a wonderful day\nPlenty of sunshine headin' my way\nZip-a-dee-doo-dah, zip-a-dee-ay! 
Mister Bluebird's on my shoulder\nIt's the truth, it's \"actch'll\"\nEverything is \"satisfactch'll\" Zip-a-dee-doo-dah, zip-a-dee-ay\nWonderful feeling, feeling this way! Mister Bluebird's on my shoulder\nIt is the truth, it's \"actch'll\", hm? Where is that bluebird? Everything is \"satisfactch'll\"\nZip-a-dee-doo-dah, zip-a-dee-ay\nWonderful feeling, wonderful day! Bless my soul\nHerc was on a roll\nPerson of the week in every Greek opinion poll\nWhat a pro!\nHerc could stop a show\nPoint him at a monster and you're talking S.R.O. He was a no one\nA zero, zero\nNow he a hot shot\nHe's a hero\nHe was a kid with his act down pat\nFrom zero to hero in no time flat\nZero to hero, just like that! When he smiled the girls went wild\nWith oohs and ahhs\nAnd they slapped his face on ev'ry vase\n(On every vahse) From appearance fees and royalties\nOur Herc had cash to burn\nNow nouveau riche and famous\nHe could tell you what's a Grecian \"earn\"! Say \"Amen\"\nThere he goes again\nSweet and undefeated\nAnd an awesome 10 for 10\nFolks lined up\nJust to watch him flex\nAnd this perfect package packed a pair of pretty pecs Hercie, he comes, he sees, he conquers\nHoney, the crowds were going bonkers\nHe showed the moxie, brains, and spunk\nFrom zero to hero\nA major hunk\nZero to hero\nAnd who'd have thunk? Who put the \"glad\" in \"gladiator\"?\nHercules!\nWhose daring deeds are great theater?\nHercules!\nIs he bold?\nNo one braver!\nIs he sweet?\nOur favorite flavor! Hercules, Hercules\nHercules, Hercules\nHercules, Hercules Bless my soul\nHerc was on a roll\nUndefeated\nRiding high\nAnd the nicest guy\nNot conceited He was a nothin'\nA zero, zero\nNow he's a hot-shot\nHe's our hero\nHe hit the heights at breakneck speed! From zero to hero\nHerc is a hero\nNow he's a hero Yes indeed! Tale as old as time\nTrue as it can be\nBarely even friends\nThen somebody bends\nUnexpectedly. 
Just a little change\nSmall to say the least\nBoth a little scared\nNeither one prepared\nBeauty and the Beast Ever just the same\nEver a surprise\nEver as before\nEver just as sure\nAs the sun will rise Tale as old as time\nTune as old as song\nBittersweet and strange\nFinding you can change\nLearning you were wrong Certain as the sun\nRising in the east\nTale as old as time\nSong as old as rhyme\nBeauty and the Beast Tale as old as time\nSong as old as rhyme\nBeauty and the Beast Think of a wonderful thought\nAny merry little thought\nThink of Christmas, think of snow\nThink of sleigh bells off you go\nLike reindeer in the sky You can fly\nYou can fly\nYou can fly Think of the happiest things\nIt's the same as having wings\nTake the path that moonbeams make\nIf the moon is still awake\nYou'll see him wink his eye You can fly\nYou can fly\nYou can fly Up you go with a height and ho\nTo the stars beyond the blue\nThere's a Never Land waiting for you\nWhere all your happy dreams come true\nEvery dream that you dream will come true When there's a smile in your heart\nThere's no better time to start\nThink of all the joy you'll find\nWhen you leave the world behind\nAnd bid your cares goodbye You can fly\nYou can fly\nYou can fly\nYou can fly\nYou can fly When there's a smile in your heart\nThere's no better time to start\nThink of all the joy you'll find\nWhen you leave the world behind\nAnd bid your cares goodbye You can fly\nYou can fly\nYou can fly\nYou can fly\nYou can fly I've been standing at the edge of the water\nLong as I can remember\nNever really knowing why\nI wish I could be the perfect daughter\nBut I come back to the water\nNo matter how hard I try Every turn I take\nEvery trail I track\nEvery path I make\nEvery road leads back to the place I know\nWhere I cannot go\nWhere I long to be See the light where the sky meets the sea\nIt calls me\nNo one knows how far it goes\nIf the wind in my sail on the sea stays behind me\nOne day I'll know\nIf I go 
there's just no telling how far I'll go I know everybody on this island\nSeems so happy on this island\nEverything is by design\nI know everybody on this island\nHas a role on this island\nSo maybe I can roll with mine I can lead with pride\nI can make us strong\nI'll be satisfied if I play along\nBut the voice inside sings a different song\nWhat is wrong with me See the light as it shines on the sea\nIt's blinding\nBut no one knows how deep it goes\nAnd it seems like it's calling out to me\nSo come find me\nAnd let me know\nWhat's beyond that line\nWill I cross that line See the light where the sky meets the sea\nIt calls me\nAnd no one knows how far it goes\nIf the wind in my sail on the sea stays behind me\nOne day I'll know\nHow far I'll go Well Tamatoa hasn't always been this glam\nI was a drab little crab once\nNow I know I can be happy as a clam\nBecause I'm beautiful baby Did your granny say listen to your heart?\nBe who you are on the inside?\nI need three words to tear her argument apart: your granny lied I'd rather be shiny\nLike a treasure from a sunken pirate wreck\nScrub the deck and make it look shiny\nI will sparkle like a wealthy woman's neck\nJust a sec, don't ya know\nFish are dumb, dumb, dumb\nThey chase anything that glitters, beginners\nOh and here they come, come, come to the brightest thing that glitters\nMmm fish dinners\nI just love free food (free food) and you look like sea food (sea food) Well, well, well\nLittle Maui's havin' trouble with his look\nYou little semi, demi, mini god\nOuch what a terrible performance get the hook\nGet it?\nYou don't swing it like you used to, man\nYet I have to give you credit from my start\nAnd your tattoos on the outside\nFor just like you I made myself a work of art\nI'll never hide\nI can't I'm too shiny\nWatch me dazzle like a diamond in the rough\nStrut my stuff, my stuff is so shiny\nSend your armies but they'll never be enough\nMy shell's too tough, Maui man\nYou can try, try, try but you can't 
expect a demigod to beat a decapod\nLook it up\nYou will die, die, die now it's time for me to take apart your achin' heart Far from the ones who abandoned you\nChasin' the love of the humans who made you feel wanted\nYou tried to be tough but your armor's just not hard enough\nMaui, now it's time to kick your heiney\nEver seen someone so shiny Soak it in 'cause it's the last you'll ever see\nC'est la vie, mon ami\nI'm so shiny\nNow I'll eat you so prepare your final plea just for me\nYou'll never be quite as shiny\nYou wish you were nice and shiny What I love most about rivers is\nYou can't step in the same river twice\nThe water's always changing, always flowing But people, I guess, can't live like that\nWe all must pay a price\nTo be safe, we lose our chance of ever knowing What's around the river bend\nWaiting just around the river bend I look once more just around the river bend\nBeyond the shore where the gulls fly free\nDon't know what for what I dream the day might send\nJust around the river bend for me, coming for me I feel it there beyond those trees\nOr right behind these waterfalls\nCan I ignore that sound of distant drumming? For a handsome sturdy husband who builds handsome sturdy walls\nAnd never dreams that something might be coming? Just around the river bend\nJust around the river bend I look once more just around the river bend\nBeyond the shore somewhere past the sea\nDon't know what for why do all my dreams extend\nJust around the river bend, just around the river bend Should I choose the smoothest course\nSteady as the beating drum? Should I marry Kocoum?\nIs all my dreaming at an end?\nOr do you still wait for me, dream giver\nJust around the river bend? In every job that must be done\nThere is an element of fun\nYou find the fun and snap!\nThe job's a game And every task you undertake\nBecomes a piece of cake\nA lark! A spree! 
It's very clear to see that A spoonful of sugar helps the medicine go down\nThe medicine go down, the medicine go down\nJust a spoonful of sugar helps the medicine go down\nIn a most delightful way A robin feathering his nest has very little time to rest\nWhile gathering his bits of twine and twig\nThough quite intent in his pursuit\nHe has a merry tune to toot\nHe knows a song will move the job along For a spoonful of sugar helps the medicine go down\nThe medicine go down, the medicine go down\nJust a spoonful of sugar helps the medicine go down\nIn a most delightful way The honey bee that fetch the nectar from the flowers to the comb\nNever tired of ever buzzing to and fro\nBecause they take a little nip from every flower that they sip\nAnd hence (And hence)\nThey find (They find)\nTheir task is not a grind Ah, ah, ah! Ma chere Mademoiselle, it is with deepest pride\nand greatest pleasure that we welcome you tonight,\nAnd now we invite you to relax, let us pull up a\nchair as the dining room proudly presents\nyour dinner! Be our guest! Be our guest!\nPut our service to the test\nTie your napkin 'round your neck, cherie\nand we provide the rest\nsoup du jour, hot hors d'oeuvres\nwhy, we only live to serve\ntry the grey stuff, it's delicious\ndon't believe me? ask the dishes\nthey can sing, they can dance\nafter all, Miss, this is France\nand dinner here is never second best\ngo on, unfold your menu, take a glance then you'll be our guest, oui, our guest, be our guest beef ragout, cheese soufflรฉ\npie and pudding en flambรฉ\nwe`ll prepare and serve with flair a culinary cabaret!\nyou're alone, and you're scared\nbut the banquet's all prepared\nno one's gloomy or complaining\nwhile the flatware's entertaining\nwe tell jokes! 
I do tricks with my fellow candlesticks\nand it's all in perfect taste that you can bet\ncome on and lift your glass, you've won your own free pass\nto be our guest, if you're stressed\nits fine dining we suggest, be our guest, be our guest, be our guest! life is so unnerving\nfor a servant who's not serving\nhe's not whole without a soul to wait upon\nas, those good old days when we were useful...\nsuddenly those good old days are gone\nten years we've been rusting\nneeding so much more than dusting\nneeding exercise, a chance to use our skills!\nmost days we just lay around the castle\nflabby, fat, and lazy, you walked in and oops-a-daisy it's a guest it's a guest\nsakes alive, we'll be blessed\nwines been poured and thank the Lord\nI've had the napkins freshly pressed\nwith dessert, she'll want tea\nand my dear that fine with me\nwhile the cups do their soft-shoein'\nI'll be bubbling, I'll be brewing\nI'll get warm, piping hot\nheaven's sakes! is that a spot\nclean it up! we want the company impressed\nwe've got a lot to do, is it one lump or two\nfor you our guest, she's our guest,\nshe's our guest, she's our guest\nbe our guest! be our guest! be our guest Be our guest! be our guest!\nour command is your request\nit's been years since we've had anybody here\nand were obsessed, with you meal, with your ease\nyes, indeed, we aim to please\nwhile the candlelight's still glowing\nlet us help you, well keep going\ncourse by course, one by one\ntill you shout, enough I'm done\nthen well sing you off to sleep as you digest\ntonight you'll prop your feet up, but for now, let's eat up Be our guest! Be our guest! Be our guest!\nPlease, be our guest! 
Yo ho, yo ho, a pirate's life for me\nWe pillage, we plunder, we rifle, and loot\nDrink up, me 'earties, yo ho\nWe kidnap and ravage and don't give a hoot\nDrink up me 'earties, yo ho Yo ho, yo ho, a pirate's life for me\nWe extort, we pilfer, we filch, and sack\nDrink up, me 'earties, yo ho\nMaraud and embezzle, and even high-jack\nDrink up, me 'earties, yo ho Yo ho, yo ho, a pirate's life for me\nWe kindle and char, inflame and ignite\nDrink up, me 'earties, yo ho\nWe burn up the city, we're really a fright\nDrink up, me 'earties, yo ho We're rascals, scoundrels, villans, and knaves\nDrink up, me 'earties, yo ho\nWe're devils and black sheep, really bad eggs\nDrink up, me 'earties, yo ho Yo ho, yo ho, a pirate's life for me\nWe're beggars and blighters, ne'er-do-well cads\nDrink up, me 'earties, yo ho\nAye, but we're loved by our mommies and dads\nDrink up, me 'earties, yo ho Let it snow, let it snow, let it snow Oh, the weather outside is frightful\nBut the fire is so delightful\nAnd since we've no place to go\nLet it snow, let it snow, let it snow It doesn't show signs of stopping\nAnd I've bought some corn for popping\nThe lights are turned way down low\nLet it snow, let it snow, let it snow When we finally kiss goodnight\nHow I hate going out in the storm\nBut if you'll really hold me tight\nAll the way home I'll be warm The fire is slowly dying\nAnd my dear, we're still goodbying\nBut as long as you love me so\nLet it snow, let it snow, let it snow Let it snow, let it snow, let it snow When we finally kiss goodnight\nHow I hate going out in the storm\nBut if you'll really hold me tight\nAll the way home I'll be warm The fire is slowly dying\nAnd, my dear, we're still goodbying\nBut as long as you love me so\nLet it snow, let it snow, let it snow As long as you love me so\nLet it snow, let it snow, let it snow\nLet it snow, let it snow, let it snow\nLet it snow, let it snow Salagadoola mechicka boola\nBibbidi-bobbidi-boo\nPut them together and what have you 
got\nBibbidi-bobbidi-boo Salagadoola mechicka boola\nBibbidi-bobbidi-boo\nIt'll do magic, believe it or not\nBibbidi-bobbidi-boo Yes, salagadoola means\nMechicka booleroo\nBut the thing mabob that does the job\nIs bibbidi-bobbidi-boo I got myself a notion\nAnd one I know that you'll understand\nTo set the world in motion by reaching out for each other's hand Maybe we'll discover\nWhat we shoulda known all along\nOne way or another, together's where we both belong If we listen to each other's heart\nWe'll find we're never too far apart\nAnd maybe love is a reason why\nFor the first time ever, we're seein' it eye to eye If a wall should come between us\nToo high to climb, too hard to break through\nI know that love'll lead us\nAnd find a way to bring me to you So don't be in a hurry\nThink before you count us out\nYou don't have to worry\nI won't ever let you drown\n(Nothing's gonna stop us now) If we listen to each other's heart\nWe'll find we're never too far apart\nAnd maybe love is a reason why\nFor the first time ever, we're seein' it eye to eye Love is why we're seein' it eye to eye\n(Yes, we are seein' it eye to eye)\nSeein' it eye to\n(Love is why we're seein' it)\nI think we're seein' it eye to eye\n(eye to eye)\neye to\nWe're seein' it eye to eye\n(eye to eye!) If you're ever lonely, then stop!\nYou don't have to be\nAfter all, it's only a beat away from you to me\n(Take a look inside and see) If we listen to each other's heart\nWe'll find we're never too far apart\nAnd maybe love is a reason why\nFor the first time ever, we're seein' it eye to eye Seein' it eye to\nSeein' it eye to eye\nWe're seein' it eye to eye, baby\nFor the first time\nFor the first time eye to eye\nSeein't it\nSeein' it, baby\nSeein' it eye to\nFor the first time ever\nHey yeah\nSeein' it, baby\nWe're seein' it eye to eye\nSeein' it\n(C'mon, baby) eye to eye\neye to eye\neye to eye\neye to eye\nYeah\neye to eye! 
Now I'm the king of the swingers oh\nThe jungle VIP\nI've reached the top and had to stop\nAnd that's what botherin' me\nI wanna be a man, mancub\nAnd stroll right into town\nAnd be just like the other men\nI'm tired of monkeyin' around Oh, oobee doo\nI wanna be like you\nI wanna walk like you\nTalk like you, too\nYou'll see it's true\nAn ape like me\nCan learn to be humen too (Gee, cousin Louie\nYou're doin' real good) Now here's your part of the deal, 'cause\nLay the secret on me of man's red fire But I don't know how to make fire Now don't try to kid me, mancub\nI made a deal with you\nWhat I desire is man's red fire\nTo make my dream come true\nNow, give me the secret, mancub\nC'mon, clue me what to do\nGive me the power of man's red flower\nSo I can be like you You\nI wanna be like you\nI wanna walk like you\nTalk like you, too\nYou'll see it's true\nSomeone like me\nCan learn to be\nLike someone like me\nCan learn to be\nLike someone like you\nCan learn to be\nLike someone like me Cruella De Vil\nCruella De Vil\nIf she doesn't scare you\nNo evil thing will\nTo see her is to\nTake a sudden chill\nCruella, Cruella\nShe's like a spider waiting\nFor the kill\nLook out for Cruella De Vil At first you think\nCruella is the devil\nBut after time has worn\nAway the shock\nYou come to realize\nYou've seen her kind of eyes\nWatching you from underneath\nA rock! 
This vampire bat\nThis inhuman beast\nShe ought to be locked up\nAnd never released\nThe world was such\nA wholesome place until\nCruella, Cruella De Vil Look for the bare necessities\nThe simple bare necessities\nForget about your worries and your strife\nI mean the bare necessities\nOld Mother Nature's recipes\nThat brings the bare necessities of life Wherever I wander, wherever I roam\nI couldn't be fonder of my big home\nThe bees are buzzin' in the tree\nTo make some honey just for me\nWhen you look under the rocks and plants\nAnd take a glance at the fancy ants\nThen maybe try a few The bare necessities of life will come to you\nThey'll come to you! Look for the bare necessities\nThe simple bare necessities\nForget about your worries and your strife\nI mean the bare necessities\nThat's why a bear can rest at ease\nWith just the bare necessities of life Now when you pick a pawpaw\nOr a prickly pear\nAnd you prick a raw paw\nWell next time beware\nDon't pick the prickly pear by the paw\nWhen you pick a pear\nTry to use the claw\nBut you don't need to use the claw\nWhen you pick a pear of the big pawpaw\nHave I given you a clue? The bare necessities of life will come to you\nThey'll come to you! 
Oh man this is really living\nSo just try and relax, yeah cool it\nFall apart in my backyard\n'Cause let me tell you something little britches\nIf you act like that bee acts, uh uh\nYou're working too hard And don't spend your time lookin' around\nFor something you want that can't be found\nWhen you find out you can live without it\nAnd go along not thinkin' about it\nI'll tell you something true The bare necessities of life will come to you Look for the bare necessities\nThe simple bare necessities\nForget about your worries and your strife\nI mean the bare necessities\nThat's why a bear can rest at ease\nWith just the bare necessities of life\nWith just the bare necessities of life I see what's happening yeah\nYou're face to face with greatness and it's strange\nYou don't even know how you feel\nIt's adorable!\nWell, it's nice to see that humans never change\nOpen your eyes, let's begin\nYes, it's really me\nIt's Maui, breathe it in\nI know its a lot; the hair, the bod\nWhen you're staring at a demigod What can I say except you're welcome\nFor the tides, the sun, the sky\nHey, it's okay, it's okay, you're welcome\nI'm just an ordinary demi-guy Hey!\nWhat has two thumbs and pulled up the sky\nWhen you were waddling yay high\nThis guy!\nWhen the nights got cold\nWho stole you fire from down below\nYou're lookin' at him, yo\nOh!\nAlso I lassoed the sun\n(You're welcome)\nTo stretch your days and bring you fun\nAlso I harnessed the breeze\n(You're welcome!)\nTo fill your sails and shake your trees So what can I say except you're welcome\nFor the islands I pulled from the sea\nThere's no need to pray, it's okay, you're welcome\nHa! I guess it's just my way of being me\nYou're welcome!\nYou're welcome! Well, come to think of it\nKid, honestly I could go on and on\nI could explain every natural phenomenon\nThe tide? The grass? 
The ground?\nOh, that was Maui just messin' around\nI killed an eel, I buried its guts\nSprouted a tree, now you've got coconuts\nWhat's the lesson? What is the takeaway?\nDon't mess with Maui when he's on the breakaway And the tapestry here on my skin\nIs a map of the victories I win\nLook where I've been\nI make everything happen\nLook at that mean mini Maui just tickety-tappin'\nHeh, heh, heh ,heh, heh, heh, hey! Well anyway, let me say you're welcome (You're welcome)\nFor the wonderful world you know\nHey, it's okay, it's okay, you're welcome (You're welcome)\nWell, come to think of it, I gotta go!\nHey, it's your day to say you're welcome (You're welcome)\n'Cause I'm gonna need that boat\nI'm sailing away, away, you're welcome (You're welcome)\n'Cause Maui can do anything but float! (You're welcome)\nYou're welcome! (You're welcome)\nYou're welcome! And thank you! Who's the leader of the club,\nThat's made for you and me?\nM-I-C-K-E-Y M-O-U-S-E! Hey there! Hi there! Ho there!\nYour as welcome as can be,\nM-I-C-K-E-Y M-O-U-S-E! Mickey Mouse!\nDonald Duck!\nMickey Mouse!\nDonald Duck! Forever man has held a banner\nHigh, high, high. high! Come along lets sing the song\nAnd join the jamberre!\nM-I-C-K-E-Y M-O-U-S-E! Mickey Mouse Club!\nMickey Mouse Club! We'll have fun, we'll meet new faces.\nWe'll do things and we'll go places.\nAll around the world were marching. Who's the leader of the club,\nThat's made for you and me?\nM-I-C-K-E-Y M-O-U-S-E! Hey there! Hi there! Ho there!\nYour as welcome as can be,\nM-I-C-K-E-Y M-O-U-S-E! Mickey Mouse!\nDonald Duck!\nMickey Mouse!\nDonald Duck! Forever man has held a banner\nHigh, high, high. high! Come along lets sing the song\nAnd join the jamberre!\nM-I-C-K-E-Y M-O-U-S-E! Yay Mickey!\nYay Mickey!\nYay Mickey Mouse Club! 
Whale of a Tale\nDisney\n2000 Leagues Under the Sea\nGot a whale of a tale to tell ya, lads\nA whale of a tale or two\n'Bout the flapping fish and girls I've loved\nOn nights like this with the moon above\nA whale of a tale and it's all true,\nI swear by my tatoo\nThere was Mermaid Minnie\nMet her down in Madagascar\nShe would kiss me\nAnytime that I would ask her\nThen one evening\nHer flame of love blew out\nBlow me down and pick me up,\nShe swapped me for a trout!\nGot a whale of a tale to tell ya, lads\nA whale of a tale or two\n'Bout the flapping fish and girls I've loved\nOn nights like this with the moon above\nA whale of a tale and it's all true,\nI swear by my tatoo\nThere was Typhoon Tessie\nMet her on the coast of Java\nWhen we kissed I\nBubbled up like molten lava\nThen she gave me\nThe scare of my young life\nBlow me down, and pick me up\nShe was the captain's wife!\nGot a whale of a tale to tell ya, lads\nA whale of a tale or two\n'Bout the flapping fish and girls I've loved\nOn nights like this with the moon above\nA whale of a tale and it's all true,\nI swear by my tatoo\nThen there was Harpoon Hannah\nHad a face that made you shudder\nLips like Fish hooks\nAnd a nose just like a rudder\nIf I kissed her\nAnd held her tenderly\n(Held her tenderly)\nThere's no sea monster big enough\nTo ever frighten me!\nGot a whale of a tale to tell ya, lads\nA whale of a tale or two\n'Bout the flapping fish and girls I've loved\nOn nights like this with the moon above\nA whale of a tale and it's all true,\nI swear by my tatoo! With tuppence for paper and strings\nYou can have your own set of wings\nWith your feet on the ground\nYou're a bird in a flight\nWith your fist holding tight\nTo the string of your kite Oh, oh, oh!\nLet's go fly a kite\nUp to the highest height!\nLet's go fly a kite and send it soaring\nUp through the atmosphere\nUp where the air is clear\nOh, let's go fly a kite! 
When you send it flyin' up there\nAll at once you're lighter than air\nYou can dance on the breeze\nOver 'ouses and trees\nWith your first 'olding tight\nTo the string of your kite Oh, oh, oh!\nLet's go fly a kite\nUp to the highest height!\nLet's go fly a kite and send it soaring\nUp through the atmosphere\nUp where the air is clear\nOh, let's go fly a kite! Want to know a secret?\nPromise not to tell?\nWhen you're standing by a wishing well\nMake a wish into the well\nThat's all you have to do\nAnd if you hear it echoing\nYour wish will soon come true I'm wishing for the one I love\nTo find me today\nI'm hoping\nAnd I'm dreaming of\nThe nice things he'll say I'm wishing for the one I love\nTo find me today Today!\nNow that I've found you\nHear what I have to say\nOne Song\nI have but one song\nOne song\nOnly for you One heart\nTenderly beating\nEver entreating\nConstant and true One love\nThat has possessed me\nOne love\nThrilling me through One song\nMy heart keeps singing\nOf one love\nOnly for you Let's get down to business, to defeat the Huns\nDid they send me daughters, when I asked for sons?\nYou're the saddest bunch I ever met\nBut you can bet before we're through\nMister, I'll make a man out of you Tranquil as a forest but on fire within\nOnce you find your center, you are sure to win\nYou're a spineless, pale, pathetic lot\nAnd you haven't got a clue\nSomehow I'll make a man out of you I'm never gonna catch my breath\nSay goodbye to those who knew me\nBoy, was I a fool in school for cutting gym\nThis guy's got 'em scared to death\nHope he doesn't see right through me\nNow I really wish that I knew how to swim Be a man\nWe must be swift as the coursing river\nBe a man\nWith all the force of a great typhoon\nBe a man\nWith all the strength of a raging fire\nMysterious as the dark side of the moon Time is racing toward us till the Huns arrive\nHeed my every order and you might survive\nYou're unsuited for the rage of war\nSo pack up, go home, you're 
through\nHow could I make a man out of you? Be a man\nWe must be swift as the coursing river\nBe a man\nWith all the force of a great typhoon\nBe a man\nWith all the strength of a raging fire\nMysterious as the dark side of the moon Be a man\nWe must be swift as the coursing river\nBe a man\nWith all the force of a great typhoon\nBe a man\nWith all the strength of a raging fire\nMysterious as the dark side of the moon (Ou mata e matagi)\nI have crossed the horizon to find you\n(Ou loto mamaina toa)\nI know your name\n(Manatu atu)\nThey have stolen the heart from inside you\n(Taku pelepele)\nBut this does not define you\n(Manatu atu)\nThis is not who you are\nYou know who you are Look at this stuff, isn't it neat?\nWouldn't you think my collection's complete?\nWouldn't you think I'm the girl\nThe girl who has everything?\nLook at this trove, treasures untold\nHow many wonders can one cavern hold?\nLookin' around here you'd think\nSure, she's got everything I've got gadgets and gizmos aplenty\nI've got whooz-its and whatz-its galore\nYou want thingamabobs?\nI got twenty\nBut who cares?\nNo big deal\nI want more I want to be where the people are\nI want to see\nWant to see 'em dancing\nWalking around on those\nWhadd'ya call 'em? Oh, feet\nFlipping your fins you don't get too far\nLegs are required for jumping, dancing\nStrolling along down a\nWhat's that word again? Street Up where they walk\nUp where they run\nUp where they stay all day in the sun\nWandering free\nWish I could be\nPart of that world What would I give\nIf I could live\nOut of these waters?\nWhat would I pay\nTo spend a day\nWarm on the sand?\nBetcha on land\nThey understand\nThat they don't reprimand their daughters\nBright young women\nSick of swimming\nReady to stand And I'm ready to know what the people know\nAsk 'em my questions\nAnd get some answers\nWhat's a fire and why does it\nWhat's the word? Burn? 
When's it my turn?\nWouldn't I love\nLove to explore that shore above?\nOut of the sea\nWish I could be\nPart of that world\nOut of the sea\nWish I could be\nPart of your world When somebody loved me\nEverything was beautiful\nEvery hour we spent together\nLives within my heart And when she was sad\nI was there to dry her tears\nAnd when she was happy so was I\nWhen she loved me Through the summer and the fall\nWe had each other that was all\nJust she and I together\nLike it was meant to be And when she was lonely\nI was there to comfort her\nAnd I knew that she loved me So the years went by\nI stayed the same\nShe began to drift away\nI was left alone Still I waited for the day\nWhen she'd say\nI will always love you Lonely and forgotten\nNever thought she'd look my way\nShe smiled at me and held me Just like she used to do\nLike she loved me\nWhen she loved me When somebody loved me\nEverything was beautiful\nEvery hour we spent together\nLives within my heart When she loved me, hey All those days watching from the windows\nAll those years outside looking in\nAll that time never even knowing\nJust how blind I've been\nNow I'm here blinking in the starlight\nNow I'm here suddenly I see\nStanding here it's all so clear\nI'm where I'm meant to be And at last I see the light\nAnd it's like the fog has lifted\nAnd at last I see the light\nAnd it's like the sky is new\nAnd it's warm and real and bright\nAnd the world has somehow shifted\nAll at once everything looks different\nNow that I see you All those days chasing down a daydream\nAll those years living in a blur\nAll that time never truly seeing\nThings, the way they were\nNow she's here shining in the starlight\nNow she's here suddenly I know\nIf she's here it's crystal clear\nI'm where I'm meant to go And at last I see the light And it's like the fog has lifted And at last I see the light And it's like the sky is new And it's warm and real and bright\nAnd the world has somehow shifted\nAll at once everything is 
different\nNow that I see you Now that I see you Wonderful, marvelous,\nThe man with the strength of many.\nBrave and bold with a heart of gold,\nJohn Henry was a mighty man.\nBorn with a hammer right in his hand. Well, come on John,\nYou're the one that we're depending on.\nSwing that hammer with all your might,\nI know you can show them how to do it right. John Henry, John Henry,\nJohn Henry is a mighty man.\nBorn with a hammer,\nA ten pound hammer,\nA twenty pound hammer right in his hand. I heard John's mama liked to sew at night,\nSo he pulled down the moon for a little bit of light.\nIt took a lot of cooking to keep John fed,\nTen dozen eggs and eight loves of bread. John Henry, John Henry,\nJohn Henry is a mighty man.\nBorn with a hammer, born with a hammer,\nBorn with a hammer right in his hand. He plowed the earth so wide and deep,\nThe seed he sowed the ground had to keep.\nHis hammer hit the earth with such a mighty blow,\nEverything he planted would jump up and grow. John Henry, John Henry,\nJohn Henry is a powerful man.\nAll our lives we've been so poor,\nJohn let me show you what we're working for. Canaan Land, Canaan Land,\nEverybody workin' for the Canaan Land.\nHome and freedom hand in hand,\nWorkin' for the Canaan Land. Oh, movin' on down the line,\nJohn Henry came right on time.\nMovin' on down the line,\nJohn Henry came right on time. Home and freedom hand in hand,\nWorkin' for the Canaan Land. John Henry, John Henry,\nJohn Henry is a mighty man.\nJohn Henry, John Henry,\nJohn Henry is a powerful man. Steel drivin' man,\nJohn Henry was.\nSteel drivin' man,\nOh yes he was.\nSo proud, defiant,\nA gentle giant,\nA steel drivin' natural man. Well, a-come on, John,\nYou're the one that we're dependin' on.\nSwing that hammer, bust that rock,\nCome on, give it everything you've got. John Henry, John Henry,\nJohn Henry is a mighty man.\nBorn with a hammer, born with a hammer,\nBorn with a hammer right in his hand. 
John and the drill made the valley shake,\nRumblin' almost caused an earthquake.\nThunder and lightning was everywhere,\nOh, Lord, that battle was beyond compare. John Henry, John Henry,\nJohn Henry is a powerful man.\nBorn with a hammer, born with a hammer,\nBorn with a hammer right in his hand. No mountain too high,\nNo valley too low,\nNo river too wide,\nNo place too far to go. If you believe, you can overcome.\nThe battle is already won,\nJust keep on keepin' on.\nStand tall, just like John. No matter who you are,\nNo matter where you're from,\nNothing can stop you,\nFrom who you can become. Just keep on keepin' on,\nStand tall, just like John.\nJust keep on keepin' on,\nStand tall, just like John. No matter who you are,\nNo matter where you're from,\nNothing can stop you,\nFrom who you can become. Just keep on keepin' on,\nStand tall, just like John.\nJust keep on keepin' on,\nStand tall, just like John.\nJust keep on keepin' on,\nStand tall, just like John.\nJust keep on keepin' on,\nStand tall, just like John. 
Do you wanna build a snowman?\nCome on let's go and play\nI never see you anymore\nCome out the door\nIt's like you've gone away We used to be best buddies\nAnd now we're not\nI wish you would tell me why!\nDo you wanna build a snowman?\nIt doesn't have to be a snowman\nOkay, bye Do you wanna build a snowman?\nOr ride our bikes around the halls\nI think some company is overdue\nI've started talking to the\nPictures on the walls It gets a little lonely\nAll these empty rooms (these empty rooms)\nJust watching the hours\nTick by Do you wanna build a snowman?\n(Do you wanna build a snowman?)\nIt doesn't have to be a snowman\n(Don't have to be a snowman)\nOkay, bye Elsa?\nPlease, I know you're in there (I know you're in there)\nPeople are asking where you've been\nThey say, \"have courage\" and I'm trying to (trying to)\nI'm right out here for you (here for you)\nJust let me in (just let me in) We only have each other (we only have each other)\nIt's just you and me\nWhat are we gonna do? (what are we gonna do?) Do you wanna build a snowman?\nDo you wanna build a snowman?\nIt doesn't have to be a snowman\nOkay, bye Some day my prince will come\nSome day we'll meet again\nAnd away to his castle we'll go\nTo be happy forever I know Some day when spring is here\nWe'll find a love anew\nAnd the birds will sing\nAnd wedding bells will ring\nSomeday when my dreams come true La la lu, La la lu\nOh, my little star sweeper\nI'll sweep the stardust for you La la lu, La la lu\nLittle soft fluffy sleeper\nHere comes a pink cloud for you La la lu, La la lu\nLittle wandering angel\nFold up your wings close your eyes La la lu, La la lu\nAnd may love be your keeper\nLa la lu, La la lu, La la lu Hakuna Matata!\nWhat a wonderful phrase\nHakuna Matata!\nAin't no passing craze It means no worries\nFor the rest of your days\nIt's our problem-free philosophy\nHakuna Matata! 
Why, when he was a young warthog\nWhen I was a young wart-hoooog!\nVery nice!\nThanks!\nHe found his aroma lacked a certain appeal\nHe could clear the Savannah after every meal\nI'm a sensitive soul, though I seem thick-skinned\nAnd it hurt that my friends never stood downwind\nAnd oh, the shame\n(He was ashamed!)\nThought of changin' my name\n(Oh, what's in a name?)\nAnd I got downhearted\n(How did you feel?)\nEvery time that I-\nPumbaa! Not in front of the kids!\nOh... sorry Hakuna Matata!\nWhat a wonderful phrase\nHakuna Matata!\nAin't no passing craze\nIt means no worries\nFor the rest of your days\nYeah, sing it, kid!\nIt's our problem-free philosophy\nHakuna Matata! Hakuna Matata\nHakuna Matata\nHakuna Matata\nHakuna It means no worries\nFor the rest of your days\nIt's our problem-free philosophy\nHakuna Matata From the day we arrive on the planet\nAnd blinking, step into the sun\nThere's more to see than can ever be seen\nMore to do than can ever be done There's far too much to take in here\nMore to find than can ever be found\nBut the sun rolling high\nThrough the sapphire sky\nKeeps great and small on the endless round It's the circle of life\nAnd it moves us all\nThrough despair and hope\nThrough faith and love Till we find our place\nOn the path unwinding\nIn the circle\nThe circle of life It's the circle of life\nAnd it moves us all\nThrough despair and hope\nThrough faith and love Till we find our place\nOn the path unwinding\nIn the circle\nThe circle of life Mmmmmm\nMmmmmm\nSo this is love\nMmmmmm\nSo this is love\nSo this is what makes life divine\nI'm all aglow, mmmmmm\nAnd now I know\nAnd now I know\nThe key to all heaven is mine\nMy heart has wings, mmmmmm\nAnd I can fly\nI'll touch every star in the sky\nSo this is the miracle\nThat I've been dreaming of\nMmmmmm\nMmmmmm\nSo this is love Oh, this is the night, it's a beautiful night\nAnd we call it bella notte\nLook at the skies, they have stars in their eyes\nOn this lovely bella notte Side by 
side with your loved one\nYou'll find enchantment here\nThe night will weave its magic spell\nWhen the one you love is near For this is the night, and the heavens are right\nOn this lovely bella notte This is the night, it's a beautiful night\nAnd we call it bella notte\nLook at the skies, they have stars in their eyes\nOn this lovely bella notte Side by side with your loved one\nYou'll find enchantment here\nThe night will weave its magic spell\nWhen the one you love is near For this is the night, and the heavens are right\nOn this lovely bella notte Look at me,\nI will never pass for a perfect bride, or a perfect daughter.\nCan it be,\nI'm not meant to play this part?\nNow I see, that if I were truly to be myself,\nI would break my family's heart. Who is that girl I see, staring straight back at me?\nWhy is my reflection someone I don't know?\nSomehow I cannot hide\nWho I am, though I've tried.\nWhen will my reflection show, who I am, inside?\nHow I pray, that a time will come,\nI can free myself, from their expectations\nOn that day, I'll discover someway to be myself, and to make my family proud.\nThey want a docile lamb,\nNo-one knows who I am.\nMust there be a secret me,\nI'm forced to hide?\nMust I pretend that I am someone else for all time?\nWhen will my reflection show, who I am inside?\nWhen will my reflection show, who I am inside? 
I know you\nI walked with you\nOnce Upon a dream\nI know you That gleam in your eyes\nIs so familiar a gleam\nYet I know it's true\nThat visions are seldom what they seem But if I know you\nI know what you do\nYou'll love me at once\nThe way you did once upon a dream La da la da la ahahahahah\nBut if I know you\nI know what you do\nYou'll love me at once The way you did once upon a dream\nI know you\nI walked with you\nOnce Upon a dream I know you\nThat gleam in your eyes\nIs so familiar a gleam\nYet I know it's true That visions are seldom what they seem\nBut if I know you\nI know what you do\nYou'll love me at once The way you did once upon a dream There's a calm surrender to the rush of day\nWhen the heat of the rolling world can be turned away\nAn enchanted moment and it sees me through\nIt's enough for this restless warrior just to be with you And can you feel the love tonight? It is where we are\nIt's enough for this wide-eyed wanderer that we got this far\nAnd can you feel the love tonight? How it's laid to rest\nIt's enough to make kings and vagabonds believe the very best There's a time for everyone, if they only learn\nThat the twisting kaleidoscope moves us all in turn\nThere's a rhyme and reason to the wild outdoors\nWhen the heart of this star-crossed voyager beats in time with yours And can you feel the love tonight? It is where we are\nIt's enough for this wide-eyed wanderer that we got this far\nAnd can you feel the love tonight? 
How it's laid to rest\nIt's enough to make kings and vagabonds believe the very best\nIt's enough to make kings and vagabonds believe the very best Ou mata e matagi\nOu loto mamaina toa\nManatu atu\nTaku pelepele Pa mai to mafanafanaga\nSaolotoga tenei\nManatunatu\nKi tamafine Maua ai te lumanai\nKi tamafine\nOu mata e matagi Ke manatua\nFaiga iena\nte luelue\nte malohi\nina hiva\nKe manatua\nFaiga iena\nte fiafia te malie\nina hiva\nHau la ke ta o\nkuku mai to lima\nHau la ke ta o\nhau ta hihiva\nHau la ke ta o\nkuku mai to lima\nHau la ke ta o\nko koe taku pele e fofou ai au\nTamilomilo fakatamilomilo\ne kua ninimo toku ulu\nTamilomilo fakatamilomilo\ntoe fai toe fai ke manino\nKatakata mai\nhihiva mai\nfakalogo ki te pate aue\nma te luelue Translation: Do you remember\nwhen we did all that\nthe sway\nthe energy\nexpressed in the dance\nDo you remember\nwhen we did all that\nthe joy and sweetness\nexpressed in the dance\ncome on let's go\ngive me your hand\ncome on let's go\ncome let's dance\ncome on let's go\ngive me your hand\ncome on let's go\nyou are the only one that I wish for\nRound and round spinning round\nhey my head's getting dizzy\nRound and round spinning round\ndo it again it'll soon clear\nSmile my way\nkeep on dancing\nlisten to the pate\nand keep on swaying Chief Tui:\nMoana, make way, make way!\nMoana, it's time you knew\nThe village of Motunui is\nAll you need The dancers are practising\nThey dance to an ancient song\n(Who needs a new song? 
This old one's all we need) This tradition is our mission\nAnd Moana, there's so much to do\n(Make way!)\nDon't trip on the taro root\nThat's all you need We share everything we make\n(We make)\nWe joke and we weave our baskets\n(Aha!)\nThe fishermen come back from the sea Moana:\nI wanna see Chief Tui:\nDon't walk away\nMoana, stay on the ground now\nOur people will need a chief\nAnd there you are Chief Tui and Sina:\nThere comes a day\nWhen you're gonna look around\nAnd realize happiness is\nWhere you are Chief Tui:\nConsider the coconut\n(The what?)\nConsider its tree\nWe use each part of the coconut\nThat's all we need Sina:\nWe make our nets from the fibers\n(We make our nets from the fibers)\nThe water is sweet inside\n(The water is sweet inside)\nWe use the leaves to build fires\n(We use the leaves to build fires)\nWe cook up the meat inside\n(We cook up the meat inside) Chief Tui:\nConsider the coconuts\n(Consider the coconuts)\nThe trunks and the leaves\n(Ha!)\nThe island gives us what we need Moana:\nAnd no one leaves Chief Tui:\nThat's right, we stay\nWe're safe and we're well provided\nAnd when we look to the future\nThere you are You'll be okay\nIn time you'll learn just as I did\nChief Tui and Sina:\nYou must find happiness right\nWhere you are Gramma Tala:\nI like to dance with the water\nThe undertow and the waves\nThe water is mischievous\nHa! 
I like how it misbehaves The village may think I'm crazy\nOr say that I drift too far\nBut once you know what you like, well\nThere you are You are your father's daughter\nStubbornness and pride\nMind what he says but remember\nYou may hear a voice inside\nAnd if the voice starts to whisper\nTo follow the farthest star\nMoana, that voice inside is\nWho you are Villagers:\nWe make our nets from the fibers\n(We weave our nets from the fibers)\nThe water is sweet inside\n(And we taste the sweet inside)\nWe use the leaves to build fires\n(We sing these songs in our choir)\nWe cook up the meat inside\n(We have mouths to feed inside) Chief Tui:\nThe village believes in us\n(That's right!)\nThe village believes\n(Ha!)\nThe island gives us what we need\nAnd no one leaves Moana:\nSo here I'll stay\nMy home, my people beside me\nAnd when I think of tomorrow\nThere we are I'll lead the way\nI'll have my people to guide me\nWe'll build our future together\nVillagers: Where we are Moana:\n'Cause every path leads you back to\nVillagers:\nWhere you are\nMoana:\nYou can find happiness right -\nVillagers:\nWhere you are\nWhere you are Don't you disrespect me little man\nDon't you derogate or deride\nYou're in my world now, not your world\nAnd I got friends on the other side\n(He's got friends on the other side) That's and echo gentleman\nJust a little something we have here in Louisiana\nA little parlor trick, don't worry Sit down at my table, put your mind at ease\nIf you relax it will enable me to do anything I please\nI can read your future, I can change it 'round some too\nI look deep into your heart and soul You do have have a soul, don't you, Lawrence?\nMake your wildest dreams come true I got voodoo, I got voodoo\nI got things I didn't even try\nAnd I got friends on the other side\n(He's got friends on the other side) The cards, the cards, the cards will tell\nThe past, the present and the future as well\nThe cards, the cards, just take three\nTake a little trip into your 
future with me Now you, young man are from across the sea\nYou come from two long lines of royalty\nI'm a royal myself on my mother's side\nYour lifestyle's high but your funds are low\nYou need to marry a little honey who's daddy got dough Mom and dad cut you off, huh, playboy?\nNow y'all better get hitched but hitching ties you down\nYou just want to be free, hop from place to place\nBut freedom takes green It's the green, it's the green, it's the green you need\nAnd when I look into your future it's the green that I see On you little man, I don't want wanna waste much time\nYou've been pushed 'round all your life\nYou've been pushed 'round by your mother\nAnd your sister and your brother\nAnd if you was married you'd be pushed 'round by your wife But in your future the you I see\nIs exactly the man you always wanted to be\nShake my hand, come on boys\nWon't you shake a poor sinner's hand? Yes, are you ready?\n(Are you ready?)\nAre you ready? Transformation central\nTransformation central\nTransformation central\nTransformation central Transformafication central, can you feel it?\nYou're changing, you're changing, you're changing alright\nI hope you're satisfied, but if you ain't don't blame me\nYou can blame my friends on the other side You got what you wanted\nWhat you want's what you get\nHush Ain't it a glorious day?\nRight as a mornin' in May\nI feel like I could fly\nHave you ever seen the grass\nSo green, or a bluer sky? Oh it's a jolly 'oliday with Mary\nMary makes your 'eart so light!\nWhen the day is gray and ordianry\nMary makes the sun shine bright!\nOh, 'appiness is bloomin' all around 'er\nThe daffodils are smilin' at the dove\nWhen Mary 'olds your 'and\nYou feel so grand\nYour 'eart starts beatin' like\nA big brass band!\nIt's a jolly 'oliday with Mary\nNo wonder that it's Mary that we love! 
Oh it's a jolly 'oliday with Mary\nMary makes your 'eart so light!\nWhen the day is gray and ordianry\nMary makes the sun shine bright!\nOh, 'appiness is bloomin' all around 'er\nThe daffodils are smilin' at the dove\nWhen Mary 'olds your 'and\nYou feel so grand\nYour 'eart starts beatin' like\nA big brass band!\nIt's a jolly 'oliday with Mary\nNo wonder that it's Mary that we love! Oh, it's a jolly holiday with you, Bert\nGentlemen like you are few\nThough you're just a diamond\nIn the rough, Bert\nUnderneath your blood is blue!\nYou'd never think of pressing\nYour advantage\nForbearance is the hallmark of your creed\nA lady needn't fear when you are near\nYour sweet gentility is crystal clear!\nOh, it's a jolly holiday with you, Bert\nA jolly, jolly holiday with you! It's true that Mavis and Sybil 'ave\nWays that are winning\nAnd Prudence and Gwendolyn\nSet your 'eart spinning\nPhoebe's delightful, Maude is disarming\nJanice, Felicia, Lydia--charming\nCynthia's dashing, Vivian's sweet\nStephanie's smashing, Priscilla a treat\nVeronica, Millicent, Agnes, and Jane\nConvivial company, time and again\nDorcas and Phyllis and Glynis are sorts\nI'll agree are three jolly good sports\nBut cream of the crop, tip of the top\nIt's Mary Poppins, and there we stop! 
If I never knew you\nIf I never felt this love\nI would have no inkling of\nHow precious life can be And if I never knew you\nI would never have a clue\nHow at last I'd find in you\nThe missing part of me In this world so full of fear\nFull of rage and lies\nI can see the truth so clear\nIn your eyes\nSo dry your eyes And I'm so grateful to you\nI'd have lived my whole life through\nLost forever\nIf I never knew you If I never knew you\nI'd be safe but half as real\nNever knowing I could feel\nA love so strong and true I'm so grateful to you\nI'd have lived my whole life through\nLost forever\nIf I never knew you I thought our love would be so beautiful Somehow we made the whole world bright I never knew that fear and hate could be so strong\nAll they'd leave us were these whispers in the night\nBut still my heart is singing\nWe were right If I never knew you\nThere's no moment I regret\nIf I never knew this love\nI would have no inkling of\nIf our time has gone too fast I've lived at last\nHow precious life can be I thought our love would be so beautiful\nSomehow we'd make the whole world bright I thought our love would be so beautiful\nWe'd turn the darkness into light And still my heart is singing\nWe were right We were right\nAnd if I never knew you\nI'd have lived my whole life through Empty as the sky Never knowing why\nLost forever I know you\nI walked with you\nOnce Upon a dream\nI know you That gleam in your eyes\nIs so familiar a gleam\nYet I know it's true\nThat visions are seldom what they seem But if I know you\nI know what you do\nYou'll love me at once\nThe way you did once upon a dream La da la da la ahahahahah\nBut if I know you\nI know what you do\nYou'll love me at once The way you did once upon a dream\nI know you\nI walked with you\nOnce Upon a dream I know you\nThat gleam in your eyes\nIs so familiar a gleam\nYet I know it's true That visions are seldom what they seem\nBut if I know you\nI know what you do\nYou'll love me at once The way you 
did once upon a dream You've got a friend in me\nYou've got a friend in me\nWhen the road looks rough ahead\nAnd you're miles and miles\nFrom your nice warm bed\nJust remember what your old pal said\nBoy, you've got a friend in me\nYou've got a friend in me\nYou've got a friend in me\nYou've got a friend in me\nYou've got troubles, well I've got 'em too\nThere isn't anything I wouldn't do for you\nWe stick together and we see it through\nYou've got a friend in me\nYou've got a friend in me Some other folks might be\nA little bit smarter than I am\nBigger and stronger too\nMaybe\nBut none of them will ever love you the way I do\nIt's me and you\nAnd as the years go by\nBoys, our friendship will never die\nYou're gonna see\nIt's our destiny\nYou've got a friend in me\nYou've got a friend in me\nYou've got a friend in me I have often dreamed of a far off place\nWhere a hero's welcome would be waiting for me\nWhere the crowds would cheer, when they see my face\nAnd a voice keeps saying this is where I'm meant to be I'll be there someday, I can go the distance\nI will find my way if I can be strong\nI know every mile would be worth my while\nWhen I go the distance, I'll be right where I belong Down an unknown road to embrace my fate\nThough that road may wander, it will lead me to you\nAnd a thousand years would be worth the wait\nIt might take a lifetime but somehow I'll see it through And I won't look back, I can go the distance\nAnd I'll stay on track, no I won't accept defeat\nIt's an uphill slope\nBut I won't loose hope, 'till I go the distance\nAnd my journey is complete, oh yeah But to look beyond the glory is the hardest part\nFor a hero's strength is measured by his heart, oh Like a shooting star, I will go the distance\nI will search the world, I will face its harms\nI don't care how far, I can go the distance\n'Till I find my hero's welcome waiting in your arms I will search the world, I will face its harms\n'Till I find my hero's welcome waiting in your arms 
I've got no strings\nTo hold me down\nTo make me fret\nOr make me frown\nI had strings\nBut now I'm free\nThere are no strings on me Hi-ho the me-ri-o\nThat's the only way to go\nI want the world to know\nNothing ever worries me I've got no strings\nSo I have fun\nI'm not tied up to anyone\nThey've got strings\nBut you can see\nThere are no strings on me You have no strings\nYour arms is free\nTo love me by the Zuider Zee\nYa, ya, ya\nIf you would woo\nI'd bust my strings for you You've got no strings\nComme ci comme ca\nYour savoire-faire is ooh la la\nI've got strings\nBut entre nous\nI'd cut my strings for you Down where the Volga flows\nThere's a Russian rendezvous\nWhere me and Ivan go\nBut I'd rather go with you, hey There are no strings on me Come stop your crying\nIt will be alright\nJust take my hand\nAnd hold it tight I will protect you\nFrom all around you\nI will be here\nDon't you cry For one so small,\nYou seem so strong\nMy arms will hold you,\nKeep you safe and warm\nThis bond between us\nCan't be broken\nI will be here\nDon't you cry 'Cause you'll be in my heart\nYes, you'll be in my heart\nFrom this day on\nNow and forever more\nYou'll be in my heart\nNo matter what they say\nYou'll be here in my heart\nAlways Why can't they understand the way we feel\nThey just don't trust what they can't explain\nI know we're different, but deep inside us\nWe're not that different at all And you'll be in my heart\nYes you'll be in my heart\nFrom this day on\nNow and forever more Don't listen to them\n'Cause what do they know\nWe need each other, to have, to hold\nThey'll see in time, I know When destiny calls you, you must be strong\nI may not be with you\nBut you got to hold on\nThey'll see in time, I know\nWe'll show them together 'Cause you'll be in my heart\nBelieve me you'll be in my heart\nI'll be there from this day on\nNow and forever more You'll be in my heart\nNo matter what they say\nYou'll be here in my heart\nAlways Always I'll be with you\nI'll be 
there for you always\nAlways and always Just look over your shoulder\nJust look over your shoulder\nJust look over your shoulder\nI'll be there\nAlways \nLength of data: 2499\n"
]
],
[
[
"Construir el modelo LSTM y preparar X e y",
"_____no_output_____"
]
],
[
[
"import numpy as np\nfrom tensorflow.keras.preprocessing.text import Tokenizer\nfrom tensorflow.keras.utils import to_categorical\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, LSTM, Embedding\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences",
"_____no_output_____"
]
],
[
[
"Tokenización",
"_____no_output_____"
]
],
[
[
"# Fit a Keras Tokenizer on the lyrics corpus; this learns the\n# word -> integer index vocabulary used to encode the text below.\ntoken = Tokenizer()\ntoken.fit_on_texts(data)",
"_____no_output_____"
],
[
"# help(token)  # uncomment to inspect the Tokenizer API interactively",
"_____no_output_____"
]
],
[
[
"Las palabras tokenizadas se pueden ver mediante:",
"_____no_output_____"
]
],
[
[
"# The learned word -> integer index mapping (rich display as cell output)\ntoken.word_index",
"_____no_output_____"
]
],
[
[
"Codifiquemos las palabras tokenizadas. Esto convertirá los datos de texto en tokens numéricos.",
"_____no_output_____"
]
],
[
[
"# Encode each line of text as a sequence of integer word indices\nencoded_text = token.texts_to_sequences(data)\n# vocabulary size is number of distinct words + 1, because index 0\n# is reserved by Keras for padding and never assigned to a word\nvocab_size = len(token.word_counts) + 1",
"_____no_output_____"
]
],
[
[
"Preparar datos de entrenamiento",
"_____no_output_____"
]
],
[
[
"# Build n-gram prefix training sequences: for every encoded line,\n# each prefix of length 2..len(d) becomes one example (the last\n# token of the prefix is the prediction target).\ndatalist = []\nfor d in encoded_text:\n    if len(d) > 1:\n        # range end is len(d) + 1 so the FULL sequence is included too;\n        # with range(2, len(d)) the final word of each line would never\n        # be used as a target.\n        for i in range(2, len(d) + 1):\n            datalist.append(d[:i])\n# NOTE: the per-prefix print() was removed — it flooded the output\n# (the stream was truncated at 5000 lines). Inspect a sample instead:\ndatalist[:5]",
"\u001b[1;30;43mSe truncaron las รบltimas lรญneas 5000 del resultado de transmisiรณn.\u001b[0m\n[177, 322, 184, 2, 704, 1, 172, 173, 7, 100, 44]\n[177, 322, 184, 2, 704, 1, 172, 173, 7, 100, 44, 40]\n[177, 322, 184, 2, 704, 1, 172, 173, 7, 100, 44, 40, 6]\n[519, 40]\n[519, 40, 6]\n[519, 40, 6, 3]\n[519, 40, 6, 3, 67]\n[519, 40, 6, 3, 67, 22]\n[519, 40, 6, 3, 67, 22, 1]\n[519, 40, 6, 3, 67, 22, 1, 172]\n[1, 813]\n[1, 813, 172]\n[814, 296]\n[814, 296, 17]\n[814, 296, 17, 395]\n[814, 296, 17, 395, 4]\n[814, 296, 17, 395, 4, 17]\n[5, 325]\n[5, 325, 1]\n[5, 325, 1, 172]\n[125, 109]\n[125, 109, 2]\n[125, 109, 2, 1146]\n[125, 109, 2, 1146, 19]\n[125, 109, 2, 1146, 19, 217]\n[125, 109, 2, 1146, 19, 217, 54]\n[25, 21]\n[25, 21, 1]\n[25, 21, 1, 172]\n[25, 21, 1, 172, 173]\n[25, 21, 1, 172, 173, 7]\n[25, 21, 1, 172, 173, 7, 100]\n[25, 21, 1, 172, 173, 7, 100, 45]\n[25, 21, 1, 172, 173, 7, 100, 45, 29]\n[25, 21, 1, 172, 173, 7, 100, 45, 29, 3]\n[25, 21, 1, 172, 173, 7, 100, 45, 29, 3, 450]\n[25, 21, 1, 172, 173, 7, 100, 45, 29, 3, 450, 2]\n[98, 2]\n[98, 2, 1148]\n[4, 3]\n[4, 3, 2157]\n[4, 3, 2157, 2]\n[4, 3, 2157, 2, 2158]\n[86, 2159]\n[86, 2159, 55]\n[52, 450]\n[52, 450, 1]\n[52, 450, 1, 1148]\n[52, 450, 1, 1148, 637]\n[52, 450, 1, 1148, 637, 115]\n[52, 450, 1, 1148, 637, 115, 1]\n[29, 3]\n[29, 3, 450]\n[29, 3, 450, 2]\n[184, 6]\n[184, 6, 303]\n[184, 6, 303, 1]\n[26, 3]\n[26, 3, 52]\n[26, 3, 52, 145]\n[26, 3, 52, 145, 6]\n[26, 3, 52, 145, 6, 303]\n[26, 3, 52, 145, 6, 303, 1]\n[29, 3]\n[29, 3, 450]\n[29, 3, 450, 2]\n[29, 3, 450, 2, 637]\n[29, 3, 450, 2, 637, 7]\n[29, 3, 450, 2, 637, 7, 1]\n[29, 3, 450, 2, 637, 7, 1, 208]\n[48, 5]\n[48, 5, 2160]\n[48, 5, 2160, 3]\n[48, 5, 2160, 3, 2]\n[48, 5, 2160, 3, 2, 635]\n[48, 5, 2160, 3, 2, 635, 1]\n[48, 5, 2160, 3, 2, 635, 1, 172]\n[48, 5, 2160, 3, 2, 635, 1, 172, 173]\n[48, 5, 2160, 3, 2, 635, 1, 172, 173, 7]\n[48, 5, 2160, 3, 2, 635, 1, 172, 173, 7, 100]\n[48, 5, 2160, 3, 2, 635, 1, 172, 173, 7, 100, 44]\n[48, 5, 2160, 3, 2, 635, 1, 
172, 173, 7, 100, 44, 40]\n[48, 5, 2160, 3, 2, 635, 1, 172, 173, 7, 100, 44, 40, 6]\n[519, 40]\n[519, 40, 6]\n[519, 40, 6, 3]\n[519, 40, 6, 3, 60]\n[519, 40, 6, 3, 60, 80]\n[519, 40, 6, 3, 60, 80, 31]\n[519, 40, 6, 3, 60, 80, 31, 15]\n[519, 40, 6, 3, 60, 80, 31, 15, 196]\n[28, 21]\n[28, 21, 184]\n[28, 21, 184, 4]\n[28, 21, 184, 4, 804]\n[28, 21, 184, 4, 804, 300]\n[28, 21, 184, 4, 804, 300, 2161]\n[658, 330]\n[658, 330, 13]\n[658, 330, 13, 16]\n[215, 70]\n[215, 70, 8]\n[215, 70, 8, 186]\n[215, 70, 8, 186, 3]\n[215, 70, 8, 186, 3, 232]\n[215, 70, 8, 186, 3, 232, 77]\n[39, 3]\n[39, 3, 775]\n[39, 3, 775, 32]\n[39, 3, 775, 32, 14]\n[39, 3, 775, 32, 14, 1111]\n[39, 3, 775, 32, 14, 1111, 2164]\n[39, 3, 775, 32, 14, 1111, 2164, 1151]\n[49, 774]\n[49, 774, 102]\n[49, 774, 102, 418]\n[49, 774, 102, 418, 4]\n[49, 774, 102, 418, 4, 52]\n[49, 774, 102, 418, 4, 52, 677]\n[49, 774, 102, 418, 4, 52, 677, 17]\n[49, 774, 102, 418, 4, 52, 677, 17, 55]\n[49, 774, 102, 418, 4, 52, 677, 17, 55, 469]\n[22, 232]\n[22, 232, 3]\n[22, 232, 3, 120]\n[22, 232, 3, 120, 14]\n[22, 232, 3, 120, 14, 146]\n[22, 232, 3, 120, 14, 146, 10]\n[29, 3]\n[29, 3, 94]\n[29, 3, 94, 72]\n[29, 3, 94, 72, 3]\n[29, 3, 94, 72, 3, 19]\n[29, 3, 94, 72, 3, 19, 351]\n[29, 3, 94, 72, 3, 19, 351, 596]\n[4, 53]\n[4, 53, 238]\n[4, 53, 238, 101]\n[4, 53, 238, 101, 2165]\n[4, 53, 238, 101, 2165, 296]\n[75, 186]\n[75, 186, 3]\n[75, 186, 3, 232]\n[75, 186, 3, 232, 83]\n[75, 186, 3, 232, 83, 1]\n[75, 186, 3, 232, 83, 1, 172]\n[75, 186, 3, 232, 83, 1, 172, 173]\n[75, 186, 3, 232, 83, 1, 172, 173, 7]\n[75, 186, 3, 232, 83, 1, 172, 173, 7, 100]\n[75, 186, 3, 232, 83, 1, 172, 173, 7, 100, 44]\n[75, 186, 3, 232, 83, 1, 172, 173, 7, 100, 44, 40]\n[75, 186, 3, 232, 83, 1, 172, 173, 7, 100, 44, 40, 6]\n[75, 186, 3, 232, 83, 1, 172, 173, 7, 100, 44, 40, 6, 3]\n[75, 186, 3, 232, 83, 1, 172, 173, 7, 100, 44, 40, 6, 3, 67]\n[75, 186, 3, 232, 83, 1, 172, 173, 7, 100, 44, 40, 6, 3, 67, 22]\n[75, 186, 3, 232, 83, 1, 172, 173, 7, 100, 44, 40, 
6, 3, 67, 22, 1]\n[75, 186, 3, 232, 83, 1, 172, 173, 7, 100, 44, 40, 6, 3, 67, 22, 1, 172]\n[1, 813]\n[1, 813, 172]\n[814, 296]\n[814, 296, 17]\n[814, 296, 17, 395]\n[814, 296, 17, 395, 4]\n[814, 296, 17, 395, 4, 17]\n[5, 325]\n[5, 325, 1]\n[5, 325, 1, 172]\n[125, 109]\n[125, 109, 2]\n[125, 109, 2, 1146]\n[125, 109, 2, 1146, 19]\n[125, 109, 2, 1146, 19, 217]\n[125, 109, 2, 1146, 19, 217, 54]\n[25, 21]\n[25, 21, 1]\n[25, 21, 1, 172]\n[25, 21, 1, 172, 173]\n[25, 21, 1, 172, 173, 7]\n[25, 21]\n[25, 21, 1]\n[25, 21, 1, 172]\n[25, 21, 1, 172, 173]\n[25, 21, 1, 172, 173, 7]\n[25, 21, 1, 172, 173, 7, 100]\n[25, 21, 1, 172, 173, 7, 100, 5]\n[25, 21, 1, 172, 173, 7, 100, 5, 51]\n[25, 21, 1, 172, 173, 7, 100, 5, 51, 209]\n[25, 21, 1, 172, 173, 7, 100, 5, 51, 209, 2166]\n[49, 245]\n[49, 245, 6]\n[49, 245, 6, 245]\n[49, 245, 6, 245, 25]\n[49, 245, 6, 245, 25, 2167]\n[49, 245, 6, 245, 25, 2167, 4]\n[49, 245, 6, 245, 25, 2167, 4, 12]\n[3, 52]\n[3, 52, 222]\n[3, 52, 222, 23]\n[3, 52, 222, 23, 69]\n[3, 52, 222, 23, 69, 3]\n[86, 12]\n[86, 12, 427]\n[86, 12, 427, 6]\n[86, 12, 427, 6, 51]\n[86, 12, 427, 6, 51, 14]\n[86, 12, 427, 6, 51, 14, 1104]\n[86, 12, 427, 6, 51, 14, 1104, 42]\n[1076, 17]\n[1076, 17, 155]\n[1076, 17, 155, 154]\n[170, 12]\n[170, 12, 196]\n[12, 391]\n[12, 391, 2169]\n[12, 391, 2169, 9]\n[5, 23]\n[5, 23, 178]\n[5, 23, 178, 2]\n[5, 23, 178, 2, 381]\n[5, 23, 178, 2, 381, 1]\n[5, 23, 178, 2, 381, 1, 768]\n[5, 23, 178, 2, 381, 1, 768, 1]\n[29, 49]\n[29, 49, 564]\n[29, 49, 564, 54]\n[29, 49, 564, 54, 2]\n[29, 49, 564, 54, 2, 1103]\n[29, 49, 564, 54, 2, 1103, 30]\n[29, 49, 564, 54, 2, 1103, 30, 19]\n[29, 49, 564, 54, 2, 1103, 30, 19, 5]\n[29, 49, 564, 54, 2, 1103, 30, 19, 5, 81]\n[29, 49, 564, 54, 2, 1103, 30, 19, 5, 81, 779]\n[29, 49, 564, 54, 2, 1103, 30, 19, 5, 81, 779, 49]\n[22, 1]\n[22, 1, 2171]\n[22, 1, 2171, 1]\n[22, 1, 2171, 1, 151]\n[22, 1, 2171, 1, 151, 1]\n[221, 12]\n[221, 12, 271]\n[221, 12, 271, 12]\n[221, 12, 271, 12, 271]\n[221, 12, 271, 12, 271, 49]\n[37, 
21]\n[37, 21, 167]\n[37, 21, 167, 2172]\n[37, 21, 167, 2172, 1101]\n[37, 21, 167, 2172, 1101, 792]\n[30, 123]\n[30, 123, 299]\n[30, 123, 299, 2173]\n[30, 123, 299, 2173, 4]\n[30, 123, 299, 2173, 4, 816]\n[30, 123, 299, 2173, 4, 816, 57]\n[30, 123, 299, 2173, 4, 816, 57, 1]\n[29, 3]\n[29, 3, 157]\n[29, 3, 157, 2174]\n[29, 3, 157, 2174, 638]\n[29, 1]\n[29, 1, 525]\n[29, 1, 525, 35]\n[64, 761]\n[64, 761, 3]\n[64, 761, 3, 129]\n[64, 761, 3, 129, 65]\n[64, 761, 3, 129, 65, 89]\n[49, 469]\n[49, 469, 54]\n[49, 469, 54, 424]\n[1152, 5]\n[1152, 5, 2176]\n[1152, 5, 2176, 1]\n[6, 2177]\n[6, 2177, 17]\n[6, 2177, 17, 269]\n[6, 2177, 17, 269, 4]\n[6, 2177, 17, 269, 4, 1137]\n[6, 2177, 17, 269, 4, 1137, 3]\n[1152, 5]\n[1152, 5, 2178]\n[1152, 5, 2178, 1]\n[6, 2179]\n[6, 2179, 17]\n[6, 2179, 17, 1033]\n[6, 2179, 17, 1033, 4]\n[6, 2179, 17, 1033, 4, 639]\n[6, 2179, 17, 1033, 4, 639, 17]\n[6, 2179, 17, 1033, 4, 639, 17, 802]\n[6, 2179, 17, 1033, 4, 639, 17, 802, 28]\n[6, 2179, 17, 1033, 4, 639, 17, 802, 28, 30]\n[6, 2179, 17, 1033, 4, 639, 17, 802, 28, 30, 19]\n[6, 2179, 17, 1033, 4, 639, 17, 802, 28, 30, 19, 5]\n[6, 2179, 17, 1033, 4, 639, 17, 802, 28, 30, 19, 5, 81]\n[6, 2179, 17, 1033, 4, 639, 17, 802, 28, 30, 19, 5, 81, 779]\n[6, 2179, 17, 1033, 4, 639, 17, 802, 28, 30, 19, 5, 81, 779, 49]\n[22, 1]\n[22, 1, 2180]\n[22, 1, 2180, 5]\n[22, 1, 2180, 5, 816]\n[22, 1, 2180, 5, 816, 65]\n[22, 1, 2180, 5, 816, 65, 1]\n[99, 27]\n[99, 27, 145]\n[99, 27, 145, 6]\n[99, 27, 145, 6, 1154]\n[99, 27, 145, 6, 1154, 12]\n[99, 27, 145, 6, 1154, 12, 271]\n[99, 27, 145, 6, 1154, 12, 271, 49]\n[273, 5]\n[273, 5, 411]\n[273, 5, 411, 12]\n[273, 5, 411, 12, 21]\n[273, 5, 411, 12, 21, 16]\n[273, 5, 411, 12, 21, 16, 68]\n[273, 5, 411, 12, 21, 16, 68, 7]\n[273, 5, 411, 12, 21, 16, 68, 7, 2181]\n[49, 114]\n[49, 114, 86]\n[49, 114, 86, 40]\n[49, 114, 86, 40, 6]\n[49, 114, 86, 40, 6, 91]\n[49, 114, 86, 40, 6, 91, 7]\n[623, 2182]\n[623, 2182, 5]\n[623, 2182, 5, 139]\n[623, 2182, 5, 139, 53]\n[623, 2182, 5, 139, 
53, 11]\n[623, 2182, 5, 139, 53, 11, 4]\n[5, 139]\n[5, 139, 1155]\n[5, 139, 1155, 95]\n[5, 139, 1155, 95, 1156]\n[1, 1000]\n[1, 1000, 1]\n[1, 1000, 1, 1157]\n[1, 1000, 1, 1157, 1]\n[60, 14]\n[60, 14, 61]\n[60, 14, 61, 391]\n[60, 14, 61, 391, 21]\n[60, 14, 61, 391, 21, 2184]\n[5, 2185]\n[5, 2185, 167]\n[5, 2185, 167, 2186]\n[5, 2185, 167, 2186, 5]\n[5, 2185, 167, 2186, 5, 2187]\n[5, 2185, 167, 2186, 5, 2187, 178]\n[2189, 2]\n[2189, 2, 556]\n[2189, 2, 556, 45]\n[2189, 2, 556, 45, 90]\n[2189, 2, 556, 45, 90, 35]\n[209, 1]\n[209, 1, 947]\n[209, 1, 947, 30]\n[209, 1, 947, 30, 15]\n[209, 1, 947, 30, 15, 1]\n[52, 2191]\n[52, 2191, 25]\n[52, 2191, 25, 391]\n[52, 2191, 25, 391, 29]\n[52, 2191, 25, 391, 29, 197]\n[52, 2191, 25, 391, 29, 197, 11]\n[52, 2191, 25, 391, 29, 197, 11, 1]\n[52, 2191, 25, 391, 29, 197, 11, 1, 2192]\n[52, 2191, 25, 391, 29, 197, 11, 1, 2192, 4]\n[52, 2191, 25, 391, 29, 197, 11, 1, 2192, 4, 1]\n[52, 2191, 25, 391, 29, 197, 11, 1, 2192, 4, 1, 2193]\n[52, 2191, 25, 391, 29, 197, 11, 1, 2192, 4, 1, 2193, 66]\n[52, 2191, 25, 391, 29, 197, 11, 1, 2192, 4, 1, 2193, 66, 11]\n[52, 2191, 25, 391, 29, 197, 11, 1, 2192, 4, 1, 2193, 66, 11, 16]\n[15, 2]\n[15, 2, 2194]\n[15, 2, 2194, 7]\n[15, 2, 2194, 7, 1]\n[15, 2, 2194, 7, 1, 2195]\n[15, 2, 2194, 7, 1, 2195, 5]\n[67, 56]\n[67, 56, 76]\n[5, 71]\n[5, 71, 138]\n[67, 54]\n[67, 54, 14]\n[67, 54, 14, 325]\n[67, 54, 14, 325, 1102]\n[67, 54, 14, 325, 1102, 391]\n[67, 54, 14, 325, 1102, 391, 21]\n[67, 54, 14, 325, 1102, 391, 21, 2196]\n[451, 451]\n[451, 451, 451]\n[451, 451, 451, 451]\n[451, 451, 451, 451, 451]\n[451, 451, 451, 451, 451, 451]\n[451, 451, 451, 451, 451, 451, 221]\n[451, 451, 451, 451, 451, 451, 221, 86]\n[451, 451, 451, 451, 451, 451, 221, 86, 2198]\n[451, 451, 451, 451, 451, 451, 221, 86, 2198, 70]\n[451, 451, 451, 451, 451, 451, 221, 86, 2198, 70, 8]\n[451, 451, 451, 451, 451, 451, 221, 86, 2198, 70, 8, 81]\n[451, 451, 451, 451, 451, 451, 221, 86, 2198, 70, 8, 81, 49]\n[451, 451, 451, 451, 451, 451, 
221, 86, 2198, 70, 8, 81, 49, 114]\n[451, 451, 451, 451, 451, 451, 221, 86, 2198, 70, 8, 81, 49, 114, 49]\n[22, 1]\n[22, 1, 199]\n[22, 1, 199, 33]\n[22, 1, 199, 33, 3]\n[221, 12]\n[221, 12, 271]\n[221, 12, 271, 12]\n[221, 12, 271, 12, 271]\n[221, 12, 271, 12, 271, 49]\n[221, 12, 271, 12, 271, 49, 114]\n[221, 12, 271, 12, 271, 49, 114, 49]\n[86, 40]\n[86, 40, 6]\n[86, 40, 6, 91]\n[86, 40, 6, 91, 7]\n[86, 40, 6, 91, 7, 9]\n[86, 40, 6, 91, 7, 9, 5]\n[86, 40, 6, 91, 7, 9, 5, 432]\n[221, 12]\n[221, 12, 17]\n[221, 12, 17, 79]\n[221, 12, 17, 79, 6]\n[221, 12, 17, 79, 6, 81]\n[221, 12, 17, 79, 6, 81, 49]\n[221, 12, 17, 79, 6, 81, 49, 114]\n[221, 12, 17, 79, 6, 81, 49, 114, 49]\n[215, 37]\n[215, 37, 160]\n[215, 37, 160, 145]\n[215, 37, 160, 145, 14]\n[37, 2200]\n[37, 2200, 189]\n[37, 2200, 189, 189]\n[37, 2200, 189, 189, 49]\n[37, 2200, 189, 189, 49, 114]\n[37, 2200, 189, 189, 49, 114, 49]\n[215, 391]\n[215, 391, 19]\n[215, 391, 19, 46]\n[215, 391, 19, 46, 423]\n[215, 391, 19, 46, 423, 26]\n[215, 391, 19, 46, 423, 26, 1086]\n[215, 391, 19, 46, 423, 26, 1086, 49]\n[49, 114]\n[49, 114, 49]\n[49, 114]\n[49, 114, 4]\n[49, 114, 4, 577]\n[49, 114, 4, 577, 3]\n[49, 114, 4, 577, 3, 377]\n[49, 114, 4, 577, 3, 377, 1]\n[49, 114, 4, 577, 3, 377, 1, 1159]\n[49, 114, 4, 577, 3, 377, 1, 1159, 7]\n[49, 114, 4, 577, 3, 377, 1, 1159, 7, 1]\n[125, 276]\n[125, 276, 22]\n[125, 276, 22, 3]\n[125, 276, 22, 3, 4]\n[234, 5]\n[234, 5, 452]\n[234, 5, 452, 453]\n[234, 5, 452, 453, 119]\n[234, 5, 452, 453, 119, 454]\n[234, 5, 452, 453, 119, 454, 234]\n[234, 5, 452, 453, 119, 454, 234, 191]\n[234, 5, 452, 453, 119, 454, 234, 191, 455]\n[234, 5, 452, 453, 119, 454, 234, 191, 455, 390]\n[234, 5, 452, 453, 119, 454, 234, 191, 455, 390, 119]\n[234, 5, 452, 453, 119, 454, 234, 191, 455, 390, 119, 221]\n[234, 5, 452, 453, 119, 454, 234, 191, 455, 390, 119, 221, 36]\n[234, 5, 452, 453, 119, 454, 234, 191, 455, 390, 119, 221, 36, 818]\n[234, 5, 452, 453, 119, 454, 234, 191, 455, 390, 119, 221, 36, 818, 
36]\n[234, 5, 452, 453, 119, 454, 234, 191, 455, 390, 119, 221, 36, 818, 36, 97]\n[17, 24]\n[17, 24, 114]\n[17, 24, 114, 24]\n[17, 24, 114, 24, 19]\n[234, 5]\n[234, 5, 452]\n[234, 5, 452, 453]\n[234, 5, 452, 453, 119]\n[234, 5, 452, 453, 119, 454]\n[234, 5, 452, 453, 119, 454, 234]\n[234, 5, 452, 453, 119, 454, 234, 191]\n[234, 5, 452, 453, 119, 454, 234, 191, 455]\n[234, 5, 452, 453, 119, 454, 234, 191, 455, 390]\n[234, 5, 452, 453, 119, 454, 234, 191, 455, 390, 119]\n[234, 5, 452, 453, 119, 454, 234, 191, 455, 390, 119, 307]\n[641, 642]\n[641, 642, 308]\n[641, 642, 308, 80]\n[641, 642, 308, 80, 123]\n[641, 642, 308, 80, 123, 405]\n[641, 642, 308, 80, 123, 405, 2]\n[134, 134]\n[134, 134, 134]\n[134, 134, 134, 134]\n[134, 134, 134, 134, 40]\n[134, 134, 134, 134, 40, 238]\n[134, 134, 134, 134, 40, 238, 1161]\n[134, 134, 134, 134, 40, 238, 1161, 181]\n[134, 134, 134, 134, 40, 238, 1161, 181, 1]\n[4, 1162]\n[4, 1162, 1]\n[234, 5]\n[234, 5, 452]\n[234, 5, 452, 453]\n[234, 5, 452, 453, 119]\n[234, 5, 452, 453, 119, 454]\n[234, 5, 452, 453, 119, 454, 234]\n[234, 5, 452, 453, 119, 454, 234, 191]\n[234, 5, 452, 453, 119, 454, 234, 191, 455]\n[234, 5, 452, 453, 119, 454, 234, 191, 455, 390]\n[234, 5, 452, 453, 119, 454, 234, 191, 455, 390, 119]\n[234, 5, 452, 453, 119, 454, 234, 191, 455, 390, 119, 307]\n[234, 5, 452, 453, 119, 454, 234, 191, 455, 390, 119, 307, 396]\n[307, 396]\n[307, 396, 526]\n[307, 396, 526, 116]\n[307, 396, 526, 116, 48]\n[307, 396, 526, 116, 48, 520]\n[307, 396, 526, 116, 48, 520, 116]\n[307, 396, 526, 116, 48, 520, 116, 1164]\n[307, 396, 526, 116, 48, 520, 116, 1164, 168]\n[116, 46]\n[116, 46, 239]\n[116, 46, 239, 4]\n[116, 46, 239, 4, 116]\n[116, 46, 239, 4, 116, 53]\n[20, 96]\n[20, 96, 1]\n[20, 96, 1, 33]\n[20, 96, 1, 33, 157]\n[20, 96, 1, 33, 157, 2202]\n[20, 96, 1, 33, 157, 2202, 377]\n[20, 96, 1, 33, 157, 2202, 377, 1]\n[20, 96, 1, 33, 157, 2202, 377, 1, 1159]\n[20, 96, 1, 33, 157, 2202, 377, 1, 1159, 7]\n[20, 96, 1, 33, 157, 2202, 377, 1, 1159, 
7, 1]\n[125, 276]\n[125, 276, 22]\n[125, 276, 22, 3]\n[125, 276, 22, 3, 4]\n[234, 5]\n[234, 5, 452]\n[234, 5, 452, 453]\n[234, 5, 452, 453, 119]\n[234, 5, 452, 453, 119, 454]\n[234, 5, 452, 453, 119, 454, 234]\n[234, 5, 452, 453, 119, 454, 234, 191]\n[234, 5, 452, 453, 119, 454, 234, 191, 455]\n[234, 5, 452, 453, 119, 454, 234, 191, 455, 390]\n[234, 5, 452, 453, 119, 454, 234, 191, 455, 390, 119]\n[234, 5, 452, 453, 119, 454, 234, 191, 455, 390, 119, 221]\n[234, 5, 452, 453, 119, 454, 234, 191, 455, 390, 119, 221, 36]\n[234, 5, 452, 453, 119, 454, 234, 191, 455, 390, 119, 221, 36, 818]\n[234, 5, 452, 453, 119, 454, 234, 191, 455, 390, 119, 221, 36, 818, 36]\n[234, 5, 452, 453, 119, 454, 234, 191, 455, 390, 119, 221, 36, 818, 36, 97]\n[17, 24]\n[17, 24, 114]\n[17, 24, 114, 24]\n[17, 24, 114, 24, 19]\n[234, 5]\n[234, 5, 452]\n[234, 5, 452, 453]\n[234, 5, 452, 453, 119]\n[234, 5, 452, 453, 119, 454]\n[234, 5, 452, 453, 119, 454, 234]\n[234, 5, 452, 453, 119, 454, 234, 191]\n[234, 5, 452, 453, 119, 454, 234, 191, 455]\n[234, 5, 452, 453, 119, 454, 234, 191, 455, 390]\n[234, 5, 452, 453, 119, 454, 234, 191, 455, 390, 119]\n[234, 5, 452, 453, 119, 454, 234, 191, 455, 390, 119, 307]\n[641, 642]\n[641, 642, 308]\n[641, 642, 308, 80]\n[641, 642, 308, 80, 123]\n[641, 642, 308, 80, 123, 405]\n[641, 642, 308, 80, 123, 405, 2]\n[134, 134]\n[134, 134, 134]\n[134, 134, 134, 134]\n[134, 134, 134, 134, 40]\n[134, 134, 134, 134, 40, 238]\n[134, 134, 134, 134, 40, 238, 1161]\n[134, 134, 134, 134, 40, 238, 1161, 181]\n[134, 134, 134, 134, 40, 238, 1161, 181, 1]\n[4, 1162]\n[4, 1162, 1]\n[234, 5]\n[234, 5, 452]\n[234, 5, 452, 453]\n[234, 5, 452, 453, 119]\n[234, 5, 452, 453, 119, 454]\n[234, 5, 452, 453, 119, 454, 234]\n[234, 5, 452, 453, 119, 454, 234, 191]\n[234, 5, 452, 453, 119, 454, 234, 191, 455]\n[234, 5, 452, 453, 119, 454, 234, 191, 455, 390]\n[234, 5, 452, 453, 119, 454, 234, 191, 455, 390, 119]\n[234, 5, 452, 453, 119, 454, 234, 191, 455, 390, 119, 638]\n[638, 307]\n[638, 
307, 396]\n[638, 307, 396, 526]\n[638, 307, 396, 526, 219]\n[638, 307, 396, 526, 219, 7]\n[638, 307, 396, 526, 219, 7, 2]\n[2204, 2205]\n[2204, 2205, 126]\n[2204, 2205, 126, 1]\n[35, 2]\n[35, 2, 219]\n[35, 2, 219, 7]\n[35, 2, 219, 7, 2]\n[35, 2, 219, 7, 2, 111]\n[35, 2, 219, 7, 2, 111, 6]\n[35, 2, 219, 7, 2, 111, 6, 186]\n[35, 2, 219, 7, 2, 111, 6, 186, 301]\n[2, 219]\n[2, 219, 7]\n[2, 219, 7, 2]\n[2, 219, 7, 2, 111]\n[2, 219, 7, 2, 111, 98]\n[498, 1]\n[498, 1, 644]\n[498, 1, 644, 248]\n[498, 1, 644, 248, 4]\n[498, 1, 644, 248, 4, 517]\n[498, 1, 644, 248, 4, 517, 76]\n[11, 525]\n[11, 525, 32]\n[11, 525, 32, 31]\n[11, 525, 32, 31, 25]\n[11, 525, 32, 31, 25, 1]\n[11, 525, 32, 31, 25, 1, 227]\n[2, 219]\n[2, 219, 7]\n[2, 219, 7, 2]\n[2, 219, 7, 2, 111]\n[2, 219, 7, 2, 111, 4]\n[2, 219, 7, 2, 111, 4, 12]\n[2, 219, 7, 2, 111, 4, 12, 20]\n[5, 509]\n[5, 509, 115]\n[5, 509, 115, 16]\n[36, 61]\n[36, 61, 2206]\n[582, 62]\n[582, 62, 89]\n[582, 62, 89, 13]\n[58, 128]\n[58, 128, 188]\n[2209, 14]\n[2209, 14, 5]\n[2209, 14, 5, 128]\n[2209, 14, 5, 128, 472]\n[177, 34]\n[62, 1048]\n[62, 1048, 7]\n[62, 1048, 7, 41]\n[62, 1048, 7, 41, 2211]\n[591, 8]\n[591, 8, 89]\n[591, 8, 89, 4]\n[591, 8, 89, 4, 450]\n[591, 8, 89, 4, 450, 8]\n[58, 2212]\n[58, 2212, 8]\n[58, 2212, 8, 22]\n[58, 2212, 8, 22, 2]\n[35, 2]\n[35, 2, 219]\n[35, 2, 219, 7]\n[35, 2, 219, 7, 2]\n[35, 2, 219, 7, 2, 111]\n[35, 2, 219, 7, 2, 111, 6]\n[35, 2, 219, 7, 2, 111, 6, 186]\n[35, 2, 219, 7, 2, 111, 6, 186, 301]\n[2, 219]\n[2, 219, 7]\n[2, 219, 7, 2]\n[2, 219, 7, 2, 111]\n[2, 219, 7, 2, 111, 98]\n[498, 1]\n[498, 1, 644]\n[498, 1, 644, 248]\n[498, 1, 644, 248, 4]\n[498, 1, 644, 248, 4, 517]\n[498, 1, 644, 248, 4, 517, 76]\n[11, 525]\n[11, 525, 32]\n[11, 525, 32, 31]\n[11, 525, 32, 31, 25]\n[11, 525, 32, 31, 25, 1]\n[11, 525, 32, 31, 25, 1, 227]\n[2, 219]\n[2, 219, 7]\n[2, 219, 7, 2]\n[2, 219, 7, 2, 111]\n[2, 219, 7, 2, 111, 4]\n[2, 219, 7, 2, 111, 4, 12]\n[2, 219, 7, 2, 111, 4, 12, 20]\n[5, 509]\n[5, 509, 115]\n[5, 509, 
115, 16]\n[36, 61]\n[36, 61, 646]\n[582, 62]\n[582, 62, 11]\n[582, 62, 11, 1]\n[582, 62, 11, 1, 2214]\n[582, 62, 11, 1, 2214, 7]\n[29, 18]\n[29, 18, 766]\n[2216, 57]\n[2216, 57, 32]\n[2216, 57, 32, 2217]\n[177, 58]\n[177, 58, 2219]\n[1, 1142]\n[1, 1142, 7]\n[1, 1142, 7, 16]\n[1, 1142, 7, 16, 353]\n[591, 8]\n[591, 8, 89]\n[591, 8, 89, 4]\n[591, 8, 89, 4, 450]\n[591, 8, 89, 4, 450, 8]\n[58, 61]\n[58, 61, 1]\n[58, 61, 1, 2220]\n[35, 2]\n[35, 2, 219]\n[35, 2, 219, 7]\n[35, 2, 219, 7, 2]\n[35, 2, 219, 7, 2, 111]\n[35, 2, 219, 7, 2, 111, 6]\n[35, 2, 219, 7, 2, 111, 6, 186]\n[35, 2, 219, 7, 2, 111, 6, 186, 301]\n[2, 219]\n[2, 219, 7]\n[2, 219, 7, 2]\n[2, 219, 7, 2, 111]\n[2, 219, 7, 2, 111, 98]\n[498, 1]\n[498, 1, 644]\n[498, 1, 644, 248]\n[498, 1, 644, 248, 4]\n[498, 1, 644, 248, 4, 517]\n[498, 1, 644, 248, 4, 517, 76]\n[11, 525]\n[11, 525, 32]\n[11, 525, 32, 31]\n[11, 525, 32, 31, 25]\n[11, 525, 32, 31, 25, 1]\n[11, 525, 32, 31, 25, 1, 227]\n[2, 219]\n[2, 219, 7]\n[2, 219, 7, 2]\n[2, 219, 7, 2, 111]\n[2, 219, 7, 2, 111, 4]\n[2, 219, 7, 2, 111, 4, 12]\n[2, 219, 7, 2, 111, 4, 12, 20]\n[5, 509]\n[5, 509, 115]\n[5, 509, 115, 16]\n[177, 36]\n[177, 36, 61]\n[177, 36, 61, 2221]\n[107, 2]\n[107, 2, 245]\n[107, 2, 245, 14]\n[107, 2, 245, 14, 276]\n[107, 2, 245, 14, 276, 3]\n[2223, 32]\n[2223, 32, 248]\n[4, 2]\n[4, 2, 961]\n[4, 2, 961, 21]\n[4, 2, 961, 21, 32]\n[4, 2, 961, 21, 32, 2]\n[39, 5]\n[39, 5, 766]\n[4, 405]\n[4, 405, 62]\n[405, 62]\n[99, 27]\n[99, 27, 73]\n[99, 27, 73, 781]\n[99, 27, 73, 781, 208]\n[6, 88]\n[6, 88, 2226]\n[35, 2]\n[35, 2, 219]\n[35, 2, 219, 7]\n[35, 2, 219, 7, 2]\n[35, 2, 219, 7, 2, 111]\n[35, 2, 219, 7, 2, 111, 6]\n[35, 2, 219, 7, 2, 111, 6, 186]\n[35, 2, 219, 7, 2, 111, 6, 186, 301]\n[2, 219]\n[2, 219, 7]\n[2, 219, 7, 2]\n[2, 219, 7, 2, 111]\n[2, 219, 7, 2, 111, 98]\n[498, 1]\n[498, 1, 644]\n[498, 1, 644, 248]\n[498, 1, 644, 248, 4]\n[498, 1, 644, 248, 4, 517]\n[498, 1, 644, 248, 4, 517, 76]\n[11, 525]\n[11, 525, 32]\n[11, 525, 32, 31]\n[11, 525, 32, 
31, 25]\n[11, 525, 32, 31, 25, 1]\n[11, 525, 32, 31, 25, 1, 227]\n[2, 219]\n[2, 219, 7]\n[2, 219, 7, 2]\n[2, 219, 7, 2, 111]\n[2, 219, 7, 2, 111, 4]\n[2, 219, 7, 2, 111, 4, 12]\n[2, 219, 7, 2, 111, 4, 12, 20]\n[5, 509]\n[5, 509, 115]\n[5, 509, 115, 16]\n[5, 509, 115, 16, 645]\n[5, 509, 115, 16, 645, 25]\n[5, 509, 115, 16, 645, 25, 2227]\n[5, 509, 115, 16, 645, 25, 2227, 22]\n[5, 509, 115, 16, 645, 25, 2227, 22, 2228]\n[5, 509, 115, 16, 645, 25, 2227, 22, 2228, 4]\n[3, 19]\n[3, 19, 48]\n[3, 19, 48, 17]\n[3, 19, 48, 17, 356]\n[3, 19, 48, 17, 356, 579]\n[3, 19, 48, 17, 356, 579, 7]\n[25, 17]\n[25, 17, 553]\n[25, 17, 553, 11]\n[25, 17, 553, 11, 1]\n[49, 2]\n[49, 2, 2229]\n[49, 2, 2229, 13]\n[49, 2, 2229, 13, 2]\n[25, 17]\n[25, 17, 2231]\n[25, 17, 2231, 2232]\n[6, 1]\n[6, 1, 1165]\n[6, 1, 1165, 7]\n[6, 1, 1165, 7, 17]\n[6, 1, 1165, 7, 17, 345]\n[6, 1, 1165, 7, 17, 345, 60]\n[6, 1, 1165, 7, 17, 345, 60, 60]\n[154, 53]\n[154, 53, 117]\n[154, 53, 117, 2]\n[57, 6]\n[57, 6, 1]\n[57, 6, 1, 1166]\n[154, 53]\n[154, 53, 117]\n[154, 53, 117, 2]\n[154, 53, 117, 2, 345]\n[154, 53, 117, 2, 345, 4]\n[154, 53, 117, 2, 345, 4, 156]\n[154, 53, 117, 2, 345, 4, 156, 9]\n[57, 87]\n[57, 87, 1]\n[57, 56]\n[57, 56, 1]\n[57, 56, 1, 820]\n[57, 56, 1, 820, 15]\n[60, 154]\n[60, 154, 53]\n[60, 154, 53, 117]\n[60, 154, 53, 117, 2]\n[60, 154, 53, 117, 2, 345]\n[60, 154, 53, 117, 2, 345, 29]\n[60, 154, 53, 117, 2, 345, 29, 3]\n[60, 154, 53, 117, 2, 345, 29, 3, 156]\n[60, 154, 53, 117, 2, 345, 29, 3, 156, 9]\n[60, 154, 53, 117, 2, 345, 29, 3, 156, 9, 2233]\n[60, 154, 53, 117, 2, 345, 29, 3, 156, 9, 2233, 57]\n[20, 54]\n[20, 54, 92]\n[20, 54, 92, 49]\n[20, 54, 92, 49, 2234]\n[20, 54, 92, 49, 2234, 247]\n[3, 19]\n[3, 19, 393]\n[3, 19, 393, 11]\n[3, 19, 393, 11, 1]\n[515, 2235]\n[515, 2235, 4]\n[25, 17]\n[25, 17, 343]\n[25, 17, 343, 2236]\n[6, 1]\n[6, 1, 1165]\n[6, 1, 1165, 7]\n[6, 1, 1165, 7, 17]\n[6, 1, 1165, 7, 17, 345]\n[6, 1, 1165, 7, 17, 345, 60]\n[6, 1, 1165, 7, 17, 345, 60, 60]\n[154, 53]\n[154, 
53, 117]\n[154, 53, 117, 2]\n[57, 6]\n[57, 6, 1]\n[57, 6, 1, 1166]\n[154, 53]\n[154, 53, 117]\n[154, 53, 117, 2]\n[154, 53, 117, 2, 345]\n[154, 53, 117, 2, 345, 4]\n[154, 53, 117, 2, 345, 4, 156]\n[154, 53, 117, 2, 345, 4, 156, 9]\n[57, 87]\n[57, 87, 1]\n[57, 56]\n[57, 56, 1]\n[57, 56, 1, 820]\n[57, 56, 1, 820, 15]\n[60, 154]\n[60, 154, 53]\n[60, 154, 53, 117]\n[60, 154, 53, 117, 2]\n[60, 154, 53, 117, 2, 345]\n[60, 154, 53, 117, 2, 345, 120]\n[60, 154, 53, 117, 2, 345, 120, 6]\n[60, 154, 53, 117, 2, 345, 120, 6, 23]\n[60, 154, 53, 117, 2, 345, 120, 6, 23, 2]\n[2237, 101]\n[2237, 101, 6]\n[29, 49]\n[29, 49, 609]\n[29, 49, 609, 115]\n[29, 49, 609, 115, 2]\n[29, 49, 609, 115, 2, 619]\n[71, 2]\n[71, 2, 112]\n[71, 2, 112, 394]\n[71, 2, 112, 394, 1]\n[125, 20]\n[125, 20, 3]\n[125, 20, 3, 48]\n[125, 20, 3, 48, 6]\n[4, 39]\n[4, 39, 3]\n[4, 39, 3, 240]\n[4, 39, 3, 240, 9]\n[17, 112]\n[17, 112, 44]\n[17, 112, 44, 787]\n[17, 112, 44, 787, 40]\n[17, 112, 44, 787, 40, 83]\n[17, 112, 44, 787, 40, 83, 37]\n[17, 112, 44, 787, 40, 83, 37, 619]\n[17, 112, 44, 787, 40, 83, 37, 619, 22]\n[17, 112, 44, 787, 40, 83, 37, 619, 22, 1]\n[17, 112, 44, 787, 40, 83, 37, 619, 22, 1, 34]\n[17, 112, 44, 787, 40, 83, 37, 619, 22, 1, 34, 5]\n[6, 94]\n[6, 94, 8]\n[4, 37]\n[4, 37, 477]\n[1, 427]\n[1, 427, 239]\n[1, 427, 239, 2240]\n[1, 427, 239, 2240, 81]\n[1, 427, 239, 2240, 81, 37]\n[1, 427, 239, 2240, 81, 37, 619]\n[1, 427, 239, 2240, 81, 37, 619, 22]\n[1, 427, 239, 2240, 81, 37, 619, 22, 1]\n[1, 427, 239, 2240, 81, 37, 619, 22, 1, 34]\n[1, 427, 239, 2240, 81, 37, 619, 22, 1, 34, 5]\n[6, 94]\n[6, 94, 8]\n[6, 94, 8, 821]\n[45, 14]\n[45, 14, 76]\n[45, 14, 76, 503]\n[240, 30]\n[240, 30, 5]\n[240, 30, 5, 48]\n[240, 30, 5, 48, 6]\n[5, 48]\n[5, 48, 26]\n[5, 48, 26, 34]\n[212, 22]\n[212, 22, 3]\n[212, 22, 3, 34]\n[2242, 4]\n[2242, 4, 83]\n[2242, 4, 83, 34]\n[14, 123]\n[14, 123, 1005]\n[1082, 8]\n[1082, 8, 87]\n[1082, 8, 87, 34]\n[16, 63]\n[16, 63, 822]\n[7, 34]\n[212, 22]\n[212, 22, 3]\n[212, 22, 3, 
154]\n[212, 22, 3, 154, 132]\n[212, 22, 3, 154, 132, 89]\n[212, 22, 3, 154, 132, 89, 6]\n[212, 22, 3, 154, 132, 89, 6, 2243]\n[212, 22, 3, 154, 132, 89, 6, 2243, 6]\n[212, 22, 3, 154, 132, 89, 6, 2243, 6, 1168]\n[212, 22, 3, 154, 132, 89, 6, 2243, 6, 1168, 1]\n[171, 50]\n[171, 50, 156]\n[171, 50, 156, 8]\n[171, 50, 156, 8, 679]\n[171, 50, 156, 8, 679, 29]\n[171, 50, 156, 8, 679, 29, 5]\n[171, 50, 156, 8, 679, 29, 5, 904]\n[171, 50, 156, 8, 679, 29, 5, 904, 22]\n[49, 1]\n[49, 1, 2245]\n[49, 1, 2245, 2246]\n[49, 1, 2245, 2246, 5]\n[49, 1, 2245, 2246, 5, 88]\n[26, 3]\n[26, 3, 19]\n[26, 3, 19, 1121]\n[26, 3, 19, 1121, 224]\n[26, 3, 19, 1121, 224, 93]\n[621, 75]\n[621, 75, 71]\n[621, 75, 71, 2]\n[621, 75, 71, 2, 80]\n[621, 75, 71, 2, 80, 72]\n[621, 75, 71, 2, 80, 72, 7]\n[621, 75, 71, 2, 80, 72, 7, 3]\n[621, 75, 71, 2, 80, 72, 7, 3, 2247]\n[621, 75, 71, 2, 80, 72, 7, 3, 2247, 24]\n[621, 75, 71, 2, 80, 72, 7, 3, 2247, 24, 2]\n[621, 75, 71, 2, 80, 72, 7, 3, 2247, 24, 2, 907]\n[621, 75, 71, 2, 80, 72, 7, 3, 2247, 24, 2, 907, 26]\n[621, 75, 71, 2, 80, 72, 7, 3, 2247, 24, 2, 907, 26, 11]\n[621, 75, 71, 2, 80, 72, 7, 3, 2247, 24, 2, 907, 26, 11, 129]\n[92, 3]\n[92, 3, 94]\n[92, 3, 94, 17]\n[92, 3, 94, 17, 2248]\n[92, 3, 94, 17, 2248, 3]\n[92, 3, 94, 17, 2248, 3, 38]\n[92, 3, 94, 17, 2248, 3, 38, 348]\n[92, 3, 94, 17, 2248, 3, 38, 348, 6]\n[49, 2]\n[49, 2, 2249]\n[49, 2, 2249, 1083]\n[49, 2, 2249, 1083, 1006]\n[4, 3]\n[4, 3, 595]\n[4, 3, 595, 35]\n[4, 3, 595, 35, 2]\n[324, 75]\n[324, 75, 71]\n[324, 75, 71, 2]\n[324, 75, 71, 2, 80]\n[324, 75, 71, 2, 80, 72]\n[324, 75, 71, 2, 80, 72, 7]\n[324, 75, 71, 2, 80, 72, 7, 3]\n[324, 75, 71, 2, 80, 72, 7, 3, 37]\n[324, 75, 71, 2, 80, 72, 7, 3, 37, 42]\n[324, 75, 71, 2, 80, 72, 7, 3, 37, 42, 160]\n[324, 75, 71, 2, 80, 72, 7, 3, 37, 42, 160, 2250]\n[324, 75, 71, 2, 80, 72, 7, 3, 37, 42, 160, 2250, 16]\n[81, 796]\n[81, 796, 6]\n[81, 796, 6, 192]\n[81, 796, 6, 192, 64]\n[81, 796, 6, 192, 64, 149]\n[620, 61]\n[620, 61, 5]\n[620, 61, 5, 
2]\n[620, 61, 5, 2, 2251]\n[620, 61, 5, 2, 2251, 13]\n[620, 61, 5, 2, 2251, 13, 2252]\n[620, 61, 5, 2, 2251, 13, 2252, 22]\n[620, 61, 5, 2, 2251, 13, 2252, 22, 2253]\n[31, 2255]\n[31, 2255, 35]\n[31, 2255, 35, 287]\n[31, 2255, 35, 287, 406]\n[31, 2255, 35, 287, 406, 6]\n[361, 84]\n[361, 84, 378]\n[361, 84, 378, 51]\n[361, 84, 378, 51, 106]\n[361, 84, 378, 51, 106, 87]\n[45, 5]\n[45, 5, 196]\n[45, 5, 196, 112]\n[45, 5, 196, 112, 14]\n[45, 5, 196, 112, 14, 5]\n[45, 5, 196, 112, 14, 5, 149]\n[45, 5, 196, 112, 14, 5, 149, 69]\n[45, 5, 196, 112, 14, 5, 149, 69, 6]\n[45, 5, 196, 112, 14, 5, 149, 69, 6, 2257]\n[45, 5, 196, 112, 14, 5, 149, 69, 6, 2257, 10]\n[45, 5, 196, 112, 14, 5, 149, 69, 6, 2257, 10, 2]\n[18, 211]\n[18, 211, 10]\n[18, 211, 10, 823]\n[18, 211, 10, 823, 24]\n[18, 211, 10, 823, 24, 1]\n[18, 211, 10, 823, 24, 1, 824]\n[10, 2]\n[25, 20]\n[25, 20, 1]\n[25, 20, 1, 825]\n[25, 20, 1, 825, 7]\n[25, 20, 1, 825, 7, 2]\n[25, 20, 1, 825, 7, 2, 518]\n[10, 2]\n[25, 20]\n[25, 20, 1]\n[25, 20, 1, 527]\n[25, 20, 1, 527, 7]\n[25, 20, 1, 527, 7, 2]\n[25, 20, 1, 527, 7, 2, 826]\n[827, 24]\n[827, 24, 1]\n[827, 24, 1, 375]\n[827, 24, 1, 375, 220]\n[827, 24, 1, 375, 220, 7]\n[827, 24, 1, 375, 220, 7, 1]\n[827, 24, 1, 375, 220, 7, 1, 227]\n[827, 24, 1, 375, 220, 7, 1, 227, 55]\n[827, 24, 1, 375, 220, 7, 1, 227, 55, 15]\n[827, 24, 1, 375, 220, 7, 1, 227, 55, 15, 2258]\n[827, 24, 1, 375, 220, 7, 1, 227, 55, 15, 2258, 2259]\n[827, 24, 1, 375, 220, 7, 1, 227, 55, 15, 2258, 2259, 105]\n[827, 24, 1, 375, 220, 7, 1, 227, 55, 15, 2258, 2259, 105, 512]\n[827, 24, 1, 375, 220, 7, 1, 227, 55, 15, 2258, 2259, 105, 512, 1]\n[827, 24, 1, 375, 220, 7, 1, 227, 55, 15, 2258, 2259, 105, 512, 1, 1169]\n[1032, 16]\n[1032, 16, 95]\n[1032, 16, 95, 687]\n[1032, 16, 95, 687, 4]\n[1032, 16, 95, 687, 4, 3]\n[1032, 16, 95, 687, 4, 3, 279]\n[49, 2261]\n[49, 2261, 22]\n[49, 2261, 22, 1]\n[49, 2261, 22, 1, 1171]\n[49, 2261, 22, 1, 1171, 7]\n[28, 2263]\n[28, 2263, 57]\n[28, 2263, 57, 53]\n[28, 2263, 57, 53, 
333]\n[28, 2263, 57, 53, 333, 49]\n[69, 139]\n[69, 139, 5]\n[69, 139, 5, 71]\n[69, 139, 5, 71, 2]\n[69, 139, 5, 71, 2, 80]\n[69, 139, 5, 71, 2, 80, 72]\n[69, 139, 5, 71, 2, 80, 72, 7]\n[69, 139, 5, 71, 2, 80, 72, 7, 3]\n[69, 139, 5, 71, 2, 80, 72, 7, 3, 10]\n[69, 139, 5, 71, 2, 80, 72, 7, 3, 10, 2]\n[18, 211]\n[18, 211, 10]\n[18, 211, 10, 823]\n[18, 211, 10, 823, 24]\n[18, 211, 10, 823, 24, 1]\n[18, 211, 10, 823, 24, 1, 824]\n[10, 2]\n[25, 20]\n[25, 20, 1]\n[25, 20, 1, 825]\n[25, 20, 1, 825, 7]\n[25, 20, 1, 825, 7, 2]\n[25, 20, 1, 825, 7, 2, 518]\n[10, 2]\n[25, 20]\n[25, 20, 1]\n[25, 20, 1, 527]\n[25, 20, 1, 527, 7]\n[25, 20, 1, 527, 7, 2]\n[25, 20, 1, 527, 7, 2, 826]\n[827, 24]\n[827, 24, 1]\n[827, 24, 1, 375]\n[827, 24, 1, 375, 220]\n[827, 24, 1, 375, 220, 7]\n[827, 24, 1, 375, 220, 7, 1]\n[827, 24, 1, 375, 220, 7, 1, 227]\n[827, 24, 1, 375, 220, 7, 1, 227, 10]\n[827, 24, 1, 375, 220, 7, 1, 227, 10, 2]\n[18, 211]\n[18, 211, 10]\n[18, 211, 10, 823]\n[18, 211, 10, 823, 24]\n[18, 211, 10, 823, 24, 1]\n[18, 211, 10, 823, 24, 1, 824]\n[10, 2]\n[25, 20]\n[25, 20, 1]\n[25, 20, 1, 825]\n[25, 20, 1, 825, 7]\n[25, 20, 1, 825, 7, 2]\n[25, 20, 1, 825, 7, 2, 518]\n[10, 2]\n[25, 20]\n[25, 20, 1]\n[25, 20, 1, 527]\n[25, 20, 1, 527, 7]\n[25, 20, 1, 527, 7, 2]\n[25, 20, 1, 527, 7, 2, 826]\n[827, 24]\n[827, 24, 1]\n[827, 24, 1, 375]\n[827, 24, 1, 375, 220]\n[827, 24, 1, 375, 220, 7]\n[827, 24, 1, 375, 220, 7, 1]\n[827, 24, 1, 375, 220, 7, 1, 227]\n[827, 24, 1, 375, 220, 7, 1, 227, 528]\n[827, 24, 1, 375, 220, 7, 1, 227, 528, 828]\n[827, 24, 1, 375, 220, 7, 1, 227, 528, 828, 119]\n[5, 48]\n[5, 48, 759]\n[5, 48, 759, 1]\n[5, 48, 759, 1, 2264]\n[5, 48, 759, 1, 2264, 6]\n[5, 48, 759, 1, 2264, 6, 94]\n[528, 1172]\n[528, 1172, 1173]\n[5, 23]\n[5, 23, 17]\n[50, 48]\n[50, 48, 2265]\n[50, 48, 2265, 1]\n[50, 48, 2265, 1, 63]\n[50, 48, 2265, 1, 63, 65]\n[50, 48, 2265, 1, 63, 65, 124]\n[26, 31]\n[26, 31, 555]\n[26, 31, 555, 101]\n[26, 31, 555, 101, 2266]\n[31, 15]\n[31, 15, 101]\n[31, 15, 
101, 64]\n[31, 15, 101, 64, 3]\n[3, 23]\n[3, 23, 64]\n[3, 23, 64, 3]\n[3, 23, 64, 3, 38]\n[3, 23, 64, 3, 38, 67]\n[3, 23, 64, 3, 38, 67, 54]\n[3, 23, 64, 3, 38, 67, 54, 31]\n[3, 23, 64, 3, 38, 67, 54, 31, 467]\n[3, 23, 64, 3, 38, 67, 54, 31, 467, 286]\n[3, 23, 64, 3, 38, 67, 54, 31, 467, 286, 9]\n[312, 3]\n[312, 3, 91]\n[312, 3, 91, 16]\n[312, 3, 91, 16, 868]\n[312, 3]\n[312, 3, 91]\n[312, 3, 91, 37]\n[312, 3, 91, 37, 1]\n[1, 82]\n[1, 82, 64]\n[1, 82, 64, 123]\n[67, 54]\n[67, 54, 31]\n[67, 54, 31, 869]\n[67, 54, 31, 869, 870]\n[69, 468]\n[69, 468, 872]\n[69, 468, 872, 19]\n[69, 468, 872, 19, 34]\n[69, 468, 872, 19, 34, 873]\n[469, 96]\n[469, 96, 66]\n[469, 96, 66, 551]\n[348, 143]\n[348, 143, 35]\n[348, 143, 35, 138]\n[348, 143, 35, 138, 76]\n[348, 143, 35, 138, 76, 35]\n[348, 143, 35, 138, 76, 35, 874]\n[348, 143, 35, 138, 76, 35, 874, 4]\n[348, 143, 35, 138, 76, 35, 874, 4, 875]\n[76, 35]\n[76, 35, 877]\n[76, 35, 877, 178]\n[76, 35, 877, 178, 4]\n[76, 35, 877, 178, 4, 878]\n[76, 35, 877, 178, 4, 878, 178]\n[3, 120]\n[5, 35]\n[26, 64]\n[27, 208]\n[5, 120]\n[5, 120, 144]\n[5, 120, 144, 5]\n[5, 120, 144, 5, 120]\n[5, 120, 144, 5, 120, 6]\n[5, 120, 144, 5, 120, 6, 10]\n[5, 120, 144, 5, 120, 6, 10, 56]\n[5, 120, 144, 5, 120, 6, 10, 56, 1]\n[5, 120, 144, 5, 120, 6, 10, 56, 1, 152]\n[5, 120]\n[5, 120, 6]\n[120, 6]\n[120, 6, 51]\n[120, 6, 51, 287]\n[675, 96]\n[675, 96, 11]\n[881, 314]\n[881, 314, 287]\n[881, 314, 287, 60]\n[882, 17]\n[882, 17, 883]\n[882, 17, 883, 3]\n[882, 17, 883, 3, 52]\n[882, 17, 883, 3, 52, 132]\n[882, 17, 883, 3, 52, 132, 102]\n[884, 38]\n[884, 38, 885]\n[884, 38, 885, 22]\n[884, 38, 885, 22, 886]\n[887, 238]\n[887, 238, 89]\n[209, 14]\n[209, 14, 225]\n[209, 14, 225, 350]\n[209, 14, 225, 350, 888]\n[209, 14, 225, 350, 888, 57]\n[209, 14, 225, 350, 888, 57, 56]\n[209, 14, 225, 350, 888, 57, 56, 50]\n[57, 56]\n[57, 56, 50]\n[57, 56]\n[57, 56, 50]\n[57, 56, 50, 409]\n[57, 56, 50, 409, 20]\n[57, 56, 50, 409, 20, 79]\n[57, 56, 50, 409, 20, 79, 13]\n[57, 
56, 50, 409, 20, 79, 13, 1]\n[112, 5]\n[112, 5, 139]\n[153, 7]\n[153, 7, 14]\n[153, 7, 14, 33]\n[153, 7, 14, 33, 30]\n[153, 7, 14, 33, 30, 128]\n[153, 7, 14, 33, 30, 128, 5]\n[39, 5]\n[39, 5, 139]\n[72, 7]\n[72, 7, 288]\n[30, 128]\n[30, 128, 5]\n[6, 677]\n[6, 677, 2]\n[260, 11]\n[260, 11, 1]\n[890, 11]\n[14, 50]\n[14, 50, 52]\n[14, 50, 52, 891]\n[14, 50, 52, 891, 226]\n[261, 353]\n[892, 7]\n[354, 6]\n[354, 6, 289]\n[354, 6, 289, 4]\n[354, 6, 289, 4, 37]\n[354, 6, 289, 4, 37, 354]\n[354, 6, 289, 4, 37, 354, 6]\n[354, 6, 289, 4, 37, 354, 6, 23]\n[354, 6, 289, 4, 37, 354, 6, 23, 30]\n[354, 6, 289, 4, 37, 354, 6, 23, 30, 1]\n[354, 6, 289, 4, 37, 354, 6, 23, 30, 1, 152]\n[472, 287]\n[472, 287, 16]\n[4, 132]\n[4, 132, 133]\n[209, 2]\n[209, 2, 129]\n[209, 2, 129, 4]\n[209, 2, 129, 4, 109]\n[209, 2, 129, 4, 109, 555]\n[209, 1]\n[209, 1, 225]\n[209, 1, 225, 355]\n[209, 1, 225, 355, 896]\n[209, 1, 225, 355, 896, 9]\n[209, 1, 225, 355, 896, 9, 16]\n[312, 5]\n[41, 6]\n[41, 6, 897]\n[41, 6, 897, 14]\n[41, 6, 897, 14, 473]\n[72, 7]\n[72, 7, 1]\n[112, 5]\n[112, 5, 139]\n[153, 7]\n[153, 7, 14]\n[72, 7]\n[72, 7, 1]\n[112, 5]\n[112, 5, 139]\n[153, 7]\n[153, 7, 17]\n[153, 7, 17, 33]\n[153, 7, 17, 33, 29]\n[153, 7, 17, 33, 29, 310]\n[153, 7, 17, 33, 29, 310, 131]\n[138, 61]\n[95, 536]\n[95, 536, 18]\n[95, 536, 18, 537]\n[460, 403]\n[460, 403, 16]\n[460, 403, 16, 63]\n[460, 403, 16, 63, 4]\n[460, 403, 16, 63, 4, 29]\n[460, 403, 16, 63, 4, 29, 58]\n[460, 403, 16, 63, 4, 29, 58, 61]\n[5, 61]\n[5, 61, 36]\n[5, 61, 36, 6]\n[5, 61, 36, 6, 657]\n[5, 61, 36, 6, 657, 62]\n[4, 29]\n[4, 29, 58]\n[4, 29, 58, 61]\n[4, 29, 58, 61, 257]\n[4, 29, 58, 61, 257, 28]\n[4, 29, 58, 61, 257, 28, 61]\n[29, 58]\n[29, 58, 131]\n[29, 58, 131, 8]\n[29, 58, 131, 8, 87]\n[29, 58, 131, 8, 87, 1]\n[29, 58, 131, 8, 87, 1, 853]\n[29, 58, 131, 8, 87, 1, 853, 4]\n[29, 58, 131, 8, 87, 1, 853, 4, 1]\n[18, 107]\n[18, 107, 175]\n[18, 107, 175, 207]\n[18, 107, 175, 207, 14]\n[18, 107, 175, 207, 14, 61]\n[21, 58]\n[21, 58, 
4]\n[21, 58, 4, 5]\n[32, 9]\n[32, 9, 61]\n[32, 9, 61, 258]\n[32, 9, 61, 258, 6]\n[32, 9, 61, 258, 6, 10]\n[32, 9, 61, 258, 6, 10, 4]\n[32, 9, 61, 258, 6, 10, 4, 29]\n[32, 9, 61, 258, 6, 10, 4, 29, 58]\n[32, 9, 61, 258, 6, 10, 4, 29, 58, 61]\n[5, 61]\n[5, 61, 36]\n[5, 61, 36, 6]\n[5, 61, 36, 6, 854]\n[4, 5]\n[4, 5, 149]\n[4, 5, 149, 14]\n[4, 5, 149, 14, 58]\n[4, 5, 149, 14, 58, 131]\n[4, 5, 149, 14, 58, 131, 8]\n[4, 5, 149, 14, 58, 131, 8, 28]\n[4, 5, 149, 14, 58, 131, 8, 28, 1]\n[4, 5, 149, 14, 58, 131, 8, 28, 1, 311]\n[4, 5, 149, 14, 58, 131, 8, 28, 1, 311, 659]\n[5, 855]\n[5, 855, 1]\n[58, 856]\n[58, 856, 6]\n[58, 856, 6, 660]\n[5, 61]\n[5, 61, 661]\n[5, 61, 661, 539]\n[5, 61, 661, 539, 166]\n[5, 61, 661, 539, 166, 5]\n[5, 61, 661, 539, 166, 5, 857]\n[5, 61, 661, 539, 166, 5, 857, 22]\n[5, 61, 661, 539, 166, 5, 857, 22, 1]\n[29, 461]\n[5, 44]\n[5, 44, 150]\n[5, 44, 150, 41]\n[5, 44, 150, 41, 3]\n[5, 44, 150, 41, 3, 347]\n[5, 44, 150, 41, 3, 347, 4]\n[42, 190]\n[42, 190, 461]\n[42, 190, 461, 67]\n[42, 190, 461, 67, 16]\n[58, 540]\n[58, 540, 54]\n[58, 540, 54, 8]\n[58, 540, 54, 8, 4]\n[58, 540, 54, 8, 4, 405]\n[58, 540, 54, 8, 4, 405, 8]\n[58, 540, 54, 8, 4, 405, 8, 21]\n[58, 540, 54, 8, 4, 405, 8, 21, 32]\n[58, 540, 54, 8, 4, 405, 8, 21, 32, 58]\n[58, 540, 54, 8, 4, 405, 8, 21, 32, 58, 462]\n[58, 540, 54, 8, 4, 405, 8, 21, 32, 58, 462, 6]\n[32, 58]\n[32, 58, 131]\n[29, 58]\n[29, 58, 131]\n[29, 58, 131, 8]\n[29, 58, 131, 8, 29]\n[29, 58, 131, 8, 29, 310]\n[29, 58, 131, 8, 29, 310, 131]\n[138, 61]\n[95, 536]\n[95, 536, 18]\n[95, 536, 18, 537]\n[460, 403]\n[460, 403, 16]\n[460, 403, 16, 63]\n[460, 403, 16, 63, 29]\n[460, 403, 16, 63, 29, 58]\n[460, 403, 16, 63, 29, 58, 131]\n[460, 403, 16, 63, 29, 58, 131, 8]\n[460, 403, 16, 63, 29, 58, 131, 8, 221]\n[460, 403, 16, 63, 29, 58, 131, 8, 221, 20]\n[460, 403, 16, 63, 29, 58, 131, 8, 221, 20, 192]\n[460, 403, 16, 63, 29, 58, 131, 8, 221, 20, 192, 269]\n[460, 403, 16, 63, 29, 58, 131, 8, 221, 20, 192, 269, 812]\n[460, 403, 
16, 63, 29, 58, 131, 8, 221, 20, 192, 269, 812, 65]\n[460, 403, 16, 63, 29, 58, 131, 8, 221, 20, 192, 269, 812, 65, 1]\n[20, 192]\n[20, 192, 311]\n[20, 192, 311, 799]\n[20, 192, 311, 799, 1056]\n[20, 14]\n[20, 14, 55]\n[20, 14, 55, 42]\n[20, 14, 55, 42, 222]\n[21, 69]\n[21, 69, 2268]\n[21, 69, 2268, 76]\n[45, 37]\n[45, 37, 66]\n[45, 37, 66, 1176]\n[45, 37, 66, 1176, 13]\n[45, 37, 66, 1176, 13, 1]\n[45, 37]\n[45, 37, 66]\n[45, 37, 66, 603]\n[45, 37, 66, 603, 5]\n[609, 66]\n[609, 66, 12]\n[609, 66, 12, 20]\n[609, 66, 12, 20, 28]\n[37, 56]\n[37, 56, 37]\n[37, 56, 37, 258]\n[37, 56, 37, 258, 6]\n[37, 56, 37, 258, 6, 10]\n[37, 56, 37, 258, 6, 10, 4]\n[37, 56, 37, 258, 6, 10, 4, 54]\n[37, 56, 37, 258, 6, 10, 4, 54, 338]\n[37, 56, 37, 258, 6, 10, 4, 54, 338, 5]\n[37, 56, 37, 258, 6, 10, 4, 54, 338, 5, 51]\n[37, 56, 37, 258, 6, 10, 4, 54, 338, 5, 51, 1]\n[4, 12]\n[4, 12, 32]\n[4, 12, 32, 1]\n[4, 12, 32, 1, 1178]\n[4, 12, 32, 1, 1178, 123]\n[4, 54]\n[4, 54, 338]\n[4, 54, 338, 5]\n[4, 54, 338, 5, 51]\n[4, 54, 338, 5, 51, 1]\n[4, 12]\n[4, 12, 32]\n[4, 12, 32, 1]\n[4, 12, 32, 1, 185]\n[4, 12, 32, 1, 185, 15]\n[4, 12]\n[4, 12, 260]\n[4, 12, 260, 4]\n[4, 12, 260, 4, 505]\n[4, 12, 260, 4, 505, 4]\n[4, 1]\n[4, 1, 33]\n[4, 1, 33, 123]\n[4, 1, 33, 123, 324]\n[20, 54]\n[20, 54, 92]\n[20, 54, 92, 138]\n[20, 54, 92, 138, 578]\n[45, 14]\n[45, 14, 5]\n[45, 14, 5, 51]\n[45, 14, 5, 51, 3]\n[45, 14, 5, 51, 3, 20]\n[45, 14, 5, 51, 3, 20, 192]\n[45, 14, 5, 51, 3, 20, 192, 269]\n[45, 14, 5, 51, 3, 20, 192, 269, 2269]\n[45, 14, 5, 51, 3, 20, 192, 269, 2269, 89]\n[45, 14, 5, 51, 3, 20, 192, 269, 2269, 89, 2]\n[20, 192]\n[20, 192, 311]\n[20, 192, 311, 1150]\n[20, 192, 311, 1150, 13]\n[20, 192, 311, 1150, 13, 2]\n[20, 14]\n[20, 14, 55]\n[20, 14, 55, 42]\n[20, 14, 55, 42, 694]\n[239, 1]\n[239, 1, 68]\n[239, 1, 68, 50]\n[45, 143]\n[45, 143, 66]\n[45, 143, 66, 1075]\n[45, 143, 66, 1075, 13]\n[45, 143, 66, 1075, 13, 1]\n[45, 143]\n[45, 143, 66]\n[45, 143, 66, 603]\n[45, 143, 66, 603, 5]\n[39, 
143]\n[39, 143, 66]\n[39, 143, 66, 12]\n[39, 143, 66, 12, 782]\n[37, 56]\n[37, 56, 37]\n[37, 56, 37, 258]\n[37, 56, 37, 258, 6]\n[37, 56, 37, 258, 6, 53]\n[37, 56, 37, 258, 6, 53, 4]\n[37, 56, 37, 258, 6, 53, 4, 54]\n[37, 56, 37, 258, 6, 53, 4, 54, 338]\n[37, 56, 37, 258, 6, 53, 4, 54, 338, 5]\n[37, 56, 37, 258, 6, 53, 4, 54, 338, 5, 51]\n[37, 56, 37, 258, 6, 53, 4, 54, 338, 5, 51, 1]\n[37, 56, 37, 258, 6, 53, 4, 54, 338, 5, 51, 1, 130]\n[37, 56, 37, 258, 6, 53, 4, 54, 338, 5, 51, 1, 130, 4]\n[37, 56, 37, 258, 6, 53, 4, 54, 338, 5, 51, 1, 130, 4, 12]\n[37, 56, 37, 258, 6, 53, 4, 54, 338, 5, 51, 1, 130, 4, 12, 32]\n[37, 56, 37, 258, 6, 53, 4, 54, 338, 5, 51, 1, 130, 4, 12, 32, 1]\n[37, 56, 37, 258, 6, 53, 4, 54, 338, 5, 51, 1, 130, 4, 12, 32, 1, 1178]\n[37, 56, 37, 258, 6, 53, 4, 54, 338, 5, 51, 1, 130, 4, 12, 32, 1, 1178, 123]\n[37, 56, 37, 258, 6, 53, 4, 54, 338, 5, 51, 1, 130, 4, 12, 32, 1, 1178, 123, 1179]\n[37, 56, 37, 258, 6, 53, 4, 54, 338, 5, 51, 1, 130, 4, 12, 32, 1, 1178, 123, 1179, 4]\n[37, 56, 37, 258, 6, 53, 4, 54, 338, 5, 51, 1, 130, 4, 12, 32, 1, 1178, 123, 1179, 4, 54]\n[37, 56, 37, 258, 6, 53, 4, 54, 338, 5, 51, 1, 130, 4, 12, 32, 1, 1178, 123, 1179, 4, 54, 338]\n[37, 56, 37, 258, 6, 53, 4, 54, 338, 5, 51, 1, 130, 4, 12, 32, 1, 1178, 123, 1179, 4, 54, 338, 5]\n[37, 56, 37, 258, 6, 53, 4, 54, 338, 5, 51, 1, 130, 4, 12, 32, 1, 1178, 123, 1179, 4, 54, 338, 5, 51]\n[37, 56, 37, 258, 6, 53, 4, 54, 338, 5, 51, 1, 130, 4, 12, 32, 1, 1178, 123, 1179, 4, 54, 338, 5, 51, 1]\n[37, 56, 37, 258, 6, 53, 4, 54, 338, 5, 51, 1, 130, 4, 12, 32, 1, 1178, 123, 1179, 4, 54, 338, 5, 51, 1, 130]\n[37, 56, 37, 258, 6, 53, 4, 54, 338, 5, 51, 1, 130, 4, 12, 32, 1, 1178, 123, 1179, 4, 54, 338, 5, 51, 1, 130, 4]\n[37, 56, 37, 258, 6, 53, 4, 54, 338, 5, 51, 1, 130, 4, 12, 32, 1, 1178, 123, 1179, 4, 54, 338, 5, 51, 1, 130, 4, 12]\n[37, 56, 37, 258, 6, 53, 4, 54, 338, 5, 51, 1, 130, 4, 12, 32, 1, 1178, 123, 1179, 4, 54, 338, 5, 51, 1, 130, 4, 12, 32]\n[37, 56, 37, 258, 6, 53, 4, 
54, 338, 5, 51, 1, 130, 4, 12, 32, 1, 1178, 123, 1179, 4, 54, 338, 5, 51, 1, 130, 4, 12, 32, 1]\n[37, 56, 37, 258, 6, 53, 4, 54, 338, 5, 51, 1, 130, 4, 12, 32, 1, 1178, 123, 1179, 4, 54, 338, 5, 51, 1, 130, 4, 12, 32, 1, 185]\n[37, 56, 37, 258, 6, 53, 4, 54, 338, 5, 51, 1, 130, 4, 12, 32, 1, 1178, 123, 1179, 4, 54, 338, 5, 51, 1, 130, 4, 12, 32, 1, 185, 15]\n[37, 56, 37, 258, 6, 53, 4, 54, 338, 5, 51, 1, 130, 4, 12, 32, 1, 1178, 123, 1179, 4, 54, 338, 5, 51, 1, 130, 4, 12, 32, 1, 185, 15, 168]\n[37, 56, 37, 258, 6, 53, 4, 54, 338, 5, 51, 1, 130, 4, 12, 32, 1, 1178, 123, 1179, 4, 54, 338, 5, 51, 1, 130, 4, 12, 32, 1, 185, 15, 168, 4]\n[37, 56, 37, 258, 6, 53, 4, 54, 338, 5, 51, 1, 130, 4, 12, 32, 1, 1178, 123, 1179, 4, 54, 338, 5, 51, 1, 130, 4, 12, 32, 1, 185, 15, 168, 4, 12]\n[37, 56, 37, 258, 6, 53, 4, 54, 338, 5, 51, 1, 130, 4, 12, 32, 1, 1178, 123, 1179, 4, 54, 338, 5, 51, 1, 130, 4, 12, 32, 1, 185, 15, 168, 4, 12, 260]\n[37, 56, 37, 258, 6, 53, 4, 54, 338, 5, 51, 1, 130, 4, 12, 32, 1, 1178, 123, 1179, 4, 54, 338, 5, 51, 1, 130, 4, 12, 32, 1, 185, 15, 168, 4, 12, 260, 4]\n[37, 56, 37, 258, 6, 53, 4, 54, 338, 5, 51, 1, 130, 4, 12, 32, 1, 1178, 123, 1179, 4, 54, 338, 5, 51, 1, 130, 4, 12, 32, 1, 185, 15, 168, 4, 12, 260, 4, 505]\n[37, 56, 37, 258, 6, 53, 4, 54, 338, 5, 51, 1, 130, 4, 12, 32, 1, 1178, 123, 1179, 4, 54, 338, 5, 51, 1, 130, 4, 12, 32, 1, 185, 15, 168, 4, 12, 260, 4, 505, 4]\n[4, 1]\n[4, 1, 33]\n[4, 1, 33, 123]\n[4, 1, 33, 123, 324]\n[20, 54]\n[20, 54, 92]\n[20, 54, 92, 138]\n[20, 54, 92, 138, 15]\n[45, 14]\n[45, 14, 5]\n[45, 14, 5, 51]\n[45, 14, 5, 51, 3]\n[45, 14, 5, 51, 3, 45]\n[45, 14, 5, 51, 3, 45, 14]\n[45, 14, 5, 51, 3, 45, 14, 5]\n[45, 14, 5, 51, 3, 45, 14, 5, 51]\n[45, 14, 5, 51, 3, 45, 14, 5, 51, 3]\n[45, 14, 5, 51, 3, 45, 14, 5, 51, 3, 199]\n[1, 80]\n[1, 80, 25]\n[1, 80, 25, 1]\n[1, 80, 25, 1, 527]\n[1, 80, 25, 1, 527, 7]\n[2274, 4]\n[2274, 4, 1095]\n[2274, 4, 1095, 25]\n[2274, 4, 1095, 25, 2]\n[2274, 4, 1095, 25, 2, 63]\n[2274, 4, 1095, 
25, 2, 63, 7]\n[85, 118]\n[85, 118, 61]\n[85, 118, 61, 2]\n[85, 118, 61, 2, 387]\n[252, 25]\n[252, 25, 2]\n[252, 25, 2, 174]\n[252, 25, 2, 174, 106]\n[252, 25, 2, 174, 106, 13]\n[252, 25, 2, 174, 106, 13, 136]\n[252, 25, 2, 174, 106, 13, 136, 148]\n[252, 25, 2, 174, 106, 13, 136, 148, 86]\n[252, 25, 2, 174, 106, 13, 136, 148, 86, 40]\n[252, 25, 2, 174, 106, 13, 136, 148, 86, 40, 11]\n[49, 1]\n[49, 1, 34]\n[49, 1, 34, 14]\n[49, 1, 34, 14, 93]\n[49, 1, 34, 14, 93, 2276]\n[798, 14]\n[798, 14, 174]\n[798, 14, 174, 25]\n[798, 14, 174, 25, 20]\n[798, 14, 174, 25, 20, 17]\n[5, 23]\n[5, 23, 3]\n[5, 23, 3, 19]\n[5, 23, 3, 19, 242]\n[5, 23, 3, 19, 242, 274]\n[5, 23, 3, 19, 242, 274, 69]\n[5, 23, 3, 19, 242, 274, 69, 6]\n[5, 23, 3, 19, 242, 274, 69, 6, 46]\n[5, 23, 3, 19, 242, 274, 69, 6, 46, 9]\n[5, 23, 3, 19, 242, 274, 69, 6, 46, 9, 106]\n[5, 23, 3, 19, 242, 274, 69, 6, 46, 9, 106, 85]\n[5, 23, 3, 19, 242, 274, 69, 6, 46, 9, 106, 85, 118]\n[5, 23, 3, 19, 242, 274, 69, 6, 46, 9, 106, 85, 118, 85]\n[85, 118]\n[85, 118, 15]\n[85, 118, 15, 2]\n[85, 118, 15, 2, 387]\n[252, 25]\n[252, 25, 2]\n[2, 584]\n[2, 584, 730]\n[2, 674]\n[2, 674, 730]\n[2, 674, 730, 174]\n[2, 674, 730, 174, 106]\n[2, 674, 730, 174, 106, 13]\n[2, 674, 730, 174, 106, 13, 136]\n[2, 674, 730, 174, 106, 13, 136, 148]\n[2, 674, 730, 174, 106, 13, 136, 148, 5]\n[2, 674, 730, 174, 106, 13, 136, 148, 5, 902]\n[2, 674, 730, 174, 106, 13, 136, 148, 5, 902, 2277]\n[2, 674, 730, 174, 106, 13, 136, 148, 5, 902, 2277, 707]\n[2, 674, 730, 174, 106, 13, 136, 148, 5, 902, 2277, 707, 2278]\n[2, 674, 730, 174, 106, 13, 136, 148, 5, 902, 2277, 707, 2278, 6]\n[2, 674, 730, 174, 106, 13, 136, 148, 5, 902, 2277, 707, 2278, 6, 2279]\n[2, 674, 730, 174, 106, 13, 136, 148, 5, 902, 2277, 707, 2278, 6, 2279, 54]\n[28, 84]\n[28, 84, 816]\n[28, 84, 816, 89]\n[28, 84, 816, 89, 1]\n[28, 84, 816, 89, 1, 227]\n[28, 84, 816, 89, 1, 227, 22]\n[28, 84, 816, 89, 1, 227, 22, 2]\n[28, 84, 816, 89, 1, 227, 22, 2, 77]\n[28, 84, 816, 89, 1, 227, 22, 
2, 77, 700]\n[28, 84, 816, 89, 1, 227, 22, 2, 77, 700, 7]\n[9, 2280]\n[9, 2280, 2]\n[9, 2280, 2, 381]\n[9, 2280, 2, 381, 7]\n[9, 2280, 2, 381, 7, 2281]\n[9, 2280, 2, 381, 7, 2281, 6]\n[9, 2280, 2, 381, 7, 2281, 6, 141]\n[9, 2280, 2, 381, 7, 2281, 6, 141, 85]\n[584, 2283]\n[584, 2283, 718]\n[584, 2283, 718, 4]\n[584, 2283, 718, 4, 2284]\n[584, 2283, 718, 4, 2284, 499]\n[584, 2283, 718, 4, 2284, 499, 7]\n[584, 2283, 718, 4, 2284, 499, 7, 713]\n[584, 2283, 718, 4, 2284, 499, 7, 713, 85]\n[584, 2283, 718, 4, 2284, 499, 7, 713, 85, 118]\n[584, 2283, 718, 4, 2284, 499, 7, 713, 85, 118, 85]\n[85, 118]\n[85, 118, 15]\n[85, 118, 15, 2]\n[85, 118, 15, 2, 387]\n[252, 25]\n[252, 25, 2]\n[252, 25, 2, 174]\n[252, 25, 2, 174, 252]\n[252, 25, 2, 174, 252, 25]\n[252, 25, 2, 174, 252, 25, 2]\n[252, 25]\n[252, 25, 2]\n[252, 25, 2, 174]\n[252, 25, 2, 174, 106]\n[252, 25, 2, 174, 106, 13]\n[252, 25, 2, 174, 106, 13, 136]\n[252, 25, 2, 174, 106, 13, 136, 148]\n[252, 25, 2, 174, 106, 13, 136, 148, 84]\n[252, 25, 2, 174, 106, 13, 136, 148, 84, 2285]\n[252, 25, 2, 174, 106, 13, 136, 148, 84, 2285, 1]\n[252, 25, 2, 174, 106, 13, 136, 148, 84, 2285, 1, 315]\n[252, 25, 2, 174, 106, 13, 136, 148, 84, 2285, 1, 315, 28]\n[252, 25, 2, 174, 106, 13, 136, 148, 84, 2285, 1, 315, 28, 416]\n[252, 25, 2, 174, 106, 13, 136, 148, 84, 2285, 1, 315, 28, 416, 4]\n[1, 2286]\n[1, 2286, 84]\n[1, 2286, 84, 2287]\n[1, 2286, 84, 2287, 1]\n[1, 2286, 84, 2287, 1, 640]\n[1, 2286, 84, 2287, 1, 640, 107]\n[1, 2286, 84, 2287, 1, 640, 107, 6]\n[136, 174]\n[136, 174, 711]\n[136, 174, 711, 1]\n[136, 174, 711, 1, 315]\n[136, 174, 711, 1, 315, 25]\n[136, 174, 711, 1, 315, 25, 583]\n[136, 174, 711, 1, 315, 25, 583, 2]\n[136, 174, 711, 1, 315, 25, 583, 2, 387]\n[138, 84]\n[138, 84, 2288]\n[138, 84, 2288, 128]\n[138, 84, 2288, 128, 2289]\n[138, 84, 2288, 128, 2289, 57]\n[138, 84, 2288, 128, 2289, 57, 4]\n[138, 84, 2288, 128, 2289, 57, 4, 909]\n[138, 84, 2288, 128, 2289, 57, 4, 909, 85]\n[138, 84, 2288, 128, 2289, 57, 4, 909, 
85, 118]\n[138, 84, 2288, 128, 2289, 57, 4, 909, 85, 118, 85]\n[85, 118]\n[85, 118, 15]\n[85, 118, 15, 2]\n[85, 118, 15, 2, 587]\n[20, 59]\n[20, 59, 460]\n[20, 59, 460, 444]\n[20, 59, 460, 444, 140]\n[20, 59, 460, 444, 140, 28]\n[85, 70]\n[85, 70, 8]\n[85, 70, 8, 242]\n[85, 70, 8, 242, 3]\n[85, 70, 8, 242, 3, 30]\n[85, 70, 8, 242, 3, 30, 93]\n[85, 70, 8, 242, 3, 30, 93, 774]\n[85, 70, 8, 242, 3, 30, 93, 774, 22]\n[85, 70, 8, 242, 3, 30, 93, 774, 22, 529]\n[85, 70, 8, 242, 3, 30, 93, 774, 22, 529, 210]\n[85, 70, 8, 242, 3, 30, 93, 774, 22, 529, 210, 529]\n[195, 833]\n[195, 833, 22]\n[195, 833, 22, 1]\n[195, 833, 22, 1, 529]\n[333, 4]\n[333, 4, 834]\n[333, 4, 834, 148]\n[333, 4, 834, 148, 13]\n[833, 22]\n[833, 22, 1]\n[833, 22, 1, 529]\n[833, 22, 1, 529, 210]\n[833, 22, 1, 529, 210, 60]\n[833, 22, 1, 529, 210, 60, 1181]\n[833, 22, 1, 529, 210, 60, 1181, 11]\n[833, 22, 1, 529, 210, 60, 1181, 11, 89]\n[833, 22, 1, 529, 210, 60, 1181, 11, 89, 1]\n[85, 118]\n[85, 118, 714]\n[85, 118, 714, 106]\n[85, 118, 714, 106, 11]\n[1181, 11]\n[1181, 11, 89]\n[1181, 11, 89, 1]\n[85, 118]\n[85, 118, 714]\n[85, 118, 714, 106]\n[85, 118, 714, 106, 11]\n[85, 118, 714, 106, 11, 55]\n[85, 118, 714, 106, 11, 55, 333]\n[85, 118, 714, 106, 11, 55, 333, 4]\n[85, 118, 714, 106, 11, 55, 333, 4, 834]\n[85, 118, 714, 106, 11, 55, 333, 4, 834, 148]\n[85, 118, 714, 106, 11, 55, 333, 4, 834, 148, 13]\n[833, 22]\n[833, 22, 1]\n[833, 22, 1, 529]\n[833, 22, 1, 529, 210]\n[833, 22, 1, 529, 210, 85]\n[833, 22, 1, 529, 210, 85, 118]\n[833, 22, 1, 529, 210, 85, 118, 85]\n[85, 118]\n[85, 118, 15]\n[85, 118, 15, 2]\n[85, 118, 15, 2, 387]\n[85, 118]\n[85, 118, 85]\n[85, 118]\n[85, 118, 15]\n[85, 118, 15, 2]\n[85, 118, 15, 2, 587]\n[85, 118, 15, 2, 587, 80]\n[85, 118, 15, 2, 587, 80, 835]\n[85, 118, 15, 2, 587, 80, 835, 836]\n[85, 118]\n[835, 836]\n[60, 170]\n[60, 170, 84]\n[28, 490]\n[2, 2291]\n[2, 835]\n[2, 835, 836]\n[2, 835, 836, 1156]\n[2, 835, 836, 1156, 80]\n[2, 835, 836, 1156, 80, 86]\n[2, 835, 836, 
1156, 80, 86, 2]\n[2, 835, 836, 1156, 80, 86, 2, 40]\n[2, 835, 836, 1156, 80, 86, 2, 40, 11]\n[49, 1]\n[49, 1, 34]\n[49, 1, 34, 14]\n[49, 1, 34, 14, 93]\n[49, 1, 34, 14, 93, 2293]\n[798, 14]\n[798, 14, 174]\n[798, 14, 174, 1182]\n[798, 14, 174, 1182, 14]\n[40, 11]\n[40, 11, 193]\n[40, 11, 193, 9]\n[40, 11, 193, 9, 138]\n[40, 11, 193, 9, 138, 90]\n[40, 11, 193, 9, 138, 90, 35]\n[40, 11, 193, 9, 138, 90, 35, 85]\n[40, 11, 193, 9, 138, 90, 35, 85, 118]\n[40, 11, 193, 9, 138, 90, 35, 85, 118, 85]\n[85, 118]\n[85, 118, 15]\n[85, 118, 15, 2]\n[85, 118, 15, 2, 387]\n[252, 25]\n[252, 25, 2]\n[252, 25, 2, 174]\n[252, 25, 2, 174, 252]\n[252, 25, 2, 174, 252, 25]\n[252, 25, 2, 174, 252, 25, 2]\n[252, 25]\n[252, 25, 2]\n[252, 25, 2, 174]\n[252, 25, 2, 174, 106]\n[252, 25, 2, 174, 106, 13]\n[252, 25, 2, 174, 106, 13, 136]\n[252, 25, 2, 174, 106, 13, 136, 148]\n[252, 25, 2, 174, 106, 13, 136, 148, 85]\n[252, 25, 2, 174, 106, 13, 136, 148, 85, 4]\n[252, 25, 2, 174, 106, 13, 136, 148, 85, 4, 1]\n[252, 25, 2, 174, 106, 13, 136, 148, 85, 4, 1, 2294]\n[252, 25, 2, 174, 106, 13, 136, 148, 85, 4, 1, 2294, 276]\n[252, 25, 2, 174, 106, 13, 136, 148, 85, 4, 1, 2294, 276, 1]\n[252, 25, 2, 174, 106, 13, 136, 148, 85, 4, 1, 2294, 276, 1, 1183]\n[2295, 214]\n[2295, 214, 2296]\n[2295, 214, 2296, 167]\n[1068, 4]\n[1068, 4, 2298]\n[1068, 4, 2298, 61]\n[60, 510]\n[60, 510, 14]\n[60, 510, 14, 1184]\n[60, 510, 14, 1184, 61]\n[60, 510, 14, 1184, 61, 323]\n[60, 510, 14, 1184, 61, 323, 2299]\n[60, 510, 14, 1184, 61, 323, 2299, 85]\n[60, 510, 14, 1184, 61, 323, 2299, 85, 118]\n[60, 510, 14, 1184, 61, 323, 2299, 85, 118, 85]\n[85, 118]\n[85, 118, 15]\n[85, 118, 15, 2]\n[85, 118, 15, 2, 587]\n[252, 25]\n[252, 25, 2]\n[252, 25, 2, 174]\n[252, 25, 2, 174, 252]\n[252, 25, 2, 174, 252, 25]\n[252, 25, 2, 174, 252, 25, 2]\n[252, 25]\n[252, 25, 2]\n[252, 25, 2, 174]\n[252, 25, 2, 174, 106]\n[252, 25, 2, 174, 106, 13]\n[252, 25, 2, 174, 106, 13, 136]\n[252, 25, 2, 174, 106, 13, 136, 148]\n[252, 25, 2, 174, 106, 
13, 136, 148, 27]\n[252, 25, 2, 174, 106, 13, 136, 148, 27, 1025]\n[252, 25, 2, 174, 106, 13, 136, 148, 27, 1025, 102]\n[27, 1183]\n[27, 1183, 102]\n[27, 158]\n[27, 158, 102]\n[27, 229]\n[27, 229, 102]\n[27, 229, 102, 108]\n[27, 229, 102, 108, 6]\n[27, 229, 102, 108, 6, 53]\n[27, 229, 102, 108, 6, 53, 39]\n[27, 229, 102, 108, 6, 53, 39, 3]\n[27, 229, 102, 108, 6, 53, 39, 3, 392]\n[27, 229, 102, 108, 6, 53, 39, 3, 392, 3]\n[27, 229, 102, 108, 6, 53, 39, 3, 392, 3, 19]\n[1, 1184]\n[1, 1184, 15]\n[1, 1184, 15, 708]\n[21, 141]\n[21, 141, 11]\n[21, 141, 11, 397]\n[289, 328]\n[289, 328, 21]\n[289, 328, 21, 32]\n[289, 328, 21, 32, 85]\n[289, 328, 21, 32, 85, 27]\n[289, 328, 21, 32, 85, 27, 228]\n[289, 328, 21, 32, 85, 27, 228, 64]\n[289, 328, 21, 32, 85, 27, 228, 64, 3]\n[27, 228]\n[27, 228, 56]\n[27, 228, 56, 49]\n[373, 19]\n[373, 19, 183]\n[65, 64]\n[65, 64, 3]\n[65, 64, 3, 19]\n[65, 64, 3, 19, 504]\n[65, 64, 3, 19, 504, 21]\n[65, 64, 3, 19, 504, 21, 141]\n[65, 64, 3, 19, 504, 21, 141, 11]\n[65, 64, 3, 19, 504, 21, 141, 11, 397]\n[289, 328]\n[289, 328, 21]\n[289, 328, 21, 32]\n[21, 141]\n[21, 141, 11]\n[21, 141, 11, 397]\n[289, 328]\n[289, 328, 21]\n[289, 328, 21, 32]\n[289, 328, 21, 32, 85]\n[289, 328, 21, 32, 85, 27]\n[289, 328, 21, 32, 85, 27, 228]\n[289, 328, 21, 32, 85, 27, 228, 64]\n[289, 328, 21, 32, 85, 27, 228, 64, 3]\n[27, 228]\n[27, 228, 56]\n[27, 228, 56, 49]\n[373, 19]\n[373, 19, 183]\n[65, 64]\n[65, 64, 3]\n[65, 64, 3, 19]\n[65, 64, 3, 19, 504]\n[65, 64, 3, 19, 504, 21]\n[65, 64, 3, 19, 504, 21, 141]\n[65, 64, 3, 19, 504, 21, 141, 11]\n[65, 64, 3, 19, 504, 21, 141, 11, 397]\n[289, 328]\n[289, 328, 21]\n[289, 328, 21, 32]\n[21, 141]\n[21, 141, 11]\n[21, 141, 11, 397]\n[289, 328]\n[289, 328, 21]\n[289, 328, 21, 32]\n[21, 141]\n[21, 141, 11]\n[21, 141, 11, 397]\n[289, 328]\n[289, 328, 21]\n[289, 328, 21, 32]\n[21, 141]\n[21, 141, 11]\n[21, 141, 11, 397]\n[289, 328]\n[289, 328, 21]\n[289, 328, 21, 32]\n[289, 328, 21, 32, 85]\n[289, 328, 21, 32, 85, 46]\n[289, 
328, 21, 32, 85, 46, 3]\n[289, 328, 21, 32, 85, 46, 3, 163]\n[289, 328, 21, 32, 85, 46, 3, 163, 253]\n[289, 328, 21, 32, 85, 46, 3, 163, 253, 2]\n[40, 11]\n[40, 11, 154]\n[40, 11, 154, 53]\n[40, 11, 154, 53, 4]\n[5, 42]\n[5, 42, 51]\n[5, 42, 51, 3]\n[40, 72]\n[40, 72, 1]\n[12, 32]\n[12, 32, 90]\n[12, 32, 90, 607]\n[12, 32, 90, 607, 189]\n[12, 32, 90, 607, 189, 18]\n[12, 32, 90, 607, 189, 18, 462]\n[12, 32, 90, 607, 189, 18, 462, 6]\n[12, 32, 90, 607, 189, 18, 462, 6, 10]\n[12, 32, 90, 607, 189, 18, 462, 6, 10, 374]\n[4, 45]\n[4, 45, 93]\n[5, 112]\n[5, 112, 3]\n[5, 112, 3, 128]\n[5, 112, 3, 128, 186]\n[5, 112, 3, 128, 186, 8]\n[46, 3]\n[46, 3, 163]\n[46, 3, 163, 253]\n[46, 3, 163, 253, 2]\n[9, 378]\n[9, 378, 48]\n[9, 378, 48, 6]\n[9, 378, 48, 6, 10]\n[9, 378, 48, 6, 10, 2]\n[271, 837]\n[271, 837, 46]\n[271, 837, 46, 3]\n[271, 837, 46, 3, 163]\n[271, 837, 46, 3, 163, 253]\n[271, 837, 46, 3, 163, 253, 2]\n[98, 1077]\n[98, 1077, 59]\n[98, 1077, 59, 2304]\n[98, 1077, 59, 2304, 96]\n[98, 1077, 59, 2304, 96, 1]\n[5, 91]\n[5, 91, 133]\n[5, 91, 133, 805]\n[5, 91, 133, 805, 15]\n[76, 2307]\n[76, 2307, 1090]\n[76, 2307, 1090, 6]\n[2308, 11]\n[2308, 11, 1]\n[2308, 11, 1, 1108]\n[2308, 11, 1, 1108, 9]\n[2308, 11, 1, 1108, 9, 751]\n[2308, 11, 1, 1108, 9, 751, 2]\n[2308, 11, 1, 1108, 9, 751, 2, 77]\n[20, 288]\n[20, 288, 838]\n[20, 288, 838, 1185]\n[20, 288, 838, 1185, 288]\n[20, 288, 838, 1185, 288, 838]\n[21, 812]\n[21, 812, 1]\n[2310, 115]\n[2310, 115, 46]\n[2310, 115, 46, 3]\n[2310, 115, 46, 3, 163]\n[2310, 115, 46, 3, 163, 253]\n[2310, 115, 46, 3, 163, 253, 2]\n[46, 3]\n[46, 3, 163]\n[46, 3, 163, 253]\n[46, 3, 163, 253, 2]\n[9, 378]\n[9, 378, 48]\n[9, 378, 48, 6]\n[9, 378, 48, 6, 10]\n[9, 378, 48, 6, 10, 2]\n[52, 48]\n[52, 48, 6]\n[52, 48, 6, 10]\n[52, 48, 6, 10, 2]\n[271, 837]\n[376, 5]\n[376, 5, 23]\n[376, 5, 23, 49]\n[376, 5, 23, 49, 13]\n[376, 5, 23, 49, 13, 36]\n[376, 5, 23, 49, 13, 36, 5]\n[376, 5, 23, 49, 13, 36, 5, 23]\n[376, 5, 23, 49, 13, 36, 5, 23, 49]\n[376, 5, 
23, 49, 13, 36, 5, 23, 49, 13]\n[152, 38]\n[152, 38, 1012]\n[152, 38, 1012, 56]\n[152, 38, 1012, 56, 90]\n[50, 81]\n[50, 81, 48]\n[50, 81, 48, 2312]\n[50, 81, 48, 2312, 4]\n[50, 81, 48, 2312, 4, 37]\n[50, 81, 48, 2312, 4, 37, 1186]\n[50, 81, 48, 2312, 4, 37, 1186, 6]\n[50, 81, 48, 2312, 4, 37, 1186, 6, 1186]\n[37, 106]\n[37, 106, 72]\n[37, 106, 72, 66]\n[37, 106, 72, 66, 22]\n[37, 106, 72, 66, 22, 3]\n[37, 106, 72, 66, 22, 3, 66]\n[37, 106, 72, 66, 22, 3, 66, 22]\n[21, 70]\n[21, 70, 8]\n[21, 70, 8, 13]\n[21, 70, 8, 13, 21]\n[21, 70, 8, 13, 21, 70]\n[21, 70, 8, 13, 21, 70, 8]\n[21, 70, 8, 13, 21, 70, 8, 13]\n[21, 70, 8, 13, 21, 70, 8, 13, 18]\n[21, 70, 8, 13, 21, 70, 8, 13, 18, 212]\n[21, 70, 8, 13, 21, 70, 8, 13, 18, 212, 48]\n[21, 70, 8, 13, 21, 70, 8, 13, 18, 212, 48, 175]\n[21, 70, 8, 13, 21, 70, 8, 13, 18, 212, 48, 175, 207]\n[21, 70, 8, 13, 21, 70, 8, 13, 18, 212, 48, 175, 207, 18]\n[21, 70, 8, 13, 21, 70, 8, 13, 18, 212, 48, 175, 207, 18, 212]\n[21, 70, 8, 13, 21, 70, 8, 13, 18, 212, 48, 175, 207, 18, 212, 48]\n[21, 70, 8, 13, 21, 70, 8, 13, 18, 212, 48, 175, 207, 18, 212, 48, 175]\n[12, 21]\n[12, 21, 3]\n[12, 21, 3, 4]\n[30, 38]\n[30, 38, 18]\n[30, 38, 18, 160]\n[30, 38, 18, 160, 46]\n[30, 38, 18, 160, 46, 30]\n[30, 38, 18, 160, 46, 30, 38]\n[30, 38, 18, 160, 46, 30, 38, 18]\n[30, 38, 18, 160, 46, 30, 38, 18, 160]\n[30, 38, 18, 160, 46, 30, 38, 18, 160, 46]\n[30, 38, 18, 160, 46, 30, 38, 18, 160, 46, 46]\n[30, 38, 18, 160, 46, 30, 38, 18, 160, 46, 46, 3]\n[30, 38, 18, 160, 46, 30, 38, 18, 160, 46, 46, 3, 163]\n[30, 38, 18, 160, 46, 30, 38, 18, 160, 46, 46, 3, 163, 253]\n[30, 38, 18, 160, 46, 30, 38, 18, 160, 46, 46, 3, 163, 253, 2]\n[46, 3]\n[46, 3, 163]\n[46, 3, 163, 253]\n[46, 3, 163, 253, 2]\n[9, 378]\n[9, 378, 48]\n[9, 378, 48, 6]\n[9, 378, 48, 6, 10]\n[9, 378, 48, 6, 10, 2]\n[271, 837]\n[271, 837, 133]\n[271, 837, 133, 79]\n[271, 837, 133, 79, 16]\n[271, 837, 133, 79, 16, 569]\n[271, 837, 133, 79, 16, 569, 44]\n[133, 79]\n[133, 79, 116]\n[133, 79, 116, 
1164]\n[4, 189]\n[4, 189, 6]\n[4, 189, 6, 136]\n[4, 189, 6, 136, 1124]\n[4, 189, 6, 136, 1124, 116]\n[6, 10]\n[6, 10, 257]\n[6, 10, 257, 308]\n[6, 10, 257, 308, 5]\n[6, 10, 257, 308, 5, 23]\n[6, 10, 257, 308, 5, 23, 133]\n[6, 10, 257, 308, 5, 23, 133, 79]\n[6, 10, 257, 308, 5, 23, 133, 79, 29]\n[6, 10, 257, 308, 5, 23, 133, 79, 29, 2313]\n[6, 10, 257, 308, 5, 23, 133, 79, 29, 2313, 15]\n[116, 94]\n[116, 94, 2]\n[116, 94, 2, 41]\n[4, 1]\n[4, 1, 2315]\n[4, 1, 2315, 44]\n[4, 2316]\n[4, 2316, 137]\n[4, 2316, 137, 44]\n[563, 29]\n[563, 29, 16]\n[563, 29, 16, 263]\n[563, 29, 16, 263, 40]\n[563, 29, 16, 263, 40, 83]\n[563, 29, 16, 263, 40, 83, 43]\n[563, 29, 16, 263, 40, 83, 43, 43]\n[563, 29, 16, 263, 40, 83, 43, 43, 255]\n[563, 29, 16, 263, 40, 83, 43, 43, 255, 43]\n[563, 29, 16, 263, 40, 83, 43, 43, 255, 43, 43]\n[60, 16]\n[60, 16, 77]\n[60, 16, 77, 267]\n[75, 2318]\n[75, 2318, 1]\n[75, 2318, 1, 2319]\n[75, 2318, 1, 2319, 22]\n[75, 2318, 1, 2319, 22, 3]\n[75, 2318, 1, 2319, 22, 3, 43]\n[75, 2318, 1, 2319, 22, 3, 43, 43]\n[75, 2318, 1, 2319, 22, 3, 43, 43, 255]\n[75, 2318, 1, 2319, 22, 3, 43, 43, 255, 43]\n[75, 2318, 1, 2319, 22, 3, 43, 43, 255, 43, 43]\n[77, 777]\n[77, 777, 2320]\n[66, 791]\n[66, 791, 2]\n[66, 791, 2, 2322]\n[66, 791, 2, 2322, 955]\n[66, 791, 2, 2322, 955, 22]\n[66, 791, 2, 2322, 955, 22, 3]\n[66, 791, 2, 2322, 955, 22, 3, 43]\n[66, 791, 2, 2322, 955, 22, 3, 43, 43]\n[66, 791, 2, 2322, 955, 22, 3, 43, 43, 255]\n[66, 791, 2, 2322, 955, 22, 3, 43, 43, 255, 43]\n[66, 791, 2, 2322, 955, 22, 3, 43, 43, 255, 43, 43]\n[77, 676]\n[2323, 57]\n[2323, 57, 17]\n[2323, 57, 17, 624]\n[2323, 57, 17, 624, 1080]\n[2323, 57, 17, 624, 1080, 17]\n[2323, 57, 17, 624, 1080, 17, 155]\n[2323, 57, 17, 624, 1080, 17, 155, 43]\n[2323, 57, 17, 624, 1080, 17, 155, 43, 43]\n[2323, 57, 17, 624, 1080, 17, 155, 43, 43, 255]\n[2323, 57, 17, 624, 1080, 17, 155, 43, 43, 255, 43]\n[2323, 57, 17, 624, 1080, 17, 155, 43, 43, 255, 43, 43]\n[4, 244]\n[4, 244, 41]\n[4, 244, 41, 10]\n[4, 244, 
41, 10, 17]\n[43, 43]\n[43, 43, 255]\n[43, 43, 255, 43]\n[43, 43, 255, 43, 43]\n[43, 43, 255, 43, 43, 255]\n[43, 43, 255, 43, 43, 255, 43]\n[43, 43, 255, 43, 43, 255, 43, 43]\n[43, 43, 255, 43, 43, 255, 43, 43, 255]\n[43, 43, 255, 43, 43, 255, 43, 43, 255, 256]\n[30, 2]\n[30, 2, 199]\n[162, 27]\n[162, 27, 1188]\n[162, 27, 1188, 1189]\n[162, 27, 1188, 1189, 9]\n[162, 27, 1188, 1189, 9, 415]\n[162, 27, 1188, 1189, 9, 415, 27]\n[22, 1]\n[22, 1, 217]\n[22, 1, 217, 7]\n[22, 1, 217, 7, 17]\n[12, 59]\n[12, 59, 839]\n[12, 59, 839, 179]\n[256, 285]\n[256, 285, 109]\n[256, 285, 109, 29]\n[256, 285, 109, 29, 84]\n[256, 285, 109, 29, 84, 61]\n[256, 285, 109, 29, 84, 61, 2]\n[256, 285, 109, 29, 84, 61, 2, 353]\n[29, 5]\n[29, 5, 61]\n[29, 5, 61, 2]\n[29, 5, 61, 2, 353]\n[29, 5, 61, 2, 353, 2326]\n[84, 503]\n[84, 503, 136]\n[84, 503, 136, 2329]\n[84, 503, 136, 2329, 2330]\n[84, 503, 136, 2329, 2330, 2]\n[84, 503, 136, 2329, 2330, 2, 549]\n[84, 139]\n[84, 139, 280]\n[84, 139, 280, 1]\n[84, 139, 280, 1, 2332]\n[84, 139, 280, 1, 2332, 104]\n[84, 139, 280, 1, 2332, 104, 95]\n[37, 2]\n[37, 2, 2333]\n[37, 2, 2333, 198]\n[37, 2, 2333, 198, 292]\n[37, 2, 2333, 198, 292, 5]\n[37, 2, 2333, 198, 292, 5, 332]\n[37, 2, 2333, 198, 292, 5, 332, 2334]\n[4, 9]\n[4, 9, 2335]\n[4, 9, 2335, 14]\n[4, 9, 2335, 14, 16]\n[4, 9, 2335, 14, 16, 176]\n[4, 9, 2335, 14, 16, 176, 42]\n[4, 9, 2335, 14, 16, 176, 42, 2336]\n[4, 60]\n[4, 60, 1]\n[84, 61]\n[190, 7]\n[190, 7, 2339]\n[190, 7, 2339, 16]\n[60, 209]\n[60, 209, 13]\n[60, 209, 13, 2]\n[4, 5]\n[4, 5, 35]\n[69, 171]\n[69, 171, 3]\n[95, 55]\n[95, 55, 14]\n[2341, 101]\n[2341, 101, 13]\n[2341, 101, 13, 2342]\n[2341, 101, 13, 2342, 7]\n[2341, 101, 13, 2342, 7, 1]\n[60, 2344]\n[60, 2344, 256]\n[30, 2]\n[30, 2, 199]\n[162, 27]\n[162, 27, 1188]\n[9, 415]\n[9, 415, 27]\n[22, 1]\n[22, 1, 217]\n[22, 1, 217, 7]\n[22, 1, 217, 7, 17]\n[300, 181]\n[300, 181, 9]\n[12, 59]\n[12, 59, 839]\n[12, 59, 839, 179]\n[256, 285]\n[256, 285, 256]\n[256, 9]\n[256, 9, 415]\n[256, 9, 
415, 27]\n[22, 1]\n[22, 1, 217]\n[22, 1, 217, 7]\n[22, 1, 217, 7, 17]\n[12, 59]\n[12, 59, 839]\n[12, 59, 839, 179]\n[256, 285]\n[256, 285, 65]\n[256, 285, 65, 1]\n[256, 285, 65, 1, 79]\n[256, 285, 65, 1, 79, 18]\n[256, 285, 65, 1, 79, 18, 1170]\n[256, 285, 65, 1, 79, 18, 1170, 11]\n[256, 285, 65, 1, 79, 18, 1170, 11, 1]\n[4, 1176]\n[4, 1176, 1106]\n[4, 1176, 1106, 394]\n[4, 1176, 1106, 394, 1]\n[99, 144]\n[99, 144, 6]\n[99, 144, 6, 51]\n[99, 144, 6, 51, 247]\n[99, 144, 6, 51, 247, 19]\n[99, 144, 6, 51, 247, 19, 88]\n[99, 144, 6, 51, 247, 19, 88, 10]\n[144, 6]\n[144, 6, 46]\n[144, 6, 46, 247]\n[144, 6, 46, 247, 19]\n[144, 6, 46, 247, 19, 88]\n[144, 6, 46, 247, 19, 88, 10]\n[144, 6, 46, 247, 19, 88, 10, 487]\n[144, 6, 46, 247, 19, 88, 10, 487, 99]\n[144, 6, 46, 247, 19, 88, 10, 487, 99, 108]\n[144, 6, 46, 247, 19, 88, 10, 487, 99, 108, 102]\n[144, 6, 46, 247, 19, 88, 10, 487, 99, 108, 102, 180]\n[144, 6, 46, 247, 19, 88, 10, 487, 99, 108, 102, 180, 6]\n[144, 6, 46, 247, 19, 88, 10, 487, 99, 108, 102, 180, 6, 135]\n[144, 6, 46, 247, 19, 88, 10, 487, 99, 108, 102, 180, 6, 135, 13]\n[144, 6]\n[144, 6, 94]\n[144, 6, 94, 247]\n[144, 6, 94, 247, 19]\n[144, 6, 94, 247, 19, 88]\n[144, 6, 94, 247, 19, 88, 10]\n[26, 1]\n[26, 1, 151]\n[26, 1, 151, 1190]\n[87, 1]\n[87, 1, 2346]\n[822, 518]\n[822, 518, 4]\n[822, 518, 4, 47]\n[822, 518, 4, 47, 11]\n[822, 518, 4, 47, 11, 1]\n[822, 518, 4, 47, 11, 1, 1079]\n[822, 518, 4, 47, 11, 1, 1079, 398]\n[822, 518, 4, 47, 11, 1, 1079, 398, 12]\n[822, 518, 4, 47, 11, 1, 1079, 398, 12, 1]\n[822, 518, 4, 47, 11, 1, 1079, 398, 12, 1, 360]\n[822, 518, 4, 47, 11, 1, 1079, 398, 12, 1, 360, 7]\n[4, 9]\n[4, 9, 841]\n[4, 9, 841, 105]\n[87, 1191]\n[87, 1191, 4]\n[87, 686]\n[87, 686, 4]\n[87, 686, 4, 41]\n[87, 686, 4, 41, 512]\n[87, 686, 4, 41, 512, 18]\n[87, 686, 4, 41, 512, 18, 94]\n[87, 686, 4, 41, 512, 18, 94, 59]\n[11, 1]\n[11, 1, 419]\n[13, 1]\n[1, 360]\n[1, 360, 7]\n[1, 360, 7, 100]\n[1, 360, 7, 100, 12]\n[1, 360, 7, 100, 12, 1]\n[1, 360, 7, 100, 
12, 1, 360]\n[1, 360, 7, 100, 12, 1, 360, 7]\n[4, 9]\n[4, 9, 841]\n[4, 9, 841, 105]\n[87, 1191]\n[87, 1191, 4]\n[87, 686]\n[87, 686, 4]\n[87, 686, 4, 41]\n[87, 686, 4, 41, 512]\n[87, 686, 4, 41, 512, 18]\n[87, 686, 4, 41, 512, 18, 94]\n[87, 686, 4, 41, 512, 18, 94, 59]\n[11, 1]\n[11, 1, 419]\n[13, 1]\n[1, 360]\n[1, 360, 7]\n[1, 360, 7, 100]\n[28, 31]\n[28, 31, 15]\n[28, 31]\n[28, 31, 15]\n[28, 31]\n[28, 31, 15]\n[28, 31, 15, 30]\n[28, 31, 15, 30, 293]\n[28, 31, 15, 30, 293, 100]\n[37, 20]\n[37, 20, 2348]\n[4, 45]\n[4, 45, 5]\n[4, 45]\n[4, 45, 5]\n[1, 2349]\n[1, 2349, 6]\n[1, 2349, 6, 20]\n[1, 2349, 6, 20, 943]\n[1, 2349, 6, 20, 943, 15]\n[16, 63]\n[16, 63, 123]\n[16, 63, 123, 624]\n[4, 5]\n[4, 5, 19]\n[75, 2350]\n[75, 2350, 95]\n[75, 2350, 95, 267]\n[75, 2350, 95, 267, 13]\n[75, 2350, 95, 267, 13, 1]\n[28, 31]\n[28, 31, 15]\n[28, 31, 15, 1]\n[14, 76]\n[14, 76, 140]\n[14, 76, 140, 477]\n[28, 31]\n[28, 31, 15]\n[28, 31, 15, 41]\n[28, 31, 15, 41, 60]\n[28, 31, 15, 41, 60, 31]\n[28, 31, 15, 41, 60, 31, 15]\n[28, 31, 15, 41, 60, 31, 15, 1]\n[28, 31, 15, 41, 60, 31, 15, 1, 202]\n[28, 31, 15, 41, 60, 31, 15, 1, 202, 12]\n[28, 31, 15, 41, 60, 31, 15, 1, 202, 12, 2]\n[28, 31, 15, 41, 60, 31, 15, 1, 202, 12, 2, 206]\n[4, 18]\n[4, 18, 314]\n[4, 18, 314, 9]\n[4, 18, 314, 9, 456]\n[67, 54]\n[67, 54, 1]\n[67, 54, 1, 1193]\n[67, 54, 1, 1193, 50]\n[67, 54, 1, 1193, 50, 48]\n[67, 54, 1, 1193, 50, 48, 794]\n[67, 54, 1, 1193, 50, 48, 794, 13]\n[67, 54, 1, 1193, 50, 48, 794, 13, 226]\n[11, 31]\n[11, 31, 329]\n[11, 31, 329, 456]\n[11, 31, 329, 456, 457]\n[11, 31, 329, 456, 457, 220]\n[11, 31, 329, 456, 457, 220, 115]\n[11, 31, 329, 456, 457, 220, 115, 220]\n[11, 31, 329, 456, 457, 220, 115, 220, 25]\n[11, 31, 329, 456, 457, 220, 115, 220, 25, 17]\n[11, 31, 329, 456, 457, 220, 115, 220, 25, 17, 131]\n[74, 94]\n[74, 94, 1194]\n[1, 202]\n[1, 202, 44]\n[1, 202, 44, 647]\n[1, 202, 44, 647, 178]\n[1, 202, 44, 647, 178, 372]\n[29, 1]\n[29, 1, 34]\n[29, 1, 34, 3]\n[29, 1, 34, 3, 41]\n[29, 1, 
34, 3, 41, 15]\n[29, 1, 34, 3, 41, 15, 513]\n[29, 1, 34, 3, 41, 15, 513, 22]\n[29, 1, 34, 3, 41, 15, 513, 22, 31]\n[29, 1, 34, 3, 41, 15, 513, 22, 31, 15]\n[29, 1, 34, 3, 41, 15, 513, 22, 31, 15, 1]\n[29, 1, 34, 3, 41, 15, 513, 22, 31, 15, 1, 202]\n[29, 1, 34, 3, 41, 15, 513, 22, 31, 15, 1, 202, 4]\n[29, 1, 34, 3, 41, 15, 513, 22, 31, 15, 1, 202, 4, 1]\n[29, 1, 34, 3, 41, 15, 513, 22, 31, 15, 1, 202, 4, 1, 1195]\n[29, 1, 34, 3, 41, 15, 513, 22, 31, 15, 1, 202, 4, 1, 1195, 38]\n[11, 31]\n[11, 31, 329]\n[11, 31, 329, 456]\n[11, 31, 329, 456, 457]\n[11, 31, 329, 456, 457, 31]\n[11, 31, 329, 456, 457, 31, 15]\n[11, 31, 329, 456, 457, 31, 15, 1]\n[11, 31, 329, 456, 457, 31, 15, 1, 202]\n[11, 31, 329, 456, 457, 31, 15, 1, 202, 12]\n[11, 31, 329, 456, 457, 31, 15, 1, 202, 12, 2]\n[11, 31, 329, 456, 457, 31, 15, 1, 202, 12, 2, 206]\n[4, 18]\n[4, 18, 314]\n[4, 18, 314, 9]\n[4, 18, 314, 9, 456]\n[67, 54]\n[67, 54, 1]\n[67, 54, 1, 1193]\n[67, 54, 1, 1193, 50]\n[67, 54, 1, 1193, 50, 48]\n[67, 54, 1, 1193, 50, 48, 794]\n[67, 54, 1, 1193, 50, 48, 794, 13]\n[67, 54, 1, 1193, 50, 48, 794, 13, 226]\n[11, 31]\n[11, 31, 329]\n[11, 31, 329, 456]\n[11, 31, 329, 456, 457]\n[11, 31, 329, 456, 457, 220]\n[11, 31, 329, 456, 457, 220, 115]\n[11, 31, 329, 456, 457, 220, 115, 220]\n[11, 31, 329, 456, 457, 220, 115, 220, 25]\n[11, 31, 329, 456, 457, 220, 115, 220, 25, 17]\n[11, 31, 329, 456, 457, 220, 115, 220, 25, 17, 131]\n[74, 94]\n[74, 94, 1194]\n[1, 202]\n[1, 202, 44]\n[1, 202, 44, 647]\n[1, 202, 44, 647, 178]\n[1, 202, 44, 647, 178, 372]\n[29, 1]\n[29, 1, 34]\n[29, 1, 34, 3]\n[29, 1, 34, 3, 41]\n[29, 1, 34, 3, 41, 15]\n[29, 1, 34, 3, 41, 15, 513]\n[29, 1, 34, 3, 41, 15, 513, 22]\n[29, 1, 34, 3, 41, 15, 513, 22, 31]\n[29, 1, 34, 3, 41, 15, 513, 22, 31, 15]\n[29, 1, 34, 3, 41, 15, 513, 22, 31, 15, 1]\n[29, 1, 34, 3, 41, 15, 513, 22, 31, 15, 1, 202]\n[29, 1, 34, 3, 41, 15, 513, 22, 31, 15, 1, 202, 4]\n[29, 1, 34, 3, 41, 15, 513, 22, 31, 15, 1, 202, 4, 1]\n[29, 1, 34, 3, 41, 15, 513, 22, 31, 
15, 1, 202, 4, 1, 1195]\n[29, 1, 34, 3, 41, 15, 513, 22, 31, 15, 1, 202, 4, 1, 1195, 38]\n[11, 31]\n[11, 31, 329]\n[11, 31, 329, 456]\n[11, 31, 329, 456, 457]\n[11, 31, 329, 456, 457, 67]\n[11, 31, 329, 456, 457, 67, 54]\n[5, 44]\n[5, 44, 42]\n[5, 44, 42, 693]\n[5, 44, 42, 693, 22]\n[5, 44, 42, 693, 22, 2]\n[5, 44, 42, 693, 22, 2, 318]\n[5, 44, 42, 693, 22, 2, 318, 929]\n[5, 44, 42, 693, 22, 2, 318, 929, 98]\n[5, 44, 42, 693, 22, 2, 318, 929, 98, 2]\n[5, 44, 42, 693, 22, 2, 318, 929, 98, 2, 318]\n[19, 9]\n[37, 101]\n[37, 101, 258]\n[37, 101, 258, 6]\n[37, 101, 258, 6, 182]\n[37, 101, 258, 6, 182, 31]\n[45, 5]\n[45, 5, 51]\n[45, 5, 51, 14]\n[45, 5, 51, 14, 39]\n[45, 5, 51, 14, 39, 5]\n[45, 5, 51, 14, 39, 5, 157]\n[45, 5, 51, 14, 39, 5, 157, 694]\n[45, 5, 51, 14, 39, 5, 157, 694, 6]\n[45, 5, 51, 14, 39, 5, 157, 694, 6, 10]\n[5, 128]\n[5, 128, 695]\n[5, 128, 695, 16]\n[5, 128, 695, 16, 2352]\n[5, 128, 695, 16, 2352, 63]\n[5, 128, 695, 16, 2352, 63, 64]\n[5, 128, 695, 16, 2352, 63, 64, 15]\n[5, 128, 695, 16, 2352, 63, 64, 15, 14]\n[5, 128, 695, 16, 2352, 63, 64, 15, 14, 82]\n[5, 128, 695, 16, 2352, 63, 64, 15, 14, 82, 5]\n[5, 128, 695, 16, 2352, 63, 64, 15, 14, 82, 5, 51]\n[5, 128, 695, 16, 2352, 63, 64, 15, 14, 82, 5, 51, 564]\n[5, 128, 695, 16, 2352, 63, 64, 15, 14, 82, 5, 51, 564, 930]\n[5, 128, 695, 16, 2352, 63, 64, 15, 14, 82, 5, 51, 564, 930, 266]\n[5, 128, 695, 16, 2352, 63, 64, 15, 14, 82, 5, 51, 564, 930, 266, 54]\n[109, 15]\n[109, 15, 16]\n[109, 15, 16, 368]\n[109, 15, 16, 368, 294]\n[109, 15, 16, 368, 294, 5]\n[109, 15, 16, 368, 294, 5, 52]\n[324, 5]\n[324, 5, 474]\n[64, 5]\n[64, 5, 159]\n[64, 5, 159, 292]\n[64, 5, 159, 292, 76]\n[29, 44]\n[29, 44, 16]\n[29, 44, 16, 368]\n[29, 44, 16, 368, 242]\n[29, 44, 16, 368, 242, 64]\n[29, 44, 16, 368, 242, 64, 5]\n[29, 44, 16, 368, 242, 64, 5, 159]\n[69, 5]\n[69, 5, 1154]\n[69, 5, 1154, 14]\n[69, 5, 1154, 14, 2]\n[69, 5, 1154, 14, 2, 55]\n[69, 5, 1154, 14, 2, 55, 44]\n[5, 19]\n[5, 19, 179]\n[5, 19, 179, 367]\n[5, 19, 
179, 367, 65]\n[5, 19, 179, 367, 65, 226]\n[11, 14]\n[11, 14, 79]\n[11, 14, 79, 75]\n[11, 14, 79, 75, 724]\n[11, 14, 79, 75, 724, 2354]\n[11, 14, 79, 75, 724, 2354, 6]\n[11, 14, 79, 75, 724, 2354, 6, 10]\n[11, 14, 79, 75, 724, 2354, 6, 10, 367]\n[11, 14, 79, 75, 724, 2354, 6, 10, 367, 4]\n[11, 14, 79, 75, 724, 2354, 6, 10, 367, 4, 6]\n[11, 14, 79, 75, 724, 2354, 6, 10, 367, 4, 6, 71]\n[11, 14, 79, 75, 724, 2354, 6, 10, 367, 4, 6, 71, 16]\n[11, 14, 79, 75, 724, 2354, 6, 10, 367, 4, 6, 71, 16, 717]\n[50, 120]\n[50, 120, 2]\n[50, 120, 2, 2355]\n[27, 34]\n[27, 34, 320]\n[27, 34, 320, 64]\n[27, 34, 320, 64, 5]\n[211, 36]\n[211, 36, 10]\n[211, 36, 10, 2]\n[211, 36, 10, 2, 484]\n[37, 2357]\n[37, 2357, 6]\n[211, 5]\n[211, 5, 2358]\n[211, 5, 2358, 14]\n[211, 5, 2358, 14, 5]\n[211, 5, 2358, 14, 5, 159]\n[211, 5, 2358, 14, 5, 159, 294]\n[211, 5, 2358, 14, 5, 159, 294, 594]\n[211, 5, 2358, 14, 5, 159, 294, 594, 22]\n[211, 5, 2358, 14, 5, 159, 294, 594, 22, 20]\n[29, 44]\n[29, 44, 16]\n[29, 44, 16, 368]\n[29, 44, 16, 368, 242]\n[29, 44, 16, 368, 242, 64]\n[29, 44, 16, 368, 242, 64, 5]\n[29, 44, 16, 368, 242, 64, 5, 159]\n[29, 44]\n[29, 44, 16]\n[29, 44, 16, 368]\n[29, 44, 16, 368, 242]\n[29, 44, 16, 368, 242, 64]\n[29, 44, 16, 368, 242, 64, 5]\n[29, 44, 16, 368, 242, 64, 5, 159]\n[29, 44, 16, 368, 242, 64, 5, 159, 124]\n[29, 44, 16, 368, 242, 64, 5, 159, 124, 5]\n[29, 44, 16, 368, 242, 64, 5, 159, 124, 5, 23]\n[5, 383]\n[5, 383, 25]\n[92, 147]\n[92, 147, 2]\n[5, 23]\n[5, 23, 3]\n[5, 23, 3, 14]\n[5, 23, 3, 14, 233]\n[5, 23, 3, 14, 233, 13]\n[5, 23, 3, 14, 233, 13, 17]\n[15, 28]\n[15, 28, 436]\n[15, 28, 436, 2]\n[277, 5]\n[277, 5, 23]\n[277, 5, 23, 12]\n[14, 437]\n[14, 437, 38]\n[14, 437, 38, 438]\n[14, 437, 38, 438, 30]\n[14, 437, 38, 438, 30, 50]\n[14, 437, 38, 438, 30, 50, 332]\n[14, 437, 38, 438, 30, 50, 332, 26]\n[14, 437, 38, 438, 30, 50, 332, 26, 39]\n[14, 437, 38, 438, 30, 50, 332, 26, 39, 5]\n[14, 437, 38, 438, 30, 50, 332, 26, 39, 5, 23]\n[5, 23]\n[5, 23, 30]\n[5, 23, 
30, 3]\n[74, 41]\n[74, 41, 8]\n[74, 41, 8, 54]\n[1, 68]\n[1, 68, 3]\n[1, 68, 3, 171]\n[1, 68, 3, 171, 92]\n[1, 68, 3, 171, 92, 147]\n[1, 68, 3, 171, 92, 147, 2]\n[1, 68, 3, 171, 92, 147, 2, 113]\n[1, 68, 3, 171, 92, 147, 2, 113, 43]\n[1, 68, 3, 171, 92, 147, 2, 113, 43, 439]\n[1, 68, 3, 171, 92, 147, 2, 113, 43, 439, 43]\n[1, 68, 3, 171, 92, 147, 2, 113, 43, 439, 43, 439]\n[1, 68, 3, 171, 92, 147, 2, 113, 43, 439, 43, 439, 43]\n[26, 39]\n[26, 39, 5]\n[26, 39, 5, 23]\n[5, 23]\n[5, 23, 30]\n[5, 23, 30, 3]\n[74, 41]\n[74, 41, 8]\n[74, 41, 8, 54]\n[74, 41, 8, 54, 92]\n[74, 41, 8, 54, 92, 1]\n[74, 41, 8, 54, 92, 1, 68]\n[74, 41, 8, 54, 92, 1, 68, 3]\n[74, 41, 8, 54, 92, 1, 68, 3, 171]\n[74, 41, 8, 54, 92, 1, 68, 3, 171, 92]\n[74, 41, 8, 54, 92, 1, 68, 3, 171, 92, 147]\n[74, 41, 8, 54, 92, 1, 68, 3, 171, 92, 147, 2]\n[5, 23]\n[5, 383]\n[5, 383, 25]\n[92, 147]\n[92, 147, 2]\n[92, 147, 2, 113]\n[92, 147, 2, 113, 5]\n[92, 147, 2, 113, 5, 23]\n[14, 233]\n[14, 233, 13]\n[14, 233, 13, 17]\n[15, 28]\n[15, 28, 436]\n[15, 28, 436, 2]\n[277, 5]\n[277, 5, 23]\n[277, 5, 23, 12]\n[277, 5, 23, 12, 83]\n[277, 5, 23, 12, 83, 14]\n[277, 5, 23, 12, 83, 14, 437]\n[277, 5, 23, 12, 83, 14, 437, 38]\n[277, 5, 23, 12, 83, 14, 437, 38, 438]\n[277, 5, 23, 12, 83, 14, 437, 38, 438, 30]\n[277, 5, 23, 12, 83, 14, 437, 38, 438, 30, 50]\n[26, 39]\n[26, 39, 5]\n[26, 39, 5, 23]\n[5, 23]\n[5, 23, 30]\n[5, 23, 30, 3]\n[74, 41]\n[74, 41, 8]\n[74, 41, 8, 54]\n[74, 41, 8, 54, 92]\n[74, 41, 8, 54, 92, 1]\n[74, 41, 8, 54, 92, 1, 68]\n[74, 41, 8, 54, 92, 1, 68, 3]\n[74, 41, 8, 54, 92, 1, 68, 3, 171]\n[74, 41, 8, 54, 92, 1, 68, 3, 171, 92]\n[74, 41, 8, 54, 92, 1, 68, 3, 171, 92, 147]\n[74, 41, 8, 54, 92, 1, 68, 3, 171, 92, 147, 2]\n[74, 41, 8, 54, 92, 1, 68, 3, 171, 92, 147, 2, 113]\n[74, 41, 8, 54, 92, 1, 68, 3, 171, 92, 147, 2, 113, 99]\n[74, 41, 8, 54, 92, 1, 68, 3, 171, 92, 147, 2, 113, 99, 2]\n[74, 41, 8, 54, 92, 1, 68, 3, 171, 92, 147, 2, 113, 99, 2, 2359]\n[74, 41, 8, 54, 92, 1, 68, 3, 171, 92, 147, 2, 
113, 99, 2, 2359, 2360]\n[74, 41, 8, 54, 92, 1, 68, 3, 171, 92, 147, 2, 113, 99, 2, 2359, 2360, 6]\n[74, 41, 8, 54, 92, 1, 68, 3, 171, 92, 147, 2, 113, 99, 2, 2359, 2360, 6, 1]\n[74, 41, 8, 54, 92, 1, 68, 3, 171, 92, 147, 2, 113, 99, 2, 2359, 2360, 6, 1, 2361]\n[74, 41, 8, 54, 92, 1, 68, 3, 171, 92, 147, 2, 113, 99, 2, 2359, 2360, 6, 1, 2361, 7]\n[29, 1]\n[29, 1, 2362]\n[29, 1, 2362, 7]\n[29, 1, 2362, 7, 1]\n[29, 1, 2362, 7, 1, 1190]\n[29, 1, 2362, 7, 1, 1190, 33]\n[29, 1, 2362, 7, 1, 1190, 33, 19]\n[29, 1, 2362, 7, 1, 1190, 33, 19, 10]\n[29, 1, 2362, 7, 1, 1190, 33, 19, 10, 1127]\n[167, 2363]\n[167, 2363, 430]\n[167, 2363, 430, 4]\n[167, 2363, 430, 4, 9]\n[167, 2363, 430, 4, 9, 698]\n[167, 2363, 430, 4, 9, 698, 8]\n[12, 281]\n[12, 281, 22]\n[12, 281, 22, 31]\n[12, 281, 22, 31, 2364]\n[12, 281, 22, 31, 2364, 2365]\n[12, 281, 22, 31, 2364, 2365, 21]\n[12, 281, 22, 31, 2364, 2365, 21, 6]\n[12, 281, 22, 31, 2364, 2365, 21, 6, 10]\n[12, 281, 22, 31, 2364, 2365, 21, 6, 10, 25]\n[12, 281, 22, 31, 2364, 2365, 21, 6, 10, 25, 3]\n[12, 281, 22, 31, 2364, 2365, 21, 6, 10, 25, 3, 4]\n[12, 281, 22, 31, 2364, 2365, 21, 6, 10, 25, 3, 4, 19]\n[12, 281, 22, 31, 2364, 2365, 21, 6, 10, 25, 3, 4, 19, 3]\n[12, 281, 22, 31, 2364, 2365, 21, 6, 10, 25, 3, 4, 19, 3, 187]\n[12, 281, 22, 31, 2364, 2365, 21, 6, 10, 25, 3, 4, 19, 3, 187, 1]\n[12, 281, 22, 31, 2364, 2365, 21, 6, 10, 25, 3, 4, 19, 3, 187, 1, 41]\n[12, 281, 22, 31, 2364, 2365, 21, 6, 10, 25, 3, 4, 19, 3, 187, 1, 41, 334]\n[12, 281, 22, 31, 2364, 2365, 21, 6, 10, 25, 3, 4, 19, 3, 187, 1, 41, 334, 9]\n[12, 281, 22, 31, 2364, 2365, 21, 6, 10, 25, 3, 4, 19, 3, 187, 1, 41, 334, 9, 15]\n[12, 281, 22, 31, 2364, 2365, 21, 6, 10, 25, 3, 4, 19, 3, 187, 1, 41, 334, 9, 15, 56]\n[12, 281, 22, 31, 2364, 2365, 21, 6, 10, 25, 3, 4, 19, 3, 187, 1, 41, 334, 9, 15, 56, 18]\n[12, 281]\n[12, 281, 22]\n[12, 281, 22, 31]\n[12, 281, 22, 31, 416]\n[12, 281, 22, 31, 416, 738]\n[12, 281, 22, 31, 416, 738, 1196]\n[12, 281, 22, 31, 416, 738, 1196, 14]\n[12, 
281, 22, 31, 416, 738, 1196, 14, 18]\n[12, 281, 22, 31, 416, 738, 1196, 14, 18, 35]\n[12, 281, 22, 31, 416, 738, 1196, 14, 18, 35, 31]\n[4, 19]\n[4, 19, 3]\n[4, 19, 3, 187]\n[4, 19, 3, 187, 1]\n[4, 19, 3, 187, 1, 41]\n[4, 19, 3, 187, 1, 41, 334]\n[4, 19, 3, 187, 1, 41, 334, 69]\n[4, 19, 3, 187, 1, 41, 334, 69, 12]\n[4, 19, 3, 187, 1, 41, 334, 69, 12, 778]\n[4, 19, 3, 187, 1, 41, 334, 69, 12, 778, 6]\n[12, 281]\n[12, 281, 6]\n[12, 281, 6, 71]\n[12, 281, 6, 71, 608]\n[12, 281, 6, 71, 608, 4]\n[12, 281, 6, 71, 608, 4, 842]\n[12, 281, 6, 71, 608, 4, 842, 392]\n[12, 281, 6, 71, 608, 4, 842, 392, 1]\n[12, 281, 6, 71, 608, 4, 842, 392, 1, 216]\n[12, 281, 6, 71, 608, 4, 842, 392, 1, 216, 374]\n[12, 281, 6, 71, 608, 4, 842, 392, 1, 216, 374, 99]\n[12, 281, 6, 71, 608, 4, 842, 392, 1, 216, 374, 99, 2]\n[12, 281, 6, 71, 608, 4, 842, 392, 1, 216, 374, 99, 2, 55]\n[12, 281, 6, 71, 608, 4, 842, 392, 1, 216, 374, 99, 2, 55, 22]\n[12, 281, 6, 71, 608, 4, 842, 392, 1, 216, 374, 99, 2, 55, 22, 915]\n[12, 281, 6, 71, 608, 4, 842, 392, 1, 216, 374, 99, 2, 55, 22, 915, 39]\n[12, 281, 6, 71, 608, 4, 842, 392, 1, 216, 374, 99, 2, 55, 22, 915, 39, 50]\n[12, 281, 6, 71, 608, 4, 842, 392, 1, 216, 374, 99, 2, 55, 22, 915, 39, 50, 212]\n[14, 1]\n[14, 1, 2366]\n[14, 1, 2366, 2367]\n[14, 1, 2366, 2367, 841]\n[14, 1, 2366, 2367, 841, 105]\n[14, 1, 2366, 2367, 841, 105, 20]\n[14, 1, 2366, 2367, 841, 105, 20, 13]\n[99, 2]\n[99, 2, 349]\n[99, 2, 349, 4]\n[99, 2, 349, 4, 634]\n[99, 2, 349, 4, 634, 6]\n[99, 2, 349, 4, 634, 6, 1]\n[99, 2, 349, 4, 634, 6, 1, 1091]\n[29, 1]\n[29, 1, 63]\n[29, 1, 63, 7]\n[29, 1, 63, 7, 31]\n[29, 1, 63, 7, 31, 267]\n[29, 1, 63, 7, 31, 267, 759]\n[29, 1, 63, 7, 31, 267, 759, 2369]\n[29, 1, 63, 7, 31, 267, 759, 2369, 2370]\n[29, 1, 63, 7, 31, 267, 759, 2369, 2370, 13]\n[29, 1, 63, 7, 31, 267, 759, 2369, 2370, 13, 55]\n[29, 1, 63, 7, 31, 267, 759, 2369, 2370, 13, 55, 25]\n[29, 1, 63, 7, 31, 267, 759, 2369, 2370, 13, 55, 25, 722]\n[29, 1, 63, 7, 31, 267, 759, 2369, 2370, 13, 
55, 25, 722, 4]\n[29, 1, 63, 7, 31, 267, 759, 2369, 2370, 13, 55, 25, 722, 4, 19]\n[29, 1, 63, 7, 31, 267, 759, 2369, 2370, 13, 55, 25, 722, 4, 19, 3]\n[29, 1, 63, 7, 31, 267, 759, 2369, 2370, 13, 55, 25, 722, 4, 19, 3, 187]\n[29, 1, 63, 7, 31, 267, 759, 2369, 2370, 13, 55, 25, 722, 4, 19, 3, 187, 1]\n[29, 1, 63, 7, 31, 267, 759, 2369, 2370, 13, 55, 25, 722, 4, 19, 3, 187, 1, 41]\n[29, 1, 63, 7, 31, 267, 759, 2369, 2370, 13, 55, 25, 722, 4, 19, 3, 187, 1, 41, 334]\n[29, 1, 63, 7, 31, 267, 759, 2369, 2370, 13, 55, 25, 722, 4, 19, 3, 187, 1, 41, 334, 9]\n[29, 1, 63, 7, 31, 267, 759, 2369, 2370, 13, 55, 25, 722, 4, 19, 3, 187, 1, 41, 334, 9, 15]\n[29, 1, 63, 7, 31, 267, 759, 2369, 2370, 13, 55, 25, 722, 4, 19, 3, 187, 1, 41, 334, 9, 15, 56]\n[29, 1, 63, 7, 31, 267, 759, 2369, 2370, 13, 55, 25, 722, 4, 19, 3, 187, 1, 41, 334, 9, 15, 56, 18]\n[12, 281]\n[12, 281, 22]\n[12, 281, 22, 31]\n[12, 281, 22, 31, 416]\n[12, 281, 22, 31, 416, 738]\n[12, 281, 22, 31, 416, 738, 1196]\n[12, 281, 22, 31, 416, 738, 1196, 14]\n[12, 281, 22, 31, 416, 738, 1196, 14, 18]\n[12, 281, 22, 31, 416, 738, 1196, 14, 18, 35]\n[12, 281, 22, 31, 416, 738, 1196, 14, 18, 35, 31]\n[4, 19]\n[4, 19, 3]\n[4, 19, 3, 187]\n[4, 19, 3, 187, 1]\n[4, 19, 3, 187, 1, 41]\n[4, 19, 3, 187, 1, 41, 334]\n[4, 19, 3, 187, 1, 41, 334, 69]\n[4, 19, 3, 187, 1, 41, 334, 69, 12]\n[4, 19, 3, 187, 1, 41, 334, 69, 12, 778]\n[4, 19, 3, 187, 1, 41, 334, 69, 12, 778, 6]\n[12, 281]\n[12, 281, 6]\n[12, 281, 6, 71]\n[12, 281, 6, 71, 608]\n[12, 281, 6, 71, 608, 4]\n[12, 281, 6, 71, 608, 4, 842]\n[12, 281, 6, 71, 608, 4, 842, 392]\n[12, 281, 6, 71, 608, 4, 842, 392, 1]\n[12, 281, 6, 71, 608, 4, 842, 392, 1, 216]\n[12, 281]\n[12, 281, 6]\n[12, 281, 6, 71]\n[12, 281, 6, 71, 608]\n[12, 281, 6, 71, 608, 4]\n[12, 281, 6, 71, 608, 4, 842]\n[12, 281, 6, 71, 608, 4, 842, 392]\n[12, 281, 6, 71, 608, 4, 842, 392, 1]\n[12, 281, 6, 71, 608, 4, 842, 392, 1, 216]\n[12, 281, 6, 71, 608, 4, 842, 392, 1, 216, 374]\n[12, 281, 6, 71, 608, 4, 842, 392, 
1, 216, 374, 528]\n[12, 281, 6, 71, 608, 4, 842, 392, 1, 216, 374, 528, 828]\n[12, 281, 6, 71, 608, 4, 842, 392, 1, 216, 374, 528, 828, 119]\n[528, 1172]\n[528, 1172, 1173]\n[832, 1175]\n[832, 1175, 2371]\n[832, 1175, 2371, 530]\n[832, 1175, 2371, 530, 6]\n[843, 1197]\n[843, 1197, 2376]\n[843, 1197, 2376, 1198]\n[843, 1197, 2376, 1198, 400]\n[528, 828]\n[528, 828, 119]\n[528, 828, 119, 829]\n[528, 828, 119, 829, 401]\n[400, 2379]\n[400, 2379, 400]\n[531, 43]\n[531, 43, 401]\n[531, 43, 401, 532]\n[1205, 530]\n[1205, 530, 6]\n[531, 43]\n[531, 43, 401]\n[531, 43, 401, 532]\n[531, 532]\n[531, 43]\n[531, 43, 401]\n[531, 43, 401, 532]\n[1205, 530]\n[1205, 530, 6]\n[531, 43]\n[531, 43, 401]\n[531, 43, 401, 532]\n[2381, 2382]\n[2381, 2382, 832]\n[2381, 2382, 832, 2383]\n[2381, 2382, 832, 2383, 119]\n[2381, 2382, 832, 2383, 119, 2384]\n[2381, 2382, 832, 2383, 119, 2384, 1198]\n[119, 2386]\n[119, 2386, 2387]\n[119, 2386, 2387, 2388]\n[1210, 1211]\n[1210, 1211, 1210]\n[1210, 1211, 1210, 1211]\n[1210, 1211, 1210, 1211, 401]\n[2392, 843]\n[2392, 843, 400]\n[2392, 843, 400, 1212]\n[1115, 400]\n[1115, 400, 1202]\n[1115, 400, 1202, 2394]\n[1115, 400, 1202, 2394, 46]\n[1115, 400, 1202, 2394, 46, 3]\n[29, 18]\n[29, 18, 171]\n[29, 18, 171, 20]\n[1213, 13]\n[1213, 13, 1]\n[46, 3]\n[29, 18]\n[29, 18, 171]\n[29, 18, 171, 20]\n[1, 795]\n[1, 795, 4]\n[1213, 13]\n[1213, 13, 1]\n[40, 11]\n[40, 11, 154]\n[193, 8]\n[193, 8, 17]\n[40, 11]\n[40, 11, 154]\n[40, 154]\n[40, 11]\n[40, 11, 154]\n[193, 8]\n[193, 8, 17]\n[40, 11]\n[40, 11, 154]\n[3, 38]\n[3, 38, 1]\n[3, 38, 1, 212]\n[3, 38, 1, 212, 34]\n[3, 38, 1, 212, 34, 14]\n[3, 38, 1, 212, 34, 14, 5]\n[3, 38, 1, 212, 34, 14, 5, 112]\n[398, 4]\n[398, 4, 398]\n[398, 4, 398, 844]\n[221, 16]\n[221, 16, 954]\n[221, 16, 954, 755]\n[398, 4]\n[398, 4, 398]\n[398, 4, 398, 844]\n[46, 9]\n[46, 9, 350]\n[46, 9, 350, 1134]\n[46, 9, 350, 1134, 787]\n[414, 16]\n[141, 11]\n[331, 6]\n[331, 6, 1]\n[4, 141]\n[4, 141, 11]\n[4, 141, 11, 2398]\n[4, 141, 11, 2398, 
275]\n[201, 71]\n[201, 71, 68]\n[201, 71, 68, 71]\n[201, 12]\n[201, 12, 55]\n[201, 12, 55, 3]\n[1, 426]\n[1, 426, 7]\n[1, 426, 7, 2399]\n[20, 3]\n[20, 3, 145]\n[20, 3, 145, 1]\n[20, 3, 145, 1, 2400]\n[20, 3, 145, 1, 2400, 38]\n[50, 393]\n[50, 393, 6]\n[50, 393, 6, 167]\n[50, 393, 6, 167, 941]\n[64, 2402]\n[64, 2402, 2]\n[64, 2402, 2, 168]\n[64, 2402, 2, 168, 127]\n[64, 2402, 2, 168, 127, 31]\n[64, 2402, 2, 168, 127, 31, 103]\n[64, 2402, 2, 168, 127, 31, 103, 1120]\n[64, 2402, 2, 168, 127, 31, 103, 1120, 20]\n[64, 2402, 2, 168, 127, 31, 103, 1120, 20, 18]\n[64, 2402, 2, 168, 127, 31, 103, 1120, 20, 18, 145]\n[64, 2402, 2, 168, 127, 31, 103, 1120, 20, 18, 145, 31]\n[64, 2402, 2, 168, 127, 31, 103, 1120, 20, 18, 145, 31, 2403]\n[64, 2402, 2, 168, 127, 31, 103, 1120, 20, 18, 145, 31, 2403, 15]\n[64, 2402, 2, 168, 127, 31, 103, 1120, 20, 18, 145, 31, 2403, 15, 59]\n[4, 201]\n[4, 201, 99]\n[4, 201, 99, 28]\n[4, 201, 99, 28, 180]\n[4, 201, 99, 28, 180, 6]\n[52, 1214]\n[52, 1214, 11]\n[52, 1214, 11, 1]\n[52, 1214, 11, 1, 2405]\n[125, 20]\n[125, 20, 3]\n[125, 20, 3, 145]\n[125, 20, 3, 145, 18]\n[125, 20, 3, 145, 18, 413]\n[125, 20, 3, 145, 18, 413, 138]\n[125, 20, 3, 145, 18, 413, 138, 18]\n[18, 2407]\n[18, 2407, 4]\n[18, 2407, 4, 18]\n[18, 2407, 4, 18, 647]\n[18, 2407, 4, 18, 647, 59]\n[1, 2410]\n[1, 2410, 40]\n[1, 2410, 40, 266]\n[1, 2410, 40, 266, 65]\n[1, 2410, 40, 266, 65, 1]\n[1, 2410, 40, 266, 65, 1, 73]\n[5, 163]\n[5, 163, 51]\n[5, 163, 51, 275]\n[52, 408]\n[201, 409]\n[201, 409, 11]\n[201, 409, 11, 1]\n[201, 409, 11, 1, 640]\n[59, 152]\n[59, 152, 44]\n[59, 152, 44, 145]\n[59, 152, 44, 145, 2]\n[4, 36]\n[4, 36, 3]\n[4, 36, 3, 38]\n[4, 36, 3, 38, 275]\n[4, 36, 3, 38, 275, 346]\n[4, 36, 3, 38, 275, 346, 4]\n[36, 791]\n[36, 791, 2]\n[29, 49]\n[29, 49, 160]\n[29, 49, 160, 67]\n[4, 585]\n[4, 585, 846]\n[56, 3]\n[56, 3, 38]\n[56, 3, 38, 275]\n[648, 1]\n[648, 178]\n[18, 303]\n[18, 303, 175]\n[18, 303, 175, 153]\n[18, 303, 175, 153, 7]\n[18, 303, 175, 153, 7, 1]\n[125, 
20]\n[125, 20, 18]\n[125, 20, 18, 145]\n[18, 71]\n[18, 71, 59]\n[18, 71, 59, 649]\n[18, 71, 59, 649, 65]\n[18, 71, 59, 649, 65, 1]\n[18, 71]\n[18, 71, 59]\n[18, 71, 59, 649]\n[18, 71, 59, 649, 65]\n[18, 71, 59, 649, 65, 1]\n[1, 264]\n[1, 264, 15]\n[1, 264, 15, 194]\n[1, 264]\n[1, 264, 15]\n[1, 264, 15, 194]\n[18, 303]\n[18, 303, 1]\n[18, 303, 1, 458]\n[18, 303, 1, 458, 6]\n[18, 303, 1, 458, 6, 253]\n[18, 303]\n[18, 303, 1]\n[18, 303, 1, 458]\n[18, 303, 1, 458, 6]\n[18, 303, 1, 458, 6, 253]\n[18, 590]\n[18, 590, 57]\n[18, 590, 57, 1]\n[18, 590, 57, 1, 847]\n[18, 590]\n[18, 590, 57]\n[18, 590, 57, 1]\n[18, 590, 57, 1, 847]\n[18, 590, 57, 1, 847, 124]\n[18, 590, 57, 1, 847, 124, 275]\n[648, 1]\n[648, 1]\n[1, 2411]\n[1, 2411, 4]\n[1, 2411, 4, 1]\n[1, 230]\n[1, 230, 1216]\n[1, 230, 1216, 105]\n[1, 230, 1216, 105, 30]\n[1, 230, 1216, 105, 30, 18]\n[1, 230, 1216, 105, 30, 18, 145]\n[4, 27]\n[4, 27, 34]\n[4, 27, 34, 458]\n[4, 27, 34, 458, 275]\n[125, 106]\n[125, 106, 18]\n[93, 627]\n[93, 627, 4]\n[93, 627, 4, 93]\n[93, 627, 4, 93, 86]\n[4, 29]\n[4, 29, 18]\n[4, 29, 18, 67]\n[4, 29, 18, 67, 6]\n[4, 29, 18, 67, 6, 1]\n[36, 3]\n[36, 3, 38]\n[36, 3, 38, 74]\n[36, 3, 38, 74, 10]\n[13, 55]\n[13, 55, 74]\n[13, 55, 74, 358]\n[13, 55, 74, 358, 21]\n[13, 55, 74, 358, 21, 24]\n[13, 55, 74, 358, 21, 24, 5]\n[275, 346]\n[275, 346, 4]\n[3, 211]\n[3, 211, 94]\n[3, 211, 94, 846]\n[56, 3]\n[56, 3, 38]\n[56, 3, 38, 996]\n[5, 32]\n[5, 32, 6]\n[5, 32, 6, 393]\n[5, 32, 6, 393, 25]\n[5, 32, 6, 393, 25, 1]\n[1, 2413]\n[1, 2413, 4]\n[1, 2413, 4, 1]\n[1, 264]\n[1, 264, 15]\n[273, 5]\n[273, 5, 32]\n[273, 5, 32, 69]\n[273, 5, 32, 69, 9]\n[273, 5, 32, 69, 9, 2415]\n[273, 5, 32, 69, 9, 2415, 1]\n[273, 5, 32, 69, 9, 2415, 1, 426]\n[273, 5, 32, 69, 9, 2415, 1, 426, 244]\n[273, 5, 32, 69, 9, 2415, 1, 426, 244, 91]\n[273, 5, 32, 69, 9, 2415, 1, 426, 244, 91, 37]\n[98, 81]\n[98, 81, 14]\n[98, 81, 14, 5]\n[98, 81, 14, 5, 660]\n[98, 81, 14, 5, 660, 102]\n[26, 92]\n[26, 92, 3]\n[26, 92, 3, 23]\n[26, 92, 3, 
23, 30]\n[26, 92, 3, 23, 30, 3]\n[26, 92, 3, 23, 30, 3, 32]\n[36, 3]\n[36, 3, 38]\n[36, 3, 38, 3]\n[36, 3, 38, 3, 38]\n[36, 3, 38, 3, 38, 17]\n[36, 3, 38, 3, 38, 17, 2416]\n[2417, 4]\n[773, 30]\n[773, 30, 84]\n[773, 30, 84, 2418]\n[773, 30, 84, 2418, 26]\n[3, 244]\n[3, 244, 240]\n[3, 244, 240, 2]\n[3, 244, 240, 2, 241]\n[4, 39]\n[4, 39, 1]\n[4, 39, 1, 241]\n[4, 39, 1, 241, 302]\n[4, 39, 1, 241, 302, 6]\n[6, 2419]\n[6, 2419, 1]\n[6, 2419, 1, 2420]\n[201, 14]\n[201, 14, 241]\n[201, 14, 241, 124]\n[64, 3]\n[64, 3, 38]\n[18, 71]\n[18, 71, 59]\n[18, 71, 59, 649]\n[18, 71, 59, 649, 65]\n[18, 71, 59, 649, 65, 1]\n[18, 647]\n[18, 647, 59]\n[18, 647, 59, 649]\n[18, 647, 59, 649, 65]\n[18, 647, 59, 649, 65, 1]\n[1, 264]\n[1, 264, 15]\n[1, 264, 15, 194]\n[4, 18]\n[4, 18, 558]\n[4, 18, 558, 1]\n[4, 18, 558, 1, 194]\n[18, 303]\n[18, 303, 1]\n[18, 303, 1, 458]\n[18, 303, 1, 458, 6]\n[18, 303, 1, 458, 6, 253]\n[18, 181]\n[18, 181, 288]\n[18, 181, 288, 2421]\n[18, 181, 288, 2421, 13]\n[18, 181, 288, 2421, 13, 59]\n[18, 590]\n[18, 590, 57]\n[18, 590, 57, 1]\n[18, 590, 57, 1, 847]\n[18, 48]\n[18, 48, 2423]\n[18, 48, 2423, 6]\n[18, 48, 2423, 6, 2424]\n[18, 48, 2423, 6, 2424, 124]\n[18, 48, 2423, 6, 2424, 124, 275]\n[1, 426]\n[1, 426, 1217]\n[1, 426, 1217, 13]\n[1, 426]\n[1, 230]\n[1, 230, 1216]\n[1, 230, 1216, 105]\n[1, 230, 1216, 105, 30]\n[1, 230, 1216, 105, 30, 18]\n[4, 27]\n[4, 27, 34]\n[4, 27, 34, 458]\n[28, 66]\n[28, 66, 75]\n[16, 333]\n[16, 333, 16]\n[16, 333, 16, 152]\n[16, 333, 16, 152, 2425]\n[4, 29]\n[4, 29, 5]\n[4, 29, 5, 91]\n[4, 29, 5, 91, 7]\n[36, 18]\n[36, 18, 38]\n[36, 18, 38, 75]\n[36, 18, 38, 75, 481]\n[36, 18, 38, 75, 481, 1]\n[75, 48]\n[75, 48, 16]\n[75, 48, 16, 152]\n[75, 48, 16, 152, 6]\n[75, 48, 16, 152, 6, 999]\n[116, 253]\n[116, 253, 59]\n[116, 253, 59, 402]\n[651, 56]\n[651, 56, 18]\n[651, 56, 18, 38]\n[215, 95]\n[215, 95, 419]\n[215, 95, 419, 689]\n[215, 95, 419, 689, 3]\n[215, 95, 419, 689, 3, 266]\n[56, 3]\n[3, 19]\n[3, 19, 94]\n[3, 19, 94, 846]\n[56, 
3]\n[56, 3]\n[56, 3, 38]\n[56, 3, 38, 52]\n[56, 3, 38, 52, 3]\n[56, 3, 38, 52, 3, 2427]\n[56, 3, 38, 52, 3, 2427, 8]\n[56, 3, 38, 52, 3, 2427, 8, 77]\n[52, 3]\n[52, 3, 2428]\n[52, 3, 2428, 98]\n[49, 13]\n[49, 13, 16]\n[49, 13, 16, 33]\n[49, 13, 16, 33, 45]\n[49, 13, 16, 33, 45, 101]\n[49, 13, 16, 33, 45, 101, 17]\n[4, 5]\n[4, 5, 35]\n[4, 5, 35, 176]\n[4, 5, 35, 176, 11]\n[4, 5, 35, 176, 11, 1]\n[4, 5, 35, 176, 11, 1, 207]\n[197, 35]\n[197, 35, 176]\n[197, 35, 176, 11]\n[197, 35, 176, 11, 1]\n[197, 35, 176, 11, 1, 207]\n[197, 35, 176, 11, 1, 207, 220]\n[197, 35, 176, 11, 1, 207, 220, 125]\n[197, 35, 176, 11, 1, 207, 220, 125, 4]\n[197, 35, 176, 11, 1, 207, 220, 125, 4, 2430]\n[21, 2]\n[21, 2, 77]\n[21, 2, 77, 232]\n[21, 2, 77, 232, 18]\n[21, 2, 77, 232, 18, 48]\n[21, 2, 77, 232, 18, 48, 66]\n[21, 2, 77, 232, 18, 48, 66, 13]\n[2, 77]\n[2, 77, 2433]\n[2, 77, 2433, 2434]\n[2, 77, 2433, 2434, 52]\n[2, 77, 2433, 2434, 52, 1138]\n[2, 77, 2433, 2434, 52, 1138, 984]\n[2, 77, 2433, 2434, 52, 1138, 984, 89]\n[2, 77, 2433, 2434, 52, 1138, 984, 89, 54]\n[2, 77, 2433, 2434, 52, 1138, 984, 89, 54, 16]\n[2, 77, 2433, 2434, 52, 1138, 984, 89, 54, 16, 2435]\n[2, 77, 2433, 2434, 52, 1138, 984, 89, 54, 16, 2435, 429]\n[2, 77, 2433, 2434, 52, 1138, 984, 89, 54, 16, 2435, 429, 17]\n[2, 77, 2433, 2434, 52, 1138, 984, 89, 54, 16, 2435, 429, 17, 773]\n[2, 77, 2433, 2434, 52, 1138, 984, 89, 54, 16, 2435, 429, 17, 773, 54]\n[39, 3]\n[39, 3, 804]\n[39, 3, 804, 9]\n[39, 3, 804, 9, 44]\n[39, 3, 804, 9, 44, 2436]\n[39, 3, 804, 9, 44, 2436, 8]\n[39, 3, 804, 9, 44, 2436, 8, 6]\n[39, 3, 804, 9, 44, 2436, 8, 6, 46]\n[39, 3, 804, 9, 44, 2436, 8, 6, 46, 423]\n[39, 3, 804, 9, 44, 2436, 8, 6, 46, 423, 5]\n[5, 19]\n[5, 19, 957]\n[5, 19, 957, 17]\n[5, 19, 957, 17, 402]\n[5, 19, 957, 17, 402, 5]\n[5, 19, 957, 17, 402, 5, 19]\n[5, 19, 957, 17, 402, 5, 19, 236]\n[5, 19, 957, 17, 402, 5, 19, 236, 9]\n[5, 19, 957, 17, 402, 5, 19, 236, 9, 435]\n[5, 19, 957, 17, 402, 5, 19, 236, 9, 435, 133]\n[5, 67]\n[5, 67, 
483]\n[5, 67, 483, 394]\n[5, 67, 483, 394, 17]\n[5, 67, 483, 394, 17, 63]\n[5, 67, 483, 394, 17, 63, 4]\n[5, 67, 483, 394, 17, 63, 4, 198]\n[5, 67, 483, 394, 17, 63, 4, 198, 3]\n[5, 67, 483, 394, 17, 63, 4, 198, 3, 46]\n[5, 67, 483, 394, 17, 63, 4, 198, 3, 46, 48]\n[5, 67, 483, 394, 17, 63, 4, 198, 3, 46, 48, 48]\n[5, 67, 483, 394, 17, 63, 4, 198, 3, 46, 48, 48, 2]\n[5, 67, 483, 394, 17, 63, 4, 198, 3, 46, 48, 48, 2, 198]\n[5, 67, 483, 394, 17, 63, 4, 198, 3, 46, 48, 48, 2, 198, 52]\n[5, 67, 483, 394, 17, 63, 4, 198, 3, 46, 48, 48, 2, 198, 52, 3]\n[71, 17]\n[71, 17, 2438]\n[71, 17, 2438, 263]\n[71, 17, 2438, 263, 40]\n[71, 17, 2438, 263, 40, 83]\n[71, 17, 2438, 263, 40, 83, 5]\n[71, 17, 2438, 263, 40, 83, 5, 35]\n[71, 17, 2438, 263, 40, 83, 5, 35, 1218]\n[71, 17, 2438, 263, 40, 83, 5, 35, 1218, 5]\n[71, 17, 2438, 263, 40, 83, 5, 35, 1218, 5, 35]\n[5, 35]\n[5, 35, 239]\n[5, 35, 239, 5]\n[5, 35, 239, 5, 485]\n[5, 35, 239, 5, 485, 222]\n[4, 5]\n[4, 5, 35]\n[4, 5, 35, 176]\n[4, 5, 35, 176, 11]\n[4, 5, 35, 176, 11, 1]\n[4, 5, 35, 176, 11, 1, 207]\n[197, 35]\n[197, 35, 176]\n[197, 35, 176, 11]\n[197, 35, 176, 11, 1]\n[197, 35, 176, 11, 1, 207]\n[197, 35, 176, 11, 1, 207, 220]\n[197, 35, 176, 11, 1, 207, 220, 1]\n[197, 35, 176, 11, 1, 207, 220, 1, 533]\n[197, 35, 176, 11, 1, 207, 220, 1, 533, 1]\n[197, 35, 176, 11, 1, 207, 220, 1, 533, 1, 533]\n[197, 35, 176, 11, 1, 207, 220, 1, 533, 1, 533, 1]\n[197, 35, 176, 11, 1, 207, 220, 1, 533, 1, 533, 1, 533]\n[197, 35, 176, 11, 1, 207, 220, 1, 533, 1, 533, 1, 533, 44]\n[1, 748]\n[1, 748, 1]\n[1, 748, 1, 2439]\n[1, 748, 1, 2439, 4]\n[1, 748, 1, 2439, 4, 1]\n[1, 748, 1, 2439, 4, 1, 402]\n[1, 748, 1, 2439, 4, 1, 402, 24]\n[1, 533]\n[1, 533, 1]\n[1, 533, 1, 533]\n[1, 533, 1, 533, 21]\n[1, 533, 1, 533, 21, 135]\n[135, 2]\n[135, 2, 77]\n[135, 2, 77, 1214]\n[135, 2, 77, 1214, 394]\n[135, 2, 77, 1214, 394, 17]\n[135, 2, 77, 1214, 394, 17, 402]\n[135, 2, 77, 1214, 394, 17, 402, 25]\n[135, 2, 77, 1214, 394, 17, 402, 25, 8]\n[135, 2, 77, 
1214, 394, 17, 402, 25, 8, 45]\n[135, 2, 77, 1214, 394, 17, 402, 25, 8, 45, 3]\n[135, 2, 77, 1214, 394, 17, 402, 25, 8, 45, 3, 353]\n[135, 2, 77, 1214, 394, 17, 402, 25, 8, 45, 3, 353, 80]\n[135, 2, 77, 1214, 394, 17, 402, 25, 8, 45, 3, 353, 80, 38]\n[135, 2, 77, 1214, 394, 17, 402, 25, 8, 45, 3, 353, 80, 38, 65]\n[135, 2, 77, 1214, 394, 17, 402, 25, 8, 45, 3, 353, 80, 38, 65, 746]\n[135, 2, 77, 1214, 394, 17, 402, 25, 8, 45, 3, 353, 80, 38, 65, 746, 1]\n[3, 40]\n[3, 40, 65]\n[3, 40, 65, 299]\n[3, 40, 65, 299, 265]\n[3, 40, 65, 299, 265, 2440]\n[3, 40, 65, 299, 265, 2440, 7]\n[37, 2]\n[37, 2, 2442]\n[37, 2, 2442, 367]\n[37, 2, 2442, 367, 11]\n[37, 2, 2442, 367, 11, 16]\n[37, 2, 2442, 367, 11, 16, 2443]\n[17, 2444]\n[17, 2444, 134]\n[17, 2444, 134, 26]\n[17, 2444, 134, 26, 17]\n[17, 2444, 134, 26, 17, 2445]\n[17, 2444, 134, 26, 17, 2445, 38]\n[3, 145]\n[3, 145, 6]\n[3, 145, 6, 580]\n[3, 145, 6, 580, 2]\n[3, 145, 6, 580, 2, 77]\n[3, 145, 6, 580, 2, 77, 488]\n[3, 145, 6, 580, 2, 77, 488, 377]\n[3, 145, 6, 580, 2, 77, 488, 377, 1023]\n[3, 145, 6, 580, 2, 77, 488, 377, 1023, 35]\n[3, 145, 6, 580, 2, 77, 488, 377, 1023, 35, 2446]\n[3, 145, 6, 580, 2, 77, 488, 377, 1023, 35, 2446, 2447]\n[3, 145, 6, 580, 2, 77, 488, 377, 1023, 35, 2446, 2447, 4]\n[3, 145, 6, 580, 2, 77, 488, 377, 1023, 35, 2446, 2447, 4, 2448]\n[3, 145, 6, 580, 2, 77, 488, 377, 1023, 35, 2446, 2447, 4, 2448, 560]\n[3, 145, 6, 580, 2, 77, 488, 377, 1023, 35, 2446, 2447, 4, 2448, 560, 3]\n[3, 145, 6, 580, 2, 77, 488, 377, 1023, 35, 2446, 2447, 4, 2448, 560, 3, 246]\n[3, 145, 6, 580, 2, 77, 488, 377, 1023, 35, 2446, 2447, 4, 2448, 560, 3, 246, 2449]\n[45, 2451]\n[45, 2451, 379]\n[45, 2451, 379, 132]\n[45, 2451, 379, 132, 2452]\n[45, 2451, 379, 132, 2452, 26]\n[45, 2451, 379, 132, 2452, 26, 2453]\n[45, 2451, 379, 132, 2452, 26, 2453, 2454]\n[45, 2451, 379, 132, 2452, 26, 2453, 2454, 3]\n[3, 21]\n[3, 21, 120]\n[3, 21, 120, 6]\n[3, 21, 120, 6, 10]\n[3, 21, 120, 6, 10, 179]\n[3, 21, 120, 6, 10, 179, 2455]\n[3, 
21, 120, 6, 10, 179, 2455, 65]\n[3, 21, 120, 6, 10, 179, 2455, 65, 229]\n[3, 21, 120, 6, 10, 179, 2455, 65, 229, 6]\n[26, 834]\n[26, 834, 2456]\n[26, 834, 2456, 384]\n[26, 834, 2456, 384, 12]\n[26, 834, 2456, 384, 12, 1]\n[26, 834, 2456, 384, 12, 1, 384]\n[26, 834, 2456, 384, 12, 1, 384, 12]\n[26, 834, 2456, 384, 12, 1, 384, 12, 1]\n[26, 834, 2456, 384, 12, 1, 384, 12, 1, 384]\n[26, 834, 2456, 384, 12, 1, 384, 12, 1, 384, 12]\n[26, 834, 2456, 384, 12, 1, 384, 12, 1, 384, 12, 1]\n[26, 834, 2456, 384, 12, 1, 384, 12, 1, 384, 12, 1, 384]\n[26, 834, 2456, 384, 12, 1, 384, 12, 1, 384, 12, 1, 384, 3]\n[4, 29]\n[4, 29, 5]\n[4, 29, 5, 67]\n[4, 29, 5, 67, 394]\n[4, 29, 5, 67, 394, 17]\n[4, 29, 5, 67, 394, 17, 402]\n[4, 29, 5, 67, 394, 17, 402, 12]\n[4, 29, 5, 67, 394, 17, 402, 12, 1]\n[4, 29, 5, 67, 394, 17, 402, 12, 1, 384]\n[4, 29, 5, 67, 394, 17, 402, 12, 1, 384, 14]\n[4, 29, 5, 67, 394, 17, 402, 12, 1, 384, 14, 5]\n[4, 29, 5, 67, 394, 17, 402, 12, 1, 384, 14, 5, 51]\n[4, 29, 5, 67, 394, 17, 402, 12, 1, 384, 14, 5, 51, 11]\n[4, 29, 5, 67, 394, 17, 402, 12, 1, 384, 14, 5, 51, 11, 3]\n[4, 29, 5, 67, 394, 17, 402, 12, 1, 384, 14, 5, 51, 11, 3, 77]\n[4, 29, 5, 67, 394, 17, 402, 12, 1, 384, 14, 5, 51, 11, 3, 77, 80]\n[4, 29, 5, 67, 394, 17, 402, 12, 1, 384, 14, 5, 51, 11, 3, 77, 80, 5]\n[4, 29, 5, 67, 394, 17, 402, 12, 1, 384, 14, 5, 51, 11, 3, 77, 80, 5, 52]\n[4, 29, 5, 67, 394, 17, 402, 12, 1, 384, 14, 5, 51, 11, 3, 77, 80, 5, 52, 120]\n[4, 29, 5, 67, 394, 17, 402, 12, 1, 384, 14, 5, 51, 11, 3, 77, 80, 5, 52, 120, 163]\n[4, 29, 5, 67, 394, 17, 402, 12, 1, 384, 14, 5, 51, 11, 3, 77, 80, 5, 52, 120, 163, 1061]\n[4, 29, 5, 67, 394, 17, 402, 12, 1, 384, 14, 5, 51, 11, 3, 77, 80, 5, 52, 120, 163, 1061, 180]\n[90, 140]\n[90, 140, 848]\n[90, 140, 848, 435]\n[90, 140, 848, 435, 20]\n[90, 140, 848, 435, 20, 17]\n[90, 140]\n[90, 140, 848]\n[90, 140, 848, 435]\n[90, 140, 848, 435, 115]\n[90, 140, 848, 435, 115, 17]\n[4, 17]\n[4, 17, 2457]\n[4, 17, 2457, 4]\n[4, 17, 2457, 4, 17]\n[4, 
39]\n[4, 39, 3]\n[4, 39, 3, 61]\n[4, 39, 3, 61, 2459]\n[4, 39, 3, 61, 2459, 551]\n[4, 39, 3, 61, 2459, 551, 10]\n[4, 39, 3, 61, 2459, 551, 10, 848]\n[4, 39, 3, 61, 2459, 551, 10, 848, 435]\n[4, 39, 3, 61, 2459, 551, 10, 848, 435, 115]\n[4, 39, 3, 61, 2459, 551, 10, 848, 435, 115, 17]\n[4, 39, 3, 61, 2459, 551, 10, 848, 435, 115, 17, 575]\n[4, 39, 3, 61, 2459, 551, 10, 848, 435, 115, 17, 575, 26]\n[4, 39, 3, 61, 2459, 551, 10, 848, 435, 115, 17, 575, 26, 13]\n[4, 39, 3, 61, 2459, 551, 10, 848, 435, 115, 17, 575, 26, 13, 17]\n[4, 39, 3, 61, 2459, 551, 10, 848, 435, 115, 17, 575, 26, 13, 17, 402]\n[4, 39, 3, 61, 2459, 551, 10, 848, 435, 115, 17, 575, 26, 13, 17, 402, 1]\n[4, 39, 3, 61, 2459, 551, 10, 848, 435, 115, 17, 575, 26, 13, 17, 402, 1, 3]\n[4, 39, 3, 61, 2459, 551, 10, 848, 435, 115, 17, 575, 26, 13, 17, 402, 1, 3, 5]\n[15, 1017]\n[15, 1017, 1]\n[15, 1017, 1, 80]\n[15, 1017, 1, 80, 3]\n[15, 1017, 1, 80, 3, 150]\n[15, 1017, 1, 80, 3, 150, 801]\n[15, 1017, 1, 80, 3, 150, 801, 6]\n[639, 16]\n[639, 16, 148]\n[639, 16, 148, 40]\n[639, 16, 148, 40, 11]\n[169, 3]\n[169, 3, 639]\n[169, 3, 639, 2]\n[169, 3, 639, 2, 272]\n[169, 3, 639, 2, 272, 2460]\n[169, 3, 639, 2, 272, 2460, 148]\n[169, 3, 639, 2, 272, 2460, 148, 170]\n[169, 3, 639, 2, 272, 2460, 148, 170, 38]\n[169, 3, 639, 2, 272, 2460, 148, 170, 38, 3]\n[38, 3]\n[38, 3]\n[38, 3, 354]\n[38, 3, 354, 652]\n[652, 534]\n[652, 534, 2461]\n[652, 534, 2461, 534]\n[652, 534, 2461, 534, 19]\n[652, 534, 2461, 534, 19, 3]\n[652, 534, 2461, 534, 19, 3, 187]\n[49, 626]\n[49, 626, 49]\n[49, 626, 49, 626]\n[49, 626, 49, 626, 49]\n[49, 626, 49, 626, 49, 626]\n[5, 361]\n[5, 361, 49]\n[5, 361, 49, 690]\n[5, 361, 49, 690, 26]\n[5, 361, 49, 690, 26, 39]\n[5, 361, 49, 690, 26, 39, 3]\n[5, 361, 49, 690, 26, 39, 3, 162]\n[5, 361, 49, 690, 26, 39, 3, 162, 52]\n[5, 361, 49, 690, 26, 39, 3, 162, 52, 769]\n[3, 19]\n[3, 19, 769]\n[3, 19, 769, 16]\n[3, 19, 769, 16, 176]\n[3, 19, 769, 16, 176, 11]\n[3, 19, 769, 16, 176, 11, 1]\n[3, 19, 769, 16, 
176, 11, 1, 207]\n[3, 19, 769, 16, 176, 11, 1, 207, 220]\n[3, 19, 769, 16, 176, 11, 1, 207, 220, 3]\n[3, 19, 769, 16, 176, 11, 1, 207, 220, 3, 35]\n[3, 19, 769, 16, 176, 11, 1, 207, 220, 3, 35, 30]\n[3, 19, 769, 16, 176, 11, 1, 207, 220, 3, 35, 30, 3]\n[30, 3]\n[30, 3, 2462]\n[30, 3, 2462, 30]\n[30, 3, 2462, 30, 3]\n[2463, 162]\n[2463, 162, 9]\n[2463, 162, 9, 2]\n[2463, 162, 9, 2, 2464]\n[106, 24]\n[106, 24, 2]\n[106, 24, 2, 2465]\n[106, 24, 2, 2465, 13]\n[5, 187]\n[5, 187, 32]\n[5, 187, 32, 5]\n[5, 187, 32, 5, 139]\n[48, 3]\n[48, 3, 88]\n[48, 3, 88, 433]\n[48, 3, 88, 433, 1]\n[28, 384]\n[28, 384, 98]\n[28, 384, 98, 2]\n[28, 384, 98, 2, 2466]\n[28, 384, 98, 2, 2466, 185]\n[28, 384, 98, 2, 2466, 185, 60]\n[28, 384, 98, 2, 2466, 185, 60, 12]\n[28, 384, 98, 2, 2466, 185, 60, 12, 2]\n[28, 384, 98, 2, 2466, 185, 60, 12, 2, 309]\n[28, 384, 98, 2, 2466, 185, 60, 12, 2, 309, 653]\n[28, 384, 98, 2, 2466, 185, 60, 12, 2, 309, 653, 25]\n[203, 293]\n[203, 293, 17]\n[203, 293, 17, 535]\n[203, 293, 17, 535, 28]\n[29, 1]\n[29, 1, 79]\n[29, 1, 79, 15]\n[29, 1, 79, 15, 1220]\n[29, 1, 79, 15, 1220, 4]\n[203, 293]\n[203, 293, 1]\n[203, 293, 1, 151]\n[203, 293, 1, 151, 336]\n[60, 1222]\n[60, 1222, 15]\n[60, 1222, 15, 1223]\n[60, 1222, 15, 1223, 20]\n[60, 1222, 15, 1223, 20, 96]\n[1, 1225]\n[1, 1225, 38]\n[1, 1225, 38, 1226]\n[1, 1225, 38, 1226, 54]\n[1, 1225, 38, 1226, 54, 1]\n[29, 203]\n[29, 203, 1228]\n[29, 203, 1228, 17]\n[3, 187]\n[3, 187, 28]\n[17, 535]\n[17, 535, 302]\n[17, 535, 302, 1231]\n[2, 208]\n[2, 208, 743]\n[12, 2]\n[12, 2, 309]\n[12, 2, 309, 653]\n[12, 2, 309, 653, 25]\n[27, 316]\n[27, 316, 14]\n[27, 316, 14, 12]\n[27, 316, 14, 12, 203]\n[27, 316, 14, 12, 203, 14]\n[27, 316, 14, 12, 203, 14, 18]\n[27, 316, 14, 12, 203, 14, 18, 41]\n[27, 316, 14, 12, 203, 14, 18, 41, 60]\n[27, 316, 14, 12, 203, 14, 18, 41, 60, 12]\n[27, 316, 14, 12, 203, 14, 18, 41, 60, 12, 2]\n[27, 316, 14, 12, 203, 14, 18, 41, 60, 12, 2, 309]\n[27, 316, 14, 12, 203, 14, 18, 41, 60, 12, 2, 309, 
653]\n[27, 316, 14, 12, 203, 14, 18, 41, 60, 12, 2, 309, 653, 25]\n[203, 293]\n[203, 293, 17]\n[203, 293, 17, 535]\n[203, 293, 17, 535, 28]\n[29, 1]\n[29, 1, 79]\n[29, 1, 79, 15]\n[29, 1, 79, 15, 1220]\n[29, 1, 79, 15, 1220, 4]\n[203, 293]\n[203, 293, 1]\n[203, 293, 1, 151]\n[203, 293, 1, 151, 336]\n[60, 1222]\n[60, 1222, 15]\n[60, 1222, 15, 1223]\n[60, 1222, 15, 1223, 20]\n[60, 1222, 15, 1223, 20, 96]\n[1, 1225]\n[1, 1225, 38]\n[1, 1225, 38, 1226]\n[1, 1225, 38, 1226, 54]\n[1, 1225, 38, 1226, 54, 1]\n[29, 203]\n[29, 203, 1228]\n[29, 203, 1228, 17]\n[3, 187]\n[3, 187, 28]\n[17, 535]\n[17, 535, 302]\n[17, 535, 302, 1231]\n[2, 208]\n[2, 208, 743]\n[12, 2]\n[12, 2, 309]\n[12, 2, 309, 653]\n[12, 2, 309, 653, 25]\n[27, 316]\n[27, 316, 14]\n[27, 316, 14, 12]\n[27, 316, 14, 12, 203]\n[27, 316, 14, 12, 203, 14]\n[27, 316, 14, 12, 203, 14, 18]\n[27, 316, 14, 12, 203, 14, 18, 41]\n[27, 316, 14, 12, 203, 14, 18, 41, 60]\n[27, 316, 14, 12, 203, 14, 18, 41, 60, 12]\n[27, 316, 14, 12, 203, 14, 18, 41, 60, 12, 2]\n[27, 316, 14, 12, 203, 14, 18, 41, 60, 12, 2, 309]\n[27, 316, 14, 12, 203, 14, 18, 41, 60, 12, 2, 309, 849]\n[27, 316, 14, 12, 203, 14, 18, 41, 60, 12, 2, 309, 849, 25]\n[27, 316, 14, 12, 203, 14, 18, 41, 60, 12, 2, 309, 849, 25, 3]\n[1014, 32]\n[1014, 32, 3]\n[1014, 32, 3, 38]\n[292, 49]\n[292, 49, 21]\n[292, 49, 21, 2]\n[13, 1]\n[13, 1, 800]\n[786, 17]\n[786, 17, 780]\n[786, 17, 780, 15]\n[551, 42]\n[551, 42, 91]\n[551, 42, 91, 7]\n[2469, 15]\n[2469, 15, 1]\n[2469, 15, 1, 2470]\n[2469, 15, 1, 2470, 7]\n[2469, 15, 1, 2470, 7, 17]\n[2, 749]\n[2, 749, 2472]\n[2, 749, 2472, 514]\n[2, 749, 2472, 514, 29]\n[2, 749, 2472, 514, 29, 3]\n[2, 749, 2472, 514, 29, 3, 38]\n[17, 194]\n[17, 194, 2473]\n[17, 194, 2473, 15]\n[17, 194, 2473, 15, 782]\n[60, 12]\n[60, 12, 2]\n[60, 12, 2, 309]\n[60, 12, 2, 309, 849]\n[60, 12, 2, 309, 849, 25]\n[60, 12, 2, 309, 849, 25, 3]\n[2, 309]\n[2, 309, 309]\n[2, 309, 309, 849]\n[2, 309, 309, 849, 25]\n[2, 309, 309, 849, 25, 3]\n[2, 309, 309, 849, 25, 
3, 12]\n[2, 309, 309, 849, 25, 3, 12, 83]\n[2, 309, 309, 849, 25, 3, 12, 83, 14]\n[2, 309, 309, 849, 25, 3, 12, 83, 14, 2474]\n[2, 309, 309, 849, 25, 3, 12, 83, 14, 2474, 4]\n[2, 309, 309, 849, 25, 3, 12, 83, 14, 2474, 4, 2475]\n[1004, 14]\n[1004, 14, 38]\n[4, 2478]\n[4, 2478, 4]\n[579, 17]\n[579, 17, 535]\n[2480, 630]\n[2480, 630, 2481]\n[2480, 630, 2481, 15]\n[2483, 2484]\n[2483, 2484, 2485]\n[2486, 2487]\n[2486, 2487, 2488]\n[2489, 2490]\n[2489, 2490, 2491]\n[2489, 2490, 2491, 2]\n[2493, 2494]\n[2493, 2494, 2495]\n[2493, 2494, 2495, 4]\n[2497, 805]\n[2497, 805, 55]\n[2497, 805, 55, 4]\n[2498, 4]\n[2498, 4, 2499]\n[2498, 4, 2499, 4]\n[2498, 4, 2499, 4, 2500]\n[2498, 4, 2499, 4, 2500, 38]\n[75, 2502]\n[75, 2502, 38]\n[75, 2502, 38, 370]\n[75, 2502, 38, 370, 309]\n[75, 2502, 38, 370, 309, 270]\n[26, 2504]\n[26, 2504, 7]\n[26, 2504, 7, 1]\n[26, 2504, 7, 1, 2505]\n[26, 2504, 7, 1, 2505, 2506]\n[26, 2504, 7, 1, 2505, 2506, 7]\n[26, 2504, 7, 1, 2505, 2506, 7, 1]\n[12, 203]\n[12, 203, 2507]\n[12, 203, 2507, 4]\n[12, 203, 2507, 4, 36]\n[12, 203, 2507, 4, 36, 18]\n[12, 203, 2507, 4, 36, 18, 183]\n[12, 203, 2507, 4, 36, 18, 183, 39]\n[12, 203, 2507, 4, 36, 18, 183, 39, 5]\n[12, 203, 2507, 4, 36, 18, 183, 39, 5, 42]\n[12, 203, 2507, 4, 36, 18, 183, 39, 5, 42, 149]\n[39, 5]\n[39, 5, 42]\n[39, 5, 42, 1073]\n[39, 5, 42, 1073, 31]\n[5, 128]\n[5, 128, 48]\n[5, 128, 48, 27]\n[5, 128, 48, 27, 1232]\n[69, 1233]\n[69, 1233, 100]\n[69, 1233, 100, 19]\n[69, 1233, 100, 19, 10]\n[69, 1233, 100, 19, 10, 4]\n[69, 1233, 100, 19, 10, 4, 39]\n[69, 1233, 100, 19, 10, 4, 39, 5]\n[69, 1233, 100, 19, 10, 4, 39, 5, 42]\n[69, 1233, 100, 19, 10, 4, 39, 5, 42, 149]\n[5, 128]\n[5, 128, 42]\n[5, 128, 42, 48]\n[5, 128, 42, 48, 2]\n[69, 54]\n[69, 54, 338]\n[69, 54, 338, 278]\n[69, 54, 338, 278, 94]\n[69, 54, 338, 278, 94, 13]\n[1, 2508]\n[1, 2508, 153]\n[1, 2508, 153, 7]\n[1, 2508, 153, 7, 8]\n[1, 2508, 153, 7, 8, 13]\n[1, 2508, 153, 7, 8, 13, 31]\n[1, 2508, 153, 7, 8, 13, 31, 33]\n[1, 2508, 153, 7, 8, 
13, 31, 33, 28]\n[1, 2508, 153, 7, 8, 13, 31, 33, 28, 491]\n[1, 2508, 153, 7, 8, 13, 31, 33, 28, 491, 7]\n[491, 7]\n[491, 7, 1171]\n[491, 7, 1171, 4]\n[5, 19]\n[5, 19, 51]\n[5, 19, 51, 1]\n[5, 19, 51, 1, 622]\n[5, 19, 51, 1, 622, 28]\n[13, 17]\n[28, 657]\n[28, 657, 17]\n[28, 657, 17, 155]\n[28, 657, 17, 155, 4]\n[28, 657, 17, 155, 4, 37]\n[28, 657, 17, 155, 4, 37, 28]\n[28, 657, 17, 155, 4, 37, 28, 1234]\n[28, 657, 17, 155, 4, 37, 28, 1234, 6]\n[278, 48]\n[278, 48, 654]\n[278, 48, 654, 16]\n[278, 48, 654, 16, 161]\n[278, 48, 654, 16, 161, 100]\n[39, 5]\n[39, 5, 42]\n[39, 5, 42, 149]\n[39, 5, 42, 149, 3]\n[39, 5, 42, 149, 3, 39]\n[39, 5, 42, 149, 3, 39, 5]\n[39, 5, 42, 149, 3, 39, 5, 42]\n[39, 5, 42, 149, 3, 39, 5, 42, 149]\n[278, 10]\n[278, 10, 627]\n[278, 10, 627, 26]\n[278, 10, 627, 26, 2510]\n[278, 10, 627, 26, 2510, 24]\n[42, 417]\n[42, 417, 5]\n[42, 417, 5, 139]\n[2, 41]\n[2, 41, 28]\n[2, 41, 28, 231]\n[2, 41, 28, 231, 4]\n[2, 41, 28, 231, 4, 83]\n[2, 41, 28, 231, 4, 83, 37]\n[2, 41, 28, 231, 4, 83, 37, 28]\n[2, 41, 28, 231, 4, 83, 37, 28, 1234]\n[2, 41, 28, 231, 4, 83, 37, 28, 1234, 6]\n[278, 48]\n[278, 48, 654]\n[278, 48, 654, 16]\n[278, 48, 654, 16, 161]\n[278, 48, 654, 16, 161, 100]\n[39, 5]\n[39, 5, 42]\n[39, 5, 42, 149]\n[39, 5, 42, 149, 3]\n[39, 5, 42, 149, 3, 5]\n[39, 5, 42, 149, 3, 5, 190]\n[39, 5, 42, 149, 3, 5, 190, 59]\n[39, 5, 42, 149, 3, 5, 190, 59, 41]\n[39, 5, 42, 149, 3, 5, 190, 59, 41, 128]\n[39, 5, 42, 149, 3, 5, 190, 59, 41, 128, 10]\n[39, 5, 42, 149, 3, 5, 190, 59, 41, 128, 10, 28]\n[39, 5, 42, 149, 3, 5, 190, 59, 41, 128, 10, 28, 206]\n[39, 5, 42, 149, 3, 5, 190, 59, 41, 128, 10, 28, 206, 324]\n[39, 5, 42, 149, 3, 5, 190, 59, 41, 128, 10, 28, 206, 324, 18]\n[39, 5, 42, 149, 3, 5, 190, 59, 41, 128, 10, 28, 206, 324, 18, 276]\n[39, 5, 42, 149, 3, 5, 190, 59, 41, 128, 10, 28, 206, 324, 18, 276, 1]\n[39, 5, 42, 149, 3, 5, 190, 59, 41, 128, 10, 28, 206, 324, 18, 276, 1, 161]\n[39, 5, 42, 149, 3, 5, 190, 59, 41, 128, 10, 28, 206, 324, 18, 276, 
1, 161, 33]\n[39, 5, 42, 149, 3, 5, 190, 59, 41, 128, 10, 28, 206, 324, 18, 276, 1, 161, 33, 261]\n[39, 5, 42, 149, 3, 5, 190, 59, 41, 128, 10, 28, 206, 324, 18, 276, 1, 161, 33, 261, 5]\n[39, 5, 42, 149, 3, 5, 190, 59, 41, 128, 10, 28, 206, 324, 18, 276, 1, 161, 33, 261, 5, 42]\n[39, 5, 42, 149, 3, 5, 190, 59, 41, 128, 10, 28, 206, 324, 18, 276, 1, 161, 33, 261, 5, 42, 149]\n[39, 5, 42, 149, 3, 5, 190, 59, 41, 128, 10, 28, 206, 324, 18, 276, 1, 161, 33, 261, 5, 42, 149, 14]\n[39, 5, 42, 149, 3, 5, 190, 59, 41, 128, 10, 28, 206, 324, 18, 276, 1, 161, 33, 261, 5, 42, 149, 14, 514]\n[39, 5, 42, 149, 3, 5, 190, 59, 41, 128, 10, 28, 206, 324, 18, 276, 1, 161, 33, 261, 5, 42, 149, 14, 514, 4]\n[39, 5, 42, 149, 3, 5, 190, 59, 41, 128, 10, 28, 206, 324, 18, 276, 1, 161, 33, 261, 5, 42, 149, 14, 514, 4, 807]\n[39, 5, 42, 149, 3, 5, 190, 59, 41, 128, 10, 28, 206, 324, 18, 276, 1, 161, 33, 261, 5, 42, 149, 14, 514, 4, 807, 139]\n[39, 5, 42, 149, 3, 5, 190, 59, 41, 128, 10, 28, 206, 324, 18, 276, 1, 161, 33, 261, 5, 42, 149, 14, 514, 4, 807, 139, 10]\n[39, 5, 42, 149, 3, 5, 190, 59, 41, 128, 10, 28, 206, 324, 18, 276, 1, 161, 33, 261, 5, 42, 149, 14, 514, 4, 807, 139, 10, 28]\n[20, 940]\n[20, 940, 501]\n[20, 940, 501, 105]\n[20, 940, 501, 105, 157]\n[20, 940, 501, 105, 157, 288]\n[20, 940, 501, 105, 157, 288, 2511]\n[20, 940, 501, 105, 157, 288, 2511, 13]\n[20, 940, 501, 105, 157, 288, 2511, 13, 1]\n[26, 166]\n[26, 166, 16]\n[26, 166, 16, 63]\n[26, 166, 16, 63, 15]\n[18, 157]\n[18, 157, 106]\n[18, 157, 106, 39]\n[18, 157, 106, 39, 5]\n[18, 157, 106, 39, 5, 42]\n[18, 157, 106, 39, 5, 42, 149]\n[99, 27]\n[99, 27, 430]\n[99, 27, 430, 5]\n[39, 5]\n[39, 5, 42]\n[39, 5, 42, 149]\n[39, 5, 42, 149, 31]\n[5, 128]\n[5, 128, 48]\n[5, 128, 48, 27]\n[5, 128, 48, 27, 1232]\n[39, 59]\n[39, 59, 55]\n[39, 59, 55, 123]\n[39, 59, 55, 123, 607]\n[39, 59, 55, 123, 607, 102]\n[39, 59, 55, 123, 607, 102, 916]\n[39, 59, 55, 123, 607, 102, 916, 76]\n[39, 59, 55, 123, 607, 102, 916, 76, 654]\n[39, 59, 
55, 123, 607, 102, 916, 76, 654, 54]\n[69, 1233]\n[69, 1233, 100]\n[69, 1233, 100, 19]\n[69, 1233, 100, 19, 10]\n[69, 1233, 100, 19, 10, 5]\n[69, 1233, 100, 19, 10, 5, 190]\n[69, 1233, 100, 19, 10, 5, 190, 59]\n[69, 1233, 100, 19, 10, 5, 190, 59, 41]\n[69, 1233, 100, 19, 10, 5, 190, 59, 41, 128]\n[69, 1233, 100, 19, 10, 5, 190, 59, 41, 128, 10]\n[69, 1233, 100, 19, 10, 5, 190, 59, 41, 128, 10, 28]\n[324, 1235]\n[324, 1235, 71]\n[324, 1235, 71, 1]\n[324, 1235, 71, 1, 161]\n[324, 1235, 71, 1, 161, 33]\n[324, 1235, 71, 1, 161, 33, 261]\n[324, 1235, 71, 1, 161, 33, 261, 5]\n[324, 1235, 71, 1, 161, 33, 261, 5, 190]\n[324, 1235, 71, 1, 161, 33, 261, 5, 190, 59]\n[324, 1235, 71, 1, 161, 33, 261, 5, 190, 59, 41]\n[324, 1235, 71, 1, 161, 33, 261, 5, 190, 59, 41, 128]\n[324, 1235, 71, 1, 161, 33, 261, 5, 190, 59, 41, 128, 10]\n[324, 1235, 71, 1, 161, 33, 261, 5, 190, 59, 41, 128, 10, 28]\n[1235, 262]\n[1235, 262, 1]\n[1235, 262, 1, 2513]\n[1235, 262, 1, 2513, 394]\n[1235, 262, 1, 2513, 394, 130]\n[1235, 262, 1, 2513, 394, 130, 4]\n[1235, 262, 1, 2513, 394, 130, 4, 166]\n[1235, 262, 1, 2513, 394, 130, 4, 166, 16]\n[1235, 262, 1, 2513, 394, 130, 4, 166, 16, 63]\n[1235, 262, 1, 2513, 394, 130, 4, 166, 16, 63, 15]\n[18, 157]\n[18, 157, 106]\n[18, 157, 106, 18]\n[18, 157, 106, 18, 157]\n[4, 39]\n[4, 39, 5]\n[4, 39, 5, 42]\n[4, 39, 5, 42, 149]\n[278, 48]\n[278, 48, 654]\n[278, 48, 654, 16]\n[278, 48, 654, 16, 161]\n[278, 48, 654, 16, 161, 100]\n[278, 48, 654, 16, 161, 100, 87]\n[278, 48, 654, 16, 161, 100, 87, 838]\n[278, 48, 654, 16, 161, 100, 87, 838, 24]\n[278, 48, 654, 16, 161, 100, 87, 838, 24, 1]\n[278, 48, 654, 16, 161, 100, 87, 838, 24, 1, 185]\n[278, 48, 654, 16, 161, 100, 87, 838, 24, 1, 185, 42]\n[278, 48, 654, 16, 161, 100, 87, 838, 24, 1, 185, 42, 417]\n[851, 308]\n[851, 308, 5]\n[851, 308, 5, 23]\n[5, 383]\n[5, 383, 25]\n[92, 147]\n[92, 147, 2]\n[5, 23]\n[5, 23, 3]\n[5, 23, 3, 14]\n[5, 23, 3, 14, 233]\n[5, 23, 3, 14, 233, 13]\n[5, 23, 3, 14, 233, 13, 17]\n[15, 
28]\n[15, 28, 436]\n[15, 28, 436, 2]\n[277, 5]\n[277, 5, 23]\n[277, 5, 23, 12]\n[14, 437]\n[14, 437, 38]\n[14, 437, 38, 438]\n[14, 437, 38, 438, 30]\n[14, 437, 38, 438, 30, 50]\n[14, 437, 38, 438, 30, 50, 332]\n[14, 437, 38, 438, 30, 50, 332, 26]\n[14, 437, 38, 438, 30, 50, 332, 26, 39]\n[14, 437, 38, 438, 30, 50, 332, 26, 39, 5]\n[14, 437, 38, 438, 30, 50, 332, 26, 39, 5, 23]\n[5, 23]\n[5, 23, 30]\n[5, 23, 30, 3]\n[74, 41]\n[74, 41, 8]\n[74, 41, 8, 54]\n[1, 68]\n[1, 68, 3]\n[1, 68, 3, 171]\n[1, 68, 3, 171, 92]\n[1, 68, 3, 171, 92, 147]\n[1, 68, 3, 171, 92, 147, 2]\n[1, 68, 3, 171, 92, 147, 2, 113]\n[1, 68, 3, 171, 92, 147, 2, 113, 43]\n[1, 68, 3, 171, 92, 147, 2, 113, 43, 439]\n[1, 68, 3, 171, 92, 147, 2, 113, 43, 439, 43]\n[1, 68, 3, 171, 92, 147, 2, 113, 43, 439, 43, 439]\n[1, 68, 3, 171, 92, 147, 2, 113, 43, 439, 43, 439, 43]\n[26, 39]\n[26, 39, 5]\n[26, 39, 5, 23]\n[5, 23]\n[5, 23, 30]\n[5, 23, 30, 3]\n[74, 41]\n[74, 41, 8]\n[74, 41, 8, 54]\n[74, 41, 8, 54, 92]\n[74, 41, 8, 54, 92, 1]\n[74, 41, 8, 54, 92, 1, 68]\n[74, 41, 8, 54, 92, 1, 68, 3]\n[74, 41, 8, 54, 92, 1, 68, 3, 171]\n[74, 41, 8, 54, 92, 1, 68, 3, 171, 92]\n[74, 41, 8, 54, 92, 1, 68, 3, 171, 92, 147]\n[74, 41, 8, 54, 92, 1, 68, 3, 171, 92, 147, 2]\n[5, 23]\n[5, 383]\n[5, 383, 25]\n[92, 147]\n[92, 147, 2]\n[92, 147, 2, 113]\n[92, 147, 2, 113, 5]\n[92, 147, 2, 113, 5, 23]\n[14, 233]\n[14, 233, 13]\n[14, 233, 13, 17]\n[15, 28]\n[15, 28, 436]\n[15, 28, 436, 2]\n[277, 5]\n[277, 5, 23]\n[277, 5, 23, 12]\n[277, 5, 23, 12, 83]\n[277, 5, 23, 12, 83, 14]\n[277, 5, 23, 12, 83, 14, 437]\n[277, 5, 23, 12, 83, 14, 437, 38]\n[277, 5, 23, 12, 83, 14, 437, 38, 438]\n[277, 5, 23, 12, 83, 14, 437, 38, 438, 30]\n[277, 5, 23, 12, 83, 14, 437, 38, 438, 30, 50]\n[26, 39]\n[26, 39, 5]\n[26, 39, 5, 23]\n[5, 23]\n[5, 23, 30]\n[5, 23, 30, 3]\n[74, 41]\n[74, 41, 8]\n[74, 41, 8, 54]\n[74, 41, 8, 54, 92]\n[74, 41, 8, 54, 92, 1]\n[74, 41, 8, 54, 92, 1, 68]\n[74, 41, 8, 54, 92, 1, 68, 3]\n[74, 41, 8, 54, 92, 1, 68, 3, 171]\n[74, 
41, 8, 54, 92, 1, 68, 3, 171, 92]\n[74, 41, 8, 54, 92, 1, 68, 3, 171, 92, 147]\n[74, 41, 8, 54, 92, 1, 68, 3, 171, 92, 147, 2]\n[74, 41, 8, 54, 92, 1, 68, 3, 171, 92, 147, 2, 113]\n[74, 41, 8, 54, 92, 1, 68, 3, 171, 92, 147, 2, 113, 90]\n[74, 41, 8, 54, 92, 1, 68, 3, 171, 92, 147, 2, 113, 90, 35]\n[74, 41, 8, 54, 92, 1, 68, 3, 171, 92, 147, 2, 113, 90, 35, 2]\n[74, 41, 8, 54, 92, 1, 68, 3, 171, 92, 147, 2, 113, 90, 35, 2, 142]\n[74, 41, 8, 54, 92, 1, 68, 3, 171, 92, 147, 2, 113, 90, 35, 2, 142, 13]\n[90, 35]\n[90, 35, 2]\n[90, 35, 2, 142]\n[90, 35, 2, 142, 13]\n[29, 1]\n[29, 1, 478]\n[29, 1, 478, 578]\n[29, 1, 478, 578, 800]\n[4, 49]\n[4, 49, 739]\n[4, 49, 739, 4]\n[65, 17]\n[65, 17, 427]\n[65, 17, 427, 260]\n[21, 363]\n[21, 363, 30]\n[21, 363, 30, 17]\n[21, 363, 30, 17, 103]\n[21, 363, 30, 17, 103, 2514]\n[620, 90]\n[620, 90, 35]\n[620, 90, 35, 2]\n[620, 90, 35, 2, 142]\n[620, 90, 35, 2, 142, 13]\n[90, 35]\n[90, 35, 2]\n[90, 35, 2, 142]\n[90, 35, 2, 142, 13]\n[90, 35]\n[90, 35, 2]\n[90, 35, 2, 142]\n[90, 35, 2, 142, 13]\n[90, 35]\n[90, 35, 2]\n[90, 35, 2, 142]\n[90, 35, 2, 142, 13]\n[90, 35]\n[90, 35, 989]\n[90, 35, 989, 86]\n[90, 35, 989, 86, 76]\n[90, 35, 989, 86, 76, 35]\n[90, 35, 989, 86, 76, 35, 287]\n[36, 286]\n[36, 286, 423]\n[36, 286, 423, 5]\n[36, 286, 423, 5, 312]\n[36, 286, 423, 5, 312, 46]\n[36, 286, 423, 5, 312, 46, 22]\n[18, 2515]\n[18, 2515, 165]\n[18, 2515, 165, 4]\n[18, 2515, 165, 4, 18]\n[18, 2515, 165, 4, 18, 51]\n[18, 2515, 165, 4, 18, 51, 9]\n[90, 35]\n[90, 35, 2]\n[90, 35, 2, 142]\n[90, 35, 2, 142, 13]\n[90, 35]\n[90, 35, 2]\n[90, 35, 2, 142]\n[90, 35, 2, 142, 13]\n[90, 35, 2, 142, 13, 8]\n[90, 35, 2, 142, 13, 8, 133]\n[90, 35, 2, 142, 13, 8, 133, 207]\n[90, 35, 2, 142, 13, 8, 133, 207, 742]\n[90, 35, 2, 142, 13, 8, 133, 207, 742, 279]\n[2, 77]\n[2, 77, 700]\n[2, 77, 700, 2516]\n[2, 77, 700, 2516, 247]\n[2, 77, 700, 2516, 247, 5]\n[2517, 4]\n[2517, 4, 1051]\n[26, 2518]\n[26, 2518, 7]\n[26, 2518, 7, 274]\n[26, 2518, 7, 274, 44]\n[26, 2518, 7, 
274, 44, 88]\n[26, 2518, 7, 274, 44, 88, 41]\n[26, 2518, 7, 274, 44, 88, 41, 3]\n[26, 2518, 7, 274, 44, 88, 41, 3, 1]\n[26, 2518, 7, 274, 44, 88, 41, 3, 1, 68]\n[26, 2518, 7, 274, 44, 88, 41, 3, 1, 68, 5]\n[12, 8]\n[12, 8, 4]\n[4, 24]\n[4, 24, 1]\n[4, 24, 1, 311]\n[4, 24, 1, 311, 53]\n[598, 59]\n[598, 59, 914]\n[598, 59, 914, 44]\n[598, 59, 914, 44, 42]\n[49, 160]\n[12, 59]\n[90, 35]\n[90, 35, 2]\n[90, 35, 2, 142]\n[90, 35, 2, 142, 13]\n[90, 35]\n[90, 35, 2]\n[90, 35, 2, 142]\n[90, 35, 2, 142, 13]\n[90, 35]\n[90, 35, 2]\n[90, 35, 2, 142]\n[90, 35, 2, 142, 13]\n[90, 35, 2, 142, 13, 8]\n[90, 35, 2, 142, 13, 8, 5]\n[90, 35, 2, 142, 13, 8, 5, 48]\n[90, 35, 2, 142, 13, 8, 5, 48, 2519]\n[90, 35, 2, 142, 13, 8, 5, 48, 2519, 2520]\n[90, 35, 2, 142, 13, 8, 5, 48, 2519, 2520, 7]\n[90, 35, 2, 142, 13, 8, 5, 48, 2519, 2520, 7, 2]\n[90, 35, 2, 142, 13, 8, 5, 48, 2519, 2520, 7, 2, 108]\n[90, 35, 2, 142, 13, 8, 5, 48, 2519, 2520, 7, 2, 108, 246]\n[56, 2]\n[56, 2, 655]\n[56, 2, 655, 114]\n[56, 2, 655, 114, 128]\n[56, 2, 655, 114, 128, 10]\n[56, 2, 655, 114, 128, 10, 449]\n[56, 2, 655, 114, 128, 10, 449, 22]\n[56, 1]\n[56, 1, 1094]\n[56, 1, 1094, 128]\n[56, 1, 1094, 128, 2521]\n[56, 1, 1094, 128, 2521, 29]\n[56, 1, 1094, 128, 2521, 29, 50]\n[56, 1, 1094, 128, 2521, 29, 50, 51]\n[56, 1, 1094, 128, 2521, 29, 50, 51, 16]\n[4, 2]\n[4, 2, 241]\n[4, 2, 241, 822]\n[4, 2, 241, 822, 443]\n[4, 2, 241, 822, 443, 31]\n[4, 2, 241, 822, 443, 31, 15]\n[4, 2, 241, 822, 443, 31, 15, 56]\n[4, 2, 241, 822, 443, 31, 15, 56, 37]\n[4, 2, 241, 822, 443, 31, 15, 56, 37, 258]\n[4, 2, 241, 822, 443, 31, 15, 56, 37, 258, 6]\n[4, 2, 241, 822, 443, 31, 15, 56, 37, 258, 6, 10]\n[4, 2, 241, 822, 443, 31, 15, 56, 37, 258, 6, 10, 75]\n[4, 2, 241, 822, 443, 31, 15, 56, 37, 258, 6, 10, 75, 10]\n[4, 2, 241, 822, 443, 31, 15, 56, 37, 258, 6, 10, 75, 10, 36]\n[4, 2, 241, 822, 443, 31, 15, 56, 37, 258, 6, 10, 75, 10, 36, 563]\n[4, 2, 241, 822, 443, 31, 15, 56, 37, 258, 6, 10, 75, 10, 36, 563, 5]\n[4, 2, 241, 822, 443, 
31, 15, 56, 37, 258, 6, 10, 75, 10, 36, 563, 5, 19]\n[4, 2, 241, 822, 443, 31, 15, 56, 37, 258, 6, 10, 75, 10, 36, 563, 5, 19, 53]\n[4, 2, 241, 822, 443, 31, 15, 56, 37, 258, 6, 10, 75, 10, 36, 563, 5, 19, 53, 1]\n[5, 44]\n[5, 44, 94]\n[5, 44, 94, 16]\n[5, 44, 94, 16, 68]\n[5, 44, 94, 16, 68, 39]\n[5, 44, 94, 16, 68, 39, 5]\n[5, 44, 94, 16, 68, 39, 5, 19]\n[5, 44, 94, 16, 68, 39, 5, 19, 10]\n[5, 23]\n[5, 23, 95]\n[5, 23, 95, 2522]\n[5, 23, 95, 2522, 128]\n[5, 23, 95, 2522, 128, 10]\n[5, 23, 95, 2522, 128, 10, 559]\n[5, 23, 95, 2522, 128, 10, 559, 16]\n[29, 5]\n[29, 5, 53]\n[29, 5, 53, 1]\n[29, 5, 53, 1, 459]\n[29, 5, 53, 1, 459, 75]\n[29, 5, 53, 1, 459, 75, 10]\n[29, 5, 53, 1, 459, 75, 10, 106]\n[29, 5, 53, 1, 459, 75, 10, 106, 56]\n[29, 5, 53, 1, 459, 75, 10, 106, 56, 5]\n[29, 5, 53, 1, 459, 75, 10, 106, 56, 5, 1135]\n[29, 5, 53, 1, 459, 75, 10, 106, 56, 5, 1135, 89]\n[29, 5, 53, 1, 459, 75, 10, 106, 56, 5, 1135, 89, 167]\n[29, 5, 53, 1, 459, 75, 10, 106, 56, 5, 1135, 89, 167, 2523]\n[29, 5, 53, 1, 459, 75, 10, 106, 56, 5, 1135, 89, 167, 2523, 478]\n[29, 5, 53, 1, 459, 75, 10, 106, 56, 5, 1135, 89, 167, 2523, 478, 6]\n[29, 5, 53, 1, 459, 75, 10, 106, 56, 5, 1135, 89, 167, 2523, 478, 6, 2524]\n[29, 5, 53, 1, 459, 75, 10, 106, 56, 5, 1135, 89, 167, 2523, 478, 6, 2524, 16]\n[292, 14]\n[292, 14, 478]\n[292, 14, 478, 244]\n[292, 14, 478, 244, 1145]\n[292, 14, 478, 244, 1145, 9]\n[292, 14, 478, 244, 1145, 9, 44]\n[292, 14, 478, 244, 1145, 9, 44, 481]\n[292, 14, 478, 244, 1145, 9, 44, 481, 8]\n[292, 14, 478, 244, 1145, 9, 44, 481, 8, 6]\n[4, 2]\n[4, 2, 734]\n[4, 2, 734, 311]\n[4, 2, 734, 311, 128]\n[4, 2, 734, 311, 128, 10]\n[4, 2, 734, 311, 128, 10, 559]\n[4, 2, 734, 311, 128, 10, 559, 1]\n[9, 279]\n[9, 279, 135]\n[9, 279, 135, 2]\n[9, 279, 135, 2, 2525]\n[9, 279, 135, 2, 2525, 26]\n[9, 279, 135, 2, 2525, 26, 324]\n[9, 279, 135, 2, 2525, 26, 324, 75]\n[9, 279, 135, 2, 2525, 26, 324, 75, 51]\n[9, 279, 135, 2, 2525, 26, 324, 75, 51, 9]\n[9, 279, 135, 2, 2525, 26, 324, 75, 
51, 9, 87]\n[9, 279, 135, 2, 2525, 26, 324, 75, 51, 9, 87, 4]\n[9, 279, 135, 2, 2525, 26, 324, 75, 51, 9, 87, 4, 5]\n[9, 279, 135, 2, 2525, 26, 324, 75, 51, 9, 87, 4, 5, 169]\n[9, 279, 135, 2, 2525, 26, 324, 75, 51, 9, 87, 4, 5, 169, 67]\n[9, 279, 135, 2, 2525, 26, 324, 75, 51, 9, 87, 4, 5, 169, 67, 266]\n[9, 279, 135, 2, 2525, 26, 324, 75, 51, 9, 87, 4, 5, 169, 67, 266, 5]\n[9, 279, 135, 2, 2525, 26, 324, 75, 51, 9, 87, 4, 5, 169, 67, 266, 5, 19]\n[9, 279, 135, 2, 2525, 26, 324, 75, 51, 9, 87, 4, 5, 169, 67, 266, 5, 19, 53]\n[9, 279, 135, 2, 2525, 26, 324, 75, 51, 9, 87, 4, 5, 169, 67, 266, 5, 19, 53, 1]\n[4, 75]\n[4, 75, 409]\n[4, 75, 409, 11]\n[4, 75, 409, 11, 688]\n[4, 75, 409, 11, 688, 27]\n[4, 75, 409, 11, 688, 27, 5]\n[4, 75, 409, 11, 688, 27, 5, 169]\n[4, 75, 409, 11, 688, 27, 5, 169, 2526]\n[12, 167]\n[12, 167, 2527]\n[26, 5]\n[26, 5, 169]\n[26, 5, 169, 917]\n[26, 5, 169, 917, 361]\n[26, 5, 169, 917, 361, 852]\n[26, 5, 169, 917, 361, 852, 5]\n[26, 5, 169, 917, 361, 852, 5, 53]\n[26, 5, 169, 917, 361, 852, 5, 53, 1]\n[4, 16]\n[4, 16, 998]\n[4, 16, 998, 15]\n[4, 16, 998, 15, 673]\n[4, 16, 998, 15, 673, 60]\n[4, 16, 998, 15, 673, 60, 300]\n[4, 16, 998, 15, 673, 60, 300, 26]\n[4, 16, 998, 15, 673, 60, 300, 26, 6]\n[4, 16, 998, 15, 673, 60, 300, 26, 6, 67]\n[4, 16, 998, 15, 673, 60, 300, 26, 6, 67, 323]\n[4, 16, 998, 15, 673, 60, 300, 26, 6, 67, 323, 1]\n[4, 16, 998, 15, 673, 60, 300, 26, 6, 67, 323, 1, 2529]\n[4, 16, 998, 15, 673, 60, 300, 26, 6, 67, 323, 1, 2529, 15]\n[4, 16, 998, 15, 673, 60, 300, 26, 6, 67, 323, 1, 2529, 15, 1]\n[4, 16, 998, 15, 673, 60, 300, 26, 6, 67, 323, 1, 2529, 15, 1, 2530]\n[22, 2]\n[22, 2, 655]\n[22, 2, 655, 527]\n[22, 2, 655, 527, 15]\n[22, 2, 655, 527, 15, 2531]\n[22, 2, 655, 527, 15, 2531, 115]\n[22, 2, 655, 527, 15, 2531, 115, 136]\n[22, 2, 655, 527, 15, 2531, 115, 136, 63]\n[22, 2, 655, 527, 15, 2531, 115, 136, 63, 60]\n[22, 2, 655, 527, 15, 2531, 115, 136, 63, 60, 32]\n[22, 2, 655, 527, 15, 2531, 115, 136, 63, 60, 32, 2]\n[22, 
2, 655, 527, 15, 2531, 115, 136, 63, 60, 32, 2, 1081]\n[22, 2, 655, 527, 15, 2531, 115, 136, 63, 60, 32, 2, 1081, 267]\n[22, 2, 655, 527, 15, 2531, 115, 136, 63, 60, 32, 2, 1081, 267, 5]\n[22, 2, 655, 527, 15, 2531, 115, 136, 63, 60, 32, 2, 1081, 267, 5, 44]\n[22, 2, 655, 527, 15, 2531, 115, 136, 63, 60, 32, 2, 1081, 267, 5, 44, 53]\n[22, 2, 655, 527, 15, 2531, 115, 136, 63, 60, 32, 2, 1081, 267, 5, 44, 53, 1]\n[5, 44]\n[5, 44, 1237]\n[5, 44, 1237, 1]\n[5, 44, 1237, 1, 33]\n[5, 44, 1237, 1, 33, 5]\n[5, 44, 1237, 1, 33, 5, 44]\n[5, 44, 1237, 1, 33, 5, 44, 245]\n[5, 44, 1237, 1, 33, 5, 44, 245, 178]\n[5, 52]\n[5, 52, 756]\n[5, 52, 756, 69]\n[5, 52, 756, 69, 108]\n[5, 52, 756, 69, 108, 5]\n[5, 52, 756, 69, 108, 5, 19]\n[5, 52, 756, 69, 108, 5, 19, 53]\n[5, 52, 756, 69, 108, 5, 19, 53, 1]\n[852, 5]\n[852, 5, 94]\n[852, 5, 94, 16]\n[852, 5, 94, 16, 655]\n[852, 5, 94, 16, 655, 114]\n[852, 5, 94, 16, 655, 114, 449]\n[852, 5, 94, 16, 655, 114, 449, 13]\n[852, 5, 94, 16, 655, 114, 449, 13, 17]\n[852, 5, 94, 16, 655, 114, 449, 13, 17, 656]\n[852, 5, 94, 16, 655, 114, 449, 13, 17, 656, 5]\n[852, 5, 94, 16, 655, 114, 449, 13, 17, 656, 5, 44]\n[852, 5, 94, 16, 655, 114, 449, 13, 17, 656, 5, 44, 1237]\n[852, 5, 94, 16, 655, 114, 449, 13, 17, 656, 5, 44, 1237, 1]\n[852, 5, 94, 16, 655, 114, 449, 13, 17, 656, 5, 44, 1237, 1, 33]\n[852, 5, 94, 16, 655, 114, 449, 13, 17, 656, 5, 44, 1237, 1, 33, 5]\n[852, 5, 94, 16, 655, 114, 449, 13, 17, 656, 5, 44, 1237, 1, 33, 5, 44]\n[852, 5, 94, 16, 655, 114, 449, 13, 17, 656, 5, 44, 1237, 1, 33, 5, 44, 245]\n[852, 5, 94, 16, 655, 114, 449, 13, 17, 656, 5, 44, 1237, 1, 33, 5, 44, 245, 178]\n[852, 5]\n[852, 5, 94]\n[852, 5, 94, 16]\n[852, 5, 94, 16, 655]\n[852, 5, 94, 16, 655, 114]\n[852, 5, 94, 16, 655, 114, 449]\n[852, 5, 94, 16, 655, 114, 449, 13]\n[852, 5, 94, 16, 655, 114, 449, 13, 17]\n[852, 5, 94, 16, 655, 114, 449, 13, 17, 656]\n[852, 5, 94, 16, 655, 114, 449, 13, 17, 656, 76]\n[852, 5, 94, 16, 655, 114, 449, 13, 17, 656, 76, 35]\n[852, 
5, 94, 16, 655, 114, 449, 13, 17, 656, 76, 35, 27]\n[6, 259]\n[6, 259, 8]\n[6, 71]\n[6, 71, 8]\n[98, 71]\n[98, 71, 8]\n[5, 107]\n[26, 45]\n[26, 45, 37]\n[36, 38]\n[36, 38, 27]\n[36, 38, 27, 200]\n[36, 38, 27, 200, 11]\n[36, 38, 27, 200, 11, 8]\n[36, 38, 27, 200, 11, 8, 818]\n[36, 38, 27, 200, 11, 8, 818, 97]\n[36, 38, 27, 200, 11, 8, 818, 97, 1]\n[36, 38, 27, 200, 11, 8, 818, 97, 1, 8]\n[36, 38, 27, 200, 11, 8, 818, 97, 1, 8, 2534]\n[125, 1]\n[125, 1, 212]\n[125, 1, 212, 68]\n[125, 1, 212, 68, 6]\n[5, 120]\n[5, 120, 1]\n[5, 120, 1, 33]\n[5, 120, 1, 33, 6]\n[373, 88]\n[373, 88, 395]\n[373, 88, 395, 8]\n[373, 88, 395, 8, 76]\n[373, 88, 395, 8, 76, 35]\n[373, 88, 395, 8, 76, 35, 27]\n[28, 5]\n[28, 5, 48]\n[37, 101]\n[37, 101, 2535]\n[37, 101, 2535, 57]\n[37, 101, 2535, 57, 6]\n[2537, 35]\n[26, 3]\n[26, 3, 19]\n[36, 38]\n[36, 38, 27]\n[36, 38, 27, 200]\n[36, 38, 27, 200, 11]\n[36, 38, 27, 200, 11, 8]\n[36, 38, 27, 200, 11, 8, 3]\n[36, 38, 27, 200, 11, 8, 3, 48]\n[36, 38, 27, 200, 11, 8, 3, 48, 27]\n[17, 656]\n[17, 656, 15]\n[6, 41]\n[6, 41, 8]\n[6, 41, 8, 115]\n[6, 41, 8, 115, 1]\n[6, 41, 8, 115, 1, 2538]\n[301, 301]\n[39, 3]\n[39, 3, 128]\n[278, 1182]\n[278, 1182, 16]\n[278, 1182, 16, 200]\n[278, 1182, 16, 200, 22]\n[278, 1182, 16, 200, 22, 3]\n[278, 1182, 16, 200, 22, 3, 90]\n[278, 1182, 16, 200, 22, 3, 90, 35]\n[278, 1182, 16, 200, 22, 3, 90, 35, 27]\n[1239, 2540]\n[1239, 2540, 1239]\n[17, 2542]\n[17, 2542, 2543]\n[17, 2542, 2543, 15]\n[17, 2542, 2543, 15, 1060]\n[17, 2542, 2543, 15, 1060, 43]\n[76, 35]\n[26, 860]\n[278, 560]\n[278, 560, 16]\n[278, 560, 16, 200]\n[278, 560, 16, 200, 22]\n[278, 560, 16, 200, 22, 3]\n[278, 560, 16, 200, 22, 3, 89]\n[278, 560, 16, 200, 22, 3, 89, 56]\n[278, 560, 16, 200, 22, 3, 89, 56, 1]\n[278, 560, 16, 200, 22, 3, 89, 56, 1, 2545]\n[99, 2]\n[99, 2, 2547]\n[56, 8]\n[56, 8, 4]\n[56, 8, 4, 2549]\n[26, 278]\n[26, 278, 496]\n[26, 278, 496, 53]\n[26, 278, 496, 53, 25]\n[26, 278, 496, 53, 25, 3]\n[26, 278, 496, 53, 25, 3, 221]\n[26, 278, 
496, 53, 25, 3, 221, 36]\n[26, 278, 496, 53, 25, 3, 221, 36, 38]\n[26, 278, 496, 53, 25, 3, 221, 36, 38, 27]\n[26, 278, 496, 53, 25, 3, 221, 36, 38, 27, 200]\n[26, 278, 496, 53, 25, 3, 221, 36, 38, 27, 200, 11]\n[26, 278, 496, 53, 25, 3, 221, 36, 38, 27, 200, 11, 8]\n[26, 278, 496, 53, 25, 3, 221, 36, 38, 27, 200, 11, 8, 40]\n[26, 278, 496, 53, 25, 3, 221, 36, 38, 27, 200, 11, 8, 40, 183]\n[26, 278, 496, 53, 25, 3, 221, 36, 38, 27, 200, 11, 8, 40, 183, 17]\n[9, 44]\n[9, 44, 10]\n[21, 135]\n[21, 135, 16]\n[4, 259]\n[4, 259, 9]\n[4, 259, 9, 521]\n[4, 259, 9, 521, 5]\n[4, 259, 9, 521, 5, 44]\n[4, 259, 9, 521, 5, 44, 1052]\n[65, 20]\n[65, 20, 96]\n[5, 44]\n[5, 44, 10]\n[52, 3]\n[52, 3, 476]\n[52, 3, 476, 22]\n[52, 3, 476, 22, 34]\n[52, 3, 476, 22, 34, 28]\n[3, 332]\n[3, 332, 28]\n[16, 656]\n[16, 656, 44]\n[16, 656, 44, 259]\n[141, 3]\n[141, 3, 627]\n[141, 3, 627, 4]\n[31, 2550]\n[31, 2550, 1136]\n[146, 10]\n[5, 44]\n[5, 44, 10]\n[52, 3]\n[52, 3, 476]\n[52, 3, 476, 215]\n[52, 3, 476, 215, 74]\n[52, 3, 476, 215, 74, 10]\n[52, 3, 476, 215, 74, 10, 13]\n[52, 3, 476, 215, 74, 10, 13, 16]\n[170, 74]\n[170, 74, 10]\n[170, 74, 10, 13]\n[170, 74, 10, 13, 16]\n[65, 31]\n[65, 31, 79]\n[45, 4]\n[45, 4, 308]\n[74, 10]\n[74, 10, 13]\n[74, 10, 13, 16]\n[27, 228]\n[27, 228, 30]\n[27, 228, 30, 50]\n[74, 10]\n[74, 10, 66]\n[74, 10, 66, 13]\n[74, 10, 66, 13, 16]\n[150, 109]\n[150, 109, 146]\n[150, 109, 146, 50]\n[150, 109, 146, 50, 554]\n[150, 109, 146, 50, 554, 1]\n[150, 109, 146, 50, 554, 1, 68]\n[150, 109, 146, 50, 554, 1, 68, 18]\n[50, 21]\n[50, 21, 52]\n[50, 21, 52, 2552]\n[50, 21, 52, 2552, 30]\n[50, 21, 52, 2552, 30, 50]\n[50, 21, 52, 2552, 30, 50, 146]\n[5, 23]\n[5, 23, 93]\n[5, 23, 93, 421]\n[5, 23, 93, 421, 26]\n[5, 23, 93, 421, 26, 483]\n[5, 23, 93, 421, 26, 483, 124]\n[93, 101]\n[93, 101, 14]\n[93, 101, 14, 421]\n[93, 101, 14, 421, 54]\n[93, 101, 14, 421, 54, 20]\n[93, 101, 14, 421, 54, 20, 4]\n[93, 101, 14, 421, 54, 20, 4, 74]\n[93, 101, 14, 421, 54, 20, 4, 74, 10]\n[93, 
101, 14, 421, 54, 20, 4, 74, 10, 13]\n[93, 101, 14, 421, 54, 20, 4, 74, 10, 13, 16]\n[170, 74]\n[170, 74, 10]\n[170, 74, 10, 13]\n[170, 74, 10, 13, 16]\n[65, 31]\n[65, 31, 79]\n[45, 4]\n[45, 4, 308]\n[45, 4, 308, 144]\n[45, 4, 308, 144, 52]\n[45, 4, 308, 144, 52, 331]\n[45, 4, 308, 144, 52, 331, 6]\n[215, 30]\n[215, 30, 46]\n[215, 30, 46, 50]\n[18, 145]\n[18, 145, 175]\n[18, 145, 175, 207]\n[18, 145, 175, 207, 6]\n[18, 145, 175, 207, 6, 48]\n[18, 145, 175, 207, 6, 48, 6]\n[519, 51]\n[519, 51, 13]\n[519, 51, 13, 55]\n[519, 51, 13, 55, 5]\n[519, 51, 13, 55, 5, 23]\n[519, 51, 13, 55, 5, 23, 29]\n[519, 51, 13, 55, 5, 23, 29, 1236]\n[519, 51, 13, 55, 5, 23, 29, 1236, 365]\n[519, 51, 13, 55, 5, 23, 29, 1236, 365, 3]\n[519, 51, 13, 55, 5, 23, 29, 1236, 365, 3, 3]\n[519, 51, 13, 55, 5, 23, 29, 1236, 365, 3, 3, 211]\n[519, 51, 13, 55, 5, 23, 29, 1236, 365, 3, 3, 211, 10]\n[5, 244]\n[5, 244, 101]\n[5, 244, 101, 10]\n[5, 244, 101, 10, 25]\n[26, 3]\n[26, 3, 35]\n[26, 3, 35, 6]\n[26, 3, 35, 6, 259]\n[519, 51]\n[519, 51, 13]\n[519, 51, 13, 55]\n[519, 51, 13, 55, 5]\n[116, 242]\n[116, 242, 274]\n[116, 242, 274, 165]\n[116, 242, 274, 165, 215]\n[116, 242, 274, 165, 215, 74]\n[116, 242, 274, 165, 215, 74, 10]\n[116, 242, 274, 165, 215, 74, 10, 13]\n[116, 242, 274, 165, 215, 74, 10, 13, 16]\n[392, 8]\n[392, 8, 74]\n[392, 8, 74, 10]\n[392, 8, 74, 10, 13]\n[392, 8, 74, 10, 13, 16]\n[75, 10]\n[75, 10, 36]\n[75, 10, 36, 65]\n[75, 10, 36, 65, 31]\n[75, 10, 36, 65, 31, 79]\n[45, 4]\n[45, 4, 308]\n[45, 4, 308, 144]\n[45, 4, 308, 144, 74]\n[45, 4, 308, 144, 74, 10]\n[45, 4, 308, 144, 74, 10, 13]\n[45, 4, 308, 144, 74, 10, 13, 16]\n[27, 228]\n[27, 228, 30]\n[27, 228, 30, 50]\n[74, 10]\n[74, 10, 66]\n[74, 10, 66, 13]\n[74, 10, 66, 13, 16]\n[150, 150]\n[150, 150, 75]\n[150, 150, 75, 10]\n[150, 150, 75, 10, 25]\n[75, 10]\n[75, 10, 36]\n[75, 10, 36, 22]\n[75, 10, 36, 22, 3]\n[150, 4]\n[150, 4, 150]\n[150, 4, 150, 21]\n[150, 4, 150, 21, 67]\n[150, 4, 150, 21, 67, 515]\n[150, 4, 150, 21, 67, 515, 
17]\n[21, 67]\n[21, 67, 515]\n[21, 67, 515, 17]\n[21, 67]\n[21, 67, 515]\n[21, 67, 515, 17]\n[75, 10]\n"
]
],
[
[
"\nEl relleno asegurarรก que todos los puntos de datos tengan la misma longitud porque las oraciones de texto pueden tener longitudes variables",
"_____no_output_____"
]
],
[
[
"max_length = 20\nsequences = pad_sequences(datalist, maxlen=max_length, padding='pre')\nX = sequences[:, :-1]\ny = sequences[:, -1]\ny = to_categorical(y, num_classes=vocab_size)\nseq_length = X.shape[1]",
"_____no_output_____"
]
],
[
[
"Entrenamiento del modelo LSTM",
"_____no_output_____"
]
],
[
[
"model = Sequential()\nmodel.add(Embedding(vocab_size, 50, input_length=seq_length))\nmodel.add(LSTM(100, return_sequences=True))\nmodel.add(LSTM(100))\nmodel.add(Dense(100, activation='relu'))\nmodel.add(Dense(vocab_size, activation='softmax'))\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n#it will take sometime to complete traning\nmodel.fit(X, y, batch_size=32, epochs=50)",
"Epoch 1/50\n393/393 [==============================] - 23s 51ms/step - loss: 6.3287 - accuracy: 0.0530\nEpoch 2/50\n393/393 [==============================] - 19s 48ms/step - loss: 5.8860 - accuracy: 0.0535\nEpoch 3/50\n393/393 [==============================] - 16s 40ms/step - loss: 5.7146 - accuracy: 0.0599\nEpoch 4/50\n393/393 [==============================] - 16s 40ms/step - loss: 5.5218 - accuracy: 0.0704\nEpoch 5/50\n393/393 [==============================] - 16s 40ms/step - loss: 5.3725 - accuracy: 0.0753\nEpoch 6/50\n393/393 [==============================] - 16s 40ms/step - loss: 5.2484 - accuracy: 0.0779\nEpoch 7/50\n393/393 [==============================] - 16s 41ms/step - loss: 5.1404 - accuracy: 0.0814\nEpoch 8/50\n393/393 [==============================] - 16s 41ms/step - loss: 5.0350 - accuracy: 0.0900\nEpoch 9/50\n393/393 [==============================] - 16s 40ms/step - loss: 4.9296 - accuracy: 0.0970\nEpoch 10/50\n393/393 [==============================] - 16s 40ms/step - loss: 4.8220 - accuracy: 0.1076\nEpoch 11/50\n393/393 [==============================] - 16s 40ms/step - loss: 4.7137 - accuracy: 0.1175\nEpoch 12/50\n393/393 [==============================] - 16s 40ms/step - loss: 4.6012 - accuracy: 0.1304\nEpoch 13/50\n393/393 [==============================] - 16s 40ms/step - loss: 4.4858 - accuracy: 0.1451\nEpoch 14/50\n393/393 [==============================] - 16s 40ms/step - loss: 4.3724 - accuracy: 0.1570\nEpoch 15/50\n393/393 [==============================] - 16s 41ms/step - loss: 4.2654 - accuracy: 0.1668\nEpoch 16/50\n393/393 [==============================] - 16s 40ms/step - loss: 4.1648 - accuracy: 0.1801\nEpoch 17/50\n393/393 [==============================] - 16s 41ms/step - loss: 4.0760 - accuracy: 0.1913\nEpoch 18/50\n393/393 [==============================] - 16s 40ms/step - loss: 3.9910 - accuracy: 0.2010\nEpoch 19/50\n393/393 [==============================] - 16s 40ms/step - loss: 3.9077 - accuracy: 0.2049\nEpoch 
20/50\n393/393 [==============================] - 16s 40ms/step - loss: 3.8308 - accuracy: 0.2129\nEpoch 21/50\n393/393 [==============================] - 16s 40ms/step - loss: 3.7595 - accuracy: 0.2209\nEpoch 22/50\n393/393 [==============================] - 16s 40ms/step - loss: 3.6886 - accuracy: 0.2327\nEpoch 23/50\n393/393 [==============================] - 16s 40ms/step - loss: 3.6257 - accuracy: 0.2364\nEpoch 24/50\n393/393 [==============================] - 16s 40ms/step - loss: 3.5819 - accuracy: 0.2443\nEpoch 25/50\n393/393 [==============================] - 16s 40ms/step - loss: 3.5041 - accuracy: 0.2524\nEpoch 26/50\n393/393 [==============================] - 16s 40ms/step - loss: 3.4451 - accuracy: 0.2596\nEpoch 27/50\n393/393 [==============================] - 16s 40ms/step - loss: 3.3856 - accuracy: 0.2675\nEpoch 28/50\n393/393 [==============================] - 16s 40ms/step - loss: 3.3312 - accuracy: 0.2746\nEpoch 29/50\n393/393 [==============================] - 16s 40ms/step - loss: 3.2758 - accuracy: 0.2854\nEpoch 30/50\n393/393 [==============================] - 16s 40ms/step - loss: 3.2282 - accuracy: 0.2885\nEpoch 31/50\n393/393 [==============================] - 16s 40ms/step - loss: 3.1763 - accuracy: 0.2966\nEpoch 32/50\n393/393 [==============================] - 16s 40ms/step - loss: 3.1357 - accuracy: 0.3007\nEpoch 33/50\n393/393 [==============================] - 16s 40ms/step - loss: 3.0855 - accuracy: 0.3098\nEpoch 34/50\n393/393 [==============================] - 16s 40ms/step - loss: 3.0459 - accuracy: 0.3174\nEpoch 35/50\n393/393 [==============================] - 16s 40ms/step - loss: 2.9965 - accuracy: 0.3276\nEpoch 36/50\n393/393 [==============================] - 16s 40ms/step - loss: 2.9579 - accuracy: 0.3350\nEpoch 37/50\n393/393 [==============================] - 16s 40ms/step - loss: 2.9171 - accuracy: 0.3384\nEpoch 38/50\n393/393 [==============================] - 16s 40ms/step - loss: 2.8753 - accuracy: 0.3462\nEpoch 
39/50\n393/393 [==============================] - 16s 41ms/step - loss: 2.8337 - accuracy: 0.3565\nEpoch 40/50\n393/393 [==============================] - 16s 41ms/step - loss: 2.8036 - accuracy: 0.3622\nEpoch 41/50\n393/393 [==============================] - 16s 40ms/step - loss: 2.7625 - accuracy: 0.3696\nEpoch 42/50\n393/393 [==============================] - 16s 40ms/step - loss: 2.7326 - accuracy: 0.3692\nEpoch 43/50\n393/393 [==============================] - 16s 40ms/step - loss: 2.6880 - accuracy: 0.3804\nEpoch 44/50\n393/393 [==============================] - 16s 40ms/step - loss: 2.6551 - accuracy: 0.3856\nEpoch 45/50\n393/393 [==============================] - 16s 41ms/step - loss: 2.6270 - accuracy: 0.3953\nEpoch 46/50\n393/393 [==============================] - 16s 40ms/step - loss: 2.5986 - accuracy: 0.3933\nEpoch 47/50\n393/393 [==============================] - 16s 40ms/step - loss: 2.5581 - accuracy: 0.4062\nEpoch 48/50\n393/393 [==============================] - 16s 40ms/step - loss: 2.5212 - accuracy: 0.4140\nEpoch 49/50\n393/393 [==============================] - 16s 40ms/step - loss: 2.4975 - accuracy: 0.4183\nEpoch 50/50\n393/393 [==============================] - 16s 40ms/step - loss: 2.4674 - accuracy: 0.4206\n"
],
[
"poetry_length = 10\ndef generate_poetry(seed_text, n_lines):\n for i in range(n_lines):\n text = []\n for _ in range(poetry_length):\n encoded = token.texts_to_sequences([seed_text])\n encoded = pad_sequences(encoded, maxlen=seq_length, padding='pre')\n\n y_pred = np.argmax(model.predict(encoded), axis=-1)\n\n predicted_word = \"\"\n for word, index in token.word_index.items():\n if index == y_pred:\n predicted_word = word\n break\n\n seed_text = seed_text + ' ' + predicted_word\n text.append(predicted_word)\n\n seed_text = text[-1]\n text = ' '.join(text)\n print(text)",
"_____no_output_____"
],
[
"seed_text = 'world'\ngenerate_poetry(seed_text, 2)",
"that visions are seldom what they seem down the coast\nand each other's comme one world how far you're changing\n"
],
[
"seed_text = 'feet'\ngenerate_poetry(seed_text, 2)",
"as the dark side of the other side to it's\na small world after all it's a small world after\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
eca4c57815f30d1e53a69510dfcba7ba85c10ae5 | 12,229 | ipynb | Jupyter Notebook | HowEarthEngineWorks/DeferredExecution.ipynb | pberezina/earthengine-py-notebooks | 4cbe3c52bcc9ed3f1337bf097aa5799442991a5e | [
"MIT"
] | 1 | 2020-03-20T19:39:34.000Z | 2020-03-20T19:39:34.000Z | HowEarthEngineWorks/DeferredExecution.ipynb | pberezina/earthengine-py-notebooks | 4cbe3c52bcc9ed3f1337bf097aa5799442991a5e | [
"MIT"
] | null | null | null | HowEarthEngineWorks/DeferredExecution.ipynb | pberezina/earthengine-py-notebooks | 4cbe3c52bcc9ed3f1337bf097aa5799442991a5e | [
"MIT"
] | null | null | null | 74.115152 | 7,208 | 0.830076 | [
[
[
"<table class=\"ee-notebook-buttons\" align=\"left\">\n <td><a target=\"_blank\" href=\"https://github.com/giswqs/earthengine-py-notebooks/tree/master/HowEarthEngineWorks/DeferredExecution.ipynb\"><img width=32px src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" /> View source on GitHub</a></td>\n <td><a target=\"_blank\" href=\"https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/HowEarthEngineWorks/DeferredExecution.ipynb\"><img width=26px src=\"https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png\" />Notebook Viewer</a></td>\n <td><a target=\"_blank\" href=\"https://mybinder.org/v2/gh/giswqs/earthengine-py-notebooks/master?filepath=HowEarthEngineWorks/DeferredExecution.ipynb\"><img width=58px src=\"https://mybinder.org/static/images/logo_social.png\" />Run in binder</a></td>\n <td><a target=\"_blank\" href=\"https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/HowEarthEngineWorks/DeferredExecution.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" /> Run in Google Colab</a></td>\n</table>",
"_____no_output_____"
],
[
"## Install Earth Engine API\nInstall the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geehydro](https://github.com/giswqs/geehydro). The **geehydro** Python package builds on the [folium](https://github.com/python-visualization/folium) package and implements several methods for displaying Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, `Map.centerObject()`, and `Map.setOptions()`.\nThe following script checks if the geehydro package has been installed. If not, it will install geehydro, which automatically install its dependencies, including earthengine-api and folium.",
"_____no_output_____"
]
],
[
[
"import subprocess\n\ntry:\n import geehydro\nexcept ImportError:\n print('geehydro package not installed. Installing ...')\n subprocess.check_call([\"python\", '-m', 'pip', 'install', 'geehydro'])",
"_____no_output_____"
]
],
[
[
"Import libraries",
"_____no_output_____"
]
],
[
[
"import ee\nimport folium\nimport geehydro",
"_____no_output_____"
]
],
[
[
"Authenticate and initialize Earth Engine API. You only need to authenticate the Earth Engine API once. ",
"_____no_output_____"
]
],
[
[
"try:\n ee.Initialize()\nexcept Exception as e:\n ee.Authenticate()\n ee.Initialize()",
"_____no_output_____"
]
],
[
[
"## Create an interactive map \nThis step creates an interactive map using [folium](https://github.com/python-visualization/folium). The default basemap is the OpenStreetMap. Additional basemaps can be added using the `Map.setOptions()` function. \nThe optional basemaps can be `ROADMAP`, `SATELLITE`, `HYBRID`, `TERRAIN`, or `ESRI`.",
"_____no_output_____"
]
],
[
[
"Map = folium.Map(location=[40, -100], zoom_start=4)\nMap.setOptions('HYBRID')",
"_____no_output_____"
]
],
[
[
"## Add Earth Engine Python script ",
"_____no_output_____"
],
[
"## Display Earth Engine data layers ",
"_____no_output_____"
]
],
[
[
"Map.setControlVisibility(layerControl=True, fullscreenControl=True, latLngPopup=True)\nMap",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
]
] |
eca4e83a080d893b1526513a8e016b009e9edaa3 | 40,716 | ipynb | Jupyter Notebook | quotes_scrape.ipynb | AfroQ/Mars_Mission | 1a889a0be2de1f7a630fd30eaba0895c798cbd4c | [
"MIT"
] | null | null | null | quotes_scrape.ipynb | AfroQ/Mars_Mission | 1a889a0be2de1f7a630fd30eaba0895c798cbd4c | [
"MIT"
] | null | null | null | quotes_scrape.ipynb | AfroQ/Mars_Mission | 1a889a0be2de1f7a630fd30eaba0895c798cbd4c | [
"MIT"
] | null | null | null | 44.792079 | 1,095 | 0.498698 | [
[
[
"# Import scraping tools\n\nfrom splinter import Browser\nfrom bs4 import BeautifulSoup as soup\nfrom webdriver_manager.chrome import ChromeDriverManager",
"C:\\Users\\nyaku\\anaconda3\\lib\\site-packages\\requests\\__init__.py:89: RequestsDependencyWarning: urllib3 (1.26.8) or chardet (3.0.4) doesn't match a supported version!\n warnings.warn(\"urllib3 ({}) or chardet ({}) doesn't match a supported \"\n"
],
[
"# Set up Splinter\nexecutable_path = {'executable_path': ChromeDriverManager().install()}\nbrowser = Browser('chrome', **executable_path, headless=False)",
"\n\n====== WebDriver manager ======\nCurrent google-chrome version is 98.0.4758\nGet LATEST chromedriver version for 98.0.4758 google-chrome\nThere is no [win32] chromedriver for browser in cache\nTrying to download new driver from https://chromedriver.storage.googleapis.com/98.0.4758.102/chromedriver_win32.zip\nDriver has been saved in cache [C:\\Users\\nyaku\\.wdm\\drivers\\chromedriver\\win32\\98.0.4758.102]\n"
],
[
"# Visit the Quotes to Scrape site\nurl = 'http://quotes.toscrape.com/'\nbrowser.visit(url)",
"_____no_output_____"
],
[
"# Parse the HTML\nhtml = browser.html\nhtml_soup = soup(html, 'html.parser')",
"_____no_output_____"
],
[
"# Scrape the Title\ntitle = html_soup.find('h2').text\ntitle",
"_____no_output_____"
],
[
"html_soup.body",
"_____no_output_____"
],
[
"# Scrape the top ten tags\ntags = html_soup.find('div', class_='tags-box')\n# Tag names\ntag_names = tags.find_all('a', class_='tag')\nfor tag in tag_names:\n print(tag.text)",
"love\ninspirational\nlife\nhumor\nbooks\nreading\nfriendship\nfriends\ntruth\nsimile\n"
],
[
"for x in range(1, 6):\n html = browser.html\n quote_soup = soup(html, 'html.parser')\n quotes = quote_soup.find_all('span', class_='text')\n for quote in quotes:\n print('page', x, '----------')\n print(quote.text)\n browser.links.find_by_partial_text('Next').click()",
"page 1 ----------\nโThe world as we have created it is a process of our thinking. It cannot be changed without changing our thinking.โ\npage 1 ----------\nโIt is our choices, Harry, that show what we truly are, far more than our abilities.โ\npage 1 ----------\nโThere are only two ways to live your life. One is as though nothing is a miracle. The other is as though everything is a miracle.โ\npage 1 ----------\nโThe person, be it gentleman or lady, who has not pleasure in a good novel, must be intolerably stupid.โ\npage 1 ----------\nโImperfection is beauty, madness is genius and it's better to be absolutely ridiculous than absolutely boring.โ\npage 1 ----------\nโTry not to become a man of success. Rather become a man of value.โ\npage 1 ----------\nโIt is better to be hated for what you are than to be loved for what you are not.โ\npage 1 ----------\nโI have not failed. I've just found 10,000 ways that won't work.โ\npage 1 ----------\nโA woman is like a tea bag; you never know how strong it is until it's in hot water.โ\npage 1 ----------\nโA day without sunshine is like, you know, night.โ\npage 2 ----------\nโThis life is what you make it. No matter what, you're going to mess up sometimes, it's a universal truth. But the good part is you get to decide how you're going to mess it up. Girls will be your friends - they'll act like it anyway. But just remember, some come, some go. The ones that stay with you through everything - they're your true best friends. Don't let go of them. Also remember, sisters make the best friends in the world. As for lovers, well, they'll come and go too. And baby, I hate to say it, most of them - actually pretty much all of them are going to break your heart, but you can't give up because if you give up, you'll never find your soulmate. You'll never find that half who makes you whole and that goes for everything. Just because you fail once, doesn't mean you're gonna fail at everything. 
Keep trying, hold on, and always, always, always believe in yourself, because if you don't, then who will, sweetie? So keep your head high, keep your chin up, and most importantly, keep smiling, because life's a beautiful thing and there's so much to smile about.โ\npage 2 ----------\nโIt takes a great deal of bravery to stand up to our enemies, but just as much to stand up to our friends.โ\npage 2 ----------\nโIf you can't explain it to a six year old, you don't understand it yourself.โ\npage 2 ----------\nโYou may not be her first, her last, or her only. She loved before she may love again. But if she loves you now, what else matters? She's not perfectโyou aren't either, and the two of you may never be perfect together but if she can make you laugh, cause you to think twice, and admit to being human and making mistakes, hold onto her and give her the most you can. She may not be thinking about you every second of the day, but she will give you a part of her that she knows you can breakโher heart. So don't hurt her, don't change her, don't analyze and don't expect more than she can give. Smile when she makes you happy, let her know when she makes you mad, and miss her when she's not there.โ\npage 2 ----------\nโI like nonsense, it wakes up the brain cells. Fantasy is a necessary ingredient in living.โ\npage 2 ----------\nโI may not have gone where I intended to go, but I think I have ended up where I needed to be.โ\npage 2 ----------\nโThe opposite of love is not hate, it's indifference. The opposite of art is not ugliness, it's indifference. The opposite of faith is not heresy, it's indifference. 
And the opposite of life is not death, it's indifference.โ\npage 2 ----------\nโIt is not a lack of love, but a lack of friendship that makes unhappy marriages.โ\npage 2 ----------\nโGood friends, good books, and a sleepy conscience: this is the ideal life.โ\npage 2 ----------\nโLife is what happens to us while we are making other plans.โ\npage 3 ----------\nโI love you without knowing how, or when, or from where. I love you simply, without problems or pride: I love you in this way because I do not know any other way of loving but this, in which there is no I or you, so intimate that your hand upon my chest is my hand, so intimate that when I fall asleep your eyes close.โ\npage 3 ----------\nโFor every minute you are angry you lose sixty seconds of happiness.โ\npage 3 ----------\nโIf you judge people, you have no time to love them.โ\npage 3 ----------\nโAnyone who thinks sitting in church can make you a Christian must also think that sitting in a garage can make you a car.โ\npage 3 ----------\nโBeauty is in the eye of the beholder and it may be necessary from time to time to give a stupid or misinformed beholder a black eye.โ\npage 3 ----------\nโToday you are You, that is truer than true. There is no one alive who is Youer than You.โ\npage 3 ----------\nโIf you want your children to be intelligent, read them fairy tales. If you want them to be more intelligent, read them more fairy tales.โ\npage 3 ----------\nโIt is impossible to live without failing at something, unless you live so cautiously that you might as well not have lived at all - in which case, you fail by default.โ\npage 3 ----------\nโLogic will get you from A to Z; imagination will get you everywhere.โ\npage 3 ----------\nโOne good thing about music, when it hits you, you feel no pain.โ\npage 4 ----------\nโThe more that you read, the more things you will know. 
The more that you learn, the more places you'll go.โ\npage 4 ----------\nโOf course it is happening inside your head, Harry, but why on earth should that mean that it is not real?โ\npage 4 ----------\nโThe truth is, everyone is going to hurt you. You just got to find the ones worth suffering for.โ\npage 4 ----------\nโNot all of us can do great things. But we can do small things with great love.โ\npage 4 ----------\nโTo the well-organized mind, death is but the next great adventure.โ\npage 4 ----------\nโAll you need is love. But a little chocolate now and then doesn't hurt.โ\npage 4 ----------\nโWe read to know we're not alone.โ\npage 4 ----------\nโAny fool can know. The point is to understand.โ\npage 4 ----------\nโI have always imagined that Paradise will be a kind of library.โ\npage 4 ----------\nโIt is never too late to be what you might have been.โ\npage 5 ----------\nโA reader lives a thousand lives before he dies, said Jojen. The man who never reads lives only one.โ\npage 5 ----------\nโYou can never get a cup of tea large enough or a book long enough to suit me.โ\npage 5 ----------\nโYou believe lies so you eventually learn to trust no one but yourself.โ\npage 5 ----------\nโIf you can make a woman laugh, you can make her do anything.โ\npage 5 ----------\nโLife is like riding a bicycle. To keep your balance, you must keep moving.โ\npage 5 ----------\nโThe real lover is the man who can thrill you by kissing your forehead or smiling into your eyes or just staring into space.โ\npage 5 ----------\nโA wise girl kisses but doesn't love, listens but doesn't believe, and leaves before she is left.โ\npage 5 ----------\nโOnly in the darkness can you see the stars.โ\npage 5 ----------\nโIt matters not what someone is born, but what they grow to be.โ\npage 5 ----------\nโLove does not begin and end the way we seem to think it does. Love is a battle, love is a war; love is a growing up.โ\n"
],
[
"quote_list=[]\nauthors_list = []\nfor x in range(1, 6):\n html = browser.html\n quote_soup = soup(html, 'html.parser')\n quotes = quote_soup.find_all('span', class_='text')\n authors = quote_soup.find_all('small', class_='author')\n for quote in quotes:\n quote_list.append(quote.text)\n #print('page', x, '----------')\n #print(quote.text)\n for author in authors:\n authors_list.append(author.text)\n #print('page', x, '----------')\n #print(quote.text)\n browser.links.find_by_partial_text('Next').click()\n#print(quote_list)\n#print(authors_list)\n ",
"_____no_output_____"
],
[
"author = quote_soup.find('small', class_='author')\nauthor.text",
"_____no_output_____"
],
[
"import pandas as pd",
"_____no_output_____"
],
[
"quote_df =pd.DataFrame({\"Quotes\": quote_list})\nquote_df[\"Author\"] = authors_list\nquote_df",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
eca4ee3459fa355b665e20f33fdc0e7373a589e0 | 184,626 | ipynb | Jupyter Notebook | Scrapper.ipynb | mahdi-darvish/End-to-End-Price-Analysis-and-Prediction | 2cf8c87e390a16951b84b59e94f0d12080f8f050 | [
"MIT"
] | null | null | null | Scrapper.ipynb | mahdi-darvish/End-to-End-Price-Analysis-and-Prediction | 2cf8c87e390a16951b84b59e94f0d12080f8f050 | [
"MIT"
] | null | null | null | Scrapper.ipynb | mahdi-darvish/End-to-End-Price-Analysis-and-Prediction | 2cf8c87e390a16951b84b59e94f0d12080f8f050 | [
"MIT"
] | null | null | null | 49.684069 | 160 | 0.519006 | [
[
[
"# Amazon Scrapper\n",
"_____no_output_____"
],
[
"### Requirements",
"_____no_output_____"
]
],
[
[
"import csv\nfrom bs4 import BeautifulSoup\nimport pandas as pd",
"_____no_output_____"
],
[
"from selenium import webdriver",
"_____no_output_____"
]
],
[
[
"## Starting Web Driver",
"_____no_output_____"
]
],
[
[
"driver = webdriver.Chrome()",
"_____no_output_____"
]
],
[
[
"## Collection",
"_____no_output_____"
]
],
[
[
"pages = range(1, 168)\ndf = pd.DataFrame()\nfailed_list = []\npage_cnt = 1\nfor page in pages:\n link = \"https://www.amazon.com/s?i=computers-intl-ship&bbn=16225007011&rh=n%3A16225007011%2Cn%3A13896617011%2Cn%3A565108&dc&page={}\".format(page)\n driver.get(link)\n soup = BeautifulSoup(driver.page_source, 'html.parser')\n results = soup.find_all('a', {\"class\": \"a-link-normal a-text-normal\"}, href=True)\n urls = []\n\n for html in results:\n href = html.get('href')\n href = 'https://www.amazon.com/' + href\n urls.append(href)\n cnt = 1\n mx = len(urls)\n for url in urls:\n attrs = {}\n attrs_failed = {}\n failed_str = ''\n driver.get(url)\n soup = BeautifulSoup(driver.page_source, 'html.parser')\n try:\n results = soup.find('span', {\"id\": \"priceblock_ourprice\"})\n attrs['price'] = results.text\n except:\n failed_str += 'price, '\n try:\n results = soup.find('span', {\"class\": \"a-icon-alt\"})\n attrs['rating'] = results.text[:3]\n except:\n failed_str += 'rating, '\n data = []\n data2 = []\n try:\n table = soup.find('table', attrs={'id':'productDetails_techSpec_section_1'})\n table_body = table.find('tbody')\n\n rows = table_body.find_all('tr')\n for row in rows:\n key = row.find_all('th')\n value = row.find_all('td')\n keys = [ele.text.strip() for ele in key]\n values = [ele.text.strip() for ele in value]\n data.append(keys[0])\n data2.append(values[0][1:])\n\n\n for i in range(len(data)):\n attrs[data[i]] = data2[i]\n except:\n failed_str += 'table_1, '\n data = []\n data2 = []\n try:\n table = soup.find('table', attrs={'id':'productDetails_techSpec_section_2'})\n table_body = table.find('tbody')\n\n rows = table_body.find_all('tr')\n for row in rows:\n key = row.find_all('th')\n value = row.find_all('td')\n keys = [ele.text.strip() for ele in key]\n values = [ele.text.strip() for ele in value]\n data.append(keys[0])\n data2.append(values[0][1:])\n\n for i in range(len(data)):\n attrs[data[i]] = data2[i]\n except:\n failed_str += 'table_2,'\n 
print('added item', cnt, ' out of ', mx, '(page {} out of 168)'.format(page_cnt))\n\n cnt += 1\n df = df.append(attrs, ignore_index=True)\n if failed_str != '':\n failed_list.append([failed_str, url])\n print(len(failed_list))\n page_cnt += 1\n",
"added item 1 out of 24 (page 1 out of 168)\nadded item 2 out of 24 (page 1 out of 168)\nadded item 3 out of 24 (page 1 out of 168)\nadded item 4 out of 24 (page 1 out of 168)\nadded item 5 out of 24 (page 1 out of 168)\nadded item 6 out of 24 (page 1 out of 168)\nadded item 7 out of 24 (page 1 out of 168)\nadded item 8 out of 24 (page 1 out of 168)\nadded item 9 out of 24 (page 1 out of 168)\nadded item 10 out of 24 (page 1 out of 168)\nadded item 11 out of 24 (page 1 out of 168)\nadded item 12 out of 24 (page 1 out of 168)\nadded item 13 out of 24 (page 1 out of 168)\nadded item 14 out of 24 (page 1 out of 168)\nadded item 15 out of 24 (page 1 out of 168)\nadded item 16 out of 24 (page 1 out of 168)\nadded item 17 out of 24 (page 1 out of 168)\nadded item 18 out of 24 (page 1 out of 168)\nadded item 19 out of 24 (page 1 out of 168)\nadded item 20 out of 24 (page 1 out of 168)\nadded item 21 out of 24 (page 1 out of 168)\nadded item 22 out of 24 (page 1 out of 168)\nadded item 23 out of 24 (page 1 out of 168)\nadded item 24 out of 24 (page 1 out of 168)\n1\nadded item 1 out of 24 (page 2 out of 168)\nadded item 2 out of 24 (page 2 out of 168)\nadded item 3 out of 24 (page 2 out of 168)\nadded item 4 out of 24 (page 2 out of 168)\nadded item 5 out of 24 (page 2 out of 168)\nadded item 6 out of 24 (page 2 out of 168)\nadded item 7 out of 24 (page 2 out of 168)\nadded item 8 out of 24 (page 2 out of 168)\nadded item 9 out of 24 (page 2 out of 168)\nadded item 10 out of 24 (page 2 out of 168)\nadded item 11 out of 24 (page 2 out of 168)\nadded item 12 out of 24 (page 2 out of 168)\nadded item 13 out of 24 (page 2 out of 168)\nadded item 14 out of 24 (page 2 out of 168)\nadded item 15 out of 24 (page 2 out of 168)\nadded item 16 out of 24 (page 2 out of 168)\nadded item 17 out of 24 (page 2 out of 168)\nadded item 18 out of 24 (page 2 out of 168)\nadded item 19 out of 24 (page 2 out of 168)\nadded item 20 out of 24 (page 2 out of 168)\nadded item 21 out of 24 (page 2 
out of 168)\nadded item 22 out of 24 (page 2 out of 168)\nadded item 23 out of 24 (page 2 out of 168)\nadded item 24 out of 24 (page 2 out of 168)\n3\nadded item 1 out of 24 (page 3 out of 168)\nadded item 2 out of 24 (page 3 out of 168)\nadded item 3 out of 24 (page 3 out of 168)\nadded item 4 out of 24 (page 3 out of 168)\nadded item 5 out of 24 (page 3 out of 168)\nadded item 6 out of 24 (page 3 out of 168)\nadded item 7 out of 24 (page 3 out of 168)\nadded item 8 out of 24 (page 3 out of 168)\nadded item 9 out of 24 (page 3 out of 168)\nadded item 10 out of 24 (page 3 out of 168)\nadded item 11 out of 24 (page 3 out of 168)\nadded item 12 out of 24 (page 3 out of 168)\nadded item 13 out of 24 (page 3 out of 168)\nadded item 14 out of 24 (page 3 out of 168)\nadded item 15 out of 24 (page 3 out of 168)\nadded item 16 out of 24 (page 3 out of 168)\nadded item 17 out of 24 (page 3 out of 168)\nadded item 18 out of 24 (page 3 out of 168)\nadded item 19 out of 24 (page 3 out of 168)\nadded item 20 out of 24 (page 3 out of 168)\nadded item 21 out of 24 (page 3 out of 168)\nadded item 22 out of 24 (page 3 out of 168)\nadded item 23 out of 24 (page 3 out of 168)\nadded item 24 out of 24 (page 3 out of 168)\n10\nadded item 1 out of 25 (page 4 out of 168)\nadded item 2 out of 25 (page 4 out of 168)\nadded item 3 out of 25 (page 4 out of 168)\nadded item 4 out of 25 (page 4 out of 168)\nadded item 5 out of 25 (page 4 out of 168)\nadded item 6 out of 25 (page 4 out of 168)\nadded item 7 out of 25 (page 4 out of 168)\nadded item 8 out of 25 (page 4 out of 168)\nadded item 9 out of 25 (page 4 out of 168)\nadded item 10 out of 25 (page 4 out of 168)\nadded item 11 out of 25 (page 4 out of 168)\nadded item 12 out of 25 (page 4 out of 168)\nadded item 13 out of 25 (page 4 out of 168)\nadded item 14 out of 25 (page 4 out of 168)\nadded item 15 out of 25 (page 4 out of 168)\nadded item 16 out of 25 (page 4 out of 168)\nadded item 17 out of 25 (page 4 out of 168)\nadded item 18 out 
of 25 (page 4 out of 168)\nadded item 19 out of 25 (page 4 out of 168)\nadded item 20 out of 25 (page 4 out of 168)\nadded item 21 out of 25 (page 4 out of 168)\nadded item 22 out of 25 (page 4 out of 168)\nadded item 23 out of 25 (page 4 out of 168)\nadded item 24 out of 25 (page 4 out of 168)\nadded item 25 out of 25 (page 4 out of 168)\n13\nadded item 1 out of 24 (page 5 out of 168)\nadded item 2 out of 24 (page 5 out of 168)\nadded item 3 out of 24 (page 5 out of 168)\nadded item 4 out of 24 (page 5 out of 168)\nadded item 5 out of 24 (page 5 out of 168)\nadded item 6 out of 24 (page 5 out of 168)\nadded item 7 out of 24 (page 5 out of 168)\nadded item 8 out of 24 (page 5 out of 168)\nadded item 9 out of 24 (page 5 out of 168)\nadded item 10 out of 24 (page 5 out of 168)\nadded item 11 out of 24 (page 5 out of 168)\nadded item 12 out of 24 (page 5 out of 168)\nadded item 13 out of 24 (page 5 out of 168)\nadded item 14 out of 24 (page 5 out of 168)\nadded item 15 out of 24 (page 5 out of 168)\nadded item 16 out of 24 (page 5 out of 168)\nadded item 17 out of 24 (page 5 out of 168)\nadded item 18 out of 24 (page 5 out of 168)\nadded item 19 out of 24 (page 5 out of 168)\nadded item 20 out of 24 (page 5 out of 168)\nadded item 21 out of 24 (page 5 out of 168)\nadded item 22 out of 24 (page 5 out of 168)\nadded item 23 out of 24 (page 5 out of 168)\nadded item 24 out of 24 (page 5 out of 168)\n20\nadded item 1 out of 25 (page 6 out of 168)\nadded item 2 out of 25 (page 6 out of 168)\nadded item 3 out of 25 (page 6 out of 168)\nadded item 4 out of 25 (page 6 out of 168)\nadded item 5 out of 25 (page 6 out of 168)\nadded item 6 out of 25 (page 6 out of 168)\nadded item 7 out of 25 (page 6 out of 168)\nadded item 8 out of 25 (page 6 out of 168)\nadded item 9 out of 25 (page 6 out of 168)\nadded item 10 out of 25 (page 6 out of 168)\nadded item 11 out of 25 (page 6 out of 168)\nadded item 12 out of 25 (page 6 out of 168)\nadded item 13 out of 25 (page 6 out of 
168)\nadded item 14 out of 25 (page 6 out of 168)\nadded item 15 out of 25 (page 6 out of 168)\nadded item 16 out of 25 (page 6 out of 168)\nadded item 17 out of 25 (page 6 out of 168)\nadded item 18 out of 25 (page 6 out of 168)\nadded item 19 out of 25 (page 6 out of 168)\nadded item 20 out of 25 (page 6 out of 168)\nadded item 21 out of 25 (page 6 out of 168)\nadded item 22 out of 25 (page 6 out of 168)\nadded item 23 out of 25 (page 6 out of 168)\nadded item 24 out of 25 (page 6 out of 168)\nadded item 25 out of 25 (page 6 out of 168)\n27\nadded item 1 out of 24 (page 7 out of 168)\nadded item 2 out of 24 (page 7 out of 168)\nadded item 3 out of 24 (page 7 out of 168)\nadded item 4 out of 24 (page 7 out of 168)\nadded item 5 out of 24 (page 7 out of 168)\nadded item 6 out of 24 (page 7 out of 168)\nadded item 7 out of 24 (page 7 out of 168)\nadded item 8 out of 24 (page 7 out of 168)\nadded item 9 out of 24 (page 7 out of 168)\nadded item 10 out of 24 (page 7 out of 168)\nadded item 11 out of 24 (page 7 out of 168)\nadded item 12 out of 24 (page 7 out of 168)\nadded item 13 out of 24 (page 7 out of 168)\nadded item 14 out of 24 (page 7 out of 168)\nadded item 15 out of 24 (page 7 out of 168)\nadded item 16 out of 24 (page 7 out of 168)\nadded item 17 out of 24 (page 7 out of 168)\nadded item 18 out of 24 (page 7 out of 168)\nadded item 19 out of 24 (page 7 out of 168)\nadded item 20 out of 24 (page 7 out of 168)\nadded item 21 out of 24 (page 7 out of 168)\nadded item 22 out of 24 (page 7 out of 168)\nadded item 23 out of 24 (page 7 out of 168)\nadded item 24 out of 24 (page 7 out of 168)\n34\nadded item 1 out of 24 (page 8 out of 168)\nadded item 2 out of 24 (page 8 out of 168)\nadded item 3 out of 24 (page 8 out of 168)\nadded item 4 out of 24 (page 8 out of 168)\nadded item 5 out of 24 (page 8 out of 168)\nadded item 6 out of 24 (page 8 out of 168)\nadded item 7 out of 24 (page 8 out of 168)\nadded item 8 out of 24 (page 8 out of 168)\nadded item 9 out of 24 
(page 8 out of 168)\nadded item 10 out of 24 (page 8 out of 168)\n"
]
],
[
[
"# Create Dataframe\n",
"_____no_output_____"
]
],
[
[
"df.describe()",
"_____no_output_____"
],
[
"df",
"_____no_output_____"
],
[
"df.to_csv('data.csv')",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
eca4fcaf549da819f2c5eec6c27ebc590453cf35 | 17,963 | ipynb | Jupyter Notebook | 7 QUORA INSINCERE QUESTIONN/a-look-at-different-embeddings.ipynb | MLVPRASAD/KaggleProjects | 379e062cf58d83ff57a456552bb956df68381fdd | [
"MIT"
] | 2 | 2020-01-25T08:31:14.000Z | 2022-03-23T18:24:03.000Z | 7 QUORA INSINCERE QUESTIONN/a-look-at-different-embeddings.ipynb | MLVPRASAD/KaggleProjects | 379e062cf58d83ff57a456552bb956df68381fdd | [
"MIT"
] | null | null | null | 7 QUORA INSINCERE QUESTIONN/a-look-at-different-embeddings.ipynb | MLVPRASAD/KaggleProjects | 379e062cf58d83ff57a456552bb956df68381fdd | [
"MIT"
] | null | null | null | 17,963 | 17,963 | 0.756722 | [
[
[
"**Notebook Objective:**\n\nObjective of the notebook is to look at the different pretrained embeddings provided in the dataset and to see how they are useful in the model building process. \n\nFirst let us import the necessary modules and read the input data.",
"_____no_output_____"
]
],
[
[
"import os\nimport time\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\nfrom tqdm import tqdm\nimport math\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import metrics\n\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation, CuDNNGRU, Conv1D\nfrom keras.layers import Bidirectional, GlobalMaxPool1D\nfrom keras.models import Model\nfrom keras import initializers, regularizers, constraints, optimizers, layers",
"_____no_output_____"
],
[
"train_df = pd.read_csv(\"../input/train.csv\")\ntest_df = pd.read_csv(\"../input/test.csv\")\nprint(\"Train shape : \",train_df.shape)\nprint(\"Test shape : \",test_df.shape)",
"_____no_output_____"
]
],
[
[
"Next steps are as follows:\n * Split the training dataset into train and val sample. Cross validation is a time consuming process and so let us do simple train val split.\n * Fill up the missing values in the text column with '_na_'\n * Tokenize the text column and convert them to vector sequences\n * Pad the sequence as needed - if the number of words in the text is greater than 'max_len' trunacate them to 'max_len' or if the number of words in the text is lesser than 'max_len' add zeros for remaining values.",
"_____no_output_____"
]
],
[
[
"## split to train and val\ntrain_df, val_df = train_test_split(train_df, test_size=0.1, random_state=2018)\n\n## some config values \nembed_size = 300 # how big is each word vector\nmax_features = 50000 # how many unique words to use (i.e num rows in embedding vector)\nmaxlen = 100 # max number of words in a question to use\n\n## fill up the missing values\ntrain_X = train_df[\"question_text\"].fillna(\"_na_\").values\nval_X = val_df[\"question_text\"].fillna(\"_na_\").values\ntest_X = test_df[\"question_text\"].fillna(\"_na_\").values\n\n## Tokenize the sentences\ntokenizer = Tokenizer(num_words=max_features)\ntokenizer.fit_on_texts(list(train_X))\ntrain_X = tokenizer.texts_to_sequences(train_X)\nval_X = tokenizer.texts_to_sequences(val_X)\ntest_X = tokenizer.texts_to_sequences(test_X)\n\n## Pad the sentences \ntrain_X = pad_sequences(train_X, maxlen=maxlen)\nval_X = pad_sequences(val_X, maxlen=maxlen)\ntest_X = pad_sequences(test_X, maxlen=maxlen)\n\n## Get the target values\ntrain_y = train_df['target'].values\nval_y = val_df['target'].values",
"_____no_output_____"
]
],
[
[
"**Without Pretrained Embeddings:**\n\nNow that we are done with all the necessary preprocessing steps, we can first train a Bidirectional GRU model. We will not use any pre-trained word embeddings for this model and the embeddings will be learnt from scratch. Please check out the model summary for the details of the layers used. ",
"_____no_output_____"
]
],
[
[
"inp = Input(shape=(maxlen,))\nx = Embedding(max_features, embed_size)(inp)\nx = Bidirectional(CuDNNGRU(64, return_sequences=True))(x)\nx = GlobalMaxPool1D()(x)\nx = Dense(16, activation=\"relu\")(x)\nx = Dropout(0.1)(x)\nx = Dense(1, activation=\"sigmoid\")(x)\nmodel = Model(inputs=inp, outputs=x)\nmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n\nprint(model.summary())",
"_____no_output_____"
]
],
[
[
"Train the model using train sample and monitor the metric on the valid sample. This is just a sample model running for 2 epochs. Changing the epochs, batch_size and model parameters might give us a better model.",
"_____no_output_____"
]
],
[
[
"## Train the model \nmodel.fit(train_X, train_y, batch_size=512, epochs=2, validation_data=(val_X, val_y))",
"_____no_output_____"
]
],
[
[
"Now let us get the validation sample predictions and also get the best threshold for F1 score. ",
"_____no_output_____"
]
],
[
[
"pred_noemb_val_y = model.predict([val_X], batch_size=1024, verbose=1)\nfor thresh in np.arange(0.1, 0.501, 0.01):\n thresh = np.round(thresh, 2)\n print(\"F1 score at threshold {0} is {1}\".format(thresh, metrics.f1_score(val_y, (pred_noemb_val_y>thresh).astype(int))))",
"_____no_output_____"
]
],
[
[
"Now let us get the test set predictions as well and save them",
"_____no_output_____"
]
],
[
[
"pred_noemb_test_y = model.predict([test_X], batch_size=1024, verbose=1)",
"_____no_output_____"
]
],
[
[
"Now that our model building is done, it might be a good idea to clean up some memory before we go to the next step.",
"_____no_output_____"
]
],
[
[
"del model, inp, x\nimport gc; gc.collect()\ntime.sleep(10)",
"_____no_output_____"
]
],
[
[
"So we got some baseline GRU model without pre-trained embeddings. Now let us use the provided embeddings and rebuild the model again to see the performance. \n\n",
"_____no_output_____"
]
],
[
[
"!ls ../input/embeddings/",
"_____no_output_____"
]
],
[
[
"We have four different types of embeddings.\n * GoogleNews-vectors-negative300 - https://code.google.com/archive/p/word2vec/\n * glove.840B.300d - https://nlp.stanford.edu/projects/glove/\n * paragram_300_sl999 - https://cogcomp.org/page/resource_view/106\n * wiki-news-300d-1M - https://fasttext.cc/docs/en/english-vectors.html\n \n A very good explanation for different types of embeddings are given in this [kernel](https://www.kaggle.com/sbongo/do-pretrained-embeddings-give-you-the-extra-edge). Please refer the same for more details..\n\n**Glove Embeddings:**\n\nIn this section, let us use the Glove embeddings and rebuild the GRU model.",
"_____no_output_____"
]
],
[
[
"EMBEDDING_FILE = '../input/embeddings/glove.840B.300d/glove.840B.300d.txt'\ndef get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32')\nembeddings_index = dict(get_coefs(*o.split(\" \")) for o in open(EMBEDDING_FILE))\n\nall_embs = np.stack(embeddings_index.values())\nemb_mean,emb_std = all_embs.mean(), all_embs.std()\nembed_size = all_embs.shape[1]\n\nword_index = tokenizer.word_index\nnb_words = min(max_features, len(word_index))\nembedding_matrix = np.random.normal(emb_mean, emb_std, (nb_words, embed_size))\nfor word, i in word_index.items():\n if i >= max_features: continue\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None: embedding_matrix[i] = embedding_vector\n \ninp = Input(shape=(maxlen,))\nx = Embedding(max_features, embed_size, weights=[embedding_matrix])(inp)\nx = Bidirectional(CuDNNGRU(64, return_sequences=True))(x)\nx = GlobalMaxPool1D()(x)\nx = Dense(16, activation=\"relu\")(x)\nx = Dropout(0.1)(x)\nx = Dense(1, activation=\"sigmoid\")(x)\nmodel = Model(inputs=inp, outputs=x)\nmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\nprint(model.summary())",
"_____no_output_____"
],
[
"model.fit(train_X, train_y, batch_size=512, epochs=2, validation_data=(val_X, val_y))",
"_____no_output_____"
],
[
"pred_glove_val_y = model.predict([val_X], batch_size=1024, verbose=1)\nfor thresh in np.arange(0.1, 0.501, 0.01):\n thresh = np.round(thresh, 2)\n print(\"F1 score at threshold {0} is {1}\".format(thresh, metrics.f1_score(val_y, (pred_glove_val_y>thresh).astype(int))))",
"_____no_output_____"
]
],
[
[
"Results seem to be better than the model without pretrained embeddings.",
"_____no_output_____"
]
],
[
[
"pred_glove_test_y = model.predict([test_X], batch_size=1024, verbose=1)",
"_____no_output_____"
],
[
"del word_index, embeddings_index, all_embs, embedding_matrix, model, inp, x\nimport gc; gc.collect()\ntime.sleep(10)",
"_____no_output_____"
]
],
[
[
"**Wiki News FastText Embeddings:**\n\nNow let us use the FastText embeddings trained on Wiki News corpus in place of Glove embeddings and rebuild the model.",
"_____no_output_____"
]
],
[
[
"EMBEDDING_FILE = '../input/embeddings/wiki-news-300d-1M/wiki-news-300d-1M.vec'\ndef get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32')\nembeddings_index = dict(get_coefs(*o.split(\" \")) for o in open(EMBEDDING_FILE) if len(o)>100)\n\nall_embs = np.stack(embeddings_index.values())\nemb_mean,emb_std = all_embs.mean(), all_embs.std()\nembed_size = all_embs.shape[1]\n\nword_index = tokenizer.word_index\nnb_words = min(max_features, len(word_index))\nembedding_matrix = np.random.normal(emb_mean, emb_std, (nb_words, embed_size))\nfor word, i in word_index.items():\n if i >= max_features: continue\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None: embedding_matrix[i] = embedding_vector\n \ninp = Input(shape=(maxlen,))\nx = Embedding(max_features, embed_size, weights=[embedding_matrix])(inp)\nx = Bidirectional(CuDNNGRU(64, return_sequences=True))(x)\nx = GlobalMaxPool1D()(x)\nx = Dense(16, activation=\"relu\")(x)\nx = Dropout(0.1)(x)\nx = Dense(1, activation=\"sigmoid\")(x)\nmodel = Model(inputs=inp, outputs=x)\nmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])",
"_____no_output_____"
],
[
"model.fit(train_X, train_y, batch_size=512, epochs=2, validation_data=(val_X, val_y))",
"_____no_output_____"
],
[
"pred_fasttext_val_y = model.predict([val_X], batch_size=1024, verbose=1)\nfor thresh in np.arange(0.1, 0.501, 0.01):\n thresh = np.round(thresh, 2)\n print(\"F1 score at threshold {0} is {1}\".format(thresh, metrics.f1_score(val_y, (pred_fasttext_val_y>thresh).astype(int))))",
"_____no_output_____"
],
[
"pred_fasttext_test_y = model.predict([test_X], batch_size=1024, verbose=1)",
"_____no_output_____"
],
[
"del word_index, embeddings_index, all_embs, embedding_matrix, model, inp, x\nimport gc; gc.collect()\ntime.sleep(10)",
"_____no_output_____"
]
],
[
[
"**Paragram Embeddings:**\n\nIn this section, we can use the paragram embeddings and build the model and make predictions.",
"_____no_output_____"
]
],
[
[
"EMBEDDING_FILE = '../input/embeddings/paragram_300_sl999/paragram_300_sl999.txt'\ndef get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32')\nembeddings_index = dict(get_coefs(*o.split(\" \")) for o in open(EMBEDDING_FILE, encoding=\"utf8\", errors='ignore') if len(o)>100)\n\nall_embs = np.stack(embeddings_index.values())\nemb_mean,emb_std = all_embs.mean(), all_embs.std()\nembed_size = all_embs.shape[1]\n\nword_index = tokenizer.word_index\nnb_words = min(max_features, len(word_index))\nembedding_matrix = np.random.normal(emb_mean, emb_std, (nb_words, embed_size))\nfor word, i in word_index.items():\n if i >= max_features: continue\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None: embedding_matrix[i] = embedding_vector\n \ninp = Input(shape=(maxlen,))\nx = Embedding(max_features, embed_size, weights=[embedding_matrix])(inp)\nx = Bidirectional(CuDNNGRU(64, return_sequences=True))(x)\nx = GlobalMaxPool1D()(x)\nx = Dense(16, activation=\"relu\")(x)\nx = Dropout(0.1)(x)\nx = Dense(1, activation=\"sigmoid\")(x)\nmodel = Model(inputs=inp, outputs=x)\nmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])",
"_____no_output_____"
],
[
"model.fit(train_X, train_y, batch_size=512, epochs=2, validation_data=(val_X, val_y))",
"_____no_output_____"
],
[
"pred_paragram_val_y = model.predict([val_X], batch_size=1024, verbose=1)\nfor thresh in np.arange(0.1, 0.501, 0.01):\n thresh = np.round(thresh, 2)\n print(\"F1 score at threshold {0} is {1}\".format(thresh, metrics.f1_score(val_y, (pred_paragram_val_y>thresh).astype(int))))",
"_____no_output_____"
],
[
"pred_paragram_test_y = model.predict([test_X], batch_size=1024, verbose=1)",
"_____no_output_____"
],
[
"del word_index, embeddings_index, all_embs, embedding_matrix, model, inp, x\nimport gc; gc.collect()\ntime.sleep(10)",
"_____no_output_____"
]
],
[
[
"**Observations:**\n * Overall pretrained embeddings seem to give better results comapred to non-pretrained model. \n * The performance of the different pretrained embeddings are almost similar.\n \n**Final Blend:**\n\nThough the results of the models with different pre-trained embeddings are similar, there is a good chance that they might capture different type of information from the data. So let us do a blend of these three models by averaging their predictions.",
"_____no_output_____"
]
],
[
[
"pred_val_y = 0.33*pred_glove_val_y + 0.33*pred_fasttext_val_y + 0.34*pred_paragram_val_y \nfor thresh in np.arange(0.1, 0.501, 0.01):\n thresh = np.round(thresh, 2)\n print(\"F1 score at threshold {0} is {1}\".format(thresh, metrics.f1_score(val_y, (pred_val_y>thresh).astype(int))))",
"_____no_output_____"
]
],
[
[
"The result seems to better than individual pre-trained models and so we let us create a submission file using this model blend.",
"_____no_output_____"
]
],
[
[
"pred_test_y = 0.33*pred_glove_test_y + 0.33*pred_fasttext_test_y + 0.34*pred_paragram_test_y\npred_test_y = (pred_test_y>0.35).astype(int)\nout_df = pd.DataFrame({\"qid\":test_df[\"qid\"].values})\nout_df['prediction'] = pred_test_y\nout_df.to_csv(\"submission.csv\", index=False)",
"_____no_output_____"
]
],
[
[
"\n**References:**\n\nThanks to the below kernels which helped me with this one. \n1. https://www.kaggle.com/jhoward/improved-lstm-baseline-glove-dropout\n2. https://www.kaggle.com/sbongo/do-pretrained-embeddings-give-you-the-extra-edge",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
eca4ff4cb386836be74766d644616ec58b5df99e | 2,539 | ipynb | Jupyter Notebook | np1.ipynb | twinspica14/jupyter_files | e2d530e14e0241045467d1f757f13a964903a410 | [
"MIT"
] | null | null | null | np1.ipynb | twinspica14/jupyter_files | e2d530e14e0241045467d1f757f13a964903a410 | [
"MIT"
] | null | null | null | np1.ipynb | twinspica14/jupyter_files | e2d530e14e0241045467d1f757f13a964903a410 | [
"MIT"
] | null | null | null | 22.078261 | 118 | 0.499015 | [
[
[
"import numpy as np\nimport time as tm\nstart = tm.perf_counter()\na1 = tm.localtime()\nb = np.array([1,2,3,4])\na = np.array([[1,2,2,3],[2,2,2,2]])# don't use extra space between two rows\na\nend = tm.perf_counter()\ntime_left = end - start\nprint(time_left)\nb1 = tm.localtime()\nprint(\"started at\" + tm.strftime(\"%X\", a1))\nprint(\"ended at\" + tm.strftime(\"%X\", b1))\n\n\n_a = np.array([[1,2,3],[4,5,6]])\n_b = np.array([[7,8], [9,10], [11,12]])\n\nprint(_b, end=\"\")\nprint(_a)\n#print(_a*_b) #it's wrong because it's array not matrix, according to operation\n#it's type of object is usaully decided \n\n#even if array is formed, it doesn't follows matrix law",
"0.00040333338438358624\nstarted at17:28:16\nended at17:28:16\n[[ 7 8]\n [ 9 10]\n [11 12]][[1 2 3]\n [4 5 6]]\n"
],
[
"# we use dtype method to find it's type\n# we can make I matrix not array by using np.eye(n), np.arange(n1,n2,step) to create array single dimensional\n# np.linspace(n1,n2,space) creates equal space among them, also has n1 and n2\n\n\n_d = np.dot(_a,_b)# matrix multiplication\nprint(_d)\n\n\n\n",
"_____no_output_____"
],
[
"\n\n",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code"
]
] |
eca511305364f8bc2fa4e6d895f5d83092edaeef | 150,615 | ipynb | Jupyter Notebook | py_evm/testing evm_py.ipynb | tvaranka/Python-evm | ee23e35d77aaa9d9ef77f288173a7e4ea135947a | [
"MIT"
] | null | null | null | py_evm/testing evm_py.ipynb | tvaranka/Python-evm | ee23e35d77aaa9d9ef77f288173a7e4ea135947a | [
"MIT"
] | null | null | null | py_evm/testing evm_py.ipynb | tvaranka/Python-evm | ee23e35d77aaa9d9ef77f288173a7e4ea135947a | [
"MIT"
] | null | null | null | 1,053.251748 | 84,240 | 0.954095 | [
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport py_evm",
"_____no_output_____"
],
[
"video = py_evm.load_video(\"../data/baby.mp4\")",
"_____no_output_____"
],
[
"video = video[:50]\nvideo = video[..., 0]",
"_____no_output_____"
],
[
"plt.imshow(video[0], \"gray\")",
"_____no_output_____"
],
[
"mm_video = py_evm.magnify(video, alpha=10)",
"_____no_output_____"
],
[
"plt.imshow(mm_video[20], \"gray\")",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
eca511854f624066cabe720a2ce9747c75732b52 | 5,107 | ipynb | Jupyter Notebook | docs/examples/provenance.ipynb | maxnoe/ctapipe | 87273b3419edb67757ae0830905f2e2277aad915 | [
"BSD-3-Clause"
] | null | null | null | docs/examples/provenance.ipynb | maxnoe/ctapipe | 87273b3419edb67757ae0830905f2e2277aad915 | [
"BSD-3-Clause"
] | null | null | null | docs/examples/provenance.ipynb | maxnoe/ctapipe | 87273b3419edb67757ae0830905f2e2277aad915 | [
"BSD-3-Clause"
] | null | null | null | 24.203791 | 366 | 0.541218 | [
[
[
"# Using the ctapipe Provenance service\n\nThe provenance functionality is used automatically when you use most of ctapipe functionality (particularly `ctapipe.core.Tool` and functions in `ctapipe.io` and `ctapipe.utils`), so normally you don't have to work with it directly. It tracks both input and output files, as well as details of the machine and software environment on which a Tool executed. \n\nHere we show some very low-level functions of this system:",
"_____no_output_____"
]
],
[
[
"from ctapipe.core import Provenance\nfrom ctapipe.utils import json2fits\nfrom pprint import pprint",
"_____no_output_____"
]
],
[
[
"## Activities\n\nThe basis of Provenance is an *activity*, which is generally an executable or step in a script. Activities can be nested (e.g. with sub-activities), as shown below, but normally this is not required:",
"_____no_output_____"
]
],
[
[
"p = Provenance() # note this is a singleton, so only ever one global provenence object\np.clear()\np.start_activity()\np.add_input_file(\"test.txt\")\n\np.start_activity(\"sub\")\np.add_input_file(\"subinput.txt\")\np.add_input_file(\"anothersubinput.txt\")\np.add_output_file(\"suboutput.txt\")\np.finish_activity(\"sub\")\n\np.start_activity(\"sub2\")\np.add_input_file(\"sub2input.txt\")\np.finish_activity(\"sub2\")\n\np.finish_activity()",
"_____no_output_____"
],
[
"p.finished_activity_names",
"_____no_output_____"
]
],
[
[
"Activities have associated input and output *entities* (files or other objects)",
"_____no_output_____"
]
],
[
[
"[ (x['activity_name'], x['input']) for x in p.provenance]",
"_____no_output_____"
]
],
[
[
"Activities track when they were started and finished:",
"_____no_output_____"
]
],
[
[
"[ (x['activity_name'],x['duration_min']) for x in p.provenance]",
"_____no_output_____"
]
],
[
[
"## Full provenance\n\nThe provence object is a list of activitites, and for each lots of details are collected:",
"_____no_output_____"
]
],
[
[
"p.provenance[0]",
"_____no_output_____"
]
],
[
[
"This can be better represented in JSON:",
"_____no_output_____"
]
],
[
[
"print(p.as_json(indent=2))",
"_____no_output_____"
]
],
[
[
"## Storing provenance info in output files\n\n* already this can be stored in something like an HDF5 file header, which allows hierarchies.\n* Try to flatted the data so it can be stored in a key=value header in a **FITS file** (using the FITS extended keyword convention to allow >8 character keywords), or as a table ",
"_____no_output_____"
]
],
[
[
"def flatten_dict(y):\n out = {}\n\n def flatten(x, name=''):\n if type(x) is dict:\n for a in x:\n flatten(x[a], name + a + '.')\n elif type(x) is list:\n i = 0\n for a in x:\n flatten(a, name + str(i) + '.')\n i += 1\n else:\n out[name[:-1]] = x\n\n flatten(y)\n return out",
"_____no_output_____"
],
[
"d = dict(activity=p.provenance)",
"_____no_output_____"
],
[
"pprint(flatten_dict(d))",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
eca538bfa27335e83a81276d3da7339cf17590c9 | 491,832 | ipynb | Jupyter Notebook | azuresqlworkshop/02-DeployAndConfigure/verifydeployment/VerifyDeployment-SS.ipynb | peterlil/sqlworkshops-azuresqlworkshop | bf6bbb01b35de7fbf7cc4b820989f3f02db26a27 | [
"CC-BY-4.0",
"MIT"
] | 74 | 2020-04-03T17:27:02.000Z | 2022-03-21T23:01:18.000Z | azuresqlworkshop/02-DeployAndConfigure/verifydeployment/VerifyDeployment-SS.ipynb | peterlil/sqlworkshops-azuresqlworkshop | bf6bbb01b35de7fbf7cc4b820989f3f02db26a27 | [
"CC-BY-4.0",
"MIT"
] | null | null | null | azuresqlworkshop/02-DeployAndConfigure/verifydeployment/VerifyDeployment-SS.ipynb | peterlil/sqlworkshops-azuresqlworkshop | bf6bbb01b35de7fbf7cc4b820989f3f02db26a27 | [
"CC-BY-4.0",
"MIT"
] | 45 | 2020-06-04T17:59:44.000Z | 2022-03-18T14:05:50.000Z | 61.540541 | 44,919 | 0.234629 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
eca53cdabb85a3d7baf71e313f3186e984181d3c | 31,541 | ipynb | Jupyter Notebook | etl/load_database.ipynb | gfidanli/crypto-algo-trading | e302935c62420774b50d4471a5227622003dbbec | [
"MIT"
] | 1 | 2021-08-31T00:50:18.000Z | 2021-08-31T00:50:18.000Z | etl/load_database.ipynb | gfidanli/crypto-algo-trading | e302935c62420774b50d4471a5227622003dbbec | [
"MIT"
] | null | null | null | etl/load_database.ipynb | gfidanli/crypto-algo-trading | e302935c62420774b50d4471a5227622003dbbec | [
"MIT"
] | null | null | null | 33.951561 | 95 | 0.325893 | [
[
[
"import sqlite3\nimport pandas as pd",
"_____no_output_____"
],
[
"conn = sqlite3.connect('db/crypto-analysis.db')\nc = conn.cursor()",
"_____no_output_____"
],
[
"# load the data into a Pandas DataFrame\nlunarCrush = pd.read_csv('./output/lunarCrush_data.csv')\ncoinbase = pd.read_csv('./output/cbpro_data.csv')\n\n# write the data to a sqlite table\nlunarCrush.to_sql('lunarCrush', conn, if_exists='replace', index = False)\ncoinbase.to_sql('coinbase', conn, if_exists='replace', index = False)",
"_____no_output_____"
],
[
"pd.read_sql('''SELECT * FROM lunarCrush''', conn)",
"_____no_output_____"
],
[
"pd.read_sql('''SELECT * FROM coinbase''', conn)",
"_____no_output_____"
],
[
"pd.read_sql('''\n SELECT \n coinbase.symbol, \n coinbase.date, \n coinbase.close, \n lunarCrush.social_dominance\n FROM coinbase\n LEFT JOIN lunarCrush \n ON lunarCrush.symbol = coinbase.symbol\n AND lunarCrush.time = coinbase.date\n ''',\n conn\n)",
"_____no_output_____"
],
[
"df = pd.read_sql(\"\"\"\n SELECT \n cb.symbol, \n cb.date, \n cb.close, \n lc.social_dominance,\n lc.galaxy_score\n FROM coinbase AS cb\n LEFT JOIN lunarCrush AS lc\n ON lc.symbol = cb.symbol\n AND lc.time = cb.date\n WHERE cb.symbol IN ('BTC','ETH') AND cb.date BETWEEN '2021-01-01' AND '2021-08-15'\n \"\"\",\n conn\n)",
"_____no_output_____"
],
[
"df.head()",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
eca544fc69fb74fd54627bfa6c241ea6fa9f3078 | 2,511 | ipynb | Jupyter Notebook | Digital_Watch.ipynb | JDaniel-JD/Digital_Watch | b89b29087c8a27ed882e96dddafd0b9c73fb1930 | [
"MIT"
] | null | null | null | Digital_Watch.ipynb | JDaniel-JD/Digital_Watch | b89b29087c8a27ed882e96dddafd0b9c73fb1930 | [
"MIT"
] | null | null | null | Digital_Watch.ipynb | JDaniel-JD/Digital_Watch | b89b29087c8a27ed882e96dddafd0b9c73fb1930 | [
"MIT"
] | null | null | null | 29.197674 | 92 | 0.440064 | [
[
[
"from tkinter import *\nimport tkinter\nfrom datetime import datetime\n\nimport pyglet\npyglet.font.add_file('digital-7.ttf')\n\ncor1 = \"#3d3d3d\" # preta\ncor2 = \"#fafcff\" # branca\ncor3 = \"#21c25c\" # verde\ncor4 = \"#eb463b\" # vermelha\ncor5 = \"#dedcdc\" # cinza\ncor6 = \"#3080f0\" # azul\n\nfundo = cor1\ncor = cor3\n\n\njanela = Tk()\njanela.title(\"\")\njanela.geometry('320x170')\njanela.resizable(width=FALSE, height=FALSE)\njanela.configure(background=fundo)\n\n\ndef relogio():\n tempo = datetime.now()\n hora = tempo.strftime(\"%H:%M:%S\")\n dia_semana = tempo.strftime(\"%A\")\n dia = tempo.day\n mes = tempo.strftime(\"%b\")\n ano = tempo.strftime(\"%Y\")\n\n l1.config(text=hora)\n l1.after(200, relogio)\n l2.config(text=dia_semana + \" \" + str(dia) +\n \"/\" + str(mes) + \"/\" + (ano))\n\n\nl1 = Label(janela, text=\"10:05:05\", font=('digital-7 80'), bg=fundo, fg=cor)\nl1.grid(row=0, column=0, sticky=NW, padx=5)\nl2 = Label(janela, font=('digital-7 20'), bg=fundo, fg=cor)\nl2.grid(row=1, column=0, sticky=NW, padx=5)\n\n\n# executando\nrelogio()\njanela.mainloop()",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code"
]
] |
eca5477b599288d1541ed733bde4d3a21c20803b | 27,053 | ipynb | Jupyter Notebook | Model backlog/Inference/35-commonlit-inf-roberta-base-aux-5-cats.ipynb | dimitreOliveira/CommonLit-Readability-Prize | e2abad78a3f79119521a480391dc1254b1dd6566 | [
"MIT"
] | null | null | null | Model backlog/Inference/35-commonlit-inf-roberta-base-aux-5-cats.ipynb | dimitreOliveira/CommonLit-Readability-Prize | e2abad78a3f79119521a480391dc1254b1dd6566 | [
"MIT"
] | null | null | null | Model backlog/Inference/35-commonlit-inf-roberta-base-aux-5-cats.ipynb | dimitreOliveira/CommonLit-Readability-Prize | e2abad78a3f79119521a480391dc1254b1dd6566 | [
"MIT"
] | null | null | null | 34.114754 | 243 | 0.531438 | [
[
[
"## Dependencies",
"_____no_output_____"
]
],
[
[
"import warnings, math, json, glob\nimport pandas as pd\nimport tensorflow.keras.layers as L\nimport tensorflow.keras.backend as K\nfrom tensorflow.keras import Model\nfrom transformers import TFAutoModelForSequenceClassification, TFAutoModel, AutoTokenizer\nfrom commonlit_scripts import *\n\n\nseed = 0\nseed_everything(seed)\nwarnings.filterwarnings('ignore')\npd.set_option('display.max_colwidth', 150)",
"_____no_output_____"
]
],
[
[
"### Hardware configuration",
"_____no_output_____"
]
],
[
[
"strategy, tpu = get_strategy()\nAUTO = tf.data.AUTOTUNE\nREPLICAS = strategy.num_replicas_in_sync\nprint(f'REPLICAS: {REPLICAS}')",
"REPLICAS: 1\n"
]
],
[
[
"# Load data",
"_____no_output_____"
]
],
[
[
"base_path = '/kaggle/input/'\ntest_filepath = base_path + 'commonlitreadabilityprize/test.csv'\ntest = pd.read_csv(test_filepath)\nprint(f'Test samples: {len(test)}')\ndisplay(test.head())",
"Test samples: 7\n"
]
],
[
[
"# Model parameters",
"_____no_output_____"
]
],
[
[
"input_noteboks = [x for x in os.listdir(base_path) if '-commonlit-' in x]\ninput_base_path = f'{base_path}{input_noteboks[0]}/'\nwith open(input_base_path + 'config.json') as json_file:\n config = json.load(json_file)\n\nconfig",
"_____no_output_____"
]
],
[
[
"## Auxiliary functions",
"_____no_output_____"
]
],
[
[
"# Datasets utility functions\ndef custom_standardization(text, is_lower=True):\n if is_lower:\n text = text.lower() # if encoder is uncased\n text = text.strip()\n return text\n\ndef sample_target(features, target):\n mean, stddev = target\n sampled_target = tf.random.normal([], mean=tf.cast(mean, dtype=tf.float32), \n stddev=tf.cast(stddev, dtype=tf.float32), dtype=tf.float32)\n return (features, sampled_target)\n\ndef get_dataset(pandas_df, tokenizer, labeled=True, ordered=False, repeated=False, \n is_sampled=False, batch_size=32, seq_len=128, is_lower=True):\n \"\"\"\n Return a Tensorflow dataset ready for training or inference.\n \"\"\"\n text = [custom_standardization(text, is_lower) for text in pandas_df['excerpt']]\n \n # Tokenize inputs\n tokenized_inputs = tokenizer(text, max_length=seq_len, truncation=True, \n padding='max_length', return_tensors='tf')\n \n if labeled:\n dataset = tf.data.Dataset.from_tensor_slices(({'input_ids': tokenized_inputs['input_ids'], \n 'attention_mask': tokenized_inputs['attention_mask']}, \n (pandas_df['target'], pandas_df['standard_error'])))\n if is_sampled:\n dataset = dataset.map(sample_target, num_parallel_calls=tf.data.AUTOTUNE)\n else:\n dataset = tf.data.Dataset.from_tensor_slices({'input_ids': tokenized_inputs['input_ids'], \n 'attention_mask': tokenized_inputs['attention_mask']})\n \n if repeated:\n dataset = dataset.repeat()\n if not ordered:\n dataset = dataset.shuffle(2048)\n dataset = dataset.batch(batch_size)\n dataset = dataset.cache()\n dataset = dataset.prefetch(tf.data.AUTOTUNE)\n return dataset",
"_____no_output_____"
],
[
"model_path_list = glob.glob(f'{input_base_path}*.h5')\nmodel_path_list.sort()\n\nprint('Models to predict:')\nprint(*model_path_list, sep='\\n')",
"Models to predict:\n/kaggle/input/35-commonlit-roberta-base-aux-5-cats/model_0.h5\n"
]
],
[
[
"# Model",
"_____no_output_____"
]
],
[
[
"def model_fn(encoder, seq_len=256):\n input_ids = L.Input(shape=(seq_len,), dtype=tf.int32, name='input_ids')\n input_attention_mask = L.Input(shape=(seq_len,), dtype=tf.int32, name='attention_mask')\n \n outputs = encoder({'input_ids': input_ids, \n 'attention_mask': input_attention_mask})\n last_hidden_state = outputs['last_hidden_state']\n \n cls_token = last_hidden_state[:, 0, :]\n output = L.Dense(1, name='output')(cls_token)\n output_sample = L.Dense(1, name='output_sample')(cls_token)\n output_aux = L.Dense(5, activation='softmax', name='output_aux')(cls_token)\n \n model = Model(inputs=[input_ids, input_attention_mask], \n outputs=[output, output_sample, output_aux])\n return model\n\n\nwith strategy.scope():\n encoder = TFAutoModel.from_pretrained(config['BASE_MODEL'])\n # Freeze embeddings\n encoder.layers[0].embeddings.trainable = False\n model = model_fn(encoder, config['SEQ_LEN'])\n \nmodel.summary()",
"Some layers from the model checkpoint at /kaggle/input/huggingface-roberta/roberta-base/ were not used when initializing TFRobertaModel: ['lm_head']\n- This IS expected if you are initializing TFRobertaModel from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n- This IS NOT expected if you are initializing TFRobertaModel from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\nAll the layers of TFRobertaModel were initialized from the model checkpoint at /kaggle/input/huggingface-roberta/roberta-base/.\nIf your task is similar to the task the model of the checkpoint was trained on, you can already use TFRobertaModel for predictions without further training.\n"
]
],
[
[
"# Test set predictions",
"_____no_output_____"
]
],
[
[
"tokenizer = AutoTokenizer.from_pretrained(config['BASE_MODEL'])\ntest_pred = []\n\nfor model_path in model_path_list:\n print(model_path)\n if tpu: tf.tpu.experimental.initialize_tpu_system(tpu)\n K.clear_session()\n model.load_weights(model_path)\n\n # Test predictions\n test_ds = get_dataset(test, tokenizer, labeled=False, ordered=True, \n batch_size=config['BATCH_SIZE'], seq_len=config['SEQ_LEN'])\n x_test = test_ds.map(lambda sample: sample)\n test_pred.append(model.predict(x_test)[0])",
"/kaggle/input/35-commonlit-roberta-base-aux-5-cats/model_0.h5\n"
]
],
[
[
"# Test set predictions",
"_____no_output_____"
]
],
[
[
"submission = test[['id']]\nsubmission['target'] = np.mean(test_pred, axis=0)\nsubmission.to_csv('submission.csv', index=False)\ndisplay(submission.head(10))",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
eca55473d450e63bbe77dc3a2587144389e67d29 | 62,173 | ipynb | Jupyter Notebook | notebooks/c2_interactive_interface.ipynb | jonabox/CyberBattleSim | 511a079d1ee8663233ffbbe9e7d2155e82e27bd2 | [
"MIT"
] | null | null | null | notebooks/c2_interactive_interface.ipynb | jonabox/CyberBattleSim | 511a079d1ee8663233ffbbe9e7d2155e82e27bd2 | [
"MIT"
] | null | null | null | notebooks/c2_interactive_interface.ipynb | jonabox/CyberBattleSim | 511a079d1ee8663233ffbbe9e7d2155e82e27bd2 | [
"MIT"
] | null | null | null | 199.272436 | 45,168 | 0.898139 | [
[
[
"Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License.\n\n# Command and Control interface\nThis notebooks shows how to interact with the command&control server to observe the environment and initiate actions on the nodes where the attacker client is installed.",
"_____no_output_____"
]
],
[
[
"import networkx as nx\nfrom tabulate import tabulate\nimport cyberbattle.simulation.model as model\nimport cyberbattle.simulation.actions as actions\nimport cyberbattle.simulation.commandcontrol as commandcontrol\nimport importlib\nimportlib.reload(model)\nimportlib.reload(actions)\nimportlib.reload(commandcontrol)\nimport plotly.offline as plo\nplo.init_notebook_mode(connected=True)",
"_____no_output_____"
]
],
[
[
"We first create a simulation environment from a randomly generated network graph.",
"_____no_output_____"
]
],
[
[
"g = nx.erdos_renyi_graph(35,0.05,directed=True)\ng = model.assign_random_labels(g)\nenv = model.Environment(network=g, vulnerability_library=dict([]), identifiers=model.SAMPLE_IDENTIFIERS)\n",
"_____no_output_____"
]
],
[
[
"We create the `CommandControl` object used to the environment and execute actions, and plot the graph explored so far.\n",
"_____no_output_____"
]
],
[
[
"c = commandcontrol.CommandControl(env)",
"_____no_output_____"
],
[
"c.plot_nodes()\nprint(\"Nodes disovered so far: \" + str(c.list_nodes()))\nstarting_node = c.list_nodes()[0]['id']",
"Nodes disovered so far: [{'id': '27', 'status': 'owned'}]\n"
]
],
[
[
"For debugging purpose it's also convient to view the internals of the environment via the `EnvironmentDebugging` object. For instance we can use it to plot the entire graph, including nodes that were not discovered yet by the attacker.",
"_____no_output_____"
]
],
[
[
"dbg = commandcontrol.EnvironmentDebugging(c)",
"_____no_output_____"
],
[
"env.plot_environment_graph()\nprint(nx.info(env.network))",
"['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34']\n[('0', '2'), ('0', '11'), ('0', '34'), ('2', '23'), ('3', '6'), ('3', '12'), ('3', '23'), ('3', '32'), ('4', '15'), ('5', '19'), ('6', '34'), ('7', '1'), ('7', '23'), ('8', '18'), ('8', '22'), ('8', '30'), ('9', '2'), ('9', '19'), ('10', '1'), ('10', '19'), ('11', '34'), ('12', '3'), ('12', '13'), ('12', '21'), ('13', '26'), ('16', '5'), ('18', '25'), ('19', '10'), ('19', '18'), ('19', '22'), ('19', '30'), ('20', '2'), ('21', '0'), ('21', '17'), ('22', '7'), ('22', '11'), ('22', '28'), ('23', '32'), ('23', '33'), ('24', '7'), ('25', '34'), ('26', '0'), ('26', '11'), ('27', '14'), ('27', '16'), ('29', '6'), ('29', '12'), ('29', '20'), ('29', '33'), ('30', '19'), ('30', '28'), ('30', '33'), ('31', '4'), ('31', '5'), ('32', '16'), ('32', '24'), ('33', '8'), ('33', '13'), ('33', '17')]\nName: \nType: DiGraph\nNumber of nodes: 35\nNumber of edges: 59\nAverage in degree: 1.6857\nAverage out degree: 1.6857\n"
],
[
"print(tabulate(c.list_all_attacks(),{}))",
" id status properties local_attacks remote_attacks\n---- -------- ------------------------ ---------------------------- ----------------\n 27 owned ['Linux', 'PortRDPOpen'] ['RecentlyAccessedMachines'] []\n"
],
[
"outcome = c.run_attack(starting_node, 'RecentlyAccessedMachines')\noutcome",
"_____no_output_____"
],
[
"c.plot_nodes()",
"_____no_output_____"
],
[
"print(tabulate(c.list_nodes(),{}))",
" id status\n---- ----------\n 27 owned\n 16 discovered\n 14 discovered\n"
],
[
"print(tabulate(c.list_all_attacks(),{}))",
" id status properties local_attacks remote_attacks\n---- ---------- ------------ ---------------------------- ----------------\n 6 owned ['Windows'] ['RecentlyAccessedMachines'] []\n 24 discovered []\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
eca557a8ab207f4903ec0e228e546a72f40b1f1b | 28,654 | ipynb | Jupyter Notebook | Week 9-13 - Curve Fitting Techniques/NuMeth_4_Curve_Fitting.ipynb | eugeneembalzado/Numeths | 6f8f6059e9b15b923c471fb9e34e450ce23d6496 | [
"Apache-2.0"
] | 1 | 2021-08-20T11:01:13.000Z | 2021-08-20T11:01:13.000Z | Week 9-13 - Curve Fitting Techniques/NuMeth_4_Curve_Fitting.ipynb | eugeneembalzado/Numeths | 6f8f6059e9b15b923c471fb9e34e450ce23d6496 | [
"Apache-2.0"
] | null | null | null | Week 9-13 - Curve Fitting Techniques/NuMeth_4_Curve_Fitting.ipynb | eugeneembalzado/Numeths | 6f8f6059e9b15b923c471fb9e34e450ce23d6496 | [
"Apache-2.0"
] | 10 | 2021-02-01T11:03:28.000Z | 2021-08-23T02:36:49.000Z | 38.513441 | 680 | 0.521044 | [
[
[
"<a href=\"https://colab.research.google.com/github/dyjdlopez/numeth2021/blob/main/Week%209-13%20-%20Curve%20Fitting%20Techniques/NuMeth_4_Curve_Fitting.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"# Curve Fitting Techniques\n$_{\\text{ยฉD.J. Lopez | 2021 | Computational Methods for Computer Engineers}}$\n\nCurve fitting is one of the most used algorithms for optmization expecially in business applications. The use of curve-fitting functions ranges from engineering and signal applications such as approximations and signal replication to business applications in forecasting and operations optimization. In this module, we will discuss the several techniques that can be used in curve fitting. Specifically, we will cover:\n\n* Linear Regression\n* Multiple Linear Regression\n* Least-Squares Method (Normal Equation Method)\n* Metrics of Regression\n* Linear Interpolation\n* Lagrange Method\n* Newton's Method",
"_____no_output_____"
],
[
"## 4.1 Curves\nWhen talking about curves, it is not simply wavy lines or simple drawing elements. Rather in our course, we take curves as functions. In the previous lessons we have seen the graphs of the functions and we call them curves as well. But in this module, we are going to identify what is the function based on sets of data. We can use this for idnetifying missing data or creating approximation for new data considering the function we created. ",
"_____no_output_____"
],
[
"### 4.1.1 Extrapolation\nExtrapolation can be imagined as the appoximation of data beyond the dataset based on the given data,function, or curve describing a specific dataset. One method in data extrapolation that we will discuss in this course is regression.",
"_____no_output_____"
],
[
"### 4.1.2 Interpolation\nInterpolation similar to extrapolation approximates data based on existing data, function, or curve but rather finding data beyond the given set of data it finds more specific or missing data points within a dataset.",
"_____no_output_____"
],
[
"## 4.2 Extrapolation / Regression",
"_____no_output_____"
],
[
"### 4.2.1 Linear Regression\nAs the name suggests, linear regression tries to find the best fit straight line to a given dataset. This algorithm is one of the simplest yet most important alogorithm in regression since it is the foundation of many more complex regression techniques.\n\nThe goal of this algorithm is finding a linear equation that would best describe a set of data. The equation to be used in finding that linear equation is function is given as:\n$$y = \\omega_0 + \\omega_1 X \\\\ _{\\text{(Eq. 4.1)}}$$\nWhereas $X$ is the dataset while $y$ is the corresponding values for each datapoint in $X$. The variable $\\omega$ is called the weight of the dataset consiting of $\\omega_0$ and $\\omega_1$. In other literature, $\\omega_0$ is called the bias term sometimes written as $b$. The following equations are used to solve for $\\omega_0$ and $\\omega_1$:\n$$\\omega_0 = r\\frac{\\sigma_y}{\\sigma_x}=\\frac{\\bar{y}*\\sum(x_i^2)-\\bar{x}\\sum(x_i*y_i)}{\\sum(x^2_i-n\\bar{x}^2)}\\\\ _{\\text{(Eq. 4.2.1)}}$$\n$$\\omega_1 =\\bar{y}-\\omega_0\\bar{x}= \\frac{\\sum(x_i*y_i)-\\bar{x}\\sum(y_i)}{\\sum(x^2_i-n\\bar{x}^2)}\\\\ _{\\text{(Eq. 4.2.2)}}$$\n\n$$\\omega_0 = r\\frac{\\sigma_y}{\\sigma_x}\\\\ _{\\text{(Eq. 4.2.3)}}$$\nWhereas $r$ is the Pearson correlation solved as:\n$$r = \\frac{\\sum((x-\\bar{x})(y-\\bar{y}))}{\\sqrt{\\sum(x-\\bar{x})\\sum(y-\\bar{y})}}\\\\ _{\\text{(Eq. 4.2.4)}}$$\n\n$$\\omega_1 =\\bar{y}-\\omega_0\\bar{x} \\\\ _{\\text{(Eq. 4.2.5)}}$$\n\n",
"_____no_output_____"
]
],
[
[
"'''\nSince we are going to use datasets for this module, we will be generating dummy \ndata with numpy. We will use as matplotlib for visualizing the results as well.\n'''\nimport numpy as np\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"n = 10\nX = np.arange(0,n,1,dtype=np.float64)\n\nm = np.random.uniform(0.4,0.5,(n,))\nb = np.random.uniform(8,10,(n,))\n\ny = m*X+b \n\nprint(f\"X: {X}\")\nprint(f\"y: {y}\")\nprint(f\"w1 approx = {m.mean()},w0 approx. = {b.mean()}\")\n\nplt.figure(figsize=(5,5))\nplt.grid()\nplt.scatter(X,y)\nplt.show()",
"_____no_output_____"
],
[
"def linear_regressor(X,y):\n X = np.array(X)\n y = np.array(y)\n n = X.size\n w0 = (y.mean()*np.sum(X**2)-X.mean()*np.sum(X*y)) / (np.sum(X**2) - n*X.mean()**2)\n w1 = (np.sum(X*y) - X.mean()*np.sum(y)) / (np.sum(X**2) - n*X.mean()**2)\n return w0,w1\nw0,w1 = linear_regressor(X,y)\nprint(\"Linear Regression Equation: y = {:.3f}x + {:.3f}\".format(w1, w0))",
"_____no_output_____"
],
[
"## Plotting the Regression line\ndef show_regline(X,y,w1,w0):\n x_min, x_max = X.min() - 1, X.max() + 1\n linex = np.linspace(x_min, x_max)\n liney = w1*linex+w0\n plt.figure(figsize=(5,5))\n plt.grid()\n plt.scatter(X,y)\n plt.plot(linex, liney, c='red')\n plt.show()\nshow_regline(X,y,w1,w0)",
"_____no_output_____"
],
[
"def lin_reg(val,w0,w1):\n return w1*val + w0 #model\nprint(lin_reg(10, w0, w1))\nX_new, y_new = X.copy(), y.copy()\nfor i in range(10,16):\n X_new = np.insert(X_new,-1, i)\n y_new = np.insert(y_new,-1, lin_reg(i,w0,w1))\nshow_regline(X_new, y_new, w1, w0)",
"_____no_output_____"
],
[
"np.random.seed(100)\nX_1 = np.arange(0, 20, 1)\ny_1 = X_1 - 2 * (X_1 ** 2) + 0.5 * (X_1 ** 3) + np.random.normal(-3, 3, 20)\n\nplt.figure(figsize=(5,5))\nplt.grid()\nplt.scatter(X_1, y_1)\nplt.show()",
"_____no_output_____"
],
[
"w0_q,w1_q = linear_regressor(X_1, y_1)\nshow_regline(X_1,y_1,w0_q,w1_q) ",
"_____no_output_____"
]
],
[
[
"### 4.2.2 Multiple Linear Regression\nMultiple linear regression, as the name suggests uses more linear regressors in the algorithm. This can be used if there are more than one features to a dataset. The MLR can be formulated as:\n$$y = \\omega_0 + \\omega_1 x_1 + \\omega_2 x_2 + ... + \\omega_n x_n \\\\ _{\\text{(Eq. 4.4)}}$$\nWhereas $\\omega_0$ is the bias term while $\\omega_n$ are the weights or slopes of the features $x_n$. The simplest way to implement an MLR algorithm is looping over each feature and their dataset and compute the corresponding weights. In this course, we are going to implement vectorization in implementing MLR. So instead of hte linear equation in Eq. 4.4 we can re-form the equation to the matrix equation:\n$$y = \\omega X^T$$\nWhereas $\\omega$ is a vector that includes all the weights of the features $\\begin{bmatrix}\\omega_0 \\\\ \\omega_1 \\\\ \\omega_2 \\\\ \\vdots \\\\ \\omega_n\\end{bmatrix}$. While $X$ are the data of each feature vector $\\begin{bmatrix}1\\\\ x_1 \\\\ x_2 \\\\ \\vdots \\\\ x_n\\end{bmatrix}$.\n\nWe will use the **Normal Equation** in solving MLR. The Normal equation uses the Least-Squares Cost function and is formulated as:\n$$\\theta = (X^TX)^{-1}X^Ty \\\\ _{\\text{(Eq. 4.5)}}$$\nWhereas $\\theta$ is the hypothesis or model to be created while $X$ represents the data vector and $y$ represents the labels or values corresponding to the data vector. The term $(X^TX)^{-1}$ is called the **pseudoinverse** or the **Moore-Penrose** matrix. The pseudoinverse of a matrix term of Eq. 4.5 assures that the data are normal or orthogonal. This helps check the property of Autocorrelation between the features of the data. The other properties of datasets that are safe for linear regression are Homoscedasticity, Non-multicollinearity, and Non-endogeneity. These properties will be discussed in depth in the Machine Learning Course of the AIDA Electives. \n",
"_____no_output_____"
]
],
[
[
"X = np.array([\n [1,2,3],\n [7,3,2],\n [9,6,10],\n])\ny = np.array([[4,3,8]]).T\nbias = np.ones(y.shape)\nX_train = np.append(bias,X, axis=1).T\nX_dot = X_train @ X_train.T\npseudoinv = np.linalg.inv(X_dot)\ny_dot = X_train @ y\ntheta = pseudoinv @ y_dot\nfor i in range(len(theta)):\n print(f\"w{i} : {float(theta[i])}\")",
"_____no_output_____"
]
],
[
[
"### 4.2.3 Metrics of Regression\nFor us to determine how regression models are reliable or accurate we can use the following statistics.",
"_____no_output_____"
],
[
"#### Measures of Reliability\nMeasures of reliability or predictability tells how models are reliable for predicting new values. Some of statistics used here are the R-Squared and the Adjusted R-Squared.",
"_____no_output_____"
],
[
"##### *R-Squared ($R^2$)*\nRepresents the proportion of the variance for a prediction that is explained by the inputs in a regression model. The formula is given as:\n\n$$\\text{R}^2 = 1 - \\frac{\\sum(y-\\hat{y})^2}{\\sum(y-\\bar{y})^2} \\\\ _{\\text{(Eq. 4.6)}}$$\nWhereas the numerator for the rational part is called the **residual of the sum of squares** in which $\\hat{y}$ is the prediction of the model and $y$ is from the testing dataset. The denominator of the rational part is called the **total sum of squares**.\n",
"_____no_output_____"
],
[
"###### *Adjusted R-Squared ($\\text{Adj }R^2$)*\nA modified version of the R-squared which has been adjusted to the number of predictors in the model. The adjusted R-squared increases only if the new term improves the model more than would be expected by chance. The formula is given as:\n$$\\text{Adj. R}^2 = 1 - \\begin{bmatrix}\\frac{(1-\\text{R}^2)(n-1)}{n-p-1}\\end{bmatrix} \\\\ _{\\text{(Eq. 4.6)}}$$\nWhereas $n$ is the size of the sample and $p$ is the number of predictors.",
"_____no_output_____"
],
[
"#### Measures of Error \nMeasures of error can tell how \"off\" predicted values are from the true values or ground truth. These stastical measures can also serve as a minimisation cost function in optimizing a model.",
"_____no_output_____"
],
[
"##### *Mean Squared Error (MSE)*\nThe MSE shows an estimation of the deviations of the predictions from the ground truths by getting the average of the squared errors. It can be also interpreted as the mean of the Euclidean distances of the predictions and ground truths. MSE is best when considering the existence of outliers in the data. The formula is given as:\n$$\\\\ \\text{MSE}=\\frac{1}{n}\\sum(y-\\hat{y})^2 \\\\ _{\\text{(Eq. 4.7)}}$$ ",
"_____no_output_____"
],
[
"##### *Root Mean Squared Error (MSE)*\nThe limitations of MSE as a measure of measure is its intepretability wherein it does not express the error in the original measurement units. The RMSE can also be considered as the standard deviation of the residuals unlike the MSE which is the variance. The formula of is given as:\n$$\\text{RMSE}= \\sqrt{\\text{MSE}} \\\\ _{\\text{(Eq. 4.8)}}$$ ",
"_____no_output_____"
],
[
"##### *Mean Absolute Error (MAE)*\nThe MAE, as its name suggests, takes the average of the Manhattan distances of the predictions and the ground truths. If outliers are not much of a concern for the problem MAE can be a better choice than MSE and RMSE. The formula is given as:\n$$\\\\ \\text{MAE}=\\frac{1}{n}\\sum{|y-\\hat{y}|} \\\\ _{\\text{(Eq. 4.9)}}$$ ",
"_____no_output_____"
],
[
"### 4.2.4 Applied Uses of Linear Regression\nRefered discussion: [Applied Linear Regression](https://colab.research.google.com/github/dyjdlopez/numeth2021/blob/main/Week%209-13%20-%20Curve%20Fitting%20Techniques/NuMeth_4_5_Applied_Linear_Regression.ipynb)",
"_____no_output_____"
],
[
"## 4.3 Interpolation\nInterpolation as previously discussed, pertains to the approximation of values within a given range. It is unlike regression or extrapolation that is trying to approximate values beyond the range. Interpolation can be used to increase the resolution of values between such ranges.",
"_____no_output_____"
],
[
"### 4.3.1 Linear Interpolation\nThis method is the simplest implementation of interpolation considering the modified midpoint formula. Like linear regression, this method is best for linear equations but would have inaccurate approximations for polynomials with higher degrees. The formula is given as:\n\n$$ y = y_1 + \\frac{y_2-y_1}{x_2-x_1}(x-x_1) \\\\ _{\\text{(Eq. 4.9)}}$$\n",
"_____no_output_____"
]
],
[
[
"np.random.seed(30)\nX_2 = np.arange(0, 8, 1, dtype=float)\ny_2 = X_2 + 4*(X_2 ** 2) - 3*np.random.normal(0, 3, X_2.size)\n\nplt.figure(figsize=(5,5))\nplt.grid()\nplt.scatter(X_2, y_2)\nplt.show()",
"_____no_output_____"
]
],
[
[
"We can see in this sample that there is a big gap between 5 and 6 of the independent variables. We can apply linear interpolation in bridging the gap.",
"_____no_output_____"
]
],
[
[
"def lin_interp(x, x1, x2, y1, y2): \n return y1 + ((y2-y1)/(x2-x1)) * (x-x1)",
"_____no_output_____"
],
[
"y_56 = lin_interp(2.5, X_2[2], X_2[3], y_1[2], y_2[3])\nprint(y_56)\nX_2new = X_2.copy()\ny_2new = y_2.copy()\nX_2new = np.insert(X_2new, 3, 2.5)\ny_2new = np.insert(y_2new, 3, y_25)\n\nplt.figure(figsize=(5,5))\nplt.grid()\nplt.scatter(X_2new, y_2new)\nplt.plot(X_2new, y_2new)\nplt.show()",
"_____no_output_____"
],
[
"## If we want to increase the resolution of the graph we can perform linear interpolation for every datapoint\n## We need to make a routine using the formula we created.\ninp = 0\nX_2new = X_2.copy()\ny_2new = y_2.copy()\nfor i, xi in enumerate(X_2):\n if i !=0:\n xi -= 0.5\n y = lin_interp(xi, X_2[i-1], X_2[i], y_2[i-1], y_2[i])\n print(xi, y)\n X_2new = np.insert(X_2new, 2*i-1, xi)\n y_2new = np.insert(y_2new, 2*i-1, y)\n\nplt.figure(figsize=(12,12))\nplt.grid()\nplt.scatter(X_2, y_2)\nplt.plot(X_2new, y_2new)\nplt.show()",
"_____no_output_____"
]
],
[
[
"### 4.3.2 Lagrange Method\nThe Lagrange method is based on creating a polynomial of degree $n-1$. The degree is dependent on the number of points considered in the dataset $n$. It can be characterized as:\n$$ y(x) = P_1(x)y_1 + P_2(x)y_3 + P_3(x)y_3 + ... + P_n(x)y_n \\\\ _{\\text{(Eq. 4.10)}}$$\nThis can also be expressed as:\n$$ y(x) = \\sum_{i=0}^n P_i(x)y_i \\\\ _{\\text{(Eq. 4.11)}}$$\nWhereas $P(x)$ is the function for the lagrangian polynomial coefficient. Formulated as:\n$$ P_i(x) = \\prod_{j=0 \\\\ j\\neq i}^n \\frac{(x-x_j)}{(x_i-x_j)} \\\\ _{\\text{(Eq. 4.12)}}$$\nEq. 4.11 can then be re-formulated as:\n$$ y(x) = \\sum_{i=0}^n y_i \\begin{pmatrix}\\prod_{j=0 \\\\ j\\neq i}^n \\frac{(x-x_j)}{(x_i-x_j)} \\end{pmatrix} \\\\ _{\\text{(Eq. 4.13)}}$$",
"_____no_output_____"
]
],
[
[
"def coeff(x,i,X):\n x_temp = np.delete(X,i)\n return ((x-x_temp)/(X[i]-x_temp)).prod()",
"_____no_output_____"
],
[
"x = 0.5\nfor i in range(X_2.size):\n Pi = coeff(x,i,X_2)\n print(Pi)",
"_____no_output_____"
],
[
"def lagrange(x,Y,X):\n y = 0\n for i in range(X.size):\n y += Y[i]*coeff(x,i,X)\n return y ",
"_____no_output_____"
],
[
"lagrange(0.5, y_2, X_2)",
"_____no_output_____"
],
[
"X_3new = X_2.copy()\ny_3new = y_2.copy()\nfor i, xi in enumerate(X_2):\n if i !=0:\n xi -= 0.5\n y = lagrange(xi,y_3new,X_3new)\n X_3new = np.insert(X_3new, 2*i-1, xi)\n y_3new = np.insert(y_3new, 2*i-1, y)\n\nplt.figure(figsize=(12,12))\nplt.grid()\n# plt.scatter(X_3new, y_3new)\n\nplt.plot(X_3new, y_3new, label=\"Lagrange\")\nplt.plot(X_2new, y_2new, label=\"Linear\", color='green')\nplt.scatter(X_2, y_2, color='red')\nplt.legend()\nplt.show()\nprint(X_3new)\nprint(y_3new)",
"_____no_output_____"
]
],
[
[
"### 4.3.3 Newton's Method\nNewton's method can be applied to datapoints to obtain Newton's polynomial. Unlike Lagrange's Method, in Newton's Method, when more data points are to be used, additional basis polynomials and the corresponding coefficients can be calculated, while all existing basis polynomials and their coefficients remain unchanged. Due to the additional terms, the degree of the interpolation polynomial is higher and the approximation error may be reduced. This can be used even when the interval difference is not the same across the sequence of values. Newton's polynomial is in the form:\n$$y(x) = a_0 + (x-x_1)a_1 + (x-x_1)(x-x_2)a_2 + ... + (x-x_1)(x-x_2)...*(x-x_n)a_n \\\\_{\\text{Eq.4.14}}$$\nThe two steps in obtaining the polynomial are:\n1. Dividing the Differences\n2. Substitution",
"_____no_output_____"
],
[
"#### 4.3.3.1 Dividing the Differences\nThis step is done to obtain the coefficients of the polynomials. These coeefficients are the $a_i$ from Eq. 4.14.\nThe divided differences are applied to create a table of values whereas column indicate the degree of the polynomial ($n$) plus 1. While the rows described by the datapoints ($x_i$). For example of a cubic polynomial with 4 datapoints:\n\n<table style=\"width:200%\">\n<tr><th>----(0)-----</th><th>----(1)-----</th><th>----(2)-----</th><th>----(3)-----</th><th>----(4)-----</th>\n</tr>\n<tr><td>$x_1$</td><td>$y_1^{(1)}=y_1$</td></tr>\n<tr><td>$x_2$</td><td>$y_2^{(1)}=y_2$</td><td>$y_2^{(2)}$</td></tr>\n<tr><td>$x_3$</td><td>$y_3^{(1)}=y_3$</td><td>$y_3^{(2)}$</td><td>$y_3^{(3)}$</td></tr>\n<tr><td>$x_4$</td><td>$y_4^{(1)}=y_4$</td><td>$y_4^{(2)}$</td><td>$y_4^{(3)}$</td><td>$y_4^{(4)}$</td></tr>\n</table>\nThe general equation to be used in deriving each $y_i$ is formulated as:\n$$y_i^{(j+1)} = \\frac{y_i^{(j)}-y^{(j)}}{x_i - x_j}, \\text{for: }j = \\{0,1,2,..n\\} \\text{ and: } i = \\{j+1,...,n+1\\} \\\\_{\\text{Eq. 4.15}}$$\nThe coefficients $a$ can be obtained from the main diagonal of the table, such that $a = \\{y_i^{(j)} | i=j\\} $\n\n",
"_____no_output_____"
]
],
[
[
"n = X_2.size\ny_temp = np.zeros((n, n))\ny_temp[:,0] = y_2\nfor j in range(n-1):\n for i in range(j+1, n):\n y_temp[i, j+1] = (y_temp[i,j]-y_temp[j,j])/(X_2[i] - X_2[j])\nprint(y_temp)",
"_____no_output_____"
],
[
"def newton_coeff(X,y):\n n = X.size\n y_temp = np.zeros((n, n))\n y_temp[:,0] = y\n for j in range(n-1):\n for i in range(j+1, n):\n y_temp[i, j+1] = (y_temp[i,j]-y_temp[j,j])/(X[i] - X[j])\n a = np.diag(y_temp)\n return y_temp, a\n\nnewton_coeff(X_2,y_2)",
"_____no_output_____"
]
],
[
[
"#### 4.3.3.2 Substitution\nFor the last step, the polynomial is calculated for a given $x$ value in Eq. 4.14. We can re-formulate Eq. 4.14 into its general form by:\n$$y(x) = a_0 + \\sum^n_{i=0}\\begin{bmatrix}\\prod^i_{j=1}(x-x_j)\\end{bmatrix}a_i$$",
"_____no_output_____"
]
],
[
[
"### Newton coeff matrix\nxp = 5.6\n_, a = newton_coeff(X_2,y_2)\ncoeff_mat = np.zeros(n)\nfor i in range(0,n): \n coeff_mat[i] = 1 if i==0 else np.product(xp-X_2[:i])\nyp = a @ coeff_mat\nyp",
"_____no_output_____"
],
[
"def newton_interp(xp,y,X):\n n = X.size\n _, a = newton_coeff(X,y)\n coeff_mat = np.zeros(n)\n for i in range(0,n): \n coeff_mat[i] = 1 if i==0 else np.product(xp-X[:i])\n return a @ coeff_mat",
"_____no_output_____"
],
[
"X_4new = X_2.copy()\ny_4new = y_2.copy()\nfor i, xi in enumerate(X_2):\n if i !=0:\n xi -= 0.5\n y = newton_interp(xi,y_4new,X_4new)\n X_4new = np.insert(X_4new, 2*i-1, xi)\n y_4new = np.insert(y_4new, 2*i-1, y)\n\nplt.figure(figsize=(12,12))\nplt.grid()\n\nplt.plot(X_4new, y_4new, label=\"Newton\", color=\"purple\")\n# plt.plot(X_3new, y_3new, label=\"Lagrange\", color=\"blue\")\nplt.plot(X_2new, y_2new, label=\"Linear\", color='green')\nplt.scatter(X_2, y_2, color='red')\nplt.legend()\nplt.show()\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
eca569cf123676018990498f24f4aa51de25dbd6 | 72,336 | ipynb | Jupyter Notebook | beamz_scratch.ipynb | connorferster/beamz9000 | 2bcb5f8931d31338a0d3a45ef3c6621802160053 | [
"BSD-3-Clause"
] | null | null | null | beamz_scratch.ipynb | connorferster/beamz9000 | 2bcb5f8931d31338a0d3a45ef3c6621802160053 | [
"BSD-3-Clause"
] | null | null | null | beamz_scratch.ipynb | connorferster/beamz9000 | 2bcb5f8931d31338a0d3a45ef3c6621802160053 | [
"BSD-3-Clause"
] | null | null | null | 399.646409 | 68,276 | 0.943942 | [
[
[
"%load_ext autoreload\n%autoreload 2",
"The autoreload extension is already loaded. To reload it, use:\n %reload_ext autoreload\n"
],
[
"import pathlib",
"_____no_output_____"
],
[
"beamz_dir = pathlib.Path(\"C:/Users/cferster/Notebooks/package_development/beamz9000/beamz9000/svg_supports\")\nimport shutil",
"_____no_output_____"
],
[
"svg_names = [\n beamz_dir/\"V_ROLLER.svg\",\n beamz_dir/\"PINNED.svg\",\n beamz_dir/\"FIXED.svg\",\n beamz_dir/\"H_SPRING.svg\",\n beamz_dir/\"V_SPRING.svg\",\n beamz_dir/\"M_SPRING.svg\",\n beamz_dir/\"T_SPRING.svg\",\n]",
"_____no_output_____"
],
[
"import beamz9000 as beamz\nfrom dataclasses import dataclass\nfrom typing import Union\nfrom matplotlib.patches import PathPatch",
"_____no_output_____"
],
[
"n01 = beamz.Node(0, \"A\")\nn02 = beamz.Node(5.5, \"B\")\nn03 = beamz.Node(12, \"D\")\nn04 = beamz.Node(10, \"C\")\n\npin = beamz.Support(n01, 2)\nroller = beamz.Support(n02, 0)\nfix = beamz.Support(n04, 3)\n\nudl = beamz.Load(200, n01, n02)\n\nbeam = beamz.Beam(\n nodes=[n01, n02, n04, n03], \n supports=[pin, roller, fix], \n loads=udl,\n dimensions=[n01, n02, n04, n03], \n depth=0.66)",
"_____no_output_____"
],
[
"bp = beamz.BeamPlotter(beam)\nbp.plot(facecolor=\"#ddd\", edgecolor=\"#000\")",
"(12.000000000000002, 0.6600000000000001)\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
eca58d9f878a0cd976e4fa6ed2dcce61301f23dd | 70,193 | ipynb | Jupyter Notebook | ML_Production/02 - Boston Housing - XGBoost (Batch Transform) - Low Level.ipynb | mhmdreda99/UDACITY_ML_Engineer_ND | a6943dc96b1b28f256d06c8a429b738062a33a9a | [
"MIT"
] | null | null | null | ML_Production/02 - Boston Housing - XGBoost (Batch Transform) - Low Level.ipynb | mhmdreda99/UDACITY_ML_Engineer_ND | a6943dc96b1b28f256d06c8a429b738062a33a9a | [
"MIT"
] | null | null | null | ML_Production/02 - Boston Housing - XGBoost (Batch Transform) - Low Level.ipynb | mhmdreda99/UDACITY_ML_Engineer_ND | a6943dc96b1b28f256d06c8a429b738062a33a9a | [
"MIT"
] | null | null | null | 57.021121 | 16,128 | 0.66028 | [
[
[
"# Predicting Boston Housing Prices\n\n## Using XGBoost in SageMaker (Batch Transform)\n\n_Deep Learning Nanodegree Program | Deployment_\n\n---\n\nAs an introduction to using SageMaker's Low Level Python API we will look at a relatively simple problem. Namely, we will use the [Boston Housing Dataset](https://www.cs.toronto.edu/~delve/data/boston/bostonDetail.html) to predict the median value of a home in the area of Boston Mass.\n\nThe documentation reference for the API used in this notebook is the [SageMaker Developer's Guide](https://docs.aws.amazon.com/sagemaker/latest/dg/)\n\n## General Outline\n\nTypically, when using a notebook instance with SageMaker, you will proceed through the following steps. Of course, not every step will need to be done with each project. Also, there is quite a lot of room for variation in many of the steps, as you will see throughout these lessons.\n\n1. Download or otherwise retrieve the data.\n2. Process / Prepare the data.\n3. Upload the processed data to S3.\n4. Train a chosen model.\n5. Test the trained model (typically using a batch transform job).\n6. Deploy the trained model.\n7. Use the deployed model.\n\nIn this notebook we will only be covering steps 1 through 5 as we just want to get a feel for using SageMaker. In later notebooks we will talk about deploying a trained model in much more detail.",
"_____no_output_____"
],
[
"## Step 0: Setting up the notebook\n\nWe begin by setting up all of the necessary bits required to run our notebook. To start that means loading all of the Python modules we will need.",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\n\nimport os\n\nimport time\nfrom time import gmtime, strftime\n\n# standard libraries\nimport numpy as np\nimport pandas as pd\n\n# visualization\nimport matplotlib.pyplot as plt\n\nfrom sklearn.datasets import load_boston\nimport sklearn.model_selection",
"_____no_output_____"
]
],
[
[
"In addition to the modules above, we need to import the various bits of SageMaker that we will be using. \n\n- `get_execution_role` - retrieves the IAM role \n- `get_image_uri`- ",
"_____no_output_____"
]
],
[
[
"import sagemaker\nfrom sagemaker import get_execution_role\nfrom sagemaker.amazon.amazon_estimator import get_image_uri\n\n# This is an object that represents the SageMaker session that we are currently operating in. This\n# object contains some useful information that we will need to access later such as our region.\nsession = sagemaker.Session()\n\n# This is an object that represents the IAM role that we are currently assigned. When we construct\n# and launch the training job later we will need to tell it what IAM role it should have. Since our\n# use case is relatively simple we will simply assign the training job the role we currently have.\nrole = get_execution_role()",
"_____no_output_____"
],
[
"print(\"session:\\n\", session)\nprint(\"role:\\n\", role)",
"session:\n <sagemaker.session.Session object at 0x7fd12154ef60>\nrole:\n arn:aws:iam::394610559328:role/service-role/AmazonSageMaker-ExecutionRole-20190722T122791\n"
]
],
[
[
"## Step 1: Downloading the data\n\nFortunately, this dataset can be retrieved using sklearn and so this step is relatively straightforward.",
"_____no_output_____"
]
],
[
[
"boston = load_boston()",
"_____no_output_____"
]
],
[
[
"## Step 2: Preparing and splitting the data\n\nGiven that this is clean tabular data, we don't need to do any processing. However, we do need to split the rows in the dataset up into train, test and validation sets.",
"_____no_output_____"
]
],
[
[
"# First we package up the input data and the target variable (the median value) as pandas dataframes. This\n# will make saving the data to a file a little easier later on.\n\nX_bos_pd = pd.DataFrame(boston.data, columns=boston.feature_names)\nY_bos_pd = pd.DataFrame(boston.target)\n\n# We split the dataset into 2/3 training and 1/3 testing sets.\nX_train, X_test, Y_train, Y_test = sklearn.model_selection.train_test_split(X_bos_pd, Y_bos_pd, test_size=0.33)\n\n# Then we split the training set further into 2/3 training and 1/3 validation sets.\nX_train, X_val, Y_train, Y_val = sklearn.model_selection.train_test_split(X_train, Y_train, test_size=0.33)",
"_____no_output_____"
]
],
[
[
"## Step 3: Uploading the data files to S3\n\nWhen a training job is constructed using SageMaker, a container is executed which performs the training operation. This container is given access to data that is stored in S3. This means that we need to upload the data we want to use for training to S3. In addition, when we perform a batch transform job, SageMaker expects the input data to be stored on S3. We can use the SageMaker API to do this and hide some of the details.\n\n### Save the data locally\n\nFirst we need to create the test, train and validation csv files which we will then upload to S3.\n\n- Create or check whether data dir is available",
"_____no_output_____"
]
],
[
[
"# This is our local data directory. \n# We need to make sure that it exists.\ndata_dir = '../data/boston'\nif not os.path.exists(data_dir):\n os.makedirs(data_dir)",
"_____no_output_____"
]
],
[
[
"- Use pandas to save test, train and validation data to csv file\n- Requirement by built-in algorithm: No header or index\n - First entry in each row is the target variable",
"_____no_output_____"
]
],
[
[
"Y_val[:5]",
"_____no_output_____"
],
[
"pd.concat([Y_val[:5], X_val[:5]], axis=1)",
"_____no_output_____"
],
[
"X_test.to_csv(os.path.join(data_dir, 'test.csv'), header=False, index=False)\n\npd.concat([Y_val, X_val], axis=1).to_csv(os.path.join(data_dir, 'validation.csv'), header=False, index=False)\npd.concat([Y_train, X_train], axis=1).to_csv(os.path.join(data_dir, 'train.csv'), header=False, index=False)",
"_____no_output_____"
]
],
[
[
"### Upload to S3\n\nSince we are currently running inside of a SageMaker session, we can use the object which represents this session to upload our data to the 'default' S3 bucket. Note that it is good practice to provide a custom prefix (essentially an S3 folder) to make sure that you don't accidentally interfere with data uploaded from some other notebook or project.",
"_____no_output_____"
]
],
[
[
"prefix = 'boston-xgboost-LL'\n\ntest_location = session.upload_data(os.path.join(data_dir, 'test.csv'), key_prefix=prefix)\nval_location = session.upload_data(os.path.join(data_dir, 'validation.csv'), key_prefix=prefix)\ntrain_location = session.upload_data(os.path.join(data_dir, 'train.csv'), key_prefix=prefix)",
"_____no_output_____"
]
],
[
[
"## Step 4: Train and construct the XGBoost model\n\nNow that we have the training and validation data uploaded to S3, we can construct a training job for our XGBoost model and build the model itself.\n\n### Set up the training job\n\nFirst, we will set up and execute a training job for our model. To do this we need to specify some information that SageMaker will use to set up and properly execute the computation. For additional documentation on constructing a training job, see the [CreateTrainingJob API](https://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateTrainingJob.html) reference.",
"_____no_output_____"
]
],
[
[
"# We will need to know the name of the container that we want to use for training. SageMaker provides\n# a nice utility method to construct this for us.\ncontainer = get_image_uri(session.boto_region_name, 'xgboost')\n\n# We now specify the parameters we wish to use for our training job\ntraining_params = {}\n\n# We need to specify the permissions that this training job will have. For our purposes we can use\n# the same permissions that our current SageMaker session has.\ntraining_params['RoleArn'] = role\n\n# Here we describe the algorithm we wish to use. The most important part is the container which\n# contains the training code.\ntraining_params['AlgorithmSpecification'] = {\n \"TrainingImage\": container,\n \"TrainingInputMode\": \"File\"\n}\n\n# We also need to say where we would like the resulting model artifacts stored.\ntraining_params['OutputDataConfig'] = {\n \"S3OutputPath\": \"s3://\" + session.default_bucket() + \"/\" + prefix + \"/output\"\n}\n\n# We also need to set some parameters for the training job itself. Namely we need to describe what sort of\n# compute instance we wish to use along with a stopping condition to handle the case that there is\n# some sort of error and the training script doesn't terminate.\ntraining_params['ResourceConfig'] = {\n \"InstanceCount\": 1,\n \"InstanceType\": \"ml.m4.xlarge\",\n \"VolumeSizeInGB\": 5\n}\n \ntraining_params['StoppingCondition'] = {\n \"MaxRuntimeInSeconds\": 86400\n}\n\n# Next we set the algorithm specific hyperparameters. 
You may wish to change these to see what effect\n# there is on the resulting model.\ntraining_params['HyperParameters'] = {\n \"max_depth\": \"5\",\n \"eta\": \"0.2\",\n \"gamma\": \"4\",\n \"min_child_weight\": \"6\",\n \"subsample\": \"0.8\",\n \"objective\": \"reg:linear\",\n \"early_stopping_rounds\": \"10\",\n \"num_round\": \"200\"\n}\n\n# Now we need to tell SageMaker where the data should be retrieved from.\ntraining_params['InputDataConfig'] = [\n {\n \"ChannelName\": \"train\",\n \"DataSource\": {\n \"S3DataSource\": {\n \"S3DataType\": \"S3Prefix\",\n \"S3Uri\": train_location,\n \"S3DataDistributionType\": \"FullyReplicated\"\n }\n },\n \"ContentType\": \"csv\",\n \"CompressionType\": \"None\"\n },\n {\n \"ChannelName\": \"validation\",\n \"DataSource\": {\n \"S3DataSource\": {\n \"S3DataType\": \"S3Prefix\",\n \"S3Uri\": val_location,\n \"S3DataDistributionType\": \"FullyReplicated\"\n }\n },\n \"ContentType\": \"csv\",\n \"CompressionType\": \"None\"\n }\n]",
"_____no_output_____"
]
],
[
[
"### Execute the training job\n\nNow that we've built the dictionary object containing the training job parameters, we can ask SageMaker to execute the job.",
"_____no_output_____"
]
],
[
[
"# First we need to choose a training job name. This is useful for if we want to recall information about our\n# training job at a later date. Note that SageMaker requires a training job name and that the name needs to\n# be unique, which we accomplish by appending the current timestamp.\ntraining_job_name = \"boston-xgboost-\" + strftime(\"%Y-%m-%d-%H-%M-%S\", gmtime())\ntraining_params['TrainingJobName'] = training_job_name\n\n# And now we ask SageMaker to create (and execute) the training job\ntraining_job = session.sagemaker_client.create_training_job(**training_params)",
"_____no_output_____"
]
],
[
[
"The training job has now been created by SageMaker and is currently running. Since we need the output of the training job, we may wish to wait until it has finished. We can do so by asking SageMaker to output the logs generated by the training job and continue doing so until the training job terminates.",
"_____no_output_____"
]
],
[
[
"session.logs_for_job(training_job_name, wait=True)",
"2019-07-31 12:41:53 Starting - Launching requested ML instances.........\n2019-07-31 12:42:59 Starting - Preparing the instances for training......\n2019-07-31 12:44:03 Downloading - Downloading input data...\n2019-07-31 12:44:58 Training - Training image download completed. Training in progress.\n2019-07-31 12:44:58 Uploading - Uploading generated training model.\n\u001b[31mArguments: train\u001b[0m\n\u001b[31m[2019-07-31:12:44:54:INFO] Running standalone xgboost training.\u001b[0m\n\u001b[31m[2019-07-31:12:44:54:INFO] File size need to be processed in the node: 0.03mb. Available memory size in the node: 8463.77mb\u001b[0m\n\u001b[31m[2019-07-31:12:44:54:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[31m[12:44:54] S3DistributionType set as FullyReplicated\u001b[0m\n\u001b[31m[12:44:54] 227x13 matrix with 2951 entries loaded from /opt/ml/input/data/train?format=csv&label_column=0&delimiter=,\u001b[0m\n\u001b[31m[2019-07-31:12:44:54:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[31m[12:44:54] S3DistributionType set as FullyReplicated\u001b[0m\n\u001b[31m[12:44:54] 112x13 matrix with 1456 entries loaded from /opt/ml/input/data/validation?format=csv&label_column=0&delimiter=,\u001b[0m\n\u001b[31m[12:44:54] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 8 extra nodes, 0 pruned nodes, max_depth=3\u001b[0m\n\u001b[31m[0]#011train-rmse:19.2174#011validation-rmse:19.9423\u001b[0m\n\u001b[31mMultiple eval metrics have been passed: 'validation-rmse' will be used for early stopping.\n\u001b[0m\n\u001b[31mWill train until validation-rmse hasn't improved in 10 rounds.\u001b[0m\n\u001b[31m[12:44:54] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 10 extra nodes, 2 pruned nodes, max_depth=3\u001b[0m\n\u001b[31m[1]#011train-rmse:15.7009#011validation-rmse:16.3686\u001b[0m\n\u001b[31m[12:44:54] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 14 extra nodes, 0 pruned nodes, 
max_depth=4\u001b[0m\n\u001b[31m[2]#011train-rmse:12.8901#011validation-rmse:13.5889\u001b[0m\n\u001b[31m[12:44:54] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 12 extra nodes, 0 pruned nodes, max_depth=4\u001b[0m\n\u001b[31m[3]#011train-rmse:10.6171#011validation-rmse:11.2856\u001b[0m\n\u001b[31m[12:44:54] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 20 extra nodes, 0 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[4]#011train-rmse:8.82356#011validation-rmse:9.51166\u001b[0m\n\u001b[31m[12:44:54] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 16 extra nodes, 2 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[5]#011train-rmse:7.35277#011validation-rmse:8.13019\u001b[0m\n\u001b[31m[12:44:54] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 18 extra nodes, 2 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[6]#011train-rmse:6.24568#011validation-rmse:7.0647\u001b[0m\n\u001b[31m[12:44:54] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 20 extra nodes, 0 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[7]#011train-rmse:5.30557#011validation-rmse:6.15661\u001b[0m\n\u001b[31m[12:44:54] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 14 extra nodes, 0 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[8]#011train-rmse:4.58931#011validation-rmse:5.50695\u001b[0m\n\u001b[31m[12:44:54] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 16 extra nodes, 0 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[9]#011train-rmse:4.07256#011validation-rmse:5.06682\u001b[0m\n\u001b[31m[12:44:54] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 22 extra nodes, 0 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[10]#011train-rmse:3.57697#011validation-rmse:4.69267\u001b[0m\n\u001b[31m[12:44:54] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 16 extra nodes, 0 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[11]#011train-rmse:3.20003#011validation-rmse:4.40985\u001b[0m\n\u001b[31m[12:44:54] src/tree/updater_prune.cc:74: tree 
pruning end, 1 roots, 18 extra nodes, 2 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[12]#011train-rmse:2.94949#011validation-rmse:4.19459\u001b[0m\n\u001b[31m[12:44:54] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 14 extra nodes, 2 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[13]#011train-rmse:2.75237#011validation-rmse:4.0049\u001b[0m\n\u001b[31m[12:44:54] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 10 extra nodes, 0 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[14]#011train-rmse:2.59985#011validation-rmse:3.89143\u001b[0m\n\u001b[31m[12:44:54] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 18 extra nodes, 0 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[15]#011train-rmse:2.47312#011validation-rmse:3.83874\u001b[0m\n\u001b[31m[12:44:54] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 12 extra nodes, 0 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[16]#011train-rmse:2.38778#011validation-rmse:3.77918\u001b[0m\n\u001b[31m[12:44:54] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 14 extra nodes, 0 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[17]#011train-rmse:2.26696#011validation-rmse:3.73478\u001b[0m\n\u001b[31m[12:44:54] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 16 extra nodes, 0 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[18]#011train-rmse:2.18208#011validation-rmse:3.72584\u001b[0m\n\u001b[31m[12:44:54] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 16 extra nodes, 0 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[19]#011train-rmse:2.103#011validation-rmse:3.66634\u001b[0m\n\u001b[31m[12:44:54] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 18 extra nodes, 0 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[20]#011train-rmse:2.01504#011validation-rmse:3.59642\u001b[0m\n\u001b[31m[12:44:54] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 16 extra nodes, 0 pruned nodes, 
max_depth=5\u001b[0m\n\u001b[31m[21]#011train-rmse:1.98067#011validation-rmse:3.58329\u001b[0m\n\u001b[31m[12:44:54] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 12 extra nodes, 0 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[22]#011train-rmse:1.93717#011validation-rmse:3.5266\u001b[0m\n\u001b[31m[12:44:54] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 14 extra nodes, 6 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[23]#011train-rmse:1.88343#011validation-rmse:3.51308\u001b[0m\n\u001b[31m[12:44:54] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 22 extra nodes, 2 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[24]#011train-rmse:1.82494#011validation-rmse:3.4952\u001b[0m\n\u001b[31m[12:44:54] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 18 extra nodes, 0 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[25]#011train-rmse:1.75528#011validation-rmse:3.45994\u001b[0m\n\u001b[31m[12:44:54] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 18 extra nodes, 2 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[26]#011train-rmse:1.71848#011validation-rmse:3.47191\u001b[0m\n\u001b[31m[12:44:54] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 20 extra nodes, 2 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[27]#011train-rmse:1.6747#011validation-rmse:3.44763\u001b[0m\n\u001b[31m[12:44:54] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 22 extra nodes, 0 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[28]#011train-rmse:1.61366#011validation-rmse:3.44348\u001b[0m\n\u001b[31m[12:44:54] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 14 extra nodes, 4 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[29]#011train-rmse:1.59846#011validation-rmse:3.44575\u001b[0m\n\u001b[31m[12:44:54] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 20 extra nodes, 2 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[30]#011train-rmse:1.52324#011validation-rmse:3.45522\u001b[0m\n\u001b[31m[12:44:54] src/tree/updater_prune.cc:74: 
tree pruning end, 1 roots, 16 extra nodes, 8 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[31]#011train-rmse:1.49188#011validation-rmse:3.46196\u001b[0m\n\u001b[31m[12:44:54] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 16 extra nodes, 0 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[32]#011train-rmse:1.43452#011validation-rmse:3.47185\u001b[0m\n\u001b[31m[12:44:54] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 14 extra nodes, 0 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[33]#011train-rmse:1.39613#011validation-rmse:3.45091\u001b[0m\n\u001b[31m[12:44:54] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 22 extra nodes, 0 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[34]#011train-rmse:1.34962#011validation-rmse:3.45513\u001b[0m\n\u001b[31m[12:44:54] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 12 extra nodes, 6 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[35]#011train-rmse:1.32161#011validation-rmse:3.42859\u001b[0m\n\u001b[31m[12:44:54] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 16 extra nodes, 8 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[36]#011train-rmse:1.29408#011validation-rmse:3.44106\u001b[0m\n\u001b[31m[12:44:54] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 18 extra nodes, 6 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[37]#011train-rmse:1.24011#011validation-rmse:3.44482\u001b[0m\n\u001b[31m[12:44:54] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 14 extra nodes, 4 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[38]#011train-rmse:1.20963#011validation-rmse:3.46474\u001b[0m\n\u001b[31m[12:44:54] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 20 extra nodes, 10 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[39]#011train-rmse:1.16533#011validation-rmse:3.49319\u001b[0m\n\u001b[31m[12:44:54] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 14 extra nodes, 0 pruned nodes, 
max_depth=5\u001b[0m\n\u001b[31m[40]#011train-rmse:1.13818#011validation-rmse:3.46749\u001b[0m\n\u001b[31m[12:44:54] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 12 extra nodes, 6 pruned nodes, max_depth=4\u001b[0m\n\u001b[31m[41]#011train-rmse:1.12363#011validation-rmse:3.48875\u001b[0m\n\u001b[31m[12:44:54] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 14 extra nodes, 8 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[42]#011train-rmse:1.10439#011validation-rmse:3.4872\u001b[0m\n\u001b[31m[12:44:54] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 12 extra nodes, 0 pruned nodes, max_depth=5\u001b[0m\n\u001b[31m[43]#011train-rmse:1.08959#011validation-rmse:3.48543\u001b[0m\n\u001b[31m[12:44:54] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 14 extra nodes, 14 pruned nodes, max_depth=4\u001b[0m\n\u001b[31m[44]#011train-rmse:1.06641#011validation-rmse:3.49961\u001b[0m\n\u001b[31m[12:44:54] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 12 extra nodes, 10 pruned nodes, max_depth=4\u001b[0m\n\u001b[31m[45]#011train-rmse:1.0458#011validation-rmse:3.47871\u001b[0m\n\u001b[31mStopping. Best iteration:\u001b[0m\n\u001b[31m[35]#011train-rmse:1.32161#011validation-rmse:3.42859\n\u001b[0m\n"
]
],
[
[
"### Build the model\n\nNow that the training job has completed, we have some model artifacts which we can use to build a model. Note that here we mean SageMaker's definition of a model, which is a collection of information about a specific algorithm along with the artifacts which result from a training job.",
"_____no_output_____"
]
],
[
[
"# We begin by asking SageMaker to describe for us the results of the training job. The data structure\n# returned contains a lot more information than we currently need, try checking it out yourself in\n# more detail.\ntraining_job_info = session.sagemaker_client.describe_training_job(TrainingJobName=training_job_name)\n\nmodel_artifacts = training_job_info['ModelArtifacts']['S3ModelArtifacts']",
"_____no_output_____"
],
[
"training_job_info",
"_____no_output_____"
],
[
"training_job_name",
"_____no_output_____"
],
[
"# Just like when we created a training job, the model name must be unique\nmodel_name = training_job_name + \"-model\"\n\n# We also need to tell SageMaker which container should be used for inference and where it should\n# retrieve the model artifacts from. In our case, the xgboost container that we used for training\n# can also be used for inference.\nprimary_container = {\n \"Image\": container,\n \"ModelDataUrl\": model_artifacts\n}\n\n# And lastly we construct the SageMaker model\nmodel_info = session.sagemaker_client.create_model(\n ModelName = model_name,\n ExecutionRoleArn = role,\n PrimaryContainer = primary_container)",
"_____no_output_____"
],
[
"model_info",
"_____no_output_____"
]
],
[
[
"## Step 5: Testing the model\n\nNow that we have fit our model to the training data, using the validation data to avoid overfitting, we can test our model. To do this we will make use of SageMaker's Batch Transform functionality. In other words, we need to set up and execute a batch transform job, similar to the way that we constructed the training job earlier.\n\n### Set up the batch transform job\n\nJust like when we were training our model, we first need to provide some information in the form of a data structure that describes the batch transform job which we wish to execute.\n\nWe will only be using some of the options available here but to see some of the additional options please see the SageMaker documentation for [creating a batch transform job](https://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateTransformJob.html).",
"_____no_output_____"
]
],
[
[
"# Just like in each of the previous steps, we need to make sure to name our job and the name should be unique.\ntransform_job_name = 'boston-xgboost-batch-transform-' + strftime(\"%Y-%m-%d-%H-%M-%S\", gmtime())\n\n# Now we construct the data structure which will describe the batch transform job.\ntransform_request = \\\n{\n \"TransformJobName\": transform_job_name,\n \n # This is the name of the model that we created earlier.\n \"ModelName\": model_name,\n \n # This describes how many compute instances should be used at once. If you happen to be doing a very large\n # batch transform job it may be worth running multiple compute instances at once.\n \"MaxConcurrentTransforms\": 1,\n \n # This says how big each individual request sent to the model should be, at most. One of the things that\n # SageMaker does in the background is to split our data up into chunks so that each chunks stays under\n # this size limit.\n \"MaxPayloadInMB\": 6,\n \n # Sometimes we may want to send only a single sample to our endpoint at a time, however in this case each of\n # the chunks that we send should contain multiple samples of our input data.\n \"BatchStrategy\": \"MultiRecord\",\n \n # This next object describes where the output data should be stored. Some of the more advanced options which\n # we don't cover here also describe how SageMaker should collect output from various batches.\n \"TransformOutput\": {\n \"S3OutputPath\": \"s3://{}/{}/batch-bransform/\".format(session.default_bucket(),prefix)\n },\n \n # Here we describe our input data. Of course, we need to tell SageMaker where on S3 our input data is stored, in\n # addition we need to detail the characteristics of our input data. In particular, since SageMaker may need to\n # split our data up into chunks, it needs to know how the individual samples in our data file appear. In our\n # case each line is its own sample and so we set the split type to 'line'. 
We also need to tell SageMaker what\n # type of data is being sent, in this case csv, so that it can properly serialize the data.\n \"TransformInput\": {\n \"ContentType\": \"text/csv\",\n \"SplitType\": \"Line\",\n \"DataSource\": {\n \"S3DataSource\": {\n \"S3DataType\": \"S3Prefix\",\n \"S3Uri\": test_location,\n }\n }\n },\n \n # And lastly we tell SageMaker what sort of compute instance we would like it to use.\n \"TransformResources\": {\n \"InstanceType\": \"ml.m4.xlarge\",\n \"InstanceCount\": 1\n }\n}",
"_____no_output_____"
]
],
[
[
"### Execute the batch transform job\n\nNow that we have created the request data structure, it is time to ask SageMaker to set up and run our batch transform job. Just like in the previous steps, SageMaker performs these tasks in the background so that if we want to wait for the transform job to terminate (and ensure the job is progressing) we can ask SageMaker to wait of the transform job to complete.",
"_____no_output_____"
]
],
[
[
"transform_response = session.sagemaker_client.create_transform_job(**transform_request)",
"_____no_output_____"
],
[
"transform_desc = session.wait_for_transform_job(transform_job_name)",
"........................................!\n"
]
],
[
[
"### Analyze the results\n\nNow that the transform job has completed, the results are stored on S3 as we requested. Since we'd like to do a bit of analysis in the notebook we can use some notebook magic to copy the resulting output from S3 and save it locally.",
"_____no_output_____"
]
],
[
[
"transform_output = \"s3://{}/{}/batch-bransform/\".format(session.default_bucket(),prefix)",
"_____no_output_____"
],
[
"transform_output",
"_____no_output_____"
],
[
"!aws s3 cp --recursive $transform_output $data_dir",
"Completed 2.3 KiB/2.3 KiB (4.2 KiB/s) with 1 file(s) remaining\rdownload: s3://sagemaker-us-west-2-394610559328/boston-xgboost-LL/batch-bransform/test.csv.out to ../data/boston/test.csv.out\r\n"
]
],
[
[
"To see how well our model works we can create a simple scatter plot between the predicted and actual values. If the model was completely accurate the resulting scatter plot would look like the line $x=y$. As we can see, our model seems to have done okay but there is room for improvement.",
"_____no_output_____"
]
],
[
[
"Y_pred = pd.read_csv(os.path.join(data_dir, 'test.csv.out'), header=None)",
"_____no_output_____"
],
[
"plt.scatter(Y_test, Y_pred)\nplt.xlabel(\"Median Price\")\nplt.ylabel(\"Predicted Price\")\nplt.title(\"Median Price vs Predicted Price\")",
"_____no_output_____"
]
],
[
[
"## Optional: Clean up\n\nThe default notebook instance on SageMaker doesn't have a lot of excess disk space available. As you continue to complete and execute notebooks you will eventually fill up this disk space, leading to errors which can be difficult to diagnose. Once you are completely finished using a notebook it is a good idea to remove the files that you created along the way. Of course, you can do this from the terminal or from the notebook hub if you would like. The cell below contains some commands to clean up the created files from within the notebook.",
"_____no_output_____"
]
],
[
[
"# First we will remove all of the files contained in the data_dir directory\n!rm $data_dir/*\n\n# And then we delete the directory itself\n!rmdir $data_dir",
"_____no_output_____"
],
[
"print(\"Done!\")",
"Done!\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
eca5ab0b06675443cf4d5aaa442cc90606bd4cfa | 662,842 | ipynb | Jupyter Notebook | titanic/logistic-regression-with-stratifiedkfold.ipynb | kupparsudhir/kaggle | 29966f1277067022e00c568e7576a3b94f295383 | [
"MIT"
] | 4 | 2018-10-10T00:32:57.000Z | 2020-11-08T04:55:07.000Z | titanic/logistic-regression-with-stratifiedkfold.ipynb | sudhirnl7/kaggle | 29966f1277067022e00c568e7576a3b94f295383 | [
"MIT"
] | null | null | null | titanic/logistic-regression-with-stratifiedkfold.ipynb | sudhirnl7/kaggle | 29966f1277067022e00c568e7576a3b94f295383 | [
"MIT"
] | null | null | null | 256.915504 | 102,774 | 0.882946 | [
[
[
"# Import library",
"_____no_output_____"
]
],
[
[
"#Import library\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import confusion_matrix, roc_auc_score ,roc_curve,auc\nfrom sklearn.model_selection import GridSearchCV,StratifiedKFold\nfrom sklearn.preprocessing import LabelEncoder\nseed =45\n% matplotlib inline\nplt.style.use('fivethirtyeight')",
"_____no_output_____"
]
],
[
[
"# Import data set",
"_____no_output_____"
]
],
[
[
"path = '../input/'\n#path = ''\ntrain = pd.read_csv(path+'train.csv')\ntest = pd.read_csv(path+'test.csv')\nprint('Number rows and columns:',train.shape)\nprint('Number rows and columns:',test.shape)",
"Number rows and columns: (891, 12)\nNumber rows and columns: (418, 11)\n"
]
],
[
[
"# Explore data set",
"_____no_output_____"
]
],
[
[
"train.head(5)",
"_____no_output_____"
]
],
[
[
"# Dependent varaiable",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(12,6))\nsns.countplot(train['Survived'],palette='Blues')\nplt.title('Dependent variable distribution plot')\nplt.xlabel('Survived')\n\ntrain['Survived'].value_counts()",
"_____no_output_____"
]
],
[
[
"The 'target' variable is balanced ",
"_____no_output_____"
],
[
"# CORELATION PLOT",
"_____no_output_____"
]
],
[
[
"cor = train.corr()\nplt.figure(figsize=(12,6))\nsns.heatmap(cor,cmap='Set1',annot=True)",
"_____no_output_____"
]
],
[
[
"# Missing value is data set",
"_____no_output_____"
]
],
[
[
"k= pd.DataFrame()\nk['train']= train.isnull().sum()\nk['test'] = test.isnull().sum()\nk.T",
"_____no_output_____"
]
],
[
[
"Missing value in test train data set are in same propotion and same column",
"_____no_output_____"
]
],
[
[
"def missing_value(df):\n col = df.columns\n for i in col:\n if df[i].isnull().sum()>0:\n df[i].fillna(df[i].mode()[0],inplace=True)",
"_____no_output_____"
],
[
"missing_value(train)\nmissing_value(test)",
"_____no_output_____"
],
[
"def uniq(df):\n col = df.columns\n k = pd.DataFrame(index=col)\n for i in col:\n k['No of Unique'] = df[i].nunique()\n k['first Unique values'] = df[i].unique()[0]\n k['sencond Unique values'] = df[i].unique()[1]\n return k.T\nuniq(train)",
"_____no_output_____"
],
[
"def category_type(df):\n col = df.columns\n for i in col:\n if df[i].nunique()<=7:\n df[i] = df[i].astype('category')\ncategory_type(train)\ncategory_type(test)",
"_____no_output_____"
]
],
[
[
"# Univariate analysis",
"_____no_output_____"
]
],
[
[
"train.select_dtypes(include=['category']).head()",
"_____no_output_____"
],
[
"fig ,ax = plt.subplots(2,2,figsize=(16,16))\nax1,ax2,ax3,ax4 = ax.flatten()\nsns.countplot(data=train,x='Pclass',hue='Survived',palette='gist_rainbow',ax=ax1)\nsns.countplot(data=train,x='Sex',hue='Survived',palette='viridis',ax=ax2)\nsns.countplot(data=train,x='SibSp',hue='Survived',palette='viridis',ax=ax3)\nsns.countplot(data=train,x='Parch',hue='Survived',palette='gist_rainbow',ax=ax4)",
"_____no_output_____"
],
[
"plt.figure(figsize=(16,4))\nsns.countplot(data=train,x='Embarked',hue='Survived',palette='gist_rainbow')",
"_____no_output_____"
],
[
"train[['Age','Fare']].describe()",
"_____no_output_____"
],
[
"fig,ax = plt.subplots(2,2,figsize=(16,16))\nax1,ax2,ax3,ax4 = ax.flatten()\nsns.distplot(train['Age'],bins=20,color='r',ax=ax1)\nsns.boxplot(y='Age',x='Survived',data=train,ax=ax2)\nsns.pointplot(y='Age',x='Survived',data=train,ax=ax3)\nsns.violinplot(y='Age',x='Survived',data=train,ax=ax4)",
"_____no_output_____"
],
[
"fig,ax = plt.subplots(2,2,figsize = (16,16))\nax1,ax2,ax3,ax4 = ax.flatten()\nsns.distplot(train['Fare'],bins=50,color='r',ax=ax1)\nsns.boxplot(y='Fare',x='Survived',data=train,ax=ax2)\nsns.pointplot(y='Fare',x='Survived',data=train,ax=ax3)\nsns.violinplot(y='Fare',x='Survived',data=train,ax=ax4)",
"_____no_output_____"
]
],
[
[
"# Extract features from Name",
"_____no_output_____"
]
],
[
[
"corpus = [w.split() for w in train['Name']]\ncorpus[0:20]",
"_____no_output_____"
],
[
"def Name_extract(df):\n k = []\n corpus = [w.split() for w in df['Name']]\n \n for i in corpus:\n if 'Mr.' in i:\n k.append('Mr.')\n elif 'Mrs.' in i:\n k.append('Mrs')\n elif 'Miss.' in i:\n k.append('Miss.')\n elif 'Master.' in i:\n k.append('Master.')\n elif 'Dr.' in i:\n k.append('Dr.')\n elif 'Capt.' in i:\n k.append('Capt.')\n elif 'Don.' in i:\n k.append('Don.')\n elif 'Col.' in i:\n k.append('Col.')\n elif 'Major.' in i:\n k.append('Major.')\n else:\n k.append('other')\n \n no_word = [len(l.split()) for l in df['Name']]\n no_char = [len(m) for m in df['Name']]\n df['name_category'],df['no_word'],df['no_char'] = k, no_word,no_char\n df['name_category'] = df['name_category'].astype('category')\n df['no_word'] = df['no_word'].astype('category')\n return df\n",
"_____no_output_____"
],
[
"train = Name_extract(train)\ntest = Name_extract(test)",
"_____no_output_____"
],
[
"train['name_category'].value_counts()",
"_____no_output_____"
]
],
[
[
"Look thier is 1 captain, 1 Don , 2 Col , 2 Major ,7 Doctor in data set.\nWe have categories for all the name.",
"_____no_output_____"
]
],
[
[
"fig,ax = plt.subplots(2,1,figsize=(16,8))\nax1,ax2 = ax.flatten()\nsns.countplot(data=train,x='name_category',hue='Survived',ax=ax1,palette='gist_rainbow')\nsns.countplot(data=train,x='no_word',hue='Survived',palette='viridis',ax=ax2)",
"_____no_output_____"
]
],
[
[
"The captain of the ship did not survived",
"_____no_output_____"
]
],
[
[
"fig,ax = plt.subplots(2,2,figsize=(16,16))\nax1,ax2,ax3,ax4 = ax.flatten()\nsns.distplot(train['no_char'],bins=50,color='r',ax=ax1)\nsns.boxplot(data=train,y='no_char',x='Survived',ax=ax2)\nsns.pointplot(data=train,y='no_char',x='Survived',ax=ax3)\nsns.violinplot(data=train,y='no_char',x='Survived',ax=ax4)",
"_____no_output_____"
]
],
[
[
"# Extract feature in Cabin variable",
"_____no_output_____"
]
],
[
[
"def extract_cabin(df): \n no_cabin = [len(w.split()) for w in df['Cabin']]\n df['no_cabin'] = no_cabin\n df['no_cabin'] = df['no_cabin'].astype('category')\n\nextract_cabin(train)\nextract_cabin(test)",
"_____no_output_____"
],
[
"plt.figure(figsize=(16,8))\nsns.countplot(data=train,x='no_cabin',hue='Survived',palette='gist_rainbow')",
"_____no_output_____"
]
],
[
[
"# Co relation plot",
"_____no_output_____"
]
],
[
[
"cor = train.corr()\nplt.figure(figsize=(10,4))\nsns.heatmap(cor,annot=True)\nplt.tight_layout()",
"_____no_output_____"
]
],
[
[
"# Determine outliers in dataset",
"_____no_output_____"
]
],
[
[
"def outlier(df,columns):\n for i in columns:\n quartile_1,quartile_3 = np.percentile(df[i],[25,75])\n quartile_f,quartile_l = np.percentile(df[i],[1,99])\n IQR = quartile_3-quartile_1\n lower_bound = quartile_1 - (1.5*IQR)\n upper_bound = quartile_3 + (1.5*IQR)\n print(i,lower_bound,upper_bound,quartile_f,quartile_l)\n \n df[i].loc[df[i] < lower_bound] = quartile_f\n df[i].loc[df[i] > upper_bound] = quartile_l\nnum_col = ['Fare','Age','no_char'] \noutlier(train,num_col)\noutlier(test,num_col)",
"Fare -26.724 65.6344 0.0 249.00622\nAge"
]
],
[
[
"# One Hot Encoding",
"_____no_output_____"
]
],
[
[
"def OHE(df1,df2):\n #cat_col = column\n\n len_df1 = df1.shape[0]\n \n df = pd.concat([df1,df2],ignore_index=True)\n cat_col = df1.select_dtypes(include =['category']).columns\n c2,c3 = [],{}\n \n print('Categorical feature',len(cat_col))\n for c in cat_col:\n if df[c].nunique()>2 :\n c2.append(c)\n c3[c] = 'ohe_'+c\n \n df = pd.get_dummies(df, prefix=c3, columns=c2,drop_first=True)\n\n df1 = df.loc[:len_df1-1]\n df2 = df.loc[len_df1:]\n print('Train',df1.shape)\n print('Test',df2.shape)\n return df1,df2",
"_____no_output_____"
],
[
"train1,test1 = OHE(train,test)",
"Categorical feature 9\nTrain (891, 45)\nTest (418, 45)\n"
]
],
[
[
"# Encoder ",
"_____no_output_____"
]
],
[
[
"le = LabelEncoder()\n#col =['Sex']\ntrain1['Sex'] = le.fit_transform(train1['Sex'])\ntest1['Sex'] = le.fit_transform(test1['Sex'])\n",
"_____no_output_____"
],
[
"train1.head().T",
"_____no_output_____"
]
],
[
[
"# Split data set",
"_____no_output_____"
]
],
[
[
"train1.columns",
"_____no_output_____"
],
[
"unwanted = ['PassengerId','Survived','Name','Cabin','Ticket']\nX = train1.drop(unwanted,axis=1)\ny = train1['Survived'].astype('category')\nx_test = test1.drop(unwanted,axis=1)\n#del train1,test1",
"_____no_output_____"
]
],
[
[
"# Hyperparameter tuning",
"_____no_output_____"
]
],
[
[
"#Grid Search\nlogreg = LogisticRegression(class_weight='balanced')\nparam = {'C':[0.001,0.003,0.005,0.01,0.03,0.05,0.1,0.3,0.5,1,2,3,3,4,5,10,20]}\nclf = GridSearchCV(logreg,param,scoring='roc_auc',refit=True,cv=10)\nclf.fit(X,y)\nprint('Best roc_auc: {:.4}, with best C: {}'.format(clf.best_score_, clf.best_params_))",
"Best roc_auc: 0.8616, with best C: {'C': 1}\n"
]
],
[
[
"# Logistic Regression model\nLogistic regression is used for modelling. The data set is split using Stratified Kfold. In each split model is created and predicted using that model. The final predicted value is average of all model. ",
"_____no_output_____"
]
],
[
[
"kf = StratifiedKFold(n_splits=5,shuffle=True,random_state=seed)\npred_test_full =0\ncv_score =[]\ni=1\nfor train_index,test_index in kf.split(X,y):\n print('{} of KFold {}'.format(i,kf.n_splits))\n xtr,xvl = X.loc[train_index],X.loc[test_index]\n ytr,yvl = y.loc[train_index],y.loc[test_index]\n \n #model\n lr = LogisticRegression(C=2)\n lr.fit(xtr,ytr)\n score = roc_auc_score(yvl,lr.predict(xvl))\n print('ROC AUC score:',score)\n cv_score.append(score) \n pred_test = lr.predict_proba(x_test)[:,1]\n pred_test_full +=pred_test\n i+=1",
"1 of KFold 5\nROC AUC score: 0.835902503294\n2 of KFold 5\nROC AUC score: 0.783201581028\n3 of KFold 5\nROC AUC score: 0.81243315508\n4 of KFold 5\nROC AUC score: 0.809625668449\n5 of KFold 5\nROC AUC score: 0.773070696168\n"
],
[
"print('Confusion matrix\\n',confusion_matrix(yvl,lr.predict(xvl)))\nprint('Cv',cv_score,'\\nMean cv Score',np.mean(cv_score))",
"Confusion matrix\n [[98 11]\n [24 44]]\nCv [0.83590250329380766, 0.7832015810276679, 0.81243315508021385, 0.80962566844919781, 0.77307069616837554] \nMean cv Score 0.802846720804\n"
],
[
"lr.coef_",
"_____no_output_____"
],
[
"lr.score(xvl,yvl)",
"_____no_output_____"
]
],
[
[
"# Reciever Operating Characteristics",
"_____no_output_____"
]
],
[
[
"proba = lr.predict_proba(xvl)[:,1]\nfrp,trp, threshold = roc_curve(yvl,proba)\nroc_auc_ = auc(frp,trp)\n\nplt.figure(figsize=(14,8))\nplt.title('Reciever Operating Characteristics')\nplt.plot(frp,trp,'r',label = 'AUC = %0.2f' % roc_auc_)\nplt.legend(loc='lower right')\nplt.plot([0,1],[0,1],'b--')\nplt.ylabel('True positive rate')\nplt.xlabel('False positive rate')",
"_____no_output_____"
]
],
[
[
"# Predict for unsen data set",
"_____no_output_____"
]
],
[
[
"y_pred = pred_test_full/5\nsubmit = pd.DataFrame({'PassengerId':test['PassengerId'],'Survived':y_pred})\nsubmit['Survived'] = submit['Survived'].apply(lambda x: 1 if x>0.5 else 0)\n#submit.to_csv('lr_titanic.csv.gz',index=False,compression='gzip') \nsubmit.to_csv('lr_titanic.csv',index=False) ",
"_____no_output_____"
],
[
"submit.head()",
"_____no_output_____"
]
],
[
[
"# Thank you for visiting",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
eca5b011034481ef92c1a3ae7f5d71708aa24715 | 12,247 | ipynb | Jupyter Notebook | Extras/new-cross-validation-method.ipynb | nan645/Kaggle_ML_Course | e4b3f6f04a62c123de035c86c59718e58d4eb7ca | [
"Apache-2.0"
] | null | null | null | Extras/new-cross-validation-method.ipynb | nan645/Kaggle_ML_Course | e4b3f6f04a62c123de035c86c59718e58d4eb7ca | [
"Apache-2.0"
] | null | null | null | Extras/new-cross-validation-method.ipynb | nan645/Kaggle_ML_Course | e4b3f6f04a62c123de035c86c59718e58d4eb7ca | [
"Apache-2.0"
] | null | null | null | 12,247 | 12,247 | 0.712174 | [
[
[
"In this competition there are a lot of discussions and notebooks on hyper-parameter tuning, XGBoost and stacking, since they are the most effective techniques.\n\nYet, something is missing. For instance, there little reasoning has been done on the best way to cross-validate.\n\nActually, if you watch carefully the data, it seems like data distributions are segregated into specific portions of space, something reminiscent ot me of the Madelon dataset created by Isabelle Guyon, one of the ideators of the Support Vector Machines (see for Guyon's contribution: https://www.kdnuggets.com/2016/07/guyon-data-mining-history-svm-support-vector-machines.html for the Madelon dataset see instead: https://archive.ics.uci.edu/ml/datasets/madelon).\n\nI therefore tried to stratifiy my folds based on a k-means clustering of the non-noisy data (see https://www.kaggle.com/c/30-days-of-ml/discussion/267931) and my local cv has become more reliable (very correlated with the public leaderboard) and my models are performing much better with cv prediction.\n\nTry it and let me know, if it works also on your models!\n\nHappy Kaggling!",
"_____no_output_____"
]
],
[
[
"# Importing core libraries\nimport numpy as np\nimport pandas as pd\nimport joblib\n\n# Importing from Scikit-Learn\nfrom sklearn.model_selection import StratifiedKFold, KFold\nfrom sklearn.preprocessing import OrdinalEncoder\nfrom sklearn.cluster import KMeans\nfrom sklearn.decomposition import PCA",
"_____no_output_____"
],
[
"# Loading data \nX_train = pd.read_csv(\"../input/30-days-of-ml/train.csv\")\nX_test = pd.read_csv(\"../input/30-days-of-ml/test.csv\")",
"_____no_output_____"
],
[
"# Preparing data as a tabular matrix\ny_train = X_train.target\nX_train = X_train.set_index('id').drop('target', axis='columns')\nX_test = X_test.set_index('id')",
"_____no_output_____"
],
[
"# Pointing out categorical features\ncategoricals = [item for item in X_train.columns if 'cat' in item]",
"_____no_output_____"
],
[
"# Dealing with categorical data using get_dummies\ndummies = pd.get_dummies(X_train.append(X_test)[categoricals])\nX_train[dummies.columns] = dummies.iloc[:len(X_train), :]\nX_test[dummies.columns] = dummies.iloc[len(X_train): , :]\ndel(dummies)",
"_____no_output_____"
],
[
"# Dealing with categorical data using OrdinalEncoder (only when there are 3 or more levels)\nordinal_encoder = OrdinalEncoder()\nX_train[categoricals[3:]] = ordinal_encoder.fit_transform(X_train[categoricals[3:]]).astype(int)\nX_test[categoricals[3:]] = ordinal_encoder.transform(X_test[categoricals[3:]]).astype(int)\nX_train = X_train.drop(categoricals[:3], axis=\"columns\")\nX_test = X_test.drop(categoricals[:3], axis=\"columns\")",
"_____no_output_____"
],
[
"# Feature selection (https://www.kaggle.com/lucamassaron/tutorial-feature-selection-with-boruta-shap)\nimportant_features = ['cat1_A', 'cat1_B', 'cat5', 'cat8', 'cat8_C', 'cat8_E', 'cont0', \n 'cont1', 'cont10', 'cont11', 'cont12', 'cont13', 'cont2', 'cont3', \n 'cont4', 'cont5', 'cont6', 'cont7', 'cont8', 'cont9']\n\ncategoricals = ['cat5', 'cat8']\n\nX_train = X_train[important_features]\nX_test = X_test[important_features]",
"_____no_output_____"
],
[
"# Stratifying the data\n\npca = PCA(n_components=16, random_state=0)\nkm = KMeans(n_clusters=32, random_state=0)\n\npca.fit(X_train)\nkm.fit(pca.transform(X_train))\n\nprint(np.unique(km.labels_, return_counts=True))\n\ny_stratified = km.labels_",
"(array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,\n 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31],\n dtype=int32), array([ 8885, 23161, 18215, 13810, 13461, 18955, 8334, 8874, 3903,\n 11152, 8617, 3471, 8492, 10460, 2726, 3428, 12002, 8632,\n 7151, 7087, 14235, 10591, 2337, 12269, 3762, 10066, 1579,\n 14104, 14622, 2005, 7083, 6531]))\n"
],
[
"# Creating your folds for repeated use (for instance, stacking)\n\nfolds = 10\nseeds = [42, 0, 101]\nfold_idxs = list()\n\nfor seed in seeds:\n skf = StratifiedKFold(n_splits=folds,\n shuffle=True, \n random_state=seed)\n\n fold_idxs.append(list(skf.split(X_train, y_stratified)))",
"_____no_output_____"
],
[
"# Checking the produced folds\nfor j, fold_idxs_ in enumerate(fold_idxs):\n print(f\"\\n--- seed={seeds[j]} ---\")\n for k, (train_idx, validation_idx) in enumerate(fold_idxs_):\n print(f\"fold {k} train idxs: {len(train_idx)} validation idxs: {len(validation_idx)} -> {validation_idx[:10]}\")",
"\n--- seed=42 ---\nfold 0 train idxs: 270000 validation idxs: 30000 -> [ 1 17 21 26 31 34 38 40 54 62]\nfold 1 train idxs: 270000 validation idxs: 30000 -> [ 2 11 27 33 44 45 48 52 63 64]\nfold 2 train idxs: 270000 validation idxs: 30000 -> [ 4 5 12 15 23 39 42 53 58 82]\nfold 3 train idxs: 270000 validation idxs: 30000 -> [16 19 29 30 55 57 66 72 75 78]\nfold 4 train idxs: 270000 validation idxs: 30000 -> [ 10 13 18 71 80 94 98 103 120 133]\nfold 5 train idxs: 270000 validation idxs: 30000 -> [ 6 8 22 24 25 28 32 61 83 85]\nfold 6 train idxs: 270000 validation idxs: 30000 -> [ 41 50 59 65 67 74 90 92 102 134]\nfold 7 train idxs: 270000 validation idxs: 30000 -> [ 0 3 14 36 56 73 87 93 97 99]\nfold 8 train idxs: 270000 validation idxs: 30000 -> [ 9 37 46 47 69 76 77 96 115 123]\nfold 9 train idxs: 270000 validation idxs: 30000 -> [ 7 20 35 43 49 51 60 68 70 86]\n\n--- seed=0 ---\nfold 0 train idxs: 270000 validation idxs: 30000 -> [ 4 17 26 31 41 56 66 96 102 125]\nfold 1 train idxs: 270000 validation idxs: 30000 -> [13 18 28 29 34 40 47 49 65 68]\nfold 2 train idxs: 270000 validation idxs: 30000 -> [ 1 6 32 37 38 50 73 79 100 116]\nfold 3 train idxs: 270000 validation idxs: 30000 -> [ 3 5 12 15 23 24 25 63 74 78]\nfold 4 train idxs: 270000 validation idxs: 30000 -> [ 7 9 14 16 44 48 52 64 70 90]\nfold 5 train idxs: 270000 validation idxs: 30000 -> [ 0 19 27 33 36 58 67 83 88 92]\nfold 6 train idxs: 270000 validation idxs: 30000 -> [ 11 51 87 89 101 114 121 131 142 146]\nfold 7 train idxs: 270000 validation idxs: 30000 -> [ 2 30 43 46 54 61 69 72 76 86]\nfold 8 train idxs: 270000 validation idxs: 30000 -> [ 8 10 22 55 57 59 60 62 80 81]\nfold 9 train idxs: 270000 validation idxs: 30000 -> [ 20 21 35 39 42 45 53 75 84 107]\n\n--- seed=101 ---\nfold 0 train idxs: 270000 validation idxs: 30000 -> [19 31 34 37 38 41 52 71 79 81]\nfold 1 train idxs: 270000 validation idxs: 30000 -> [10 20 27 29 30 46 49 55 61 66]\nfold 2 train idxs: 270000 validation idxs: 30000 -> [17 
23 35 36 44 54 56 83 91 99]\nfold 3 train idxs: 270000 validation idxs: 30000 -> [ 5 21 28 47 50 62 70 77 78 80]\nfold 4 train idxs: 270000 validation idxs: 30000 -> [ 14 18 25 64 72 73 100 112 113 127]\nfold 5 train idxs: 270000 validation idxs: 30000 -> [ 9 16 32 45 59 65 68 84 103 130]\nfold 6 train idxs: 270000 validation idxs: 30000 -> [ 0 1 2 6 12 15 24 33 48 101]\nfold 7 train idxs: 270000 validation idxs: 30000 -> [ 11 53 75 76 87 104 106 120 132 133]\nfold 8 train idxs: 270000 validation idxs: 30000 -> [ 3 4 8 13 22 26 42 51 63 88]\nfold 9 train idxs: 270000 validation idxs: 30000 -> [ 7 39 40 43 57 58 60 92 109 117]\n"
],
[
"# Storing into the notebook for future use\njoblib.dump(fold_idxs, './fold_idxs.job')",
"_____no_output_____"
],
[
"# Retrieving from the notebook\nfold_idxs = joblib.load('./fold_idxs.job')",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
eca5b7ade130721763b789a41aeee724b567046d | 123,010 | ipynb | Jupyter Notebook | parcels/examples/tutorial_nemo_curvilinear.ipynb | MariusWiggert/parcels | 265af7415472929c21ee64341ef6a49648a1235a | [
"MIT"
] | 1 | 2020-03-20T10:22:21.000Z | 2020-03-20T10:22:21.000Z | parcels/examples/tutorial_nemo_curvilinear.ipynb | MariusWiggert/parcels | 265af7415472929c21ee64341ef6a49648a1235a | [
"MIT"
] | null | null | null | parcels/examples/tutorial_nemo_curvilinear.ipynb | MariusWiggert/parcels | 265af7415472929c21ee64341ef6a49648a1235a | [
"MIT"
] | null | null | null | 624.416244 | 66,484 | 0.947045 | [
[
[
"## Tutorial on how to use Parcels on NEMO curvilinear grids",
"_____no_output_____"
],
[
"Parcels also supports [curvilinear grids](https://www.nemo-ocean.eu/doc/node108.html) such as those used in the [NEMO models](https://www.nemo-ocean.eu/).\n\nWe will be using the example data in the `NemoCurvilinear_data/` directory. These fiels are a purely zonal flow on an aqua-planet (so zonal-velocity is 1 m/s and meridional-velocity is 0 m/s everywhere, and no land). However, because of the curvilinear grid, the `U` and `V` fields vary north of 20N.",
"_____no_output_____"
]
],
[
[
"from parcels import FieldSet, ParticleSet, JITParticle, ParticleFile, plotTrajectoriesFile\nfrom parcels import AdvectionRK4\nimport numpy as np\nfrom datetime import timedelta as delta\n%matplotlib inline",
"_____no_output_____"
]
],
[
[
"We can create a `FieldSet` just like we do for normal grids.\nNote that NEMO is discretised on a C-grid. U and V velocities are not located on the same nodes (see https://www.nemo-ocean.eu/doc/node19.html ).\n\n```\n __V1__\n| |\nU0 U1\n|__V0__|\n```\n\nTo interpolate U, V velocities on the C-grid, Parcels needs to read the f-nodes, which are located on the corners of the cells (for indexing details: https://www.nemo-ocean.eu/doc/img360.png ).",
"_____no_output_____"
]
],
[
[
"data_path = 'NemoCurvilinear_data/'\nfilenames = {'U': {'lon': data_path + 'mesh_mask.nc4',\n 'lat': data_path + 'mesh_mask.nc4',\n 'data': data_path + 'U_purely_zonal-ORCA025_grid_U.nc4'},\n 'V': {'lon': data_path + 'mesh_mask.nc4',\n 'lat': data_path + 'mesh_mask.nc4',\n 'data': data_path + 'V_purely_zonal-ORCA025_grid_V.nc4'}}\nvariables = {'U': 'U',\n 'V': 'V'}\ndimensions = {'lon': 'glamf', 'lat': 'gphif', 'time': 'time_counter'}\nfield_set = FieldSet.from_nemo(filenames, variables, dimensions, allow_time_extrapolation=True)",
"WARNING: Casting lon data to np.float32\nWARNING: Casting lat data to np.float32\nWARNING: Casting depth data to np.float32\n"
]
],
[
[
"And we can plot the `U` field.",
"_____no_output_____"
]
],
[
[
"field_set.U.show()",
"_____no_output_____"
]
],
[
[
"As you see above, the `U` field indeed is 1 m/s south of 20N, but varies with longitude and latitude north of that\n\nNow we can run particles as normal. Parcels will take care to rotate the `U` and `V` fields",
"_____no_output_____"
]
],
[
[
"# Start 20 particles on a meridional line at 180W\nnpart = 20\nlonp = -180 * np.ones(npart)\nlatp = [i for i in np.linspace(-70, 88, npart)]\n\n# Create a periodic boundary condition kernel\ndef periodicBC(particle, fieldset, time):\n if particle.lon > 180:\n particle.lon -= 360\n\npset = ParticleSet.from_list(field_set, JITParticle, lon=lonp, lat=latp)\npfile = ParticleFile(\"nemo_particles\", pset, outputdt=delta(days=1))\nkernels = pset.Kernel(AdvectionRK4) + periodicBC\npset.execute(kernels, runtime=delta(days=50), dt=delta(hours=6),\n output_file=pfile)",
"INFO: Compiled JITParticleAdvectionRK4periodicBC ==> /var/folders/r2/8593q8z93kd7t4j9kbb_f7p00000gr/T/parcels-504/31a86092b3849f0d767018c4324d26a8.so\n100% (4320000.0 of 4320000.0) |##########| Elapsed Time: 0:00:01 Time: 0:00:01\n"
]
],
[
[
"And then we can plot these trajectories. As expected, all trajectories go exactly zonal and due to the curvature of the earth, ones at higher latitude move more degrees eastward (even though the distance in km is equal for all particles)",
"_____no_output_____"
]
],
[
[
"pfile.export() # export the trajectory data to a netcdf file\nplotTrajectoriesFile(\"nemo_particles.nc\");",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
eca5b8728e6a94720f5c55fb01d408bcfe96d6c8 | 6,795 | ipynb | Jupyter Notebook | user/tutorials/iris.ipynb | phanhongan/h1st | ddde59018b7ad44133135d2bfc7f47dc7277e00e | [
"Apache-2.0"
] | 2 | 2021-08-20T18:11:54.000Z | 2021-09-28T15:59:58.000Z | user/tutorials/iris.ipynb | phanhongan/h1st | ddde59018b7ad44133135d2bfc7f47dc7277e00e | [
"Apache-2.0"
] | null | null | null | user/tutorials/iris.ipynb | phanhongan/h1st | ddde59018b7ad44133135d2bfc7f47dc7277e00e | [
"Apache-2.0"
] | 1 | 2021-06-05T01:30:47.000Z | 2021-06-05T01:30:47.000Z | 33.472906 | 319 | 0.54805 | [
[
[
"# A Tutorial on Modelers/Models",
"_____no_output_____"
],
[
"This tutorial shows how you can create Modeler and Model using the `iris` dataset as an example.\nFirstly, let's create an `MLModeler` with `load_data` to load the `iris` dataset and generate training data to train a LogisticRegression base model in `train`. The `h1st` framework provides the `build` method which calls `load_data` and `train` and produces the corresponding `MLModel` which you needs to define.",
"_____no_output_____"
]
],
[
[
"import os\nfrom typing import Any, Dict\nimport tempfile\nimport pandas as pd\nfrom sklearn import datasets\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.metrics import f1_score\nfrom h1st.model.model import Model\nfrom h1st.model.ml_modeler import MLModeler\nfrom h1st.model.ml_model import MLModel\n\nclass MyMLModeler(MLModeler):\n def __init__(self):\n self.stats = {}\n self.example_test_data_ratio = 0.2\n\n def load_data(self) -> Dict:\n df_raw = datasets.load_iris(as_frame=True).frame\n return self.generate_training_data({'df_raw': df_raw})\n \n def preprocess(self, data):\n self.stats['scaler'] = StandardScaler()\n return self.stats['scaler'].fit_transform(data) \n \n def generate_training_data(self, data: Dict[str, Any]) -> Dict[str, Any]:\n df_raw = data['df_raw']\n df_raw.columns = ['sepal_length','sepal_width','petal_length','petal_width', 'species']\n \n self.stats['targets'] = ['Setosa', 'Versicolour', 'Virginica']\n self.stats['targets_dict'] = {k: v for v, k in enumerate(self.stats['targets'])}\n\n # Shuffle all the df_raw\n df_raw = df_raw.sample(frac=1, random_state=5).reset_index(drop=True)\n \n # Preprocess data\n df_raw.loc[:, 'sepal_length':'petal_width'] = self.preprocess(\n df_raw.loc[:, 'sepal_length':'petal_width'])\n\n # Split to training and testing data\n n = df_raw.shape[0]\n n_test = int(n * self.example_test_data_ratio)\n training_data = df_raw.iloc[n_test:, :].reset_index(drop=True)\n test_data = df_raw.iloc[:n_test, :].reset_index(drop=True)\n\n # Split the data to features and labels\n train_data_x = training_data.loc[:, 'sepal_length':'petal_width']\n train_data_y = training_data['species']\n test_data_x = test_data.loc[:, 'sepal_length':'petal_width']\n test_data_y = test_data['species']\n\n # When returning many variables, it is a good practice to give them names:\n return {\n 'train_x':train_data_x,\n 'train_y':train_data_y,\n 'test_x':test_data_x,\n 
'test_y':test_data_y,\n }\n\n def train_model(self, data: Dict[str, Any]) -> Any:\n X, y = data['train_x'], data['train_y']\n model = LogisticRegression(random_state=0)\n model.fit(X, y)\n return model\n \n def evaluate_model(self, data: Dict, model: MLModel) -> Dict:\n super().evaluate(data, model)\n X, y_true = data['test_x'], data['test_y']\n y_pred = pd.Series(model.predict({'X': X, 'y': y_true})['species']).map(model.stats['targets_dict'])\n return {'micro_f1_score': f1_score(y_true, y_pred, average='micro')}",
"_____no_output_____"
]
],
[
[
"Here, we define a `MLModel` with `process` method which will be used to generate prediction.",
"_____no_output_____"
]
],
[
[
"class MyMLModel(MLModel):\n def preprocess(self, data: Dict[str, Any]) -> Dict[str, Any]:\n raw_data = data['X']\n return {\n 'X': self.stats['scaler'].transform(raw_data)\n }\n\n def process(self, input_data: dict) -> dict:\n preprocess_data = self.preprocess(input_data)\n y = self.base_model.predict(preprocess_data['X'])\n return {'species': [self.stats['targets'][item] for item in y]}",
"_____no_output_____"
]
],
[
[
"Now is the time to use our `MLModeler` and `MLModel` to create a classification model and generate prediction.",
"_____no_output_____"
]
],
[
[
"my_ml_modeler = MyMLModeler()\nmy_ml_modeler.model_class = MyMLModel\n\nmy_ml_model = my_ml_modeler.build_model()\n\nprint(my_ml_model.metrics)\n\nprediction = my_ml_model.predict({\n 'X': pd.DataFrame(\n [[5.1, 3.5, 1.5, 0.2],\n [7.1, 3.5, 1.5, 0.6]], \n columns=['sepal_length','sepal_width','petal_length','petal_width'])\n})\nprediction",
"{'micro_f1_score': 0.3}\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
eca5b8f94f6dd067f6e9985ecf94b2d35d6ff99c | 18,011 | ipynb | Jupyter Notebook | mavenn/development/20.09.01_get_1p_effects.ipynb | jbkinney/mave-nn | bc1de2e147f3f2be38c42e7bd1cf921ec6870879 | [
"MIT"
] | 12 | 2020-09-15T04:20:48.000Z | 2022-02-12T00:51:05.000Z | mavenn/development/20.09.01_get_1p_effects.ipynb | jbkinney/mave-nn | bc1de2e147f3f2be38c42e7bd1cf921ec6870879 | [
"MIT"
] | 12 | 2020-06-07T21:15:59.000Z | 2022-03-03T18:10:46.000Z | mavenn/development/20.09.01_get_1p_effects.ipynb | jbkinney/mave-nn | bc1de2e147f3f2be38c42e7bd1cf921ec6870879 | [
"MIT"
] | 1 | 2022-01-04T18:22:27.000Z | 2022-01-04T18:22:27.000Z | 35.807157 | 330 | 0.418411 | [
[
[
"# Standard imports\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# Add mavenn to path\nimport sys\npath_to_mavenn_local = '/Users/jkinney/github/mavenn'\nsys.path.insert(0,path_to_mavenn_local)\n\n# Load mavenn and check path\nimport mavenn\nmavenn.__path__",
"_____no_output_____"
],
[
"# Load model\nge_model = mavenn.load('20.08.26_load_models_for_JBK/model_files/gaussian_GB1_model')\n\n# Set wt sequence\nwt_seq = 'QYKLILNGKTLKGETTTEAVDAATAEKVFKQYANDNGVDGEWTYDDATKTFTVTE'\n\n# Comptue effects\nmat_df = ge_model.get_1pt_effects(wt_seq=wt_seq, out_format=\"matrix\")\nmat_df.head()",
"/Users/jkinney/opt/anaconda3/lib/python3.7/site-packages/sklearn/preprocessing/_encoders.py:415: FutureWarning: The handling of integer data will change in version 0.22. Currently, the categories are determined based on the range [0, max(values)], while in the future they will be determined based on the unique values.\nIf you want the future behaviour and silence this warning, you can specify \"categories='auto'\".\nIn case you used a LabelEncoder before this OneHotEncoder to convert the categories to integers, then you can now use the OneHotEncoder directly.\n warnings.warn(msg, FutureWarning)\n/Users/jkinney/opt/anaconda3/lib/python3.7/site-packages/sklearn/preprocessing/_encoders.py:415: FutureWarning: The handling of integer data will change in version 0.22. Currently, the categories are determined based on the range [0, max(values)], while in the future they will be determined based on the unique values.\nIf you want the future behaviour and silence this warning, you can specify \"categories='auto'\".\nIn case you used a LabelEncoder before this OneHotEncoder to convert the categories to integers, then you can now use the OneHotEncoder directly.\n warnings.warn(msg, FutureWarning)\n"
],
[
"# Comptue effects\nmat_df = ge_model.get_1pt_effects(wt_seq=wt_seq, out_format=\"tidy\")\nmat_df.head()",
"/Users/jkinney/opt/anaconda3/lib/python3.7/site-packages/sklearn/preprocessing/_encoders.py:415: FutureWarning: The handling of integer data will change in version 0.22. Currently, the categories are determined based on the range [0, max(values)], while in the future they will be determined based on the unique values.\nIf you want the future behaviour and silence this warning, you can specify \"categories='auto'\".\nIn case you used a LabelEncoder before this OneHotEncoder to convert the categories to integers, then you can now use the OneHotEncoder directly.\n warnings.warn(msg, FutureWarning)\n"
],
[
"# from mavenn.src.error_handling import check, handle_errors\n# from mavenn.src.validate import validate_alphabet\n# from mavenn.src.utils import get_1pt_variants\n\n# @handle_errors\n# def get_1pt_effects(model, wt_seq, out_format=\"matrix\"):\n# \"\"\"\n \n# parameters\n# ----------\n \n# model: (mavenn.Model)\n# A MAVE-NN model.\n\n# wt_seq: (str)\n# The wild-type sequence.\n\n# out_format: (\"matrix\" or \"tidy\")\n# If matrix, a 2D matrix of dphi values is\n# returned, with characters across columns and\n# positions across rows. If \"tidy\", a tidy\n# dataframe is returned that additionally lists\n# all variant sequences, phi values, etc.\n \n# returns\n# -------\n \n# out_df: (pd.DataFrame)\n# Dataframe containing dphi values and other \n# information.\n# \"\"\"\n \n# # Get all 1pt variant sequences\n# df = get_1pt_variants(wt_seq=wt_seq, alphabet=model.alphabet, include_wt=True)\n# x = df['seq'].values\n \n# # Compute dphi values\n# df['phi'] = ge_model.x_to_phi(x)\n# df['dphi'] = df['phi'] - df['phi']['WT']\n \n# if out_format==\"tidy\":\n# mut_df = df\n# elif out_format==\"matrix\":\n# # Keep only non-wt rows\n# ix = (df.index != 'WT')\n# tmp_df = df[ix]\n \n# # Pivot matrix and return\n# mut_df = tmp_df.pivot(index='pos',columns='mut_char',values='dphi')\n# mut_df.fillna(0, inplace=True)\n# mut_df.columns.name=None\n# else:\n# mut_df = None\n# check(out_format in [\"tidy\",\"matrix\"],\n# f\"out_format={out_format}; must be 'tidy' or 'matrix'.\")\n \n# return mut_df",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code"
]
] |
eca5ba8a5bf708fdb20dee70011f6d119769fc43 | 13,596 | ipynb | Jupyter Notebook | category_pdf_processing/cl@rivate/read_pdf.ipynb | Anshul-GH/upwork_projects | 600c2ccf79e5ed5cf962f1ae05c59f18ad411f63 | [
"MIT"
] | 2 | 2020-07-23T19:32:58.000Z | 2020-10-24T20:54:09.000Z | category_pdf_processing/cl@rivate/read_pdf.ipynb | Anshul-GH/upwork_projects | 600c2ccf79e5ed5cf962f1ae05c59f18ad411f63 | [
"MIT"
] | null | null | null | category_pdf_processing/cl@rivate/read_pdf.ipynb | Anshul-GH/upwork_projects | 600c2ccf79e5ed5cf962f1ae05c59f18ad411f63 | [
"MIT"
] | 1 | 2021-07-11T14:41:04.000Z | 2021-07-11T14:41:04.000Z | 39.068966 | 3,994 | 0.612974 | [
[
[
"from PyPDF4 import PdfFileReader",
"_____no_output_____"
],
[
"# file path\nroot = !pwd\nroot = root[0]\npath = root + '/data/cl@rivate_1.pdf'\nprint(path)",
"/home/anshul/code/freelance_projects/category_python_doc_processing/cl@rivate/data/cl@rivate_1.pdf\n"
],
[
"with open(path, 'rb') as f:\n pdf = PdfFileReader(f)\n information = pdf.getDocumentInfo()\n number_of_pages = pdf.getNumPages()\n\n txt = f\"\"\"\n Information about {path}: \n\n Author: {information.author}\n Creator: {information.creator}\n Producer: {information.producer}\n Subject: {information.subject}\n Title: {information.title}\n Number of pages: {number_of_pages}\n \"\"\"\n\nprint(txt)",
"\n Information about /home/anshul/code/freelance_projects/category_python_doc_processing/cl@rivate/data/cl@rivate_1.pdf: \n\n Author: None\n Creator: Crystal Reports\n Producer: Powered By Crystal\n Subject: None\n Title: None\n Number of pages: 195\n \n"
],
[
"print(information)",
"{'/Producer': 'Powered By Crystal', '/Creator': 'Crystal Reports'}\n"
],
[
"print(number_of_pages)",
"195\n"
],
[
"f.close()",
"_____no_output_____"
],
[
"pdfFileObj = open(path, 'rb')\n\n# create a pdf reader object\npdfReader = PdfFileReader(pdfFileObj)",
"_____no_output_____"
],
[
"pdfReader.numPages",
"_____no_output_____"
],
[
"pageObj = pdfReader.getPage(0)\n# print(type(pageObj.extractText()))\n\nextracted_text = pageObj.extractText()",
"_____no_output_____"
],
[
"txtFile = open('extracted_01.txt', 'a')\ntxtFile.write(extracted_text)\ntxtFile.close()",
"_____no_output_____"
],
[
"extracted_text",
"_____no_output_____"
],
[
"!pip install tabula-py",
"Collecting tabula-py\n Downloading tabula_py-2.2.0-py3-none-any.whl (11.7 MB)\n\u001b[K |โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ| 11.7 MB 748 kB/s \n\u001b[?25hCollecting distro\n Downloading distro-1.5.0-py2.py3-none-any.whl (18 kB)\nRequirement already satisfied: pandas>=0.25.3 in /home/anshul/anaconda3/envs/py38/lib/python3.8/site-packages (from tabula-py) (1.0.3)\nRequirement already satisfied: numpy in /home/anshul/anaconda3/envs/py38/lib/python3.8/site-packages (from tabula-py) (1.18.4)\nRequirement already satisfied: python-dateutil>=2.6.1 in /home/anshul/anaconda3/envs/py38/lib/python3.8/site-packages (from pandas>=0.25.3->tabula-py) (2.8.1)\nRequirement already satisfied: pytz>=2017.2 in /home/anshul/anaconda3/envs/py38/lib/python3.8/site-packages (from pandas>=0.25.3->tabula-py) (2020.1)\nRequirement already satisfied: six>=1.5 in /home/anshul/anaconda3/envs/py38/lib/python3.8/site-packages (from python-dateutil>=2.6.1->pandas>=0.25.3->tabula-py) (1.15.0)\nInstalling collected packages: distro, tabula-py\nSuccessfully installed distro-1.5.0 tabula-py-2.2.0\n"
],
[
"import tabula",
"_____no_output_____"
],
[
"# df = tabula.read_pdf(path,pages='all')\ndf = tabula.read_pdf(path, pages=1, lattice=True, area=(0, 0, 80, 100), relative_area=True)",
"_____no_output_____"
],
[
"df",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
eca5c5086d57723c654f7d47669e179c3620730d | 137,017 | ipynb | Jupyter Notebook | FakeNews_v2.ipynb | h4k1m13or/FakeNewsRNN | 2014634518197bd059a8f24beecd114d8ac987e4 | [
"Apache-2.0"
] | 3 | 2021-03-16T21:47:20.000Z | 2021-04-22T04:16:44.000Z | FakeNews_v2.ipynb | h4k1m13or/FakeNewsRNN | 2014634518197bd059a8f24beecd114d8ac987e4 | [
"Apache-2.0"
] | null | null | null | FakeNews_v2.ipynb | h4k1m13or/FakeNewsRNN | 2014634518197bd059a8f24beecd114d8ac987e4 | [
"Apache-2.0"
] | null | null | null | 73.54643 | 30,168 | 0.679602 | [
[
[
"import pandas as pd\nfrom sklearn.model_selection import train_test_split\nimport nltk\nimport re\nfrom nltk.corpus import stopwords\nfrom nltk.stem.porter import PorterStemmer\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk import pos_tag, word_tokenize\nwordnet_lemmatizer = WordNetLemmatizer()\n\nnltk.download('wordnet')\nnltk.download(\"stopwords\")\nnltk.download('averaged_perceptron_tagger')\nnltk.download('punkt')\nfrom google.colab import drive\ndrive.mount('/content/drive')\n%cd /content/drive/Shared drives/AARN/Fake-News-RNN",
"[nltk_data] Downloading package wordnet to /root/nltk_data...\n[nltk_data] Unzipping corpora/wordnet.zip.\n[nltk_data] Downloading package stopwords to /root/nltk_data...\n[nltk_data] Unzipping corpora/stopwords.zip.\n[nltk_data] Downloading package averaged_perceptron_tagger to\n[nltk_data] /root/nltk_data...\n[nltk_data] Unzipping taggers/averaged_perceptron_tagger.zip.\n[nltk_data] Downloading package punkt to /root/nltk_data...\n[nltk_data] Unzipping tokenizers/punkt.zip.\nMounted at /content/drive\n/content/drive/Shared drives/AARN/Fake-News-RNN\n"
]
],
[
[
"# 1-Preprocessing \n",
"_____no_output_____"
]
],
[
[
"#inspecting our data\n! ls\ntrain_set='./data/train.csv'\ntest_set='data/test.csv'\nsubmit='data/submit.csv'\n\ndf_train=pd.read_csv(train_set)\ndf_test=pd.read_csv(test_set)\ndf_submit=pd.read_csv(submit)\n#concatinate test with submit\n#print(df_submit)\ndf_test['label']=df_submit['label'].values\n#print(df_test)\n\n#concatinate test with train set, we will split them later \ndf=pd.concat([df_train,df_test],ignore_index=True,sort=True,).reset_index()\nprint(df.isnull().sum()) # how much null values we have for each collumn\ndf=df.dropna() # delete null values\nprint(len(df))\n",
" data\t\t\t FakeNews_v2.ipynb\t saved\n'Fake News detector.ipynb' FakeNews-v2.ipynb\nindex 0\nauthor 2460\nid 0\nlabel 0\ntext 46\ntitle 680\ndtype: int64\n22860\n"
],
[
"def lemmatization(text):\n wnl = WordNetLemmatizer()\n punctuations=\"?:!.,;\"\n sentence_words = nltk.word_tokenize(text)\n lemmas=[]\n for word, tag in pos_tag(word_tokenize(text)):\n wntag = tag[0].lower()\n wntag = wntag if wntag in ['a', 'r', 'n', 'v'] else None\n if not wntag:\n lemma = word\n else:\n lemma = wnl.lemmatize(word, wntag)\n lemmas.append(lemma)\n \n \n\n return \" \".join(lemmas)\n\ndef stemming(text):\n ps = PorterStemmer()\n text = re.sub('[^a-zA-Z]',' ',text) \n text = text.lower()\n text = text.split()\n text = [ps.stem(word) for word in text if not word in stopwords.words(\"english\")]\n text = \" \".join(text)\n return text\n\ndef trimming(text):\n text = text.split(maxsplit=600)\n text = ' '.join(text[:600])\n return text\n\n\ndf['title']=df['title'].apply(lambda x: trimming(x))\ndf['title']=df['title'].apply(lambda x: lemmatization(x))\n#df['title']=df['title'].apply(lambda x: stemming(x))\ndf['text']=df['text'].apply(lambda x: trimming(x))\ndf['text']=df['text'].apply(lambda x: lemmatization(x))\n#df['text']=df['text'].apply(lambda x: stemming(x))\n\n#df['titletext']=df['titletext'].apply(lambda x: lemmatization(x))\n#df['titletext']=df['titletext'].apply(lambda x: stemming(x))\ndf['titletext'] = df['title'] + \" . \" + df['text']\ndf['titletext']=df['titletext'].apply(lambda x: trimming(x))\ndf = df.reindex(columns=['label', 'title', 'text', 'titletext'])\n\n\nprint(df.head())",
" label ... titletext\n0 1 ... House Dem Aide : We Didn โ t Even See Comey โ ...\n1 0 ... FLYNN : Hillary Clinton , Big Woman on Campus ...\n2 1 ... Why the Truth Might Get You Fired . Why the Tr...\n3 1 ... 15 Civilians Killed In Single US Airstrike Hav...\n4 1 ... Iranian woman jail for fictional unpublished s...\n\n[5 rows x 4 columns]\n"
],
[
"!ls data",
"preprocessed test.csv\n"
],
[
# Drop very short articles (under 100 characters of body text).
df.drop(df[df.text.str.len() < 100].index, inplace=True)


def _three_way_split(frame, seed=102):
    """Split one label's rows into (train, valid, test).

    70/30 outer split, then 70/30 of the train part for validation —
    i.e. 49% / 21% / 30% of `frame`. Same seeds and call order as the
    original copy-pasted code, so the resulting splits are identical.
    """
    full_train, test = train_test_split(frame, train_size=0.7, test_size=0.3,
                                        random_state=seed)
    train, valid = train_test_split(full_train, train_size=0.7, test_size=0.3,
                                    random_state=seed)
    return train, valid, test


# Split each class separately so label proportions are preserved per split.
df_real = df[df['label'] == 0]
df_fake = df[df['label'] == 1]
df_real_train, df_real_valid, df_real_test = _three_way_split(df_real)
df_fake_train, df_fake_valid, df_fake_test = _three_way_split(df_fake)

# Recombine the two classes for each split.
df_train = pd.concat([df_real_train, df_fake_train], ignore_index=True, sort=False)
df_valid = pd.concat([df_real_valid, df_fake_valid], ignore_index=True, sort=False)
df_test = pd.concat([df_real_test, df_fake_test], ignore_index=True, sort=False)

# Persist the preprocessed splits for the torchtext loaders.
df_train.to_csv('./data/preprocessed/train.csv', index=False)
df_valid.to_csv('./data/preprocessed/valid.csv', index=False)
df_test.to_csv('./data/preprocessed/test.csv', index=False)
"_____no_output_____"
]
],
[
[
"# 2- loading dataset",
"_____no_output_____"
]
],
[
[
"import torch\nfrom torchtext.data import Field, TabularDataset, BucketIterator\n\n# Evaluation\n\nfrom sklearn.metrics import accuracy_score, classification_report, confusion_matrix\nimport seaborn as sns",
"/usr/local/lib/python3.6/dist-packages/statsmodels/tools/_testing.py:19: FutureWarning: pandas.util.testing is deprecated. Use the functions in the public API at pandas.testing instead.\n import pandas.util.testing as tm\n"
],
[
"!pip install sacremoses\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n# Fields\n\nlabel_field = Field(sequential=False, use_vocab=False, batch_first=True, dtype=torch.float)\ntext_field = Field(tokenize='moses', lower=True, include_lengths=True, batch_first=True)\nfields = [('label', label_field), ('title', text_field), ('text', text_field), ('titletext', text_field)]\n\n# TabularDataset\n\ntrain, valid, test = TabularDataset.splits(path='./data/preprocessed/', train='train.csv', validation='valid.csv', test='test.csv',\n format='CSV', fields=fields, skip_header=True)\n\n# Iterators\n\ntrain_iter = BucketIterator(train, batch_size=64, sort_key=lambda x: len(x.text),\n device=device, sort=False, sort_within_batch=True)\nvalid_iter = BucketIterator(valid, batch_size=64, sort_key=lambda x: len(x.text),\n device=device, sort=False, sort_within_batch=True)\ntest_iter = BucketIterator(test, batch_size=64, sort_key=lambda x: len(x.text),\n device=device, sort=False, sort_within_batch=True)\n\n# Vocabulary\n\ntext_field.build_vocab(train, min_freq=5,)",
"Collecting sacremoses\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/7d/34/09d19aff26edcc8eb2a01bed8e98f13a1537005d31e95233fd48216eed10/sacremoses-0.0.43.tar.gz (883kB)\n\r\u001b[K |โ | 10kB 24.4MB/s eta 0:00:01\r\u001b[K |โ | 20kB 5.7MB/s eta 0:00:01\r\u001b[K |โโ | 30kB 6.9MB/s eta 0:00:01\r\u001b[K |โโ | 40kB 7.7MB/s eta 0:00:01\r\u001b[K |โโ | 51kB 6.8MB/s eta 0:00:01\r\u001b[K |โโโ | 61kB 7.7MB/s eta 0:00:01\r\u001b[K |โโโ | 71kB 8.0MB/s eta 0:00:01\r\u001b[K |โโโ | 81kB 7.3MB/s eta 0:00:01\r\u001b[K |โโโโ | 92kB 7.8MB/s eta 0:00:01\r\u001b[K |โโโโ | 102kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโ | 112kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโ | 122kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโ | 133kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโ | 143kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโ | 153kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโ | 163kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโ | 174kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโ | 184kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโ | 194kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโ | 204kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโ | 215kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโ | 225kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโ | 235kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโ | 245kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโ | 256kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโ | 266kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโ | 276kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโ | 286kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโ | 296kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโโ | 307kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโโ | 317kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโโ | 327kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโโโ | 337kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโโโ | 348kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโโโ | 358kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโโโโ | 368kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโโโโ | 378kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโโโโ | 389kB 8.1MB/s eta 
0:00:01\r\u001b[K |โโโโโโโโโโโโโโโ | 399kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโโโโโ | 409kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโโโโโโ | 419kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโโโโโโ | 430kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโโโโโโ | 440kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโโโโโโโ | 450kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโโโโโโโ | 460kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโโโโโโโ | 471kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโโโโโโโโ | 481kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโโโโโโโโ | 491kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโโโโโโโโโ | 501kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโโโโโโโโโ | 512kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโโโโโโโโโ | 522kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโโโโโโโโโโ | 532kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโโโโโโโโโโ | 542kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโโโโโโโโโโ | 552kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโโโโโโโโโโโ | 563kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโโโโโโโโโโโ | 573kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโโโโโโโโโโโโ | 583kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโโโโโโโโโโโโ | 593kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโโโโโโโโโโโโ | 604kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโโโโโโโโโโโโโ | 614kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโโโโโโโโโโโโโ | 624kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโโโโโโโโโโโโโ | 634kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโโโโโโโโโโโโโโ | 645kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโโโโโโโโโโโโโโ | 655kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโโโโโโโโโโโโโโ | 665kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโโโโโโโโโโโโโโโ | 675kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโโโโโโโโโโโโโโโ | 686kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโโโโโโโโโโโโโโโโ | 696kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโโโโโโโโโโโโโโโโ | 706kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโโโโโโโโโโโโโโโโ | 716kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโโโโโโโโโโโโโโโโโ | 727kB 
8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโโโโโโโโโโโโโโโโโ | 737kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโโโโโโโโโโโโโโโโโ | 747kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโโโโโโโโโโโโโโโโโโ | 757kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโโโโโโโโโโโโโโโโโโ | 768kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโโโโโโโโโโโโโโโโโโโ | 778kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโโโโโโโโโโโโโโโโโโโ | 788kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโโโโโโโโโโโโโโโโโโโ | 798kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ | 808kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ | 819kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ | 829kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ | 839kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ | 849kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ| 860kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ| 870kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ| 880kB 8.1MB/s eta 0:00:01\r\u001b[K |โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ| 890kB 8.1MB/s \n\u001b[?25hRequirement already satisfied: regex in /usr/local/lib/python3.6/dist-packages (from sacremoses) (2019.12.20)\nRequirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from sacremoses) (1.15.0)\nRequirement already satisfied: click in /usr/local/lib/python3.6/dist-packages (from sacremoses) (7.1.2)\nRequirement already satisfied: joblib in /usr/local/lib/python3.6/dist-packages (from sacremoses) (0.16.0)\nRequirement already satisfied: tqdm in /usr/local/lib/python3.6/dist-packages (from sacremoses) (4.41.1)\nBuilding wheels for collected packages: sacremoses\n Building wheel for sacremoses (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n Created wheel for sacremoses: filename=sacremoses-0.0.43-cp36-none-any.whl size=893257 sha256=7d0c24eb45d272010692a09c3c2b8a8cb7d4d10374403ebf0d9ee0fab0fe8d48\n Stored in directory: /root/.cache/pip/wheels/29/3c/fd/7ce5c3f0666dab31a50123635e6fb5e19ceb42ce38d4e58f45\nSuccessfully built sacremoses\nInstalling collected packages: sacremoses\nSuccessfully installed sacremoses-0.0.43\n"
]
],
[
[
"# 3- Model:",
"_____no_output_____"
]
],
[
[
"import torch.nn as nn\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence",
"_____no_output_____"
],
[
"class FakeNewsNet(nn.Module):\n def __init__(self,vocab_size=len(text_field.vocab),hidden_size=100,num_layers=1,bi_lstm=False):\n super(FakeNewsNet, self).__init__()\n self.vocab_size=vocab_size\n self.hidden_size=hidden_size\n self.num_layers=num_layers\n self.bi_lstm=bi_lstm\n self.embedding = nn.Embedding(self.vocab_size,256)\n self.LSTM = nn.LSTM(input_size=256,hidden_size=self.hidden_size,num_layers=self.num_layers,bidirectional=self.bi_lstm,batch_first=True)\n self.drop= nn.Dropout(p=0.5)\n if bi_lstm:\n self.out = nn.Linear(2*self.hidden_size, 1)\n else:\n self.out = nn.Linear(self.hidden_size, 1)\n\n def forward(self, inp, input_len):\n\n embeded_text = self.embedding(inp)\n packed_input = pack_padded_sequence(embeded_text, input_len, batch_first=True, enforce_sorted=False)\n packed_output, _ = self.LSTM(packed_input)\n output, _ = pad_packed_sequence(packed_output, batch_first=True)\n\n out_forward = output[range(len(output)), input_len - 1, :self.hidden_size]\n out_reverse = output[:, 0, self.hidden_size:]\n out_reduced = torch.cat((out_forward, out_reverse), 1)\n text_fea = self.drop(out_reduced)\n\n text_fea = self.out(text_fea)\n text_fea = torch.squeeze(text_fea, 1)\n text_out = torch.sigmoid(text_fea)\n\n return text_out",
"_____no_output_____"
]
],
[
[
"# 4-Training\n",
"_____no_output_____"
]
],
[
[
def save_checkpoint(save_path, model, optimizer, valid_loss):
    """Save model + optimizer state dicts and the best validation loss.

    No-op when save_path is None.
    """
    if save_path is None:  # idiom: identity check, not == (PEP 8)
        return

    state_dict = {'model_state_dict': model.state_dict(),
                  'optimizer_state_dict': optimizer.state_dict(),
                  'valid_loss': valid_loss}

    torch.save(state_dict, save_path)
    print(f'Model saved to :{save_path}')


def load_checkpoint(load_path, model, optimizer):
    """Restore model + optimizer state from a checkpoint file.

    Returns the stored validation loss, or None when load_path is None.
    Tensors are remapped onto the global `device`.
    """
    if load_path is None:
        return

    state_dict = torch.load(load_path, map_location=device)
    print(f'Model loaded from : {load_path}')

    model.load_state_dict(state_dict['model_state_dict'])
    optimizer.load_state_dict(state_dict['optimizer_state_dict'])

    return state_dict['valid_loss']


def save_metrics(save_path, train_loss_list, valid_loss_list, global_steps_list):
    """Save the training/validation loss curves and their step indices.

    No-op when save_path is None.
    """
    if save_path is None:
        return

    state_dict = {'train_loss_list': train_loss_list,
                  'valid_loss_list': valid_loss_list,
                  'global_steps_list': global_steps_list}

    torch.save(state_dict, save_path)
    print(f'Model saved to: {save_path}')


def load_metrics(load_path):
    """Load loss curves saved by save_metrics.

    Returns (train_loss_list, valid_loss_list, global_steps_list), or
    None when load_path is None.
    """
    if load_path is None:
        return

    state_dict = torch.load(load_path, map_location=device)
    print(f'Model loaded from: {load_path}')

    return state_dict['train_loss_list'], state_dict['valid_loss_list'], state_dict['global_steps_list']
"_____no_output_____"
],
[
"import torch.optim as optim\n\n",
"_____no_output_____"
],
[
def train(model,
          optimizer,
          criterion = nn.BCELoss(),
          train_loader = train_iter,
          valid_loader = valid_iter,
          num_epochs = 100,
          eval_every = len(train_iter) // 2,
          file_path = './saved',
          best_valid_loss = float("Inf")):
    """Train `model`, validating every `eval_every` steps and
    checkpointing whenever validation loss improves.

    NOTE(review): the defaults for criterion/train_loader/valid_loader/
    eval_every are evaluated once at function-definition time and bind
    the globals train_iter / valid_iter as they exist at that moment.
    """
    
    # initialize running values
    running_loss = 0.0
    valid_running_loss = 0.0
    global_step = 0
    train_loss_list = []
    valid_loss_list = []
    global_steps_list = []

    # training loop
    print("training ...")
    model.train()
    for epoch in range(num_epochs):
        # Each batch yields (label, (title, len), (text, len), (titletext, len));
        # only titletext is fed to the model.
        for (labels, (title, title_len), (text, text_len), (titletext, titletext_len)), _ in train_loader: 
            labels = labels.to(device)
            titletext = titletext.to(device)
            titletext_len = titletext_len.to(device)
            output = model(titletext, titletext_len)
            loss = criterion(output, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # update running values
            running_loss += loss.item()
            global_step += 1

            # evaluation step
            if global_step % eval_every == 0:
                model.eval()  # disable dropout for validation
                with torch.no_grad():                    
                    # validation loop over the full validation set
                    for (labels, (title, title_len), (text, text_len), (titletext, titletext_len)), _ in valid_loader:
                        labels = labels.to(device)
                        titletext = titletext.to(device)
                        titletext_len = titletext_len.to(device)
                        output = model(titletext, titletext_len)

                        loss = criterion(output, labels)
                        valid_running_loss += loss.item()

                # evaluation: average losses over the window / valid set
                average_train_loss = running_loss / eval_every
                average_valid_loss = valid_running_loss / len(valid_loader)
                train_loss_list.append(average_train_loss)
                valid_loss_list.append(average_valid_loss)
                global_steps_list.append(global_step)

                # resetting running values
                running_loss = 0.0                
                valid_running_loss = 0.0
                model.train()  # back to training mode (re-enable dropout)

                # print progress
                print('Epoch [{}/{}], Step [{}/{}], Train Loss: {:.4f}, Valid Loss: {:.4f}'
                      .format(epoch+1, num_epochs, global_step, num_epochs*len(train_loader),
                              average_train_loss, average_valid_loss))
                
                # checkpoint only when validation loss improves
                if best_valid_loss > average_valid_loss:
                    best_valid_loss = average_valid_loss
                    save_checkpoint(file_path + '/model.pt', model, optimizer, best_valid_loss)
                    save_metrics(file_path + '/metrics.pt', train_loss_list, valid_loss_list, global_steps_list)
    
    # Persist the final metric curves regardless of best checkpoint.
    save_metrics(file_path + '/metrics.pt', train_loss_list, valid_loss_list, global_steps_list)
    print('Finished Training!')


model = FakeNewsNet(hidden_size=300,num_layers=1,bi_lstm=True).to(device)
print(model)
optimizer = optim.Adam(model.parameters(), lr=0.01,eps=1e-6,)

train(model=model, optimizer=optimizer, num_epochs=7,eval_every=2)
"FakeNewsNet(\n (embedding): Embedding(39578, 256)\n (LSTM): LSTM(256, 300, batch_first=True, bidirectional=True)\n (drop): Dropout(p=0.5, inplace=False)\n (out): Linear(in_features=600, out_features=1, bias=True)\n)\ntraining ...\nEpoch [1/7], Step [2/1218], Train Loss: 0.8550, Valid Loss: 0.7061\nModel saved to :./saved/model.pt\nModel saved to: ./saved/metrics.pt\nEpoch [1/7], Step [4/1218], Train Loss: 0.6939, Valid Loss: 0.6672\nModel saved to :./saved/model.pt\nModel saved to: ./saved/metrics.pt\nEpoch [1/7], Step [6/1218], Train Loss: 0.6564, Valid Loss: 0.6801\nEpoch [1/7], Step [8/1218], Train Loss: 0.4592, Valid Loss: 1.4873\nEpoch [1/7], Step [10/1218], Train Loss: 1.3791, Valid Loss: 0.6523\nModel saved to :./saved/model.pt\nModel saved to: ./saved/metrics.pt\nEpoch [1/7], Step [12/1218], Train Loss: 0.7037, Valid Loss: 0.8266\nEpoch [1/7], Step [14/1218], Train Loss: 0.9253, Valid Loss: 0.6928\nEpoch [1/7], Step [16/1218], Train Loss: 0.7024, Valid Loss: 0.6436\nModel saved to :./saved/model.pt\nModel saved to: ./saved/metrics.pt\nEpoch [1/7], Step [18/1218], Train Loss: 0.6871, Valid Loss: 0.6548\nEpoch [1/7], Step [20/1218], Train Loss: 0.6523, Valid Loss: 0.6326\nModel saved to :./saved/model.pt\nModel saved to: ./saved/metrics.pt\nEpoch [1/7], Step [22/1218], Train Loss: 0.7059, Valid Loss: 0.6142\nModel saved to :./saved/model.pt\nModel saved to: ./saved/metrics.pt\nEpoch [1/7], Step [24/1218], Train Loss: 0.5324, Valid Loss: 0.6013\nModel saved to :./saved/model.pt\nModel saved to: ./saved/metrics.pt\nEpoch [1/7], Step [26/1218], Train Loss: 0.5131, Valid Loss: 0.5649\nModel saved to :./saved/model.pt\nModel saved to: ./saved/metrics.pt\nEpoch [1/7], Step [28/1218], Train Loss: 0.6470, Valid Loss: 0.5825\nEpoch [1/7], Step [30/1218], Train Loss: 0.5123, Valid Loss: 0.5395\nModel saved to :./saved/model.pt\nModel saved to: ./saved/metrics.pt\nEpoch [1/7], Step [32/1218], Train Loss: 0.4995, Valid Loss: 0.5205\nModel saved to 
:./saved/model.pt\nModel saved to: ./saved/metrics.pt\nEpoch [1/7], Step [34/1218], Train Loss: 0.5503, Valid Loss: 0.4884\nModel saved to :./saved/model.pt\nModel saved to: ./saved/metrics.pt\nEpoch [1/7], Step [36/1218], Train Loss: 0.4345, Valid Loss: 0.4740\nModel saved to :./saved/model.pt\nModel saved to: ./saved/metrics.pt\nEpoch [1/7], Step [38/1218], Train Loss: 0.7076, Valid Loss: 0.4337\nModel saved to :./saved/model.pt\nModel saved to: ./saved/metrics.pt\nEpoch [1/7], Step [40/1218], Train Loss: 0.4051, Valid Loss: 0.4711\nEpoch [1/7], Step [42/1218], Train Loss: 0.5164, Valid Loss: 0.5102\nEpoch [1/7], Step [44/1218], Train Loss: 0.5963, Valid Loss: 0.8480\nEpoch [1/7], Step [46/1218], Train Loss: 0.7230, Valid Loss: 0.4750\nEpoch [1/7], Step [48/1218], Train Loss: 0.4851, Valid Loss: 0.4628\nEpoch [1/7], Step [50/1218], Train Loss: 0.4675, Valid Loss: 0.4546\nEpoch [1/7], Step [52/1218], Train Loss: 0.7040, Valid Loss: 0.4756\nEpoch [1/7], Step [54/1218], Train Loss: 0.4787, Valid Loss: 0.4785\nEpoch [1/7], Step [56/1218], Train Loss: 0.4722, Valid Loss: 0.4215\nModel saved to :./saved/model.pt\nModel saved to: ./saved/metrics.pt\nEpoch [1/7], Step [58/1218], Train Loss: 0.5247, Valid Loss: 0.4160\nModel saved to :./saved/model.pt\nModel saved to: ./saved/metrics.pt\nEpoch [1/7], Step [60/1218], Train Loss: 0.2220, Valid Loss: 1.2894\nEpoch [1/7], Step [62/1218], Train Loss: 0.9252, Valid Loss: 0.6702\nEpoch [1/7], Step [64/1218], Train Loss: 0.6081, Valid Loss: 0.5371\nEpoch [1/7], Step [66/1218], Train Loss: 0.5592, Valid Loss: 0.4785\nEpoch [1/7], Step [68/1218], Train Loss: 0.5320, Valid Loss: 0.5361\nEpoch [1/7], Step [70/1218], Train Loss: 0.4352, Valid Loss: 0.6548\nEpoch [1/7], Step [72/1218], Train Loss: 0.6906, Valid Loss: 0.3678\nModel saved to :./saved/model.pt\nModel saved to: ./saved/metrics.pt\nEpoch [1/7], Step [74/1218], Train Loss: 0.4306, Valid Loss: 0.3772\nEpoch [1/7], Step [76/1218], Train Loss: 0.4819, Valid Loss: 0.3709\nEpoch 
[1/7], Step [78/1218], Train Loss: 0.2085, Valid Loss: 0.3749\nEpoch [1/7], Step [80/1218], Train Loss: 0.4637, Valid Loss: 0.3659\nModel saved to :./saved/model.pt\nModel saved to: ./saved/metrics.pt\nEpoch [1/7], Step [82/1218], Train Loss: 0.3876, Valid Loss: 0.3625\nModel saved to :./saved/model.pt\nModel saved to: ./saved/metrics.pt\nEpoch [1/7], Step [84/1218], Train Loss: 0.4076, Valid Loss: 0.3570\nModel saved to :./saved/model.pt\nModel saved to: ./saved/metrics.pt\nEpoch [1/7], Step [86/1218], Train Loss: 0.3082, Valid Loss: 0.3553\nModel saved to :./saved/model.pt\nModel saved to: ./saved/metrics.pt\nEpoch [1/7], Step [88/1218], Train Loss: 0.2443, Valid Loss: 0.3526\nModel saved to :./saved/model.pt\nModel saved to: ./saved/metrics.pt\nEpoch [1/7], Step [90/1218], Train Loss: 0.2525, Valid Loss: 0.3517\nModel saved to :./saved/model.pt\nModel saved to: ./saved/metrics.pt\nEpoch [1/7], Step [92/1218], Train Loss: 0.3266, Valid Loss: 0.3544\nEpoch [1/7], Step [94/1218], Train Loss: 0.2451, Valid Loss: 0.3712\nEpoch [1/7], Step [96/1218], Train Loss: 0.3441, Valid Loss: 0.3837\nEpoch [1/7], Step [98/1218], Train Loss: 0.5882, Valid Loss: 0.3519\nEpoch [1/7], Step [100/1218], Train Loss: 0.4056, Valid Loss: 0.3415\nModel saved to :./saved/model.pt\nModel saved to: ./saved/metrics.pt\nEpoch [1/7], Step [102/1218], Train Loss: 0.3288, Valid Loss: 0.3809\nEpoch [1/7], Step [104/1218], Train Loss: 0.4259, Valid Loss: 0.4084\nEpoch [1/7], Step [106/1218], Train Loss: 0.4643, Valid Loss: 0.3488\nEpoch [1/7], Step [108/1218], Train Loss: 0.3705, Valid Loss: 0.3495\nEpoch [1/7], Step [110/1218], Train Loss: 0.3836, Valid Loss: 0.3565\nEpoch [1/7], Step [112/1218], Train Loss: 0.3775, Valid Loss: 0.3555\nEpoch [1/7], Step [114/1218], Train Loss: 0.3389, Valid Loss: 0.3469\nEpoch [1/7], Step [116/1218], Train Loss: 0.2611, Valid Loss: 0.3445\nEpoch [1/7], Step [118/1218], Train Loss: 0.3920, Valid Loss: 0.3442\nEpoch [1/7], Step [120/1218], Train Loss: 0.2315, Valid 
Loss: 0.3577\nEpoch [1/7], Step [122/1218], Train Loss: 0.1720, Valid Loss: 0.3868\nEpoch [1/7], Step [124/1218], Train Loss: 0.7611, Valid Loss: 0.3745\nEpoch [1/7], Step [126/1218], Train Loss: 0.3017, Valid Loss: 0.3567\nEpoch [1/7], Step [128/1218], Train Loss: 0.2932, Valid Loss: 0.3414\nModel saved to :./saved/model.pt\nModel saved to: ./saved/metrics.pt\nEpoch [1/7], Step [130/1218], Train Loss: 0.3244, Valid Loss: 0.3364\nModel saved to :./saved/model.pt\nModel saved to: ./saved/metrics.pt\nEpoch [1/7], Step [132/1218], Train Loss: 0.2993, Valid Loss: 0.3594\nEpoch [1/7], Step [134/1218], Train Loss: 0.3176, Valid Loss: 0.3901\nEpoch [1/7], Step [136/1218], Train Loss: 0.3517, Valid Loss: 0.3896\nEpoch [1/7], Step [138/1218], Train Loss: 0.3694, Valid Loss: 0.3753\nEpoch [1/7], Step [140/1218], Train Loss: 0.3559, Valid Loss: 0.3467\nEpoch [1/7], Step [142/1218], Train Loss: 0.4554, Valid Loss: 0.3274\nModel saved to :./saved/model.pt\nModel saved to: ./saved/metrics.pt\nEpoch [1/7], Step [144/1218], Train Loss: 0.3469, Valid Loss: 0.3218\nModel saved to :./saved/model.pt\nModel saved to: ./saved/metrics.pt\nEpoch [1/7], Step [146/1218], Train Loss: 0.2908, Valid Loss: 0.3330\nEpoch [1/7], Step [148/1218], Train Loss: 0.4650, Valid Loss: 0.3438\nEpoch [1/7], Step [150/1218], Train Loss: 0.4127, Valid Loss: 0.3503\nEpoch [1/7], Step [152/1218], Train Loss: 0.3253, Valid Loss: 0.3681\nEpoch [1/7], Step [154/1218], Train Loss: 0.3318, Valid Loss: 0.3509\nEpoch [1/7], Step [156/1218], Train Loss: 0.3268, Valid Loss: 0.3241\nEpoch [1/7], Step [158/1218], Train Loss: 0.2368, Valid Loss: 0.3110\nModel saved to :./saved/model.pt\nModel saved to: ./saved/metrics.pt\nEpoch [1/7], Step [160/1218], Train Loss: 0.4690, Valid Loss: 0.3173\nEpoch [1/7], Step [162/1218], Train Loss: 0.1686, Valid Loss: 0.3357\nEpoch [1/7], Step [164/1218], Train Loss: 0.2988, Valid Loss: 0.3499\nEpoch [1/7], Step [166/1218], Train Loss: 0.6775, Valid Loss: 0.3532\nEpoch [1/7], Step 
[168/1218], Train Loss: 0.3188, Valid Loss: 0.3382\nEpoch [1/7], Step [170/1218], Train Loss: 0.4139, Valid Loss: 0.3196\nEpoch [1/7], Step [172/1218], Train Loss: 0.2831, Valid Loss: 0.3124\nEpoch [1/7], Step [174/1218], Train Loss: 0.3530, Valid Loss: 0.3139\nEpoch [2/7], Step [176/1218], Train Loss: 0.1106, Valid Loss: 0.3162\nEpoch [2/7], Step [178/1218], Train Loss: 0.2698, Valid Loss: 0.3151\nEpoch [2/7], Step [180/1218], Train Loss: 0.3382, Valid Loss: 0.3117\nEpoch [2/7], Step [182/1218], Train Loss: 0.2240, Valid Loss: 0.3178\nEpoch [2/7], Step [184/1218], Train Loss: 0.1832, Valid Loss: 0.3293\nEpoch [2/7], Step [186/1218], Train Loss: 0.2769, Valid Loss: 0.3433\nEpoch [2/7], Step [188/1218], Train Loss: 0.2027, Valid Loss: 0.3489\nEpoch [2/7], Step [190/1218], Train Loss: 0.3969, Valid Loss: 0.3489\nEpoch [2/7], Step [192/1218], Train Loss: 0.3775, Valid Loss: 0.3349\nEpoch [2/7], Step [194/1218], Train Loss: 0.3428, Valid Loss: 0.3211\nEpoch [2/7], Step [196/1218], Train Loss: 0.3553, Valid Loss: 0.3158\nEpoch [2/7], Step [198/1218], Train Loss: 0.1907, Valid Loss: 0.3161\nEpoch [2/7], Step [200/1218], Train Loss: 0.3029, Valid Loss: 0.3189\nEpoch [2/7], Step [202/1218], Train Loss: 0.1516, Valid Loss: 0.3247\nEpoch [2/7], Step [204/1218], Train Loss: 0.2661, Valid Loss: 0.3385\nEpoch [2/7], Step [206/1218], Train Loss: 0.1408, Valid Loss: 0.3452\nEpoch [2/7], Step [208/1218], Train Loss: 0.1704, Valid Loss: 0.3478\nEpoch [2/7], Step [210/1218], Train Loss: 0.1424, Valid Loss: 0.3487\nEpoch [2/7], Step [212/1218], Train Loss: 0.4067, Valid Loss: 0.3291\nEpoch [2/7], Step [214/1218], Train Loss: 0.1475, Valid Loss: 0.3332\nEpoch [2/7], Step [216/1218], Train Loss: 0.4377, Valid Loss: 0.3943\nEpoch [2/7], Step [218/1218], Train Loss: 0.4539, Valid Loss: 0.4344\nEpoch [2/7], Step [220/1218], Train Loss: 0.4054, Valid Loss: 0.4844\nEpoch [2/7], Step [222/1218], Train Loss: 0.4584, Valid Loss: 0.4864\nEpoch [2/7], Step [224/1218], Train Loss: 0.4854, Valid 
Loss: 0.5049\nEpoch [2/7], Step [226/1218], Train Loss: 0.5189, Valid Loss: 0.4830\nEpoch [2/7], Step [228/1218], Train Loss: 0.4324, Valid Loss: 0.4744\nEpoch [2/7], Step [230/1218], Train Loss: 0.3463, Valid Loss: 0.4879\nEpoch [2/7], Step [232/1218], Train Loss: 0.5653, Valid Loss: 0.4376\nEpoch [2/7], Step [234/1218], Train Loss: 0.4325, Valid Loss: 0.4296\nEpoch [2/7], Step [236/1218], Train Loss: 0.4152, Valid Loss: 0.4506\nEpoch [2/7], Step [238/1218], Train Loss: 0.4340, Valid Loss: 0.3737\nEpoch [2/7], Step [240/1218], Train Loss: 0.3263, Valid Loss: 0.3591\nEpoch [2/7], Step [242/1218], Train Loss: 0.3320, Valid Loss: 0.4153\nEpoch [2/7], Step [244/1218], Train Loss: 0.2555, Valid Loss: 0.3972\nEpoch [2/7], Step [246/1218], Train Loss: 0.4232, Valid Loss: 0.3570\nEpoch [2/7], Step [248/1218], Train Loss: 0.2262, Valid Loss: 0.3492\nEpoch [2/7], Step [250/1218], Train Loss: 0.2947, Valid Loss: 0.3600\nEpoch [2/7], Step [252/1218], Train Loss: 0.5170, Valid Loss: 0.3925\nEpoch [2/7], Step [254/1218], Train Loss: 0.4002, Valid Loss: 0.4109\nEpoch [2/7], Step [256/1218], Train Loss: 0.1789, Valid Loss: 0.3944\nEpoch [2/7], Step [258/1218], Train Loss: 0.1981, Valid Loss: 0.3853\nEpoch [2/7], Step [260/1218], Train Loss: 0.3389, Valid Loss: 0.3695\nEpoch [2/7], Step [262/1218], Train Loss: 0.3701, Valid Loss: 0.3574\nEpoch [2/7], Step [264/1218], Train Loss: 0.2070, Valid Loss: 0.3490\nEpoch [2/7], Step [266/1218], Train Loss: 0.3115, Valid Loss: 0.3417\nEpoch [2/7], Step [268/1218], Train Loss: 0.3005, Valid Loss: 0.3402\nEpoch [2/7], Step [270/1218], Train Loss: 0.2688, Valid Loss: 0.3382\nEpoch [2/7], Step [272/1218], Train Loss: 0.2053, Valid Loss: 0.3357\nEpoch [2/7], Step [274/1218], Train Loss: 0.3802, Valid Loss: 0.3338\nEpoch [2/7], Step [276/1218], Train Loss: 0.4270, Valid Loss: 0.3290\nEpoch [2/7], Step [278/1218], Train Loss: 0.1668, Valid Loss: 0.3365\nEpoch [2/7], Step [280/1218], Train Loss: 0.2843, Valid Loss: 0.3440\nEpoch [2/7], Step 
[282/1218], Train Loss: 0.1756, Valid Loss: 0.3344\nEpoch [2/7], Step [284/1218], Train Loss: 0.1901, Valid Loss: 0.3276\nEpoch [2/7], Step [286/1218], Train Loss: 0.3310, Valid Loss: 0.3244\nEpoch [2/7], Step [288/1218], Train Loss: 0.3375, Valid Loss: 0.3230\nEpoch [2/7], Step [290/1218], Train Loss: 0.3971, Valid Loss: 0.3155\nEpoch [2/7], Step [292/1218], Train Loss: 0.2729, Valid Loss: 0.3122\nEpoch [2/7], Step [294/1218], Train Loss: 0.2958, Valid Loss: 0.3140\nEpoch [2/7], Step [296/1218], Train Loss: 0.2905, Valid Loss: 0.3155\nEpoch [2/7], Step [298/1218], Train Loss: 0.2816, Valid Loss: 0.3144\nEpoch [2/7], Step [300/1218], Train Loss: 0.2772, Valid Loss: 0.3187\nEpoch [2/7], Step [302/1218], Train Loss: 0.4009, Valid Loss: 0.3202\nEpoch [2/7], Step [304/1218], Train Loss: 0.4298, Valid Loss: 0.3137\nEpoch [2/7], Step [306/1218], Train Loss: 0.1813, Valid Loss: 0.3146\nEpoch [2/7], Step [308/1218], Train Loss: 0.3167, Valid Loss: 0.3157\nEpoch [2/7], Step [310/1218], Train Loss: 0.2090, Valid Loss: 0.3093\nModel saved to :./saved/model.pt\nModel saved to: ./saved/metrics.pt\nEpoch [2/7], Step [312/1218], Train Loss: 0.2752, Valid Loss: 0.3051\nModel saved to :./saved/model.pt\nModel saved to: ./saved/metrics.pt\nEpoch [2/7], Step [314/1218], Train Loss: 0.2317, Valid Loss: 0.3063\nEpoch [2/7], Step [316/1218], Train Loss: 0.2195, Valid Loss: 0.3173\nEpoch [2/7], Step [318/1218], Train Loss: 0.2797, Valid Loss: 0.3146\nEpoch [2/7], Step [320/1218], Train Loss: 0.3990, Valid Loss: 0.3099\nEpoch [2/7], Step [322/1218], Train Loss: 0.2001, Valid Loss: 0.3134\nEpoch [2/7], Step [324/1218], Train Loss: 0.2128, Valid Loss: 0.3120\nEpoch [2/7], Step [326/1218], Train Loss: 0.1234, Valid Loss: 0.3102\nEpoch [2/7], Step [328/1218], Train Loss: 0.1925, Valid Loss: 0.3100\nEpoch [2/7], Step [330/1218], Train Loss: 0.3481, Valid Loss: 0.3126\nEpoch [2/7], Step [332/1218], Train Loss: 0.1972, Valid Loss: 0.3544\nEpoch [2/7], Step [334/1218], Train Loss: 0.3593, Valid 
Loss: 0.3074\nEpoch [2/7], Step [336/1218], Train Loss: 0.2324, Valid Loss: 0.3051\nEpoch [2/7], Step [338/1218], Train Loss: 0.2095, Valid Loss: 0.3135\nEpoch [2/7], Step [340/1218], Train Loss: 0.4997, Valid Loss: 0.3179\nEpoch [2/7], Step [342/1218], Train Loss: 0.1392, Valid Loss: 0.3191\nEpoch [2/7], Step [344/1218], Train Loss: 0.2608, Valid Loss: 0.3231\nEpoch [2/7], Step [346/1218], Train Loss: 0.4989, Valid Loss: 0.3243\nEpoch [2/7], Step [348/1218], Train Loss: 0.2852, Valid Loss: 0.3217\nEpoch [3/7], Step [350/1218], Train Loss: 0.1868, Valid Loss: 0.3238\nEpoch [3/7], Step [352/1218], Train Loss: 0.2626, Valid Loss: 0.3262\nEpoch [3/7], Step [354/1218], Train Loss: 0.2945, Valid Loss: 0.3255\nEpoch [3/7], Step [356/1218], Train Loss: 0.1823, Valid Loss: 0.3239\nEpoch [3/7], Step [358/1218], Train Loss: 0.1622, Valid Loss: 0.3207\nEpoch [3/7], Step [360/1218], Train Loss: 0.1410, Valid Loss: 0.3153\nEpoch [3/7], Step [362/1218], Train Loss: 0.1801, Valid Loss: 0.3102\nEpoch [3/7], Step [364/1218], Train Loss: 0.1371, Valid Loss: 0.3087\nEpoch [3/7], Step [366/1218], Train Loss: 0.2699, Valid Loss: 0.3100\nEpoch [3/7], Step [368/1218], Train Loss: 0.2048, Valid Loss: 0.3133\nEpoch [3/7], Step [370/1218], Train Loss: 0.2939, Valid Loss: 0.3161\nEpoch [3/7], Step [372/1218], Train Loss: 0.1883, Valid Loss: 0.3178\nEpoch [3/7], Step [374/1218], Train Loss: 0.2159, Valid Loss: 0.3187\nEpoch [3/7], Step [376/1218], Train Loss: 0.2714, Valid Loss: 0.3288\nEpoch [3/7], Step [378/1218], Train Loss: 0.1661, Valid Loss: 0.3360\nEpoch [3/7], Step [380/1218], Train Loss: 0.1411, Valid Loss: 0.3371\nEpoch [3/7], Step [382/1218], Train Loss: 0.0970, Valid Loss: 0.3317\nEpoch [3/7], Step [384/1218], Train Loss: 0.2599, Valid Loss: 0.3279\nEpoch [3/7], Step [386/1218], Train Loss: 0.2461, Valid Loss: 0.3263\nEpoch [3/7], Step [388/1218], Train Loss: 0.1865, Valid Loss: 0.3208\nEpoch [3/7], Step [390/1218], Train Loss: 0.2014, Valid Loss: 0.3175\nEpoch [3/7], Step 
[392/1218], Train Loss: 0.3206, Valid Loss: 0.3167\nEpoch [3/7], Step [394/1218], Train Loss: 0.1331, Valid Loss: 0.3193\nEpoch [3/7], Step [396/1218], Train Loss: 0.2083, Valid Loss: 0.3214\nEpoch [3/7], Step [398/1218], Train Loss: 0.1693, Valid Loss: 0.3341\nEpoch [3/7], Step [400/1218], Train Loss: 0.0869, Valid Loss: 0.3369\nEpoch [3/7], Step [402/1218], Train Loss: 0.2088, Valid Loss: 0.3302\nEpoch [3/7], Step [404/1218], Train Loss: 0.1279, Valid Loss: 0.3245\nEpoch [3/7], Step [406/1218], Train Loss: 0.1454, Valid Loss: 0.3213\nEpoch [3/7], Step [408/1218], Train Loss: 0.1738, Valid Loss: 0.3254\nEpoch [3/7], Step [410/1218], Train Loss: 0.1895, Valid Loss: 0.3376\nEpoch [3/7], Step [412/1218], Train Loss: 0.1931, Valid Loss: 0.3389\nEpoch [3/7], Step [414/1218], Train Loss: 0.1835, Valid Loss: 0.3321\nEpoch [3/7], Step [416/1218], Train Loss: 0.1600, Valid Loss: 0.3275\nEpoch [3/7], Step [418/1218], Train Loss: 0.1013, Valid Loss: 0.3295\nEpoch [3/7], Step [420/1218], Train Loss: 0.1497, Valid Loss: 0.3343\nEpoch [3/7], Step [422/1218], Train Loss: 0.1167, Valid Loss: 0.3368\nEpoch [3/7], Step [424/1218], Train Loss: 0.2105, Valid Loss: 0.3346\nEpoch [3/7], Step [426/1218], Train Loss: 0.2173, Valid Loss: 0.3347\nEpoch [3/7], Step [428/1218], Train Loss: 0.1705, Valid Loss: 0.3339\nEpoch [3/7], Step [430/1218], Train Loss: 0.2058, Valid Loss: 0.3336\nEpoch [3/7], Step [432/1218], Train Loss: 0.1373, Valid Loss: 0.3310\nEpoch [3/7], Step [434/1218], Train Loss: 0.1740, Valid Loss: 0.3217\nEpoch [3/7], Step [436/1218], Train Loss: 0.1972, Valid Loss: 0.3201\nEpoch [3/7], Step [438/1218], Train Loss: 0.1531, Valid Loss: 0.3204\nEpoch [3/7], Step [440/1218], Train Loss: 0.1429, Valid Loss: 0.3230\nEpoch [3/7], Step [442/1218], Train Loss: 0.1729, Valid Loss: 0.3240\nEpoch [3/7], Step [444/1218], Train Loss: 0.1854, Valid Loss: 0.3259\nEpoch [3/7], Step [446/1218], Train Loss: 0.0931, Valid Loss: 0.3284\nEpoch [3/7], Step [448/1218], Train Loss: 0.2254, Valid 
Loss: 0.3307\nEpoch [3/7], Step [450/1218], Train Loss: 0.2525, Valid Loss: 0.3296\nEpoch [3/7], Step [452/1218], Train Loss: 0.2326, Valid Loss: 0.3279\nEpoch [3/7], Step [454/1218], Train Loss: 0.1741, Valid Loss: 0.3331\nEpoch [3/7], Step [456/1218], Train Loss: 0.1766, Valid Loss: 0.3372\nEpoch [3/7], Step [458/1218], Train Loss: 0.1740, Valid Loss: 0.3320\nEpoch [3/7], Step [460/1218], Train Loss: 0.0886, Valid Loss: 0.3266\nEpoch [3/7], Step [462/1218], Train Loss: 0.1043, Valid Loss: 0.3313\nEpoch [3/7], Step [464/1218], Train Loss: 0.0774, Valid Loss: 0.3421\nEpoch [3/7], Step [466/1218], Train Loss: 0.0903, Valid Loss: 0.3476\nEpoch [3/7], Step [468/1218], Train Loss: 0.1944, Valid Loss: 0.3482\nEpoch [3/7], Step [470/1218], Train Loss: 0.0698, Valid Loss: 0.4000\nEpoch [3/7], Step [472/1218], Train Loss: 0.2801, Valid Loss: 0.4119\nEpoch [3/7], Step [474/1218], Train Loss: 0.3206, Valid Loss: 0.3933\nEpoch [3/7], Step [476/1218], Train Loss: 0.2820, Valid Loss: 0.3815\nEpoch [3/7], Step [478/1218], Train Loss: 0.1448, Valid Loss: 0.3791\nEpoch [3/7], Step [480/1218], Train Loss: 0.1075, Valid Loss: 0.3796\nEpoch [3/7], Step [482/1218], Train Loss: 0.2604, Valid Loss: 0.3793\nEpoch [3/7], Step [484/1218], Train Loss: 0.2151, Valid Loss: 0.3775\nEpoch [3/7], Step [486/1218], Train Loss: 0.3375, Valid Loss: 0.3656\nEpoch [3/7], Step [488/1218], Train Loss: 0.2232, Valid Loss: 0.3635\nEpoch [3/7], Step [490/1218], Train Loss: 0.3073, Valid Loss: 0.3648\nEpoch [3/7], Step [492/1218], Train Loss: 0.1990, Valid Loss: 0.3578\nEpoch [3/7], Step [494/1218], Train Loss: 0.1442, Valid Loss: 0.3450\nEpoch [3/7], Step [496/1218], Train Loss: 0.2346, Valid Loss: 0.3372\nEpoch [3/7], Step [498/1218], Train Loss: 0.1280, Valid Loss: 0.3339\nEpoch [3/7], Step [500/1218], Train Loss: 0.2053, Valid Loss: 0.3333\nEpoch [3/7], Step [502/1218], Train Loss: 0.1566, Valid Loss: 0.3371\nEpoch [3/7], Step [504/1218], Train Loss: 0.1494, Valid Loss: 0.3514\nEpoch [3/7], Step 
[506/1218], Train Loss: 0.1958, Valid Loss: 0.3558\nEpoch [3/7], Step [508/1218], Train Loss: 0.1152, Valid Loss: 0.3535\nEpoch [3/7], Step [510/1218], Train Loss: 0.1834, Valid Loss: 0.3496\nEpoch [3/7], Step [512/1218], Train Loss: 0.2381, Valid Loss: 0.3410\nEpoch [3/7], Step [514/1218], Train Loss: 0.2608, Valid Loss: 0.3358\nEpoch [3/7], Step [516/1218], Train Loss: 0.1879, Valid Loss: 0.3414\nEpoch [3/7], Step [518/1218], Train Loss: 0.1479, Valid Loss: 0.3555\nEpoch [3/7], Step [520/1218], Train Loss: 0.2020, Valid Loss: 0.3717\nEpoch [3/7], Step [522/1218], Train Loss: 0.2262, Valid Loss: 0.3968\nEpoch [4/7], Step [524/1218], Train Loss: 0.1374, Valid Loss: 0.4092\nEpoch [4/7], Step [526/1218], Train Loss: 0.1396, Valid Loss: 0.3889\nEpoch [4/7], Step [528/1218], Train Loss: 0.0342, Valid Loss: 0.3682\nEpoch [4/7], Step [530/1218], Train Loss: 0.0593, Valid Loss: 0.3527\nEpoch [4/7], Step [532/1218], Train Loss: 0.1167, Valid Loss: 0.3433\nEpoch [4/7], Step [534/1218], Train Loss: 0.0919, Valid Loss: 0.3433\nEpoch [4/7], Step [536/1218], Train Loss: 0.0971, Valid Loss: 0.3507\nEpoch [4/7], Step [538/1218], Train Loss: 0.1573, Valid Loss: 0.3520\nEpoch [4/7], Step [540/1218], Train Loss: 0.1153, Valid Loss: 0.3517\nEpoch [4/7], Step [542/1218], Train Loss: 0.0272, Valid Loss: 0.3548\nEpoch [4/7], Step [544/1218], Train Loss: 0.1360, Valid Loss: 0.3605\nEpoch [4/7], Step [546/1218], Train Loss: 0.1285, Valid Loss: 0.3662\nEpoch [4/7], Step [548/1218], Train Loss: 0.1275, Valid Loss: 0.3671\nEpoch [4/7], Step [550/1218], Train Loss: 0.1594, Valid Loss: 0.3657\nEpoch [4/7], Step [552/1218], Train Loss: 0.0610, Valid Loss: 0.3636\nEpoch [4/7], Step [554/1218], Train Loss: 0.0781, Valid Loss: 0.3638\nEpoch [4/7], Step [556/1218], Train Loss: 0.0990, Valid Loss: 0.3658\nEpoch [4/7], Step [558/1218], Train Loss: 0.0493, Valid Loss: 0.3714\nEpoch [4/7], Step [560/1218], Train Loss: 0.1593, Valid Loss: 0.3738\nEpoch [4/7], Step [562/1218], Train Loss: 0.1735, Valid 
Loss: 0.3718\nEpoch [4/7], Step [564/1218], Train Loss: 0.0514, Valid Loss: 0.3699\nEpoch [4/7], Step [566/1218], Train Loss: 0.0813, Valid Loss: 0.3716\nEpoch [4/7], Step [568/1218], Train Loss: 0.0606, Valid Loss: 0.3703\nEpoch [4/7], Step [570/1218], Train Loss: 0.1698, Valid Loss: 0.3676\nEpoch [4/7], Step [572/1218], Train Loss: 0.0942, Valid Loss: 0.3671\nEpoch [4/7], Step [574/1218], Train Loss: 0.0241, Valid Loss: 0.3699\nEpoch [4/7], Step [576/1218], Train Loss: 0.0847, Valid Loss: 0.3728\nEpoch [4/7], Step [578/1218], Train Loss: 0.0815, Valid Loss: 0.3760\nEpoch [4/7], Step [580/1218], Train Loss: 0.0648, Valid Loss: 0.3795\nEpoch [4/7], Step [582/1218], Train Loss: 0.0613, Valid Loss: 0.3817\nEpoch [4/7], Step [584/1218], Train Loss: 0.1161, Valid Loss: 0.3835\nEpoch [4/7], Step [586/1218], Train Loss: 0.1267, Valid Loss: 0.3858\nEpoch [4/7], Step [588/1218], Train Loss: 0.1478, Valid Loss: 0.3879\nEpoch [4/7], Step [590/1218], Train Loss: 0.0874, Valid Loss: 0.3899\nEpoch [4/7], Step [592/1218], Train Loss: 0.1116, Valid Loss: 0.3905\nEpoch [4/7], Step [594/1218], Train Loss: 0.1411, Valid Loss: 0.3915\nEpoch [4/7], Step [596/1218], Train Loss: 0.1684, Valid Loss: 0.3920\nEpoch [4/7], Step [598/1218], Train Loss: 0.0541, Valid Loss: 0.3929\nEpoch [4/7], Step [600/1218], Train Loss: 0.1172, Valid Loss: 0.3978\nEpoch [4/7], Step [602/1218], Train Loss: 0.2472, Valid Loss: 0.4071\nEpoch [4/7], Step [604/1218], Train Loss: 0.1567, Valid Loss: 0.4048\nEpoch [4/7], Step [606/1218], Train Loss: 0.1181, Valid Loss: 0.3914\nEpoch [4/7], Step [608/1218], Train Loss: 0.1444, Valid Loss: 0.3902\nEpoch [4/7], Step [610/1218], Train Loss: 0.1348, Valid Loss: 0.4002\nEpoch [4/7], Step [612/1218], Train Loss: 0.0855, Valid Loss: 0.4138\nEpoch [4/7], Step [614/1218], Train Loss: 0.2024, Valid Loss: 0.4184\nEpoch [4/7], Step [616/1218], Train Loss: 0.0781, Valid Loss: 0.4118\nEpoch [4/7], Step [618/1218], Train Loss: 0.0518, Valid Loss: 0.4056\nEpoch [4/7], Step 
[620/1218], Train Loss: 0.2194, Valid Loss: 0.4039\nEpoch [4/7], Step [622/1218], Train Loss: 0.2027, Valid Loss: 0.4060\nEpoch [4/7], Step [624/1218], Train Loss: 0.0231, Valid Loss: 0.4079\nEpoch [4/7], Step [626/1218], Train Loss: 0.1095, Valid Loss: 0.4068\nEpoch [4/7], Step [628/1218], Train Loss: 0.0622, Valid Loss: 0.4069\nEpoch [4/7], Step [630/1218], Train Loss: 0.0499, Valid Loss: 0.4108\nEpoch [4/7], Step [632/1218], Train Loss: 0.0638, Valid Loss: 0.4182\nEpoch [4/7], Step [634/1218], Train Loss: 0.1614, Valid Loss: 0.4163\nEpoch [4/7], Step [636/1218], Train Loss: 0.1011, Valid Loss: 0.4197\nEpoch [4/7], Step [638/1218], Train Loss: 0.1592, Valid Loss: 0.4201\nEpoch [4/7], Step [640/1218], Train Loss: 0.0845, Valid Loss: 0.4186\nEpoch [4/7], Step [642/1218], Train Loss: 0.0805, Valid Loss: 0.4138\nEpoch [4/7], Step [644/1218], Train Loss: 0.0124, Valid Loss: 0.4131\nEpoch [4/7], Step [646/1218], Train Loss: 0.0462, Valid Loss: 0.4151\nEpoch [4/7], Step [648/1218], Train Loss: 0.1725, Valid Loss: 0.4123\nEpoch [4/7], Step [650/1218], Train Loss: 0.1473, Valid Loss: 0.4110\nEpoch [4/7], Step [652/1218], Train Loss: 0.1547, Valid Loss: 0.4142\nEpoch [4/7], Step [654/1218], Train Loss: 0.1013, Valid Loss: 0.4148\nEpoch [4/7], Step [656/1218], Train Loss: 0.0700, Valid Loss: 0.4172\nEpoch [4/7], Step [658/1218], Train Loss: 0.0761, Valid Loss: 0.4209\nEpoch [4/7], Step [660/1218], Train Loss: 0.0178, Valid Loss: 0.4264\nEpoch [4/7], Step [662/1218], Train Loss: 0.2152, Valid Loss: 0.4204\nEpoch [4/7], Step [664/1218], Train Loss: 0.1996, Valid Loss: 0.4146\nEpoch [4/7], Step [666/1218], Train Loss: 0.2235, Valid Loss: 0.4218\nEpoch [4/7], Step [668/1218], Train Loss: 0.0577, Valid Loss: 0.4422\nEpoch [4/7], Step [670/1218], Train Loss: 0.0504, Valid Loss: 0.4550\nEpoch [4/7], Step [672/1218], Train Loss: 0.1416, Valid Loss: 0.4424\nEpoch [4/7], Step [674/1218], Train Loss: 0.1791, Valid Loss: 0.4202\nEpoch [4/7], Step [676/1218], Train Loss: 0.0867, Valid 
Loss: 0.4193\nEpoch [4/7], Step [678/1218], Train Loss: 0.2005, Valid Loss: 0.4238\nEpoch [4/7], Step [680/1218], Train Loss: 0.1339, Valid Loss: 0.4257\nEpoch [4/7], Step [682/1218], Train Loss: 0.0561, Valid Loss: 0.4312\nEpoch [4/7], Step [684/1218], Train Loss: 0.1045, Valid Loss: 0.4327\nEpoch [4/7], Step [686/1218], Train Loss: 0.1955, Valid Loss: 0.4230\nEpoch [4/7], Step [688/1218], Train Loss: 0.0645, Valid Loss: 0.4207\nEpoch [4/7], Step [690/1218], Train Loss: 0.1611, Valid Loss: 0.4249\nEpoch [4/7], Step [692/1218], Train Loss: 0.1335, Valid Loss: 0.4314\nEpoch [4/7], Step [694/1218], Train Loss: 0.1200, Valid Loss: 0.4324\nEpoch [4/7], Step [696/1218], Train Loss: 0.1126, Valid Loss: 0.4291\nEpoch [5/7], Step [698/1218], Train Loss: 0.0353, Valid Loss: 0.4236\nEpoch [5/7], Step [700/1218], Train Loss: 0.0642, Valid Loss: 0.4156\nEpoch [5/7], Step [702/1218], Train Loss: 0.0420, Valid Loss: 0.4132\nEpoch [5/7], Step [704/1218], Train Loss: 0.0838, Valid Loss: 0.4133\nEpoch [5/7], Step [706/1218], Train Loss: 0.0725, Valid Loss: 0.4163\nEpoch [5/7], Step [708/1218], Train Loss: 0.0841, Valid Loss: 0.4200\nEpoch [5/7], Step [710/1218], Train Loss: 0.0243, Valid Loss: 0.4232\nEpoch [5/7], Step [712/1218], Train Loss: 0.0836, Valid Loss: 0.4271\nEpoch [5/7], Step [714/1218], Train Loss: 0.0547, Valid Loss: 0.4289\nEpoch [5/7], Step [716/1218], Train Loss: 0.0571, Valid Loss: 0.4286\nEpoch [5/7], Step [718/1218], Train Loss: 0.0370, Valid Loss: 0.4308\nEpoch [5/7], Step [720/1218], Train Loss: 0.0301, Valid Loss: 0.4343\nEpoch [5/7], Step [722/1218], Train Loss: 0.0374, Valid Loss: 0.4369\nEpoch [5/7], Step [724/1218], Train Loss: 0.0354, Valid Loss: 0.4398\nEpoch [5/7], Step [726/1218], Train Loss: 0.0570, Valid Loss: 0.4395\nEpoch [5/7], Step [728/1218], Train Loss: 0.0458, Valid Loss: 0.4398\nEpoch [5/7], Step [730/1218], Train Loss: 0.0199, Valid Loss: 0.4413\nEpoch [5/7], Step [732/1218], Train Loss: 0.0182, Valid Loss: 0.4424\nEpoch [5/7], Step 
[734/1218], Train Loss: 0.0407, Valid Loss: 0.4427\nEpoch [5/7], Step [736/1218], Train Loss: 0.0279, Valid Loss: 0.4423\nEpoch [5/7], Step [738/1218], Train Loss: 0.0220, Valid Loss: 0.4420\nEpoch [5/7], Step [740/1218], Train Loss: 0.0348, Valid Loss: 0.4422\nEpoch [5/7], Step [742/1218], Train Loss: 0.0460, Valid Loss: 0.4430\nEpoch [5/7], Step [744/1218], Train Loss: 0.0150, Valid Loss: 0.4448\nEpoch [5/7], Step [746/1218], Train Loss: 0.0528, Valid Loss: 0.4461\nEpoch [5/7], Step [748/1218], Train Loss: 0.0895, Valid Loss: 0.4459\nEpoch [5/7], Step [750/1218], Train Loss: 0.1253, Valid Loss: 0.4440\nEpoch [5/7], Step [752/1218], Train Loss: 0.0370, Valid Loss: 0.4422\nEpoch [5/7], Step [754/1218], Train Loss: 0.0378, Valid Loss: 0.4404\nEpoch [5/7], Step [756/1218], Train Loss: 0.0717, Valid Loss: 0.4384\nEpoch [5/7], Step [758/1218], Train Loss: 0.1029, Valid Loss: 0.4368\nEpoch [5/7], Step [760/1218], Train Loss: 0.0484, Valid Loss: 0.4368\nEpoch [5/7], Step [762/1218], Train Loss: 0.0205, Valid Loss: 0.4387\nEpoch [5/7], Step [764/1218], Train Loss: 0.0199, Valid Loss: 0.4401\nEpoch [5/7], Step [766/1218], Train Loss: 0.0586, Valid Loss: 0.4410\nEpoch [5/7], Step [768/1218], Train Loss: 0.0524, Valid Loss: 0.4437\nEpoch [5/7], Step [770/1218], Train Loss: 0.0827, Valid Loss: 0.4482\nEpoch [5/7], Step [772/1218], Train Loss: 0.0604, Valid Loss: 0.4513\nEpoch [5/7], Step [774/1218], Train Loss: 0.0375, Valid Loss: 0.4517\nEpoch [5/7], Step [776/1218], Train Loss: 0.0471, Valid Loss: 0.4520\nEpoch [5/7], Step [778/1218], Train Loss: 0.1298, Valid Loss: 0.4529\nEpoch [5/7], Step [780/1218], Train Loss: 0.0661, Valid Loss: 0.4555\nEpoch [5/7], Step [782/1218], Train Loss: 0.0832, Valid Loss: 0.4538\nEpoch [5/7], Step [784/1218], Train Loss: 0.0087, Valid Loss: 0.4541\nEpoch [5/7], Step [786/1218], Train Loss: 0.0607, Valid Loss: 0.4531\nEpoch [5/7], Step [788/1218], Train Loss: 0.0335, Valid Loss: 0.4534\nEpoch [5/7], Step [790/1218], Train Loss: 0.0480, Valid 
Loss: 0.4549\nEpoch [5/7], Step [792/1218], Train Loss: 0.0735, Valid Loss: 0.4557\nEpoch [5/7], Step [794/1218], Train Loss: 0.1511, Valid Loss: 0.4545\nEpoch [5/7], Step [796/1218], Train Loss: 0.0692, Valid Loss: 0.4604\nEpoch [5/7], Step [798/1218], Train Loss: 0.0526, Valid Loss: 0.4679\nEpoch [5/7], Step [800/1218], Train Loss: 0.1027, Valid Loss: 0.4763\nEpoch [5/7], Step [802/1218], Train Loss: 0.0738, Valid Loss: 0.4837\nEpoch [5/7], Step [804/1218], Train Loss: 0.0241, Valid Loss: 0.4853\nEpoch [5/7], Step [806/1218], Train Loss: 0.0673, Valid Loss: 0.4841\nEpoch [5/7], Step [808/1218], Train Loss: 0.0662, Valid Loss: 0.4729\nEpoch [5/7], Step [810/1218], Train Loss: 0.0770, Valid Loss: 0.4647\nEpoch [5/7], Step [812/1218], Train Loss: 0.0355, Valid Loss: 0.4626\nEpoch [5/7], Step [814/1218], Train Loss: 0.1215, Valid Loss: 0.4572\nEpoch [5/7], Step [816/1218], Train Loss: 0.0479, Valid Loss: 0.4556\nEpoch [5/7], Step [818/1218], Train Loss: 0.0582, Valid Loss: 0.4931\nEpoch [5/7], Step [820/1218], Train Loss: 0.0715, Valid Loss: 0.5742\nEpoch [5/7], Step [822/1218], Train Loss: 0.2267, Valid Loss: 0.5444\nEpoch [5/7], Step [824/1218], Train Loss: 0.0540, Valid Loss: 0.4810\nEpoch [5/7], Step [826/1218], Train Loss: 0.0246, Valid Loss: 0.4793\nEpoch [5/7], Step [828/1218], Train Loss: 0.1061, Valid Loss: 0.5101\nEpoch [5/7], Step [830/1218], Train Loss: 0.0695, Valid Loss: 0.5419\nEpoch [5/7], Step [832/1218], Train Loss: 0.0544, Valid Loss: 0.5545\nEpoch [5/7], Step [834/1218], Train Loss: 0.1372, Valid Loss: 0.5285\nEpoch [5/7], Step [836/1218], Train Loss: 0.1221, Valid Loss: 0.4982\nEpoch [5/7], Step [838/1218], Train Loss: 0.0396, Valid Loss: 0.4791\nEpoch [5/7], Step [840/1218], Train Loss: 0.0232, Valid Loss: 0.4763\nEpoch [5/7], Step [842/1218], Train Loss: 0.0934, Valid Loss: 0.4763\nEpoch [5/7], Step [844/1218], Train Loss: 0.1441, Valid Loss: 0.4769\nEpoch [5/7], Step [846/1218], Train Loss: 0.0461, Valid Loss: 0.4784\nEpoch [5/7], Step 
[848/1218], Train Loss: 0.0971, Valid Loss: 0.4780\nEpoch [5/7], Step [850/1218], Train Loss: 0.0556, Valid Loss: 0.4758\nEpoch [5/7], Step [852/1218], Train Loss: 0.1517, Valid Loss: 0.4745\nEpoch [5/7], Step [854/1218], Train Loss: 0.0889, Valid Loss: 0.4746\nEpoch [5/7], Step [856/1218], Train Loss: 0.0454, Valid Loss: 0.4756\nEpoch [5/7], Step [858/1218], Train Loss: 0.0765, Valid Loss: 0.4747\nEpoch [5/7], Step [860/1218], Train Loss: 0.0962, Valid Loss: 0.4757\nEpoch [5/7], Step [862/1218], Train Loss: 0.0823, Valid Loss: 0.4729\nEpoch [5/7], Step [864/1218], Train Loss: 0.0161, Valid Loss: 0.4698\nEpoch [5/7], Step [866/1218], Train Loss: 0.0443, Valid Loss: 0.4697\nEpoch [5/7], Step [868/1218], Train Loss: 0.1006, Valid Loss: 0.4724\nEpoch [5/7], Step [870/1218], Train Loss: 0.1069, Valid Loss: 0.4776\nEpoch [6/7], Step [872/1218], Train Loss: 0.0256, Valid Loss: 0.4819\nEpoch [6/7], Step [874/1218], Train Loss: 0.0660, Valid Loss: 0.4872\nEpoch [6/7], Step [876/1218], Train Loss: 0.0402, Valid Loss: 0.4945\nEpoch [6/7], Step [878/1218], Train Loss: 0.0222, Valid Loss: 0.4989\nEpoch [6/7], Step [880/1218], Train Loss: 0.0429, Valid Loss: 0.4995\nEpoch [6/7], Step [882/1218], Train Loss: 0.0065, Valid Loss: 0.5031\nEpoch [6/7], Step [884/1218], Train Loss: 0.0472, Valid Loss: 0.5071\nEpoch [6/7], Step [886/1218], Train Loss: 0.0172, Valid Loss: 0.5106\nEpoch [6/7], Step [888/1218], Train Loss: 0.0463, Valid Loss: 0.5131\nEpoch [6/7], Step [890/1218], Train Loss: 0.0139, Valid Loss: 0.5180\nEpoch [6/7], Step [892/1218], Train Loss: 0.0284, Valid Loss: 0.5207\nEpoch [6/7], Step [894/1218], Train Loss: 0.0855, Valid Loss: 0.5216\nEpoch [6/7], Step [896/1218], Train Loss: 0.0249, Valid Loss: 0.5195\nEpoch [6/7], Step [898/1218], Train Loss: 0.0160, Valid Loss: 0.5175\nEpoch [6/7], Step [900/1218], Train Loss: 0.0260, Valid Loss: 0.5197\nEpoch [6/7], Step [902/1218], Train Loss: 0.0835, Valid Loss: 0.5247\nEpoch [6/7], Step [904/1218], Train Loss: 0.0150, Valid 
Loss: 0.5332\nEpoch [6/7], Step [906/1218], Train Loss: 0.0606, Valid Loss: 0.5439\nEpoch [6/7], Step [908/1218], Train Loss: 0.0188, Valid Loss: 0.5492\nEpoch [6/7], Step [910/1218], Train Loss: 0.0287, Valid Loss: 0.5530\nEpoch [6/7], Step [912/1218], Train Loss: 0.0651, Valid Loss: 0.5524\nEpoch [6/7], Step [914/1218], Train Loss: 0.0311, Valid Loss: 0.5466\nEpoch [6/7], Step [916/1218], Train Loss: 0.0582, Valid Loss: 0.5382\nEpoch [6/7], Step [918/1218], Train Loss: 0.0222, Valid Loss: 0.5376\nEpoch [6/7], Step [920/1218], Train Loss: 0.0136, Valid Loss: 0.5376\nEpoch [6/7], Step [922/1218], Train Loss: 0.0091, Valid Loss: 0.5380\nEpoch [6/7], Step [924/1218], Train Loss: 0.0105, Valid Loss: 0.5399\nEpoch [6/7], Step [926/1218], Train Loss: 0.0857, Valid Loss: 0.5398\nEpoch [6/7], Step [928/1218], Train Loss: 0.0436, Valid Loss: 0.5451\nEpoch [6/7], Step [930/1218], Train Loss: 0.0301, Valid Loss: 0.5524\nEpoch [6/7], Step [932/1218], Train Loss: 0.0089, Valid Loss: 0.5579\nEpoch [6/7], Step [934/1218], Train Loss: 0.0276, Valid Loss: 0.5585\nEpoch [6/7], Step [936/1218], Train Loss: 0.0506, Valid Loss: 0.5546\nEpoch [6/7], Step [938/1218], Train Loss: 0.0537, Valid Loss: 0.5450\nEpoch [6/7], Step [940/1218], Train Loss: 0.0435, Valid Loss: 0.5401\nEpoch [6/7], Step [942/1218], Train Loss: 0.0217, Valid Loss: 0.5424\nEpoch [6/7], Step [944/1218], Train Loss: 0.0306, Valid Loss: 0.5461\nEpoch [6/7], Step [946/1218], Train Loss: 0.0533, Valid Loss: 0.5500\nEpoch [6/7], Step [948/1218], Train Loss: 0.1149, Valid Loss: 0.5536\nEpoch [6/7], Step [950/1218], Train Loss: 0.0165, Valid Loss: 0.5606\nEpoch [6/7], Step [952/1218], Train Loss: 0.0088, Valid Loss: 0.5678\nEpoch [6/7], Step [954/1218], Train Loss: 0.0134, Valid Loss: 0.5717\nEpoch [6/7], Step [956/1218], Train Loss: 0.0525, Valid Loss: 0.5669\nEpoch [6/7], Step [958/1218], Train Loss: 0.0227, Valid Loss: 0.5633\nEpoch [6/7], Step [960/1218], Train Loss: 0.0534, Valid Loss: 0.5669\nEpoch [6/7], Step 
[962/1218], Train Loss: 0.0059, Valid Loss: 0.5752\nEpoch [6/7], Step [964/1218], Train Loss: 0.0438, Valid Loss: 0.5840\nEpoch [6/7], Step [966/1218], Train Loss: 0.0183, Valid Loss: 0.5938\nEpoch [6/7], Step [968/1218], Train Loss: 0.0467, Valid Loss: 0.6015\nEpoch [6/7], Step [970/1218], Train Loss: 0.0187, Valid Loss: 0.6064\nEpoch [6/7], Step [972/1218], Train Loss: 0.0450, Valid Loss: 0.6116\nEpoch [6/7], Step [974/1218], Train Loss: 0.0625, Valid Loss: 0.6109\nEpoch [6/7], Step [976/1218], Train Loss: 0.0167, Valid Loss: 0.6049\nEpoch [6/7], Step [978/1218], Train Loss: 0.0669, Valid Loss: 0.5965\nEpoch [6/7], Step [980/1218], Train Loss: 0.0616, Valid Loss: 0.5882\nEpoch [6/7], Step [982/1218], Train Loss: 0.0978, Valid Loss: 0.5745\nEpoch [6/7], Step [984/1218], Train Loss: 0.0701, Valid Loss: 0.5704\nEpoch [6/7], Step [986/1218], Train Loss: 0.0538, Valid Loss: 0.5728\nEpoch [6/7], Step [988/1218], Train Loss: 0.0129, Valid Loss: 0.5755\nEpoch [6/7], Step [990/1218], Train Loss: 0.0247, Valid Loss: 0.5732\nEpoch [6/7], Step [992/1218], Train Loss: 0.0276, Valid Loss: 0.5659\nEpoch [6/7], Step [994/1218], Train Loss: 0.0351, Valid Loss: 0.5581\nEpoch [6/7], Step [996/1218], Train Loss: 0.0792, Valid Loss: 0.5525\nEpoch [6/7], Step [998/1218], Train Loss: 0.0183, Valid Loss: 0.5500\nEpoch [6/7], Step [1000/1218], Train Loss: 0.0359, Valid Loss: 0.5488\nEpoch [6/7], Step [1002/1218], Train Loss: 0.0182, Valid Loss: 0.5488\nEpoch [6/7], Step [1004/1218], Train Loss: 0.0478, Valid Loss: 0.5481\nEpoch [6/7], Step [1006/1218], Train Loss: 0.0723, Valid Loss: 0.5432\nEpoch [6/7], Step [1008/1218], Train Loss: 0.0204, Valid Loss: 0.5421\nEpoch [6/7], Step [1010/1218], Train Loss: 0.0202, Valid Loss: 0.5428\nEpoch [6/7], Step [1012/1218], Train Loss: 0.0541, Valid Loss: 0.5431\nEpoch [6/7], Step [1014/1218], Train Loss: 0.1515, Valid Loss: 0.5420\nEpoch [6/7], Step [1016/1218], Train Loss: 0.0535, Valid Loss: 0.5426\nEpoch [6/7], Step [1018/1218], Train Loss: 
0.0666, Valid Loss: 0.5452\nEpoch [6/7], Step [1020/1218], Train Loss: 0.0493, Valid Loss: 0.5536\nEpoch [6/7], Step [1022/1218], Train Loss: 0.0348, Valid Loss: 0.5678\nEpoch [6/7], Step [1024/1218], Train Loss: 0.0492, Valid Loss: 0.5857\nEpoch [6/7], Step [1026/1218], Train Loss: 0.0294, Valid Loss: 0.6103\nEpoch [6/7], Step [1028/1218], Train Loss: 0.0477, Valid Loss: 0.6223\nEpoch [6/7], Step [1030/1218], Train Loss: 0.0469, Valid Loss: 0.6232\nEpoch [6/7], Step [1032/1218], Train Loss: 0.0977, Valid Loss: 0.5939\nEpoch [6/7], Step [1034/1218], Train Loss: 0.0865, Valid Loss: 0.5623\nEpoch [6/7], Step [1036/1218], Train Loss: 0.0780, Valid Loss: 0.5447\nEpoch [6/7], Step [1038/1218], Train Loss: 0.1767, Valid Loss: 0.5461\nEpoch [6/7], Step [1040/1218], Train Loss: 0.0512, Valid Loss: 0.5576\nEpoch [6/7], Step [1042/1218], Train Loss: 0.0943, Valid Loss: 0.5630\nEpoch [6/7], Step [1044/1218], Train Loss: 0.0273, Valid Loss: 0.5685\nEpoch [7/7], Step [1046/1218], Train Loss: 0.0362, Valid Loss: 0.5735\nEpoch [7/7], Step [1048/1218], Train Loss: 0.0457, Valid Loss: 0.5761\nEpoch [7/7], Step [1050/1218], Train Loss: 0.0256, Valid Loss: 0.5734\nEpoch [7/7], Step [1052/1218], Train Loss: 0.0460, Valid Loss: 0.5671\nEpoch [7/7], Step [1054/1218], Train Loss: 0.0223, Valid Loss: 0.5613\nEpoch [7/7], Step [1056/1218], Train Loss: 0.0430, Valid Loss: 0.5566\nEpoch [7/7], Step [1058/1218], Train Loss: 0.0230, Valid Loss: 0.5534\nEpoch [7/7], Step [1060/1218], Train Loss: 0.0293, Valid Loss: 0.5531\nEpoch [7/7], Step [1062/1218], Train Loss: 0.0074, Valid Loss: 0.5538\nEpoch [7/7], Step [1064/1218], Train Loss: 0.0196, Valid Loss: 0.5591\nEpoch [7/7], Step [1066/1218], Train Loss: 0.0251, Valid Loss: 0.5647\nEpoch [7/7], Step [1068/1218], Train Loss: 0.0172, Valid Loss: 0.5711\nEpoch [7/7], Step [1070/1218], Train Loss: 0.0253, Valid Loss: 0.5771\nEpoch [7/7], Step [1072/1218], Train Loss: 0.0066, Valid Loss: 0.5840\nEpoch [7/7], Step [1074/1218], Train Loss: 0.0072, 
Valid Loss: 0.5902\nEpoch [7/7], Step [1076/1218], Train Loss: 0.0202, Valid Loss: 0.5964\nEpoch [7/7], Step [1078/1218], Train Loss: 0.0068, Valid Loss: 0.6017\nEpoch [7/7], Step [1080/1218], Train Loss: 0.0167, Valid Loss: 0.6061\nEpoch [7/7], Step [1082/1218], Train Loss: 0.0198, Valid Loss: 0.6078\nEpoch [7/7], Step [1084/1218], Train Loss: 0.0087, Valid Loss: 0.6093\nEpoch [7/7], Step [1086/1218], Train Loss: 0.0163, Valid Loss: 0.6133\nEpoch [7/7], Step [1088/1218], Train Loss: 0.0556, Valid Loss: 0.6088\nEpoch [7/7], Step [1090/1218], Train Loss: 0.0121, Valid Loss: 0.6475\nEpoch [7/7], Step [1092/1218], Train Loss: 0.0572, Valid Loss: 0.7086\nEpoch [7/7], Step [1094/1218], Train Loss: 0.0490, Valid Loss: 0.6962\nEpoch [7/7], Step [1096/1218], Train Loss: 0.0262, Valid Loss: 0.6471\nEpoch [7/7], Step [1098/1218], Train Loss: 0.0220, Valid Loss: 0.6202\nEpoch [7/7], Step [1100/1218], Train Loss: 0.0266, Valid Loss: 0.6124\nEpoch [7/7], Step [1102/1218], Train Loss: 0.0461, Valid Loss: 0.6179\nEpoch [7/7], Step [1104/1218], Train Loss: 0.0883, Valid Loss: 0.6260\nEpoch [7/7], Step [1106/1218], Train Loss: 0.0876, Valid Loss: 0.6275\nEpoch [7/7], Step [1108/1218], Train Loss: 0.0135, Valid Loss: 0.6208\nEpoch [7/7], Step [1110/1218], Train Loss: 0.0661, Valid Loss: 0.6166\nEpoch [7/7], Step [1112/1218], Train Loss: 0.0430, Valid Loss: 0.6157\nEpoch [7/7], Step [1114/1218], Train Loss: 0.0328, Valid Loss: 0.6222\nEpoch [7/7], Step [1116/1218], Train Loss: 0.1019, Valid Loss: 0.6246\nEpoch [7/7], Step [1118/1218], Train Loss: 0.0467, Valid Loss: 0.6249\nEpoch [7/7], Step [1120/1218], Train Loss: 0.0265, Valid Loss: 0.6258\nEpoch [7/7], Step [1122/1218], Train Loss: 0.0128, Valid Loss: 0.6261\nEpoch [7/7], Step [1124/1218], Train Loss: 0.0338, Valid Loss: 0.6192\nEpoch [7/7], Step [1126/1218], Train Loss: 0.0304, Valid Loss: 0.6144\nEpoch [7/7], Step [1128/1218], Train Loss: 0.0528, Valid Loss: 0.6071\nEpoch [7/7], Step [1130/1218], Train Loss: 0.0158, Valid Loss: 
0.6231\nEpoch [7/7], Step [1132/1218], Train Loss: 0.0203, Valid Loss: 0.6248\nEpoch [7/7], Step [1134/1218], Train Loss: 0.0356, Valid Loss: 0.6272\nEpoch [7/7], Step [1136/1218], Train Loss: 0.0520, Valid Loss: 0.6261\nEpoch [7/7], Step [1138/1218], Train Loss: 0.0386, Valid Loss: 0.6081\nEpoch [7/7], Step [1140/1218], Train Loss: 0.0247, Valid Loss: 0.6082\nEpoch [7/7], Step [1142/1218], Train Loss: 0.0204, Valid Loss: 0.6096\nEpoch [7/7], Step [1144/1218], Train Loss: 0.0221, Valid Loss: 0.6111\nEpoch [7/7], Step [1146/1218], Train Loss: 0.0167, Valid Loss: 0.6136\nEpoch [7/7], Step [1148/1218], Train Loss: 0.0290, Valid Loss: 0.6185\nEpoch [7/7], Step [1150/1218], Train Loss: 0.0270, Valid Loss: 0.6241\nEpoch [7/7], Step [1152/1218], Train Loss: 0.0192, Valid Loss: 0.6279\nEpoch [7/7], Step [1154/1218], Train Loss: 0.0133, Valid Loss: 0.6295\nEpoch [7/7], Step [1156/1218], Train Loss: 0.0281, Valid Loss: 0.6326\nEpoch [7/7], Step [1158/1218], Train Loss: 0.0186, Valid Loss: 0.6353\nEpoch [7/7], Step [1160/1218], Train Loss: 0.0216, Valid Loss: 0.6365\nEpoch [7/7], Step [1162/1218], Train Loss: 0.0535, Valid Loss: 0.6559\nEpoch [7/7], Step [1164/1218], Train Loss: 0.0481, Valid Loss: 0.6634\nEpoch [7/7], Step [1166/1218], Train Loss: 0.0169, Valid Loss: 0.6713\nEpoch [7/7], Step [1168/1218], Train Loss: 0.0530, Valid Loss: 0.6902\nEpoch [7/7], Step [1170/1218], Train Loss: 0.0509, Valid Loss: 0.6897\nEpoch [7/7], Step [1172/1218], Train Loss: 0.0922, Valid Loss: 0.6908\nEpoch [7/7], Step [1174/1218], Train Loss: 0.0151, Valid Loss: 0.6928\nEpoch [7/7], Step [1176/1218], Train Loss: 0.0267, Valid Loss: 0.6993\nEpoch [7/7], Step [1178/1218], Train Loss: 0.0721, Valid Loss: 0.7038\nEpoch [7/7], Step [1180/1218], Train Loss: 0.0581, Valid Loss: 0.6859\nEpoch [7/7], Step [1182/1218], Train Loss: 0.0003, Valid Loss: 0.6912\nEpoch [7/7], Step [1184/1218], Train Loss: 0.0336, Valid Loss: 0.6943\nEpoch [7/7], Step [1186/1218], Train Loss: 0.0239, Valid Loss: 
0.6960\nEpoch [7/7], Step [1188/1218], Train Loss: 0.0451, Valid Loss: 0.6716\nEpoch [7/7], Step [1190/1218], Train Loss: 0.0107, Valid Loss: 0.6676\nEpoch [7/7], Step [1192/1218], Train Loss: 0.0193, Valid Loss: 0.6668\nEpoch [7/7], Step [1194/1218], Train Loss: 0.0407, Valid Loss: 0.6679\nEpoch [7/7], Step [1196/1218], Train Loss: 0.0474, Valid Loss: 0.6761\nEpoch [7/7], Step [1198/1218], Train Loss: 0.0987, Valid Loss: 0.6726\nEpoch [7/7], Step [1200/1218], Train Loss: 0.0668, Valid Loss: 0.6773\nEpoch [7/7], Step [1202/1218], Train Loss: 0.0368, Valid Loss: 0.6878\nEpoch [7/7], Step [1204/1218], Train Loss: 0.0303, Valid Loss: 0.6971\nEpoch [7/7], Step [1206/1218], Train Loss: 0.0786, Valid Loss: 0.7210\nEpoch [7/7], Step [1208/1218], Train Loss: 0.0218, Valid Loss: 0.7281\nEpoch [7/7], Step [1210/1218], Train Loss: 0.0655, Valid Loss: 0.7216\nEpoch [7/7], Step [1212/1218], Train Loss: 0.0881, Valid Loss: 0.7147\nEpoch [7/7], Step [1214/1218], Train Loss: 0.0215, Valid Loss: 0.7081\nEpoch [7/7], Step [1216/1218], Train Loss: 0.0063, Valid Loss: 0.7056\nEpoch [7/7], Step [1218/1218], Train Loss: 0.0611, Valid Loss: 0.7006\nModel saved to: ./saved/metrics.pt\nFinished Training!\n"
],
[
"import matplotlib.pyplot as plt\ntrain_loss_list, valid_loss_list, global_steps_list = load_metrics( './saved/metrics.pt')\nplt.plot(global_steps_list, train_loss_list, label='Train')\nplt.plot(global_steps_list, valid_loss_list, label='Valid')\nplt.xlabel('Global Steps')\nplt.ylabel('Loss')\nplt.legend()\nplt.show() ",
"Model loaded from: ./saved/metrics.pt\n"
],
[
"# Evaluation Function\n\ndef evaluate(model, test_loader, version='titletext', threshold=0.5):\n y_pred = []\n y_true = []\n\n model.eval()\n with torch.no_grad():\n for (labels, (title, title_len), (text, text_len), (titletext, titletext_len)), _ in test_loader: \n labels = labels.to(device)\n titletext = titletext.to(device)\n \n titletext_len = titletext_len.to(device)\n output = model(titletext, titletext_len)\n\n output = (output > threshold).int()\n y_pred.extend(output.tolist())\n y_true.extend(labels.tolist())\n \n print('Classification Report:')\n print(classification_report(y_true, y_pred, labels=[1,0], digits=4))\n \n cm = confusion_matrix(y_true, y_pred, labels=[1,0])\n ax= plt.subplot()\n sns.heatmap(cm, annot=True, ax = ax, cmap='Blues', fmt=\"d\")\n\n ax.set_title('Confusion Matrix')\n\n ax.set_xlabel('Predicted Labels')\n ax.set_ylabel('True Labels')\n\n ax.xaxis.set_ticklabels(['FAKE', 'REAL'])\n ax.yaxis.set_ticklabels(['FAKE', 'REAL'])\n \n \nbest_model = FakeNewsNet(hidden_size=300,num_layers=1,bi_lstm=True).to(device)\nprint(best_model)\noptimizer = optim.Adam(best_model.parameters(), lr=0.01)\n\nload_checkpoint('./saved/model.pt', best_model, optimizer)\nevaluate(best_model, test_iter)",
"FakeNewsNet(\n (embedding): Embedding(39578, 256)\n (LSTM): LSTM(256, 300, batch_first=True, bidirectional=True)\n (drop): Dropout(p=0.5, inplace=False)\n (out): Linear(in_features=600, out_features=1, bias=True)\n)\nModel loaded from : ./saved/model.pt\nClassification Report:\n precision recall f1-score support\n\n 1 0.8640 0.8734 0.8687 3010\n 0 0.8981 0.8902 0.8942 3772\n\n accuracy 0.8828 6782\n macro avg 0.8810 0.8818 0.8814 6782\nweighted avg 0.8829 0.8828 0.8828 6782\n\n"
],
[
"def predict(model, sentence):\n from sacremoses import MosesTokenizer\n mt = MosesTokenizer(lang='en')\n tokenized = [tok for tok in mt.tokenize(sentence)] #tokenize the sentence \n indexed = [text_field.vocab.stoi[t] for t in tokenized] #convert to integer sequence\n length = [len(indexed)] #compute no. of words\n tensor = torch.LongTensor(indexed).to(device) #convert to tensor\n tensor = tensor.unsqueeze(1).T #reshape in form of batch,no. of words\n length_tensor = torch.LongTensor(length) #convert to tensor\n prediction = model(tensor, length_tensor) #prediction \n return prediction.item() ",
"_____no_output_____"
],
[
"best_model = FakeNewsNet().to(device)\nprint(predict(best_model,\"Russian warships launched a massive missile attack on the terrorists near Aleppo\"))\n",
"0.46486130356788635\n"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
eca5cc5c1936865e6ca3c64c892434d4cf19eaa8 | 76,346 | ipynb | Jupyter Notebook | Module2/Module2 - Lab5.ipynb | ashishrajsrivastava/DAT210x | 3e90fbe1584ab4584e0db535a4984d99d5e8465b | [
"MIT"
] | null | null | null | Module2/Module2 - Lab5.ipynb | ashishrajsrivastava/DAT210x | 3e90fbe1584ab4584e0db535a4984d99d5e8465b | [
"MIT"
] | null | null | null | Module2/Module2 - Lab5.ipynb | ashishrajsrivastava/DAT210x | 3e90fbe1584ab4584e0db535a4984d99d5e8465b | [
"MIT"
] | null | null | null | 34.436626 | 311 | 0.249234 | [
[
[
"# DAT210x - Programming with Python for DS",
"_____no_output_____"
],
[
"## Module2 - Lab5",
"_____no_output_____"
],
[
"Import and alias Pandas:",
"_____no_output_____"
]
],
[
[
"import pandas as pd",
"_____no_output_____"
]
],
[
[
"As per usual, load up the specified dataset, setting appropriate header labels.",
"_____no_output_____"
]
],
[
[
"df = pd.read_csv('C:\\\\Users\\\\ashish.r\\\\Documents\\\\GitHub\\\\DAT210x\\\\Module2\\\\Datasets\\\\census.data',names=['education', 'age', 'capital-gain', 'race', 'capital-loss', 'hours-per-week', 'sex', 'classification'],na_values='?')\ndf",
"_____no_output_____"
]
],
[
[
"Excellent.\n\nNow, use basic pandas commands to look through the dataset. Get a feel for it before proceeding!\n\nDo the data-types of each column reflect the values you see when you look through the data using a text editor / spread sheet program? If you see `object` where you expect to see `int32` or `float64`, that is a good indicator that there might be a string or missing value or erroneous value in the column.",
"_____no_output_____"
]
],
[
[
"df.dtypes",
"_____no_output_____"
]
],
[
[
"Try use `your_data_frame['your_column'].unique()` or equally, `your_data_frame.your_column.unique()` to see the unique values of each column and identify the rogue values.\n\nIf you find any value that should be properly encoded to NaNs, you can convert them either using the `na_values` parameter when loading the dataframe. Or alternatively, use one of the other methods discussed in the reading.",
"_____no_output_____"
]
],
[
[
"df.classification.unique()",
"_____no_output_____"
]
],
[
[
"Look through your data and identify any potential categorical features. Ensure you properly encode any ordinal and nominal types using the methods discussed in the chapter.\n\nBe careful! Some features can be represented as either categorical or continuous (numerical). If you ever get confused, think to yourself what makes more sense generally---to represent such features with a continuous numeric type... or a series of categories?",
"_____no_output_____"
]
],
[
[
"df.education = df.education.astype(\"category\",\n ordered=True,\n categories=['Preschool','1st-4th','5th-6th','7th-8th','9th','10th','HS-grad','11th','12th','Some-college','Bachelors','Masters','Doctorate']\n ).cat.codes\n\ndf.classification = df.classification.astype(\"category\",\n ordered=True,\n categories=['<=50K','>50K']).cat.codes\ndf.race = df.race.astype(\"category\").cat.codes\ndf.sex = df.sex.astype(\"category\").cat.codes\n",
"_____no_output_____"
]
],
[
[
"Lastly, print out your dataframe!",
"_____no_output_____"
]
],
[
[
"df = pd.get_dummies(df,columns=['race','sex'])\ndf",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
eca5dbb63d4ccb7e6b6a681308c85288bbae8c93 | 7,809 | ipynb | Jupyter Notebook | Chapter05/Exercise82/Exercise82.ipynb | vumaasha/python-workshop | 0fbc21c514a8df5bfffb8db926e451232c6c08bf | [
"MIT"
] | null | null | null | Chapter05/Exercise82/Exercise82.ipynb | vumaasha/python-workshop | 0fbc21c514a8df5bfffb8db926e451232c6c08bf | [
"MIT"
] | null | null | null | Chapter05/Exercise82/Exercise82.ipynb | vumaasha/python-workshop | 0fbc21c514a8df5bfffb8db926e451232c6c08bf | [
"MIT"
] | null | null | null | 26.381757 | 1,318 | 0.527084 | [
[
[
"import datetime",
"_____no_output_____"
],
[
"class MyDate(datetime.date):\n def add_days(self, n):\n return self + datetime.timedelta(n)",
"_____no_output_____"
],
[
"d = MyDate(2019, 12, 1)\nprint(d.add_days(40))\nprint(d.add_days(400))",
"2020-01-10\n2021-01-04\n"
],
[
"class MyInt(int):\n def is_divisible_by(self, x):\n return self % x == 0",
"_____no_output_____"
],
[
"a = MyInt(8)",
"_____no_output_____"
],
[
"a.is_divisible_by(8)",
"_____no_output_____"
],
[
"class Person():\n def __init__(self, first_name, last_name):\n self.first_name = first_name\n self.last_name = last_name\n @property\n def full_name(self):\n return '%s %s' % (self.first_name, self.last_name)\n @full_name.setter\n def full_name(self, name):\n first, last = name.split(' ')\n self.first_name = first\n self.last_name = last\n def speak(self):\n print(\"Hello I am {}\".format(self.first_name))\n print(\"How are you?\")",
"_____no_output_____"
],
[
"p = Person('john','smith')",
"_____no_output_____"
],
[
"p.full_name",
"_____no_output_____"
],
[
"p.full_name = 'john joey smith'",
"_____no_output_____"
],
[
"class BetterPerson(Person):\n @property\n def full_name(self):\n return '%s %s' % (self.first_name, self.last_name)\n @full_name.setter\n def full_name(self, name):\n name_split = name.split(' ')\n self.first_name = name_split[0]\n self.last_name = ' '.join(name_split[1:])\n ",
"_____no_output_____"
],
[
"x = BetterPerson('john','smith')",
"_____no_output_____"
],
[
"x.full_name",
"_____no_output_____"
],
[
"x.full_name = 'john joey smith'",
"_____no_output_____"
],
[
"x.full_name",
"_____no_output_____"
],
[
"class TalkativePerson(Person):\n def __init__(self,first_name, last_name,very_talkative=5):\n super(TalkativePerson,self).__init__(first_name,last_name)\n self.very_talkative = very_talkative\n \n def speak(self):\n super().speak()\n for i in range(self.very_talkative):\n print('It is a pleasure to meet you!')\njohn = TalkativePerson('John', 'Tomic')\njohn.speak()",
"Hello I am John\nHow are you?\nIt is a pleasure to meet you!\nIt is a pleasure to meet you!\nIt is a pleasure to meet you!\nIt is a pleasure to meet you!\nIt is a pleasure to meet you!\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
eca5f218d7186c0244678bb4c504be053e5845f7 | 8,467 | ipynb | Jupyter Notebook | rq_vae_transformer_demo.ipynb | kaz12tech/ai_demos | 6925d1885815a61d6f17eb6af53172c7ed1d99b1 | [
"MIT"
] | 2 | 2022-02-15T00:54:54.000Z | 2022-03-21T14:12:58.000Z | rq_vae_transformer_demo.ipynb | kaz12tech/ai_demos | 6925d1885815a61d6f17eb6af53172c7ed1d99b1 | [
"MIT"
] | null | null | null | rq_vae_transformer_demo.ipynb | kaz12tech/ai_demos | 6925d1885815a61d6f17eb6af53172c7ed1d99b1 | [
"MIT"
] | 1 | 2022-02-15T00:55:03.000Z | 2022-02-15T00:55:03.000Z | 26.709779 | 239 | 0.454234 | [
[
[
"่ซๆ \nhttps://arxiv.org/abs/2203.01941<br>\n<br> \nGitHub \nhttps://github.com/kakaobrain/rq-vae-transformer<br>\n<br>\n<a href=\"https://colab.research.google.com/github/kaz12tech/ai_demos/blob/master/rq_vae_transformer_demo.ipynb\" target=\"_blank\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"## ็ฐๅขใปใใใขใใ",
"_____no_output_____"
],
[
"## GPU็ขบ่ช",
"_____no_output_____"
]
],
[
[
"!nvidia-smi",
"_____no_output_____"
]
],
[
[
"## GitHubใใใณใผใๅๅพ",
"_____no_output_____"
]
],
[
[
"%cd /content\n\n!git clone https://github.com/kakaobrain/rq-vae-transformer.git",
"_____no_output_____"
]
],
[
[
"## ใฉใคใใฉใชใฎใคใณในใใผใซ",
"_____no_output_____"
]
],
[
[
"%cd /content/rq-vae-transformer\n\n# ใใ RESTART RUNTIMEใ่กจ็คบใใใใใใฉใณใฟใคใ ใโใใฉใณใฟใคใ ใๅ่ตทๅใ\n!pip install -r requirements.txt",
"_____no_output_____"
]
],
[
[
"## ใฉใคใใฉใชใฎใคใณใใผใ",
"_____no_output_____"
]
],
[
[
"%cd /content/rq-vae-transformer\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nfrom PIL import Image\nimport yaml\nimport torch\nimport torchvision\nimport clip\nimport torch.nn.functional as F\n\nfrom notebooks.notebook_utils import TextEncoder, load_model, get_generated_images_by_texts",
"_____no_output_____"
]
],
[
[
"## ๅญฆ็ฟๆธใฟใขใใซใฎใปใใใขใใ",
"_____no_output_____"
]
],
[
[
"%cd /content/rq-vae-transformer\n!mkdir pretrained\n%cd pretrained\n\n!wget https://arena.kakaocdn.net/brainrepo/models/RQVAE/dcd95e8f08408e113aab6451fae895f5/cc3m.tar.gz\n!tar -xvf cc3m.tar.gz",
"_____no_output_____"
]
],
[
[
"## ใขใใซใฎใญใผใ",
"_____no_output_____"
]
],
[
[
"vqvae_path = '/content/rq-vae-transformer/pretrained/cc3m/stage1/model.pt'\nmodel_path = '/content/rq-vae-transformer/pretrained/cc3m/stage2/model.pt'\n\n# load stage 1 model: RQ-VAE\nmodel_vqvae, _ = load_model(vqvae_path)\n\n# load stage 2 model: RQ-Transformer\nmodel_ar, config = load_model(model_path, ema=False)\n\n# GPUใซใปใใ\nmodel_ar = model_ar.cuda().eval()\nmodel_vqvae = model_vqvae.cuda().eval()\n\n# CLIPใขใใซใฎใใฆใณใญใผใ\nmodel_clip, preprocess_clip = clip.load(\"ViT-B/32\", device='cpu')\nmodel_clip = model_clip.cuda().eval()\n\n# prepare text encoder to tokenize natual languages\ntext_encoder = TextEncoder(tokenizer_name=config.dataset.txt_tok_name, \n context_length=config.dataset.context_length)",
"_____no_output_____"
]
],
[
[
"# Text to Image",
"_____no_output_____"
]
],
[
[
"#@markdown ๅ
ฅๅใใญในใใ่จญๅฎใใฆใใ ใใใ\ntext_prompts = \"'a photo of \\\"Cherry blossoms in the snow\\\"'\" #@param {type:\"string\"}\n\nnum_samples = 16\ntemperature= 1.0\ntop_k=1024\ntop_p=0.95",
"_____no_output_____"
],
[
"pixels = get_generated_images_by_texts(model_ar,\n model_vqvae,\n text_encoder,\n model_clip,\n preprocess_clip,\n text_prompts,\n num_samples,\n temperature,\n top_k,\n top_p,\n )",
"_____no_output_____"
]
],
[
[
"## ็ตๆใฎ่กจ็คบ",
"_____no_output_____"
]
],
[
[
"num_visualize_samples = 16\nimages = [pixel.cpu().numpy() * 0.5 + 0.5 for pixel in pixels]\nimages = torch.from_numpy(np.array(images[:num_visualize_samples]))\nimages = torch.clamp(images, 0, 1)\ngrid = torchvision.utils.make_grid(images, nrow=4)\n\nimg = Image.fromarray(np.uint8(grid.numpy().transpose([1,2,0])*255))\ndisplay(img)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
eca5fec5f5017c75c172eac09d379c2ffb1d4edd | 708,274 | ipynb | Jupyter Notebook | inverse_problem/pecann_inverse_hydraulic_conductivity.ipynb | shamsbasir/PECANN | 4b7f3ca376282d4d7c43abdbccfb5d2680f0c837 | [
"MIT"
] | 2 | 2022-03-20T00:21:10.000Z | 2022-03-28T09:28:54.000Z | inverse_problem/pecann_inverse_hydraulic_conductivity.ipynb | shamsbasir/PECANN | 4b7f3ca376282d4d7c43abdbccfb5d2680f0c837 | [
"MIT"
] | null | null | null | inverse_problem/pecann_inverse_hydraulic_conductivity.ipynb | shamsbasir/PECANN | 4b7f3ca376282d4d7c43abdbccfb5d2680f0c837 | [
"MIT"
] | 4 | 2021-09-30T03:55:22.000Z | 2022-01-05T11:52:36.000Z | 487.456297 | 61,372 | 0.9352 | [
[
[
"# Author : Shamsulhaq Basir\n# email : [email protected]",
"_____no_output_____"
],
[
"import time\nimport os\nimport torch\nimport torch.nn as nn\nimport numpy as np\nfrom torch.autograd import grad\n\nfrom torch.optim import LBFGS, Adam\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\nfrom torch.nn.parameter import Parameter\nfrom tqdm import tqdm\nimport scipy.io\n\n\n%matplotlib inline\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom scipy.interpolate import griddata\nimport matplotlib.gridspec as gridspec\n\ntorch.set_default_dtype(torch.float64)\n\n# CUDA for PyTorch\nuse_cuda = torch.cuda.is_available()\ndevice = torch.device(\"cuda:0\" if use_cuda else \"cpu\")\ndtype = torch.float64",
"_____no_output_____"
],
[
"data_hi = scipy.io.loadmat('./High_fidelity/HF_data.mat')\ndata_lo = scipy.io.loadmat('./Low_fidelity/LF_data.mat')\n\nx_lo = data_lo['x'].T\nh_lo = data_lo['h'].T\nK_lo = data_lo['K'].T\n\n\nx_hi = data_hi['x'].T\nh_hi = data_hi['h'].T\nK_hi = data_hi['K'].T\n\n\n# chosen high fidelity data \nvx_hi = torch.as_tensor(np.array([x_hi[12],x_hi[23]]))\nvh_hi = torch.as_tensor(np.array([h_hi[12],h_hi[23]]))\nvK_hi = torch.as_tensor(np.array([K_hi[12],K_hi[23]]))\n\n# chosen low fidelity data\nvx_lo = torch.as_tensor(np.array(x_lo))\nvh_lo = torch.as_tensor(np.array(h_lo))",
"_____no_output_____"
],
[
"def fetch_interior_data(domain,N_data):\n x_min = domain[0]\n x_max = domain[1]\n soboleng = torch.quasirandom.SobolEngine(dimension=1,scramble=True)\n x = soboleng.draw(N_data,dtype=torch.float64)*(x_max - x_min) + x_min\n x = x.requires_grad_(True)\n return x\n\n\ndef fetch_boundary_data(domain):\n x = torch.tensor([[0.0],[200.0]])\n return x",
"_____no_output_____"
],
[
"class ConventBlock(nn.Module):\n def __init__(self,in_N,out_N):\n super(ConventBlock, self).__init__()\n self.Ls = None\n self.net =nn.Sequential(nn.Linear(in_N,out_N),nn.Tanh()) \n\n def forward(self, x):\n out = self.net(x)\n return out \n \n\nclass ModifiedResBlock(nn.Module):\n def __init__(self,in_N,out_N):\n super(ModifiedResBlock, self).__init__()\n self.Ls = None\n if in_N != out_N:\n self.Ls = nn.Linear(in_N,out_N)\n self.net = nn.Sequential(nn.Linear(in_N,out_N),nn.Tanh())\n def forward(self, x):\n out = self.net(x)\n if self.Ls : \n x = self.Ls(x)\n out = out + x\n return out ",
"_____no_output_____"
],
[
"class Network(torch.nn.Module):\n def __init__(self,in_N,m,H_Layer,out_N,**kwargs):\n super(Network,self).__init__()\n self.a = nn.Parameter(torch.tensor(0.057-0.015) * torch.rand(1) + 0.015,requires_grad=True)\n self.m = nn.Parameter(torch.tensor(0.40 -0.31 ) * torch.rand(1) + 0.31 ,requires_grad=True)\n self.Ks = torch.tensor(1.04)\n \n self.mu = kwargs[\"mean\"]\n self.std = kwargs[\"stdev\"]\n \n layers = []\n layers.append(ConventBlock(in_N,m))\n for i in range(0,H_Layer-1):\n layers.append(ConventBlock(m,m))\n layers.append(nn.Linear(m,out_N))\n # total layers\n self.net = nn.Sequential(*layers)\n \n\n def forward(self,x):\n # normalize the input\n data = (x - self.mu)/self.std\n out = self.net(data)\n return out\n \n def Se(self,h):\n n = 1/(1-self.m)\n Se = 1/(1 + torch.abs(self.a*h).pow(n)).pow(self.m)\n return Se \n \n \n def K(self,Se):\n K = self.Ks*Se.pow(0.5)*(1 - (1 - Se.pow(1.0/self.m)).pow(self.m)).pow(2)\n return K\n \n \ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.init.xavier_normal_(m.weight.data)\n nn.init.zeros_(m.bias)",
"_____no_output_____"
],
[
"def physics_loss(model,x,qe):\n h = model(x)\n Se = model.Se(h)\n K = model.K(Se)\n h_x = grad(outputs=h,inputs=x,grad_outputs = torch.ones_like(h),create_graph=True,retain_graph=True)[0]\n q = -K*h_x\n loss = (q - qe).pow(2)\n return loss",
"_____no_output_____"
],
[
"def boundary_loss(model,x,ue):\n u = model(x)\n loss = (u - ue).pow(2)\n return loss",
"_____no_output_____"
],
[
"kwargs ={\"mean\":100.0, \"stdev\":57.743} \ndomain = [200.0 , 0.0]\nmodel_mf = Network(1,20,2,1,**kwargs)\nmodel_hi = Network(1,20,2,1,**kwargs)\nprint(model_mf)",
"Network(\n (net): Sequential(\n (0): ConventBlock(\n (net): Sequential(\n (0): Linear(in_features=1, out_features=20, bias=True)\n (1): Tanh()\n )\n )\n (1): ConventBlock(\n (net): Sequential(\n (0): Linear(in_features=20, out_features=20, bias=True)\n (1): Tanh()\n )\n )\n (2): Linear(in_features=20, out_features=1, bias=True)\n )\n)\n100.0\n"
],
[
"# exact boundary conditions \nhe_b = torch.tensor([[-3.0],[-10.0]])\n\n# exact flux \nqe = torch.tensor(0.0122)",
"_____no_output_____"
],
[
"# maximum penalty value for safeguarding\nmu_inf = torch.tensor(1.0e4)\n\n# penalty multiplication factor\neta = torch.tensor(0.0)\n\nepsilon = torch.tensor(1e-8)\n\n# generate boundary conditions \nx_b = fetch_boundary_data(domain)\n\n# number of epochs\nepochs = 2000\n\n# print to dipslay epoch\ndisp = 2000",
"_____no_output_____"
]
],
[
[
"#### ----------Training High Fidelity Model ----------",
"_____no_output_____"
]
],
[
[
"# start 10 trials \n\n# vector of learned parameters from high fidelity model\na_hi = []\nm_hi = []\n\nfor trial in range(1,11):\n print(\"*\"*20 + f' run({trial}) '+\"*\"*20)\n model_hi.apply(init_weights)\n optimizer = torch.optim.Adam(model_hi.parameters(), lr = 1e-2) \n # initialize penalty parameter\n mu = torch.tensor(1.0)\n # lagrange multiplier for boundary conditions \n Lambda_bc = torch.zeros_like(x_b)\n # lagrange multiplier for high fidelity data \n Lambda_hi = torch.zeros_like(vx_hi)\n \n # starting to train high fidelity model \n for epoch in tqdm(range(epochs)):\n optimizer.zero_grad()\n \n # generate domain collocation points \n x = fetch_interior_data(domain,N_data=400)\n \n # approximate physics loss \n pde_loss = physics_loss(model_hi,x,qe)\n \n # approximate boundary error square\n bc_ls = boundary_loss(model_hi,x_b,he_b)\n \n # approximate total boundary loss \n bc_loss = (Lambda_bc*bc_ls).sum()\n\n # approximate high fidelity data loss \n hi_ls = boundary_loss(model_hi,vx_hi,vh_hi)\n hi_loss = (Lambda_hi*hi_ls).sum()\n \n # approximate penalty term \n \n penalty = bc_ls.pow(2).sum() + hi_ls.pow(2).sum()\n \n # total loss \n loss = pde_loss.sum() + bc_loss + hi_loss + 0.5 * mu * penalty\n \n # calculate the gradients\n loss.backward()\n \n # update the parameters\n optimizer.step()\n \n # reduce the learning rate\n #scheduler.step(loss.item())\n \n # update the multipliers \n \n with torch.no_grad():\n if (torch.sqrt(penalty) >= 0.25*eta) and (torch.sqrt(penalty) > epsilon):\n # update penalty paramter\n mu = min(2.0*mu,mu_inf)\n \n # update boundary lagrange multipliers\n Lambda_bc += mu * bc_ls \n # update high fidelity lagrange multipliers\n Lambda_hi += mu * hi_ls \n eta = torch.sqrt(penalty)\n if (epoch + 1% disp == 0 ):\n print(f\" Epoch : {epoch}, Avg physics loss:{pde_loss.mean():2.3e}\")\n # finished current trial \n # save the learned coefficients \n a_hi.append(np.abs(model_hi.a.detach().item()))\n 
m_hi.append(model_hi.m.detach().item())\n \n # display the learned parameters from the current trial \n print(f\"a : {np.abs(model_hi.a.detach().item()):2.5f}, m : {model_hi.m.detach().item():2.5f}\")\n \n # save the current trained model \n PATH = os.getcwd()+f\"/trained_models/PECANN_hi_{trial}.pt\"\n torch.save(model_hi.state_dict(), PATH)",
"******************** run(1) ********************\n"
]
],
[
[
"#### ----------- reporting the statistics of high fidelity models after 10 trials ---------------",
"_____no_output_____"
]
],
[
[
"print(\"-*-\"*20)\nstd_a_hi,mean_a_hi= torch.std_mean(torch.abs(torch.tensor(a_hi)))\nprint(f\"mean a: {mean_a_hi:2.4f}\")\nprint(f\"std a: {std_a_hi:2.3e}\")\n\nprint(\"-*-\"*20)\nstd_m_hi,mean_m_hi= torch.std_mean(torch.tensor(m_hi))\nprint(f\"mean m: {mean_m_hi:2.4f}\")\nprint(f\"std m: {std_m_hi:2.3e}\")",
"-*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*-\nmean a: 0.0351\nstd a: 7.181e-04\n-*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*-\nmean m: 0.3536\nstd m: 2.783e-03\n"
],
[
"alpha_rel_error = 100 * abs(mean_a_hi.item() - 0.036)/0.036 \nm_rel_error = 100 * abs(mean_m_hi.item() - 0.36)/0.36 \n\nprint(f\"Relative error in alpha :{alpha_rel_error: 2.3f}%\")\nprint(f\"Relative error in m : {m_rel_error :2.3f}%\")",
"Relative error in alpha : 2.579%\nRelative error in m : 1.784%\n"
]
],
[
[
"#### ----------Training Multi-Fidelity Model ----------",
"_____no_output_____"
]
],
[
[
"# start 10 trials \n\n# vector of learned parameters from high fidelity model\na_mf = []\nm_mf = []\n\nfor trial in range(1,11):\n print(\"*\"*20 + f' run({trial}) '+\"*\"*20)\n model_mf.apply(init_weights)\n optimizer = torch.optim.Adam(model_mf.parameters(), lr = 1e-2) \n scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,patience=100,factor=0.95)\n \n # initialize penalty parameter\n mu = torch.tensor(1.0)\n \n # lagrange multiplier for boundary conditions \n Lambda_bc = torch.zeros_like(x_b)\n # lagrange multiplier for high fidelity data \n Lambda_hi = torch.zeros_like(vx_hi)\n \n # starting to train high fidelity model \n for epoch in tqdm(range(epochs)):\n optimizer.zero_grad()\n \n # generate domain collocation points \n x = fetch_interior_data(domain,N_data=400)\n \n # approximate physics loss \n pde_loss = physics_loss(model_mf,x,qe)\n \n # approximate boundary error square\n bc_ls = boundary_loss(model_mf,x_b,he_b)\n \n # approximate total boundary loss \n bc_loss = (Lambda_bc*bc_ls).sum()\n\n # approximate high fidelity data loss \n hi_ls = boundary_loss(model_mf,vx_hi,vh_hi)\n hi_loss = (Lambda_hi*hi_ls).sum()\n \n # approximate low fidelity data loss \n lo_loss = boundary_loss(model_mf,vx_lo,vh_lo)\n \n # approximate penalty term \n penalty = bc_ls.pow(2).sum() + hi_ls.pow(2).sum()\n \n # total loss \n loss = pde_loss.sum() + lo_loss.sum() + bc_loss + hi_loss + 0.5 * mu * penalty\n \n # calculate the gradients\n loss.backward()\n \n # update the parameters\n optimizer.step()\n \n # reduce the learning rate\n #scheduler.step(loss.item())\n \n # update the multipliers \n \n with torch.no_grad():\n if (torch.sqrt(penalty) >= 0.25*eta) and (torch.sqrt(penalty) > epsilon):\n # update penalty paramter\n mu = min(2.0*mu,mu_inf)\n \n # update boundary lagrange multipliers\n Lambda_bc += mu * bc_ls \n # update high fidelity lagrange multipliers\n Lambda_hi += mu * hi_ls \n eta = torch.sqrt(penalty)\n if (epoch + 1% disp == 0 ):\n print(f\" Epoch 
: {epoch}, Avg physics loss:{pde_loss.mean():2.3e}\")\n \n # finished current trial \n # save the learned coefficients \n a_mf.append(np.abs(model_mf.a.detach().item()))\n m_mf.append(model_mf.m.detach().item())\n \n # display the learned parameters from the current trial \n print(f\"a : {np.abs(model_mf.a.detach().item()):2.5f}, m : {model_mf.m.detach().item():2.5f}\")\n \n # save the current trained model \n PATH = os.getcwd()+f\"/trained_models/PECANN_mf_{trial}.pt\"\n torch.save(model_mf.state_dict(), PATH)",
"******************** run(1) ********************\n"
],
[
"print(\"-*-\"*20)\nstd_a_mf,mean_a_mf= torch.std_mean(torch.abs(torch.tensor(a_mf)))\nprint(f\"mean a: {mean_a_mf:2.4f}\")\nprint(f\"std a: {std_a_mf:2.3e}\")\n\nprint(\"-*-\"*20)\nstd_m_mf,mean_m_mf= torch.std_mean(torch.tensor(m_mf))\nprint(f\"mean m: {mean_m_mf:2.4f}\")\nprint(f\"std m: {std_m_mf:2.3e}\")",
"-*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*-\nmean a: 0.0359\nstd a: 7.511e-04\n-*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*-\nmean m: 0.3569\nstd m: 2.744e-03\n"
],
[
"alpha_rel_error = 100 * abs(mean_a_mf.item() - 0.036)/0.036 \nm_rel_error = 100 * abs(mean_m_mf.item() - 0.36)/0.36 \n\nprint(f\"Relative error in alpha :{alpha_rel_error: 2.3f}%\")\nprint(f\"Relative error in m : {m_rel_error :2.3f}%\")",
"Relative error in alpha : 0.303%\nRelative error in m : 0.858%\n"
]
],
[
[
"### ------- plots for paper ------- ",
"_____no_output_____"
]
],
[
[
"# https://joseph-long.com/writing/colorbars/\ndef colorbar(mappable,min_val,max_val):\n from mpl_toolkits.axes_grid1 import make_axes_locatable\n import matplotlib.pyplot as plt\n last_axes = plt.gca()\n ax = mappable.axes\n fig = ax.figure\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.1)\n ticks = np.linspace(min_val, max_val, 4, endpoint=True)\n cbar = fig.colorbar(mappable, cax=cax,ticks=ticks)\n cbar.formatter.set_powerlimits((0, 0))\n plt.sca(last_axes)\n return cbar\n\nparams = {\n 'text.latex.preamble': '\\\\usepackage{gensymb}',\n 'image.origin': 'lower',\n 'image.interpolation': 'nearest',\n 'image.cmap': 'gray',\n 'axes.grid': False,\n 'savefig.dpi': 150, # to adjust notebook inline plot size\n 'axes.labelsize' : 18, # fontsize for x and y labels\n 'axes.titlesize' : 18,\n 'font.size' : 18, \n 'legend.fontsize': 16, \n 'xtick.labelsize': 18,\n 'ytick.labelsize': 18,\n 'text.usetex': False,\n 'figure.figsize': [3, 3],\n 'font.family': 'serif',\n}\nplt.rcParams.update(params)",
"_____no_output_____"
],
[
"for trial in range(1,11):\n PATH = os.getcwd()+f\"/trained_models/PECANN_mf_{trial}.pt\"\n model_mf.load_state_dict(torch.load(PATH))\n \n PATH = os.getcwd()+f\"/trained_models/PECANN_hi_{trial}.pt\"\n model_hi.load_state_dict(torch.load(PATH))\n \n \n \n model_mf.eval()\n model_hi.eval()\n with torch.no_grad(): \n print(\"*\"*50)\n\n # prediction time \n x = torch.tensor(x_hi)\n hp_mf = model_mf(x)\n Se_mf = model_mf.Se(hp_mf)\n Kp_mf = model_mf.K(Se_mf)\n \n \n hp_hi = model_hi(x)\n Se_hi = model_hi.Se(hp_hi)\n Kp_hi = model_hi.K(Se_hi)\n \n L2r_h = np.linalg.norm(torch.tensor(h_hi)- hp_mf.detach(), 2)/np.linalg.norm(torch.tensor(h_hi), 2)\n L2r_k = np.linalg.norm(torch.tensor(K_hi)- Kp_mf.detach(), 2)/np.linalg.norm(torch.tensor(K_hi), 2)\n print(f\" Trial:{trial:2d}, a:{model_mf.a.detach().item():2.4f},m:{model_mf.m.detach().item():2.4f},L2r_h :{L2r_h:2.3e}, L2r_k:{L2r_k:2.3e} \\n\")\n \n \n plt.rcParams['figure.figsize'] = (24,4)\n gs = gridspec.GridSpec(1, 4)\n gs.update(wspace=0.6)\n \n \n \n plt.figure()\n # a) high fidelity and low fidelity data \n ax = plt.subplot(gs[0,0])\n ax.plot(x_hi,h_hi,'k-',label='Exact')\n ax.plot(vx_hi,vh_hi,'1r',mew=2,markersize=10,label='HF')\n ax.plot(vx_lo,vh_lo,'bo--',mew=2,markerfacecolor='None',markersize=4,linewidth=1,label=\"LF\")\n ax.set_yticks([-10,-6,-2])\n ax.set_xticks([0,100,200])\n ax.set_xlabel('$x$')\n ax.set_ylabel('$h(x)$')\n ax.legend(bbox_to_anchor=(0.75, 1.),loc='upper center',ncol=1,frameon=False)\n plt.figtext(0.192, -0.20,'(a)' ,wrap=True, horizontalalignment='center', fontsize=16)\n\n\n # b) corresponding K values for low fidelity and high fidelity data \n ax = plt.subplot(gs[0,1])\n ax.plot(h_hi,K_hi,'k-',label='Exact')\n ax.plot(vh_hi,vK_hi,'1r',mew=2,markersize=10,label='HF')\n ax.plot(vh_lo,K_lo,'bo--',mew=2,markerfacecolor='None',markersize=4,linewidth=1,label=\"LF\")\n ax.set_xticks([-10,-6,-2])\n ax.set_yticks([0.2,0.4,0.6])\n ax.set_xlabel('$h$')\n ax.set_ylabel('$K(h)$')\n 
ax.legend(bbox_to_anchor=(0.25, 1.),loc='upper center',ncol=1,frameon=False)\n plt.figtext(0.405, -0.20,'(b)' ,wrap=True, horizontalalignment='center', fontsize=16)\n\n \n # c) multifidelity model prediction \n ax = plt.subplot(gs[0,2])\n ax.plot(x_hi,h_hi,'k-',linewidth=3,label='Exact')\n ax.plot(x,hp_mf,'c-',linewidth=3,label='MF Model')\n ax.plot(x,hp_hi,'r--',linewidth=3,label='HF Model')\n ax.set_xlabel('$x$')\n ax.set_ylabel('$h(x)$')\n ax.set_xticks([0,100,200])\n ax.set_yticks([-10,-6,-2])\n ax.legend(bbox_to_anchor=(0.70, 1),loc='upper center',ncol=1,frameon=False)\n plt.figtext(0.62, -0.20,'(c)' ,wrap=True, horizontalalignment='center', fontsize=16)\n\n\n #d) Low and high-fidelity hydraulic conductivity\n ax = plt.subplot(gs[0,3])\n ax.plot(h_hi,K_hi,'k--',linewidth=3,label='Exact')\n ax.plot(hp_mf,Kp_mf,'c-' , linewidth=3,label='MF Model')\n ax.plot(hp_hi,Kp_hi,'r--',linewidth=3,label='HF Model')\n \n ax.set_xlabel('$h$')\n ax.set_ylabel('$K(h)$')\n ax.set_yticks([0.2,0.4,0.6])\n ax.set_xticks([-10,-6,-2])\n ax.legend(bbox_to_anchor=(0.35, 1),loc='upper center',ncol=1,frameon=False)\n plt.figtext(0.835, -0.20,'(d)' ,wrap=True, horizontalalignment='center', fontsize=16)\n\n plt.show()",
"**************************************************\n Trial: 1, a:0.0368,m:0.3598,L2r_h :1.285e-02, L2r_k:1.809e-02 \n\n"
],
[
"trial = 6\nPATH = os.getcwd()+f\"/trained_models/PECANN_mf_{trial}.pt\"\nmodel_mf.load_state_dict(torch.load(PATH))\n\nPATH = os.getcwd()+f\"/trained_models/PECANN_hi_{trial}.pt\"\nmodel_hi.load_state_dict(torch.load(PATH))\n\n\n\nmodel_mf.eval()\nmodel_hi.eval()\nwith torch.no_grad(): \n # prediction time \n x = torch.tensor(x_hi)\n hp_mf = model_mf(x)\n Se_mf = model_mf.Se(hp_mf)\n Kp_mf = model_mf.K(Se_mf)\n\n\n hp_hi = model_hi(x)\n Se_hi = model_hi.Se(hp_hi)\n Kp_hi = model_hi.K(Se_hi)\n\n L2r_h = np.linalg.norm(torch.tensor(h_hi)- hp_mf.detach(), 2)/np.linalg.norm(torch.tensor(h_hi), 2)\n L2r_k = np.linalg.norm(torch.tensor(K_hi)- Kp_mf.detach(), 2)/np.linalg.norm(torch.tensor(K_hi), 2)\n print(f\" Trial:{trial:2d}, a:{model_mf.a.detach().item():2.4f},m:{model_mf.m.detach().item():2.4f},L2r_h :{L2r_h:2.3e}, L2r_k:{L2r_k:2.3e} \\n\")\n\n\n plt.rcParams['figure.figsize'] = (24,4)\n gs = gridspec.GridSpec(1, 4)\n gs.update(wspace=0.6)\n\n\n\n plt.figure()\n # a) high fidelity and low fidelity data \n ax = plt.subplot(gs[0,0])\n ax.plot(x_hi,h_hi,'k-',label='Exact')\n ax.plot(vx_hi,vh_hi,'1r',mew=2,markersize=10,label='HF')\n ax.plot(vx_lo,vh_lo,'bo--',mew=2,markerfacecolor='None',markersize=4,linewidth=1,label=\"LF\")\n ax.set_yticks([-10,-6,-2])\n ax.set_xticks([0,100,200])\n ax.set_xlabel('$x$')\n ax.set_ylabel('$h(x)$')\n ax.legend(bbox_to_anchor=(0.75, 1.),loc='upper center',ncol=1,frameon=False)\n plt.figtext(0.192, -0.20,'(a)' ,wrap=True, horizontalalignment='center')\n\n\n # b) corresponding K values for low fidelity and high fidelity data \n ax = plt.subplot(gs[0,1])\n ax.plot(h_hi,K_hi,'k-',label='Exact')\n ax.plot(vh_hi,vK_hi,'1r',mew=2,markersize=10,label='HF')\n ax.plot(vh_lo,K_lo,'bo--',mew=2,markerfacecolor='None',markersize=4,linewidth=1,label=\"LF\")\n ax.set_xticks([-10,-6,-2])\n ax.set_yticks([0.2,0.4,0.6])\n ax.set_xlabel('$h$')\n ax.set_ylabel('$K(h)$')\n ax.legend(bbox_to_anchor=(0.25, 1.),loc='upper center',ncol=1,frameon=False)\n 
plt.figtext(0.405, -0.20,'(b)' ,wrap=True, horizontalalignment='center')\n\n\n # c) multifidelity model prediction \n ax = plt.subplot(gs[0,2])\n ax.plot(x_hi,h_hi,'k-',linewidth=3,label='Exact')\n ax.plot(x,hp_mf,'c-',linewidth=3,label='MF Model')\n ax.plot(x,hp_hi,'r--',linewidth=3,label='HF Model')\n ax.set_xlabel('$x$')\n ax.set_ylabel('$h(x)$')\n ax.set_xticks([0,100,200])\n ax.set_yticks([-10,-6,-2])\n ax.legend(bbox_to_anchor=(0.70, 1),loc='upper center',ncol=1,frameon=False)\n plt.figtext(0.62, -0.20,'(c)' ,wrap=True, horizontalalignment='center')\n\n\n #d) Low and high-fidelity hydraulic conductivity\n ax = plt.subplot(gs[0,3])\n ax.plot(h_hi,K_hi,'k--',linewidth=3,label='Exact')\n ax.plot(hp_mf,Kp_mf,'c-' , linewidth=3,label='MF Model')\n ax.plot(hp_hi,Kp_hi,'r--',linewidth=3,label='HF Model')\n\n ax.set_xlabel('$h$')\n ax.set_ylabel('$K(h)$')\n ax.set_yticks([0.2,0.4,0.6])\n ax.set_xticks([-10,-6,-2])\n ax.legend(bbox_to_anchor=(0.35, 1),loc='upper center',ncol=1,frameon=False)\n plt.figtext(0.835, -0.20,'(d)' ,wrap=True, horizontalalignment='center')\nplt.savefig('non_linear_soil_conductivity.png', bbox_inches='tight', pad_inches=0.02)",
" Trial: 6, a:0.0352,m:0.3552,L2r_h :7.450e-03, L2r_k:1.189e-02 \n\n"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
eca6000507f16eff58261dac3ca57be641d3a24c | 264,088 | ipynb | Jupyter Notebook | 2017/DSMCER_content/StatisticsW2L2/.ipynb_checkpoints/W2L2 Basic Stats-checkpoint.ipynb | ShahResearchGroup/UWDIRECT.github.io | d4db958a6bfe151b6f7b1eb4772d8fd1b9bb0c3e | [
"BSD-3-Clause"
] | 1 | 2021-01-26T19:55:02.000Z | 2021-01-26T19:55:02.000Z | 2017/DSMCER_content/StatisticsW2L2/W2L2 Basic Stats.ipynb | ShahResearchGroup/UWDIRECT.github.io | d4db958a6bfe151b6f7b1eb4772d8fd1b9bb0c3e | [
"BSD-3-Clause"
] | null | null | null | 2017/DSMCER_content/StatisticsW2L2/W2L2 Basic Stats.ipynb | ShahResearchGroup/UWDIRECT.github.io | d4db958a6bfe151b6f7b1eb4772d8fd1b9bb0c3e | [
"BSD-3-Clause"
] | null | null | null | 357.358593 | 142,216 | 0.915013 | [
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd",
"/Users/jpfaendt/anaconda/lib/python2.7/site-packages/matplotlib/font_manager.py:273: UserWarning: Matplotlib is building the font cache using fc-list. This may take a moment.\n warnings.warn('Matplotlib is building the font cache using fc-list. This may take a moment.')\n"
]
],
[
[
"## Getting started - we all need to download 1 zip file with needed text files \n\n### The location fo the file is: prg.washington.edu/DIRECTfiles/W2L2files.zip",
"_____no_output_____"
],
[
"### Warmup - learning about sample size and statistical results\n\n* Person 1: type the cell \n* Person 2: explain what you think each line is doing while your partner types it, if neither of you know what a line does make a guess based on the syntax \n* Execute the cell (using shift-enter) 5 times with the default values (T=5,N=10)\n* Execute the cell increasing N by 10X each time and observe results \n* What level of sampling are you and your partner comfortable with to achieve the expected results? \n* What is the difference between the average bar plot of five N=10 samples and the bar plot of one N=50 sample\n* How could we quantify this? ",
"_____no_output_____"
]
],
[
[
"tables=5\nN=100\nrands=np.random.randint(tables, size=N)\n\nplt.hist(rands, bins=5, alpha=0.5)\nplt.grid(True)\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Lets look at some descriptive statistics on simulation results of a simple molecule \n\n* Molecule description\n* Why it matters and what matters about it\n* What we will be looking at \n\n<img src=\"https://www.researchgate.net/profile/Davide_Branduardi/publication/220258613/figure/fig1/AS:277569061572636@1443189029820/FIG-2-Ball-and-stick-representation-of-alanine-dipeptide-Ace-Ala-Nme-in-vacuum-The.png\">\n\n<img src=\"http://prg.washington.edu/DIRECTimg/fes_bias.png\">",
"_____no_output_____"
]
],
[
[
"data=np.genfromtxt('W2L2files/phipsi.dat',comments='#')",
"_____no_output_____"
],
[
"data.shape",
"_____no_output_____"
],
[
"%matplotlib inline\nplt.figure(figsize=(6, 6))\nplt.scatter(data[::10,0],data[::10,1])\nplt.scatter(data[::10,0],data[::10,2],marker='^',color='green')\nplt.grid()",
"_____no_output_____"
],
[
"%matplotlib inline\nplt.figure(figsize=(6, 6))\nplt.scatter(data[:,1],data[:,2],marker='o')\nplt.xlim([-np.pi,np.pi])\nplt.ylim([-np.pi,np.pi])\nplt.grid()",
"_____no_output_____"
]
],
[
[
"### Assuming this is the exact data we want , lets look at some descriptive statistics \n\n* Please note - in reality you would **never** calculate descriptive statistics on this data without some conditioning\n* The reason is that the data in x/y (phi and psi) are **periodic**. So the value of phi at -pi = pi. Therefore, as Dave pointed out in lecture - if your data are 50% -pi and 50% +pi your mean would be zero, which is not accurate \n* For the purposes of these calculations we will treat them like continuous data and neglect the periodicity ",
"_____no_output_____"
],
[
"# Key Definitions \n\n## Variable types \n\n1. Discrete vs continuous \n * The value of the dihedral angle at time = 10 ns \n * Which week of Winter quarter are we in? \n \n2. **uni**variate vs **multi**variate data sets\n * Only the angle _phi_ is monitored in the experiment \n * Both the angle _phi_ and the terminal C=O bind length are monitored in experiment (e.g., **bi**variate) \n \n3. Differentiating between the **population** and the **sample**\n \n## Three standard categories of \"measurements\" of our data\n\n1. Measures of **center**: \n * _Population_ mean: $\\mu = \\sum X_i / N $ \n * _Sample_ mean: $\\bar x = \\sum x_i / n $ \n * Often $X_i$ and $N$ are used for **population** and $x_i$ and $n_ for **sample**\n * Median: _the value that separates the upper and lower 50% of data when sorted_\n * Use of median can protect against outliers skewing the data \n\n2. Measures of **position**: \n * Quartile: _the value that markes a gradation in 25% of the data set_ \n * Percentile: _the value that markes the $n^{th}$ percentage of the data set_ \n * The 2nd quartile is identical to the median \n * If you are in the 90th _percentile_ of a group of scores, your score is higher than 90% of test takers\n * If a measure of position falls between two numbers the _mean_ of the pair is used: \n * The mean of $[1,2,3,4]$ is $\\frac{2+3}{2}=2.5$\n \n3. Measures of **spread** or **variability**: \n * _Population_ variance: $\\sigma^2 = \\frac{\\sum (X_i - \\mu)^2}{ N} $\n * _Population_ standard deviation: $\\sigma =\\ \\sqrt{\\frac{\\sum (X_i - \\mu)^2}{ N}}$\n * _Sample_ variance: $s^2 = \\frac{\\sum (x_i - \\bar x)^2}{n-1} $\n * _Sample_ standard deviation: $s =\\ \\sqrt{\\frac{\\sum (x_i - \\bar x)^2}{n-1}}$\n * The range of +/- 1Q around the median (i.e., _interquartile range_) is also used\n * $IQR = Q3-Q1$",
"_____no_output_____"
],
[
"### Sample mean calculated in 3 ways (maybe 3.5 ways) ",
"_____no_output_____"
]
],
[
[
"# Brutest force \nsum=0\ncount=0\n\nfor x in np.nditer(data[:,1]):\n sum+=x\n count+=1\n \nmean=sum/count\n\nprint(mean)",
"-1.93639673557\n"
],
[
"# Brute force \n\nmean=np.sum(data[:,1])/np.shape(data[:,1])[0]\nprint(mean)",
"-1.93639673557\n"
],
[
"# The right way to do it \n\nprint (np.mean(data[:,1]))",
"-1.93639673557\n"
],
[
"# Lets blaze\nprint (data[:,1].mean())",
"-1.93639673557\n"
]
],
[
[
"### With a partner [whoever is newer at Python should type first with the more experienced person helping]\n\n1) Read in the same phile (phipsi.data) as a pandas data frame. Reminder: the three columns are the time step of the observation, the angle $\\Phi$ and the angle $\\Psi$ \n2) Repeat the calculation of the mean with the pandas data set \n3) Switch to a different person working \n4) Calculate the sample **and** population standard deviation using one (_not both_) of the brute force ways and compare them to the automatic ways to do it both with numpy and pandas",
"_____no_output_____"
]
],
[
[
"pddata=pd.read_csv('W2L2files/phipsi.dat',comment='#',header=0,names=[\"time\",\"phi\",\"psi\"],delimiter=' ')\nprint(pddata.describe())\n\nsum=0\ncount=0\nfor x in np.nditer(pddata.phi):\n sum+=x\n count+=1\n\n \nprint (sum/count)\nprint (pddata.phi.sum()/len(pddata.phi))\nprint (np.mean(pddata.phi))\nprint (pddata.phi.mean())\n \n## I have evidenced a very bad principle in this cell related to use of Jupyter notebooks (hint line 1). Why?",
" time phi psi\ncount 49998.000000 49998.000000 49998.000000\nmean 2499.950119 -1.936392 1.510293\nstd 1443.332440 0.918635 1.595412\nmin 0.100000 -3.141553 -3.141346\n25% 1250.025059 -2.671801 0.845398\n50% 2499.950119 -1.878216 1.870596\n75% 3749.875178 -1.368765 2.751570\nmax 4999.800237 3.141560 3.141501\n-1.93639163739\n-1.93639163739\n-1.93639163739\n-1.93639163739\n"
]
],
[
[
"### Lets regroup \n\n* Any questions asbout what we just did. We often do a lot of mathematical operations that **don't have built in libraries** so it is useful to begin to get some practice doing calculations on our data. More soon! \n* One note of advice - towards building great 1-liners \n* One final note of caution - degrees of freedom! \n",
"_____no_output_____"
]
],
[
[
"# Some further calculation of descriptive statistics - the standard deviation \n\n# this is a python 1-liner to calculate the population standard deviation \n# see if you can break it out into separate parts and connect it to the math written above \nmystd=np.sqrt(np.sum(np.power(pddata.phi-pddata.phi.mean(),2))/len(pddata.phi))\n\n# how would you change it to the calculatino of the sample standard deviation? \n\nprint (mystd)\n\n#pandas calculation of standard deviation \nprint pddata.phi.std()\n#numpy calculation of standard deviation \nprint np.std(pddata.phi)",
"0.918626247319\n0.918635434087\n0.918626247319\n"
]
],
[
[
"### It is important to me that you understand why the default pandas and numpy standard deviations are different\n\n* You can modify the function call of **both** functions to get either the sample or population standard deviation. \n* There is an additional argument ddof that controls this ",
"_____no_output_____"
],
[
"### Calculating descriptive statistics and making a box plot ",
"_____no_output_____"
]
],
[
[
"# Option 1 - pandas \npddata.describe()\n\n# what's missing? (hint the answer is median)",
"_____no_output_____"
],
[
"print (pddata.median())\n\nprint (pddata.phi.median())\n\n# Quick question - if asked you to manually calculate the some of the measures of position\n# what would your strategy be? ",
"time 2499.950119\nphi -1.878216\npsi 1.870596\ndtype: float64\n-1.8782165\n"
],
[
"# a basic box plot to show some of the descriptive statistics \n\npddata.boxplot(['phi','psi']);",
"/Users/jpfaendt/anaconda/lib/python2.7/site-packages/ipykernel/__main__.py:1: FutureWarning: \nThe default value for 'return_type' will change to 'axes' in a future release.\n To use the future behavior now, set return_type='axes'.\n To keep the previous behavior and silence this warning, set return_type='dict'.\n if __name__ == '__main__':\n"
],
[
"# we can infer what is on the box plot even if we don't understand it :) \npddata.describe()",
"_____no_output_____"
]
],
[
[
"### Previewing what is next - distributions \n\n",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
eca604c8f0555d36f66996b45073cfb8b3a1fcd2 | 5,842 | ipynb | Jupyter Notebook | Coursera-ML-AndrewNg-Notes-master/code/ex6-SVM/4- spam filter.ipynb | yang-233/ML | 4e78ea2dea83db68fcd860dabe7562b066b7f9b0 | [
"Apache-2.0"
] | null | null | null | Coursera-ML-AndrewNg-Notes-master/code/ex6-SVM/4- spam filter.ipynb | yang-233/ML | 4e78ea2dea83db68fcd860dabe7562b066b7f9b0 | [
"Apache-2.0"
] | null | null | null | Coursera-ML-AndrewNg-Notes-master/code/ex6-SVM/4- spam filter.ipynb | yang-233/ML | 4e78ea2dea83db68fcd860dabe7562b066b7f9b0 | [
"Apache-2.0"
] | null | null | null | 20.790036 | 88 | 0.476036 | [
[
[
"# 4-ๅๅพ้ฎไปถๆฃๆต",
"_____no_output_____"
]
],
[
[
"from sklearn import svm\nfrom sklearn import metrics\nfrom sklearn.linear_model import LogisticRegression\n\nimport scipy.io as sio",
"_____no_output_____"
]
],
[
[
"> I think the hard part is how to vecotrize emails. \nUsing this preprocessed data set is cheating XD",
"_____no_output_____"
]
],
[
[
"mat_tr = sio.loadmat('data/spamTrain.mat')\nmat_tr.keys()",
"_____no_output_____"
]
],
[
[
"> be careful with the column vector : `(4000, 1)` is not the same as `(4000, )`",
"_____no_output_____"
]
],
[
[
"X, y = mat_tr.get('X'), mat_tr.get('y').ravel()\nX.shape, y.shape",
"_____no_output_____"
],
[
"mat_test = sio.loadmat('data/spamTest.mat')\nmat_test.keys()",
"_____no_output_____"
],
[
"test_X, test_y = mat_test.get('Xtest'), mat_test.get('ytest').ravel()\ntest_X.shape, test_y.shape",
"_____no_output_____"
]
],
[
[
"# fit SVM model",
"_____no_output_____"
]
],
[
[
"svc = svm.SVC()",
"_____no_output_____"
],
[
"svc.fit(X, y)",
"_____no_output_____"
],
[
"pred = svc.predict(test_X)\nprint(metrics.classification_report(test_y, pred))",
" precision recall f1-score support\n\n 0 0.94 0.99 0.97 692\n 1 0.98 0.87 0.92 308\n\navg / total 0.95 0.95 0.95 1000\n\n"
]
],
[
[
"# what about linear logistic regresion?",
"_____no_output_____"
]
],
[
[
"logit = LogisticRegression()\nlogit.fit(X, y)",
"_____no_output_____"
],
[
"pred = logit.predict(test_X)\nprint(metrics.classification_report(test_y, pred))",
" precision recall f1-score support\n\n 0 1.00 0.99 1.00 692\n 1 0.99 0.99 0.99 308\n\navg / total 0.99 0.99 0.99 1000\n\n"
]
],
[
[
".......... ็ปๆ๏ผ้ป่พๅๅฝ็็ปๆๆฏsvmๅผบๅคไบ",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
eca6085ae66f6e2d004016b961694e8ac67bb50f | 6,472 | ipynb | Jupyter Notebook | docs/source/install.ipynb | isabella232/evalml | 5b372d0dfac05ff9b7e41eb494a9df1bf2da4a9d | [
"BSD-3-Clause"
] | null | null | null | docs/source/install.ipynb | isabella232/evalml | 5b372d0dfac05ff9b7e41eb494a9df1bf2da4a9d | [
"BSD-3-Clause"
] | 1 | 2022-02-19T12:59:09.000Z | 2022-02-19T12:59:09.000Z | docs/source/install.ipynb | isabella232/evalml | 5b372d0dfac05ff9b7e41eb494a9df1bf2da4a9d | [
"BSD-3-Clause"
] | null | null | null | 44.328767 | 496 | 0.648022 | [
[
[
"# Install\n\nEvalML is available for Python 3.8 with experimental support 3.9. It can be installed with pip or conda.\n\n## Time Series support with Facebook's Prophet \n\nTo support the `Prophet` time series estimator, be sure to install it as an extra requirement. Please note that this may take a few minutes.\nProphet is currently only supported via pip installation in EvalML for Mac with CmdStan as a backend.\n```shell\npip install evalml[prophet]\n```\nAnother option for installing Prophet with CmdStan as a backend is to use `make installdeps-prophet`.\n\nNote: In order to do this, you must have the EvalML repo cloned and you must be in the top level folder `<your_directory>/evalml/` to execute this command.\nThis command will do the following:\n- Pip install `cmdstanpy==0.9.68`\n- Execute the `install_cmdstan.py` script found within your `site-packages/cmdstanpy` which builds `cmdstan` in your `site-packages`.\n- Install `Prophet==1.0.1` with the `CMDSTAN` and `STAN_BACKEND` environment variables set.\n\nIf the `site-packages` path is incorrect or you'd like to specify a different one, just run `make installdeps-prophet SITE_PACKAGES_DIR=\"<path_to_your_site_packages>\"`.\n\nIf you'd like to have more fine-tuned control over the installation steps for Prophet, such as specifying the backend, follow these steps:\n\nFor CmdStanPy as a backend:\n1. `pip install cmdstanpy==0.9.68`\n2. `python <path_to_installed_cmdstanpy>/install_cmdstan.py --dir <path_to_build_cmdstan> -v <version_to_use>`\n3. `CMDSTAN=<path_to_build_cmdstan>/cmdstan-<version_to_use> STAN_BACKEND=CMDSTANPY pip install prophet==1.0.1`\n\nFor PyStan as a backend (PyStan is used by default):\n1. `pip install prophet==1.0.1`\n\n\n## Pip with all dependencies\n\nTo install evalml with pip, run the following command:\n\n```bash\npip install evalml\n```\n\n## Pip with core dependencies\n\nEvalML includes several optional dependencies. 
The `xgboost` and `catboost` packages support pipelines built around those modeling libraries. The `plotly` and `ipywidgets` packages support plotting functionality in automl searches. These dependencies are recommended, and are included with EvalML by default but are not required in order to install and use EvalML.\n\nEvalML's core dependencies are listed in `core-requirements.txt` in the source code, and optional requirements are isted in `requirements.txt`.\n\nTo install EvalML with only the core required dependencies, download the EvalML source [from pypi](https://pypi.org/project/evalml/#files) to access the requirements files. Then run the following:\n\n```bash\npip install evalml --no-dependencies\npip install -r core-requirements.txt\n```\n\n#### Add-ons\nYou can install add-ons individually or all at once by running:\n```bash\npip install evalml[complete]\n```\n\n**Time Series Support** <br>\n\nAdd time series support with Facebook's Prophet\n```bash\npip install evalml[prophet]\n```\nPlease note that this may take a few minutes. Prophet is currently only supported via pip installation in EvalML.\n\n**Update checker** <br>\n\nReceive automatic notifications of new EvalML releases\n```bash\npip install evalml[update_checker]\n```\n\n## Conda with all dependencies\n\nTo install evalml with conda run the following command:\n\n```bash\nconda install -c conda-forge evalml\n```\n\n## Conda with core dependencies \n\nTo install evalml with only core dependencies run the following command:\n\n```bash\nconda install -c conda-forge evalml-core\n```\n\n## Windows\n\nAdditionally, if you are using `pip` to install EvalML, it is recommended you first install the following packages using conda:\n* `numba` (needed for `shap` and prediction explanations). Install with `conda install -c conda-forge numba`\n* `graphviz` if you're using EvalML's plotting utilities. 
Install with `conda install -c conda-forge python-graphviz`\n\nThe [XGBoost](https://pypi.org/project/xgboost/) library may not be pip-installable in some Windows environments. If you are encountering installation issues, please try installing XGBoost from [Github](https://xgboost.readthedocs.io/en/latest/build.html) before installing EvalML or install evalml with conda.\n\n## Mac\n\nIn order to run on Mac, [LightGBM](https://pypi.org/project/lightgbm/) requires the `OpenMP` library to be installed, which can be done with [HomeBrew](https://brew.sh/) by running \n\n```bash\nbrew install libomp\n```\n\nAdditionally, `graphviz` can be installed by running\n\n```bash\nbrew install graphviz\n```\n\n## Python 3.9 support\n\nEvalml can still be installed with pip in python 3.9 but note that `sktime`, one of our dependencies, will not be installed because that library does not yet support python 3.9. This means the ``PolynomialDetrending`` component will not be usable in python 3.9. You can try to install `sktime` [from source](https://www.sktime.org/en/latest/installation.html#building-from-source) in python 3.9 to use the ``PolynomialDetrending`` component but be warned that we only test it in python 3.8.",
"_____no_output_____"
]
]
] | [
"markdown"
] | [
[
"markdown"
]
] |
eca61c1b8cecfa1fc6f28b00407a1b610365c370 | 13,647 | ipynb | Jupyter Notebook | assessment/sr15_2.3.3_global_emissions_statistics.ipynb | volker-krey/ipcc_sr15_scenario_analysis | 02bd691a865981211527da77bf2de2d6add2ffee | [
"Apache-2.0"
] | null | null | null | assessment/sr15_2.3.3_global_emissions_statistics.ipynb | volker-krey/ipcc_sr15_scenario_analysis | 02bd691a865981211527da77bf2de2d6add2ffee | [
"Apache-2.0"
] | null | null | null | assessment/sr15_2.3.3_global_emissions_statistics.ipynb | volker-krey/ipcc_sr15_scenario_analysis | 02bd691a865981211527da77bf2de2d6add2ffee | [
"Apache-2.0"
] | null | null | null | 27.85102 | 213 | 0.54327 | [
[
[
"### *IPCC SR15 scenario assessment*\n\n<img style=\"float: right; height: 80px; padding-left: 20px;\" src=\"../_static/IIASA_logo.png\">\n<img style=\"float: right; height: 80px;\" src=\"../_static/IAMC_logo.jpg\">\n\n# Analysis of global CO2 and Kyoto emissions, (BE)CCS <br /> and year of net-zero\n\nThis notebook computes indicators and diagnostics of emissions pathways, the use of carbon capture and sequestration, and the timing of net-zero of different emissions categories \nin the IPCC's _\"Special Report on Global Warming of 1.5ยฐC\"_. The notebook generates the data for **Table 2.4** in the Special Report.\n\nThe scenario data used in this analysis can be accessed and downloaded at [https://data.ene.iiasa.ac.at/iamc-1.5c-explorer](https://data.ene.iiasa.ac.at/iamc-1.5c-explorer).",
"_____no_output_____"
],
[
"## Load `pyam` package and other dependencies",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\nimport warnings\nimport io\nimport itertools\nimport yaml\nimport math\nimport pyam",
"_____no_output_____"
]
],
[
[
"## Import scenario data, categorization and specifications files\n\nThe metadata file must be generated from the notebook `sr15_2.0_categories_indicators` included in this repository. \nIf the snapshot file has been updated, make sure that you rerun the categorization notebook.\n\nThe last cell of this section loads and assigns a number of auxiliary lists as defined in the categorization notebook.",
"_____no_output_____"
]
],
[
[
"sr1p5 = pyam.IamDataFrame(data='../data/iamc15_scenario_data_world_r2.0.xlsx')",
"_____no_output_____"
],
[
"sr1p5.load_metadata('../data/sr15_metadata_indicators_ar6_fod.xlsx')",
"_____no_output_____"
],
[
"with open(\"sr15_specs.yaml\", 'r') as stream:\n specs = yaml.load(stream, Loader=yaml.FullLoader)\n\nrc = pyam.run_control()\nfor item in specs.pop('run_control').items():\n rc.update({item[0]: item[1]})\ncats = specs.pop('cats')\ncats_15_no_lo = specs.pop('cats_15_no_lo')",
"_____no_output_____"
]
],
[
[
"## Downselect scenario ensemble to categories of interest for this assessment\n\nTo reduce potential bias by many scenarios from the same modelling framework, 13 scenarios submitted by the 'AIM' model are excluded from the assessment underpinning this statement (cf. SPM Statement C1).\n\nAlso, note that we apply the filter by relevant years *after computing the year of netzero*.",
"_____no_output_____"
]
],
[
[
"cats.remove('Above 2C')",
"_____no_output_____"
],
[
"sr1p5.meta.rename(columns={'Kyoto-GHG|2010 (SAR)': 'kyoto_ghg_2010'}, inplace=True)",
"_____no_output_____"
],
[
"sr1p5.meta.rename(columns={'Kyoto-GHG|2020 (AR4)': 'kyoto_ghg_2020'}, inplace=True)",
"_____no_output_____"
],
[
"filter_args_aim = dict(model='AIM*',\n scenario=['SFCM*_1p5Degree', 'EMF33_Med2C_nofuel', 'EMF33_Med2C_none'],\n keep=False)",
"_____no_output_____"
],
[
"df = (\n sr1p5\n .filter(kyoto_ghg_2010='in range', category=cats)\n .filter(kyoto_ghg_2020='in range', category=cats)\n .filter(**filter_args_aim)\n)",
"_____no_output_____"
]
],
[
[
"## Initialize a `pyam.Statistics` instance",
"_____no_output_____"
]
],
[
[
"stats = pyam.Statistics(df=df,\n filters=[\n ('below 1.5', {'category': 'Below 1.5C'}),\n ('lo os 1.5', {'category': '1.5C low overshoot'}),\n ('no & lo os 1.5', {'category': cats_15_no_lo}),\n ('hi os 1.5', {'category': ['1.5C high overshoot']}),\n ('lower 2.0', {'category': ['Lower 2C']}),\n ('higher 2.0', {'category': ['Higher 2C']}),\n ('lower 2.5', {'category': ['Lower 2.5C']}),\n ('lower 3.0', {'category': ['Lower 3C']})]\n , rows=True)",
"_____no_output_____"
],
[
"years = [2010, 2020, 2030, 2040, 2050, 2060, 2070, 2080, 2090, 2100]\ncompare_years = [(2010, 2030), (2020, 2030), (2030, 2050)]",
"_____no_output_____"
]
],
[
[
"## Function to compute the year of netzero and add growth statistics to the summary",
"_____no_output_____"
]
],
[
[
"def year_of_net_zero(data, years, threshold):\n prev_val = 0\n prev_yr = np.nan\n\n for yr, val in zip(years, data):\n if np.isnan(val):\n continue\n \n if val < threshold:\n x = (val - prev_val) / (yr - prev_yr) # absolute change per year\n return prev_yr + int((threshold - prev_val) / x) + 1 # add one because int() rounds down\n \n prev_val = val\n prev_yr = yr\n return np.inf",
"_____no_output_____"
],
[
"header='Annual emissions/sequestration (GtCO2)'\nheader_change='Absolute annual change (GtCO2)'\nheader_zero='Timing of global zero'\n\nstatistics_settings = dict(\n header=header,\n header_change=header_change,\n header_zero= header_zero,\n years=years,\n compare_years=[(2010, 2030), (2020, 2030), (2030, 2050)],\n)",
"_____no_output_____"
],
[
"def add_statistics(data, row, years, compare_years,\n header, header_change, header_zero, add_netzero=False):\n stats.add(data[years], header=header, row=row)\n for i, j in compare_years:\n abs_ann_change = (data[j] - data[i]) / (j - i)\n stats.add(abs_ann_change, header=header_change, row=row,\n subheader='{}-{}'.format(i,j))\n if add_netzero:\n netzero = data.apply(year_of_net_zero, years=data.columns, threshold=0, axis=1)\n stats.add(netzero, header=header_zero, row=row, subheader='year')",
"_____no_output_____"
]
],
[
[
"## Get timeseries of total CO2 emissions",
"_____no_output_____"
]
],
[
[
"co2 = (\n df.filter(variable='Emissions|CO2')\n .convert_unit({'Mt CO2/yr': ('Gt CO2/yr', 0.001)})\n .timeseries()\n)",
"_____no_output_____"
],
[
"add_statistics(co2, 'Total CO2 (net)', **statistics_settings, add_netzero=True)",
"_____no_output_____"
],
[
"co2_gross_seq_variables = [\n 'Carbon Sequestration|CCS|Biomass',\n 'Carbon Sequestration|Land Use',\n 'Carbon Sequestration|Direct Air Capture',\n 'Carbon Sequestration|Enhanced Weathering'\n]\nagg_sequestration = (\n df.filter(variable=co2_gross_seq_variables)\n .convert_unit({'Mt CO2/yr': ('Gt CO2/yr', 0.001)})\n .timeseries()\n)\nagg_sequestration = agg_sequestration.groupby(pyam.META_IDX).sum()",
"_____no_output_____"
],
[
"co2_ene_ind = (\n df.filter(variable='Emissions|CO2|Energy and Industrial Processes')\n .convert_unit({'Mt CO2/yr': ('Gt CO2/yr', 0.001)})\n .timeseries()\n)\nco2_ene_ind.index = co2_ene_ind.index.droplevel([2, 3, 4])",
"_____no_output_____"
],
[
"co2_ene_ind_gross = (co2_ene_ind + agg_sequestration).combine_first(co2_ene_ind)",
"_____no_output_____"
],
[
"add_statistics(co2_ene_ind_gross, 'CO2 from fossil fuels and industry (gross)', **statistics_settings)",
"_____no_output_____"
],
[
"add_statistics(co2_ene_ind, 'CO2 from fossil fuels and industry (net)', **statistics_settings)",
"_____no_output_____"
],
[
"co2_afolu = (\n df.filter(variable='Emissions|CO2|AFOLU')\n .convert_unit({'Mt CO2/yr': ('Gt CO2/yr', 0.001)})\n .timeseries()\n)",
"_____no_output_____"
],
[
"add_statistics(co2_afolu, 'CO2 from AFOLU', **statistics_settings)",
"_____no_output_____"
]
],
[
[
"## CCS from bioenergy",
"_____no_output_____"
]
],
[
[
"ccs_bio = (\n df.filter(variable='Carbon Sequestration|CCS|Biomass')\n .convert_unit({'Mt CO2/yr': ('Gt CO2/yr', 0.001)})\n .timeseries()\n)",
"_____no_output_____"
],
[
"add_statistics(ccs_bio, 'Bioenergy combined with carbon capture and storage (BECCS)',**statistics_settings)",
"_____no_output_____"
]
],
[
[
"## Total greenhouse gases according to the Kyoto protocol",
"_____no_output_____"
]
],
[
[
"ghg = (\n df.filter(variable='Emissions|Kyoto Gases (AR4-GWP100)')\n .convert_unit({'Mt CO2-equiv/yr': ('Gt CO2-equiv/yr', 0.001)})\n .timeseries()\n)",
"_____no_output_____"
],
[
"add_statistics(ghg, 'Kyoto GHG (AR4, GtCO2e)', **statistics_settings, add_netzero=True)",
"_____no_output_____"
]
],
[
[
"## Display and export summary statistics to `xlsx`\n\nNote that in Table 2.4 as printed in the Special Report, the full range is shown for any cells with less than 7 scenarios, and interquartile ranges are shown otherwise.\nThis formatting was implemented manually ex-post, as it is currently not supported by the `pyam.Statistics` module.",
"_____no_output_____"
]
],
[
[
"summary = stats.summarize(center='median', interquartile=True, custom_format='{:.1f}')\nindex = summary.index.get_level_values(1).unique()\nsummary = (\n summary\n .swaplevel(0, 1, axis=0)\n .reindex(index=index, level=0)\n)\nsummary",
"_____no_output_____"
],
[
"summary.to_excel('output/table_2.4_emission_statistics_ar6_fod.xlsx')",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
eca6279616ea31de2259bb4e5a7b6d43188d9fae | 6,917 | ipynb | Jupyter Notebook | notebooks/09_ecosystem.ipynb | AaltoRSE/julia-introduction | 114eaa8328391341b70c39522630d23cfdcc6bea | [
"MIT"
] | 7 | 2021-04-22T10:58:54.000Z | 2022-03-28T08:41:56.000Z | notebooks/09_ecosystem.ipynb | AaltoRSE/julia-introduction | 114eaa8328391341b70c39522630d23cfdcc6bea | [
"MIT"
] | 17 | 2021-03-18T14:51:45.000Z | 2021-10-06T11:59:24.000Z | notebooks/09_ecosystem.ipynb | AaltoRSE/julia-introduction | 114eaa8328391341b70c39522630d23cfdcc6bea | [
"MIT"
] | 9 | 2021-04-26T12:30:56.000Z | 2022-03-20T22:20:47.000Z | 38.642458 | 234 | 0.641752 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
eca63e615a027e64c8a67f4b623e2310f6852e45 | 15,739 | ipynb | Jupyter Notebook | notebooks/option_chain.ipynb | snhuber/ib_insync | 43f8431fa92444c6a5fad6667209bc5a662240b6 | [
"BSD-2-Clause"
] | 1 | 2020-07-16T03:39:41.000Z | 2020-07-16T03:39:41.000Z | notebooks/option_chain.ipynb | alobbs/ib_insync | 1ca3a4ac9e44bf1327cbfbb28a7618af6356bbae | [
"BSD-2-Clause"
] | null | null | null | notebooks/option_chain.ipynb | alobbs/ib_insync | 1ca3a4ac9e44bf1327cbfbb28a7618af6356bbae | [
"BSD-2-Clause"
] | null | null | null | 37.65311 | 2,973 | 0.547684 | [
[
[
"Option chains\n=======",
"_____no_output_____"
]
],
[
[
"from ib_insync import *\nutil.startLoop()\n\nib = IB()\nib.connect('127.0.0.1', 7497, clientId=12)",
"_____no_output_____"
]
],
[
[
"Suppose we want to find the options on the SPX, with the following conditions:\n\n* Use the next three monthly expiries;\n* Use strike prices within +- 20 dollar of the current SPX value;\n* Use strike prices that are a multitude of 5 dollar.",
"_____no_output_____"
],
[
"To get the current market value, first create a contract for the underlyer (the S&P 500 index):",
"_____no_output_____"
]
],
[
[
"spx = Index('SPX', 'CBOE')\nib.qualifyContracts(spx)",
"_____no_output_____"
]
],
[
[
"To avoid issues with market data permissions, we'll use delayed data:",
"_____no_output_____"
]
],
[
[
"ib.reqMarketDataType(4)",
"_____no_output_____"
]
],
[
[
"Then get the ticker. Requesting a ticker can take up to 11 seconds.",
"_____no_output_____"
]
],
[
[
"[ticker] = ib.reqTickers(spx)\nticker",
"_____no_output_____"
]
],
[
[
"Take the current market value of the ticker:",
"_____no_output_____"
]
],
[
[
"spxValue = ticker.marketPrice()\nspxValue",
"_____no_output_____"
]
],
[
[
"The following request fetches a list of option chains:",
"_____no_output_____"
]
],
[
[
"chains = ib.reqSecDefOptParams(spx.symbol, '', spx.secType, spx.conId)\n\nutil.df(chains)",
"_____no_output_____"
]
],
[
[
"These are four option chains that differ in ``exchange`` and ``tradingClass``. The latter is 'SPX' for the monthly and 'SPXW' for the weekly options. Note that the weekly expiries are disjoint from the monthly ones, so when interested in the weekly options the monthly options can be added as well.\n\nIn this case we're only interested in the montly options trading on SMART:",
"_____no_output_____"
]
],
[
[
"chain = next(c for c in chains if c.tradingClass == 'SPX' and c.exchange == 'SMART')\nchain",
"_____no_output_____"
]
],
[
[
"What we have here is the full matrix of expirations x strikes. From this we can build all the option contracts that meet our conditions:",
"_____no_output_____"
]
],
[
[
"strikes = [strike for strike in chain.strikes\n if strike % 5 == 0\n and spxValue - 20 < strike < spxValue + 20]\nexpirations = sorted(exp for exp in chain.expirations)[:3]\nrights = ['P', 'C']\n\ncontracts = [Option('SPX', expiration, strike, right, 'SMART', tradingClass='SPX')\n for right in rights\n for expiration in expirations\n for strike in strikes]\n\ncontracts = ib.qualifyContracts(*contracts)\nlen(contracts)",
"_____no_output_____"
],
[
"contracts[0]",
"_____no_output_____"
]
],
[
[
"Now to get the market data for all options in one go:",
"_____no_output_____"
]
],
[
[
"tickers = ib.reqTickers(*contracts)\n\ntickers[0]",
"_____no_output_____"
]
],
[
[
"The option greeks are available from the ``modelGreeks`` attribute, and if there is a bid, ask resp. last price available also from ``bidGreeks``, ``askGreeks`` and ``lastGreeks``. For streaming ticks the greek values will be kept up to date to the current market situation.",
"_____no_output_____"
]
],
[
[
"ib.disconnect()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
eca6412691290e1aeed528f57a28974f32b96ec0 | 140,444 | ipynb | Jupyter Notebook | 3) semantic segmentation.ipynb | KeremTurgutlu/siim-acr-pneumothorax | 66dffb6fabd8f358720be9554ec4354e108f7d2c | [
"MIT"
] | 4 | 2019-10-02T00:56:51.000Z | 2020-02-04T13:11:43.000Z | 3) semantic segmentation.ipynb | KeremTurgutlu/siim-acr-pneumothorax | 66dffb6fabd8f358720be9554ec4354e108f7d2c | [
"MIT"
] | 1 | 2020-04-19T05:38:42.000Z | 2020-04-19T17:33:11.000Z | 3) semantic segmentation.ipynb | KeremTurgutlu/siim-acr-pneumothorax | 66dffb6fabd8f358720be9554ec4354e108f7d2c | [
"MIT"
] | 1 | 2019-12-07T16:33:39.000Z | 2019-12-07T16:33:39.000Z | 186.018543 | 89,716 | 0.905144 | [
[
[
"### note: implement dice as a callback",
"_____no_output_____"
],
[
"### imports",
"_____no_output_____"
]
],
[
[
"%reload_ext autoreload\n%autoreload 2",
"_____no_output_____"
],
[
"from fastai.vision import *\nfrom fastai.vision.interpret import *\nfrom losses import *\nimport fastai; fastai.__version__",
"_____no_output_____"
],
[
"data_path = Path(\"../../data/siim_acr_pneu/\"); data_path.ls()",
"_____no_output_____"
],
[
"torch.cuda.set_device(2)",
"_____no_output_____"
],
[
"chx_mimic_stats = [tensor([0.485, 0.456, 0.406]), tensor([0.229, 0.224, 0.225])]",
"_____no_output_____"
]
],
[
[
"### databunch",
"_____no_output_____"
]
],
[
[
"bs,sz = 8,224\nfold_idx = 0 # for kfold\ntfms = get_transforms()",
"_____no_output_____"
],
[
"from sklearn.model_selection import KFold\nkfold = KFold(n_splits=5, random_state=42)",
"_____no_output_____"
],
[
"get_y_fn = lambda x: data_path/f'train/masks_{sz}/{Path(x).stem}.png'\ncodes = ['void', 'pthorax']",
"_____no_output_____"
],
[
"class SegmentationLabelList(SegmentationLabelList):\n def open(self, fn): return open_mask(fn, div=True)\n\nclass SegmentationItemList(SegmentationItemList):\n _label_cls = SegmentationLabelList",
"_____no_output_____"
],
[
"items = (SegmentationItemList.from_csv(data_path, 'seg_df.csv', folder=f'train/images_{sz}', suffix='.jpg'))\ntrn_idxs, val_idxs = list(kfold.split(range(len(items))))[fold_idx]\ndata = (items.split_by_idxs(trn_idxs, val_idxs)\n .label_from_func(get_y_fn, classes=codes)\n .transform(tfms=tfms, size=sz, tfm_y=True, resize_method=ResizeMethod.NO, padding_mode='reflection')\n .databunch(bs=bs)\n .normalize(chx_mimic_stats))",
"_____no_output_____"
],
[
"data",
"_____no_output_____"
],
[
"# data.show_batch()",
"_____no_output_____"
]
],
[
[
"### model",
"_____no_output_____"
]
],
[
[
"# from fastai.train import BnFreeze\n# from unet import unet_learner #custom unet_learner\n\n# def xresnet50(pretrained=True):\n# return models.xresnet50(pretrained=pretrained)\n\n# def xresnet34(pretrained=True):\n# return models.xresnet34(pretrained=pretrained)",
"_____no_output_____"
],
[
"from fastai.callbacks.hooks import model_sizes\n\nbody = create_body(models.densenet121)\nchx_state_dict = torch.load(\"/home/turgutluk/data/models/chx-mimic-only-pneumo-densenet121-320.pth\")\nfor n, p in list(body.named_parameters()): p.data = chx_state_dict['0.' + n]\nbody = body[0].cpu() ",
"_____no_output_____"
],
[
"model_sizes(body)",
"_____no_output_____"
],
[
"densenet_children = list(body.children())\nunet_densenet_body = nn.Sequential(nn.Sequential(*densenet_children[:3]),\n nn.Sequential(*densenet_children[3:5]),\n nn.Sequential(*densenet_children[5:7]),\n nn.Sequential(*densenet_children[7:9]),\n nn.Sequential(*densenet_children[9:]))",
"_____no_output_____"
],
[
"model_sizes(unet_densenet_body)",
"_____no_output_____"
],
[
"def _new_densenet_split(m:nn.Module): return (m[1])",
"_____no_output_____"
],
[
"try:size = data.train_ds[0][0].size\nexcept: size = next(iter(data.train_dl))[0].shape[-2:]\nmodel = to_device(models.unet.DynamicUnet(unet_densenet_body, n_classes=data.c, img_size=size), data.device)\nlearn = Learner(data, model)\nlearn.split(_new_densenet_split)\napply_init(learn.model[1:], nn.init.kaiming_normal_)",
"_____no_output_____"
],
[
"# MONKEY PATCH: Remove print statement at every improvement\nfrom fastai.callbacks import SaveModelCallback\ndef _on_epoch_end(self, epoch:int, **kwargs:Any)->None:\n \"Compare the value monitored to its best score and maybe save the model.\"\n if self.every==\"epoch\": self.learn.save(f'{self.name}_{epoch}')\n else: #every=\"improvement\"\n current = self.get_monitor_value()\n if current is not None and self.operator(current, self.best):\n self.best = current\n self.learn.save(f'{self.name}')\n \nSaveModelCallback.on_epoch_end = _on_epoch_end\nlearn.callbacks.append(SaveModelCallback(learn, monitor='dice', name=f\"seg-densenet121-{sz}-{fold_idx}\"))",
"_____no_output_____"
],
[
"learn.to_fp16()\nlearn.freeze() \nlearn.loss_func = dice_loss\nlearn.metrics = [dice]",
"_____no_output_____"
],
[
"# learn.lr_find()\n# learn.recorder.plot()",
"_____no_output_____"
],
[
"lr = 1e-4\nlearn.fit_one_cycle(10, lr, moms=(0.8,0.7))",
"_____no_output_____"
],
[
"learn.unfreeze()",
"_____no_output_____"
],
[
"lr /= 2\nlearn.fit_one_cycle(10, slice(lr), moms=(0.8,0.7))",
"_____no_output_____"
],
[
"learn.to_fp32();",
"_____no_output_____"
]
],
[
[
"### add test",
"_____no_output_____"
]
],
[
[
"test = ImageList.from_folder(data_path/f'test/images_{sz}', extensions='.jpg')",
"_____no_output_____"
],
[
"learn.data.add_test(test, tfm_y=False)",
"_____no_output_____"
],
[
"# learn.load(f\"seg-resnet34-{sz}\");\n# learn.load(f\"seg-chexpert-ft-resnet34-{sz}\");\nlearn.load(f\"seg-chexpert-ft-resnext34-{sz}-{fold_idx}\");",
"_____no_output_____"
],
[
"learn.validate()",
"_____no_output_____"
]
],
[
[
"### save all learn obj",
"_____no_output_____"
]
],
[
[
"os.makedirs(data_path/\"learn\", exist_ok=True)",
"_____no_output_____"
],
[
"with ModelOnCPU(learn.model) as model:\n try_save({\"data\":learn.data, \"model\":model}, data_path, f\"learn/seg-chexpert-ft-resnext34-{sz}-{fold_idx}\")",
"_____no_output_____"
],
[
"f\"learn/seg-chexpert-ft-resnext34-{sz}-{fold_idx}\"",
"_____no_output_____"
]
],
[
[
"### interpret",
"_____no_output_____"
]
],
[
[
"interp = SegmentationInterpretation.from_learner(learn=learn)",
"_____no_output_____"
],
[
"top_losses, top_idxs = interp.top_losses((sz,sz))",
"_____no_output_____"
],
[
"top_losses.shape, top_idxs.shape",
"_____no_output_____"
],
[
"interp.show_xyz(np.random.choice(top_idxs))",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
eca6431d07d27f5fe0d94f8e50af95a75e3be7f6 | 153,511 | ipynb | Jupyter Notebook | notebooks/semisupervised/cifar10/learned-metric/augmented-nothresh-Y/cifar10-aug-16ex-learned-nothresh-Y.ipynb | timsainb/ParametricUMAP_paper | 00b4d676647e45619552aec8f2663c0903a83e3f | [
"MIT"
] | 124 | 2020-09-27T23:59:01.000Z | 2022-03-22T06:27:35.000Z | notebooks/semisupervised/cifar10/learned-metric/augmented-nothresh-Y/cifar10-aug-16ex-learned-nothresh-Y.ipynb | kiminh/ParametricUMAP_paper | 00b4d676647e45619552aec8f2663c0903a83e3f | [
"MIT"
] | 2 | 2021-02-05T18:13:13.000Z | 2021-11-01T14:55:08.000Z | notebooks/semisupervised/cifar10/learned-metric/augmented-nothresh-Y/cifar10-aug-16ex-learned-nothresh-Y.ipynb | kiminh/ParametricUMAP_paper | 00b4d676647e45619552aec8f2663c0903a83e3f | [
"MIT"
] | 16 | 2020-09-28T07:43:21.000Z | 2022-03-21T00:31:34.000Z | 167.041349 | 106,528 | 0.898372 | [
[
[
"# reload packages\n%load_ext autoreload\n%autoreload 2",
"_____no_output_____"
]
],
[
[
"### Choose GPU",
"_____no_output_____"
]
],
[
[
"%env CUDA_DEVICE_ORDER=PCI_BUS_ID\n%env CUDA_VISIBLE_DEVICES=0",
"env: CUDA_DEVICE_ORDER=PCI_BUS_ID\nenv: CUDA_VISIBLE_DEVICES=0\n"
],
[
"import tensorflow as tf\ngpu_devices = tf.config.experimental.list_physical_devices('GPU')\nif len(gpu_devices)>0:\n tf.config.experimental.set_memory_growth(gpu_devices[0], True)\nprint(gpu_devices)\ntf.keras.backend.clear_session()",
"[PhysicalDevice(name='/physical_device:GPU:0', device_type='GPU')]\n"
]
],
[
[
"### Load packages",
"_____no_output_____"
]
],
[
[
"import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom tqdm.autonotebook import tqdm\nfrom IPython import display\nimport pandas as pd\nimport umap\nimport copy\nimport os, tempfile\nimport tensorflow_addons as tfa\nimport pickle\n",
"/mnt/cube/tsainbur/conda_envs/tpy3/lib/python3.6/site-packages/tqdm/autonotebook/__init__.py:14: TqdmExperimentalWarning: Using `tqdm.autonotebook.tqdm` in notebook mode. Use `tqdm.tqdm` instead to force console mode (e.g. in jupyter console)\n \" (e.g. in jupyter console)\", TqdmExperimentalWarning)\n"
]
],
[
[
"### parameters",
"_____no_output_____"
]
],
[
[
"dataset = \"cifar10\"\nlabels_per_class = 16 # 'full'\nn_latent_dims = 1024\nconfidence_threshold = 0.0 # minimum confidence to include in UMAP graph for learned metric\nlearned_metric = True # whether to use a learned metric, or Euclidean distance between datapoints\n\naugmented = True #\nmin_dist= 0.001 # min_dist parameter for UMAP\nnegative_sample_rate = 5 # how many negative samples per positive sample \nbatch_size = 128 # batch size \noptimizer = tf.keras.optimizers.Adam(1e-3) # the optimizer to train\noptimizer = tfa.optimizers.MovingAverage(optimizer)\nlabel_smoothing = 0.2 # how much label smoothing to apply to categorical crossentropy\nmax_umap_iterations = 500 # how many times, maximum, to recompute UMAP\nmax_epochs_per_graph = 10 # how many epochs maximum each graph trains for (without early stopping)\ngraph_patience = 10 # how many times without improvement to train a new graph\nmin_graph_delta = 0.0025 # minimum improvement on validation acc to consider an improvement for training",
"_____no_output_____"
],
[
"from datetime import datetime\n\ndatestring = datetime.now().strftime(\"%Y_%m_%d_%H_%M_%S_%f\")\ndatestring = (\n str(dataset)\n + \"_\"\n + str(confidence_threshold)\n + \"_\"\n + str(labels_per_class)\n + \"____\"\n + datestring \n + '_umap_augmented'\n)\nprint(datestring)",
"cifar10_0.0_16____2020_08_19_00_40_13_037112_umap_augmented\n"
]
],
[
[
"#### Load dataset",
"_____no_output_____"
]
],
[
[
"from tfumap.semisupervised_keras import load_dataset",
"_____no_output_____"
],
[
"(\n X_train,\n X_test,\n X_labeled,\n Y_labeled,\n Y_masked,\n X_valid,\n Y_train,\n Y_test,\n Y_valid,\n Y_valid_one_hot,\n Y_labeled_one_hot,\n num_classes,\n dims\n) = load_dataset(dataset, labels_per_class)",
"_____no_output_____"
]
],
[
[
"### load architecture",
"_____no_output_____"
]
],
[
[
"from tfumap.semisupervised_keras import load_architecture",
"_____no_output_____"
],
[
"encoder, classifier, embedder = load_architecture(dataset, n_latent_dims)",
"_____no_output_____"
]
],
[
[
"### load pretrained weights",
"_____no_output_____"
]
],
[
[
"from tfumap.semisupervised_keras import load_pretrained_weights",
"_____no_output_____"
],
[
"encoder, classifier = load_pretrained_weights(dataset, augmented, labels_per_class, encoder, classifier)",
"WARNING: Logging before flag parsing goes to stderr.\nW0819 00:40:15.899877 139788739721024 base.py:272] Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n\nTwo checkpoint references resolved to different objects (<tensorflow_addons.layers.wrappers.WeightNormalization object at 0x7f21fc3ecf28> and <tensorflow.python.keras.layers.advanced_activations.LeakyReLU object at 0x7f21fc398c50>).\nW0819 00:40:15.902516 139788739721024 base.py:272] Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n\nTwo checkpoint references resolved to different objects (<tensorflow_addons.layers.wrappers.WeightNormalization object at 0x7f21fc406518> and <tensorflow.python.keras.layers.advanced_activations.LeakyReLU object at 0x7f21fc3b2f28>).\nW0819 00:40:15.931944 139788739721024 base.py:272] Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n\nTwo checkpoint references resolved to different objects (<tensorflow_addons.layers.wrappers.WeightNormalization object at 0x7f21fefcf0b8> and <tensorflow.python.keras.layers.normalization_v2.BatchNormalization object at 0x7f21ff287be0>).\nW0819 00:40:15.935132 139788739721024 base.py:272] Inconsistent references when loading the checkpoint into this object graph. 
Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n\nTwo checkpoint references resolved to different objects (<tensorflow.python.keras.layers.normalization_v2.BatchNormalization object at 0x7f21ff287be0> and <tensorflow.python.keras.layers.advanced_activations.LeakyReLU object at 0x7f21ff282630>).\nW0819 00:40:15.939033 139788739721024 base.py:272] Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n\nTwo checkpoint references resolved to different objects (<tensorflow_addons.layers.wrappers.WeightNormalization object at 0x7f21ff330630> and <tensorflow.python.keras.layers.normalization_v2.BatchNormalization object at 0x7f21ff330ac8>).\nW0819 00:40:15.941791 139788739721024 base.py:272] Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n\nTwo checkpoint references resolved to different objects (<tensorflow.python.keras.layers.normalization_v2.BatchNormalization object at 0x7f21ff330ac8> and <tensorflow.python.keras.layers.advanced_activations.LeakyReLU object at 0x7f21ff330eb8>).\nW0819 00:40:15.945672 139788739721024 base.py:272] Inconsistent references when loading the checkpoint into this object graph. 
Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n\nTwo checkpoint references resolved to different objects (<tensorflow_addons.layers.wrappers.WeightNormalization object at 0x7f21ff453080> and <tensorflow.python.keras.layers.normalization_v2.BatchNormalization object at 0x7f21ff453390>).\nW0819 00:40:15.948315 139788739721024 base.py:272] Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n\nTwo checkpoint references resolved to different objects (<tensorflow.python.keras.layers.normalization_v2.BatchNormalization object at 0x7f21ff453390> and <tensorflow.python.keras.layers.advanced_activations.LeakyReLU object at 0x7f21ff453518>).\nW0819 00:40:15.954293 139788739721024 base.py:272] Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n\nTwo checkpoint references resolved to different objects (<tensorflow_addons.layers.wrappers.WeightNormalization object at 0x7f21ff07e780> and <tensorflow.python.keras.layers.normalization_v2.BatchNormalization object at 0x7f21ff07ecc0>).\nW0819 00:40:15.956913 139788739721024 base.py:272] Inconsistent references when loading the checkpoint into this object graph. 
Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n\nTwo checkpoint references resolved to different objects (<tensorflow.python.keras.layers.normalization_v2.BatchNormalization object at 0x7f21ff07ecc0> and <tensorflow.python.keras.layers.advanced_activations.LeakyReLU object at 0x7f21ff091080>).\nW0819 00:40:15.960654 139788739721024 base.py:272] Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n\nTwo checkpoint references resolved to different objects (<tensorflow_addons.layers.wrappers.WeightNormalization object at 0x7f21fc5ac828> and <tensorflow.python.keras.layers.normalization_v2.BatchNormalization object at 0x7f21fc5ace10>).\nW0819 00:40:15.963308 139788739721024 base.py:272] Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n\nTwo checkpoint references resolved to different objects (<tensorflow.python.keras.layers.normalization_v2.BatchNormalization object at 0x7f21fc5ace10> and <tensorflow.python.keras.layers.advanced_activations.LeakyReLU object at 0x7f21fc5b20f0>).\nW0819 00:40:15.967027 139788739721024 base.py:272] Inconsistent references when loading the checkpoint into this object graph. 
Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n\nTwo checkpoint references resolved to different objects (<tensorflow_addons.layers.wrappers.WeightNormalization object at 0x7f21fc583b70> and <tensorflow.python.keras.layers.normalization_v2.BatchNormalization object at 0x7f21fc583e48>).\nW0819 00:40:15.969703 139788739721024 base.py:272] Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n\nTwo checkpoint references resolved to different objects (<tensorflow.python.keras.layers.normalization_v2.BatchNormalization object at 0x7f21fc583e48> and <tensorflow.python.keras.layers.advanced_activations.LeakyReLU object at 0x7f21fc588128>).\nW0819 00:40:15.975593 139788739721024 base.py:272] Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n\nTwo checkpoint references resolved to different objects (<tensorflow_addons.layers.wrappers.WeightNormalization object at 0x7f21fc4e52b0> and <tensorflow.python.keras.layers.normalization_v2.BatchNormalization object at 0x7f21fc4e58d0>).\nW0819 00:40:15.978224 139788739721024 base.py:272] Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n\nTwo checkpoint references resolved to different objects (<tensorflow.python.keras.layers.normalization_v2.BatchNormalization object at 0x7f21fc4e58d0> and <tensorflow.python.keras.layers.advanced_activations.LeakyReLU object at 0x7f21fc4e5b00>).\n"
]
],
[
[
"#### compute pretrained accuracy",
"_____no_output_____"
]
],
[
[
"# test current acc\npretrained_predictions = classifier.predict(encoder.predict(X_test, verbose=True), verbose=True)\npretrained_predictions = np.argmax(pretrained_predictions, axis=1)\npretrained_acc = np.mean(pretrained_predictions == Y_test)\nprint('pretrained acc: {}'.format(pretrained_acc))",
"313/313 [==============================] - 2s 7ms/step\n313/313 [==============================] - 1s 2ms/step\npretrained acc: 0.4042\n"
]
],
[
[
"### get a, b parameters for embeddings",
"_____no_output_____"
]
],
[
[
"from tfumap.semisupervised_keras import find_a_b",
"_____no_output_____"
],
[
"a_param, b_param = find_a_b(min_dist=min_dist)",
"_____no_output_____"
]
],
[
[
"### build network",
"_____no_output_____"
]
],
[
[
"from tfumap.semisupervised_keras import build_model",
"_____no_output_____"
],
[
"model = build_model(\n batch_size=batch_size,\n a_param=a_param,\n b_param=b_param,\n dims=dims,\n encoder=encoder,\n classifier=classifier,\n negative_sample_rate=negative_sample_rate,\n optimizer=optimizer,\n label_smoothing=label_smoothing,\n embedder = embedder,\n)",
"_____no_output_____"
]
],
[
[
"### build labeled iterator",
"_____no_output_____"
]
],
[
[
"from tfumap.semisupervised_keras import build_labeled_iterator",
"_____no_output_____"
],
[
"labeled_dataset = build_labeled_iterator(X_labeled, Y_labeled_one_hot, augmented, dims)",
"_____no_output_____"
]
],
[
[
"### training",
"_____no_output_____"
]
],
[
[
"from livelossplot import PlotLossesKerasTF\nfrom tfumap.semisupervised_keras import get_edge_dataset\nfrom tfumap.semisupervised_keras import zip_datasets",
"_____no_output_____"
]
],
[
[
"#### callbacks",
"_____no_output_____"
]
],
[
[
"# plot losses callback\ngroups = {'acccuracy': ['classifier_accuracy', 'val_classifier_accuracy'], 'loss': ['classifier_loss', 'val_classifier_loss']}\nplotlosses = PlotLossesKerasTF(groups=groups)",
"_____no_output_____"
],
[
"history_list = []\ncurrent_validation_acc = 0\nbatches_per_epoch = np.floor(len(X_train)/batch_size).astype(int)\nepochs_since_last_improvement = 0\ncurrent_umap_iterations = 0\ncurrent_epoch = 0",
"_____no_output_____"
],
[
"from tfumap.paths import MODEL_DIR, ensure_dir\nsave_folder = MODEL_DIR / 'semisupervised-keras' / dataset / str(labels_per_class) / datestring\nensure_dir(save_folder / 'test_loss.npy')\n\nfor cui in tqdm(np.arange(current_epoch, max_umap_iterations)):\n \n if len(history_list) > graph_patience+1:\n previous_history = [np.mean(i.history['val_classifier_accuracy']) for i in history_list]\n best_of_patience = np.max(previous_history[-graph_patience:])\n best_of_previous = np.max(previous_history[:-graph_patience])\n if (best_of_previous + min_graph_delta) > best_of_patience:\n print('Early stopping')\n break\n \n # make dataset\n edge_dataset = get_edge_dataset(\n model,\n classifier,\n encoder,\n X_train,\n Y_masked,\n batch_size,\n confidence_threshold,\n labeled_dataset,\n dims,\n learned_metric = learned_metric\n )\n \n # zip dataset\n zipped_ds = zip_datasets(labeled_dataset, edge_dataset, batch_size)\n \n # train dataset\n history = model.fit(\n zipped_ds,\n epochs= current_epoch + max_epochs_per_graph,\n initial_epoch = current_epoch,\n validation_data=(\n (X_valid, tf.zeros_like(X_valid), tf.zeros_like(X_valid)),\n {\"classifier\": Y_valid_one_hot},\n ),\n callbacks = [plotlosses],\n max_queue_size = 100,\n steps_per_epoch = batches_per_epoch,\n #verbose=0\n )\n current_epoch+=len(history.history['loss'])\n history_list.append(history)\n \n # save score \n class_pred = classifier.predict(encoder.predict(X_test))\n class_acc = np.mean(np.argmax(class_pred, axis=1) == Y_test)\n np.save(save_folder / 'test_loss.npy', (np.nan, class_acc))\n \n # save weights\n encoder.save_weights((save_folder / \"encoder\").as_posix())\n classifier.save_weights((save_folder / \"classifier\").as_posix())\n \n # save history\n with open(save_folder / 'history.pickle', 'wb') as file_pi:\n pickle.dump([i.history for i in history_list], file_pi)\n\n current_umap_iterations += 1",
"_____no_output_____"
],
[
"if len(history_list) > graph_patience+1:\n previous_history = [np.mean(i.history['val_classifier_accuracy']) for i in history_list]\n best_of_patience = np.max(previous_history[-graph_patience:])\n best_of_previous = np.max(previous_history[:-graph_patience])\n if (best_of_previous + min_graph_delta) > best_of_patience:\n print('Early stopping')",
"Early stopping\n"
],
[
"plt.plot(previous_history)",
"_____no_output_____"
],
[
"(best_of_previous + min_graph_delta) , best_of_patience",
"_____no_output_____"
]
],
[
[
"### save embedding",
"_____no_output_____"
]
],
[
[
"z = encoder.predict(X_train)",
"_____no_output_____"
],
[
"reducer = umap.UMAP(verbose=True)\n\nembedding = reducer.fit_transform(z.reshape(len(z), np.product(np.shape(z)[1:])))\n\nplt.scatter(embedding[:, 0], embedding[:, 1], c=Y_train.flatten(), s= 1, alpha = 0.1, cmap = plt.cm.tab10)",
"UMAP(dens_frac=0.0, dens_lambda=0.0, verbose=True)\nConstruct fuzzy simplicial set\nWed Aug 19 10:25:28 2020 Finding Nearest Neighbors\nWed Aug 19 10:25:28 2020 Building RP forest with 15 trees\nWed Aug 19 10:25:28 2020 parallel NN descent for 15 iterations\n\t 0 / 15\n\t 1 / 15\n\t 2 / 15\n\t 3 / 15\nWed Aug 19 10:25:36 2020 Finished Nearest Neighbor Search\nWed Aug 19 10:25:38 2020 Construct embedding\n\tcompleted 0 / 200 epochs\n\tcompleted 20 / 200 epochs\n\tcompleted 40 / 200 epochs\n\tcompleted 60 / 200 epochs\n\tcompleted 80 / 200 epochs\n\tcompleted 100 / 200 epochs\n\tcompleted 120 / 200 epochs\n\tcompleted 140 / 200 epochs\n\tcompleted 160 / 200 epochs\n\tcompleted 180 / 200 epochs\nWed Aug 19 10:26:19 2020 Finished embedding\n"
],
[
"np.save(save_folder / 'train_embedding.npy', embedding)",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
eca64ab7697c9f98946b902bdfcbfb14e5343b5e | 7,288 | ipynb | Jupyter Notebook | 2_Correlations.ipynb | hjelleyman/Antarctica-M.1 | c9d21f76f4ce5acb75def124bdabb67b4ac2cb5e | [
"MIT"
] | null | null | null | 2_Correlations.ipynb | hjelleyman/Antarctica-M.1 | c9d21f76f4ce5acb75def124bdabb67b4ac2cb5e | [
"MIT"
] | null | null | null | 2_Correlations.ipynb | hjelleyman/Antarctica-M.1 | c9d21f76f4ce5acb75def124bdabb67b4ac2cb5e | [
"MIT"
] | null | null | null | 31.964912 | 208 | 0.524973 | [
[
[
"# Correlations\n\nThis notebook is used to generate results for the correlation analysis.",
"_____no_output_____"
]
],
[
[
"# Loading modules.\n# correlations can be accessed with corr.\n# plotting can be accessed with plot.\n\nfrom modules import *\nimport itertools\n\n%load_ext autoreload\n%autoreload 2",
"_____no_output_____"
],
[
"# What data to load\nload_seaice = True\nload_indicies = True\nload_ERA5 = False\n\n# What indicies and variables\nindicies = ['SAM','IPO', 'DMI', 'ENSO']\nvariables = ['t2m']\n\n# Resolutions to save data as.\nresolutions = [1]\nn = 5\n\n# temporal averages\ntemporal_resolution = ['monthly', 'seasonal', 'annual']\ntemporal_resolution = ['seasonal', 'annual']\n\n# temporal_breakdown\ntemporal_decomposition = ['raw', 'anomalous']\n\n# detrending\ndetrend = ['raw', 'detrended']\ndetrend = ['raw']\n\nseaice_source = 'nsidc'\n\nminyear = 1980\nmaxyear = 2019",
"_____no_output_____"
],
[
"# seaice_source = 'ecmwf'\n# for n, temp_res, temp_decomp, dt in itertools.product(resolutions, temporal_resolution, temporal_decomposition, detrend):\n# print(n, temp_res, temp_decomp, dt)\n# correlator = corr.correlator(process_seaice = load_seaice,\n# process_indicies = load_indicies,\n# indicies = indicies,\n# anomlous = temp_decomp == 'anomalous',\n# temporal_resolution = temp_res,\n# spatial_resolution = n,\n# detrend = dt == 'detrended',\n# outputfolder = 'processed_data/correlations/',\n# input_folder = 'processed_data/',\n# seaice_source = seaice_source)\n# print(' Computing correlation for mean SIC')\n# correlator.correlate_mean_sic_indicies()\n# print(' Computing spatial correlations')\n# correlator.correlate_spatial_sic_indicies()\n# print(' Saving to file')\n# correlator.save_data()",
"_____no_output_____"
],
[
"seaice_source = 'nsidc'\nfor n, temp_res, temp_decomp, dt in itertools.product(resolutions, temporal_resolution, temporal_decomposition, detrend):\n print(n, temp_res, temp_decomp, dt)\n correlator = corr.correlator(process_seaice = load_seaice,\n process_indicies = load_indicies,\n indicies = indicies,\n anomlous = temp_decomp == 'anomalous',\n temporal_resolution = temp_res,\n spatial_resolution = n,\n detrend = dt == 'detrended',\n outputfolder = 'processed_data/correlations/',\n input_folder = 'processed_data/',\n seaice_source = seaice_source)\n print(' Computing correlation for mean SIC')\n correlator.correlate_mean_sic_indicies()\n print(' Computing spatial correlations')\n correlator.correlate_spatial_sic_indicies()\n print(' Saving to file')\n correlator.save_data()",
"1 seasonal raw raw\n Computing correlation for mean SIC\n Computing spatial correlations\n"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
eca6591d8e1d2eca821c00bc3be845280076ea40 | 262,313 | ipynb | Jupyter Notebook | notebooks/sklearn_EOF_decomposition.ipynb | nicolasfauchereau/metocean | 8186299865380da7a03dedadf809faa93fdb1695 | [
"Unlicense"
] | 29 | 2015-02-24T20:35:11.000Z | 2022-03-25T00:32:03.000Z | notebooks/sklearn_EOF_decomposition.ipynb | crocha700/metocean | 8186299865380da7a03dedadf809faa93fdb1695 | [
"Unlicense"
] | 1 | 2017-07-19T02:42:41.000Z | 2017-07-19T02:42:41.000Z | notebooks/sklearn_EOF_decomposition.ipynb | crocha700/metocean | 8186299865380da7a03dedadf809faa93fdb1695 | [
"Unlicense"
] | 18 | 2015-02-24T23:54:35.000Z | 2022-03-25T00:32:08.000Z | 208.350278 | 86,463 | 0.887478 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
eca666f2ba38087268131d1fdbbff927d3f2eff9 | 146,972 | ipynb | Jupyter Notebook | Prot_poly_analysis/C2_bpeg_restrained_11_20_2019.ipynb | UWPRG/Nance_Enzyme_Encap_MD | 52d536f7a2bf8b45195f2bc46f36755c792710fd | [
"MIT"
] | 1 | 2021-03-15T20:52:01.000Z | 2021-03-15T20:52:01.000Z | Prot_poly_analysis/C2_bpeg_restrained_11_20_2019.ipynb | UWPRG/Nance_Enzyme_Encap_MD | 52d536f7a2bf8b45195f2bc46f36755c792710fd | [
"MIT"
] | null | null | null | Prot_poly_analysis/C2_bpeg_restrained_11_20_2019.ipynb | UWPRG/Nance_Enzyme_Encap_MD | 52d536f7a2bf8b45195f2bc46f36755c792710fd | [
"MIT"
] | null | null | null | 29.721335 | 151 | 0.439492 | [
[
[
"#import the necessary modules \n%matplotlib inline \nimport numpy as np \nimport matplotlib.pyplot as plt \nimport pandas as pd \n#import scipy\nimport sklearn\nimport itertools as it\nfrom itertools import cycle \nimport os.path as op\nimport timeit \nimport json\nfrom matplotlib import animation\nimport matplotlib.font_manager as font_manager\nfrom collections import namedtuple\n#from functools import partial\n#from pathlib import Path",
"_____no_output_____"
],
[
"# Set plotting style\nplt.style.use('seaborn-white')\n\nfrom ipywidgets import interact, interactive, fixed, interact_manual\nimport ipywidgets as widgets\n#import matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"%matplotlib widget",
"_____no_output_____"
],
[
"import multiprocessing as m_proc\nm_proc.cpu_count()",
"_____no_output_____"
]
],
[
[
"### Now use MD Analysis to calculate no. of frames a center PEG residues and terminal PEG residue is with 4 Angstroms of BSA, CONFIGURATION 2",
"_____no_output_____"
],
[
"Import MDAnalysis",
"_____no_output_____"
]
],
[
[
"from prot_polymer_analysis import get_protresd_list, aa_frmcount, grptwocnt_aa, gtwo_trjcnt \nfrom prot_polymer_analysis import frac_cont, bavg_frac_cnt, prot_poly_cntmovie, AA_list_org",
"_____no_output_____"
],
[
"# Import MDAnalysis\nimport MDAnalysis as mda\nimport MDAnalysis.analysis.distances as maa_dist",
"_____no_output_____"
]
],
[
[
"### First table will be total fractional contacts and oligomer occupancy values for each Rg value ",
"_____no_output_____"
],
[
"#### Distance-based analysis \n\nFind residues that have at least one atom within a cutoff $d = 4.0$ Angstrom near water molecules in BSA/water simulation\n\nCalculate the number of surface bsa residues from a 1 ns BSA/water simulation",
"_____no_output_____"
]
],
[
[
"#Units of Angstroms \ndmax = 4.0 ",
"_____no_output_____"
],
[
"def middle_of_band(band_start, band_stop, plot_min=0, plot_max=60):\n half_way = (band_stop - band_start) / 2\n mid_band = band_start + half_way\n plot_fraction = (mid_band - plot_min) / (plot_max - plot_min)\n\n return plot_fraction",
"_____no_output_____"
]
],
[
[
"# 0.9 nm PEG restrained Rg 100 ns trajectory C2",
"_____no_output_____"
],
[
"Load the rg = 1.2 nm (3 PLGA N = 20 oligomer/BSA system) ",
"_____no_output_____"
]
],
[
[
"# Set up the MD Simulation, Make sure you do gmx trjconv -s topol.tpr -f confout.gro -o new_conf.pdb \n# -dump 0 -n bsaplga_nk.ndx to generate \n# a new pdb file that contains unique chain identifiers \nuC2_n20PEG = mda.Universe(\"bsapeg_n20waterT2/C2_pegRes/0.9nmpeg_ResC2/bpeg_0.9nmRes.pdb\", \n \"bsapeg_n20waterT2/C2_pegRes/0.9nmpeg_ResC2/C2_0.9nmbpeg.xtc\")",
"_____no_output_____"
],
[
"uC2_n20PEG",
"_____no_output_____"
]
],
[
[
"Check that we are on the first frame",
"_____no_output_____"
]
],
[
[
"uC2_n20PEG.trajectory.frame",
"_____no_output_____"
],
[
"pn20_lenC2 = len(uC2_n20PEG.trajectory)\npn20_lenC2",
"_____no_output_____"
],
[
"# Select one polymer chain, heavy atoms only \n#all_pn20 = u_pn20.select_atoms(\"(resname sPLG PLG tPLG and segid B) and not type H\")\n\n#Select all the PLGA residues, heavy atoms only\nall_n20PEGC2 = uC2_n20PEG.select_atoms(\"resname sPEG PEG tPEG and not type H\")\n#list(all_n20PEG)",
"_____no_output_____"
],
[
"# Select BSA residues, heavy atoms only \nprot_09nmC2 = uC2_n20PEG.select_atoms(\"protein and not type H\")\nprot_09nmC2",
"_____no_output_____"
]
],
[
[
"Calculate AA frame counts for PEG residues, 0.9 nm Rg restraint, 100 ns trajectory ",
"_____no_output_____"
]
],
[
[
"#dmax = 4.0, protein group(4653 atoms), plga atom group (543 atoms), took 381.6 s (6 min 36s on 4 cores)\nstart = 0\nend = pn20_lenC2 - 1\ns_time = timeit.default_timer()\nh2di_09nmC2 = aa_frmcount(prot_09nmC2, all_n20PEGC2, dmax, uC2_n20PEG, start, end)\ntimeit.default_timer() - s_time",
"_____no_output_____"
],
[
"len(h2di_09nmC2.keys())",
"_____no_output_____"
],
[
"pr_res_PEGC2 = list(prot_09nmC2.residues)\nss_res_PEGC2 = [str(row) for row in pr_res_PEGC2]\nrkg09_C2 = {key:h2di_09nmC2[key][1] for key, value in h2di_09nmC2.items()}\nplgC2_09nm_occ = pd.DataFrame(data=ss_res_PEGC2, columns=[\"BSA_des_res\"])\nplgC2_09nm_occ['mda_occ_0.9nmC2'] = plgC2_09nm_occ['BSA_des_res'].map(rkg09_C2)\nplgC2_09nm_occ['mda_occ_0.9nmC2'] = plgC2_09nm_occ['mda_occ_0.9nmC2'].replace('nan', np.nan).fillna(0)\nplgC2_09nm_occ['mda_occ_0.9nmC2'] = plgC2_09nm_occ['mda_occ_0.9nmC2'].round(2)\nplgC2_09nm_occ",
"_____no_output_____"
],
[
"plgC2_09nm_occ['mda_occ_0.9nmC2'][plgC2_09nm_occ['mda_occ_0.9nmC2'] != 0]",
"_____no_output_____"
],
[
"bsa_09C2 = np.array(list(prot_09nmC2.resids)) # shape is 4652\nmC2_occ_09r = np.array(list(plgC2_09nm_occ['mda_occ_0.9nmC2'])) # shape is 583\nmC2_occ = np.zeros(shape=(4653))\natC2_ind = np.where(bsa_09C2[:-1] != bsa_09C2[1:])[0]\natC2_in_nw = np.sort(np.append([0,4653],atC2_ind))\nnw_v = 0\nfor i in range(583):\n b = atC2_in_nw[i+1] +1\n mC2_occ[nw_v:b] = mC2_occ_09r[i]\n nw_v = atC2_in_nw[i+1] + 1 ",
"_____no_output_____"
],
[
"np.nonzero(mC2_occ)",
"_____no_output_____"
]
],
[
[
"### Visualize Occupancy on protein ",
"_____no_output_____"
]
],
[
[
"prot_09nmC2.occupancies = mC2_occ\nprot_09nmC2.occupancies",
"_____no_output_____"
],
[
"with mda.Writer(\"prot_09nmC2pegRes.pdb\") as pdb:\n pdb.write(prot_09nmC2)",
"_____no_output_____"
],
[
"# Frame count and occupancy for each residue\n#h2di",
"_____no_output_____"
],
[
"len(h2di_09nmC2.keys())",
"_____no_output_____"
]
],
[
[
"### Residue Importance: 0.9 nm restrained C2",
"_____no_output_____"
]
],
[
[
"# Need to fix function, the residue number are not counting the other 2 PLGA oligomers cuz of same resid number\ntrjmap_09nmPEGC2 = prot_poly_cntmovie(prot_09nmC2, all_n20PEGC2, dmax, uC2_n20PEG, 0, 10000)\n#trj_ppmap_12nm_chC = prot_poly_cntmovie(prot, all_pn20_C, dmax, u_pn20, 0, 10000)",
"_____no_output_____"
],
[
"np.save('0.9nmPEG_resC2.npy', trjmap_09nmPEGC2) # .npy extension is added if not given",
"_____no_output_____"
],
[
"trjmap_09nmPEGC2 = np.load('0.9nmPEG_resC2.npy', allow_pickle=True)",
"_____no_output_____"
],
[
"trjmap_09nmPEGC2[0].shape",
"_____no_output_____"
],
[
"np.sum(trjmap_09nmPEGC2[1000][0])",
"_____no_output_____"
],
[
"kj = np.zeros(shape=(10000, 583))\nkj[:,582].shape",
"_____no_output_____"
],
[
"ppC2_09nm_ct = np.zeros(shape=(10000, 583))\nfor i in range(10000):\n for j in range(583):\n ppC2_09nm_ct[i][j] = np.sum(trjmap_09nmPEGC2[i][j])",
"_____no_output_____"
],
[
"ppC2_09nmtot = np.zeros(shape=(583))\nfor i in range(583):\n ppC2_09nmtot[i] = np.sum(ppC2_09nm_ct[:,i])\n#pp_12nmtot",
"_____no_output_____"
],
[
"np.nonzero(ppC2_09nmtot)",
"_____no_output_____"
],
[
"y_pos = np.arange(583) + 1\nwid = np.zeros(shape=583)\nwid += 1.5\n#wid\nfig = plt.figure(figsize=(12,12))\nfig.canvas.layout.width = '800px'\nfig.canvas.layout.height = '700px'\nplt.bar(y_pos, ppC2_09nmtot/np.sum(ppC2_09nmtot), align='center', color='#1D77CF',width=wid, alpha=0.4, label='0.9 nm PEG')\nplt.title(\"BSA in water with PEG Restrained Config 2, 100 ns\", fontsize=18)\nplt.xticks(fontsize=14)\nplt.yticks(fontsize=14)\nplt.xlim([0,600])\nplt.ylim([0,0.16])\nplt.legend(fontsize=14)\nplt.ylabel(r'Normalized Total No. of PEG contacts', fontsize=15)\nplt.xlabel(r'BSA Residue ID', fontsize=15)",
"_____no_output_____"
]
],
[
[
"### Total number of residues that are within 4 angstroms of a PEG oligomer residue within a 100 ns trajectory block C2",
"_____no_output_____"
]
],
[
[
"a_a = [\"GLY\",\"ALA\",\"VAL\",\"LEU\",\"ILE\",\"MET\",\"PHE\",\"TRP\",\"PRO\",\"SER\",\"THR\",\"CYS\",\"TYR\",\"ASN\",\"GLN\",\"ASP\"\n ,\"GLU\",\"LYS\",\"ARG\",\"HIS\"]",
"_____no_output_____"
],
[
"# This code chunk gets the BSA residues and their corresponding number in a pandas dataframe \nred_bsa = []\nbh = np.arange(0,584)\nfor i in range(583):\n b_str = str(list(prot_09nmC2.residues[i:i+1]))\n if str(bh[i+1]) in b_str: \n red_bsa.append(str(b_str[10:13])+\" \"+str(bh[i+1]))",
"_____no_output_____"
],
[
"pr_resC2 = list(prot_09nmC2.residues)\nss_resC2 = [str(row) for row in pr_resC2]\nrkg_C2 = {key:h2di_09nmC2[key][0] for key, value in h2di_09nmC2.items()}\nplgC2_09nmaa = pd.DataFrame(data=ss_resC2, columns=[\"BSA_des_res\"])\nplgC2_09nmaa['mda_plga_frm_0.9nmC2'] = plgC2_09nmaa['BSA_des_res'].map(rkg_C2)\nplgC2_09nmaa['BSA_des_res'] = red_bsa\nplgC2_09nmaa['mda_plga_frm_0.9nmC2'] = plgC2_09nmaa['mda_plga_frm_0.9nmC2'].replace('nan', np.nan).fillna(0)\nplgC2_09nmaa.tail()",
"_____no_output_____"
],
[
"# Read in data from the oputput of wrapper.sh, where the frame count is given for each BSA residue that was within \n# 4 angstroms of PLGA trimer \nwat_data = pd.read_csv('occ_BSA1ns.txt', sep=\" \", header=None, usecols=None ,index_col=None)\nwat_data.columns = [\"BSA_res_no\",\"No. of frames (VMD)\"]\nwat_data = wat_data.drop(\"BSA_res_no\", axis=1)\n\npr_res = list(prot_09nmC2.residues)\nss_res = [str(row) for row in pr_res]\n\nwat_data['BSA_des_res'] = ss_res\nwat_data = wat_data[['BSA_des_res',\"No. of frames (VMD)\"]]\n#wat_data.head()\n\n# load MDAnalysis values from MDA_BSA1ns.txt file(129003 atoms SOL group was used to calc. frame counts for txt.\n# file)\nh2ob_dict = json.load(open(\"MDA_BSA1ns.txt\"))\nwat_data['Mda_frames'] = wat_data['BSA_des_res'].map(h2ob_dict)\n\n\n# From MD Analysis\n#Get the count of bsa residues that have 1001 or 1002 frames ( I ran a 1 ns NPT simulation of 1 BSA in water )\n\n#aa_count = pd.DataFrame(data=a_a)\nc_list = []\n\nfor i in range(len(a_a)):\n count = 0\n for index, row in wat_data.iterrows():\n if a_a[i] in row[\"BSA_des_res\"]:\n if row['Mda_frames'] == 1001: \n count += 1\n #c_list.append(str(str(a_a[i])+\" \"+str(row['No. of frames']))) \n elif row['Mda_frames'] == 1000:\n count += 1\n #c_list.append(str(str(a_a[i])+\" \"+str(row['No. of frames'])))\n c_list.append(str(str(a_a[i])+\" \"+str(count)))\n \n#c_list\n\n# From VMD\n#Get the count of bsa residues that have 1001 or 1002 frames ( I ran a 1 ns NPT simulation of 1 BSA in water )\n\n#aa_count = pd.DataFrame(data=a_a)\nvmd_list = []\n\nfor i in range(len(a_a)):\n count = 0\n for index, row in wat_data.iterrows():\n if a_a[i] in row[\"BSA_des_res\"]:\n if row[\"No. of frames (VMD)\"] == 1001: \n count += 1\n #c_list.append(str(str(a_a[i])+\" \"+str(row['No. of frames']))) \n elif row[\"No. of frames (VMD)\"] == 1002:\n count += 1\n #c_list.append(str(str(a_a[i])+\" \"+str(row['No. 
of frames'])))\n vmd_list.append(str(str(a_a[i])+\" \"+str(count)))\n\n# Main difference is that Alanine 583 is counted for all 1001 frames. It seems VMD is unable to calc dist for that res\n#vmd_list\n\n#hydrophobic_res = ['ALA', 'ILE', 'LEU', 'VAL', 'GLY', 'PRO','PHE', 'TRP','MET']\n#polar_res = ['ASN', 'CYS', 'GLN', 'SER', 'THR','TYR']\n#neg_res = ['ASP', 'GLU']\n#pos_res = ['ARG', 'HIS', 'LYS']\n# aromatic_res = ['PHE', 'TRP', 'TYR', 'HIS']\n#all_res = [pos_res, neg_res, polar_res, hydrophobic_res]\n\n# Put the AA count in a pandas dataframe \ndg , ji = AA_list_org(c_list)\naa_count = pd.DataFrame(data=dg, index=None, columns=['Amino_acids'])\nnew_lf = pd.Series(data=ji, index=None)\nvmg, vmdj = AA_list_org(vmd_list)\nn2lf = pd.Series(data=vmdj, index=None)\naa_count['No_of_surf_res (MDAnalysis)'] = new_lf\naa_count['No_of_surf_res (VMD)'] = n2lf",
"_____no_output_____"
],
[
"apl_list = []\n\n# Some residues don't have any contact with the 3 N = 20 PLGA oligomers within 100 ns,\n# Put residues that do have contact with BSA in a separate list\nfor index, r_pl in plgC2_09nmaa.iterrows():\n if r_pl['mda_plga_frm_0.9nmC2'] != 0:\n apl_list.append(r_pl['BSA_des_res'])\n \n# This chunk of code gets an AA count from the above list, in order \n# to get a total number of residues that contact BSA\ncpl_l = []\n\nfor index, r_a in aa_count.iterrows():\n count = 0\n for i in range(len(apl_list)):\n if r_a['Amino_acids'] in apl_list[i]:\n count += 1\n cpl_l.append(count) \n \naa_count['pegC2_0.9nm_100ns'] = cpl_l\naa_count",
"_____no_output_____"
],
[
"# This gives the total number of residues that are within 4 angstroms of a PLGA oligomer residue\n# within a 100 ns trajectory block\naa_count['pegC2_0.9nm_100ns'].sum()",
"_____no_output_____"
],
[
"# This gives the total number of residues that are within 4 angstroms of a water molecule\n# within a 1 ns trajectory block\naa_count['No_of_surf_res (MDAnalysis)'].sum()",
"_____no_output_____"
],
[
"# This gives the total fraction of contacts within the 1.2 nm Rg 100 ns trajectory\naa_count['pegC2_0.9nm_100ns'].sum()/aa_count['No_of_surf_res (MDAnalysis)'].sum()",
"_____no_output_____"
]
],
[
[
"Calculate mean occupancy and the standard deviation for the 0.9 nm trajectory\n\nNumpy mean and std functions were used to calculate mean occupancy and std dev using occ values from the aa_frmcount output",
"_____no_output_____"
]
],
[
[
"# Mean occupancy and std deviation \nll_mo = [value[1] for key, value in h2di_09nmC2.items()]\nprint(\"Mean Occpancy (1.2 nm Rg): \"+str(np.mean(ll_mo)), \"Occ. std. dev.: \"+str(np.std(ll_mo)))",
"Mean Occpancy (1.2 nm Rg): 0.09867777777777777 Occ. std. dev.: 0.18033977285346545\n"
]
],
[
[
"### Calc. fractional contacts for each AA group type ",
"_____no_output_____"
]
],
[
[
"cd_09nmC2 = frac_cont(h2di_09nmC2)\ncd_09nmC2",
"_____no_output_____"
],
[
"cd = frac_cont(h2di_09nmC2)\nkklh = []\nfor key, value in cd.items():\n kklh.append(value[1])\n# Must substract aromatic residues, since they are already counted\nsum(kklh) - cd['Aromatic'][1]",
"_____no_output_____"
],
[
"no_surf = aa_count['No_of_surf_res (MDAnalysis)'].sum()\nno_surf",
"_____no_output_____"
],
[
"fcntC2_rg09nm, prgrp_09nmC2, aa_matx09nmC2 = bavg_frac_cnt(5, prot_09nmC2, all_n20PEGC2, dmax, uC2_n20PEG, no_surf, 0, 10000)",
"0\n2000\n2000\n4000\n4000\n6000\n6000\n8000\n8000\n10000\n"
],
[
"fcntC2_rg09nm",
"_____no_output_____"
],
[
"prgrp_09nmC2",
"_____no_output_____"
],
[
"fc_09nmC2_mean = np.array([np.mean(fcntC2_rg09nm['Negative']), np.mean(fcntC2_rg09nm['Positive'])\n ,np.mean(fcntC2_rg09nm['Polar']),np.mean(fcntC2_rg09nm['Hydrophobic'])\n , np.mean(fcntC2_rg09nm['Aromatic'])])\nfc_09nmC2_mean",
"_____no_output_____"
],
[
"fc_09nmC2_std = np.array([np.std(fcntC2_rg09nm['Negative']), np.std(fcntC2_rg09nm['Positive'])\n ,np.std(fcntC2_rg09nm['Polar']),np.std(fcntC2_rg09nm['Hydrophobic'])\n , np.std(fcntC2_rg09nm['Aromatic'])])\nfc_09nmC2_std",
"_____no_output_____"
],
[
"x_pos = np.arange(5)\naa_types = [\"Negative\", \"Positive\", \"Polar\", \"Hydrophobic\", \"Aromatic\"]\nfig = plt.figure(figsize=(7,7))\nfig.canvas.layout.width = '500px'\nfig.canvas.layout.height = '400px'\nplt.bar(x_pos, fc_09nmC2_mean, yerr=fc_09nmC2_std, ecolor='black',capsize=5, color='c')\nplt.title(r'Fractional Contacts 0.9 nm Rg restrained Config 2', fontsize=15)\nplt.xticks(x_pos, labels=aa_types, fontsize=12)\nplt.ylabel(r'Fractional Contacts', fontsize=15)",
"_____no_output_____"
]
],
[
[
"### Total fraction of contacts: averages and std dev calc from 5 20ns blocks",
"_____no_output_____"
]
],
[
[
"# Average of total fraction of contacts\nnp.mean(fcntC2_rg09nm['total_frac'])",
"_____no_output_____"
],
[
"# Std Deviation of total fraction of contacts\nnp.std(fcntC2_rg09nm['total_frac'])",
"_____no_output_____"
]
],
[
[
"### Avg no. PEG residues per BSA AA residue group ",
"_____no_output_____"
]
],
[
[
"prgrp_09nmC2",
"_____no_output_____"
],
[
"mean_09nmC2 = np.zeros(shape=5)\nstd_09nmC2 = np.zeros(shape=5)\ncount = 0\nfor key, value in prgrp_09nmC2.items():\n mpl_09nmC2 = []\n var_09nmC2 = []\n for i in prgrp_09nmC2[str(key)].flat:\n mpl_09nmC2.append(i[0])\n var_09nmC2.append((i[1])**2)\n \n # calc frac cont averages\n mean_09nmC2[count] = np.mean(mpl_09nmC2)\n \n # calc frac cont std dev: https://stats.stackexchange.com/questions/25848/how-to-sum-a-standard-deviation \n std_09nmC2[count] = np.std(mpl_09nmC2)\n # std_12nm[count] = np.sqrt(np.sum(var_12nm)/5)\n \n count += 1\n",
"_____no_output_____"
],
[
"mean_09nmC2",
"_____no_output_____"
],
[
"std_09nmC2",
"_____no_output_____"
],
[
"x_pos = np.arange(5)\naa_types = [\"Negative\", \"Positive\", \"Polar\", \"Hydrophobic\", \"Aromatic\"]\nfig = plt.figure(figsize=(7,7))\nfig.canvas.layout.width = '500px'\nfig.canvas.layout.height = '400px'\nplt.bar(x_pos, mean_09nmC2, yerr=std_09nmC2, ecolor='black',capsize=5)\nplt.title(r'No. of PEG residues 0.9 nm Rg restrained Config 2', fontsize=15)\nplt.xticks(x_pos, labels=aa_types, fontsize=12)\nplt.ylabel(r'No. of PLGA residues', fontsize=15)",
"_____no_output_____"
]
],
[
[
"### Protein/polymer contact map movie",
"_____no_output_____"
]
],
[
[
"fig = plt.figure(figsize=(10,10))\n\n# Set the axis and the plot titles pp\n\nplt.title(\"BSA/PEG contact map 0.9 nm res, Config 2\", fontsize=22, loc='left')\nplt.xlabel(\"PLGA Residue No.\", fontsize=22)\nplt.ylabel(\"BSA Residue No.\", fontsize=20)\n\n # Set the axis range \nplt.ylim(583, 0)\nplt.xlim(0, 60)\n\n# Plot bands for each chain \nBANDS = (\n (0, 20, \"purple\", \"B\"),\n (20, 40, \"blue\", \"C\"),\n (40, 60, \"green\", \"D\"),\n)\n \ntext_y = 0.98 # Close to the top\nfor start, stop, color, band in BANDS:\n plt.axvspan(start, stop,color=color, alpha=0.15)\n text_x = middle_of_band(start,stop)\n plt.text(\n text_x,\n text_y,\n \"PLGA chain \" + band,\n color=color,\n fontsize=18,\n transform=fig.gca().transAxes,\n horizontalalignment='center',\n verticalalignment='center',\n style='italic',\n )\n \nplt.text(0.93, 1, \"Time [ns]:\", fontsize=20, transform=fig.gca().transAxes, horizontalalignment='right', verticalalignment='bottom')\n\n# Set tick label size\nfig.gca().tick_params(axis='both', which='major', labelsize=20)\n\nims = []\nfor i in range(10000):\n data = trj_ppmap_12nm[i]\n im = plt.imshow(data, aspect='auto', cmap='Greys')\n t_sim = plt.text(1, 1, str(i/100), fontsize=20, transform=fig.gca().transAxes, horizontalalignment='right', verticalalignment='bottom')\n ims.append([im, t_sim])\n \nani = animation.ArtistAnimation(fig, ims, blit=True, repeat=False)\nani.save('1.2nm_res.mp4',writer='ffmpeg', fps=50, bitrate=100000)\n#plt.tight_layout()\n#plt.show()",
"_____no_output_____"
]
],
[
[
"# 1.1 nm PEG restrained Rg 100 ns trajectory C2",
"_____no_output_____"
],
[
"Load the rg = 1.1 nm restrained (3 PEG N = 20 oligomer/BSA system) ",
"_____no_output_____"
]
],
[
[
"# Set up the MD Simulation\nu11nmC2_n20PEG = mda.Universe(\"bsapeg_n20waterT2/C2_pegRes/1.1nmpeg_ResC2/bpeg_1.1nmRes.pdb\",\n \"bsapeg_n20waterT2/C2_pegRes/1.1nmpeg_ResC2/C2_1.1nmbpeg.xtc\")",
"_____no_output_____"
],
[
"u11nmC2_n20PEG",
"_____no_output_____"
],
[
"pn20_11nmC2 = len(u11nmC2_n20PEG.trajectory)\npn20_11nmC2",
"_____no_output_____"
],
[
"#Select all the PLGA residues, heavy atoms only \nn20PEG_11nmC2 = u11nmC2_n20PEG.select_atoms(\"resname sPEG PEG tPEG and not type H\")\nn20PEG_11nmC2",
"_____no_output_____"
],
[
"# Select BSA residues, heavy atoms only \nprotC2_11nmPEG = u11nmC2_n20PEG.select_atoms(\"protein and not type H\")\nprotC2_11nmPEG",
"_____no_output_____"
]
],
[
[
"### Contact Analysis",
"_____no_output_____"
]
],
[
[
"#dmax = 4.0, protein group(4653 atoms), plga atom group (543 atoms), took 381.6 s (6 min 36s on 4 cores)\nstart = 0\nend = pn20_11nmC2 - 1\ns_time = timeit.default_timer()\nh2di_11nmC2 = aa_frmcount(protC2_11nmPEG, n20PEG_11nmC2, dmax, u11nmC2_n20PEG, start, end)\ntimeit.default_timer() - s_time\n#h2di_11nm",
"_____no_output_____"
],
[
"len(h2di_11nmC2.keys())",
"_____no_output_____"
],
[
"pr_res_PEGC2 = list(protC2_11nmPEG.residues)\nss_res_PEGC2 = [str(row) for row in pr_res_PEGC2]\nrkg_11C2 = {key:h2di_11nmC2[key][1] for key, value in h2di_11nmC2.items()}\nplgC2_09nm_occ['mda_occ_1.1nmC2'] = plgC2_09nm_occ['BSA_des_res'].map(rkg_11C2)\nplgC2_09nm_occ['mda_occ_1.1nmC2'] = plgC2_09nm_occ['mda_occ_1.1nmC2'].replace('nan', np.nan).fillna(0)\nplgC2_09nm_occ['mda_occ_1.1nmC2'] = plgC2_09nm_occ['mda_occ_1.1nmC2'].round(2)\nplgC2_09nm_occ",
"_____no_output_____"
],
[
"pr_res11nmC2 = list(protC2_11nmPEG.residues)\nss_res11nmC2 = [str(row) for row in pr_res11nmC2]\nrkg_11nmC2 = {key:h2di_11nmC2[key][0] for key, value in h2di_11nmC2.items()}\nplgC2_1_1nmaa = pd.DataFrame(data=ss_res11nmC2, columns=[\"BSA_des_res\"])\nplgC2_1_1nmaa['mda_plga_frm_1.1nmC2'] = plgC2_1_1nmaa['BSA_des_res'].map(rkg_11nmC2)\nplgC2_1_1nmaa['BSA_des_res'] = red_bsa\nplgC2_1_1nmaa['mda_plga_frm_1.1nmC2'] = plgC2_1_1nmaa['mda_plga_frm_1.1nmC2'].replace('nan', np.nan).fillna(0)\nplgC2_1_1nmaa.head()",
"_____no_output_____"
],
[
"bsa_11mnC2 = np.array(list(protC2_11nmPEG.resids)) # shape is 4652\nm_occ_11nmC2 = np.array(list(plgC2_09nm_occ['mda_occ_1.1nmC2'])) # shape is 583\nm_occ_11NewC2 = np.zeros(shape=(4653))\nat_ind = np.where(bsa_11mnC2[:-1] != bsa_11mnC2[1:])[0]\nat_in_nw = np.sort(np.append([0,4653],at_ind))\nnw_v = 0\nfor i in range(583):\n b = at_in_nw[i+1] +1\n m_occ_11NewC2[nw_v:b] = m_occ_11nmC2[i]\n nw_v = at_in_nw[i+1] + 1 ",
"_____no_output_____"
],
[
"np.nonzero(m_occ_11NewC2)",
"_____no_output_____"
]
],
[
[
"### Visualize Occupancy on protein ",
"_____no_output_____"
]
],
[
[
"protC2_11nmPEG.occupancies = m_occ_11NewC2\nprotC2_11nmPEG.occupancies[0:33]",
"_____no_output_____"
],
[
"with mda.Writer(\"prot_11nmC2pegRes.pdb\") as pdb:\n pdb.write(protC2_11nmPEG)",
"_____no_output_____"
]
],
[
[
"### Residue Importance: 1.1 nm restrained ",
"_____no_output_____"
]
],
[
[
"# Need to fix function, the residue number are not counting the other 2 PLGA oligomers cuz of same resid number\ntrjmap_11nmPEGC2 = prot_poly_cntmovie(protC2_11nmPEG, n20PEG_11nmC2, dmax, u11nmC2_n20PEG, 0, 10000)\n#trj_ppmap_12nm_chC = prot_poly_cntmovie(prot, all_pn20_C, dmax, u_pn20, 0, 10000)",
"_____no_output_____"
],
[
"#trjmap_11nmPEGC2 = np.load('1.1nm_PEGres.npy', allow_pickle=True)",
"_____no_output_____"
],
[
"np.save('1.1nmPEG_resC2.npy', trjmap_11nmPEGC2) # .npy extension is added if not given",
"_____no_output_____"
],
[
"trjmap_11nmPEGC2[0].shape",
"_____no_output_____"
],
[
"np.sum(trjmap_11nmPEGC2[1000][0])",
"_____no_output_____"
],
[
"kj = np.zeros(shape=(10000, 583))\nkj[:,582].shape",
"_____no_output_____"
],
[
"ppC2_11nm_ct = np.zeros(shape=(10000, 583))\nfor i in range(10000):\n for j in range(583):\n ppC2_11nm_ct[i][j] = np.sum(trjmap_11nmPEGC2[i][j])",
"_____no_output_____"
],
[
"ppC2_11nmtot = np.zeros(shape=(583))\nfor i in range(583):\n ppC2_11nmtot[i] = np.sum(ppC2_11nm_ct[:,i])\n#pp_12nmtot",
"_____no_output_____"
],
[
"np.nonzero(ppC2_11nmtot)",
"_____no_output_____"
],
[
"y_pos = np.arange(583) + 1\nwid = np.zeros(shape=583)\nwid += 1.5\n#wid\nfig = plt.figure(figsize=(12,12))\nfig.canvas.layout.width = '800px'\nfig.canvas.layout.height = '700px'\nplt.bar(y_pos, ppC2_11nmtot/np.sum(ppC2_11nmtot), align='center',width=wid, color='#562A8B', alpha=0.3, label='1.1 nm PEG')\n#plt.bar(y_pos, pp_09nmtot/np.sum(pp_09nmtot), align='center',width=wid, alpha=0.5, color='#1D77CF',label='0.9 nm PEG')\nplt.title(\"BSA in water with PEG Restrained Config 2, 100 ns\", fontsize=18)\nplt.xticks(fontsize=14)\nplt.yticks(fontsize=14)\nplt.xlim([0,600])\nplt.ylim([0,0.2])\nplt.legend(fontsize=14)\nplt.ylabel(r'Normalized Total No. of PEG contacts', fontsize=15)\nplt.xlabel(r'BSA Residue ID', fontsize=15)",
"_____no_output_____"
]
],
[
[
"### Total number of residues that are within 4 angstroms of a PEG oligomer residue within a 100 ns trajectory block C2",
"_____no_output_____"
]
],
[
[
"apl_11nmC2 = []\n\n# Some residues don't have any contact with the 3 N = 20 PLGA oligomers within 100 ns,\n# Put residues that do have contact with BSA in a separate list\nfor index, r_pl in plgC2_1_1nmaa.iterrows():\n if r_pl['mda_plga_frm_1.1nmC2'] != 0:\n apl_11nmC2.append(r_pl['BSA_des_res'])\n \n# This chunk of code gets an AA count from the above list, in order \n# to get a total number of residues that contact BSA\ncpl_11nmC2 = []\n\nfor index, r_a in aa_count.iterrows():\n count = 0\n for i in range(len(apl_11nmC2)):\n if r_a['Amino_acids'] in apl_11nmC2[i]:\n count += 1\n cpl_11nmC2.append(count) \n \naa_count['pegC2_1.1nm_100ns'] = cpl_11nmC2\n#aa_count.drop('No_of_surf_res (VMD)', axis=1, inplace=True)\naa_count",
"_____no_output_____"
],
[
"# This gives the total number of residues that are within 4 angstroms of a PLGA oligomer residue\n# within a 100 ns trajectory block\naa_count['pegC2_1.1nm_100ns'].sum()",
"_____no_output_____"
],
[
"# This gives the total number of residues that are within 4 angstroms of a water molecule\n# within a 1 ns trajectory block\naa_count['No_of_surf_res (MDAnalysis)'].sum()",
"_____no_output_____"
],
[
"# This gives the total fraction of contacts within the 1.2 nm Rg 100 ns trajectory\naa_count['pegC2_1.1nm_100ns'].sum()/aa_count['No_of_surf_res (MDAnalysis)'].sum()",
"_____no_output_____"
],
[
"# Mean occupancy and std deviation \nll_mo11 = [value[1] for key, value in h2di_11nmC2.items()]\nprint(\"Mean Occpancy (1.1 nm Rg): \"+str(np.mean(ll_mo11)), \"Occ. std. dev.: \"+str(np.std(ll_mo11)))",
"Mean Occpancy (1.1 nm Rg): 0.06964912280701753 Occ. std. dev.: 0.15552870229019672\n"
],
[
"cd_11nmC2 = frac_cont(h2di_11nmC2)\ncd_11nmC2",
"_____no_output_____"
]
],
[
[
"### Calc. fractional contacts for each AA group type C2",
"_____no_output_____"
]
],
[
[
"fcntC2_rg11nm, prgrp_11nmC2, aa_matx_11nmC2 = bavg_frac_cnt(5, protC2_11nmPEG, n20PEG_11nmC2, dmax \n ,u11nmC2_n20PEG, no_surf, 0, 10000)",
"0\n2000\n2000\n4000\n4000\n6000\n6000\n8000\n8000\n10000\n"
],
[
"fcntC2_rg11nm",
"_____no_output_____"
],
[
"fc_11nmC2_mean = np.array([np.mean(fcntC2_rg11nm['Negative']), np.mean(fcntC2_rg11nm['Positive'])\n ,np.mean(fcntC2_rg11nm['Polar']),np.mean(fcntC2_rg11nm['Hydrophobic'])\n , np.mean(fcntC2_rg11nm['Aromatic'])])\nfc_11nmC2_mean",
"_____no_output_____"
],
[
"fc_11nmC2_std = np.array([np.std(fcntC2_rg11nm['Negative']), np.std(fcntC2_rg11nm['Positive'])\n ,np.std(fcntC2_rg11nm['Polar']),np.std(fcntC2_rg11nm['Hydrophobic'])\n , np.std(fcntC2_rg11nm['Aromatic'])])\nfc_11nmC2_std",
"_____no_output_____"
],
[
"x_pos = np.arange(5)\nwidth = 0.35\naa_types = [\"Negative\", \"Positive\", \"Polar\", \"Hydrophobic\", \"Aromatic\"]\nfig = plt.figure(figsize=(7,7))\nfig.canvas.layout.width = '500px'\nfig.canvas.layout.height = '400px'\nplt.bar(x_pos, fc_09nmC2_mean, width, yerr=fc_09nmC2_std, ecolor='black',capsize=5, color='royalblue')\nplt.bar(x_pos+width, fc_11nmC2_mean, width, yerr=fc_11nmC2_std, ecolor='black',capsize=5, color='c')\nplt.title(r'Fractional Contacts Rg restrained Config 2', fontsize=15)\nplt.xticks(x_pos+width/2, labels=aa_types, fontsize=12)\nplt.legend(['Rg = 0.9 nm', 'Rg = 1.1 nm'], frameon=False)\nplt.ylabel(r'Fractional Contacts', fontsize=15)",
"_____no_output_____"
]
],
[
[
"### Total fraction of contacts: averages and std dev calc from 5 20 ns blocks",
"_____no_output_____"
]
],
[
[
"np.mean(fcntC2_rg11nm['total_frac'])",
"_____no_output_____"
],
[
"np.std(fcntC2_rg11nm['total_frac'])",
"_____no_output_____"
]
],
[
[
"### Avg no. PEG residues per BSA AA residue group ",
"_____no_output_____"
]
],
[
[
"prgrp_11nmC2",
"_____no_output_____"
],
[
"mean_11nmC2 = np.zeros(shape=5)\nstd_11nmC2 = np.zeros(shape=5)\ncount = 0\nfor key, value in prgrp_11nmC2.items():\n mpl_11nmC2 = []\n var_11nmC2 = []\n for i in prgrp_11nmC2[str(key)].flat:\n mpl_11nmC2.append(i[0])\n var_11nmC2.append((i[1])**2)\n \n # calc frac cont averages\n mean_11nmC2[count] = np.mean(mpl_11nmC2)\n \n # calc frac cont std dev: https://stats.stackexchange.com/questions/25848/how-to-sum-a-standard-deviation \n std_11nmC2[count] = np.std(mpl_11nmC2)\n #std_15nm[count] = np.sqrt(np.sum(var_15nm)/5)\n \n count += 1\n",
"_____no_output_____"
],
[
"mean_11nmC2",
"_____no_output_____"
],
[
"std_11nmC2",
"_____no_output_____"
],
[
"x_pos = np.arange(5)\nwidth = 0.35\naa_types = [\"Negative\", \"Positive\", \"Polar\", \"Hydrophobic\", \"Aromatic\"]\nfig = plt.figure(figsize=(7,7))\nfig.canvas.layout.width = '500px'\nfig.canvas.layout.height = '400px'\nplt.bar(x_pos, mean_09nmC2, width, yerr=std_09nmC2, ecolor='black',capsize=5, color='royalblue')\nplt.bar(x_pos+width, mean_11nmC2, width, yerr=std_11nmC2, ecolor='black',capsize=5, color='c')\nplt.title(r'No. of PEG residues, Rg restrained, Config. 2', fontsize=15)\nplt.xticks(x_pos+width/2, labels=aa_types, fontsize=12)\nplt.legend(['Rg = 0.9 nm', 'Rg = 1.1 nm'], frameon=False)\nplt.ylabel(r'No. of PEG residues', fontsize=15)",
"_____no_output_____"
]
],
[
[
"### Protein/polymer contact map movie",
"_____no_output_____"
]
],
[
[
"fig = plt.figure(figsize=(10,10))\n\n# Set the axis and the plot titles pp\n\nplt.title(\"BSA/PLGA contact map 1.5 nm res.\", fontsize=22, loc='left')\nplt.xlabel(\"PLGA Residue No.\", fontsize=22)\nplt.ylabel(\"BSA Residue No.\", fontsize=20)\n\n # Set the axis range \nplt.ylim(583, 0)\nplt.xlim(0, 60)\n\n# Plot bands for each chain \nBANDS = (\n (0, 20, \"purple\", \"B\"),\n (20, 40, \"blue\", \"C\"),\n (40, 60, \"green\", \"D\"),\n)\n \ntext_y = 0.98 # Close to the top\nfor start, stop, color, band in BANDS:\n plt.axvspan(start, stop,color=color, alpha=0.15)\n text_x = middle_of_band(start,stop)\n plt.text(\n text_x,\n text_y,\n \"PLGA chain \" + band,\n color=color,\n fontsize=18,\n transform=fig.gca().transAxes,\n horizontalalignment='center',\n verticalalignment='center',\n style='italic',\n )\n \nplt.text(0.94, 1, \"Time [ns]:\", fontsize=20, transform=fig.gca().transAxes, horizontalalignment='right', verticalalignment='bottom')\n\n# Set tick label size\nfig.gca().tick_params(axis='both', which='major', labelsize=20)\n\nims = []\nfor i in range(10000):\n data = trj_ppmap_15nm[i]\n im = plt.imshow(data, aspect='auto', cmap='Greys')\n t_sim = plt.text(1.03, 1, str(i/100), fontsize=20, transform=fig.gca().transAxes, horizontalalignment='right', verticalalignment='bottom')\n ims.append([im, t_sim])\n \nani = animation.ArtistAnimation(fig, ims, blit=True, repeat=False)\nani.save('1.5nm_res.mp4', writer='ffmpeg', fps=50, bitrate=100000)\n#plt.tight_layout()\n#plt.show()",
"_____no_output_____"
]
],
[
[
"# 1.4 nm restrained Rg PEG 100 ns trajectory C2",
"_____no_output_____"
],
[
"### Contact Analysis",
"_____no_output_____"
]
],
[
[
"# Set up the MD Simulation\nu14nmC2_n20PEG = mda.Universe(\"bsapeg_n20waterT2/C2_pegRes/1.4nmpeg_ResC2/bpeg_1.4nmRes.pdb\"\n , \"bsapeg_n20waterT2/C2_pegRes/1.4nmpeg_ResC2/C2Res_1.4nmbpeg.xtc\")",
"_____no_output_____"
],
[
"u14nmC2_n20PEG",
"_____no_output_____"
],
[
"pn20_len14nmC2 = len(u14nmC2_n20PEG.trajectory)\npn20_len14nmC2",
"_____no_output_____"
],
[
"#Select all the PLGA residues, heavy atoms only \npn20_14nmC2 = u14nmC2_n20PEG.select_atoms(\"resname sPEG PEG tPEG and not type H\")\npn20_14nmC2",
"_____no_output_____"
],
[
"# Select BSA residues, heavy atoms only \nprot_14nmC2 = u14nmC2_n20PEG.select_atoms(\"protein and not type H\")\nprot_14nmC2",
"_____no_output_____"
],
[
"#dmax = 4.0, protein group(4653 atoms), plga atom group (543 atoms), took 381.6 s (6 min 36s on 4 cores)\nstart = 0\nend = pn20_len14nmC2 - 1\ns_time = timeit.default_timer()\nh2di_14nmC2 = aa_frmcount(prot_14nmC2, pn20_14nmC2, dmax, u14nmC2_n20PEG, start, end)\ntimeit.default_timer() - s_time\n#h2di_14nm",
"_____no_output_____"
],
[
"#h2di_14nmC2",
"_____no_output_____"
],
[
"len(h2di_14nmC2.keys())",
"_____no_output_____"
],
[
"pr_res14nmC2 = list(prot_14nmC2.residues)\nss_res14nmC2 = [str(row) for row in pr_res14nmC2]\nrkg_14nmC2 = {key:h2di_14nmC2[key][0] for key, value in h2di_14nmC2.items()}\nplgC2_14nmaa = pd.DataFrame(data=ss_res14nmC2, columns=[\"BSA_des_res\"])\nplgC2_14nmaa['mda_plga_frm_1.4nmC2'] = plgC2_14nmaa['BSA_des_res'].map(rkg_14nmC2)\nplgC2_14nmaa['BSA_des_res'] = red_bsa\nplgC2_14nmaa['mda_plga_frm_1.4nmC2'] = plgC2_14nmaa['mda_plga_frm_1.4nmC2'].replace('nan', np.nan).fillna(0)\nplgC2_14nmaa.head()",
"_____no_output_____"
],
[
"# Extract mean occupancy values\npr_res_14ur = list(prot_14nmC2.residues)\nss_res_14ur = [str(row) for row in pr_res_14ur]\nrkg_14ur = {key:h2di_14nmC2[key][1] for key, value in h2di_14nmC2.items()}\nplgC2_09nm_occ['mda_occ_1.4nmC2'] = plgC2_09nm_occ['BSA_des_res'].map(rkg_14ur)\nplgC2_09nm_occ['mda_occ_1.4nmC2'] = plgC2_09nm_occ['mda_occ_1.4nmC2'].replace('nan', np.nan).fillna(0)\nplgC2_09nm_occ['mda_occ_1.4nmC2'] = plgC2_09nm_occ['mda_occ_1.4nmC2'].round(2)\nplgC2_09nm_occ",
"_____no_output_____"
],
[
"bsa_14nmC2 = np.array(list(prot_14nmC2.resids)) # shape is 4652\nm_occ_14nmC2 = np.array(list(plgC2_09nm_occ['mda_occ_1.4nmC2'])) # shape is 583\nm_occ_14NewC2 = np.zeros(shape=(4653))\natC2_ind = np.where(bsa_14nmC2[:-1] != bsa_14nmC2[1:])[0]\natC2_in_nw = np.sort(np.append([0,4653],atC2_ind))\nnw_v = 0\nfor i in range(583):\n b = atC2_in_nw[i+1] +1\n m_occ_14NewC2[nw_v:b] = m_occ_14nmC2[i]\n nw_v = atC2_in_nw[i+1] + 1 ",
"_____no_output_____"
],
[
"np.nonzero(m_occ_14NewC2)",
"_____no_output_____"
]
],
[
[
"### Visualize Occupancy on protein C2",
"_____no_output_____"
]
],
[
[
"prot_14nmC2.occupancies = m_occ_14NewC2\nprot_14nmC2.occupancies",
"_____no_output_____"
],
[
"with mda.Writer(\"prot_14nmC2pegRes.pdb\") as pdb:\n pdb.write(prot_14nmC2)",
"_____no_output_____"
]
],
[
[
"### Residue Importance: 1.4 nm restrained ",
"_____no_output_____"
]
],
[
[
"# Need to fix function, the residue number are not counting the other 2 PLGA oligomers cuz of same resid number\ntrjmap_14nmPEGC2 = prot_poly_cntmovie(prot_14nmC2, pn20_14nmC2, dmax, u14nmC2_n20PEG, 0, 10000)\n#trj_ppmap_12nm_chC = prot_poly_cntmovie(prot, all_pn20_C, dmax, u_pn20, 0, 10000)",
"_____no_output_____"
],
[
"np.save('1.4nmPEG_resC2.npy', trjmap_14nmPEGC2) # .npy extension is added if not given",
"_____no_output_____"
],
[
"trjmap_14nmPEGC2 = np.load('1.4nmPEG_resC2.npy', allow_pickle=True)\ntrjmap_14nmPEGC2",
"_____no_output_____"
],
[
"trjmap_14nmPEGC2[0].shape",
"_____no_output_____"
],
[
"np.sum(trjmap_14nmPEGC2[1000][0])",
"_____no_output_____"
],
[
"kj = np.zeros(shape=(10000, 583))\nkj[:,582].shape",
"_____no_output_____"
],
[
"ppC2_14nm_ct = np.zeros(shape=(10000, 583))\nfor i in range(10000):\n for j in range(583):\n ppC2_14nm_ct[i][j] = np.sum(trjmap_14nmPEGC2[i][j])",
"_____no_output_____"
],
[
"ppC2_14nmtot = np.zeros(shape=(583))\nfor i in range(583):\n ppC2_14nmtot[i] = np.sum(ppC2_14nm_ct[:,i])\n#pp_12nmtot",
"_____no_output_____"
],
[
"np.nonzero(ppC2_14nmtot)",
"_____no_output_____"
],
[
"a_peg = np.sum(ppC2_14nmtot)",
"_____no_output_____"
],
[
"b_peg = np.sum(ppC2_11nmtot)",
"_____no_output_____"
],
[
"c_peg = np.sum(ppC2_09nmtot)",
"_____no_output_____"
],
[
"plt.close('all')",
"_____no_output_____"
],
[
"y_pos = np.arange(583) + 1\nwid = np.zeros(shape=583)\nwid += 1.5\n#wid\nfig = plt.figure(figsize=(12,12))\nfig.canvas.layout.width = '800px'\nfig.canvas.layout.height = '700px'\n#plt.bar(y_pos, ppC2_09nmtot/c_peg, align='center',width=wid, alpha=0.5, color='#1D77CF',label='0.9 nm PEG')\n#plt.bar(y_pos+0.25, ppC2_11nmtot/b_peg, align='center',width=wid, color='#562A8B', alpha=0.3, label='1.1 nm PEG')\nplt.bar(y_pos+0.3, ppC2_14nmtot/a_peg, align='center',width=wid, color='#4E4C4D', alpha=0.3, label='1.4 nm PEG')\n#plt.bar(y_pos+0.25, pp_11nmtot/b_peg, align='center',width=wid, color='#562A8B', alpha=0.3, label='1.1 nm PEG')\n#plt.bar(y_pos, pp_09nmtot/c_peg, align='center',width=wid, alpha=0.5, color='#1D77CF',label='0.9 nm PEG')\nplt.title(\"BSA in water with PEG restrained, Config 2, 100 ns\", fontsize=18)\nplt.xticks(fontsize=14)\nplt.yticks(fontsize=14)\nplt.xlim([0,600])\nplt.ylim([0,0.2])\nplt.legend(fontsize=14)\nplt.ylabel(r'Normalized Total No. of PEG contacts', fontsize=15)\nplt.xlabel(r'BSA Residue ID', fontsize=15)",
"_____no_output_____"
]
],
[
[
"### Total number of residues that are within 4 angstroms of a PEG oligomer residue within a 100 ns trajectory block C2",
"_____no_output_____"
]
],
[
[
"apl_14nmC2 = []\n\n# Some residues don't have any contact with the 3 N = 20 PLGA oligomers within 100 ns,\n# Put residues that do have contact with BSA in a separate list\nfor index, r_pl in plgC2_14nmaa.iterrows():\n if r_pl['mda_plga_frm_1.4nmC2'] != 0:\n apl_14nmC2.append(r_pl['BSA_des_res'])\n \n# This chunk of code gets an AA count from the above list, in order \n# to get a total number of residues that contact BSA\ncpl_14nmC2 = []\n\nfor index, r_a in aa_count.iterrows():\n count = 0\n for i in range(len(apl_14nmC2)):\n if r_a['Amino_acids'] in apl_14nmC2[i]:\n count += 1\n cpl_14nmC2.append(count) \n \naa_count['pegC2_1.4nm_100ns'] = cpl_14nmC2\n#aa_count.drop('No_of_surf_res (VMD)', axis=1, inplace=True)\naa_count",
"_____no_output_____"
],
[
"# This gives the total number of residues that are within 4 angstroms of a PLGA oligomer residue\n# within a 100 ns trajectory block\naa_count['pegC2_1.4nm_100ns'].sum()",
"_____no_output_____"
],
[
"# This gives the total number of residues that are within 4 angstroms of a water molecule\n# within a 1 ns trajectory block\naa_count['No_of_surf_res (MDAnalysis)'].sum()",
"_____no_output_____"
],
[
"# This gives the total fraction of contacts within the 1.2 nm Rg 100 ns trajectory\naa_count['pegC2_1.4nm_100ns'].sum()/aa_count['No_of_surf_res (MDAnalysis)'].sum()",
"_____no_output_____"
],
[
"# Mean occupancy and std deviation \nll_mo14nm = [value[1] for key, value in h2di_14nmC2.items()]\nprint(\"Mean Occpancy (1.4 nm Rg): \"+str(np.mean(ll_mo14nm)), \"Occ. std. dev.: \"+str(np.std(ll_mo14nm)))",
"Mean Occpancy (1.4 nm Rg): 0.02847704918032787 Occ. std. dev.: 0.049686123482341624\n"
]
],
[
[
"### Calc. fractional contacts for each AA group type ",
"_____no_output_____"
]
],
[
[
"fcntC2_rg14nm, prgrp_14nmC2, aa_matx_14nmC2 = bavg_frac_cnt(5, prot_14nmC2, pn20_14nmC2, dmax, u14nmC2_n20PEG, no_surf, 0, 10000)",
"0\n2000\n2000\n4000\n4000\n6000\n6000\n8000\n8000\n10000\n"
],
[
"fcntC2_rg14nm",
"_____no_output_____"
],
[
"fc_14nmC2_mean = np.array([np.mean(fcntC2_rg14nm['Negative']), np.mean(fcntC2_rg14nm['Positive'])\n ,np.mean(fcntC2_rg14nm['Polar']),np.mean(fcntC2_rg14nm['Hydrophobic'])\n , np.mean(fcntC2_rg14nm['Aromatic'])])\nfc_14nmC2_mean",
"_____no_output_____"
],
[
"fc_14nmC2_std = np.array([np.std(fcntC2_rg14nm['Negative']), np.std(fcntC2_rg14nm['Positive'])\n ,np.std(fcntC2_rg14nm['Polar']),np.std(fcntC2_rg14nm['Hydrophobic'])\n , np.std(fcntC2_rg14nm['Aromatic'])])\nfc_14nmC2_std",
"_____no_output_____"
],
[
"x_pos = np.arange(5)\nwidth = 0.28\naa_types = [\"Negative\", \"Positive\", \"Polar\", \"Hydrophobic\", \"Aromatic\"]\nfig = plt.figure(figsize=(7,7))\nfig.canvas.layout.width = '700px'\nfig.canvas.layout.height = '700px'\nplt.bar(x_pos, fc_09nmC2_mean, width, yerr=fc_09nmC2_std, ecolor='black',capsize=5, color='royalblue')\nplt.bar(x_pos+width, fc_11nmC2_mean, width, yerr=fc_11nmC2_std, ecolor='black',capsize=5, color='c')\nplt.bar(x_pos+(2*width), fc_14nmC2_mean, width, yerr=fc_14nmC2_std, ecolor='black',capsize=5, color='lightslategray')\nplt.title(r'Fractional Contacts Rg restrained Config 2', fontsize=15)\nplt.xticks(x_pos+width, labels=aa_types, fontsize=12)\nplt.ylim(0,0.4)\nplt.legend(['Rg = 0.9 nm', 'Rg = 1.1 nm', 'Rg = 1.4 nm'], frameon=False)\nplt.ylabel(r'Fractional Contacts', fontsize=15)",
"_____no_output_____"
]
],
[
[
"### Total fraction of contacts: averages and std dev calc from 5 20 ns blocks",
"_____no_output_____"
]
],
[
[
"np.mean(fcntC2_rg14nm['total_frac'])",
"_____no_output_____"
],
[
"np.std(fcntC2_rg14nm['total_frac'])",
"_____no_output_____"
]
],
[
[
"### Avg no. PEG residues per BSA AA residue group ",
"_____no_output_____"
]
],
[
[
"prgrp_14nmC2",
"_____no_output_____"
],
[
"mean_14nmC2 = np.zeros(shape=5)\nstd_14nmC2 = np.zeros(shape=5)\ncount = 0\nfor key, value in prgrp_14nmC2.items():\n mpl_14nmC2 = []\n var_14nmC2 = []\n for i in prgrp_14nmC2[str(key)].flat:\n mpl_14nmC2.append(i[0])\n var_14nmC2.append((i[1])**2)\n \n # calc frac cont averages\n mean_14nmC2[count] = np.mean(mpl_14nmC2)\n \n # calc frac cont std dev: https://stats.stackexchange.com/questions/25848/how-to-sum-a-standard-deviation \n std_14nmC2[count] = np.std(mpl_14nmC2)\n #std_2nm[count] = np.sqrt(np.sum(var_2nm)/5)\n \n count += 1\n",
"_____no_output_____"
],
[
"mean_14nmC2",
"_____no_output_____"
],
[
"std_14nmC2",
"_____no_output_____"
],
[
"x_pos = np.arange(5)\nwidth = 0.28\naa_types = [\"Negative\", \"Positive\", \"Polar\", \"Hydrophobic\", \"Aromatic\"]\nfig = plt.figure(figsize=(7,7))\nfig.canvas.layout.width = '500px'\nfig.canvas.layout.height = '400px'\nplt.bar(x_pos, mean_09nmC2, width, yerr=std_09nmC2, ecolor='black',capsize=5, color='royalblue')\nplt.bar(x_pos+width, mean_11nmC2, width, yerr=std_11nmC2, ecolor='black',capsize=5, color='c')\nplt.bar(x_pos+(2*width), mean_14nmC2, width, yerr=std_14nmC2, ecolor='black',capsize=5, color='lightslategray')\nplt.title(r'No. of PEG residues, Rg restrained, Config 2', fontsize=15)\nplt.xticks(x_pos+width, labels=aa_types, fontsize=12)\nplt.legend(['Rg = 0.9 nm', 'Rg = 1.1 nm', 'Rg = 1.4 nm'], frameon=False)\nplt.ylabel(r'No. of PEG residues', fontsize=15)",
"_____no_output_____"
]
],
[
[
"### Protein/polymer contact map movie",
"_____no_output_____"
]
],
[
[
"fig = plt.figure(figsize=(10,10))\n\n# Set the axis and the plot titles pp\n\nplt.title(\"BSA/PEG contact map 1.4 nm restrained\", fontsize=22, loc='left')\nplt.xlabel(\"PLGA Residue No.\", fontsize=22)\nplt.ylabel(\"BSA Residue No.\", fontsize=20)\n\n # Set the axis range \nplt.ylim(583, 0)\nplt.xlim(0, 60)\n\n# Plot bands for each chain \nBANDS = (\n (0, 20, \"purple\", \"B\"),\n (20, 40, \"blue\", \"C\"),\n (40, 60, \"green\", \"D\"),\n)\n \ntext_y = 0.98 # Close to the top\nfor start, stop, color, band in BANDS:\n plt.axvspan(start, stop,color=color, alpha=0.15)\n text_x = middle_of_band(start,stop)\n plt.text(\n text_x,\n text_y,\n \"PLGA chain \" + band,\n color=color,\n fontsize=18,\n transform=fig.gca().transAxes,\n horizontalalignment='center',\n verticalalignment='center',\n style='italic',\n )\n \nplt.text(0.94, 1, \"Time [ns]:\", fontsize=20, transform=fig.gca().transAxes, horizontalalignment='right', verticalalignment='bottom')\n\n# Set tick label size\nfig.gca().tick_params(axis='both', which='major', labelsize=20)\n\nims = []\nfor i in range(10000):\n data = trj_ppmap_2nm[i]\n im = plt.imshow(data, aspect='auto', cmap='Greys')\n t_sim = plt.text(1.03, 1, str(i/100), fontsize=20, transform=fig.gca().transAxes, horizontalalignment='right', verticalalignment='bottom')\n ims.append([im, t_sim])\n \nani = animation.ArtistAnimation(fig, ims, blit=True, repeat=False)\nani.save('2nm_res.mp4', writer='ffmpeg', fps=50, bitrate=100000)\n#plt.tight_layout()\n#plt.show()",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
eca66fbccd7b8de1c6be834c0d28681e4112d616 | 59,971 | ipynb | Jupyter Notebook | Unit 3 - Windowed Scalogram Difference of Water and Energy.ipynb | kcraath/2018INFINITI_Workshop | f9029890dbca238d47e73709d3db81554ca61870 | [
"MIT"
] | null | null | null | Unit 3 - Windowed Scalogram Difference of Water and Energy.ipynb | kcraath/2018INFINITI_Workshop | f9029890dbca238d47e73709d3db81554ca61870 | [
"MIT"
] | null | null | null | Unit 3 - Windowed Scalogram Difference of Water and Energy.ipynb | kcraath/2018INFINITI_Workshop | f9029890dbca238d47e73709d3db81554ca61870 | [
"MIT"
] | 1 | 2021-01-05T19:13:09.000Z | 2021-01-05T19:13:09.000Z | 239.884 | 50,854 | 0.90282 | [
[
[
"# Windowed Scalogram",
"_____no_output_____"
],
[
"This is from the new package still being developed called wavScalogram. It is an R package for time series analysis using the Wavelet Scalogram see the github [link](https://github.com/rbensua/wavScalogram) for more details. ",
"_____no_output_____"
]
],
[
[
"# R package which includes Quantitative Financial Modelling Frameworks.\n#https://www.rdocumentation.org/packages/quantmod\n#install.packages(\"quantmod\")\nrequire(quantmod)\n\n# An R Package of time series tools and utilities; Rmetrics - Financial Time Series Objects\n#https://www.rdocumentation.org/packages/timeSeries\n#install.packages(\"timeSeries\")\nrequire(timeSeries)\n\n# devtools: Tools to Make Developing R Packages Easier \n#https://www.rdocumentation.org/packages/devtools\n#install.packages(\"devtools\")\nrequire(devtools) # using devtools to download from github \n\n# R package for time series analysis using the Wavelet Scalogram \n# from https://github.com/rbensua/wavScalogram\n#install_github(\"rbensua/wavScalogram\")\nrequire(wavScalogram)",
"_____no_output_____"
],
[
"# Identify the tickers of interest\ntickers <- c(\"CGW\",\"XLE\")\n\n# Download these tickers from Yahoo for the dates in the presentation\ngetSymbols(tickers,src=\"yahoo\", from = \"2007-06-01\",to = \"2018-01-26\")\n# Merge all the Price series into one dataframe\nAllPrices <- do.call(merge, lapply(tickers, function(x) get(x)))\n\n#Some of these series have (NA) missing values for dates when others \n# do not have missiong vaulesin the series so we interpolate for these values\nAllPrices$CGW.Close <- interpNA(AllPrices$CGW.Close)\nAllPrices$XLE.Close <- interpNA(AllPrices$XLE.Close)\n\n#Set up the correct data frame\nrCGW <- as.data.frame((AllPrices$CGW.Close))\nrXLE <- as.data.frame((AllPrices$XLE.Close))\n\n#Retrieve specific dates\ndate1 <- index(AllPrices)\n\n#save Prices in Matrix\nrW <- cbind(1:(length(AllPrices$CGW.Close)), rCGW$CGW.Close[1: length(AllPrices$CGW.Close)])\nrE <- cbind(1:(length(AllPrices$XLE.Close)), rXLE$XLE.Close[1: length(AllPrices$XLE.Close)])\n",
"_____no_output_____"
],
[
"# This function computes the Windowed Scalogram Difference of two signals. \n# The definition and details can be found in (Bolรณs et al. 2017).\n\n# For help on the exact arguments of this function see:\n?wsd\n\n# wname: A string, equal to \"MORLET\", \"DOG\", \"PAUL\", \"HAAR\" or \"HAAR2\". \n# The difference between \"HAAR\" and \"HAAR2\" is that \"HAAR2\" is more accurate but slower.\nwname <- \"MORLET\"\n\n#delta_t: Numeric. Increment of time for the construction of windows central times.\ndelta_t <- 1\n\nnt <- length(rW)/2 # number of time points\nt <- 1:nt # time vector\n\n# mc_nrand: Integer. Number of Montecarlo simulations to be performed \n# in order to determine the 95% and 5% significance contours.\nmc_nrand <- 10 # MonteCarlo repetitions (for significance contours)\n\n#windowrad: Numeric. Time radius for the windows\nwindowrad <- 64 #floor(nt/60); # % time radius for windowed scalogram (width)\n\n#rdist: Numeric. Log-scale radius for the windows measured in suboctaves.\nrdist <- floor(nt/300); # % Scale radius for distance (height)\n\n# Defining the scales (Torrence and Compo's way)\n# scaleparam: A vector of three elements with the minimum scale, \n# the maximum scale and the number of suboctaves per octave for constructing power 2 scales \n# (following Torrence and Compo 1998), measured in units of time.\n\ndt <- 1; s0 <- 2*dt; Dj <- 12\nwaverad <- 3 # Morlet wavelet radius\nsmax <- (nt-1-2*windowrad)/(2*waverad)\nscales <- c(s0, smax, Dj)\n\n#signal1: A vector containing the first signal.\n#signal2: A vector containing the second signal (its length should be equal to that of signal1)\n\n#parallel: Logical. If TRUE (default) uses function parApply from package parallel for the Montecarlo simulations. 
\n# When FALSE is uses the normal apply function\n\n#border_effects: String, equal to \"BE\", \"INNER\", \"PER\" or \"SYM\", which indicates how to manage the border effects \n# which arise usually when a convolution is performed on finite-lenght signals.\n# \"PER\": With border effects, using boundary wavelets (periodization of the original time series).\n\nwsd <- wsd(signal1 = rW[,2], signal2 = rE[,2], scaleparam = scales, delta_t = delta_t, \n windowrad = windowrad, rdist = rdist, mc_nrand = mc_nrand, wname = wname, \n parallel = TRUE, makefigure = FALSE, normalize = \"TRUE\")\n\n#Plotting the Windowed Scalogram Difference\n#?wavPlot\nwavPlot(Z = -log2(wsd$wsd), X = wsd$t, Y = wsd$scales, Ylog = TRUE, coi = wsd$coi, \n rdist = wsd$rdist, sig95 = wsd$signif95, sig05 = wsd$signif05, Xname = \"WSD: Water and Energy Prices\", \n Yname =\"Scales (days)\", Zname = \" \")\n#Adding in the dates for each of these 2 year spans (250 x 2)\naxis(3, at = c(0,500, 1000, 1500, 2000, 2500),labels=c(\"2007\", \"2009\", \"2011\", \"2013\",\"2015\", \"2017\"))\n\n#Add annual lines and lines to distinguish between investment horizons\nn = length(rW[, 1])\nabline(v = seq(250, n, 250), h = 1:16, col = \"grey\", lty = 1, lwd = 1)",
"Loading required package: abind\nLoading required package: Matrix\n"
]
],
[
[
"# References:\n\nR Core Team (2018). R: A language and environment for statistical computing. R Foundation for Statistical Computing, Vienna, Austria. URL https://www.R-project.org/.\n\nDiethelm Wuertz, Tobias Setz and Yohan Chalabi (2017). timeSeries: Rmetrics - Financial Time Series Objects. R package version 3042.102. https://CRAN.R-project.org/package=timeSeries\n\nJeffrey A. Ryan and Joshua M. Ulrich (2018). quantmod: Quantitative Financial Modelling Framework. R package version 0.4-13. https://CRAN.R-project.org/package=quantmod\n\nHadley Wickham, Jim Hester and Winston Chang (2018). devtools: Tools to Make Developing R Packages Easier. R package version 1.13.5. https://CRAN.R-project.org/package=devtools\n\nC. Torrence, G. P. Compo. A practical guide to wavelet analysis. B. Am. Meteorol. Soc. 79 (1998), 61โ78.\n\nV. J. Bolรณs, R. Benรญtez, R. Ferrer, R. Jammazi. The windowed scalogram difference: a novel wavelet tool for comparing time series. Appl. Math. Comput., 312 (2017), 49-65.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
]
] |
eca6711391888a14e2a2d1ec7929846e32a05b21 | 22,234 | ipynb | Jupyter Notebook | notebooks/01b-instructor-joint-conditional-probability.ipynb | francescolosterzo/bayesian-stats-modelling-tutorial | c3e96f411269b7dc53a390a84c6dba48df77d83c | [
"MIT"
] | 3 | 2020-03-06T10:21:31.000Z | 2020-03-18T19:37:18.000Z | notebooks/01b-instructor-joint-conditional-probability.ipynb | anmin/bayesian-stats-modelling-tutorial | afc521d9f6acf83b993f36c858340c8c72589cb3 | [
"MIT"
] | null | null | null | notebooks/01b-instructor-joint-conditional-probability.ipynb | anmin/bayesian-stats-modelling-tutorial | afc521d9f6acf83b993f36c858340c8c72589cb3 | [
"MIT"
] | 3 | 2020-10-04T13:04:37.000Z | 2021-08-04T19:12:23.000Z | 38.667826 | 6,728 | 0.690834 | [
[
[
"# Joint Probability, Conditional Probability and Bayes' Rule",
"_____no_output_____"
]
],
[
[
"#Import packages\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n%matplotlib inline\nsns.set()",
"_____no_output_____"
]
],
[
[
"## Learning Objectives of Part 1-b",
"_____no_output_____"
],
[
"- To understand and be able to simulate joint probabilities and conditional probabilities;\n- To understand Bayes' Theorem and its utility.",
"_____no_output_____"
],
[
"## Joint Probability & Conditional Probability",
"_____no_output_____"
],
[
"### Joint Probability",
"_____no_output_____"
],
[
"We have already encountered joint probabilities in the previous notebook, perhaps without knowing it: $P(A,B)$ is the probability two events $A$ and $B$ _both_ occurring.\n* For example, getting two heads in a row.\n\nIf $A$ and $B$ are independent, then $P(A,B)=P(A)P(B)$ but be warned: this is not always (or often) the case.\n\nOne way to think of this is considering \"AND\" as multiplication: the probability of A **and** B is the probability of A **multiplied** by the probability of B.",
"_____no_output_____"
],
[
"#### Hands-On: Joint Probability and Coin Flipping",
"_____no_output_____"
],
[
"Verify that $P(A,B)=P(A)P(B)$ in the two fair coin-flip case (A=heads, B=heads) by \n- first simulating two coins being flipped together and calculating the proportion of occurences with two heads;\n- then simulating one coin flip and calculating the proportion of heads and then doing that again and multiplying the two proportions.\n\nYour two calculations should give \"pretty close\" results and not the same results due to the (in)accuracy of simulation. ",
"_____no_output_____"
]
],
[
[
"# Solution: Calculate P(A,B)\nx_0 = np.random.binomial(2, 0.5, 10000)\np_ab = sum(x_0==2)/len(x_0)\n\n# Now, plot the histogram of the results\nplt.hist(x_0);\nprint(p_ab)",
"0.2456\n"
],
[
"# Solution: Calculate P(A)P(B)\nx_1 = np.random.binomial(1, 0.5, 10000)\nx_2 = np.random.binomial(1, 0.5, 10000)\np_a = sum(x_1 == 1)/len(x_1)\np_b = sum(x_2 == 1)/len(x_2)\np_a*p_b",
"_____no_output_____"
]
],
[
[
"**Note:** In order to use such simulation and _hacker statistics_ approaches to \"prove\" results such as the above, we're gliding over several coupled and deep technicalities. This is in the interests of the pedagogical nature of this introduction. For the sake of completeness, we'll mention that we're essentially\n- Using the proportion in our simulations as a proxy for the probability (which, although Frequentist, is useful to allow you to start getting your hands dirty with probability via simluation).\n\nHaving stated this, for ease of instruction, we'll continue to do so when thinking about joint & conditional probabilities of both simulated and real data. ",
"_____no_output_____"
],
[
"#### Hands-On: Joint probability for birds",
"_____no_output_____"
],
[
"What is the probability that two randomly selected birds have beak depths over 10 ?",
"_____no_output_____"
]
],
[
[
"# Import data & store lengths in a pandas series\ndf_12 = pd.read_csv('../data/finch_beaks_2012.csv')\nlengths = df_12['blength']\n\n# Calculate P(A)P(B) of two birds having beak lengths > 10\np_a = (sum(lengths > 10))/len(lengths)\np_b = (sum(lengths > 10))/len(lengths)\np_a*p_b",
"_____no_output_____"
]
],
[
[
"* Calculate the joint probability using the resampling method, that is, by drawing random samples (with replacement) from the data. First calculate $P(A)P(B)$:",
"_____no_output_____"
]
],
[
[
"# Calculate P(A)P(B) using resampling methods\nn_samples = 100000\np_a = sum(np.random.choice(lengths, n_samples, replace=True) > 10)/n_samples\np_b = sum(np.random.choice(lengths, n_samples, replace=True) > 10)/n_samples\np_a*p_b",
"_____no_output_____"
]
],
[
[
"Now calculate $P(A,B)$:",
"_____no_output_____"
]
],
[
[
"# Calculate P(A,B) using resampling methods\nn_samples = 100000\nsamples = np.random.choice(lengths, (n_samples,2), replace=True)\n_ = samples > (10, 10)\np_ab = sum(np.prod(_, axis=1))/n_samples\np_ab",
"_____no_output_____"
]
],
[
[
"**Task:** Interpret the results of your simulations.",
"_____no_output_____"
],
[
"### Conditional Probability",
"_____no_output_____"
],
[
"Now that we have a grasp on joint probabilities, lets consider conditional probabilities, that is, the probability of some $A$, knowing that some other $B$ is true. We use the notation $P(A|B)$ to denote this. For example, you can ask the question \"What is the probability of a finch beak having depth $<10$, knowing that the finch of of species 'fortis'?\"",
"_____no_output_____"
],
[
"#### Example: conditional probability for birds",
"_____no_output_____"
],
[
"1. What is the probability of a finch beak having depth > 10 ?\n2. What if we know the finch is of species 'fortis'?\n3. What if we know the finch is of species 'scandens'?",
"_____no_output_____"
]
],
[
[
"sum(df_12.blength > 10)/len(df_12)",
"_____no_output_____"
],
[
"df_fortis = df_12.loc[df_12['species'] == 'fortis']\nsum(df_fortis.blength > 10)/len(df_fortis)",
"_____no_output_____"
],
[
"df_scandens = df_12.loc[df_12['species'] == 'scandens']\nsum(df_scandens.blength > 10)/len(df_scandens)",
"_____no_output_____"
]
],
[
[
"**Note:** These proportions are definitely different. We can't say much more currently but we'll soon see how to use hypothesis testing to see what else we can say about the differences between the species of finches.",
"_____no_output_____"
],
[
"### Joint and conditional probabilities\n\nConditional and joint probabilites are related by the following:\n$$ P(A,B) = P(A|B)P(B)$$",
"_____no_output_____"
],
[
"**Homework exercise for the avid learner:** verify the above relationship using simulation/resampling techniques in one of the cases above.",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"### Hands on example: drug testing",
"_____no_output_____"
],
[
"**Question:** Suppose that a test for using a particular drug is 99% sensitive and 99% specific. That is, the test will produce 99% true positive results for drug users and 99% true negative results for non-drug users. Suppose that 0.5% (5 in 1,000) of people are users of the drug. What is the probability that a randomly selected individual with a positive test is a drug user?\n\n**If we can answer this, it will be really cool as it shows how we can move from knowing $P(+|user)$ to $P(user|+)$, a MVP for being able to move from $P(data|model)$ to $P(model|data)$.**",
"_____no_output_____"
],
[
"In the spirit of this workshop, it's now time to harness your computational power and the intuition of simulation to solve this drug testing example. \n\n* Before doing so, what do you think the answer to the question _\"What is the probability that a randomly selected individual with a positive test is a drug user?\"_ is? Write down your guess.",
"_____no_output_____"
]
],
[
[
"# Take 10,000 subjects\nn = 100000\n# Sample for number of users, non-users\nusers = np.random.binomial(n, 0.005, 1) \nnon_users = n - users",
"_____no_output_____"
],
[
"# How many of these users tested +ve ?\nu_pos = np.random.binomial(users, 0.99)\n# How many of these non-users tested +ve ?\nnon_pos = np.random.binomial(non_users, 0.01)",
"_____no_output_____"
],
[
"# how many of those +ve tests were for users?\nu_pos/(u_pos+non_pos)",
"_____no_output_____"
]
],
[
[
"**Discussion**: What you have been able to do here is to solve the following problem: you knew $P(+|user)=0.99$, but you were trying to figure out $P(user|+)$. Is the answer what you expected? If not, why not?\n\n**Key note:** This is related to the serious scientific challenge posed at the beginning here: if you know the underlying parameters/model, you can figure out the distribution and the result, but often we have only the experimental result and we're trying to figure out the most appropriate model and parameters.\n\nIt is Bayes' Theorem that lets us move between these.",
"_____no_output_____"
],
[
"## 2. Bayes' Theorem\n\n$$P(B|A) = \\frac{P(A|B)P(B)}{P(A)}$$",
"_____no_output_____"
],
[
"As you may have guessed, it is Bayes' Theorem that will allow us to move back and forth between $P(data|model)$ and $P(model|data)$. As we have seen, $P(model|data)$ is usually what we're interested in as data scientists yet $P(data|model)$ is what we can easily compute, either by simulating our model or using analytic equations.",
"_____no_output_____"
],
[
"**One of the coolest things:** Bayes Theorem can be proved with a few lines of mathematics. Your instructor will do this on the chalk/white-board now.",
"_____no_output_____"
],
[
"### Bayes Theorem solves the above drug testing problem\n\nBayes Theorem can be used to analytically derive the solution to the 'drug testing' example above as follows.",
"_____no_output_____"
],
[
"From Bayes Theorem, \n\n$$P(user|+) = \\frac{P(+|user)P(user)}{P(+)}$$\n\n",
"_____no_output_____"
],
[
"We can expand the denominator here into \n\n$$P(+) = P(+,user) + P(+,non-user) $$\n\nso that\n\n$$ P(+)=P(+|user)P(user) + P(+|non-user)P(non-user)$$\n\nand \n\n$$P(user|+) = \\frac{P(+|user)P(user)}{P(+|user)P(user) + P(+|non-user)P(non-user)}$$.",
"_____no_output_____"
],
[
"Calculating this explicitly yields\n\n$$P(user|+) = \\frac{0.99\\times 0.005}{0.99\\times 0.005 + 0.01\\times 0.995} = 0.332 $$",
"_____no_output_____"
],
[
"This means that if an individual tests positive, there is still only a 33.2% chance that they are a user! This is because the number of non-users is so high compared to the number of users.",
"_____no_output_____"
],
[
"Coming up: from Bayes Theorem to Bayesian Inference!",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
eca671627826a318c4cdb6828c1390d2010f6b26 | 5,986 | ipynb | Jupyter Notebook | notebooks/wide_residual_network_cifar10.ipynb | tayden/DeepModels | 91f3478fb336d0e76d28c51267a5c81c99a3c9fd | [
"MIT"
] | 1 | 2019-03-07T22:01:28.000Z | 2019-03-07T22:01:28.000Z | notebooks/wide_residual_network_cifar10.ipynb | tayden/DeepModels | 91f3478fb336d0e76d28c51267a5c81c99a3c9fd | [
"MIT"
] | 20 | 2020-01-28T21:47:10.000Z | 2022-03-11T23:20:29.000Z | notebooks/wide_residual_network_cifar10.ipynb | tayden/DeepModels | 91f3478fb336d0e76d28c51267a5c81c99a3c9fd | [
"MIT"
] | null | null | null | 24.838174 | 109 | 0.539759 | [
[
[
"# Wide Residual Networks CIFAR 10 Example",
"_____no_output_____"
],
[
"## Import and process the data",
"_____no_output_____"
]
],
[
[
"from keras import utils\nfrom keras.datasets import cifar10\n\nclasses = 10\nimg_rows, img_cols, img_channels = 32, 32, 3\n\n(trainX, trainY), (testX, testY) = cifar10.load_data()\n\n# Rescale and change data type of images\ntrainX = trainX.astype('float32') / 255.\ntestX = testX.astype('float32') / 255.\n\n# Convert labels to one-hot\ntrainY = utils.to_categorical(trainY, classes)\n\nprint(\"Train shape:\", trainX.shape)\nprint(\"Test shape:\", testX.shape)",
"_____no_output_____"
]
],
[
[
"## Data generators",
"_____no_output_____"
]
],
[
[
"from keras.preprocessing.image import ImageDataGenerator\n\ntrain_generator = ImageDataGenerator(\n featurewise_center=True,\n featurewise_std_normalization=True,\n width_shift_range=4./img_cols,\n height_shift_range=4./img_rows,\n fill_mode='reflect',\n horizontal_flip=True)\ntrain_generator.fit(trainX, seed=0)\n\ntest_generator = ImageDataGenerator(\n featurewise_center=True,\n featurewise_std_normalization=True)\ntest_generator.fit(trainX, seed=0)",
"_____no_output_____"
]
],
[
[
"## Instantiate the Wide Residual Network",
"_____no_output_____"
]
],
[
[
"from deep_models import wide_residual_network as wrn\nfrom keras.utils import plot_model\n\nn = 4 # 6 * n + 4 is the depth\nk = 10 # k is the width\ndropout = 0.3\n\nfname = 'WRN-{}-{}{}'.format(N * 6 + 4, k, '-dropout' if dropout > 0 else '')\nmodel_path = '{}.h5'.format(fname)\n\n# Create the model\nmodel = wrn.build_model((img_cols, img_rows, img_channels), classes=classes, n=n, k=k, dropout=dropout)",
"_____no_output_____"
]
],
[
[
"## Train the model",
"_____no_output_____"
]
],
[
[
"import keras.callbacks as callbacks\nfrom keras.optimizers import SGD\n\nepochs = 200\nbatch_size = 64\n\n\ndef scheduler(epoch, lr):\n if epoch in [60, 120, 160]:\n lr *= 0.2\n return lr\n\n\nsgd = SGD(lr=0.1, momentum=0.9, decay=0.0, nesterov=True)\nmodel.compile(loss=\"categorical_crossentropy\", optimizer=sgd, metrics=[\"acc\"])\n\n# Train the model\nhistory = model.fit_generator(\n train_generator.flow(trainX, trainY, batch_size=batch_size), \n steps_per_epoch=len(trainX) / batch_size, \n epochs=epochs,\n validation_data=test_generator.flow(testX, testY),\n callbacks=[\n callbacks.ModelCheckpoint(model_path, monitor=\"val_acc\", save_best_only=True),\n callbacks.LearningRateScheduler(scheduler)\n ])",
"_____no_output_____"
]
],
[
[
"## Save the final version",
"_____no_output_____"
]
],
[
[
"import time\n\nmodel.save('{}-{}.h5'.format(fname, time.time()))",
"_____no_output_____"
]
],
[
[
"## Print model score",
"_____no_output_____"
]
],
[
[
"from keras.models import load_model\nmodel = load_model(model_path)\n\nmetrics = model.evaluate_generator(test_generator.flow(valX, valY, shuffle=False))\n\naccuracy = metrics[1] * 100\nerror = 100 - accuracy\nprint(\"Accuracy : \", accuracy)\nprint(\"Error : \", error)",
"_____no_output_____"
]
],
[
[
"## Show the training history",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\n%matplotlib inline\n\n# summarize history for accuracy \nplt.figure(1) \nplt.plot(history.history['acc']) \nplt.plot(history.history['val_acc']) \nplt.title('model accuracy') \nplt.ylabel('accuracy') \nplt.xlabel('epoch') \nplt.legend(['train', 'test'], loc='upper left') \n \n# summarize history for loss \nplt.figure(2)\nplt.plot(history.history['loss']) \nplt.plot(history.history['val_loss']) \nplt.title('model loss') \nplt.ylabel('loss') \nplt.xlabel('epoch') \nplt.legend(['train', 'test'], loc='upper left') \nplt.show()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
eca672e5015532c0d4d6a90c150af8a973617a98 | 4,241 | ipynb | Jupyter Notebook | Creating_labels.ipynb | Jitender46559/Eye-For-BlindPerson | 22659fd1d387636df432d649feee3cb70549996f | [
"MIT"
] | null | null | null | Creating_labels.ipynb | Jitender46559/Eye-For-BlindPerson | 22659fd1d387636df432d649feee3cb70549996f | [
"MIT"
] | null | null | null | Creating_labels.ipynb | Jitender46559/Eye-For-BlindPerson | 22659fd1d387636df432d649feee3cb70549996f | [
"MIT"
] | null | null | null | 20.687805 | 129 | 0.487149 | [
[
[
"import os\nimport numpy as np\nimport pickle\nfrom tqdm import tqdm\nimport time\nstart_time = time.time()",
"_____no_output_____"
],
[
"train_labels = []\ntesting_path = []\ntrain_dir = \"C:/Users/Jitender kumar/Desktop/Mini Project KCC/Train_data\"\nfor entry_name in tqdm(os.listdir(train_dir)):\n entry_path = os.path.join(train_dir, entry_name)\n testing_path.append(entry_name)",
"100%|โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ| 8794/8794 [00:00<00:00, 73109.39it/s]\n"
],
[
"for values in testing_path:\n train_labels.append(int(values.split('_')[1]))",
"_____no_output_____"
],
[
"train_labels = np.array(train_labels)\ntype(train_labels)",
"_____no_output_____"
],
[
"test_labels = []\ntesting_path2 = []\ntest_dir = \"C:/Users/Jitender kumar/Desktop/Mini Project KCC/Test_data\"\nfor entry_name in tqdm(os.listdir(test_dir)):\n entry_path = os.path.join(test_dir, entry_name)\n testing_path2.append(entry_name)",
"100%|โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ| 977/977 [00:00<00:00, 50680.03it/s]\n"
],
[
"for values in testing_path2:\n test_labels.append(int(values.split('_')[1]))",
"_____no_output_____"
],
[
"test_labels = np.array(test_labels)\ntype(test_labels)",
"_____no_output_____"
],
[
"test_labels.shape",
"_____no_output_____"
],
[
"#pickle_out = open(\"train_labels.pickle\",\"wb\")\n#pickle.dump(train_labels, pickle_out)\n#pickle_out.close()",
"_____no_output_____"
],
[
"#pickle_out = open(\"test_labels.pickle\",\"wb\")\n#pickle.dump(test_labels, pickle_out)\n#pickle_out.close()",
"_____no_output_____"
],
[
"print(\"--- %s seconds ---\" % (time.time() - start_time))",
"--- 0.768824577331543 seconds ---\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
eca69ca9031f7e5298d2050b859564f8426d6009 | 131,053 | ipynb | Jupyter Notebook | jupyter_matlab/Chapter J8.ipynb | StaThin/statmat | f80eff25a09b78a923079db9658a5a96224b0dd9 | [
"MIT"
] | null | null | null | jupyter_matlab/Chapter J8.ipynb | StaThin/statmat | f80eff25a09b78a923079db9658a5a96224b0dd9 | [
"MIT"
] | null | null | null | jupyter_matlab/Chapter J8.ipynb | StaThin/statmat | f80eff25a09b78a923079db9658a5a96224b0dd9 | [
"MIT"
] | null | null | null | 65.592092 | 34,620 | 0.632576 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
eca6a245de0edbe7dcaf6396435f18fa2afea78b | 732,337 | ipynb | Jupyter Notebook | notebooks/.ipynb_checkpoints/FourierTransform-checkpoint.ipynb | tomtrogdon/AKNS.jl | 05d452c8268300aa0cba269a0dde9289f151bec1 | [
"MIT"
] | 1 | 2021-04-13T07:36:05.000Z | 2021-04-13T07:36:05.000Z | notebooks/.ipynb_checkpoints/FourierTransform-checkpoint.ipynb | tomtrogdon/AKNS.jl | 05d452c8268300aa0cba269a0dde9289f151bec1 | [
"MIT"
] | 6 | 2021-03-26T00:11:34.000Z | 2021-03-30T00:13:35.000Z | notebooks/FourierTransform.ipynb | tomtrogdon/AKNS.jl | 05d452c8268300aa0cba269a0dde9289f151bec1 | [
"MIT"
] | null | null | null | 184.746973 | 22,271 | 0.675765 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
eca6b393a7b1aecc16a5bb1e5d9849a84a44f687 | 956,728 | ipynb | Jupyter Notebook | Application_of_Bounded_CNN_For_Digit_Recognition.ipynb | PatrickgHayes/gmm-dnn-for-interpretability | 83f88a5df726fbf4eacc68a679232e24c0d7b0f3 | [
"MIT"
] | null | null | null | Application_of_Bounded_CNN_For_Digit_Recognition.ipynb | PatrickgHayes/gmm-dnn-for-interpretability | 83f88a5df726fbf4eacc68a679232e24c0d7b0f3 | [
"MIT"
] | null | null | null | Application_of_Bounded_CNN_For_Digit_Recognition.ipynb | PatrickgHayes/gmm-dnn-for-interpretability | 83f88a5df726fbf4eacc68a679232e24c0d7b0f3 | [
"MIT"
] | null | null | null | 60.636836 | 26,135 | 0.610715 | [
[
[
"# DISTRIBUTION STATEMENT A. Approved for public release: distribution unlimited.\n#\n# This material is based upon work supported by the Assistant Secretary of Defense for Research and\n# Engineering under Air Force Contract No. FA8721-05-C-0002 and/or FA8702-15-D-0001. Any opinions,\n# findings, conclusions or recommendations expressed in this material are those of the author(s) and\n# do not necessarily reflect the views of the Assistant Secretary of Defense for Research and\n# Engineering.\n#\n# ยฉ 2018 Massachusetts Institute of Technology.\n#\n# MIT Proprietary, Subject to FAR52.227-11 Patent Rights - Ownership by the contractor (May 2014)\n#\n# The software/firmware is provided to you on an As-Is basis\n#\n# Delivered to the U.S. Government with Unlimited Rights, as defined in DFARS Part 252.227-7013 or\n# 7014 (Feb 2014). Notwithstanding any copyright notice, U.S. Government rights in this work are\n# defined by DFARS 252.227-7013 or DFARS 252.227-7014 as detailed above. Use of this work other than\n# as specifically authorized by the U.S. Government may violate any copyrights that exist in this\n# work.",
"_____no_output_____"
]
],
[
[
"# Let's see a bounded CNN in action ",
"_____no_output_____"
]
],
[
[
"%matplotlib notebook\nfrom mpl_toolkits import mplot3d\nimport matplotlib.pyplot as plt \nfrom pprint import pprint\nimport numpy as np\nfrom sklearn.datasets import load_digits\nfrom sklearn.mixture import GaussianMixture\nimport time\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nimport torch.optim as optim\nfrom torch.autograd import Variable\nimport random\nimport sys\n\ndef timeit(function, description=\"Please provide a description | it \"):\n start = time.time()\n values = function()\n end = time.time()\n print(description + \" took \" + str(np.round(end - start, 4)) + \" seconds\")\n return values\n\ndef hello_person(person):\n print(\"Hello \" + person)\n return \"Passes along the return values\"\n\nprint(timeit(lambda: hello_person(\"John\"), description=\"Saying hello to John\"))",
"Hello John\nSaying hello to John took 0.0001 seconds\nPasses along the return values\n"
]
],
[
[
"# The task: classify these tiny images of digits",
"_____no_output_____"
]
],
[
[
"digits = load_digits()\n\nfig = plt.figure(figsize=(8, 1))\nfor i in range(10):\n ax = fig.add_subplot(1, 10, i+1)\n ax.get_yaxis().set_visible(False)\n ax.get_xaxis().set_visible(False)\n ax.imshow(digits.images[i])\nfig.canvas.draw()\ntime.sleep(0.2)\nplt.close(fig)",
"_____no_output_____"
],
[
"print(len(digits.images))\nprint(digits.images[0].shape)",
"1797\n(8, 8)\n"
],
[
"n_layers = 3\nn_classes = 10\nclass Baseline(nn.Module):\n \n def __init__(self, n_layers=n_layers, n_classes=n_classes):\n super(Baseline, self).__init__()\n self.features = nn.Sequential(\n nn.Conv2d(1, n_layers, kernel_size=2, stride=2),\n nn.LeakyReLU(inplace=True),\n nn.Conv2d(n_layers, n_layers, kernel_size=2, stride=2),\n nn.LeakyReLU(inplace=True),\n )\n \n self.classifier = nn.Sequential(\n nn.Linear(n_layers * 2 * 2, n_layers * 2 * 2),\n nn.LeakyReLU(inplace=True),\n nn.Linear(n_layers * 2 * 2, n_layers * 2 * 2),\n nn.LeakyReLU(inplace=True),\n nn.Linear(n_layers * 2 * 2, n_classes),\n )\n return\n \n def forward(self, x):\n x = self.features(x)\n x = x.view(x.size(0), n_layers * 2 * 2)\n x = self.classifier(x)\n return x",
"_____no_output_____"
]
],
[
[
"# Make sure our base line works well",
"_____no_output_____"
]
],
[
[
"baseline = Baseline()\n\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.Adam(baseline.parameters(), lr=0.001)\n\ndata, labels = digits.images[:].reshape(-1, 1, 8, 8), digits.target\ndata, labels = Variable(torch.tensor(data).float()), Variable(torch.tensor(labels))\n\nprint_every = (4, 9, 99, 499, 999,)\nloss_history = list()\nfor epoch in range(1000):\n running_loss = 0.0\n \n optimizer.zero_grad()\n \n outputs = baseline(data)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n \n running_loss = loss.item()\n loss_history.append(running_loss)\n \n if epoch in print_every:\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n ax.plot(loss_history)\n fig.subplots_adjust(bottom=0.15)\n fig.canvas.draw()\n print(\"Loss: \" + str(loss_history[-1]))\n _, predictions = F.softmax(baseline(data), dim=1).max(1)\n accuracy = 1 - (torch.nonzero(predictions - labels).size(0)) / predictions.size(0)\n print(\"Accuracy: \" + str(accuracy))\n time.sleep(0.2)\n plt.close(fig)",
"_____no_output_____"
]
],
[
[
"# Build our fitted Neural Network",
"_____no_output_____"
]
],
[
[
"class DomainNet(nn.Module):\n \n def __init__(self, n_layers=n_layers, n_classes=n_classes):\n super(DomainNet, self).__init__()\n \n self.n_layers = n_layers\n self.n_classes = n_classes\n \n ##### layers #####\n self.conv1 = nn.Conv2d(1, n_layers, kernel_size=2, stride=2)\n self.conv2 = nn.Conv2d(n_layers, n_layers, kernel_size=2, stride=2)\n \n self.linear1 = nn.Linear(n_layers * 2 * 2, n_layers * 2 * 2)\n self.linear2 = nn.Linear(n_layers * 2 * 2, n_layers * 2 * 2)\n self.linear3 = nn.Linear(n_layers * 2 * 2, n_classes)\n #### layers #####\n \n ### Gaussian Mixture Models ###\n self.gmm_inputs = list()\n self.gmm_conv1 = list()\n self.gmm_conv2 = None\n \n self.gmm_linear1 = None\n self.gmm_linear2 = None\n self.gmm_linear3 = None\n ### Gaussian Mixture Models ###\n return\n \n def forward(self, x):\n outputs = F.leaky_relu(self.conv1(x))\n outputs = F.leaky_relu(self.conv2(outputs))\n \n outputs = outputs.view(outputs.shape[0], self.n_layers * 2 * 2)\n \n outputs = F.leaky_relu(self.linear1(outputs))\n outputs = F.leaky_relu(self.linear2(outputs))\n outputs = self.linear3(outputs)\n return outputs\n \n def _fit_domain_of_convolutional_layer(self, outputs, val=None, n_components=50):\n gmms = list()\n for j in range(0, outputs.shape[2] - 1, 2):\n for k in range(0, outputs.shape[3] - 1, 2):\n kernel = outputs[:, :, j:j+2, k:k+2]\n gmm = GaussianMixture(covariance_type='full',\n n_components=n_components).fit(\n kernel.detach().numpy().reshape(-1, kernel.shape[1] * 2 * 2))\n if val is not None:\n kernel = val[:, :, j:j+2, k:k+2]\n probabilities = gmm.score_samples(kernel.detach().numpy().reshape(-1, kernel.shape[1] * 2 * 2))\n gmm.threshold = np.amin(probabilities)\n gmms.append(gmm)\n return gmms\n \n def _fit_domain_of_linear_layer(self, outputs, val=None, n_components=50):\n gmm = GaussianMixture(covariance_type='full',\n n_components=n_components).fit(outputs.detach().numpy())\n if val is None:\n probabilities = gmm.score_samples(outputs.detach().numpy())\n 
else:\n probabilities = gmm.score_samples(val.detach().numpy())\n gmm.threshold = np.amin(probabilities)\n return gmm\n \n def fit_domain(self, x, val=None, n_components=50):\n self.gmm_inputs = self._fit_domain_of_convolutional_layer(x, val=val, n_components=n_components)\n outputs = self.conv1(x)\n if val is not None:\n val = self.conv1(val)\n \n self.gmm_conv1 = self._fit_domain_of_convolutional_layer(outputs, val=val, n_components=n_components)\n outputs = self.conv2(F.leaky_relu(outputs))\n if val is not None:\n val = self.conv2(F.leaky_relu(val))\n \n outputs = outputs.view(outputs.size(0), self.n_layers * 2 * 2)\n if val is not None:\n val = val.view(val.size(0), self.n_layers * 2 * 2)\n \n self.gmm_conv2 = self._fit_domain_of_linear_layer(outputs, val=val, n_components=n_components)\n outputs = self.linear1(F.leaky_relu(outputs))\n if val is not None:\n val = self.linear1(F.leaky_relu(val))\n \n self.gmm_linear1 = self._fit_domain_of_linear_layer(outputs, val=val, n_components=n_components)\n outputs = self.linear2(F.leaky_relu(outputs))\n if val is not None:\n val = self.linear2(F.leaky_relu(val))\n \n self.gmm_linear2 = self._fit_domain_of_linear_layer(outputs, val=val, n_components=n_components)\n outputs = self.linear3(F.leaky_relu(outputs))\n if val is not None:\n val = self.linear3(F.leaky_relu(val))\n \n self.gmm_linear3 = self._fit_domain_of_linear_layer(outputs, val=val, n_components=n_components)\n return\n \n def _domain_check_convolutional_layer(self, outputs, gmms):\n i = 0\n for j in range(0, outputs.shape[2] - 1, 2):\n for k in range(0, outputs.shape[3] - 1, 2):\n kernel = outputs[:, :, j:j+2, k:k+2]\n probabilities = gmms[i].score_samples(\n kernel.detach().numpy().reshape(-1, kernel.shape[1] * 2 * 2))\n if probabilities[0] < gmms[i].threshold:\n return False\n i +=1\n return True\n \n def _domain_check_linear_layer(self, outputs, gmm):\n probabilities = gmm.score_samples(outputs.detach().numpy())\n if probabilities[0] < gmm.threshold:\n 
return False\n return True\n \n def domain_check(self, x):\n \"\"\"\n Currently only supports checking one image at a time\n \"\"\"\n passing = self._domain_check_convolutional_layer(x, self.gmm_inputs)\n if not passing:\n return -1\n outputs = self.conv1(x)\n \n passing = self._domain_check_convolutional_layer(outputs, self.gmm_conv1)\n if not passing:\n return -2\n outputs = self.conv2(F.leaky_relu(outputs))\n \n outputs = outputs.view(outputs.size(0), self.n_layers * 2 * 2)\n \n passing = self._domain_check_linear_layer(outputs, self.gmm_conv2)\n if not passing:\n return -3\n outputs = self.linear1(F.leaky_relu(outputs))\n \n passing = self._domain_check_linear_layer(outputs, self.gmm_linear1)\n if not passing:\n return -4\n outputs = self.linear2(F.leaky_relu(outputs))\n \n passing = self._domain_check_linear_layer(outputs, self.gmm_linear2)\n if not passing:\n return -5\n outputs = self.linear3(F.leaky_relu(outputs))\n \n passing = self._domain_check_linear_layer(outputs, self.gmm_linear3)\n if not passing:\n return -6\n \n # It is in domain\n return 0\n \n def sample_from_linear(self, gmm, target, target_gmm, linear_transformation, sample_size=100, temp=1):\n max_iterations = 1000\n for i in range(max_iterations):\n if max_iterations - i < 5:\n print(\"sampling iteration: \" + str(i))\n samples, _ = gmm.sample(sample_size)\n scores = gmm.score_samples(samples)\n idxs = np.where(scores >= gmm.threshold)[0]\n if idxs.shape[0] != 0:\n samples = samples[idxs]\n squashed_sample = linear_transformation(F.leaky_relu(torch.tensor(samples).float())).detach().numpy()\n scores = target_gmm.score_samples(squashed_sample)\n idxs = np.where(scores >= target_gmm.threshold)[0]\n if idxs.shape[0] != 0:\n samples = samples[idxs]\n squashed_sample = squashed_sample[idxs]\n break\n if i == (max_iterations - 1):\n sys.exit(\"Sampling took to long we timed out\") \n negative_distances = -1 * np.linalg.norm(squashed_sample - target, axis=1) / temp\n probability_weights = 
F.softmax(torch.tensor(negative_distances).float(), dim=0).detach().numpy()\n idx = np.random.choice(np.arange(probability_weights.shape[0]), p=probability_weights)\n print(\"Distance between sampled point and target point \" + str(-1 * negative_distances[idx] * temp))\n new_target = samples[idx]\n return new_target\n \n def sample_from_outputs(self, gmm, target, sample_size=100, temp=1):\n max_iterations = 1000\n for i in range(max_iterations):\n if max_iterations - i < 5:\n print(\"sampling iteration: \" + str(i))\n samples, _ = gmm.sample(sample_size)\n scores = gmm.score_samples(samples)\n idxs = np.where(scores >= gmm.threshold)[0]\n if idxs.shape[0] != 0:\n samples = samples[idxs]\n break\n if i == (max_iterations - 1):\n sys.exit(\"Sampling took to long we timed out\")\n \n squashed_samples = F.softmax(torch.tensor(samples).float(), dim=1).detach().numpy()\n negative_distances = -1 * np.linalg.norm(squashed_samples - target, axis=1) / temp\n probability_weights = F.softmax(torch.tensor(negative_distances).float(), dim=0).detach().numpy()\n idx = np.random.choice(np.arange(probability_weights.shape[0]), p=probability_weights)\n print(\"Distance between sampled point and target point \" + str(-1 * negative_distances[idx] * temp))\n return samples[idx]\n \n def generate_one_sample_from_convolutional(self, gmms, target,\n conv_filter, sample_size=100, temp=1, is_inputs=False):\n max_iterations = 1000\n # create an array filled with nans that has the shape of a sample\n sample = np.zeros((1, conv_filter.weight.shape[1], target.shape[2] * 2, target.shape[3] * 2))\n sample.fill(np.nan)\n \n # create an array filled with nans that has the shape of target\n transformed_sample = np.zeros(target.shape)\n transformed_sample.fill(np.nan)\n \n # create a list of all the kernel locations and shuffle it\n kernel_idxs = list()\n i = 0\n for j in range(target.shape[2]):\n for k in range(target.shape[3]):\n kernel_idxs.append((i, j, k))\n i += 1\n random.shuffle(kernel_idxs)\n 
\n # sample from each kernel\n # make sure each sample is in domain. If you can't generate an in domain sample give up after a while\n # and return None\n for (i, j, k) in kernel_idxs:\n kernel_target = target[:, :, j, k].reshape(-1, self.n_layers)\n for m in range(max_iterations):\n if max_iterations - m < 5:\n print(\"sampling iteration: \" + str(m))\n kernel_sample, _ = gmms[i].sample(sample_size)\n scores = gmms[i].score_samples(kernel_sample)\n print\n idxs = np.where(scores >= gmms[i].threshold)[0]\n if idxs.shape[0] != 0:\n kernel_sample = kernel_sample[idxs]\n kernel_sample = kernel_sample.reshape(-1, conv_filter.weight.shape[1], 2, 2)\n break\n if m == (max_iterations - 1):\n print(\"Gave up on this iteration\")\n return None, None\n \n if is_inputs:\n # If its the input there shouldn't be any non-linearity\n transformed_kernel_sample = conv_filter(\n torch.tensor(kernel_sample).float()).detach().numpy().reshape(-1, self.n_layers)\n else:\n transformed_kernel_sample = conv_filter(F.leaky_relu(\n torch.tensor(kernel_sample).float())).detach().numpy().reshape(-1, self.n_layers)\n \n negative_distances = -1 * np.linalg.norm(transformed_kernel_sample - kernel_target, axis=1) / temp\n probability_weights = F.softmax(torch.tensor(negative_distances).float(), dim=0).detach().numpy()\n idx = np.random.choice(np.arange(probability_weights.shape[0]), p=probability_weights)\n sample_j, sample_k = (j * 2), (k *2)\n sample[0, :, sample_j:sample_j+2, sample_k:sample_k+2] = kernel_sample[idx, :, :, :]\n transformed_sample[0, :, j, k] = transformed_kernel_sample[idx, :]\n return sample, transformed_sample\n \n def sample_from_convolutional(self, gmms, target\n , target_gmms, conv_filter, sample_size=100\n , temp=1, is_inputs=False, is_last_conv_layer=False):\n samples = list()\n transformed_samples = list()\n \n for i in range(int(sample_size/10)):\n sample, transformed_sample = self.generate_one_sample_from_convolutional(gmms, target\n , conv_filter, sample_size=100\n , 
temp=temp, is_inputs=is_inputs)\n if sample is not None:\n samples.append(sample)\n transformed_samples.append(transformed_sample)\n \n if len(samples) == 0:\n sys.exit(\"Uanble to generate a single in domain sample\")\n \n if not is_last_conv_layer:\n in_domain = np.array([self._domain_check_convolutional_layer(torch.tensor(transformed_sample).float()\n , target_gmms) for transformed_sample in transformed_samples])\n else:\n in_domain = np.array([self._domain_check_linear_layer(torch.tensor(transformed_sample.reshape(-1, self.n_layers * 2 * 2)).float()\n , target_gmms) for transformed_sample in transformed_samples])\n idxs = np.where(in_domain == True)[0]\n if idxs.shape[0] == 0:\n sys.exit(\"No samples were in domain\")\n \n samples = np.concatenate(samples, axis=0)\n transformed_samples = np.concatenate(transformed_samples, axis=0)\n \n samples = samples[idxs]\n transformed_samples = transformed_samples[idxs].reshape(samples.shape[0], -1)\n target = target.reshape(1, -1)\n negative_distances = -1 * np.linalg.norm(transformed_samples - target, axis=1) / temp\n probability_weights = F.softmax(torch.tensor(negative_distances).float(), dim=0).detach().numpy()\n idx = np.random.choice(np.arange(probability_weights.shape[0]), p=probability_weights)\n print(\"Distance between sampled point and target point \" + str(-1 * negative_distances[idx] * temp))\n return np.expand_dims(samples[idx], axis=0)\n \n def sample(self, target=None, sample_size=1000, temp=1):\n if target is None:\n target, _ = self.gmm_linear3.sample()\n else:\n target = self.sample_from_outputs(self.gmm_linear3, target, sample_size=sample_size, temp=temp)\n \n target = self.sample_from_linear(self.gmm_linear2, target, self.gmm_linear3, self.linear3, sample_size=sample_size, temp=temp)\n target = self.sample_from_linear(self.gmm_linear1, target, self.gmm_linear2, self.linear2, sample_size=sample_size, temp=temp)\n target = self.sample_from_linear(self.gmm_conv2, target, self.gmm_linear1, self.linear1, 
sample_size=sample_size, temp=temp)\n \n target = target.reshape(-1, self.n_layers, 2, 2)\n \n target = self.sample_from_convolutional(self.gmm_conv1, target, self.gmm_conv2, self.conv2, sample_size=sample_size, temp=temp\n , is_last_conv_layer=True)\n target = self.sample_from_convolutional(self.gmm_inputs, target, self.gmm_conv1, self.conv1, sample_size=sample_size, temp=temp\n , is_inputs=True)\n return target\n",
"_____no_output_____"
],
[
"domain_net = DomainNet()\n\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.Adam(domain_net.parameters(), lr=0.001)\n\ndata, labels = digits.images[:].reshape(-1, 1, 8, 8), digits.target\nidxs = np.arange(data.shape[0])\nnp.random.shuffle(idxs)\nval_data, val_labels = data[idxs[1000:]], labels[idxs[1000:]]\ndata, labels = data[idxs[:1000]], labels[idxs[:1000]]\ndata, labels = Variable(torch.tensor(data).float()), Variable(torch.tensor(labels))\nval_data, val_labels = Variable(torch.tensor(val_data).float()), Variable(torch.tensor(val_labels))\n\nprint_every = (4, 9, 99, 499)\nloss_history = list()\nfor epoch in range(500):\n running_loss = 0.0\n \n optimizer.zero_grad()\n \n outputs = domain_net(data)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n \n running_loss = loss.item()\n loss_history.append(running_loss)\n \n if epoch in print_every:\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n ax.plot(loss_history)\n fig.subplots_adjust(bottom=0.15)\n fig.canvas.draw()\n print(\"Epoch: \" + str(epoch))\n print(\"Loss: \" + str(loss_history[-1]))\n _, predictions = F.softmax(domain_net(data), dim=1).max(1)\n accuracy = 1 - (torch.nonzero(predictions - labels).size(0)) / predictions.size(0)\n print(\"Accuracy: \" + str(accuracy))\n _, predictions = F.softmax(domain_net(val_data), dim=1).max(1)\n accuracy = 1 - (torch.nonzero(predictions - val_labels).size(0)) / predictions.size(0)\n print(\"Validation Accuracy: \" + str(accuracy))\n time.sleep(0.2)\n plt.close(fig)\n timeit(lambda: domain_net.fit_domain(data, val=val_data, n_components=50), description=\"Fit domain\")\n img = timeit(lambda: domain_net.sample(target=np.array([1,0,0,0,0,0,0,0,0,0]), temp=0.0001, sample_size=500)\n , description=\"Sampling an image from the domain\")\n fig = plt.figure(figsize=(1, 1))\n ax = fig.add_subplot(1, 1, 1)\n ax.get_yaxis().set_visible(False)\n ax.get_xaxis().set_visible(False)\n ax.imshow(img[0, 0, :, :])\n fig.canvas.draw()\n 
time.sleep(0.2)\n plt.close(fig)\n print(\"0 means in domain \" + str(timeit(lambda: domain_net.domain_check(torch.tensor(img).float())\n , description=\"Checking whether an image is in domain\")))\n print()\n print()",
"_____no_output_____"
],
[
"img = timeit(lambda: domain_net.sample(target=np.array([[0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1]]), temp=1, sample_size=500), description=\"Sampling\")\nimg = np.clip(img, 0, 16).astype(int)\n# print(img)\nfig = plt.figure(figsize=(1, 1))\nax = fig.add_subplot(1, 1, 1)\nax.get_yaxis().set_visible(False)\nax.get_xaxis().set_visible(False)\nax.imshow(img[0, 0, :, :])\nfig.canvas.draw()\ntime.sleep(0.2)\nplt.close(fig)\n\nprint(\"0 means in domain \" + str(timeit(lambda: domain_net.domain_check(torch.tensor(img).float())\n , description=\"Checking whether an image is in domain\")))\n\na = np.round(F.softmax(domain_net(torch.tensor(img).float()), dim=1).detach().numpy(), 2)\nprint(a[0])\nprint(np.argmax(a[0]))",
"Distance between sampled point and target point 0.6895751928415847\nTarget [[0.1 0.1 0.1 0.1 0.1 0.1 0.1 0.1 0.1 0.1]]\nSampled Point [7.0650899e-01 1.0212325e-03 4.0855562e-11 9.9559074e-05 7.7029428e-04\n 2.7612785e-01 4.8149088e-03 1.5670809e-09 9.2522670e-03 1.4049315e-03]\nDistance between sampled point and target point 6.817134209508278\nDistance between sampled point and target point 5.882221586320048\nDistance between sampled point and target point 6.977690233281409\nDistance between sampled point and target point 3.5739029934576583\nDistance between sampled point and target point 8.747076372093552\nSampling took 6.2419 seconds\n"
],
[
"import torchvision",
"_____no_output_____"
]
],
[
[
"# We can use fashion MNIST for out of domain detection",
"_____no_output_____"
]
],
[
[
"trainset = torchvision.datasets.FashionMNIST(root='./data/fashion', train=True, download=True)\ntrainset.train_data = F.pad(trainset.train_data, (2, 2, 2, 2), 'constant', 100)\ntestset = torchvision.datasets.FashionMNIST(root='./data/fashion', train=False, download=True)\ntestset.test_data = F.pad(testset.test_data, (2, 2, 2, 2), 'constant', 100)\nprint(testset.test_data[0].shape)",
"torch.Size([32, 32])\n"
],
[
"fig = plt.figure(figsize=(8, 1))\nfor i in range(10):\n ax = fig.add_subplot(1, 10, i+1)\n ax.get_yaxis().set_visible(False)\n ax.get_xaxis().set_visible(False)\n ax.imshow(testset.test_data[i])\nfig.canvas.draw()\ntime.sleep(0.2)\nplt.close(fig)",
"_____no_output_____"
]
],
[
[
"# We can also use EMNIST for out of domain detection",
"_____no_output_____"
]
],
[
[
"trainset = torchvision.datasets.EMNIST(root='./data/emnist', split='letters', train=True, download=True)\ntrainset.train_data = F.pad(trainset.train_data, (2, 2, 2, 2), 'constant', 100)\ntestset = torchvision.datasets.EMNIST(root='./data/emnist', split='letters', train=False, download=True)\ntestset.test_data = F.pad(testset.test_data, (2, 2, 2, 2), 'constant', 100)\nprint(testset.test_data[0].shape)",
"Downloading http://www.itl.nist.gov/iaui/vip/cs_links/EMNIST/gzip.zip\nExtracting zip archive\nExtracting emnist-mnist-train-images-idx3-ubyte.gz\nExtracting emnist-byclass-train-labels-idx1-ubyte.gz\nExtracting emnist-letters-train-images-idx3-ubyte.gz\nExtracting emnist-mnist-train-labels-idx1-ubyte.gz\nExtracting emnist-byclass-train-images-idx3-ubyte.gz\nExtracting emnist-bymerge-train-images-idx3-ubyte.gz\nExtracting emnist-byclass-test-images-idx3-ubyte.gz\nExtracting emnist-balanced-test-labels-idx1-ubyte.gz\nExtracting emnist-balanced-test-images-idx3-ubyte.gz\nExtracting emnist-digits-train-labels-idx1-ubyte.gz\nExtracting emnist-digits-test-labels-idx1-ubyte.gz\nExtracting emnist-letters-test-images-idx3-ubyte.gz\nExtracting emnist-balanced-train-images-idx3-ubyte.gz\nExtracting emnist-letters-test-labels-idx1-ubyte.gz\nExtracting emnist-mnist-test-labels-idx1-ubyte.gz\nExtracting emnist-letters-train-labels-idx1-ubyte.gz\nExtracting emnist-digits-train-images-idx3-ubyte.gz\nExtracting emnist-balanced-train-labels-idx1-ubyte.gz\nExtracting emnist-bymerge-test-images-idx3-ubyte.gz\nExtracting emnist-digits-test-images-idx3-ubyte.gz\nExtracting emnist-byclass-test-labels-idx1-ubyte.gz\nExtracting emnist-mnist-test-images-idx3-ubyte.gz\nExtracting emnist-bymerge-test-labels-idx1-ubyte.gz\nExtracting emnist-bymerge-train-labels-idx1-ubyte.gz\nProcessing byclass\nProcessing bymerge\nProcessing balanced\nProcessing letters\nProcessing digits\nProcessing mnist\nDone!\ntorch.Size([32, 32])\n"
],
[
"len(testset)",
"_____no_output_____"
],
[
"random_idxs = np.random.randint(1, 20800, 20)\nfig = plt.figure(figsize=(8, 2))\nfor i in range(20):\n ax = fig.add_subplot(2, 10, i+1)\n ax.get_yaxis().set_visible(False)\n ax.get_xaxis().set_visible(False)\n ax.imshow(testset.test_data[random_idxs[i]])\nfig.canvas.draw()\ntime.sleep(0.2)\nplt.close(fig)",
"_____no_output_____"
]
],
[
[
"# Do the same thing but with MNIST",
"_____no_output_____"
]
],
[
[
"trainset = torchvision.datasets.MNIST(root='./data/mnist', train=True, download=True)\ntrainset.train_data = F.pad(trainset.train_data, (2, 2, 2, 2), 'constant', 100)\ntestset = torchvision.datasets.MNIST(root='./data/mnist', train=False, download=True)\ntestset.test_data = F.pad(testset.test_data, (2, 2, 2, 2), 'constant', 100)",
"Downloading http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz\nDownloading http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz\nDownloading http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz\nDownloading http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz\nProcessing...\nDone!\n"
],
[
"fig = plt.figure(figsize=(8, 1))\nfor i in range(10):\n ax = fig.add_subplot(1, 10, i+1)\n ax.get_yaxis().set_visible(False)\n ax.get_xaxis().set_visible(False)\n ax.imshow(testset.test_data[i])\nfig.canvas.draw()\ntime.sleep(0.2)\nplt.close(fig)",
"_____no_output_____"
],
[
"class DomainNet(nn.Module):\n \n def __init__(self, n_layers=n_layers, n_classes=n_classes):\n super(DomainNet, self).__init__()\n \n self.n_layers = n_layers\n self.n_classes = n_classes\n \n ##### layers #####\n self.conv1 = nn.Conv2d(1, n_layers, kernel_size=2, stride=2)\n self.conv2 = nn.Conv2d(n_layers, n_layers, kernel_size=2, stride=2)\n \n self.linear1 = nn.Linear(n_layers * 2 * 2, n_layers * 2 * 2)\n self.linear2 = nn.Linear(n_layers * 2 * 2, n_layers * 2 * 2)\n self.linear3 = nn.Linear(n_layers * 2 * 2, n_classes)\n #### layers #####\n \n ### Gaussian Mixture Models ###\n self.gmm_inputs = list()\n self.gmm_conv1 = list()\n self.gmm_conv2 = None\n \n self.gmm_linear1 = None\n self.gmm_linear2 = None\n self.gmm_linear3 = None\n ### Gaussian Mixture Models ###\n return\n \n def forward(self, x):\n outputs = F.leaky_relu(self.conv1(x))\n outputs = F.leaky_relu(self.conv2(outputs))\n \n outputs = outputs.view(outputs.shape[0], self.n_layers * 2 * 2)\n \n outputs = F.leaky_relu(self.linear1(outputs))\n outputs = F.leaky_relu(self.linear2(outputs))\n outputs = self.linear3(outputs)\n return outputs\n \n def _fit_domain_of_convolutional_layer(self, outputs, val=None, n_components=50):\n gmms = list()\n for j in range(0, outputs.shape[2] - 1, 2):\n for k in range(0, outputs.shape[3] - 1, 2):\n kernel = outputs[:, :, j:j+2, k:k+2]\n gmm = GaussianMixture(covariance_type='full',\n n_components=n_components).fit(\n kernel.detach().numpy().reshape(-1, kernel.shape[1] * 2 * 2))\n if val is not None:\n kernel = val[:, :, j:j+2, k:k+2]\n probabilities = gmm.score_samples(kernel.detach().numpy().reshape(-1, kernel.shape[1] * 2 * 2))\n gmm.threshold = np.amin(probabilities)\n gmms.append(gmm)\n return gmms\n \n def _fit_domain_of_linear_layer(self, outputs, val=None, n_components=50):\n gmm = GaussianMixture(covariance_type='full',\n n_components=n_components).fit(outputs.detach().numpy())\n if val is None:\n probabilities = gmm.score_samples(outputs.detach().numpy())\n 
else:\n probabilities = gmm.score_samples(val.detach().numpy())\n gmm.threshold = np.amin(probabilities)\n return gmm\n \n def fit_domain(self, x, val=None, n_components=50):\n self.gmm_inputs = self._fit_domain_of_convolutional_layer(x, val=val, n_components=n_components)\n outputs = self.conv1(x)\n if val is not None:\n val = self.conv1(val)\n \n self.gmm_conv1 = self._fit_domain_of_convolutional_layer(outputs, val=val, n_components=n_components)\n outputs = self.conv2(F.leaky_relu(outputs))\n if val is not None:\n val = self.conv2(F.leaky_relu(val))\n \n outputs = outputs.view(outputs.size(0), self.n_layers * 2 * 2)\n if val is not None:\n val = val.view(val.size(0), self.n_layers * 2 * 2)\n \n self.gmm_conv2 = self._fit_domain_of_linear_layer(outputs, val=val, n_components=n_components)\n outputs = self.linear1(F.leaky_relu(outputs))\n if val is not None:\n val = self.linear1(F.leaky_relu(val))\n \n self.gmm_linear1 = self._fit_domain_of_linear_layer(outputs, val=val, n_components=n_components)\n outputs = self.linear2(F.leaky_relu(outputs))\n if val is not None:\n val = self.linear2(F.leaky_relu(val))\n \n self.gmm_linear2 = self._fit_domain_of_linear_layer(outputs, val=val, n_components=n_components)\n outputs = self.linear3(F.leaky_relu(outputs))\n if val is not None:\n val = self.linear3(F.leaky_relu(val))\n \n self.gmm_linear3 = self._fit_domain_of_linear_layer(outputs, val=val, n_components=n_components)\n return\n \n def _domain_check_convolutional_layer(self, outputs, gmms):\n i = 0\n for j in range(0, outputs.shape[2] - 1, 2):\n for k in range(0, outputs.shape[3] - 1, 2):\n kernel = outputs[:, :, j:j+2, k:k+2]\n probabilities = gmms[i].score_samples(\n kernel.detach().numpy().reshape(-1, kernel.shape[1] * 2 * 2))\n if probabilities[0] < gmms[i].threshold:\n return False\n i +=1\n return True\n \n def _domain_check_linear_layer(self, outputs, gmm):\n probabilities = gmm.score_samples(outputs.detach().numpy())\n if probabilities[0] < gmm.threshold:\n 
return False\n return True\n \n def domain_check(self, x):\n \"\"\"\n Currently only supports checking one image at a time\n \"\"\"\n passing = self._domain_check_convolutional_layer(x, self.gmm_inputs)\n if not passing:\n return -1\n outputs = self.conv1(x)\n \n passing = self._domain_check_convolutional_layer(outputs, self.gmm_conv1)\n if not passing:\n return -2\n outputs = self.conv2(F.leaky_relu(outputs))\n \n outputs = outputs.view(outputs.size(0), self.n_layers * 2 * 2)\n \n passing = self._domain_check_linear_layer(outputs, self.gmm_conv2)\n if not passing:\n return -3\n outputs = self.linear1(F.leaky_relu(outputs))\n \n passing = self._domain_check_linear_layer(outputs, self.gmm_linear1)\n if not passing:\n return -4\n outputs = self.linear2(F.leaky_relu(outputs))\n \n passing = self._domain_check_linear_layer(outputs, self.gmm_linear2)\n if not passing:\n return -5\n outputs = self.linear3(F.leaky_relu(outputs))\n \n passing = self._domain_check_linear_layer(outputs, self.gmm_linear3)\n if not passing:\n return -6\n \n # It is in domain\n return 0\n \n def sample_from_linear(self, gmm, target, target_gmm, linear_transformation, sample_size=100, temp=1):\n max_iterations = 1000\n for i in range(max_iterations):\n if max_iterations - i < 5:\n print(\"sampling iteration: \" + str(i))\n samples, _ = gmm.sample(sample_size)\n scores = gmm.score_samples(samples)\n idxs = np.where(scores >= gmm.threshold)[0]\n if idxs.shape[0] != 0:\n samples = samples[idxs]\n squashed_sample = linear_transformation(F.leaky_relu(torch.tensor(samples).float())).detach().numpy()\n scores = target_gmm.score_samples(squashed_sample)\n idxs = np.where(scores >= target_gmm.threshold)[0]\n if idxs.shape[0] != 0:\n samples = samples[idxs]\n squashed_sample = squashed_sample[idxs]\n break\n if i == (max_iterations - 1):\n sys.exit(\"Sampling took to long we timed out\") \n negative_distances = -1 * np.linalg.norm(squashed_sample - target, axis=1) / temp\n probability_weights = 
F.softmax(torch.tensor(negative_distances).float(), dim=0).detach().numpy()\n idx = np.random.choice(np.arange(probability_weights.shape[0]), p=probability_weights)\n print(\"Distance between sampled point and target point \" + str(-1 * negative_distances[idx] * temp))\n new_target = samples[idx]\n return new_target\n \n def sample_from_outputs(self, gmm, target, sample_size=100, temp=1):\n max_iterations = 1000\n for i in range(max_iterations):\n if max_iterations - i < 5:\n print(\"sampling iteration: \" + str(i))\n samples, _ = gmm.sample(sample_size)\n scores = gmm.score_samples(samples)\n idxs = np.where(scores >= gmm.threshold)[0]\n if idxs.shape[0] != 0:\n samples = samples[idxs]\n break\n if i == (max_iterations - 1):\n sys.exit(\"Sampling took to long we timed out\")\n \n squashed_samples = F.softmax(torch.tensor(samples).float(), dim=1).detach().numpy()\n negative_distances = -1 * np.linalg.norm(squashed_samples - target, axis=1) / temp\n probability_weights = F.softmax(torch.tensor(negative_distances).float(), dim=0).detach().numpy()\n idx = np.random.choice(np.arange(probability_weights.shape[0]), p=probability_weights)\n print(\"Distance between sampled point and target point \" + str(-1 * negative_distances[idx] * temp))\n return samples[idx]\n \n def generate_one_sample_from_convolutional(self, gmms, target,\n conv_filter, sample_size=100, temp=1, is_inputs=False):\n max_iterations = 1000\n # create an array filled with nans that has the shape of a sample\n sample = np.zeros((1, conv_filter.weight.shape[1], target.shape[2] * 2, target.shape[3] * 2))\n sample.fill(np.nan)\n \n # create an array filled with nans that has the shape of target\n transformed_sample = np.zeros(target.shape)\n transformed_sample.fill(np.nan)\n \n # create a list of all the kernel locations and shuffle it\n kernel_idxs = list()\n i = 0\n for j in range(target.shape[2]):\n for k in range(target.shape[3]):\n kernel_idxs.append((i, j, k))\n i += 1\n random.shuffle(kernel_idxs)\n 
\n # sample from each kernel\n # make sure each sample is in domain. If you can't generate an in domain sample give up after a while\n # and return None\n for (i, j, k) in kernel_idxs:\n kernel_target = target[:, :, j, k].reshape(-1, self.n_layers)\n for m in range(max_iterations):\n if max_iterations - m < 5:\n print(\"sampling iteration: \" + str(m))\n kernel_sample, _ = gmms[i].sample(sample_size)\n scores = gmms[i].score_samples(kernel_sample)\n print\n idxs = np.where(scores >= gmms[i].threshold)[0]\n if idxs.shape[0] != 0:\n kernel_sample = kernel_sample[idxs]\n kernel_sample = kernel_sample.reshape(-1, conv_filter.weight.shape[1], 2, 2)\n break\n if m == (max_iterations - 1):\n print(\"Gave up on this iteration\")\n return None, None\n \n if is_inputs:\n # If its the input there shouldn't be any non-linearity\n transformed_kernel_sample = conv_filter(\n torch.tensor(kernel_sample).float()).detach().numpy().reshape(-1, self.n_layers)\n else:\n transformed_kernel_sample = conv_filter(F.leaky_relu(\n torch.tensor(kernel_sample).float())).detach().numpy().reshape(-1, self.n_layers)\n \n negative_distances = -1 * np.linalg.norm(transformed_kernel_sample - kernel_target, axis=1) / temp\n probability_weights = F.softmax(torch.tensor(negative_distances).float(), dim=0).detach().numpy()\n idx = np.random.choice(np.arange(probability_weights.shape[0]), p=probability_weights)\n sample_j, sample_k = (j * 2), (k *2)\n sample[0, :, sample_j:sample_j+2, sample_k:sample_k+2] = kernel_sample[idx, :, :, :]\n transformed_sample[0, :, j, k] = transformed_kernel_sample[idx, :]\n return sample, transformed_sample\n \n def sample_from_convolutional(self, gmms, target\n , target_gmms, conv_filter, sample_size=100\n , temp=1, is_inputs=False, is_last_conv_layer=False):\n samples = list()\n transformed_samples = list()\n \n for i in range(int(sample_size/10)):\n sample, transformed_sample = self.generate_one_sample_from_convolutional(gmms, target\n , conv_filter, sample_size=100\n , 
temp=temp, is_inputs=is_inputs)\n if sample is not None:\n samples.append(sample)\n transformed_samples.append(transformed_sample)\n \n if len(samples) == 0:\n sys.exit(\"Uanble to generate a single in domain sample\")\n \n if not is_last_conv_layer:\n in_domain = np.array([self._domain_check_convolutional_layer(torch.tensor(transformed_sample).float()\n , target_gmms) for transformed_sample in transformed_samples])\n else:\n in_domain = np.array([self._domain_check_linear_layer(torch.tensor(transformed_sample.reshape(-1, self.n_layers * 2 * 2)).float()\n , target_gmms) for transformed_sample in transformed_samples])\n idxs = np.where(in_domain == True)[0]\n if idxs.shape[0] == 0:\n sys.exit(\"No samples were in domain\")\n \n samples = np.concatenate(samples, axis=0)\n transformed_samples = np.concatenate(transformed_samples, axis=0)\n \n samples = samples[idxs]\n transformed_samples = transformed_samples[idxs].reshape(samples.shape[0], -1)\n target = target.reshape(1, -1)\n negative_distances = -1 * np.linalg.norm(transformed_samples - target, axis=1) / temp\n probability_weights = F.softmax(torch.tensor(negative_distances).float(), dim=0).detach().numpy()\n idx = np.random.choice(np.arange(probability_weights.shape[0]), p=probability_weights)\n print(\"Distance between sampled point and target point \" + str(-1 * negative_distances[idx] * temp))\n return np.expand_dims(samples[idx], axis=0)\n \n def sample(self, target=None, sample_size=1000, temp=1):\n if target is None:\n target, _ = self.gmm_linear3.sample()\n else:\n target = self.sample_from_outputs(self.gmm_linear3, target, sample_size=sample_size, temp=temp)\n \n target = self.sample_from_linear(self.gmm_linear2, target, self.gmm_linear3, self.linear3, sample_size=sample_size, temp=temp)\n target = self.sample_from_linear(self.gmm_linear1, target, self.gmm_linear2, self.linear2, sample_size=sample_size, temp=temp)\n target = self.sample_from_linear(self.gmm_conv2, target, self.gmm_linear1, self.linear1, 
sample_size=sample_size, temp=temp)\n \n target = target.reshape(-1, self.n_layers, 2, 2)\n \n target = self.sample_from_convolutional(self.gmm_conv1, target, self.gmm_conv2, self.conv2, sample_size=sample_size, temp=temp\n , is_last_conv_layer=True)\n target = self.sample_from_convolutional(self.gmm_inputs, target, self.gmm_conv1, self.conv1, sample_size=sample_size, temp=temp\n , is_inputs=True)\n return target",
"_____no_output_____"
],
[
"a = min(np.array([5]), np.array([1]))\na",
"_____no_output_____"
],
[
"np.exp(1)",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
eca6b8fb67fba95df7ca604ada69bcb04065c32f | 3,618 | ipynb | Jupyter Notebook | interactivemap.ipynb | AutoGIS-2020/exercise-5-joviri | 14b3ecf8ecbbf576bcc556e84a4253f3953c3b68 | [
"MIT"
] | null | null | null | interactivemap.ipynb | AutoGIS-2020/exercise-5-joviri | 14b3ecf8ecbbf576bcc556e84a4253f3953c3b68 | [
"MIT"
] | null | null | null | interactivemap.ipynb | AutoGIS-2020/exercise-5-joviri | 14b3ecf8ecbbf576bcc556e84a4253f3953c3b68 | [
"MIT"
] | null | null | null | 28.046512 | 119 | 0.475401 | [
[
[
"# Import needed libraries\nimport folium\nimport pandas as pd\nimport geopandas as gpd\nfrom pyproj import CRS\nimport matplotlib.pyplot as plt\nimport contextily as ctx",
"_____no_output_____"
],
[
"# Filepaths\nmapdata_fp = \"data/maakunnat_2021_milj.shp\"\ninfo_fp = \"data/info.txt\"\nfor_tooltips_fp = \"data/pop_data.geojson\"\n\n# Read files\nmapdata = gpd.read_file(mapdata_fp)\ninfo = pd.read_csv(info_fp, sep=';')\nfor_tooltips = gpd.read_file(for_tooltips_fp)",
"_____no_output_____"
],
[
"# Merge data sets\ndata = mapdata.merge(info, left_on='NAMEFIN', right_on='name')",
"_____no_output_____"
],
[
"# Re-project to WGS84\ndata = data.to_crs(epsg=4326)",
"_____no_output_____"
],
[
"# convert 'pop_den' values from str to float\ndata['pop_den'] = data['pop_den'].astype(float)",
"_____no_output_____"
],
[
"# Set indeces\ndata['geoid'] = data.index.astype(str)\n\n# Create a folium map\nm = folium.Map(location=[65, 25], zoom_start=5)\n\n# Set bins (natural breaks)\nbins = [1.9, 3.5, 11.3, 16.3, 23.8, 27.5, 32.8, 41.6, 45.2, 187.5]\n\n# Create a choropleth map\nfolium.Choropleth(geo_data = data,\n data = data,\n columns=['geoid','pop_den'],\n key_on='feature.id',\n fill_color='OrRd',\n line_color='black',\n line_weight=1,\n bins=bins,\n legend_name= 'Population density in Finland (persons per square kilometer)').add_to(m)\n\n# Add tooltips into the map\nfolium.features.GeoJson(for_tooltips, \n name='Labels',\n style_function=lambda x: {'color':'transparent','fillColor':'transparent','weight':0},\n tooltip=folium.features.GeoJsonTooltip(fields=['NAMEFIN','pop_den'],\n aliases = ['Region','Persons/km2'],\n labels=True,\n sticky=False,)\n ).add_to(m)\n\n# Save the map\noutfp = \"docs/interactivemap.html\"\nm.save(outfp)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
eca6d1eb4e57ac5e008a21a6f01377b66a9c304e | 7,425 | ipynb | Jupyter Notebook | examples/notebooks/11_Find_Stations.ipynb | schluchc/python-connector-api | 79586811b73f866c872db9c2404cf03dfddddf79 | [
"MIT"
] | null | null | null | examples/notebooks/11_Find_Stations.ipynb | schluchc/python-connector-api | 79586811b73f866c872db9c2404cf03dfddddf79 | [
"MIT"
] | null | null | null | examples/notebooks/11_Find_Stations.ipynb | schluchc/python-connector-api | 79586811b73f866c872db9c2404cf03dfddddf79 | [
"MIT"
] | null | null | null | 30.430328 | 333 | 0.547879 | [
[
[
"# Find Stations",
"_____no_output_____"
],
[
"First you have to import the meteomatics module and the datetime module",
"_____no_output_____"
]
],
[
[
"import datetime as dt\nimport meteomatics.api as api",
"_____no_output_____"
]
],
[
[
"Input here your username and password from your meteomatics profile",
"_____no_output_____"
]
],
[
[
"###Credentials:\nusername = 'python-community'\npassword = 'Umivipawe179'",
"_____no_output_____"
]
],
[
[
"Input here a startdate, an enddate and the time interval, all as datetime-objects. The interval tells you, if you get the data in hourly steps, daily steps or every five minutes in between the startdate and the enddate. Make sure that the enddate is not in the future, because there are no station measurements from the future.",
"_____no_output_____"
]
],
[
[
"startdate_station_ts = dt.datetime.utcnow().replace(hour=0, minute=0, second=0, microsecond=0)-dt.timedelta(days=2)\nenddate_station_ts = startdate_station_ts + dt.timedelta(days=1)\ninterval_station_ts = dt.timedelta(hours=1)",
"_____no_output_____"
]
],
[
[
"Choose the parameters you want to get and put them into a list. Check here which parameters are available: https://www.meteomatics.com/en/api/available-parameters/",
"_____no_output_____"
]
],
[
[
"parameters_station_ts = ['t_2m:C']",
"_____no_output_____"
]
],
[
[
"Optional: Input here an elevation (height above sea level). It will look first for stations that are close to this height.",
"_____no_output_____"
]
],
[
[
"elevation = 1300",
"_____no_output_____"
]
],
[
[
"Optional: Define here a location, from where you are looking for a station. \nThere are different possibilities to define the location in this query:\n+ location = โlat,lonโ ex: location = โ47,8โ\n+ location = โlat_max,lon_min_lat_min,lon_maxโ ex: location = โ47,8_40,15โ (This covers an area from 40ยฐN to 47ยฐN and from 8ยฐE to 15ยฐE)\n+ location = โpredefined areaโ ex: location = โukโ\n\nPay attention: In contrary to other queries, the coordinates have to be in the form of strings without spaces in between.",
"_____no_output_____"
]
],
[
[
"location = '47,9_46,10'",
"_____no_output_____"
]
],
[
[
"In the following, the request will start. If there is an error in the request as for example a wrong parameter or a date that doesn't exist, you get a message.",
"_____no_output_____"
]
],
[
[
"print(\"find stations:\")\ntry:\n met = api.query_station_list(username, password, startdate=startdate_station_ts, enddate=enddate_station_ts,\n parameters=parameters_station_ts, location=location, elevation=elevation)\n print(met.head())\nexcept Exception as e:\n print(\"Failed, the exception is {}\".format(e))",
"\nfind stations:\n2019-03-20 14:31:45| INFO |Calling URL: https://api.meteomatics.com/find_station?parameters=t_2m:C&startdate=2019-03-18T00Z&enddate=2019-03-19T00Z&location=47,9_46,10&elevation=1300 (username = python-community)\n Station Category Station Type ID Hash WMO ID Alternative IDs \\\n0 SYNOP SYNO 749326929 NaN VLS \n1 SYNOP SYNO 2557177498 NaN LAT \n2 SYNOP SYNO 3299121550 NaN VIO \n3 SYNOP SYNO 944085932 NaN VAB \n4 SYNOP SYNA 2220442281 67840.0 NaN \n\n Name Elevation Start Date End Date \\\n0 Vals 1242m 2017-02-28T23:00:00Z 2019-03-20T12:10:00Z \n1 Bergรผn/Latsch 1407m 2017-02-28T23:00:00Z 2019-03-20T12:10:00Z \n2 Vicosoprano 1089m 2017-02-28T23:00:00Z 2019-03-20T12:10:00Z \n3 Valbella 1569m 2017-02-28T23:00:00Z 2019-03-20T12:10:00Z \n4 Davos 1592m 2017-01-01T00:00:00Z 2019-03-20T13:00:00Z \n\n Horizontal Distance Vertical Distance Effective Distance lat \\\n0 -999 -58 6734.33 46.6278 \n1 -999 107 13267.70 46.6273 \n2 -999 -211 27134.30 46.3530 \n3 -999 269 34867.70 46.7550 \n4 -999 292 37934.30 46.8200 \n\n lon \n0 9.18870 \n1 9.75369 \n2 9.62778 \n3 9.55441 \n4 9.85000 \n"
]
],
[
[
"As output you get a pandas dataframe filled with stations. The best matching stations to your request appear on top.\n\n",
"_____no_output_____"
],
[
"Now you can work on the data by using pandas commands. Here are some examples how you can access to the different datapoints.",
"_____no_output_____"
]
],
[
[
"best_fitting_station = met.iloc[0]\nname_of_best_fitting_station = met.loc[0, 'Name']\nstation_with_least_vertical_distance = met.iloc[abs(met['Vertical Distance']).idxmin()]",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
]
] |
eca6e7989db6239868737d78519b4edfcb43e27b | 69,379 | ipynb | Jupyter Notebook | Notebooks/logis2e.ipynb | eacunafer/DataMining-Machine-Learning-Py3 | e97f32720412c61ea406bc5dcc44edf97f16e2bd | [
"MIT"
] | null | null | null | Notebooks/logis2e.ipynb | eacunafer/DataMining-Machine-Learning-Py3 | e97f32720412c61ea406bc5dcc44edf97f16e2bd | [
"MIT"
] | null | null | null | Notebooks/logis2e.ipynb | eacunafer/DataMining-Machine-Learning-Py3 | e97f32720412c61ea406bc5dcc44edf97f16e2bd | [
"MIT"
] | null | null | null | 126.604015 | 20,872 | 0.860722 | [
[
[
"## Data Mining and Machine Learning\n### Logistic Regression: The ROC curve\n### Libraries:scikit-learn and h2o\n#### Edgar Acuna",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import roc_curve, auc\nimport matplotlib.pyplot as plt\nimport h2o\nfrom h2o.estimators.glm import H2OGeneralizedLinearEstimator\n\n#h2o.connect()\n#h2o.no_progress()\nh2o.init(ip=\"localhost\", port=54323)",
"Checking whether there is an H2O instance running at http://localhost:54323. connected.\n"
]
],
[
[
"### I Regresion Logistica para Diabetes usando scikit learn",
"_____no_output_____"
]
],
[
[
"url= \"http://academic.uprm.edu/eacuna/diabetes.dat\"\nnames = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']\ndata = pd.read_table(url, names=names,header=None)\n#La variable de respuesta y debe ser binaria (0,1)\ny=data['class']-1\nX=data.iloc[:,0:8]\n#Haciendo la regresion logistica ya calculando su precision\nmodel = LogisticRegression()\nmodel = model.fit(X, y)\nprint(model.coef_)",
"C:\\Users\\edgar2017\\Anaconda2\\envs\\ipykernel_py3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.\n FutureWarning)\n"
],
[
"# Tasa de precision\nmodel.score(X, y)\npredictions = model.predict(X)\nprint(classification_report(y, predictions))",
" precision recall f1-score support\n\n 0 0.79 0.90 0.84 500\n 1 0.74 0.55 0.63 268\n\n micro avg 0.77 0.77 0.77 768\n macro avg 0.76 0.72 0.73 768\nweighted avg 0.77 0.77 0.77 768\n\n"
]
],
[
[
"### II. ROC curve using scikit-learn",
"_____no_output_____"
]
],
[
[
"#Hallando las probabilidades posteriores\nprobs = model.predict_proba(X)\npreds = probs[:,1]\nfalse_positive_rate, true_positive_rate, thresholds = roc_curve(y, preds)\nroc_auc = auc(false_positive_rate, true_positive_rate)",
"_____no_output_____"
],
[
"plt.title('The ROC curve')\nplt.plot(false_positive_rate, true_positive_rate, 'b',\nlabel='AUC = %0.2f'% roc_auc)\nplt.legend(loc='lower right')\nplt.plot([0,1],[0,1],'r--')\nplt.xlim([0,1.0])\nplt.ylim([0,1.0])\nplt.ylabel('True Positive Rate')\nplt.xlabel('False Positive Rate')\nplt.show()",
"_____no_output_____"
]
],
[
[
"The AUC value represents the area under the curve ROC (azul). Ia classifier has an AUC between .9 and 1 then its predictions are very good, if the AUC lies between .8 y .89 its prediction are good. A poor classifier is one with an AUC less than de .60 de AUC.\n",
"_____no_output_____"
],
[
"### III Intersection of the sensitivity and specifity curves to choose the threshold",
"_____no_output_____"
]
],
[
[
"plt.title('Choice of the optimal Threshold')\nplt.plot(thresholds, true_positive_rate, 'b',label='Sensitivity')\nplt.legend(loc='lower right')\nplt.plot(thresholds, 1-false_positive_rate,'r--')\nplt.xlim([0,1.0])\nplt.ylim([0,1.0])\nplt.ylabel('Sensitivity ')\nplt.xlabel('Probability')\nplt.show()",
"_____no_output_____"
]
],
[
[
"El threshold que deberia ser usado en lugar de p=.5 para hacer la clasificacion sera aprox .35",
"_____no_output_____"
],
[
"### IV. ROC curve using H20",
"_____no_output_____"
]
],
[
[
"diabetes = h2o.import_file(\"https://academic.uprm.edu/eacuna/diabetes.dat\")\nmyx=['C1','C2','C3','C4','C5','C6','C7','C8']\ndiabetes['C9']=diabetes['C9'].asfactor()\nmyy='C9'\nglm_model = H2OGeneralizedLinearEstimator(family= \"binomial\", lambda_ = 0, compute_p_values = True)\nglm_model.train(myx, myy, training_frame= diabetes)\nglm_model\nglm_model._model_json['output']['coefficients_table']",
"Parse progress: |โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ| 100%\nglm Model Build progress: |โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ| 100%\nCoefficients: glm coefficients\n\n"
],
[
"perf = glm_model.model_performance() #train=True is the default, so it's not needed\nperf.plot()",
"_____no_output_____"
],
[
"#Effect after using the threshokd\n#Number of instances assigned to class 1 using p=.5\ndp=data[preds>.5]\ndp['class'].value_counts()",
"_____no_output_____"
],
[
"#Accuracy with p=.5\n595*100/768.0",
"_____no_output_____"
],
[
"#Number of instances assigned to class 1 using p=.35\ndp1=data[preds>.35]\ndp1['class'].value_counts()",
"_____no_output_____"
],
[
"#New accuracy\n591*100/768.0",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
eca6fe5933450013cb9b2acc90199a4334600772 | 18,866 | ipynb | Jupyter Notebook | NeuralNetworks/NN_simulation.ipynb | IPINGCHOU/FrontierLab-BPSs-code | 10bb56525a54e66c5422e350fd40770a94b204f4 | [
"MIT"
] | 1 | 2020-12-22T09:21:59.000Z | 2020-12-22T09:21:59.000Z | NeuralNetworks/NN_simulation.ipynb | IPINGCHOU/FrontierLab-BPSs-code | 10bb56525a54e66c5422e350fd40770a94b204f4 | [
"MIT"
] | null | null | null | NeuralNetworks/NN_simulation.ipynb | IPINGCHOU/FrontierLab-BPSs-code | 10bb56525a54e66c5422e350fd40770a94b204f4 | [
"MIT"
] | null | null | null | 32.194539 | 254 | 0.597742 | [
[
[
"# Package import",
"_____no_output_____"
]
],
[
[
"import os\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport matplotlib.lines as mlines\nimport tensorflow as tf\nimport gc\nimport time\nfrom scipy.io import loadmat\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import OneHotEncoder\n\nimport DBPS_TF_NN_util as d_util\nimport SBPS_TF_NN_util as s_util\nimport MH_TF_NN_util as mh_util",
"_____no_output_____"
]
],
[
[
"# Data import",
"_____no_output_____"
]
],
[
[
"# dataset with 20*20, 400 pics for each label (training), 100 pics for each label (testing)\nX_train = np.load('/home/user/chou/Py_BPSs_NN_TF/X_train.npy')\nX_test = np.load('/home/user/chou/Py_BPSs_NN_TF/X_test.npy')\ny_train = np.load('/home/user/chou/Py_BPSs_NN_TF/y_train.npy')\ny_test = np.load('/home/user/chou/Py_BPSs_NN_TF/y_test.npy')",
"_____no_output_____"
],
[
"# data from keras, 28 * 28, each labels has different pics, check it below\nmnist = tf.keras.datasets.mnist\n\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\nx_train, x_test = x_train / 255.0, x_test / 255.0\nwant = [4,9]\nnewX_train = x_train[np.in1d(y_train, want)]\nnewY_train = y_train[np.in1d(y_train, want)]\nnewX_test = x_test[np.in1d(y_test, want)]\nnewY_test = y_test[np.in1d(y_test, want)]\nencoder = OneHotEncoder(sparse=False)\nnewY_train_onehot = encoder.fit_transform(newY_train.reshape(-1, 1))\nencoder = OneHotEncoder(sparse=False)\nnewY_test_onehot = encoder.fit_transform(newY_test.reshape(-1, 1))\nprint(np.shape(newX_train))\nprint(np.shape(newX_test))\n\nnp.save('X_train_keras49', newX_train.reshape(len(newX_train), 28*28))\nnp.save('Y_train_keras49', newY_train_onehot)\nnp.save('X_test_keras49', newX_test.reshape(len(newX_test), 28*28))\nnp.save('Y_test_keras49', newY_test_onehot)",
"_____no_output_____"
],
[
"np.shape(newX_train.reshape(len(newX_train), 28*28))",
"_____no_output_____"
],
[
"# training\ntotal = 0\nfor i in range(10):\n want = [i]\n a = sum(np.in1d(y_train, want))\n print('num:' +str(i) + ': '+ str(a))\n total += a\nprint(total)",
"num:0: 5923\nnum:1: 6742\nnum:2: 5958\nnum:3: 6131\nnum:4: 5842\nnum:5: 5421\nnum:6: 5918\nnum:7: 6265\nnum:8: 5851\nnum:9: 5949\n60000\n"
],
[
"# testing\ntotal = 0\nfor i in range(10):\n want = [i]\n a = sum(np.in1d(y_test, want))\n print('num:' +str(i) + ': '+ str(a))\n total += a\nprint(total)",
"num:0: 980\nnum:1: 1135\nnum:2: 1032\nnum:3: 1010\nnum:4: 982\nnum:5: 892\nnum:6: 958\nnum:7: 1028\nnum:8: 974\nnum:9: 1009\n10000\n"
],
[
"# keras data set load\nX_train = np.load('/home/user/chou/Py_BPSs_NN_TF/X_train_keras49.npy')\nX_test = np.load('/home/user/chou/Py_BPSs_NN_TF/X_test_keras49.npy')\ny_train = np.load('/home/user/chou/Py_BPSs_NN_TF/Y_train_keras49.npy')\ny_test = np.load('/home/user/chou/Py_BPSs_NN_TF/Y_test_keras49.npy')",
"_____no_output_____"
],
[
"print(np.shape(X_train))\nprint(np.shape(X_test))\nprint(np.shape(y_train))\nprint(np.shape(y_test))",
"(11791, 784)\n(1991, 784)\n(11791, 2)\n(1991, 2)\n"
]
],
[
[
"# Simulation\nEvery settings are similar to Bayesian Logistic Regression, check BLR_simulation.ipynb for more details",
"_____no_output_____"
]
],
[
[
"store_skip = 100\niterations = 1e07\nverbose = 1e04\nsave_iter = 10000\nburninIters = iterations /10 * 3\n\nMH_settings = []\nBPS_settings = []\nSBPS_settings = []\nDBPS_settings = []",
"_____no_output_____"
],
[
"# ======\n# don't forget to fix the input_size in SBPS_util.py and DBPS_util.py correspond to the input size of the data set\n# ======\n# DBPS_settings.append([runtime, totaltime, NN_DBPS.burnin_sample, NN_DBPS.stage1_prob, NN_DBPS.stage2_prob])\n# SBPS_settings.append([runtime, totaltime, NN_SBPS.burnin_sample, NN_SBPS.after_burnin_storage_time, NN_SBPS.all_storage_time, NN_SBPS.theta_prior_bounce_count, NN_SBPS.bias_prior_bounce_count, NN_SBPS.likelihood_bounce_count])\n# MH_settings.append([runtime, totaltime, NN_MH.burnin_sample, NN_MH.accept_count])\n\n# DBPS\nsubset = 0\nsto = 0\nk = np.array([-2], dtype = 'f')\nd = np.array([-2], dtype = 'f')\nkappa = 10**k\ndelta = 10**d\na = time.time()\nNN_DBPS = d_util.DBPS(X_train, y_train, delta, store_skip, save_iter)\nNN_DBPS.DBPS_sampler(iterations, burninIters, verbose, kappa, subset , sto)\nb = time.time()\nruntime = b-NN_DBPS.burnin_time\ntotaltime = b-a\nDBPS_settings.append([runtime, totaltime, NN_DBPS.burnin_sample, NN_DBPS.stage1_prob, NN_DBPS.stage2_prob])\ndel NN_DBPS\ngc.collect()\n\nnp.save('DBPSsetting', np.array(DBPS_settings))\n\n# SBPS\nT = 10000\ndt = 1\nvariance = 1\nmini_batch = 500\nref = 10\nsample_time = 1e-03\n\na = time.time()\nNN_SBPS = s_util.SBPS(X_train, y_train, T, dt, variance, mini_batch, save_iter)\nNN_SBPS.SBPS_sampler(ref, sample_time, iterations, burninIters, verbose)\nb = time.time()\nruntime = b-NN_SBPS.burnin_time\ntotaltime = b-a\nSBPS_settings.append([runtime, totaltime, NN_SBPS.burnin_sample, NN_SBPS.after_burnin_storage_time, NN_SBPS.all_storage_time, NN_SBPS.theta_prior_bounce_count, NN_SBPS.bias_prior_bounce_count, NN_SBPS.likelihood_bounce_count, NN_SBPS.ref_count])\ndel NN_SBPS\ngc.collect()\nnp.save('SBPSsetting', np.array(SBPS_settings))\n\n# BPS\nT = 1000000\ndt = 1\nvariance = 1\nmini_batch = len(X_train)\nref = 10\nsample_time = 1e-03\n\na = time.time()\nNN_SBPS = s_util.SBPS(X_train, y_train, T, dt, variance, mini_batch, save_iter)\nNN_SBPS.SBPS_sampler(ref, 
sample_time, iterations, burninIters, verbose)\nb = time.time()\nruntime = b-NN_SBPS.burnin_time\ntotaltime = b-a\nBPS_settings.append([runtime, totaltime, NN_SBPS.burnin_sample, NN_SBPS.after_burnin_storage_time, NN_SBPS.all_storage_time, NN_SBPS.theta_prior_bounce_count, NN_SBPS.bias_prior_bounce_count, NN_SBPS.likelihood_bounce_count, NN_SBPS.ref_count])\ndel NN_SBPS\ngc.collect()\n\nnp.save('BPSsetting', np.array(BPS_settings))\n\n# MH\ncan_sd = 0.5\na = time.time()\nNN_MH = mh_util.MH(X_train, y_train,store_skip)\nNN_MH.MH_sampler(can_sd, burninIters, iterations, verbose, save_iter)\nb = time.time()\nruntime = b-NN_MH.burnin_time\ntotaltime = b-a\nMH_settings.append([runtime, totaltime, NN_MH.burnin_sample, NN_MH.accept_count])\ndel NN_MH\ngc.collect()\n\nnp.save('MHsetting', np.array(MH_settings))\n",
"Current clock: 0.18308024466182174\nCurrent counts: 10000\nCurrent clock: 0.23759932573145792\nCurrent counts: 20000\nCurrent clock: 0.25493086930559\nCurrent counts: 30000\nCurrent clock: 0.28802619457093037\nCurrent counts: 40000\nCurrent clock: 0.3178923745958832\nCurrent counts: 50000\nCurrent clock: 0.3620578028998632\nCurrent counts: 60000\nCurrent clock: 0.38574251674294624\nCurrent counts: 70000\nCurrent clock: 0.45336423026757666\nCurrent counts: 80000\nCurrent clock: 0.5005491960544247\nCurrent counts: 90000\nCurrent clock: 0.5466468523951702\nCurrent counts: 100000\nCurrent clock: 0.5759937249335271\nCurrent counts: 110000\nCurrent clock: 0.6038659058667135\nCurrent counts: 120000\nCurrent clock: 0.6199034940591363\nCurrent counts: 130000\nCurrent clock: 0.6792161902606929\nCurrent counts: 140000\nCurrent clock: 0.6911706865856124\nCurrent counts: 150000\nCurrent clock: 0.7753529352399439\nCurrent counts: 160000\nCurrent clock: 0.8564776617692564\nCurrent counts: 170000\nCurrent clock: 0.9032564457551319\nCurrent counts: 180000\nCurrent clock: 0.917588408698638\nCurrent counts: 190000\nCurrent clock: 0.9622753959756359\nCurrent counts: 200000\nCurrent clock: 1.0323934305028744\nCurrent counts: 210000\nCurrent clock: 1.0411404811118172\nCurrent counts: 220000\nCurrent clock: 1.0475074948209748\nCurrent counts: 230000\nCurrent clock: 1.0543718793954786\nCurrent counts: 240000\nCurrent clock: 1.059447954895465\nCurrent counts: 250000\nCurrent clock: 1.065320298631701\nCurrent counts: 260000\nCurrent clock: 1.0733196270593284\nCurrent counts: 270000\nCurrent clock: 1.1360003626927446\nCurrent counts: 280000\nCurrent clock: 1.2241563324475917\nCurrent counts: 290000\nCurrent clock: 1.2662352491475353\nCurrent counts: 300000\nCurrent clock: 1.3449537708031774\nCurrent counts: 310000\nCurrent clock: 1.4583212365488838\nCurrent counts: 320000\nCurrent clock: 1.5367107439661296\nCurrent counts: 330000\nCurrent clock: 1.5654404921316585\nCurrent counts: 
340000\nCurrent clock: 1.5864108544902764\nCurrent counts: 350000\nCurrent clock: 1.6753780892155925\nCurrent counts: 360000\nCurrent clock: 1.7331125728800199\nCurrent counts: 370000\nCurrent clock: 1.8404505163616862\nCurrent counts: 380000\nCurrent clock: 1.8983264516522638\nCurrent counts: 390000\nCurrent clock: 2.003376839205952\nCurrent counts: 400000\nCurrent clock: 2.10133906286225\nCurrent counts: 410000\nCurrent clock: 2.1815076914757743\nCurrent counts: 420000\nCurrent clock: 2.2103924370054964\nCurrent counts: 430000\nCurrent clock: 2.4517215214654517\nCurrent counts: 440000\nCurrent clock: 2.5901884772867714\nCurrent counts: 450000\nCurrent clock: 2.6656496380697408\nCurrent counts: 460000\nCurrent clock: 2.7315593521459816\nCurrent counts: 470000\nCurrent clock: 2.746638741617198\nCurrent counts: 480000\nCurrent clock: 2.764693801378054\nCurrent counts: 490000\nCurrent clock: 2.9893087890473797\nCurrent counts: 500000\nCurrent clock: 3.206769039381898\nCurrent counts: 510000\nCurrent clock: 3.4023928064992037\nCurrent counts: 520000\nCurrent clock: 3.6299032412618777\nCurrent counts: 530000\nCurrent clock: 3.660859606006323\nCurrent counts: 540000\nCurrent clock: 3.7397844054768985\nCurrent counts: 550000\nCurrent clock: 3.8430455396038434\nCurrent counts: 560000\nCurrent clock: 3.9806260302443857\nCurrent counts: 570000\nCurrent clock: 4.011787971629388\nCurrent counts: 580000\nCurrent clock: 4.092319019030991\nCurrent counts: 590000\nCurrent clock: 4.203001778855898\nCurrent counts: 600000\nCurrent clock: 4.251382983544638\nCurrent counts: 610000\nCurrent clock: 4.354527407015479\nCurrent counts: 620000\nCurrent clock: 4.429820151162032\nCurrent counts: 630000\nCurrent clock: 4.537288312788181\nCurrent counts: 640000\nCurrent clock: 4.552419740941616\nCurrent counts: 650000\nCurrent clock: 4.60196948484375\nCurrent counts: 660000\nCurrent clock: 4.640470364453765\nCurrent counts: 670000\nCurrent clock: 4.766453724885332\nCurrent counts: 
680000\nCurrent clock: 4.878713934303336\nCurrent counts: 690000\nCurrent clock: 4.978467933419933\nCurrent counts: 700000\nCurrent clock: 5.060013713264824\nCurrent counts: 710000\nCurrent clock: 5.07751859000334\nCurrent counts: 720000\nCurrent clock: 5.180990794622325\nCurrent counts: 730000\nCurrent clock: 5.2609238624383154\nCurrent counts: 740000\nCurrent clock: 5.39769961307895\nCurrent counts: 750000\nCurrent clock: 5.53541995315819\nCurrent counts: 760000\nCurrent clock: 5.6195286761158725\nCurrent counts: 770000\nCurrent clock: 5.645891321985957\nCurrent counts: 780000\nCurrent clock: 5.662168014820228\nCurrent counts: 790000\nCurrent clock: 5.715374304546236\nCurrent counts: 800000\nCurrent clock: 5.830821772035664\nCurrent counts: 810000\nCurrent clock: 5.966498495667457\nCurrent counts: 820000\nCurrent clock: 6.115884397375968\nCurrent counts: 830000\nCurrent clock: 6.298261101394313\nCurrent counts: 840000\nCurrent clock: 6.418417039081491\nCurrent counts: 850000\nCurrent clock: 6.668569136177264\nCurrent counts: 860000\nCurrent clock: 6.8054440184858125\nCurrent counts: 870000\nCurrent clock: 6.972605560577337\nCurrent counts: 880000\nCurrent clock: 7.12238660205298\nCurrent counts: 890000\nCurrent clock: 7.231007761000952\nCurrent counts: 900000\nCurrent clock: 7.348281647114607\nCurrent counts: 910000\nCurrent clock: 7.488539367353759\nCurrent counts: 920000\nCurrent clock: 7.617541402164676\nCurrent counts: 930000\nCurrent clock: 7.802878751488258\nCurrent counts: 940000\nCurrent clock: 7.917216347488273\nCurrent counts: 950000\nCurrent clock: 7.935172698367123\nCurrent counts: 960000\nCurrent clock: 8.081875714627735\nCurrent counts: 970000\nCurrent clock: 8.101045244402318\nCurrent counts: 980000\nCurrent clock: 8.243998093935872\nCurrent counts: 990000\nCurrent clock: 8.284509610023653\nCurrent counts: 1000000\nCurrent clock: 8.461662663544852\nCurrent counts: 1010000\nCurrent clock: 8.712131315062747\nCurrent counts: 1020000\nCurrent clock: 
9.01731469593963\nCurrent counts: 1030000\nCurrent clock: 9.271019328266323\nCurrent counts: 1040000\nCurrent clock: 9.587911989865901\nCurrent counts: 1050000\nCurrent clock: 9.714382209148248\nCurrent counts: 1060000\nCurrent clock: 9.934330877940798\nCurrent counts: 1070000\nPart: 1 saved\nCurrent clock: 10.150866628671922\nCurrent counts: 1080000\nCurrent clock: 10.2970717178696\nCurrent counts: 1090000\nCurrent clock: 10.385106857041174\nCurrent counts: 1100000\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
eca728cf09ff771439d1afa33ca1777d729ad6da | 7,656 | ipynb | Jupyter Notebook | Data Structure Algorithm/Week 2/.ipynb_checkpoints/4. Topological Sort-checkpoint.ipynb | nomancseku/NSL-HW | 9a130a97ce0909cff93a77378f95e44c69a8bc7c | [
"MIT"
] | 1 | 2020-11-29T20:02:08.000Z | 2020-11-29T20:02:08.000Z | Data Structure Algorithm/Week 2/4. Topological Sort.ipynb | nomancseku/NSL-RA-Training | 9a130a97ce0909cff93a77378f95e44c69a8bc7c | [
"MIT"
] | 1 | 2020-10-25T18:59:03.000Z | 2020-10-25T18:59:21.000Z | Data Structure Algorithm/Week 2/4. Topological Sort.ipynb | nomancseku/NSL-RA-Training | 9a130a97ce0909cff93a77378f95e44c69a8bc7c | [
"MIT"
] | null | null | null | 29.110266 | 179 | 0.450627 | [
[
[
"# Topological Sort Algorithm Implementation",
"_____no_output_____"
],
[
"- Topological Sorting is an ordering of vertices in such a way that for every directed edge ab, node or vertex a should visit before node โbโ or vertex โbโ.\n- Consider a graph, 1 -> 2 -> 3\n\n#### There are two conditions in order to find a topological ordering or sorting of a graph. Those are:-\n\n- The graph should be directed acyclic graph\n- The vertex in a topological graph should be a vertex with no incoming edges.",
"_____no_output_____"
],
[
"### Algorithm for topological sort\n- Step -1:- Identify vertices that have no incoming edges. Select that vertex as starting vertex of a graph\n- Step -2:- Delete the starting vertex or the vertex with no incoming edges and delete all its outgoing edges from the graph. Place the deleted vertex in the output list.\n- Step -3:- Repeat Step -1 and Step -2 until the graph is empty.",
"_____no_output_____"
],
[
"### Implementation of Topological Sorting in Python",
"_____no_output_____"
]
],
[
[
"from collections import defaultdict\n\n\nclass Graph:\n def __init__(self, directed=False): # default directed is set to false\n self.graph = defaultdict(list)\n self.directed = directed\n print('after creating an object')\n print(self.graph)\n print(self.directed)\n \n \n\n def addEdge(self, frm, to):\n print('adding {} to {} with an edge'.format(frm, to))\n self.graph[frm].append(to)\n\n if self.directed is False:\n self.graph[to].append(frm)\n else:\n self.graph[to] = self.graph[to] # an empty list or which is existed\n print('after adding an edge')\n print(self.graph)\n print(self.graph[frm])\n print(self.graph[to])\n \n# {1: [2, 3], 2: [4, 5], 3: [4, 6], 4: [6], 5: [], 6: []}\n\n def topoSortvisit(self, s, visited, sortlist):\n visited[s] = True\n print(s, sortlist)\n\n for i in self.graph[s]:\n if not visited[i]:\n self.topoSortvisit(i, visited, sortlist)\n \n\n sortlist.insert(0, s)\n print(sortlist)\n \n \n def topoSort(self):\n visited = {i: False for i in self.graph}\n print('after making visited list')\n print(visited)\n print('whats inside the self.graph? lets see!')\n print(type(self.graph))\n for v in self.graph:\n print(v, self.graph[v])\n print('end of inspection\\n\\n\\n')\n \n sortlist = []\n \n for v in self.graph:\n if not visited[v]:\n self.topoSortvisit(v, visited, sortlist)\n #print(sortlist)\n print(v, visited[v])\n\n print('after all done')\n print(sortlist)\n\n\nif __name__ == '__main__':\n \n g = Graph(directed=True)\n\n g.addEdge(1, 2)\n g.addEdge(1, 3)\n g.addEdge(2, 4)\n g.addEdge(2, 5)\n g.addEdge(3, 4)\n g.addEdge(3, 6)\n g.addEdge(4, 6)\n \n print('\\n\\n\\n\\n\\n')\n print(g.graph)\n print('\\n\\n\\n\\n\\n')\n \n print(\"Topological Sort:\")\n g.topoSort()",
"after creating an object\ndefaultdict(<class 'list'>, {})\nTrue\nadding 1 to 2 with an edge\nafter adding an edge\ndefaultdict(<class 'list'>, {1: [2], 2: []})\n[2]\n[]\nadding 1 to 3 with an edge\nafter adding an edge\ndefaultdict(<class 'list'>, {1: [2, 3], 2: [], 3: []})\n[2, 3]\n[]\nadding 2 to 4 with an edge\nafter adding an edge\ndefaultdict(<class 'list'>, {1: [2, 3], 2: [4], 3: [], 4: []})\n[4]\n[]\nadding 2 to 5 with an edge\nafter adding an edge\ndefaultdict(<class 'list'>, {1: [2, 3], 2: [4, 5], 3: [], 4: [], 5: []})\n[4, 5]\n[]\nadding 3 to 4 with an edge\nafter adding an edge\ndefaultdict(<class 'list'>, {1: [2, 3], 2: [4, 5], 3: [4], 4: [], 5: []})\n[4]\n[]\nadding 3 to 6 with an edge\nafter adding an edge\ndefaultdict(<class 'list'>, {1: [2, 3], 2: [4, 5], 3: [4, 6], 4: [], 5: [], 6: []})\n[4, 6]\n[]\nadding 4 to 6 with an edge\nafter adding an edge\ndefaultdict(<class 'list'>, {1: [2, 3], 2: [4, 5], 3: [4, 6], 4: [6], 5: [], 6: []})\n[6]\n[]\n\n\n\n\n\n\ndefaultdict(<class 'list'>, {1: [2, 3], 2: [4, 5], 3: [4, 6], 4: [6], 5: [], 6: []})\n\n\n\n\n\n\nTopological Sort:\nafter making visited list\n{1: False, 2: False, 3: False, 4: False, 5: False, 6: False}\nwhats inside the self.graph? lets see!\n<class 'collections.defaultdict'>\n1 [2, 3]\n2 [4, 5]\n3 [4, 6]\n4 [6]\n5 []\n6 []\nend of inspection\n\n\n\n1 []\n2 []\n4 []\n6 []\n[6]\n[4, 6]\n5 [4, 6]\n[5, 4, 6]\n[2, 5, 4, 6]\n3 [2, 5, 4, 6]\n[3, 2, 5, 4, 6]\n[1, 3, 2, 5, 4, 6]\n1 True\n2 True\n3 True\n4 True\n5 True\n6 True\nafter all done\n[1, 3, 2, 5, 4, 6]\n"
]
],
[
[
"### Explanation:-\n- Vertex 1 has no incoming edges so it becomes the starting node.\n- Vertex 1 has two outgoing edges, vertex 2 and 3.\n- The ordering can start with either 1, 2 or 1, 3.\n- There is no cyclic component in the above graph.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
eca72bbb8f0433bc330106f0d88cca2d73472b2a | 22,969 | ipynb | Jupyter Notebook | notebooks/build_index.ipynb | jlikhuva/loompy | b694a4d299604299b27e2a407f508ccf6baa8823 | [
"BSD-2-Clause"
] | 111 | 2017-10-18T06:09:18.000Z | 2022-03-13T01:42:27.000Z | notebooks/build_index.ipynb | jlikhuva/loompy | b694a4d299604299b27e2a407f508ccf6baa8823 | [
"BSD-2-Clause"
] | 158 | 2017-10-18T06:24:46.000Z | 2022-03-31T12:30:17.000Z | notebooks/build_index.ipynb | jlikhuva/loompy | b694a4d299604299b27e2a407f508ccf6baa8823 | [
"BSD-2-Clause"
] | 31 | 2017-10-18T21:42:37.000Z | 2021-12-31T02:39:05.000Z | 43.337736 | 595 | 0.584048 | [
[
[
"## Building a genome index for use with loompy and cytograph\n\nWARNING: these instruction will create a genome index that is *only* suitable for use with 10x Chromium data. We create a composite index, with separate sequence fragments representing unspliced and spliced transcripts. Unspliced fragments will be generated from genomic sequence upstream of every poly-A stretch located inside of a gene locus. Spliced fragments will be generated from transcriptomic sequence upstream of every polyadenylation site in every known spliced transcript. This reflects the locations of reads generated by Chromium, but may not be suitable for other methods.\n\nThe instructions below show how to build a genome index for the human genome. For other species, the code below will have to be modified to accomodate different sources and formats of metadata for those species. Instructions for mouse are available in the loompy/kallisto subdirectory.\n\n### Install prerequisites\n\nInstall the [BioPython](https://biopython.org/wiki/Packages) package in your python distribution:\n\n```\npip install biopython # Method recommended by the BioPython people\nconda install -c conda-forge biopython # Alternative, for Anaconda\n```\n\nInstall [gawk](https://www.gnu.org/software/gawk/) (on Linux, this is typically already installed), [bedtools](https://bedtools.readthedocs.io/en/latest/), and [kallisto](https://pachterlab.github.io/kallisto/) (instructions below are for macOS using [Brew](https://brew.s)):\n```\nbrew install gawk\nbrew install bedtools\nbrew install kallisto\n```\n\n### Download genome data\n\n#### Download transcript sequences and metadata from [Gencode](https://www.gencodegenes.org/human/)\n\nGet the \"Genome sequence, primary assembly (GRCh38)โ file named [GRCh38.primary_assembly.genome.fa.gz](ftp://ftp.ebi.ac.uk/pub/databases/gencode/Gencode_human/release_31/GRCh38.primary_assembly.genome.fa.gz)\n\nDownload the transcript sequences as a fasta file, replacing XX with the version 
number you want [gencode.vXX.transcripts.fa.gz](ftp://ftp.ebi.ac.uk/pub/databases/gencode/Gencode_human/release_XX/gencode.vXX.transcripts.fa.gz) . XX = 31 and 38 have been tested.\n\nDownload the comprehensive PRI gene annotation on the primary assembly GTF file, replacing XX with the version number you want [gencode.vXX.primary_assembly.annotation.gtf](ftp://ftp.ebi.ac.uk/pub/databases/gencode/Gencode_human/release_XX/gencode.vXX.primary_assembly.annotation.gtf.gz) . XX = 31 and 38 have been tested.\n\n#### Download additional gene metadata from [HGNC](https://www.genenames.org/download/statistics-and-files/)\n\nGet the โComplete HGNC datasetโ as TXT, a file named [hgnc_complete_set.txt](ftp://ftp.ebi.ac.uk/pub/databases/genenames/new/json/hgnc_complete_set.txt)\n\n#### Download 10x Chromium barcode whitelists\n\nChromium cell barcodes are generated from a set of known sequences, which differ by version of the Chromium kits. Get three files from the [cellranger GitHub repository](https://github.com/10XGenomics/cellranger/tree/master/lib/python/cellranger/barcodes) and rename them like this:\n\n```\n3M-february-2018.txt.gz -> (unzip) -> 10xv3_whitelist.txt\n737K-august-2016.txt -> 10xv2_whitelist.txt\n737K-april-2014_rc.txt -> 10xv1_whitelist.txt\n```\n\n#### Download the human transcription factor motif database\n\nGet the [human_tfs_consensus.tab](https://storage.googleapis.com/linnarsson-lab-www-blobs/human_tfs_consensus.tab) file from our Google Cloud. 
These metadata will be used to determine which genes are transcription factors, and to assign them to families.\n\n### Preprocess the genome data\n\nUnpack gzip:ed files:\n\n```\ngunzip -c gencode.*.primary_assembly.annotation.gtf.gz > gencode.primary_assembly.annotation.gtf\ngunzip -c gencode.*.transcripts.fa.gz > gencode.transcripts.fa\ngunzip GRCh38.primary_assembly.genome.fa.gz\n```\nCreate a BED file with all the genes:\n\n```\ncat gencode.primary_assembly.annotation.gtf | gawk 'OFS=\"\\t\" {if ($3==\"gene\") {print $1,$4-1,$5,$10,0,$7}}' | tr -d '\";' > gencode.primary_assembly.annotation.bed\n```\n\nCreate a fasta file of pre-mRNA (exons + introns) transcripts:\n\n```\nbedtools sort -i gencode.primary_assembly.annotation.bed > gencode.primary_assembly.annotation.sorted.bed\n\nbedtools merge -i gencode.primary_assembly.annotation.sorted.bed -s -c 4 -o collapse > gencode.primary_assembly.annotation.merged.bed\n\nbedtools getfasta -name -fo gencode.unspliced.fa -fi GRCh38.primary_assembly.genome.fa -bed gencode.primary_assembly.annotation.sorted.bed\n```\n\n### Create the index manifest file, and an index directory\n\nA *manifest* file is necessary to tell cytograph how to find the relevant files in the index. 
Create a file `manifest.json` with the following content:\n\n```\n{\n \"species\": \"Homo sapiens\",\n \"index_file\": \"gencode.fragments.idx\",\n \"gene_metadata_file\": \"gencode.metadata.tab\",\n \"gene_metadata_key\": \"AccessionVersion\",\n \"fragments_to_genes_file\": \"fragments2genes.txt\",\n \"layers\": {\n \"unspliced\": \"unspliced_fragments.txt\",\n \"spliced\": \"spliced_fragments.txt\"\n }\n}\n```\n\nThe `gene_metadata_key` indicated which column in the metadata file (`gencode.metadata.tab`) contains the gene IDs (as used in `fragments2genes.txt`).\n\nFinally, organize your files in a directory and subdirectory as follows:\n\n```\n10xv1_whitelist.txt\n10xv2_whitelist.txt\n10xv3_whitelist.txt\ninputs/\n GRCh38.primary_assembly.genome.fa\n gencode.unspliced.fa\n gencode.primary_assembly.annotation.bed\n gencode.primary_assembly.annotation.gtf\n gencode.primary_assembly.annotation.sorted.bed\n gencode.transcripts.fa\n hgnc_complete_set.txt\n human_tfs_consensus.tab\n```\n\nThe `inputs` directory will only be used to generate the index, and can be discarded afterwards.\n\n\n### Run the following scripts\n\nBefore starting, set `d` to the full path of your index directory, and `extent` to the desired window size (windows are the sequences upstream of a polyadenylation site or a genomic polyA/T sequence that are used to build the index).",
"_____no_output_____"
]
],
[
[
"import os\nimport sys\nfrom typing import *\nfrom Bio import SeqIO\nfrom Bio.SeqRecord import SeqRecord\nfrom Bio.Seq import Seq\n\nd = \"/Users/stelin/kallisto_GRCh38/human_GRCh38_gencode/\"\nextent = 600 # how many bases away from polya to include\nmin_len = 90 # how many non-repeat bases required to make a transcript",
"_____no_output_____"
]
],
[
[
"Now run each of the following code blocks:",
"_____no_output_____"
]
],
[
[
"# Make a gene metadata table from hgnc and gencode gtf\n# First, load and index the HGNC annotations from ftp://ftp.ebi.ac.uk/pub/databases/genenames/new/tsv/hgnc_complete_set.txt\nhgnc = {}\nwith open(d + \"inputs/hgnc_complete_set.txt\") as f:\n\thgnc_headers = f.readline()[:-1].split(\"\\t\")\n\tfor line in f:\n\t\titems = line[:-1].split(\"\\t\")\n\t\thgnc[items[0]] = items\n\n# Load human consensus TFs\ntfs = {}\nwith open(d + \"inputs/human_tfs_consensus.tab\") as f:\n\ttfs_headers = f.readline()[:-1].split(\"\\t\")\n\tfor line in f:\n\t\titems = line[:-1].split(\"\\t\")\n\t\ttfs[items[0]] = items\n\n# Next load the gencode GTF and create a genome annotation tsv\nwith open(d + \"gencode.metadata.tab\", \"w\") as fout:\n\tfout.write(\"\\t\".join([\n\t\t\"Accession\",\n\t\t\"AccessionVersion\",\n\t\t\"Gene\",\n\t\t\"FullName\",\n\t\t\"GeneType\",\n\t\t\"HgncID\",\n\t\t\"Chromosome\",\n\t\t\"Strand\",\n\t\t\"ChromosomeStart\",\n\t\t\"ChromosomeEnd\",\n\t\t\"LocusGroup\",\n\t\t\"LocusType\",\n\t\t\"Location\",\n\t\t\"LocationSortable\",\n\t\t\"Aliases\",\n\t\t\"VegaID\",\n\t\t\"UcscID\",\n\t\t\"RefseqID\",\n\t\t\"CcdsID\",\n\t\t\"UniprotID\",\n\t\t\"PubmedID\",\n\t\t\"MgdID\",\n\t\t\"RgdID\",\n\t\t\"CosmicID\",\n\t\t\"OmimID\",\n\t\t\"MirBaseID\",\n\t\t\"IsTF\",\n\t\t\"DnaBindingDomain\"\n\t]))\n\tfout.write(\"\\n\")\n\twith open(d + \"inputs/gencode.primary_assembly.annotation.gtf\") as f:\n\t\tfor line in f:\n\t\t\tif line.startswith(\"##\"):\n\t\t\t\tcontinue\n\t\t\titems = line[:-1].split(\"\\t\")\n\t\t\tif items[2] != \"gene\":\n\t\t\t\tcontinue\n\t\t\textra = {x.strip().split(\" \")[0]: x.strip().split(\" \")[1].strip('\"') for x in items[8].split(\";\")[:-1]}\n\t\t\tif \"hgnc_id\" in extra and extra[\"hgnc_id\"] in hgnc:\n\t\t\t\tacc = extra[\"hgnc_id\"]\n\t\t\t\tensemblID = extra[\"gene_id\"].split(\".\")[0]\n\t\t\t\tfout.write(\"\\t\".join([\n\t\t\t\t\tensemblID,\n\t\t\t\t\textra[\"gene_id\"],\n\t\t\t\t\thgnc[acc][1], # gene 
symbol\n\t\t\t\t\thgnc[acc][2], # full name\n\t\t\t\t\textra[\"gene_type\"], # gene type from gencode\n\t\t\t\t\tacc, # hgnc ID\n\t\t\t\t\titems[0], # Chromosome\n\t\t\t\t\titems[3], # Start\n\t\t\t\t\titems[4], # End\n\t\t\t\t\thgnc[acc][3], # Locus group\n\t\t\t\t\thgnc[acc][4], # Locus type\n\t\t\t\t\thgnc[acc][6], # Location\n\t\t\t\t\thgnc[acc][7], # Location, sortable\n\t\t\t\t\thgnc[acc][8], # Aliases\n\t\t\t\t\thgnc[acc][20], # VEGA id\n\t\t\t\t\thgnc[acc][21], # UCSC id\n\t\t\t\t\thgnc[acc][23], # Refseq id\n\t\t\t\t\thgnc[acc][24], # CCDS id\n\t\t\t\t\thgnc[acc][25], # Uniprot id\n\t\t\t\t\thgnc[acc][26], # Pubmed id\n\t\t\t\t\thgnc[acc][27], # MGD id\n\t\t\t\t\thgnc[acc][28], # RGD id\n\t\t\t\t\thgnc[acc][30], # COSMIC id\n\t\t\t\t\thgnc[acc][31], # OMIM id\n\t\t\t\t\thgnc[acc][32], # MIRbase id\n\t\t\t\t\t\"True\" if (ensemblID in tfs and tfs[ensemblID][3] == \"Yes\") else \"False\", # IsTF?\n\t\t\t\t\ttfs[ensemblID][2] if (ensemblID in tfs and tfs[ensemblID][3] == \"Yes\") else \"\" # DBD\n\t\t\t\t]))\n\t\t\telse:\n\t\t\t\tensemblID = extra[\"gene_id\"].split(\".\")[0]\n\t\t\t\tfout.write(\"\\t\".join([\n\t\t\t\t\tensemblID,\n\t\t\t\t\textra[\"gene_id\"],\n\t\t\t\t\textra.get(\"gene_name\", \"\"), # gene symbol\n\t\t\t\t\textra.get(\"gene_name\", \"\"), # full name\n\t\t\t\t\textra[\"gene_type\"], # gene type from gencode\n\t\t\t\t\t\"\", # HGNC id\n\t\t\t\t\titems[0], # Chromosome\n\t\t\t\t\titems[6], # Strand\n\t\t\t\t\titems[3], # Start\n\t\t\t\t\titems[4], # End\n\t\t\t\t\t\"\", # Locus group\n\t\t\t\t\t\"\", # Locus type\n\t\t\t\t\t\"\", # Location\n\t\t\t\t\t\"\", # Location, sortable\n\t\t\t\t\t\"\", # Aliases\n\t\t\t\t\t\"\", # VEGA id\n\t\t\t\t\t\"\", # UCSC id\n\t\t\t\t\t\"\", # Refseq id\n\t\t\t\t\t\"\", # CCDS id\n\t\t\t\t\t\"\", # Uniprot id\n\t\t\t\t\t\"\", # Pubmed id\n\t\t\t\t\t\"\", # MGD id\n\t\t\t\t\t\"\", # RGD id\n\t\t\t\t\t\"\", # COSMIC id\n\t\t\t\t\t\"\", # OMIM id\n\t\t\t\t\t\"\", # MIRbase id\n\t\t\t\t\t\"True\" if (ensemblID 
in tfs and tfs[ensemblID][3] == \"Yes\") else \"False\", # IsTF?\n\t\t\t\t\ttfs[ensemblID][2] if (ensemblID in tfs and tfs[ensemblID][3] == \"Yes\") else \"\" # DBD\n\t\t\t\t]))\n\t\t\tfout.write(\"\\n\")",
"_____no_output_____"
],
[
"def find_polys(seq: SeqRecord, c: str = \"A\", n: int = 15) -> List[Tuple[int, int]]:\n\tfound = []\n\tcount = seq[:n].count(c) # Count occurences in the first k-mer\n\tif count >= n - 1: # We have a match\n\t\tfound.append(0)\n\tix = 0\n\twhile ix < len(seq) - n - 1:\n\t\tif seq[ix] == c: # Outgoing base\n\t\t\tcount -= 1\n\t\tif seq[ix + n] == c: # Incoming base\n\t\t\tcount += 1\n\t\tix += 1\n\t\tif count >= n - 1: # We have a match\n\t\t\tfound.append(ix)\n\t\n\tsorted_by_lower_bound = [(f, f + n) for f in found]\n\t# merge intervals (https://codereview.stackexchange.com/questions/69242/merging-overlapping-intervals)\n\tmerged = []\n\tfor higher in sorted_by_lower_bound:\n\t\tif not merged:\n\t\t\tmerged.append(higher)\n\t\telse:\n\t\t\tlower = merged[-1]\n\t\t\t# test for intersection between lower and higher:\n\t\t\t# we know via sorting that lower[0] <= higher[0]\n\t\t\tif higher[0] <= lower[1]:\n\t\t\t\tupper_bound = max(lower[1], higher[1])\n\t\t\t\tmerged[-1] = (lower[0], upper_bound) # replace by merged interval\n\t\t\telse:\n\t\t\t\tmerged.append(higher)\n\treturn merged\n",
"_____no_output_____"
],
[
"polyAs = {}\npolyTs = {}\nfor fasta in SeqIO.parse(open(d + \"inputs/gencode.unspliced.fa\"),'fasta'):\n\tgene_id = fasta.id\n\tintervals = find_polys(fasta.seq, c=\"A\", n=14)\n\tif len(intervals) > 0:\n\t\tpolyAs[gene_id] = intervals\n\t# Collect fragments on the opposite strand, downstream of poly-Ts (not sure if such reads really happen?)\n\tintervals = find_polys(fasta.seq, c=\"T\", n=14)\n\tif len(intervals) > 0:\n\t\tpolyTs[gene_id] = intervals",
"_____no_output_____"
],
[
"tr2g = {}\nwith open(d + \"inputs/gencode.primary_assembly.annotation.gtf\") as f:\n\tfor line in f:\n\t\tif \"\\ttranscript\\t\" in line:\n\t\t\titems = line.split(\"; \")\n\t\t\tchrom, _, _, start, end, _, strand, _, gid = items[0].split(\"\\t\")\n\t\t\tgene_id = gid.split('\"')[1]\n\t\t\ttranscript_id = items[1].split('\"')[1]\n\t\t\tgene_type = items[2].split('\"')[1]\n\t\t\tgene_name = items[3].split('\"')[1]\n\t\t\ttr2g[transcript_id] = (chrom, start, end, strand, gene_id, gene_type, gene_name)",
"_____no_output_____"
],
[
"count = 0\nwith open(d + \"fragments2genes.txt\", \"w\") as ftr2g:\n\twith open(d + \"inputs/gencode.fragments.fa\", \"w\") as fout:\n\t\t# Write the nascent fragments, with one partial transcript per internal poly-A/T site\n\t\twith open(d + \"unspliced_fragments.txt\", \"w\") as fucapture:\n\t\t\tfor fasta in SeqIO.parse(open(d + \"inputs/gencode.unspliced.fa\"),'fasta'): # Note we're in the masked file now\n\t\t\t\tgene_id = fasta.id\n\t\t\t\tif gene_id in polyAs:\n\t\t\t\t\tfor interval in polyAs[gene_id]:\n\t\t\t\t\t\tseq = str(fasta.seq[max(0, interval[0] - extent):interval[0]])\n\t\t\t\t\t\t#seq = seq.translate(tr).strip(\"N\")\n\t\t\t\t\t\tif len(seq) >= min_len:\n\t\t\t\t\t\t\tcount += 1\n\t\t\t\t\t\t\ttranscript_id = f\"{gene_id}.A{interval[0]}\"\n\t\t\t\t\t\t\ttrseq = SeqRecord(Seq(seq), transcript_id, '', '')\n\t\t\t\t\t\t\tfout.write(trseq.format(\"fasta\"))\n\t\t\t\t\t\t\tftr2g.write(f\"{transcript_id}\\t{gene_id}\\n\")\n\t\t\t\t\t\t\tfucapture.write(f\"{transcript_id}\\n\")\n\t\t\t\tif gene_id in polyTs:\n\t\t\t\t\tfor interval in polyTs[gene_id]:\n\t\t\t\t\t\tseq = str(fasta.seq[interval[1]:interval[1] + extent].reverse_complement())\n\t\t\t\t\t\t#seq = seq.translate(tr).strip(\"N\")\n\t\t\t\t\t\tif len(seq) >= min_len:\n\t\t\t\t\t\t\tcount += 1\n\t\t\t\t\t\t\ttranscript_id = f\"{gene_id}.T{interval[0]}\"\n\t\t\t\t\t\t\ttrseq = SeqRecord(Seq(seq), transcript_id, '', '')\n\t\t\t\t\t\t\tfout.write(trseq.format(\"fasta\"))\n\t\t\t\t\t\t\tftr2g.write(f\"{transcript_id}\\t{gene_id}\\n\")\n\t\t\t\t\t\t\tfucapture.write(f\"{transcript_id}\\n\")\n\t\t# Write the mature fragments, covering the 3' end of each mature transcript\n\t\twith open(d + \"spliced_fragments.txt\", \"w\") as fscapture:\n\t\t\tfor fasta in SeqIO.parse(open(d + \"inputs/gencode.transcripts.fa\"),'fasta'): # Note we're in the masked file now\n\t\t\t\ttranscript_id = fasta.id.split(\"|\")[0]\n\t\t\t\tgene_id = fasta.id.split(\"|\")[1]\n\t\t\t\tattrs = tr2g[transcript_id]\n\t\t\t\tseq = 
str(fasta.seq[-extent:])\n\t\t\t\tif len(seq) >= min_len:\n\t\t\t\t\tcount += 1\n\t\t\t\t\ttrseq = SeqRecord(Seq(seq), f\"{transcript_id}.{count} gene_id:{attrs[4]} gene_name:{attrs[6]}\", '', '')\n\t\t\t\t\tfout.write(trseq.format(\"fasta\"))\n\t\t\t\t\tftr2g.write(f\"{transcript_id}.{count}\\t{attrs[4]}\\n\")\n\t\t\t\t\tfscapture.write(f\"{transcript_id}.{count}\\n\")",
"_____no_output_____"
]
],
[
[
"### Build the kallisto index\n\nRun the following on the command line (it might take half an hour):\n\n```\nkallisto index -i gencode.fragments.idx -k 31 inputs/gencode.fragments.fa\n```\n\nYou should now have the following directory structure:\n\n```\n10xv1_whitelist.txt\n10xv2_whitelist.txt\n10xv3_whitelist.txt\nfragments2genes.txt\ngencode.fragments.idx\ngencode.metadata.tab\nmanifest.json\nspliced_fragments.txt\nunspliced_fragments.txt\ninputs/\n```\n\nYou can now remove the `inputs/` subdirectory.\n\n### What just happened?\n\nWe created a composite kallisto index (`gencode.fragments.idx`), with separate sequence fragments for spliced and unspliced transcripts. The IDs of spliced and unspliced fragments are listed in the `spliced_fragments.txt` and `unspliced_fragments.txt` files, so that we can later count them separately. For example, here's the first few lines of `unspliced_fragments.txt` (the last part of each ID, e.g. `A14056`, indicates that this fragment is located upstream of a poly-A stretch at position 14056):\n\n```\nENSG00000277400.1.A14056\nENSG00000277400.1.A32841\nENSG00000277400.1.A35311\nENSG00000277400.1.A36796\nENSG00000277400.1.A44325\nENSG00000277400.1.A45592\nENSG00000277400.1.A47356\nENSG00000277400.1.A49571\nENSG00000277400.1.A53084\nENSG00000277400.1.A53231\n```\n\nWe also created a mapping from fragments to genes, `fragments2genes.txt` so that we can later pool counts by gene. 
Here's the first few lines of that file:\n\n```\nENSG00000277400.1.A14056\tENSG00000277400.1\nENSG00000277400.1.A32841\tENSG00000277400.1\nENSG00000277400.1.A35311\tENSG00000277400.1\nENSG00000277400.1.A36796\tENSG00000277400.1\nENSG00000277400.1.A44325\tENSG00000277400.1\nENSG00000277400.1.A45592\tENSG00000277400.1\nENSG00000277400.1.A47356\tENSG00000277400.1\nENSG00000277400.1.A49571\tENSG00000277400.1\nENSG00000277400.1.A53084\tENSG00000277400.1\nENSG00000277400.1.A53231\tENSG00000277400.1\n```\n\nFinally, we created a consolidated metadata file `gencode.metadata.tab` which collects a lot of useful annotation about each gene:\n\n```\nAccession # ENSEMBL accession\nAccessionVersion # ENSEMBL accession.version\nGene # HGNC official gene symbol\nFullName # Gene long name\nGeneType # Like 'protein_coding', 'pseudogene', 'snRNA', 'rRNA', etc.\nHgncID\nChromosome # Like 'chr1', 'chr2', 'chrM', etc.\nStrand # '+' or '-'\nChromosomeStart # Integer start position of the gene\nChromosomeEnd # Integer end position of the gene\nLocusGroup\nLocusType\nLocation # Like '1p36.33'\nLocationSortable # Like '01p36.33'\nAliases\nVegaID\nUcscID\nRefseqID\nCcdsID\nUniprotID\nPubmedID\nMgdID\nRgdID\nCosmicID\nOmimID\nMirBaseID\nIsTF # 'True' if the gene is a transcription factor\nDnaBindingDomain # Like 'HMG/Sox', 'Homeodomain', etc.\n```\n\nOf these, only `Accession` and `Gene` are required by Cytograph.\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
eca73246c5d2dac64e5497f44aa33cb28e65bf86 | 24,070 | ipynb | Jupyter Notebook | PAL-AMDP/BW-AMDP-HANAML-example/BW_deployment_public.ipynb | pbaumann76/hana-ml-samples | a2180c9d4cf4ec989234f79ec0d118ed658bd224 | [
"Apache-2.0"
] | 1 | 2022-03-24T07:51:05.000Z | 2022-03-24T07:51:05.000Z | PAL-AMDP/BW-AMDP-HANAML-example/BW_deployment_public.ipynb | xinchen510/hana-ml-samples | c09ed92fcbe2af5c168bbd52923d431b0227113c | [
"Apache-2.0"
] | null | null | null | PAL-AMDP/BW-AMDP-HANAML-example/BW_deployment_public.ipynb | xinchen510/hana-ml-samples | c09ed92fcbe2af5c168bbd52923d431b0227113c | [
"Apache-2.0"
] | null | null | null | 34.985465 | 626 | 0.478563 | [
[
[
"### Imports and connection setup",
"_____no_output_____"
]
],
[
[
"import hana_ml\nimport hana_ml.dataframe as dataframe\n\nfrom hana_ml.algorithms.pal.unified_classification import UnifiedClassification\nfrom hana_ml.algorithms.pal.partition import train_test_val_split\n\nfrom hana_ml.artifacts.generators import AMDPGenerator\n\nprint(hana_ml.__version__)",
"2.11.22010700\n"
],
[
"hana_url = 'xxx'\nport = 'xx'\nuser = 'xx'\npassword = 'xxx'\n\nprint(\"URL: \" + hana_url)\nprint(\"User: \" + user)",
"_____no_output_____"
],
[
"conn = dataframe.ConnectionContext(\"{}\".format(hana_url),int(\"{}\".format(port)), \"{}\".format(user), \"{}\".format(password), encrypt=\"true\", sslValidateCertificate=\"false\")\nconn.hana_version()",
"_____no_output_____"
]
],
[
[
"### Connect to the data",
"_____no_output_____"
],
[
"We will generate an SAP HANA DataFrame from our source data. Please note the usage of the placeholder clause in the SQL statement, since our source is an SAP BW query with ...",
"_____no_output_____"
]
],
[
[
"df_hana_source = dataframe.DataFrame(conn,'SELECT \"4ZANON_03_ID\" as ID, \"4ZANON_03_GENDER\" as GENDER, \"4ZANON_03_LASTNAME\" as LASTNAME, \"4ZANON_03_EDUCATION\" as EDUCATION, \"4ZANON_03_REGION\" as REGION, \"4ZANON_03_START_YEAR\" as START_YEAR, \"4ZANON_03_ZIPCODE\" as ZIPCODE, \"T_LEVEL\", (\"5I3Y60GN5JCU6PGWX331P8G0G\") AS \"SALARY\" FROM \"_SYS_BIC\".\"system-local.bw.bw2hana.query.zanon_03/Z_QRY_EXIT_001\"(\\'PLACEHOLDER\\' = (\\'$$ZV_REGION_IP$$\\', \\'EMEA\\'))') \ndf_hana_source = df_hana_source.cast('ID', 'INTEGER')",
"_____no_output_____"
],
[
"df_hana_source.collect()",
"_____no_output_____"
]
],
[
[
"Generate seperate data set for Training, Testing and Validation of our model. We will skipp validation here, since the ML part is not the focus of this demo.",
"_____no_output_____"
]
],
[
[
"train, test, valid = train_test_val_split(data=df_hana_source, training_percentage = 0.8, validation_percentage = 0, testing_percentage = 0.2, random_seed = 41)",
"_____no_output_____"
]
],
[
[
"### ML Training",
"_____no_output_____"
],
[
"We will leverage the AMDP generator to get our AMDP/ABAP automatically generated. This requires the SQL logging to be turned on. ",
"_____no_output_____"
]
],
[
[
"conn.sql_tracer.enable_sql_trace(True)\nconn.sql_tracer.enable_trace_history(True)",
"_____no_output_____"
],
[
"uni_hgbt = UnifiedClassification(func='HybridGradientBoostingTree')",
"_____no_output_____"
],
[
"uni_hgbt.fit(data = train,\n key= 'ID',\n label= 'T_LEVEL',\n features=['GENDER', 'EDUCATION', 'REGION', 'START_YEAR', 'ZIPCODE', 'SALARY']\n )",
"INFO:hana_ml.ml_base:Executing SQL: CREATE LOCAL TEMPORARY COLUMN TABLE \"#PAL_UNIFIED_CLASSIFICATION_MATERIALIZED_INPUT_F14494DB_7697_4392_8793_464A1C921B8D\" AS (SELECT \"ID\", \"GENDER\", \"EDUCATION\", \"REGION\", \"START_YEAR\", \"ZIPCODE\", \"SALARY\", \"T_LEVEL\" FROM (SELECT a.* FROM #PAL_PARTITION_DATA_TBL_026B3402_7238_11EC_8EA8_0242AC110002 a inner join #PAL_PARTITION_RESULT_TBL_026B3402_7238_11EC_8EA8_0242AC110002 b on a.\"ID\" = b.\"ID\" where b.\"PARTITION_TYPE\" = 1) AS \"DT_4\")\nINFO:hana_ml.ml_base:Executing SQL: DO (IN in_0 TABLE (\"ID\" INT, \"GENDER\" NVARCHAR(100), \"EDUCATION\" NVARCHAR(50), \"REGION\" NVARCHAR(100), \"START_YEAR\" NVARCHAR(100), \"ZIPCODE\" NVARCHAR(10), \"SALARY\" DOUBLE, \"T_LEVEL\" NVARCHAR(5)) => \"#PAL_UNIFIED_CLASSIFICATION_MATERIALIZED_INPUT_F14494DB_7697_4392_8793_464A1C921B8D\")\nBEGIN\nDECLARE param_name VARCHAR(5000) ARRAY;\nDECLARE int_value INTEGER ARRAY;\nDECLARE double_value DOUBLE ARRAY;\nDECLARE string_value VARCHAR(5000) ARRAY;\nparam_name[1] := N'FUNCTION';\nint_value[1] := NULL;\ndouble_value[1] := NULL;\nstring_value[1] := N'HGBT';\nparam_name[2] := N'KEY';\nint_value[2] := 1;\ndouble_value[2] := NULL;\nstring_value[2] := NULL;\nparams = UNNEST(:param_name, :int_value, :double_value, :string_value);\nCALL _SYS_AFL.PAL_UNIFIED_CLASSIFICATION(:in_0, :params, out_0, out_1, out_2, out_3, out_4, out_5, out_6, out_7);\nCREATE LOCAL TEMPORARY COLUMN TABLE \"#PAL_UNIFIED_CLASSIFICATION_MODEL_0_04F1B66A_7238_11EC_8EA8_0242AC110002\" AS (SELECT * FROM :out_0);\nCREATE LOCAL TEMPORARY COLUMN TABLE \"#PAL_UNIFIED_CLASSIFICATION_IMPORTANCE_0_04F1B66A_7238_11EC_8EA8_0242AC110002\" AS (SELECT * FROM :out_1);\nCREATE LOCAL TEMPORARY COLUMN TABLE \"#PAL_UNIFIED_CLASSIFICATION_STATS_0_04F1B66A_7238_11EC_8EA8_0242AC110002\" AS (SELECT * FROM :out_2);\nCREATE LOCAL TEMPORARY COLUMN TABLE \"#PAL_UNIFIED_CLASSIFICATION_OPT_PARAM_0_04F1B66A_7238_11EC_8EA8_0242AC110002\" AS (SELECT * FROM :out_3);\nCREATE LOCAL TEMPORARY 
COLUMN TABLE \"#PAL_UNIFIED_CLASSIFICATION_CONFUSION_MATRIX_0_04F1B66A_7238_11EC_8EA8_0242AC110002\" AS (SELECT * FROM :out_4);\nCREATE LOCAL TEMPORARY COLUMN TABLE \"#PAL_UNIFIED_CLASSIFICATION_METRICS_0_04F1B66A_7238_11EC_8EA8_0242AC110002\" AS (SELECT * FROM :out_5);\nCREATE LOCAL TEMPORARY COLUMN TABLE \"#PAL_UNIFIED_CLASSIFICATION_PARTITION_TYPE_0_04F1B66A_7238_11EC_8EA8_0242AC110002\" AS (SELECT * FROM :out_6);\nCREATE LOCAL TEMPORARY COLUMN TABLE \"#PAL_UNIFIED_CLASSIFICATION_PLACE_HOLDER2_0_04F1B66A_7238_11EC_8EA8_0242AC110002\" AS (SELECT * FROM :out_7);\nEND\n\nINFO:hana_ml.ml_base:Executing SQL: DROP TABLE \"#PAL_UNIFIED_CLASSIFICATION_MATERIALIZED_INPUT_F14494DB_7697_4392_8793_464A1C921B8D\"\n"
],
[
"result = uni_hgbt.predict(\n data=test.select('ID', 'GENDER', 'EDUCATION', 'REGION', 'START_YEAR', 'ZIPCODE', 'SALARY'),\n features=['GENDER', 'EDUCATION', 'REGION', 'START_YEAR', 'ZIPCODE', 'SALARY' ], \n key= 'ID')",
"INFO:hana_ml.ml_base:Executing SQL: CREATE LOCAL TEMPORARY COLUMN TABLE \"#PAL_UNIFIED_CLASSIFICATION_PREDICT_MATERIALIZED_INPUT_BA1B51B7_9C57_47DE_B0B0_5F0D7EB4A0FC\" AS (SELECT \"ID\", \"GENDER\", \"EDUCATION\", \"REGION\", \"START_YEAR\", \"ZIPCODE\", \"SALARY\" FROM (SELECT \"ID\", \"GENDER\", \"EDUCATION\", \"REGION\", \"START_YEAR\", \"ZIPCODE\", \"SALARY\" FROM (SELECT a.* FROM #PAL_PARTITION_DATA_TBL_026B3402_7238_11EC_8EA8_0242AC110002 a inner join #PAL_PARTITION_RESULT_TBL_026B3402_7238_11EC_8EA8_0242AC110002 b on a.\"ID\" = b.\"ID\" where b.\"PARTITION_TYPE\" = 2) AS \"DT_5\") AS \"DT_27\")\nINFO:hana_ml.ml_base:Executing SQL: DO (IN in_0 TABLE (\"ID\" INT, \"GENDER\" NVARCHAR(100), \"EDUCATION\" NVARCHAR(50), \"REGION\" NVARCHAR(100), \"START_YEAR\" NVARCHAR(100), \"ZIPCODE\" NVARCHAR(10), \"SALARY\" DOUBLE) => \"#PAL_UNIFIED_CLASSIFICATION_PREDICT_MATERIALIZED_INPUT_BA1B51B7_9C57_47DE_B0B0_5F0D7EB4A0FC\",\n IN in_1 TABLE (\"ROW_INDEX\" INT, \"PART_INDEX\" INT, \"MODEL_CONTENT\" NCLOB) => \"#PAL_UNIFIED_CLASSIFICATION_MODEL_0_04F1B66A_7238_11EC_8EA8_0242AC110002\")\nBEGIN\nDECLARE param_name VARCHAR(5000) ARRAY;\nDECLARE int_value INTEGER ARRAY;\nDECLARE double_value DOUBLE ARRAY;\nDECLARE string_value VARCHAR(5000) ARRAY;\nparam_name[1] := N'FUNCTION';\nint_value[1] := NULL;\ndouble_value[1] := NULL;\nstring_value[1] := N'HGBT';\nparams = UNNEST(:param_name, :int_value, :double_value, :string_value);\nCALL _SYS_AFL.PAL_UNIFIED_CLASSIFICATION_PREDICT(:in_0, :in_1, :params, out_0, out_1);\nCREATE LOCAL TEMPORARY COLUMN TABLE \"#PAL_UNIFIED_CLASSIF_PREDICT_RESULT_TBL_0_0A80B6A8_7238_11EC_8EA8_0242AC110002\" AS (SELECT * FROM :out_0);\nCREATE LOCAL TEMPORARY COLUMN TABLE \"#PAL_UNIFIED_CLASSIF_PREDICT_PH_TBL_0_0A80B6A8_7238_11EC_8EA8_0242AC110002\" AS (SELECT * FROM :out_1);\nEND\n\nINFO:hana_ml.ml_base:Executing SQL: DROP TABLE \"#PAL_UNIFIED_CLASSIFICATION_PREDICT_MATERIALIZED_INPUT_BA1B51B7_9C57_47DE_B0B0_5F0D7EB4A0FC\"\n"
]
],
[
[
"### Generate ABAP code template",
"_____no_output_____"
]
],
[
[
"generator = AMDPGenerator(project_name=\"T-Level\", version=\"1\", connection_context=conn, outputdir=\"out/\")\ngenerator.generate()",
"\n"
]
],
[
[
"### Preview results and clean up trace configuration",
"_____no_output_____"
]
],
[
[
"result.collect().head(10)",
"_____no_output_____"
],
[
"conn.sql_tracer.clean_trace_history()",
"_____no_output_____"
],
[
"conn.sql_tracer.enable_sql_trace(False)\nconn.sql_tracer.enable_trace_history(False)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
eca742859b3a5565629beddfadda08ae04e938c8 | 41,373 | ipynb | Jupyter Notebook | notebooks/MNIST Classification - ViT.ipynb | ManojKesani/Transformer-Implementations | faca89d44523da80073790d53e53b4e80bde736f | [
"MIT"
] | null | null | null | notebooks/MNIST Classification - ViT.ipynb | ManojKesani/Transformer-Implementations | faca89d44523da80073790d53e53b4e80bde736f | [
"MIT"
] | null | null | null | notebooks/MNIST Classification - ViT.ipynb | ManojKesani/Transformer-Implementations | faca89d44523da80073790d53e53b4e80bde736f | [
"MIT"
] | null | null | null | 76.19337 | 11,596 | 0.723926 | [
[
[
"# Handwritten Image Classification with Vision in Transformers (ViT)",
"_____no_output_____"
]
],
[
[
"import torch \nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import Dataset, random_split, DataLoader\n\nfrom torchvision import datasets, transforms, models\nimport torchvision.transforms as transforms\nfrom torch.utils.data import Dataset, random_split, DataLoader\nfrom torchvision.utils import save_image\n\nfrom torchsummary import summary\n\nimport spacy\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nimport os\nimport time\nimport math\nfrom PIL import Image\nimport glob\nfrom IPython.display import display",
"_____no_output_____"
],
[
"device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nprint(device)",
"cuda\n"
],
[
"torch.manual_seed(0)\nnp.random.seed(0)",
"_____no_output_____"
],
[
"BATCH_SIZE = 64\nLR = 5e-5\nNUM_EPOCHES = 25",
"_____no_output_____"
]
],
[
[
"## Preprocessing",
"_____no_output_____"
]
],
[
[
"mean, std = (0.5,), (0.5,)\n\ntransform = transforms.Compose([transforms.ToTensor(),\n transforms.Normalize(mean, std)\n ])",
"_____no_output_____"
],
[
"trainset = datasets.MNIST('../data/MNIST/', download=True, train=True, transform=transform)\ntrainloader = torch.utils.data.DataLoader(trainset, batch_size=BATCH_SIZE, shuffle=True)\n\ntestset = datasets.MNIST('../data/MNIST/', download=True, train=False, transform=transform)\ntestloader = torch.utils.data.DataLoader(testset, batch_size=BATCH_SIZE, shuffle=False)",
"_____no_output_____"
]
],
[
[
"## Model",
"_____no_output_____"
]
],
[
[
"from models.transformer import ViT",
"_____no_output_____"
],
[
"image_size = 28\nchannel_size = 1\npatch_size = 7\nembed_size = 512\nnum_heads = 8\nclasses = 10\nnum_layers = 3\nhidden_size = 256\ndropout = 0.2\n\nmodel = ViT(image_size, channel_size, patch_size, embed_size, num_heads, classes, num_layers, hidden_size, dropout=dropout).to(device)\nmodel",
"_____no_output_____"
],
[
"for img, label in trainloader:\n img = img.to(device)\n label = label.to(device)\n \n print(\"Input Image Dimensions: {}\".format(img.size()))\n print(\"Label Dimensions: {}\".format(label.size()))\n print(\"-\"*100)\n \n out = model(img)\n \n print(\"Output Dimensions: {}\".format(out.size()))\n break",
"Input Image Dimensions: torch.Size([64, 1, 28, 28])\nLabel Dimensions: torch.Size([64])\n----------------------------------------------------------------------------------------------------\nOutput Dimensions: torch.Size([64, 10])\n"
],
[
"criterion = nn.NLLLoss()\noptimizer = torch.optim.Adam(params=model.parameters(), lr=LR)",
"_____no_output_____"
],
[
"loss_hist = {}\nloss_hist[\"train accuracy\"] = []\nloss_hist[\"train loss\"] = []\n\nfor epoch in range(1, NUM_EPOCHES+1):\n model.train()\n \n epoch_train_loss = 0\n \n y_true_train = []\n y_pred_train = []\n \n for batch_idx, (img, labels) in enumerate(trainloader):\n img = img.to(device)\n labels = labels.to(device)\n \n preds = model(img)\n \n loss = criterion(preds, labels)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n \n y_pred_train.extend(preds.detach().argmax(dim=-1).tolist())\n y_true_train.extend(labels.detach().tolist())\n \n epoch_train_loss += loss.item()\n \n loss_hist[\"train loss\"].append(epoch_train_loss)\n \n total_correct = len([True for x, y in zip(y_pred_train, y_true_train) if x==y])\n total = len(y_pred_train)\n accuracy = total_correct * 100 / total\n \n loss_hist[\"train accuracy\"].append(accuracy)\n \n print(\"-------------------------------------------------\")\n print(\"Epoch: {} Train mean loss: {:.8f}\".format(epoch, epoch_train_loss))\n print(\" Train Accuracy%: \", accuracy, \"==\", total_correct, \"/\", total)\n print(\"-------------------------------------------------\")",
"-------------------------------------------------\nEpoch: 1 Train mean loss: 510.82354890\n Train Accuracy%: 82.54 == 49524 / 60000\n-------------------------------------------------\n-------------------------------------------------\nEpoch: 2 Train mean loss: 218.59212352\n Train Accuracy%: 92.71166666666667 == 55627 / 60000\n-------------------------------------------------\n-------------------------------------------------\nEpoch: 3 Train mean loss: 169.03965779\n Train Accuracy%: 94.39666666666666 == 56638 / 60000\n-------------------------------------------------\n-------------------------------------------------\nEpoch: 4 Train mean loss: 140.14847237\n Train Accuracy%: 95.34666666666666 == 57208 / 60000\n-------------------------------------------------\n-------------------------------------------------\nEpoch: 5 Train mean loss: 125.35997567\n Train Accuracy%: 95.79333333333334 == 57476 / 60000\n-------------------------------------------------\n-------------------------------------------------\nEpoch: 6 Train mean loss: 114.78742353\n Train Accuracy%: 96.11333333333333 == 57668 / 60000\n-------------------------------------------------\n-------------------------------------------------\nEpoch: 7 Train mean loss: 102.45704766\n Train Accuracy%: 96.50166666666667 == 57901 / 60000\n-------------------------------------------------\n-------------------------------------------------\nEpoch: 8 Train mean loss: 94.18124880\n Train Accuracy%: 96.79333333333334 == 58076 / 60000\n-------------------------------------------------\n-------------------------------------------------\nEpoch: 9 Train mean loss: 87.59821097\n Train Accuracy%: 97.025 == 58215 / 60000\n-------------------------------------------------\n-------------------------------------------------\nEpoch: 10 Train mean loss: 80.95912049\n Train Accuracy%: 97.18166666666667 == 58309 / 60000\n-------------------------------------------------\n-------------------------------------------------\nEpoch: 11 
Train mean loss: 77.87691876\n Train Accuracy%: 97.32 == 58392 / 60000\n-------------------------------------------------\n-------------------------------------------------\nEpoch: 12 Train mean loss: 70.29910550\n Train Accuracy%: 97.56833333333333 == 58541 / 60000\n-------------------------------------------------\n-------------------------------------------------\nEpoch: 13 Train mean loss: 66.29535512\n Train Accuracy%: 97.73333333333333 == 58640 / 60000\n-------------------------------------------------\n-------------------------------------------------\nEpoch: 14 Train mean loss: 63.38367621\n Train Accuracy%: 97.835 == 58701 / 60000\n-------------------------------------------------\n-------------------------------------------------\nEpoch: 15 Train mean loss: 60.14360138\n Train Accuracy%: 97.86333333333333 == 58718 / 60000\n-------------------------------------------------\n-------------------------------------------------\nEpoch: 16 Train mean loss: 56.20866929\n Train Accuracy%: 98.065 == 58839 / 60000\n-------------------------------------------------\n-------------------------------------------------\nEpoch: 17 Train mean loss: 53.43044022\n Train Accuracy%: 98.15166666666667 == 58891 / 60000\n-------------------------------------------------\n-------------------------------------------------\nEpoch: 18 Train mean loss: 50.20360140\n Train Accuracy%: 98.21 == 58926 / 60000\n-------------------------------------------------\n-------------------------------------------------\nEpoch: 19 Train mean loss: 49.50997618\n Train Accuracy%: 98.255 == 58953 / 60000\n-------------------------------------------------\n-------------------------------------------------\nEpoch: 20 Train mean loss: 47.42421756\n Train Accuracy%: 98.33833333333334 == 59003 / 60000\n-------------------------------------------------\n-------------------------------------------------\nEpoch: 21 Train mean loss: 44.47633225\n Train Accuracy%: 98.41 == 59046 / 
60000\n-------------------------------------------------\n-------------------------------------------------\nEpoch: 22 Train mean loss: 42.53861594\n Train Accuracy%: 98.46333333333334 == 59078 / 60000\n-------------------------------------------------\n-------------------------------------------------\nEpoch: 23 Train mean loss: 41.59131836\n Train Accuracy%: 98.51 == 59106 / 60000\n-------------------------------------------------\n-------------------------------------------------\nEpoch: 24 Train mean loss: 38.65641099\n Train Accuracy%: 98.67833333333333 == 59207 / 60000\n-------------------------------------------------\n-------------------------------------------------\nEpoch: 25 Train mean loss: 39.32817434\n Train Accuracy%: 98.55166666666666 == 59131 / 60000\n-------------------------------------------------\n"
]
],
[
[
"## Test",
"_____no_output_____"
]
],
[
[
"plt.plot(loss_hist[\"train accuracy\"])\nplt.xlabel(\"Epoch\")\nplt.ylabel(\"Loss\")\nplt.show()",
"_____no_output_____"
],
[
"plt.plot(loss_hist[\"train loss\"])\nplt.xlabel(\"Epoch\")\nplt.ylabel(\"Loss\")\nplt.show()",
"_____no_output_____"
],
[
"with torch.no_grad():\n model.eval()\n \n y_true_test = []\n y_pred_test = []\n \n for batch_idx, (img, labels) in enumerate(testloader):\n img = img.to(device)\n label = label.to(device)\n \n preds = model(img)\n \n y_pred_test.extend(preds.detach().argmax(dim=-1).tolist())\n y_true_test.extend(labels.detach().tolist())\n \n total_correct = len([True for x, y in zip(y_pred_test, y_true_test) if x==y])\n total = len(y_pred_test)\n accuracy = total_correct * 100 / total\n \n print(\"Test Accuracy%: \", accuracy, \"==\", total_correct, \"/\", total)",
"Test Accuracy%: 98.41 == 9841 / 10000\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
eca74b1002343283258a7fbffcee96761023f6f9 | 37,315 | ipynb | Jupyter Notebook | 01-MNIST-dense.ipynb | t-systems-on-site-services-gmbh/nn-demo-keras | 34eff81328020319a766280fb5c2228d713fb0ed | [
"BSD-2-Clause"
] | 2 | 2019-09-02T06:53:15.000Z | 2019-09-14T17:56:28.000Z | 01-MNIST-dense.ipynb | t-systems-on-site-services-gmbh/nn-demo-keras | 34eff81328020319a766280fb5c2228d713fb0ed | [
"BSD-2-Clause"
] | null | null | null | 01-MNIST-dense.ipynb | t-systems-on-site-services-gmbh/nn-demo-keras | 34eff81328020319a766280fb5c2228d713fb0ed | [
"BSD-2-Clause"
] | null | null | null | 90.35109 | 19,240 | 0.782581 | [
[
[
"from keras.datasets import mnist\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom keras import layers\nfrom keras import models\nfrom keras.utils import to_categorical\n\n%matplotlib inline \n",
"Using TensorFlow backend.\n"
],
[
"(train_images, train_labels), (test_images, test_labels) = mnist.load_data()\n",
"_____no_output_____"
],
[
"print('train_images.shape:', train_images.shape)\nprint('train_labels.shape:', train_labels.shape)\nprint('test_images.shape:', test_images.shape)\nprint('test_labels.shape:', test_labels.shape)\n",
"train_images.shape: (60000, 28, 28)\ntrain_labels.shape: (60000,)\ntest_images.shape: (10000, 28, 28)\ntest_labels.shape: (10000,)\n"
],
[
"pixels = train_images[4]\nprint('pixels.shape:', pixels.shape)\nprint('pixels:', pixels)\n",
"pixels.shape: (28, 28)\npixels: [[ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 55 148 210 253 253 113\n 87 148 55 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 87 232 252 253 189 210 252\n 252 253 168 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 0 0 0 0 4 57 242 252 190 65 5 12 182\n 252 253 116 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 0 0 0 0 96 252 252 183 14 0 0 92 252\n 252 225 21 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 0 0 0 132 253 252 146 14 0 0 0 215 252\n 252 79 0 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 0 0 126 253 247 176 9 0 0 8 78 245 253\n 129 0 0 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 0 16 232 252 176 0 0 0 36 201 252 252 169\n 11 0 0 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 0 22 252 252 30 22 119 197 241 253 252 251 77\n 0 0 0 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 0 16 231 252 253 252 252 252 226 227 252 231 0\n 0 0 0 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 0 0 55 235 253 217 138 42 24 192 252 143 0\n 0 0 0 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 0 62 255 253 109 0\n 0 0 0 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 0 71 253 252 21 0\n 0 0 0 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 253 252 21 0\n 0 0 0 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 0 71 253 252 21 0\n 0 0 0 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 0 106 253 252 21 0\n 0 0 0 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 0 45 255 253 21 0\n 0 0 0 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 218 252 56 0\n 0 0 0 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 96 252 189 42\n 0 0 0 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 14 184 252 170\n 11 0 0 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 14 147 252\n 42 0 0 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
0 0 0 0\n 0 0 0 0 0 0 0 0 0 0]]\n"
],
[
"plt.imshow(pixels, cmap='gray')\nplt.show()\n",
"_____no_output_____"
],
[
"train_images = train_images.reshape((60000, 784)) # 28 * 28 = 784\ntrain_images = train_images.astype('float32') / 255\n\ntest_images = test_images.reshape((10000, 784)) # 28 * 28 = 784\ntest_images = test_images.astype('float32') / 255\n",
"_____no_output_____"
],
[
"print('train_images.shape:', train_images.shape)\nprint('train_labels.shape:', train_labels.shape)\nprint('test_images.shape:', test_images.shape)\nprint('test_labels.shape:', test_labels.shape)\n",
"train_images.shape: (60000, 784)\ntrain_labels.shape: (60000,)\ntest_images.shape: (10000, 784)\ntest_labels.shape: (10000,)\n"
],
[
"print('train label example:',train_labels[4])\n",
"train label example: 9\n"
],
[
"train_labels = to_categorical(train_labels)\ntest_labels = to_categorical(test_labels)\n",
"_____no_output_____"
],
[
"print('train label example:',train_labels[4])\n",
"train label example: [0. 0. 0. 0. 0. 0. 0. 0. 0. 1.]\n"
],
[
"model = models.Sequential()\nmodel.add(layers.Dense(512, activation='relu', input_shape=(784,)))\nmodel.add(layers.Dense(512, activation='relu'))\nmodel.add(layers.Dense(10, activation='softmax'))\n\nmodel.summary()\n",
"WARNING:tensorflow:From C:\\ProgramData\\Miniconda3\\envs\\nn-demo\\lib\\site-packages\\tensorflow\\python\\framework\\op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nColocations handled automatically by placer.\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense_1 (Dense) (None, 512) 401920 \n_________________________________________________________________\ndense_2 (Dense) (None, 512) 262656 \n_________________________________________________________________\ndense_3 (Dense) (None, 10) 5130 \n=================================================================\nTotal params: 669,706\nTrainable params: 669,706\nNon-trainable params: 0\n_________________________________________________________________\n"
],
[
"model.compile(optimizer='adam',\n loss='categorical_crossentropy',\n metrics=['accuracy'],\n )\n",
"_____no_output_____"
],
[
"EPOCHS = 4\n\nhistory = model.fit(train_images, train_labels, \n validation_data=(test_images, test_labels),\n epochs=EPOCHS, \n batch_size=128\n )\n",
"WARNING:tensorflow:From C:\\ProgramData\\Miniconda3\\envs\\nn-demo\\lib\\site-packages\\tensorflow\\python\\ops\\math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse tf.cast instead.\nTrain on 60000 samples, validate on 10000 samples\nEpoch 1/4\n60000/60000 [==============================] - 10s 169us/step - loss: 0.2166 - acc: 0.9360 - val_loss: 0.1035 - val_acc: 0.9674\nEpoch 2/4\n60000/60000 [==============================] - 10s 161us/step - loss: 0.0801 - acc: 0.9755 - val_loss: 0.0722 - val_acc: 0.9767\nEpoch 3/4\n60000/60000 [==============================] - 9s 144us/step - loss: 0.0508 - acc: 0.9843 - val_loss: 0.0784 - val_acc: 0.9763\nEpoch 4/4\n60000/60000 [==============================] - 9s 143us/step - loss: 0.0347 - acc: 0.9887 - val_loss: 0.0697 - val_acc: 0.9814\n"
],
[
"# list of values for (train) accuracy\nacc_values = history.history['acc']\n\n# list of values for validationaccuracy\nval_acc_values = history.history['val_acc']\n",
"_____no_output_____"
],
[
"best_val_acc_value = max(val_acc_values)\nprint('best_val_acc_value:', best_val_acc_value)\n",
"best_val_acc_value: 0.9814\n"
],
[
"epochs = range(1, EPOCHS + 1) \n\nplt.plot(epochs, acc_values, 'b', label='Training acc')\nplt.plot(epochs, val_acc_values, 'g', label='Validation acc')\nplt.xticks(np.arange(min(epochs), max(epochs)+1, 1.0))\nplt.title('Training and validation acc')\nplt.xlabel('Epochs')\nplt.ylabel('ACC')\nplt.legend()\n\nplt.show()\n",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
eca76d206a4d31222acf3fe9494be1907e2554a1 | 691,875 | ipynb | Jupyter Notebook | site/en/tutorials/audio/simple_audio.ipynb | frosties1010/opencv | d113e210424ac633ab90407e5c79ab5fef8016b2 | [
"Apache-2.0"
] | null | null | null | site/en/tutorials/audio/simple_audio.ipynb | frosties1010/opencv | d113e210424ac633ab90407e5c79ab5fef8016b2 | [
"Apache-2.0"
] | null | null | null | site/en/tutorials/audio/simple_audio.ipynb | frosties1010/opencv | d113e210424ac633ab90407e5c79ab5fef8016b2 | [
"Apache-2.0"
] | null | null | null | 534.679289 | 428,946 | 0.935209 | [
[
[
"##### Copyright 2020 The TensorFlow Authors.",
"_____no_output_____"
]
],
[
[
"#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.",
"_____no_output_____"
]
],
[
[
"# Simple audio recognition: Recognizing keywords",
"_____no_output_____"
],
[
"<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/tutorials/audio/simple_audio\">\n <img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />\n View on TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/audio/simple_audio.ipynb\">\n <img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />\n Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/docs/blob/master/site/en/tutorials/audio/simple_audio.ipynb\">\n <img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />\n View source on GitHub</a>\n </td>\n <td>\n <a href=\"https://storage.googleapis.com/tensorflow_docs/docs/site/en/tutorials/audio/simple_audio.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />Download notebook</a>\n </td>\n</table>",
"_____no_output_____"
],
[
"This tutorial demonstrates how to preprocess audio files in the WAV format and build and train a basic <a href=\"https://en.wikipedia.org/wiki/Speech_recognition\" class=\"external\">automatic speech recognition</a> (ASR) model for recognizing ten different words. You will use a portion of the [Speech Commands dataset](https://www.tensorflow.org/datasets/catalog/speech_commands) (<a href=\"https://arxiv.org/abs/1804.03209\" class=\"external\">Warden, 2018</a>), which contains short (one-second or less) audio clips of commands, such as \"down\", \"go\", \"left\", \"no\", \"right\", \"stop\", \"up\" and \"yes\".\n\nReal-world speech and audio recognition <a href=\"https://ai.googleblog.com/search/label/Speech%20Recognition\" class=\"external\">systems</a> are complex. But, like [image classification with the MNIST dataset](../quickstart/beginner.ipynb), this tutorial should give you a basic understanding of the techniques involved.",
"_____no_output_____"
],
[
"## Setup\n\nImport necessary modules and dependencies. Note that you'll be using <a href=\"https://seaborn.pydata.org/\" class=\"external\">seaborn</a> for visualization in this tutorial.",
"_____no_output_____"
]
],
[
[
"import os\nimport pathlib\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\nimport tensorflow as tf\n\nfrom tensorflow.keras import layers\nfrom tensorflow.keras import models\nfrom IPython import display\n\n# Set the seed value for experiment reproducibility.\nseed = 42\ntf.random.set_seed(seed)\nnp.random.seed(seed)",
"_____no_output_____"
]
],
[
[
"## Import the mini Speech Commands dataset\n\nTo save time with data loading, you will be working with a smaller version of the Speech Commands dataset. The [original dataset](https://www.tensorflow.org/datasets/catalog/speech_commands) consists of over 105,000 audio files in the <a href=\"https://www.aelius.com/njh/wavemetatools/doc/riffmci.pdf\" class=\"external\">WAV (Waveform) audio file format</a> of people saying 35 different words. This data was collected by Google and released under a CC BY license.\n\nDownload and extract the `mini_speech_commands.zip` file containing the smaller Speech Commands datasets with `tf.keras.utils.get_file`:",
"_____no_output_____"
]
],
[
[
"DATASET_PATH = 'data/mini_speech_commands'\n\ndata_dir = pathlib.Path(DATASET_PATH)\nif not data_dir.exists():\n tf.keras.utils.get_file(\n 'mini_speech_commands.zip',\n origin=\"http://storage.googleapis.com/download.tensorflow.org/data/mini_speech_commands.zip\",\n extract=True,\n cache_dir='.', cache_subdir='data')",
"Downloading data from http://storage.googleapis.com/download.tensorflow.org/data/mini_speech_commands.zip\n182083584/182082353 [==============================] - 6s 0us/step\n182091776/182082353 [==============================] - 6s 0us/step\n"
]
],
[
[
"The dataset's audio clips are stored in eight folders corresponding to each speech command: `no`, `yes`, `down`, `go`, `left`, `up`, `right`, and `stop`:",
"_____no_output_____"
]
],
[
[
"commands = np.array(tf.io.gfile.listdir(str(data_dir)))\ncommands = commands[commands != 'README.md']\nprint('Commands:', commands)",
"Commands: ['up' 'stop' 'no' 'right' 'left' 'down' 'go' 'yes']\n"
]
],
[
[
"Extract the audio clips into a list called `filenames`, and shuffle it:",
"_____no_output_____"
]
],
[
[
"filenames = tf.io.gfile.glob(str(data_dir) + '/*/*')\nfilenames = tf.random.shuffle(filenames)\nnum_samples = len(filenames)\nprint('Number of total examples:', num_samples)\nprint('Number of examples per label:',\n len(tf.io.gfile.listdir(str(data_dir/commands[0]))))\nprint('Example file tensor:', filenames[0])",
"Number of total examples: 8000\nNumber of examples per label: 1000\nExample file tensor: tf.Tensor(b'data/mini_speech_commands/down/a3255f5c_nohash_1.wav', shape=(), dtype=string)\n"
]
],
[
[
"Split `filenames` into training, validation and test sets using a 80:10:10 ratio, respectively:",
"_____no_output_____"
]
],
[
[
"train_files = filenames[:6400]\nval_files = filenames[6400: 6400 + 800]\ntest_files = filenames[-800:]\n\nprint('Training set size', len(train_files))\nprint('Validation set size', len(val_files))\nprint('Test set size', len(test_files))",
"Training set size 6400\nValidation set size 800\nTest set size 800\n"
]
],
[
[
"## Read the audio files and their labels",
"_____no_output_____"
],
[
"In this section you will preprocess the dataset, creating decoded tensors for the waveforms and the corresponding labels. Note that:\n\n- Each WAV file contains time-series data with a set number of samples per second.\n- Each sample represents the <a href=\"https://en.wikipedia.org/wiki/Amplitude\" class=\"external\">amplitude</a> of the audio signal at that specific time.\n- In a <a href=\"https://en.wikipedia.org/wiki/Audio_bit_depth\" class=\"external\">16-bit</a> system, like the WAV files in the mini Speech Commands dataset, the amplitude values range from -32,768 to 32,767.\n- The <a href=\"https://en.wikipedia.org/wiki/Sampling_(signal_processing)#Audio_sampling\" class=\"external\">sample rate</a> for this dataset is 16kHz.\n\nThe shape of the tensor returned by `tf.audio.decode_wav` is `[samples, channels]`, where `channels` is `1` for mono or `2` for stereo. The mini Speech Commands dataset only contains mono recordings. ",
"_____no_output_____"
]
],
[
[
"test_file = tf.io.read_file(DATASET_PATH+'/down/0a9f9af7_nohash_0.wav')\ntest_audio, _ = tf.audio.decode_wav(contents=test_file)\ntest_audio.shape",
"_____no_output_____"
]
],
[
[
"Now, let's define a function that preprocesses the dataset's raw WAV audio files into audio tensors:",
"_____no_output_____"
]
],
[
[
"def decode_audio(audio_binary):\n # Decode WAV-encoded audio files to `float32` tensors, normalized\n # to the [-1.0, 1.0] range. Return `float32` audio and a sample rate.\n audio, _ = tf.audio.decode_wav(contents=audio_binary)\n # Since all the data is single channel (mono), drop the `channels`\n # axis from the array.\n return tf.squeeze(audio, axis=-1)",
"_____no_output_____"
]
],
[
[
"Define a function that creates labels using the parent directories for each file:\n\n- Split the file paths into `tf.RaggedTensor`s (tensors with ragged dimensionsโwith slices that may have different lengths).",
"_____no_output_____"
]
],
[
[
"def get_label(file_path):\n parts = tf.strings.split(\n input=file_path,\n sep=os.path.sep)\n # Note: You'll use indexing here instead of tuple unpacking to enable this\n # to work in a TensorFlow graph.\n return parts[-2]",
"_____no_output_____"
]
],
[
[
"Define another helper functionโ`get_waveform_and_label`โthat puts it all together:\n\n- The input is the WAV audio filename.\n- The output is a tuple containing the audio and label tensors ready for supervised learning.",
"_____no_output_____"
]
],
[
[
"def get_waveform_and_label(file_path):\n label = get_label(file_path)\n audio_binary = tf.io.read_file(file_path)\n waveform = decode_audio(audio_binary)\n return waveform, label",
"_____no_output_____"
]
],
[
[
"Build the training set to extract the audio-label pairs:\n\n- Create a `tf.data.Dataset` with `Dataset.from_tensor_slices` and `Dataset.map`, using `get_waveform_and_label` defined earlier.\n\nYou'll build the validation and test sets using a similar procedure later on.",
"_____no_output_____"
]
],
[
[
"AUTOTUNE = tf.data.AUTOTUNE\n\nfiles_ds = tf.data.Dataset.from_tensor_slices(train_files)\n\nwaveform_ds = files_ds.map(\n map_func=get_waveform_and_label,\n num_parallel_calls=AUTOTUNE)",
"_____no_output_____"
]
],
[
[
"Let's plot a few audio waveforms:",
"_____no_output_____"
]
],
[
[
"rows = 3\ncols = 3\nn = rows * cols\nfig, axes = plt.subplots(rows, cols, figsize=(10, 12))\n\nfor i, (audio, label) in enumerate(waveform_ds.take(n)):\n r = i // cols\n c = i % cols\n ax = axes[r][c]\n ax.plot(audio.numpy())\n ax.set_yticks(np.arange(-1.2, 1.2, 0.2))\n label = label.numpy().decode('utf-8')\n ax.set_title(label)\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Convert waveforms to spectrograms\n\nThe waveforms in the dataset are represented in the time domain. Next, you'll transform the waveforms from the time-domain signals into the time-frequency-domain signals by computing the <a href=\"https://en.wikipedia.org/wiki/Short-time_Fourier_transform\" class=\"external\">short-time Fourier transform (STFT)</a> to convert the waveforms to <a href=\"https://en.wikipedia.org/wiki/Spectrogram\" class=\"external\">spectrograms</a>, which show frequency changes over time and can be represented as 2D images. You will feed the spectrogram images into your neural network to train the model.\n\nA Fourier transform (`tf.signal.fft`) converts a signal to its component frequencies, but loses all time information. In comparison, STFT (`tf.signal.stft`) splits the signal into windows of time and runs a Fourier transform on each window, preserving some time information, and returning a 2D tensor that you can run standard convolutions on.\n\nCreate a utility function for converting waveforms to spectrograms:\n\n- The waveforms need to be of the same length, so that when you convert them to spectrograms, the results have similar dimensions. This can be done by simply zero-padding the audio clips that are shorter than one second (using `tf.zeros`).\n- When calling `tf.signal.stft`, choose the `frame_length` and `frame_step` parameters such that the generated spectrogram \"image\" is almost square. For more information on the STFT parameters choice, refer to <a href=\"https://www.coursera.org/lecture/audio-signal-processing/stft-2-tjEQe\" class=\"external\">this Coursera video</a> on audio signal processing and STFT.\n- The STFT produces an array of complex numbers representing magnitude and phase. However, in this tutorial you'll only use the magnitude, which you can derive by applying `tf.abs` on the output of `tf.signal.stft`.",
"_____no_output_____"
]
],
[
[
"def get_spectrogram(waveform):\n # Zero-padding for an audio waveform with less than 16,000 samples.\n input_len = 16000\n waveform = waveform[:input_len]\n zero_padding = tf.zeros(\n [16000] - tf.shape(waveform),\n dtype=tf.float32)\n # Cast the waveform tensors' dtype to float32.\n waveform = tf.cast(waveform, dtype=tf.float32)\n # Concatenate the waveform with `zero_padding`, which ensures all audio\n # clips are of the same length.\n equal_length = tf.concat([waveform, zero_padding], 0)\n # Convert the waveform to a spectrogram via a STFT.\n spectrogram = tf.signal.stft(\n equal_length, frame_length=255, frame_step=128)\n # Obtain the magnitude of the STFT.\n spectrogram = tf.abs(spectrogram)\n # Add a `channels` dimension, so that the spectrogram can be used\n # as image-like input data with convolution layers (which expect\n # shape (`batch_size`, `height`, `width`, `channels`).\n spectrogram = spectrogram[..., tf.newaxis]\n return spectrogram",
"_____no_output_____"
]
],
[
[
"Next, start exploring the data. Print the shapes of one example's tensorized waveform and the corresponding spectrogram, and play the original audio:",
"_____no_output_____"
]
],
[
[
"for waveform, label in waveform_ds.take(1):\n label = label.numpy().decode('utf-8')\n spectrogram = get_spectrogram(waveform)\n\nprint('Label:', label)\nprint('Waveform shape:', waveform.shape)\nprint('Spectrogram shape:', spectrogram.shape)\nprint('Audio playback')\ndisplay.display(display.Audio(waveform, rate=16000))",
"Label: down\nWaveform shape: (16000,)\nSpectrogram shape: (124, 129, 1)\nAudio playback\n"
]
],
[
[
"Now, define a function for displaying a spectrogram:",
"_____no_output_____"
]
],
[
[
"def plot_spectrogram(spectrogram, ax):\n if len(spectrogram.shape) > 2:\n assert len(spectrogram.shape) == 3\n spectrogram = np.squeeze(spectrogram, axis=-1)\n # Convert the frequencies to log scale and transpose, so that the time is\n # represented on the x-axis (columns).\n # Add an epsilon to avoid taking a log of zero.\n log_spec = np.log(spectrogram.T + np.finfo(float).eps)\n height = log_spec.shape[0]\n width = log_spec.shape[1]\n X = np.linspace(0, np.size(spectrogram), num=width, dtype=int)\n Y = range(height)\n ax.pcolormesh(X, Y, log_spec)",
"_____no_output_____"
]
],
[
[
"Plot the example's waveform over time and the corresponding spectrogram (frequencies over time):",
"_____no_output_____"
]
],
[
[
"fig, axes = plt.subplots(2, figsize=(12, 8))\ntimescale = np.arange(waveform.shape[0])\naxes[0].plot(timescale, waveform.numpy())\naxes[0].set_title('Waveform')\naxes[0].set_xlim([0, 16000])\n\nplot_spectrogram(spectrogram.numpy(), axes[1])\naxes[1].set_title('Spectrogram')\nplt.show()",
"_____no_output_____"
]
],
[
[
"Now, define a function that transforms the waveform dataset into spectrograms and their corresponding labels as integer IDs:",
"_____no_output_____"
]
],
[
[
"def get_spectrogram_and_label_id(audio, label):\n spectrogram = get_spectrogram(audio)\n label_id = tf.argmax(label == commands)\n return spectrogram, label_id",
"_____no_output_____"
]
],
[
[
"Map `get_spectrogram_and_label_id` across the dataset's elements with `Dataset.map`:",
"_____no_output_____"
]
],
[
[
"spectrogram_ds = waveform_ds.map(\n map_func=get_spectrogram_and_label_id,\n num_parallel_calls=AUTOTUNE)",
"_____no_output_____"
]
],
[
[
"Examine the spectrograms for different examples of the dataset:",
"_____no_output_____"
]
],
[
[
"rows = 3\ncols = 3\nn = rows*cols\nfig, axes = plt.subplots(rows, cols, figsize=(10, 10))\n\nfor i, (spectrogram, label_id) in enumerate(spectrogram_ds.take(n)):\n r = i // cols\n c = i % cols\n ax = axes[r][c]\n plot_spectrogram(spectrogram.numpy(), ax)\n ax.set_title(commands[label_id.numpy()])\n ax.axis('off')\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Build and train the model\n\nRepeat the training set preprocessing on the validation and test sets:",
"_____no_output_____"
]
],
[
[
"def preprocess_dataset(files):\n files_ds = tf.data.Dataset.from_tensor_slices(files)\n output_ds = files_ds.map(\n map_func=get_waveform_and_label,\n num_parallel_calls=AUTOTUNE)\n output_ds = output_ds.map(\n map_func=get_spectrogram_and_label_id,\n num_parallel_calls=AUTOTUNE)\n return output_ds",
"_____no_output_____"
],
[
"train_ds = spectrogram_ds\nval_ds = preprocess_dataset(val_files)\ntest_ds = preprocess_dataset(test_files)",
"_____no_output_____"
]
],
[
[
"Batch the training and validation sets for model training:",
"_____no_output_____"
]
],
[
[
"batch_size = 64\ntrain_ds = train_ds.batch(batch_size)\nval_ds = val_ds.batch(batch_size)",
"_____no_output_____"
]
],
[
[
"Add `Dataset.cache` and `Dataset.prefetch` operations to reduce read latency while training the model:",
"_____no_output_____"
]
],
[
[
"train_ds = train_ds.cache().prefetch(AUTOTUNE)\nval_ds = val_ds.cache().prefetch(AUTOTUNE)",
"_____no_output_____"
]
],
[
[
"For the model, you'll use a simple convolutional neural network (CNN), since you have transformed the audio files into spectrogram images.\n\nYour `tf.keras.Sequential` model will use the following Keras preprocessing layers:\n\n- `tf.keras.layers.Resizing`: to downsample the input to enable the model to train faster.\n- `tf.keras.layers.Normalization`: to normalize each pixel in the image based on its mean and standard deviation.\n\nFor the `Normalization` layer, its `adapt` method would first need to be called on the training data in order to compute aggregate statistics (that is, the mean and the standard deviation).",
"_____no_output_____"
]
],
[
[
"for spectrogram, _ in spectrogram_ds.take(1):\n input_shape = spectrogram.shape\nprint('Input shape:', input_shape)\nnum_labels = len(commands)\n\n# Instantiate the `tf.keras.layers.Normalization` layer.\nnorm_layer = layers.Normalization()\n# Fit the state of the layer to the spectrograms\n# with `Normalization.adapt`.\nnorm_layer.adapt(data=spectrogram_ds.map(map_func=lambda spec, label: spec))\n\nmodel = models.Sequential([\n layers.Input(shape=input_shape),\n # Downsample the input.\n layers.Resizing(32, 32),\n # Normalize.\n norm_layer,\n layers.Conv2D(32, 3, activation='relu'),\n layers.Conv2D(64, 3, activation='relu'),\n layers.MaxPooling2D(),\n layers.Dropout(0.25),\n layers.Flatten(),\n layers.Dense(128, activation='relu'),\n layers.Dropout(0.5),\n layers.Dense(num_labels),\n])\n\nmodel.summary()",
"Input shape: (124, 129, 1)\nModel: \"sequential\"\n_________________________________________________________________\n Layer (type) Output Shape Param # \n=================================================================\n resizing (Resizing) (None, 32, 32, 1) 0 \n \n normalization (Normalizatio (None, 32, 32, 1) 3 \n n) \n \n conv2d (Conv2D) (None, 30, 30, 32) 320 \n \n conv2d_1 (Conv2D) (None, 28, 28, 64) 18496 \n \n max_pooling2d (MaxPooling2D (None, 14, 14, 64) 0 \n ) \n \n dropout (Dropout) (None, 14, 14, 64) 0 \n \n flatten (Flatten) (None, 12544) 0 \n \n dense (Dense) (None, 128) 1605760 \n \n dropout_1 (Dropout) (None, 128) 0 \n \n dense_1 (Dense) (None, 8) 1032 \n \n=================================================================\nTotal params: 1,625,611\nTrainable params: 1,625,608\nNon-trainable params: 3\n_________________________________________________________________\n"
]
],
[
[
"Configure the Keras model with the Adam optimizer and the cross-entropy loss:",
"_____no_output_____"
]
],
[
[
"model.compile(\n optimizer=tf.keras.optimizers.Adam(),\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=['accuracy'],\n)",
"_____no_output_____"
]
],
[
[
"Train the model over 10 epochs for demonstration purposes:",
"_____no_output_____"
]
],
[
[
"EPOCHS = 10\nhistory = model.fit(\n train_ds,\n validation_data=val_ds,\n epochs=EPOCHS,\n callbacks=tf.keras.callbacks.EarlyStopping(verbose=1, patience=2),\n)",
"Epoch 1/10\n100/100 [==============================] - 29s 190ms/step - loss: 1.7373 - accuracy: 0.3702 - val_loss: 1.3460 - val_accuracy: 0.5312\nEpoch 2/10\n100/100 [==============================] - 1s 14ms/step - loss: 1.1873 - accuracy: 0.5791 - val_loss: 0.9391 - val_accuracy: 0.6825\nEpoch 3/10\n100/100 [==============================] - 1s 13ms/step - loss: 0.9010 - accuracy: 0.6756 - val_loss: 0.8118 - val_accuracy: 0.7175\nEpoch 4/10\n100/100 [==============================] - 2s 18ms/step - loss: 0.7587 - accuracy: 0.7317 - val_loss: 0.7029 - val_accuracy: 0.7650\nEpoch 5/10\n100/100 [==============================] - 1s 13ms/step - loss: 0.6418 - accuracy: 0.7703 - val_loss: 0.6502 - val_accuracy: 0.7862\nEpoch 6/10\n100/100 [==============================] - 1s 13ms/step - loss: 0.5611 - accuracy: 0.8014 - val_loss: 0.5925 - val_accuracy: 0.7975\nEpoch 7/10\n100/100 [==============================] - 1s 13ms/step - loss: 0.5021 - accuracy: 0.8214 - val_loss: 0.5584 - val_accuracy: 0.8238\nEpoch 8/10\n100/100 [==============================] - 2s 19ms/step - loss: 0.4375 - accuracy: 0.8456 - val_loss: 0.5358 - val_accuracy: 0.8250\nEpoch 9/10\n100/100 [==============================] - 1s 13ms/step - loss: 0.4112 - accuracy: 0.8494 - val_loss: 0.5364 - val_accuracy: 0.8313\nEpoch 10/10\n100/100 [==============================] - 1s 13ms/step - loss: 0.3742 - accuracy: 0.8675 - val_loss: 0.5228 - val_accuracy: 0.8375\n"
]
],
[
[
"Let's plot the training and validation loss curves to check how your model has improved during training:",
"_____no_output_____"
]
],
[
[
"metrics = history.history\nplt.plot(history.epoch, metrics['loss'], metrics['val_loss'])\nplt.legend(['loss', 'val_loss'])\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Evaluate the model performance\n\nRun the model on the test set and check the model's performance:",
"_____no_output_____"
]
],
[
[
"test_audio = []\ntest_labels = []\n\nfor audio, label in test_ds:\n test_audio.append(audio.numpy())\n test_labels.append(label.numpy())\n\ntest_audio = np.array(test_audio)\ntest_labels = np.array(test_labels)",
"_____no_output_____"
],
[
"y_pred = np.argmax(model.predict(test_audio), axis=1)\ny_true = test_labels\n\ntest_acc = sum(y_pred == y_true) / len(y_true)\nprint(f'Test set accuracy: {test_acc:.0%}')",
"Test set accuracy: 82%\n"
]
],
[
[
"### Display a confusion matrix\n\nUse a <a href=\"https://developers.google.com/machine-learning/glossary#confusion-matrix\" class=\"external\">confusion matrix</a> to check how well the model did classifying each of the commands in the test set:\n",
"_____no_output_____"
]
],
[
[
"confusion_mtx = tf.math.confusion_matrix(y_true, y_pred)\nplt.figure(figsize=(10, 8))\nsns.heatmap(confusion_mtx,\n xticklabels=commands,\n yticklabels=commands,\n annot=True, fmt='g')\nplt.xlabel('Prediction')\nplt.ylabel('Label')\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Run inference on an audio file\n\nFinally, verify the model's prediction output using an input audio file of someone saying \"no\". How well does your model perform?",
"_____no_output_____"
]
],
[
[
"sample_file = data_dir/'no/01bb6a2a_nohash_0.wav'\n\nsample_ds = preprocess_dataset([str(sample_file)])\n\nfor spectrogram, label in sample_ds.batch(1):\n prediction = model(spectrogram)\n plt.bar(commands, tf.nn.softmax(prediction[0]))\n plt.title(f'Predictions for \"{commands[label[0]]}\"')\n plt.show()",
"_____no_output_____"
]
],
[
[
"As the output suggests, your model should have recognized the audio command as \"no\".",
"_____no_output_____"
],
[
"## Next steps\n\nThis tutorial demonstrated how to carry out simple audio classification/automatic speech recognition using a convolutional neural network with TensorFlow and Python. To learn more, consider the following resources:\n\n- The [Sound classification with YAMNet](https://www.tensorflow.org/hub/tutorials/yamnet) tutorial shows how to use transfer learning for audio classification.\n- The notebooks from <a href=\"https://www.kaggle.com/c/tensorflow-speech-recognition-challenge/overview\" class=\"external\">Kaggle's TensorFlow speech recognition challenge</a>.\n- The \n<a href=\"https://codelabs.developers.google.com/codelabs/tensorflowjs-audio-codelab/index.html#0\" class=\"external\">TensorFlow.js - Audio recognition using transfer learning codelab</a> teaches how to build your own interactive web app for audio classification.\n- <a href=\"https://arxiv.org/abs/1709.04396\" class=\"external\">A tutorial on deep learning for music information retrieval</a> (Choi et al., 2017) on arXiv.\n- TensorFlow also has additional support for [audio data preparation and augmentation](https://www.tensorflow.org/io/tutorials/audio) to help with your own audio-based projects.\n- Consider using the <a href=\"https://librosa.org/\" class=\"external\">librosa</a> libraryโa Python package for music and audio analysis.",
"_____no_output_____"
]
],
[
[
"",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
]
] |
eca77260a7117ab42203805e4f5a5149afcc885c | 441,254 | ipynb | Jupyter Notebook | hw5/hw5-soln.ipynb | TheAnig/computer-vision | 8305de003896cb1b9a8c7302fa832c5b8f271477 | [
"MIT"
] | null | null | null | hw5/hw5-soln.ipynb | TheAnig/computer-vision | 8305de003896cb1b9a8c7302fa832c5b8f271477 | [
"MIT"
] | null | null | null | hw5/hw5-soln.ipynb | TheAnig/computer-vision | 8305de003896cb1b9a8c7302fa832c5b8f271477 | [
"MIT"
] | null | null | null | 496.907658 | 147,988 | 0.94015 | [
[
[
"### Imports",
"_____no_output_____"
]
],
[
[
"from skimage.io import imread\nfrom skimage.filters import gaussian\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom skimage import img_as_float\nimport math",
"_____no_output_____"
]
],
[
[
"# Find best image differencing parameters.",
"_____no_output_____"
]
],
[
[
"im1 = imread('./data/aerobic-001.bmp')\nim1 = img_as_float(im1)\nim2 = imread('./data/aerobic-002.bmp')\nim2 = img_as_float(im2)",
"_____no_output_____"
]
],
[
[
"## Simple Image Difference\n\nFor sake of simplicity of implementation, we shall use the naive differencing function, as given as below instead of using Weber's difference.\n\n$\\Delta I = 1 \\text{ if } |I_t - I_{t-1}| > \\tau$",
"_____no_output_____"
]
],
[
[
"threshLevels = [0.05, 0.08, 0.10, 0.12, 0.15, 0.2]\nf, axarr = plt.subplots(2, 3, sharex='col', sharey='row', dpi=200)\nfor thresh in threshLevels:\n magIm = np.abs(im1 - im2)\n tImg = magIm > thresh\n idx = threshLevels.index(thresh)\n #print((int(idx/3), idx%3))\n axarr[int(idx/3), idx%3].axis('off')\n axarr[int(idx/3), idx%3].set_title(f'Value = {thresh}')\n axarr[int(idx/3), idx%3].imshow(tImg, cmap = 'gray', aspect='auto')",
"_____no_output_____"
]
],
[
[
"Notice that by default, the thresholding generates a fractured image, which is not ideal for motion analysis. This can be fixed by using morphological operators as discussed below.",
"_____no_output_____"
]
],
[
[
"from skimage.morphology import closing\nfrom skimage.morphology import square\n\nthreshLevels = [0.05, 0.08, 0.10, 0.12, 0.15, 0.2]\nf, axarr = plt.subplots(2, 3, sharex='col', sharey='row', dpi=200)\nfor thresh in threshLevels:\n magIm = np.abs(im1 - im2)\n magIm = closing(magIm, square(5))\n tImg = magIm > thresh\n idx = threshLevels.index(thresh)\n #print((int(idx/3), idx%3))\n axarr[int(idx/3), idx%3].axis('off')\n axarr[int(idx/3), idx%3].set_title(f'Value = {thresh}')\n axarr[int(idx/3), idx%3].imshow(tImg, cmap = 'gray', aspect='auto')\n",
"_____no_output_____"
]
],
[
[
"The results upon using closing, I found, generate the best result. But I have also included other operations for the sake of comparison.",
"_____no_output_____"
]
],
[
[
"from skimage.morphology import opening\nthreshLevels = [0.05, 0.08, 0.10, 0.12, 0.15, 0.2]\nf, axarr = plt.subplots(2, 3, sharex='col', sharey='row', dpi=200)\nfor thresh in threshLevels:\n magIm = np.abs(im1 - im2)\n magIm = opening(magIm, square(5))\n tImg = magIm > thresh\n idx = threshLevels.index(thresh)\n #print((int(idx/3), idx%3))\n axarr[int(idx/3), idx%3].axis('off')\n axarr[int(idx/3), idx%3].set_title(f'Value = {thresh}')\n axarr[int(idx/3), idx%3].imshow(tImg, cmap = 'gray', aspect='auto')",
"_____no_output_____"
]
],
[
[
"Opening fails because it erodes first, causing our fractured image to vanish.",
"_____no_output_____"
]
],
[
[
"from skimage.morphology import dilation\nthreshLevels = [0.05, 0.08, 0.10, 0.12, 0.15, 0.2]\nf, axarr = plt.subplots(2, 3, sharex='col', sharey='row', dpi=200)\nfor thresh in threshLevels:\n magIm = np.abs(im1 - im2)\n magIm = dilation(magIm, square(5))\n tImg = magIm > thresh\n idx = threshLevels.index(thresh)\n #print((int(idx/3), idx%3))\n axarr[int(idx/3), idx%3].axis('off')\n axarr[int(idx/3), idx%3].set_title(f'Value = {thresh}')\n axarr[int(idx/3), idx%3].imshow(tImg, cmap = 'gray', aspect='auto')",
"_____no_output_____"
]
],
[
[
"While dilation generates a good result, the chunkiness could lead to false positives for motion detectors.",
"_____no_output_____"
]
],
[
[
"from skimage.filters.rank import median\n\nthreshLevels = [0.05, 0.08, 0.10, 0.12, 0.15, 0.2]\nf, axarr = plt.subplots(2, 3, sharex='col', sharey='row', dpi=200)\nfor thresh in threshLevels:\n im1 = median(im1)\n im2 = median(im2)\n magIm = np.abs(im1 - im2)\n tImg = magIm > thresh\n idx = threshLevels.index(thresh)\n #print((int(idx/3), idx%3))\n axarr[int(idx/3), idx%3].axis('off')\n axarr[int(idx/3), idx%3].set_title(f'Value = {thresh}')\n axarr[int(idx/3), idx%3].imshow(tImg, cmap = 'gray', aspect='auto')",
"/usr/lib/python3.7/site-packages/skimage/util/dtype.py:130: UserWarning: Possible precision loss when converting from float64 to uint8\n .format(dtypeobj_in, dtypeobj_out))\n"
]
],
[
[
"I'm not sure if this is an implementation fault or the result of sharpening an image with a lot of information. Whatever the case, the generated image cannot be used for the upcoming exercise.",
"_____no_output_____"
],
[
"# Perform Motion History Imaging and Motion Energy Imaging using the best differencing technique",
"_____no_output_____"
]
],
[
[
"aer_cube = []\n\nfor i in range(1,23):\n tmp = imread('./data/aerobic-{:03}.bmp'.format(i))\n tmp = img_as_float(tmp)\n aer_cube.append(tmp)\n \naer_cube=np.array(aer_cube)",
"_____no_output_____"
],
[
"f, axarr = plt.subplots(5, 5, sharex='col', sharey='row', dpi=200, figsize=(10,10))\nfor idx in range(0, 21):\n axarr[int(idx/5), idx%5].axis('off')\n axarr[int(idx/5), idx%5].imshow(np.abs(aer_cube[idx]-aer_cube[idx+1]) > 0.08, cmap = 'gray')\n \nfor idx in range(21, 25):\n axarr[idx//5, idx%5].axis('off')",
"_____no_output_____"
]
],
[
[
"Just a plot of consecutive image differences.",
"_____no_output_____"
]
],
[
[
"aer_cube.shape\n\naer_cube[0].max()\n\nT = aer_cube.shape[0]",
"_____no_output_____"
],
[
"aer_diff = np.abs(aer_cube[1:,:,:] - aer_cube[:-1,:,:])\n\nfor i in range(aer_diff.shape[0]):\n aer_diff[i] = closing(aer_diff[i], square(5)) > 0.08",
"_____no_output_____"
],
[
"MEI = [aer_diff[:i, :, :].max(axis=0) for i in range(1, T)]",
"_____no_output_____"
],
[
"f, axarr = plt.subplots(5, 5, sharex='col', sharey='row', dpi=200, figsize=(10,10))\nfor idx in range(0, 21):\n axarr[int(idx/5), idx%5].axis('off')\n axarr[int(idx/5), idx%5].imshow(MEI[idx], cmap = 'gray')\n \nfor idx in range(21, 25):\n axarr[idx//5, idx%5].axis('off')",
"_____no_output_____"
]
],
[
[
"We can see that MEI imaging produces a chunky representation of the motion",
"_____no_output_____"
]
],
[
[
"MHI = np.zeros(aer_cube.shape)\n\nfor idx in range(1, T-1):\n for i in range(0, aer_cube.shape[1]):\n for j in range(0, aer_cube.shape[2]):\n if aer_diff[idx][i][j] == 1:\n MHI[idx][i][j] = T\n else:\n MHI[idx][i][j] = max(0, MHI[idx-1][i][j] - 1)",
"_____no_output_____"
],
[
"f, axarr = plt.subplots(5, 5, sharex='col', sharey='row', dpi=200, figsize=(10,10))\nfor idx in range(0, 21):\n axarr[int(idx/5), idx%5].axis('off')\n axarr[int(idx/5), idx%5].imshow(MHI[idx], cmap = 'gray')\n \nfor idx in range(21, 25):\n axarr[idx//5, idx%5].axis('off')",
"_____no_output_____"
]
],
[
[
"The MHI imaging we can see produces a smoother texture which better captures the direction of the movement. MEI would result in the same image if the hand was being raised or lowered while MHI would generate opposite textures, thus making it more useful.",
"_____no_output_____"
]
],
[
[
"delta_T = (T-15)/(8)\n\nMHI_deltaT = np.zeros(MHI.shape)\n\nfor idx in range(1, T-1):\n for i in range(0, aer_cube.shape[1]):\n for j in range(0, aer_cube.shape[2]):\n if MHI[idx][i][j] - delta_T > 0:\n MHI_deltaT[idx][i][j] = MHI[idx][i][j] - delta_T\n\nf, axarr = plt.subplots(5, 5, sharex='col', sharey='row', dpi=200, figsize=(10,10))\nfor idx in range(0, 21):\n axarr[int(idx/5), idx%5].axis('off')\n axarr[int(idx/5), idx%5].imshow(MHI_deltaT[idx], cmap = 'gray')\n \nfor idx in range(21, 25):\n axarr[idx//5, idx%5].axis('off')",
"_____no_output_____"
]
],
[
[
"This is the implementation of MHI but with $\\tau - \\Delta\\tau$",
"_____no_output_____"
],
[
"## Similtude Moments for the last MHI and MEI",
"_____no_output_____"
]
],
[
[
"from numpy import mgrid, sum\n\n\ndef similitudeMoments(image):\n    \n    ### Make sure the image is a grayscale image\n    assert len(image.shape) == 2\n    \n    ## Temp grid for storing intermediate operations\n    x, y = mgrid[:image.shape[0],:image.shape[1]]\n    \n    ## Our final dictionary that contains the moments\n    moments = {}\n    \n    \n    moments['mean_x'] = sum(x*image)/sum(image)\n    moments['mean_y'] = sum(y*image)/sum(image)\n    \n    ## Spatial moments: Spatial moments often used to describe region shape\n    \n    # Zeroth Order\n    moments['m00'] = sum(image)\n    \n    # First Order\n    moments['m01'] = sum(x*image)\n    moments['m10'] = sum(y*image)\n    \n    # Second Order\n    moments['m11'] = sum(y*x*image)\n    moments['m02'] = sum(x**2*image)\n    moments['m20'] = sum(y**2*image)\n    \n    # Third Order\n    moments['m12'] = sum(x*y**2*image)\n    moments['m21'] = sum(x**2*y*image)\n    moments['m03'] = sum(x**3*image)\n    moments['m30'] = sum(y**3*image)\n    \n    ## Central moments: Translation Invariant\n    \n    # First Order (Seems useless, in terms of calculating the final nu moments. But still kept)\n    moments['mu01']= sum((y-moments['mean_y'])*image)\n    moments['mu10']= sum((x-moments['mean_x'])*image)\n    \n    # Second Order (Moment Ellipse Orientation)\n    moments['mu11'] = sum((x-moments['mean_x'])*(y-moments['mean_y'])*image)\n    moments['mu02'] = sum((y-moments['mean_y'])**2*image)\n    moments['mu20'] = sum((x-moments['mean_x'])**2*image)\n    \n    # Third Order (Skewness of the Image)\n    moments['mu12'] = sum((x-moments['mean_x'])*(y-moments['mean_y'])**2*image)\n    moments['mu21'] = sum((x-moments['mean_x'])**2*(y-moments['mean_y'])*image)\n    moments['mu03'] = sum((y-moments['mean_y'])**3*image)\n    moments['mu30'] = sum((x-moments['mean_x'])**3*image)\n    \n    # Similitude Moments: Invariant to translation and scale\n    \n    moments['eta11'] = moments['mu11'] / sum(image)**(2/2+1)\n    moments['eta12'] = moments['mu12'] / sum(image)**(3/2+1)\n    moments['eta21'] = moments['mu21'] / sum(image)**(3/2+1)\n    moments['eta02'] = moments['mu02'] / sum(image)**(2/2+1)\n    moments['eta20'] = moments['mu20'] / sum(image)**(2/2+1)\n    moments['eta03'] = moments['mu03'] / sum(image)**(3/2+1)\n    moments['eta30'] = moments['mu30'] / sum(image)**(3/2+1)\n    \n    return moments",
"_____no_output_____"
],
[
"lastMHI = MHI_deltaT[-2]\nlastMHI = lastMHI/T\n\nplt.imshow(lastMHI, cmap='gray')",
"_____no_output_____"
],
[
"lastMHI_moments = similitudeMoments(lastMHI)\nprint(lastMHI_moments)",
"{'mean_x': 154.03915051895743, 'mean_y': 86.05553257989888, 'm00': 2266.8920454545455, 'm01': 349190.125, 'm10': 195078.60227272726, 'm11': 29705127.90340909, 'm02': 54553827.30681817, 'm20': 18033560.238636363, 'm12': 2710335140.607955, 'm21': 4595268018.482954, 'm03': 8643881351.704546, 'm30': 1782693918.6022725, 'mu01': 2.091837814077735e-11, 'mu10': -9.094947017729282e-12, 'mu11': -344614.2751073751, 'mu02': 1245967.225114544, 'mu20': 764877.0822096244, 'mu12': -8227229.426036114, 'mu21': 6777535.711377485, 'mu03': 16361541.68607024, 'mu30': 4814103.814391278, 'eta11': -0.06706124106216878, 'eta12': -0.03362610091249923, 'eta21': 0.027700953500533403, 'eta02': 0.2424627024313879, 'eta20': 0.14884353347523346, 'eta03': 0.06687243339522808, 'eta30': 0.019676069826578574}\n"
]
],
[
[
"We can see that as a consequence of being smoother textured, the moments reflect this in their means, where the mean is lower than the one for MEI, reflecting the change in perceived center of attention.",
"_____no_output_____"
]
],
[
[
"lastMEI = MEI[-1]\nlastMEI_normalized = np.zeros(lastMEI.shape)\nfor i in range(lastMEI.shape[0]):\n for j in range(lastMEI.shape[1]):\n t = (lastMEI[i][j]*(20. - 1.)/21.)\n if t > 0:\n lastMEI_normalized[i][j] = t\n\nplt.imshow(lastMEI_normalized, cmap='gray')",
"_____no_output_____"
],
[
"lastMEI_moments = similitudeMoments(lastMEI_normalized)\nprint(lastMEI_moments)",
"{'mean_x': 161.99532324621734, 'mean_y': 86.74140302613479, 'm00': 3288.809523809524, 'm01': 532771.7619047619, 'm10': 285275.95238095237, 'm11': 45932242.142857134, 'm02': 87706841.85714287, 'm20': 26192081.76190476, 'm12': 4179727007.095238, 'm21': 7530974327.285713, 'm03': 14661736276.333332, 'm30': 2537229224.5238094, 'mu01': 3.3651303965598345e-11, 'mu10': -2.546585164964199e-11, 'mu11': -281127.97746774094, 'mu02': 1446845.4027641318, 'mu20': 1400308.0709242157, 'mu12': -14496874.024974491, 'mu21': 14242644.772700459, 'mu03': 14288503.925605964, 'mu30': -48638.40669892728, 'eta11': -0.025991217607318155, 'eta12': -0.023371014316687883, 'eta21': 0.022961160752093104, 'eta02': 0.13376567514240228, 'eta20': 0.1294631438553694, 'eta03': 0.023035092202229136, 'eta30': -7.841200091435006e-05}\n"
]
],
[
[
"Also notice that other features are largely similar since the shape itself resembles the one generated by MHI.",
"_____no_output_____"
],
[
"# Perform Optic Flow on the set of images using the best differencing technique",
"_____no_output_____"
]
],
[
[
"box1 = np.zeros((101,101))\nbox2 = np.zeros((101,101))\n\nsize = 21\nbox1[39:39+size, 5:5+size] = 1\nbox2[40:40+size, 6:6+size] = 1\n\nbox1 = img_as_float(box1)\nbox2 = img_as_float(box2)\n\nplt.figure(figsize=(10,10))\nplt.axis('off')\nplt.imshow(np.abs(box2-box1), cmap='gray')",
"_____no_output_____"
]
],
[
[
"Just a plot of image diff, to show that it is indeed shifted by just 1 pixel down and right.",
"_____no_output_____"
]
],
[
[
"from scipy import signal\ndef optical_flow(I1g, I2g, window_size):\n \n kernel_x = 0.25 * np.array([[-1., 1.], [-1., 1.]])\n kernel_y = 0.25 * np.array([[-1., -1.], [1., 1.]])\n kernel_t = 0.25 * np.array([[1., 1.], [1., 1.]])\n kernel_x = np.fliplr(kernel_x)\n mode = 'same'\n fx = (signal.convolve2d(I1g, kernel_x, boundary='symm', mode=mode))\n fy = (signal.convolve2d(I1g, kernel_y, boundary='symm', mode=mode))\n ft = (signal.convolve2d(I2g, kernel_t, boundary='symm', mode=mode) + \n signal.convolve2d(I1g, -kernel_t, boundary='symm', mode=mode))\n \n #ft = I2g - I1g\n \n u = np.zeros(I1g.shape)\n v = np.zeros(I1g.shape)\n \n window = np.ones((window_size,window_size))\n \n denom = (signal.convolve2d(fx**2, window, boundary='symm', mode=mode) * \n signal.convolve2d(fy**2, window, boundary='symm', mode=mode) -\n signal.convolve2d(fx*fy, window, boundary='symm', mode=mode)**2)\n denom[denom == 0] = 1\n \n u = ((signal.convolve2d(fy**2, window, boundary='symm', mode=mode) * \n signal.convolve2d(fy*ft, window, boundary='symm', mode=mode) + \n signal.convolve2d(fx*fy, window, boundary='symm', mode=mode) * \n signal.convolve2d(fy*ft, window, boundary='symm', mode=mode) ) /\n denom)\n \n \n v = ((signal.convolve2d(fx*ft, window, boundary='symm', mode=mode) * \n signal.convolve2d(fx*fy, window, boundary='symm', mode=mode) -\n signal.convolve2d(fx**2, window, boundary='symm', mode=mode) * \n signal.convolve2d(fy*ft, window, boundary='symm', mode=mode)) / \n denom)\n \n \n return (u,v)\n\nu,v = optical_flow(box1, box2, 3)",
"_____no_output_____"
]
],
[
[
"The optical flow calculation function. Notice that I use convolution to calculate the vectors. There might be easier ways to do it, and even ways that are computationally a lot cheaper, but in order to preserve the scales when plotting and make sure the formula used is a direct translation of the ones presented in the slides, I decided to go with this approach.",
"_____no_output_____"
]
],
[
[
"x = np.arange(0, box1.shape[1], 1)\ny = np.arange(0, box1.shape[0], 1)\nx, y = np.meshgrid(x, y)\ndelta = 3",
"_____no_output_____"
],
[
"plt.figure(figsize=(15,15))\nplt.axis('off')\nplt.imshow(box1, cmap='jet')\nplt.quiver(x[::delta, ::delta], y[::delta, ::delta],\n u[::delta, ::delta], v[::delta, ::delta],\ncolor='lime', pivot='middle', headwidth=2, headlength=3, scale=25)",
"_____no_output_____"
]
],
[
[
"Notice that the arrows are pointed in the direction of motion.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
eca774c1ad22018dc2894be5356e1cf40164905d | 46,537 | ipynb | Jupyter Notebook | examples/Notebook Experiments/script2.ipynb | Mike-HubGit/pymeasure | 130d49b248e49cfd6a4ddfd19073779af66405e4 | [
"MIT"
] | 10 | 2019-01-23T05:52:59.000Z | 2021-11-19T02:56:20.000Z | examples/Notebook Experiments/script2.ipynb | Mike-HubGit/pymeasure | 130d49b248e49cfd6a4ddfd19073779af66405e4 | [
"MIT"
] | null | null | null | examples/Notebook Experiments/script2.ipynb | Mike-HubGit/pymeasure | 130d49b248e49cfd6a4ddfd19073779af66405e4 | [
"MIT"
] | 2 | 2020-01-27T04:40:48.000Z | 2020-07-17T03:41:16.000Z | 86.17963 | 30,924 | 0.781507 | [
[
[
"# More features for ```Experiment``` class: custom config, `Measurable` parameter, `analysis` function\n\nThis example uses the ```Experiment``` class to create a measurement from a ```procedure``` object, with the ```Measurable``` parameter to automatically generate sorted ```DATA_COLUMNS``` and ```MEASURE``` lists (which is then passed to the ```get_datapoint``` function of the ```Procedure``` class).\n\nThe file ```my_config.ini``` is passed to set custom data saving, logging and matplotlib options.\n\nThe ```analysis``` function is passed as an optional attribute, to produce on-the-fly data analysis for live plotting (only the raw data is saved on disk). To have analysed data save on disk, create an empty ```Measurable``` and update it in the ```measure``` loop as also shown in the example below.",
"_____no_output_____"
]
],
[
[
"%%writefile my_config.ini\n[Filename]\nprefix = my_data_\ndated_folder = 1\ndirectory = data\next = csv\nindex = \ndatetimeformat = %Y%m%d_%H%M%S\n\n[Logging]\nconsole = 1\nconsole_level = WARNING\nfilename = test.log\nfile_level = DEBUG\n\n[matplotlib.rcParams]\naxes.axisbelow = True\naxes.color_cycle = [(0.2980392156862745, 0.4470588235294118, 0.6901960784313725),\n (0.3333333333333333, 0.6588235294117647, 0.40784313725490196),\n (0.7686274509803922, 0.3058823529411765, 0.3215686274509804),\n (0.5058823529411764, 0.4470588235294118, 0.6980392156862745),\n (0.8, 0.7254901960784313, 0.4549019607843137),\n (0.39215686274509803, 0.7098039215686275, 0.803921568627451)]\naxes.edgecolor = 'white'\naxes.facecolor = '#EAEAF2'\naxes.grid = True\naxes.labelcolor = '.15'\naxes.labelsize = 11.0\naxes.linewidth = 0.0\naxes.titlesize = 12.0\nfigure.facecolor = 'white'\nfigure.figsize = [8.0, 5.5]\nfont.sans-serif = ['Arial', 'Liberation Sans', 'Bitstream Vera Sans', 'sans-serif']\ngrid.color = 'white'\ngrid.linestyle = '-'\ngrid.linewidth = 1.0\nimage.cmap = 'Greys'\nlegend.fontsize = 10.0\nlegend.frameon = False\nlegend.numpoints = 1\nlegend.scatterpoints = 1\nlines.linewidth = 1.75\nlines.markeredgewidth = 0.0\nlines.markersize = 7.0\nlines.solid_capstyle = 'round'\npatch.facecolor = (0.2980392156862745, 0.4470588235294118, 0.6901960784313725)\npatch.linewidth = 0.3\ntext.color = '.15'\nxtick.color = '.15'\nxtick.direction = 'out'\nxtick.labelsize = 10.0\nxtick.major.pad = 7.0\nxtick.major.size = 0.0\nxtick.major.width = 1.0\nxtick.minor.size = 0.0\nytick.color = '.15'\nytick.direction = 'out'\nytick.labelsize = 10.0\nytick.major.pad = 7.0\nytick.major.size = 0.0\nytick.major.width = 1.0\nytick.minor.size = 0.0",
"Writing my_config.ini\n"
],
[
"%%writefile procedures.py\nimport random\nfrom time import sleep\n\nimport logging\nlog = logging.getLogger('')\nlog.addHandler(logging.NullHandler())\n\nfrom pymeasure.experiment import Procedure, IntegerParameter, Parameter, FloatParameter, Measurable\n\nclass TestProcedure(Procedure):\n \n iterations = IntegerParameter('Loop Iterations', default=100)\n delay = FloatParameter('Delay Time', units='s', default=0.2)\n seed = Parameter('Random Seed', default='12345')\n iteration = Measurable('Iteration', default = 0)\n random_number = Measurable('Random Number', random.random)\n offset = Measurable('Random Number + 1', default = 0)\n\n def startup(self):\n log.info(\"Setting up random number generator\")\n random.seed(self.seed)\n \n def measure(self):\n data = self.get_datapoint()\n data['Random Number + 1'] = data['Random Number'] + 1\n log.debug(\"Produced numbers: %s\" % data)\n self.emit('results', data)\n self.emit('progress', 100.*self.iteration.value/self.iterations)\n\n def execute(self):\n log.info(\"Starting to generate numbers\")\n for self.iteration.value in range(self.iterations):\n self.measure()\n sleep(self.delay)\n if self.should_stop():\n log.warning(\"Catch stop command in procedure\")\n break\n\n def shutdown(self):\n log.info(\"Finished\")",
"Writing procedures.py\n"
],
[
"%%writefile analysis.py\ndef add_offset(data, offset):\n return data['Random Number'] + offset\n\ndef analyse(data):\n data['Random Number + 2'] = add_offset(data, 2)\n return data",
"Writing analysis.py\n"
],
[
"from pymeasure.experiment import Experiment, config\nfrom procedures import TestProcedure\nfrom analysis import analyse\nconfig.set_file('my_config.ini')\n%matplotlib inline",
"_____no_output_____"
],
[
"procedure = TestProcedure(iterations=10, delay=.1)\nexperiment = Experiment('test', procedure, analyse)",
"_____no_output_____"
],
[
"experiment.start()\nimport pylab as pl\npl.figure(figsize=(10,4))\nax1 = pl.subplot(121)\nexperiment.plot('Iteration', 'Random Number', ax=ax1)\nax2 = pl.subplot(122)\nexperiment.plot('Iteration', 'Random Number + 1', ax=ax2)\nexperiment.plot_live()",
"_____no_output_____"
]
],
[
[
"Analysed data",
"_____no_output_____"
]
],
[
[
"experiment.data",
"_____no_output_____"
]
],
[
[
"Raw data (as saved on disk)",
"_____no_output_____"
]
],
[
[
"experiment.results.data",
"_____no_output_____"
]
],
[
[
"Filename generated by config preferences",
"_____no_output_____"
]
],
[
[
"experiment.filename",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
eca77ddbf84656f6b6e5163a63c4c6b0f687a6df | 31,814 | ipynb | Jupyter Notebook | xinetzone/docs/topic/vta/tutorials/optimize/matrix_multiply_opt.ipynb | daobook/tvm | a0dca482824ba9e18ec914b962ce31fcec0696e2 | [
"Apache-2.0"
] | null | null | null | xinetzone/docs/topic/vta/tutorials/optimize/matrix_multiply_opt.ipynb | daobook/tvm | a0dca482824ba9e18ec914b962ce31fcec0696e2 | [
"Apache-2.0"
] | 1 | 2022-02-16T15:48:57.000Z | 2022-02-16T15:48:57.000Z | xinetzone/docs/topic/vta/tutorials/optimize/matrix_multiply_opt.ipynb | daobook/tvm | a0dca482824ba9e18ec914b962ce31fcec0696e2 | [
"Apache-2.0"
] | null | null | null | 45.841499 | 273 | 0.478154 | [
[
[
"(vta-mat-mult-opt)=\n# ๅๅ็ฉ้ตไนๆณ\n\n\n**ๅไฝ่
**: [Thierry Moreau](https://homes.cs.washington.edu/~moreau/)\n\nๆฌๆ็จๆฆ่ฟฐไบๅฆไฝๅจ VTA ่ฎพ่ฎกไธญไฝฟ็จ TVM ๆๆๅฐๆ ๅฐ็ฉ้ตไนๆณใๅปบ่ฎฎๅ
ๅญฆไน {ref}`basic-mat-mult` ๆ็จใ\n\nๅจๆฌๆ็จไธญ๏ผๅฐๆผ็คบ TVM ่ฐๅบฆไผๅ๏ผๅฐๅคงๅ็ฅ็ป็ฝ็ป็ฎๅญๅ่งฃไธบ่พๅฐ็ๅ๏ผไปฅๅจๆ้็็กฌไปถๅ ้ๅจ่ตๆบๅ
ๅฎ็ฐ่ฎก็ฎใ\n\n## RPC ่ฎพ็ฝฎ\n\n้ฆๅ
็ผ็จ Pynq ็ FPGA ๅนถๆๅปบๅฎ็ RPC ่ฟ่กๆถใ",
"_____no_output_____"
]
],
[
[
"import os\nimport tvm\nfrom tvm import te\nimport vta\nimport numpy as np\nfrom tvm import rpc\nfrom tvm.contrib import utils\nfrom vta.testing import simulator\n\n# Load VTA parameters from the 3rdparty/vta-hw/config/vta_config.json file\nenv = vta.get_env()\n\n# We read the Pynq RPC host IP address and port number from the OS environment\nhost = os.environ.get(\"VTA_RPC_HOST\", \"192.168.2.99\")\nport = int(os.environ.get(\"VTA_RPC_PORT\", \"9091\"))\n\n# We configure both the bitstream and the runtime system on the Pynq\n# to match the VTA configuration specified by the vta_config.json file.\nif env.TARGET == \"pynq\":\n\n # Make sure that TVM was compiled with RPC=1\n assert tvm.runtime.enabled(\"rpc\")\n remote = rpc.connect(host, port)\n\n # Reconfigure the JIT runtime\n vta.reconfig_runtime(remote)\n\n # Program the FPGA with a pre-compiled VTA bitstream.\n # You can program the FPGA with your own custom bitstream\n # by passing the path to the bitstream file instead of None.\n vta.program_fpga(remote, bitstream=None)\n\n# In simulation mode, host the RPC server locally.\nelif env.TARGET in [\"sim\", \"tsim\"]:\n remote = rpc.LocalSession()",
"_____no_output_____"
]
],
[
[
"## ๅฃฐๆ่ฎก็ฎ\n\nไฝไธบ็ฌฌไธๆญฅ๏ผ้่ฆๆ่ฟฐ็ฉ้ตไนๆณ็่ฎก็ฎใๅฐ็ฉ้ตไนๆณๅฎไนไธบๅ
จ่ฟๆฅๅฑไธญ็่ฎก็ฎ๏ผ็ฑๅ
ถ batch sizeใ่พๅ
ฅ้้ๅ่พๅบ้้ๅฎไนใๅฎไปฌๅฟ
้กปๆฏ VTA ๅผ ้ๅฝข็ถ็ๆดๆฐๅ๏ผ`BATCH`ใ`BLOCK_IN` ๅ `BLOCK_OUT`ใ\n\nๅจ็ฉ้ตไนๆณไธญๆทปๅ ้ขๅค็็ฎๅญ๏ผ่ฟไบ็ฎๅญๅฏน่พๅบ่ฟ่กไบ็งปไฝ๏ผshifting๏ผๅๅชๅ๏ผclipping๏ผ๏ผไปฅๆจกๆๅฎ็น็ฉ้ตไนๆณ๏ผ็ถๅๆฏไฟฎๆญฃ็็บฟๆงๆฟๆดปใๅฐๅ
จ่ฟ้ๅฑ็ TVM ๆฐๆฎๆตๅพๆ่ฟฐๅฆไธ๏ผ\n\n```{image} images/fc_dataflow.png\n:align: center\n```\n\nๆญค่ฎก็ฎ่ขซๆ
ๆ่ฎพ็ฝฎๅพๅคชๅคง๏ผไปฅ่ณไบไธ่ฝไธๆฌกๅ
จ้จๆพๅ
ฅ VTA ็ on-chip bufferใๅ ๆญค๏ผๅจ่ฐๅบฆ้ถๆฎต๏ผๅฐไพ้ ่ฎก็ฎ้ปๅก็ญ็ฅๅฐ่ฎก็ฎๅ่งฃไธบๅฏ็ฎก็็ๅใ",
"_____no_output_____"
]
],
[
[
"# Fully connected layer dimensions: 1024 x 1024\nbatch_size = 1\nin_channels = 1024\nout_channels = 1024\nassert batch_size % env.BATCH == 0\nassert in_channels % env.BLOCK_IN == 0\nassert out_channels % env.BLOCK_OUT == 0\n\n# Let's derive the tiled input tensor shapes\ndata_shape = (batch_size // env.BATCH, in_channels // env.BLOCK_IN, env.BATCH, env.BLOCK_IN)\nweight_shape = (\n out_channels // env.BLOCK_OUT,\n in_channels // env.BLOCK_IN,\n env.BLOCK_OUT,\n env.BLOCK_IN,\n)\noutput_shape = (batch_size // env.BATCH, out_channels // env.BLOCK_OUT, env.BATCH, env.BLOCK_OUT)\nnum_ops = in_channels * out_channels * batch_size * 2\n\n# Reduction axes\nic = te.reduce_axis((0, in_channels // env.BLOCK_IN), name=\"ic\")\nic_tns = te.reduce_axis((0, env.BLOCK_IN), name=\"ic_tns\")\n\n# Input placeholder tensors\ndata = te.placeholder(data_shape, name=\"data\", dtype=env.inp_dtype)\nweight = te.placeholder(weight_shape, name=\"weight\", dtype=env.wgt_dtype)\n\n# Copy buffers\ndata_buf = te.compute(data_shape, lambda *i: data(*i), \"data_buf\")\nweight_buf = te.compute(weight_shape, lambda *i: weight(*i), \"weight_buf\")\n\n# Declare matrix multiply computation\nres_gemm = te.compute(\n output_shape,\n lambda bo, co, bi, ci: te.sum(\n data_buf[bo, ic, bi, ic_tns].astype(env.acc_dtype)\n * weight_buf[co, ic, ci, ic_tns].astype(env.acc_dtype),\n axis=[ic, ic_tns],\n ),\n name=\"res_gem\",\n)\n\n# Add shift stage for fix-point normalization\nres_shr = te.compute(output_shape, lambda *i: res_gemm(*i) >> env.INP_WIDTH, name=\"res_shr\")\n\n# Apply clipping between (0, input max value)\ninp_max = (1 << (env.INP_WIDTH - 1)) - 1\nres_max = te.compute(output_shape, lambda *i: tvm.te.max(res_shr(*i), 0), \"res_max\")\nres_min = te.compute(output_shape, lambda *i: tvm.te.min(res_max(*i), inp_max), \"res_min\")\n\n# Apply typecast to input data type before sending results back\nres = te.compute(output_shape, lambda *i: res_min(*i).astype(env.inp_dtype), name=\"res\")",
"_____no_output_____"
]
],
[
[
"## ่ฐๅบฆ่ฎก็ฎ\n\nๆฅ็ไธ็ปๅฟ
่ฆ็่ฐๅบฆๅๆข๏ผไปฅๆๆ็ๆนๅผๅฐ็ฉ้ตไนๆณๆ ๅฐๅฐ VTAใ่ฟไบๅ
ๆฌ๏ผ\n\n- ๅๅ่ฎก็ฎ๏ผComputation blocking๏ผ\n- Lowering ๅฐ VTA ็กฌไปถ intrinsics",
"_____no_output_____"
]
],
[
[
"# Create TVM schedule\ns = te.create_schedule(res.op)\n# Let's look at the default TVM schedule\nprint(tvm.lower(s, [data, weight, res], simple_mode=True))",
"@main = primfn(data_1: handle, weight_1: handle, res_1: handle) -> ()\n attr = {\"from_legacy_te_schedule\": True, \"global_symbol\": \"main\", \"tir.noalias\": True}\n buffers = {data: Buffer(data_2: Pointer(int8), int8, [1024], []),\n weight: Buffer(weight_2: Pointer(int8), int8, [1048576], []),\n res: Buffer(res_2: Pointer(int8), int8, [1024], [])}\n buffer_map = {data_1: data, weight_1: weight, res_1: res}\n preflattened_buffer_map = {data_1: data_3: Buffer(data_2, int8, [1, 64, 1, 16], []), weight_1: weight_3: Buffer(weight_2, int8, [64, 64, 16, 16], []), res_1: res_3: Buffer(res_2, int8, [1, 64, 1, 16], [])} {\n allocate(data_buf: Pointer(global int8), int8, [1024]), storage_scope = global;\n allocate(weight_buf: Pointer(global int8), int8, [1048576]), storage_scope = global;\n allocate(res_gem: Pointer(global int32), int32, [1024]), storage_scope = global {\n for (i1: int32, 0, 64) {\n for (i3: int32, 0, 16) {\n let cse_var_1: int32 = ((i1*16) + i3)\n data_buf_1: Buffer(data_buf, int8, [1024], [])[cse_var_1] = data[cse_var_1]\n }\n }\n for (i0: int32, 0, 64) {\n for (i1_1: int32, 0, 64) {\n for (i2: int32, 0, 16) {\n for (i3_1: int32, 0, 16) {\n let cse_var_2: int32 = ((((i0*16384) + (i1_1*256)) + (i2*16)) + i3_1)\n weight_buf_1: Buffer(weight_buf, int8, [1048576], [])[cse_var_2] = weight[cse_var_2]\n }\n }\n }\n }\n for (co: int32, 0, 64) {\n for (ci: int32, 0, 16) {\n res_gem_1: Buffer(res_gem, int32, [1024], [])[((co*16) + ci)] = 0\n for (ic: int32, 0, 64) {\n for (ic_tns: int32, 0, 16) {\n let cse_var_3: int32 = ((co*16) + ci)\n res_gem_1[cse_var_3] = (res_gem_1[cse_var_3] + (cast(int32, data_buf_1[((ic*16) + ic_tns)])*cast(int32, weight_buf_1[((((co*16384) + (ic*256)) + (ci*16)) + ic_tns)])))\n }\n }\n }\n }\n for (i1_2: int32, 0, 64) {\n for (i3_2: int32, 0, 16) {\n let cse_var_4: int32 = ((i1_2*16) + i3_2)\n res_gem_2: Buffer(res_gem, int32, [1024], [])[cse_var_4] = @tir.shift_right(res_gem_1[cse_var_4], 8, dtype=int32)\n }\n }\n for (i1_3: int32, 0, 
64) {\n for (i3_3: int32, 0, 16) {\n let cse_var_5: int32 = ((i1_3*16) + i3_3)\n res_gem_3: Buffer(res_gem, int32, [1024], [])[cse_var_5] = max(res_gem_2[cse_var_5], 0)\n }\n }\n for (i1_4: int32, 0, 64) {\n for (i3_4: int32, 0, 16) {\n let cse_var_6: int32 = ((i1_4*16) + i3_4)\n res_gem_4: Buffer(res_gem, int32, [1024], [])[cse_var_6] = min(res_gem_3[cse_var_6], 127)\n }\n }\n for (i1_5: int32, 0, 64) {\n for (i3_5: int32, 0, 16) {\n let cse_var_7: int32 = ((i1_5*16) + i3_5)\n res[cse_var_7] = cast(int8, res_gem_4[cse_var_7])\n }\n }\n }\n}\n\n\n"
]
],
[
[
"## ๅๅ่ฎก็ฎ\n\nๅจ้ป่ฎคๆ
ๅตไธ๏ผ็ฉ้ตไนๆณๅฏนไบๆฟๆดปๆๆ้ๆฅ่ฏดๅคชๅคงไบ๏ผๆ ๆณไธๆฌกๆง้ๅบ VTA ็ on-chip bufferใๅฐ (1, 1024)ร(1024, 1024) ็ฉ้ตไนๆณๅๆๆดๅฐ็ (1, 256) ร (256, 256) ็ฉ้ตไนๆณ๏ผ่ฟๆ ทไธญ้ดๅผ ้ๅฐฑๅฏไปฅ่ฃ
่ฟๅ ้ๅจ็ on-chip SRAM ไธญใ่ฟ็งๆนๆณ็ฑปไผผไบๅฐๅๅๆๆฏๅบ็จไบ CPU ๅ GPU๏ผไปฅๆ้ซ็ผๅญๅฝไธญ็๏ผcache hit rate๏ผใ\n\nๆฒฟ็ๆฏไธช่ฝดๆง่กๅๅ๏ผbatch ่ฝดไธๅๅฝฑๅ๏ผๅ ไธบๆญฃๅจๆง่กๅ batch ๆจ็๏ผใไนไฟๆๆๅ
ไพง็ tensorization ่ฝดไธๅ๏ผไปฅไพฟ TVM ่ฝๅค่ฟ่กๆจกๅผๅน้
็ tensorizationใๅจไธ้ข็ๅพ่กจไธญๅฑ็คบไบๅๅๅจ่ฎก็ฎ่ฐๅบฆไธ็็ปๆ๏ผ\n\n```{image} images/blocking.png\n:align: center\n:width: 480px\n```\n\n````{admonition} ๅพช็ฏๅๅฒ๏ผsplitting๏ผๅ้ๆฐๆๅบ๏ผreordering๏ผๅ็ไปฃ็ ็ญไปทไบไธ้ข็ไผชไปฃ็ ใๅฟฝ็ฅ batch ่ฝด๏ผๅ ไธบๅจ่ฟไธชไพๅญไธญๅชๆง่กๅ batch ๆจๆญ๏ผ\n:class: alert alert-info\n```c\nfor (int oc_out = 0; oc_out < 4; ++oc_out) {\n // Initialization loop\n for (int oc_inn = 0; oc_inn < 16; ++oc_inn) {\n for (int oc_tns = 0; oc_tns < 16; ++oc_tns) {\n int j = (oc_out * 16 + oc_inn) * 16 + oc_tns;\n C[0][j] = 0;\n }\n }\n for (int ic_out = 0; ic_out < 4; ++ic_out) {\n // Block loop\n for (int oc_inn = 0; oc_inn < 16; ++oc_inn) {\n for (int ic_inn = 0; ic_inn < 16; ++ic_inn) {\n // Tensorization loop\n for (int oc_tns = 0; oc_tns < 16; ++oc_tns) {\n for (int ic_tns = 0; ic_tns < 16; ++ic_tns) {\n int i = (ic_out * 16 + ic_inn) * 16 + ic_tns;\n int j = (oc_out * 16 + oc_inn) * 16 + oc_tns;\n C[0][i] = C[0][i] + A[0][i] * B[j][i];\n }\n }\n }\n }\n }\n }\n}\n```\n````",
"_____no_output_____"
]
],
[
[
"# Let's define tiling sizes (expressed in multiples of VTA tensor shape size)\nb_block = 1 // env.BATCH\ni_block = 256 // env.BLOCK_IN\no_block = 256 // env.BLOCK_OUT\n\n# Tile the output tensor along the batch and output channel dimensions\n# (since by default we are doing single batch inference, the split along\n# the batch dimension has no effect)\nb, oc, b_tns, oc_tns = s[res].op.axis\nb_out, b_inn = s[res].split(b, b_block)\noc_out, oc_inn = s[res].split(oc, o_block)\ns[res].reorder(b_out, oc_out, b_inn, oc_inn)\n\n# Move intermediate computation into each output compute tile\ns[res_gemm].compute_at(s[res], oc_out)\ns[res_shr].compute_at(s[res], oc_out)\ns[res_max].compute_at(s[res], oc_out)\ns[res_min].compute_at(s[res], oc_out)\n\n# Apply additional loop split along reduction axis (input channel)\nb_inn, oc_inn, b_tns, oc_tns = s[res_gemm].op.axis\nic_out, ic_inn = s[res_gemm].split(ic, i_block)\n\n# Reorder axes. We move the ic_out axis all the way out of the GEMM\n# loop to block along the reduction axis\ns[res_gemm].reorder(ic_out, b_inn, oc_inn, ic_inn, b_tns, oc_tns, ic_tns)\n\n# Let's look at the current TVM schedule after blocking\nprint(tvm.lower(s, [data, weight, res], simple_mode=True))",
"@main = primfn(data_1: handle, weight_1: handle, res_1: handle) -> ()\n attr = {\"from_legacy_te_schedule\": True, \"global_symbol\": \"main\", \"tir.noalias\": True}\n buffers = {data: Buffer(data_2: Pointer(int8), int8, [1024], []),\n weight: Buffer(weight_2: Pointer(int8), int8, [1048576], []),\n res: Buffer(res_2: Pointer(int8), int8, [1024], [])}\n buffer_map = {data_1: data, weight_1: weight, res_1: res}\n preflattened_buffer_map = {data_1: data_3: Buffer(data_2, int8, [1, 64, 1, 16], []), weight_1: weight_3: Buffer(weight_2, int8, [64, 64, 16, 16], []), res_1: res_3: Buffer(res_2, int8, [1, 64, 1, 16], [])} {\n allocate(data_buf: Pointer(global int8), int8, [1024]), storage_scope = global;\n allocate(weight_buf: Pointer(global int8), int8, [1048576]), storage_scope = global;\n allocate(res_gem: Pointer(global int32), int32, [256]), storage_scope = global {\n for (i1: int32, 0, 64) {\n for (i3: int32, 0, 16) {\n let cse_var_1: int32 = ((i1*16) + i3)\n data_buf_1: Buffer(data_buf, int8, [1024], [])[cse_var_1] = data[cse_var_1]\n }\n }\n for (i0: int32, 0, 64) {\n for (i1_1: int32, 0, 64) {\n for (i2: int32, 0, 16) {\n for (i3_1: int32, 0, 16) {\n let cse_var_2: int32 = ((((i0*16384) + (i1_1*256)) + (i2*16)) + i3_1)\n weight_buf_1: Buffer(weight_buf, int8, [1048576], [])[cse_var_2] = weight[cse_var_2]\n }\n }\n }\n }\n for (i1.outer: int32, 0, 4) {\n for (co.init: int32, 0, 16) {\n for (ci.init: int32, 0, 16) {\n res_gem_1: Buffer(res_gem, int32, [256], [])[((co.init*16) + ci.init)] = 0\n }\n }\n for (ic.outer: int32, 0, 4) {\n for (co: int32, 0, 16) {\n for (ic.inner: int32, 0, 16) {\n for (ci: int32, 0, 16) {\n for (ic_tns: int32, 0, 16) {\n let cse_var_3: int32 = ((co*16) + ci)\n res_gem_1[cse_var_3] = (res_gem_1[cse_var_3] + (cast(int32, data_buf_1[(((ic.outer*256) + (ic.inner*16)) + ic_tns)])*cast(int32, weight_buf_1[((((((i1.outer*262144) + (co*16384)) + (ic.outer*4096)) + (ic.inner*256)) + (ci*16)) + ic_tns)])))\n }\n }\n }\n }\n }\n for (i1_2: int32, 
0, 16) {\n for (i3_2: int32, 0, 16) {\n let cse_var_4: int32 = ((i1_2*16) + i3_2)\n res_gem_2: Buffer(res_gem, int32, [256], [])[cse_var_4] = @tir.shift_right(res_gem_1[cse_var_4], 8, dtype=int32)\n }\n }\n for (i1_3: int32, 0, 16) {\n for (i3_3: int32, 0, 16) {\n let cse_var_5: int32 = ((i1_3*16) + i3_3)\n res_gem_3: Buffer(res_gem, int32, [256], [])[cse_var_5] = max(res_gem_2[cse_var_5], 0)\n }\n }\n for (i1_4: int32, 0, 16) {\n for (i3_4: int32, 0, 16) {\n let cse_var_6: int32 = ((i1_4*16) + i3_4)\n res_gem_4: Buffer(res_gem, int32, [256], [])[cse_var_6] = min(res_gem_3[cse_var_6], 127)\n }\n }\n for (i1.inner: int32, 0, 16) {\n for (i3_5: int32, 0, 16) {\n let cse_var_7: int32 = (i1.inner*16)\n res[(((i1.outer*256) + cse_var_7) + i3_5)] = cast(int8, res_gem_4[(cse_var_7 + i3_5)])\n }\n }\n }\n }\n}\n\n\n"
]
],
[
[
"### lowering ๅคๅถๅฐ DMA ไผ ่พ\n\nๆฅไธๆฅ๏ผๅฐ buffer ไฝ็จๅ่ฎพ็ฝฎไธบ็ธๅบ็ on-chip VTA SRAM bufferใๅฐ load ๅพช็ฏ็งปๅจๅฐ็ฉ้ตไนๆณ่ฎก็ฎๅพช็ฏไธญ๏ผไปฅไฝฟๅฎไปฌ้ๅไบ on-chip SRAM bufferใๆๅ๏ผ็จ DMA ๅคๅถๅฎ็จ็จๅบๅฏน load/store ๅพช็ฏๅค่ฝด่ฟ่กๆณจ่งฃ๏ผไปฅๅจ VTA ไธๆง่กๆน้ๅ
ๅญไผ ่พใ",
"_____no_output_____"
]
],
[
[
"# Set scope of SRAM buffers\ns[data_buf].set_scope(env.inp_scope)\ns[weight_buf].set_scope(env.wgt_scope)\ns[res_gemm].set_scope(env.acc_scope)\ns[res_shr].set_scope(env.acc_scope)\ns[res_min].set_scope(env.acc_scope)\ns[res_max].set_scope(env.acc_scope)\n\n# Block data and weight cache reads\ns[data_buf].compute_at(s[res_gemm], ic_out)\ns[weight_buf].compute_at(s[res_gemm], ic_out)\n\n# Use DMA copy pragma on DRAM->SRAM operations\ns[data_buf].pragma(s[data_buf].op.axis[0], env.dma_copy)\ns[weight_buf].pragma(s[weight_buf].op.axis[0], env.dma_copy)\n\n# Use DMA copy pragma on SRAM->DRAM operation\n# (this implies that these copies should be performed along b_inn,\n# or result axis 2)\ns[res].pragma(s[res].op.axis[2], env.dma_copy)",
"_____no_output_____"
]
],
[
[
"### Lowering ่ฎก็ฎๅฐ VTA Compute Intrinsics\n\nๆๅ้ถๆฎตๆฏ้่ฟๅฐ็ฉ้ตไนๆณๆ ๅฐๅฐๅผ ้ intrinsics๏ผๅฐ shift ๆ ๅฐๅฐ็ข้ ALU๏ผไป่ๅฐ่ฎก็ฎๅพช็ฏ lowering ๅฐ VTA ็กฌไปถ intrinsicsใ",
"_____no_output_____"
]
],
[
[
"# Apply tensorization over the batch tensor tile axis\ns[res_gemm].tensorize(b_tns, env.gemm)\n\n# Add an ALU pragma over the shift and clipping operations\ns[res_shr].pragma(s[res_shr].op.axis[0], env.alu)\ns[res_min].pragma(s[res_min].op.axis[0], env.alu)\ns[res_max].pragma(s[res_max].op.axis[0], env.alu)\n\n# Let's look at the final lowered TVM schedule after lowering memory\n# loads/stores down to DMA copy intrinsics, and the computation down to\n# VTA compute intrinsics.\nprint(vta.lower(s, [data, weight, res], simple_mode=True))",
"@main = primfn(data_1: handle, weight_1: handle, res_1: handle) -> ()\n attr = {\"from_legacy_te_schedule\": True, \"global_symbol\": \"main\", \"tir.noalias\": True}\n buffers = {data: Buffer(data_2: Pointer(int8), int8, [1024], []),\n weight: Buffer(weight_2: Pointer(int8), int8, [1048576], []),\n res: Buffer(res_2: Pointer(int8), int8, [1024], [])}\n buffer_map = {data_1: data, weight_1: weight, res_1: res}\n preflattened_buffer_map = {data_1: data_3: Buffer(data_2, int8, [1, 64, 1, 16], []), weight_1: weight_3: Buffer(weight_2, int8, [64, 64, 16, 16], []), res_1: res_3: Buffer(res_2, int8, [1, 64, 1, 16], [])} {\n @tir.vta.coproc_dep_push(3, 2, dtype=int32)\n for (i1.outer: int32, 0, 4) {\n attr [IterVar(vta: int32, (nullptr), \"ThreadIndex\", \"vta\")] \"coproc_scope\" = 2 {\n @tir.vta.coproc_dep_pop(3, 2, dtype=int32)\n attr [IterVar(vta, (nullptr), \"ThreadIndex\", \"vta\")] \"coproc_uop_scope\" = \"VTAPushGEMMOp\" {\n @tir.call_extern(\"VTAUopLoopBegin\", 16, 1, 0, 0, dtype=int32)\n @tir.vta.uop_push(0, 1, 0, 0, 0, 0, 0, 0, dtype=int32)\n @tir.call_extern(\"VTAUopLoopEnd\", dtype=int32)\n }\n @tir.vta.coproc_dep_push(2, 1, dtype=int32)\n }\n for (ic.outer: int32, 0, 4) {\n let cse_var_1: int32 = (ic.outer*16)\n {\n attr [IterVar(vta, (nullptr), \"ThreadIndex\", \"vta\")] \"coproc_scope\" = 1 {\n @tir.vta.coproc_dep_pop(2, 1, dtype=int32)\n @tir.call_extern(\"VTALoadBuffer2D\", @tir.tvm_thread_context(@tir.vta.command_handle(, dtype=handle), dtype=handle), data_2, cse_var_1, 16, 1, 16, 0, 0, 0, 0, 0, 2, dtype=int32)\n @tir.call_extern(\"VTALoadBuffer2D\", @tir.tvm_thread_context(@tir.vta.command_handle(, dtype=handle), dtype=handle), weight_2, ((i1.outer*1024) + cse_var_1), 16, 16, 64, 0, 0, 0, 0, 0, 1, dtype=int32)\n @tir.vta.coproc_dep_push(1, 2, dtype=int32)\n }\n attr [IterVar(vta, (nullptr), \"ThreadIndex\", \"vta\")] \"coproc_scope\" = 2 {\n @tir.vta.coproc_dep_pop(1, 2, dtype=int32)\n attr [IterVar(vta, (nullptr), \"ThreadIndex\", \"vta\")] 
\"coproc_uop_scope\" = \"VTAPushGEMMOp\" {\n @tir.call_extern(\"VTAUopLoopBegin\", 16, 1, 0, 16, dtype=int32)\n @tir.call_extern(\"VTAUopLoopBegin\", 16, 0, 1, 1, dtype=int32)\n @tir.vta.uop_push(0, 0, 0, 0, 0, 0, 0, 0, dtype=int32)\n @tir.call_extern(\"VTAUopLoopEnd\", dtype=int32)\n @tir.call_extern(\"VTAUopLoopEnd\", dtype=int32)\n }\n @tir.vta.coproc_dep_push(2, 1, dtype=int32)\n }\n }\n }\n @tir.vta.coproc_dep_pop(2, 1, dtype=int32)\n attr [IterVar(vta, (nullptr), \"ThreadIndex\", \"vta\")] \"coproc_scope\" = 2 {\n attr [IterVar(vta, (nullptr), \"ThreadIndex\", \"vta\")] \"coproc_uop_scope\" = \"VTAPushALUOp\" {\n @tir.call_extern(\"VTAUopLoopBegin\", 16, 1, 1, 0, dtype=int32)\n @tir.vta.uop_push(1, 0, 0, 0, 0, 3, 1, 8, dtype=int32)\n @tir.call_extern(\"VTAUopLoopEnd\", dtype=int32)\n }\n attr [IterVar(vta, (nullptr), \"ThreadIndex\", \"vta\")] \"coproc_uop_scope\" = \"VTAPushALUOp\" {\n @tir.call_extern(\"VTAUopLoopBegin\", 16, 1, 1, 0, dtype=int32)\n @tir.vta.uop_push(1, 0, 0, 0, 0, 1, 1, 0, dtype=int32)\n @tir.call_extern(\"VTAUopLoopEnd\", dtype=int32)\n }\n attr [IterVar(vta, (nullptr), \"ThreadIndex\", \"vta\")] \"coproc_uop_scope\" = \"VTAPushALUOp\" {\n @tir.call_extern(\"VTAUopLoopBegin\", 16, 1, 1, 0, dtype=int32)\n @tir.vta.uop_push(1, 0, 0, 0, 0, 0, 1, 127, dtype=int32)\n @tir.call_extern(\"VTAUopLoopEnd\", dtype=int32)\n }\n @tir.vta.coproc_dep_push(2, 3, dtype=int32)\n }\n attr [IterVar(vta, (nullptr), \"ThreadIndex\", \"vta\")] \"coproc_scope\" = 3 {\n @tir.vta.coproc_dep_pop(2, 3, dtype=int32)\n for (i1.inner: int32, 0, 16) {\n @tir.call_extern(\"VTAStoreBuffer2D\", @tir.tvm_thread_context(@tir.vta.command_handle(, dtype=handle), dtype=handle), i1.inner, 4, res_2, ((i1.outer*16) + i1.inner), 1, 1, 1, dtype=int32)\n }\n @tir.vta.coproc_dep_push(3, 2, dtype=int32)\n }\n }\n @tir.vta.coproc_sync(, dtype=int32)\n @tir.vta.coproc_dep_pop(3, 2, dtype=int32)\n}\n\n\n"
]
],
[
[
"## TVM ่ฎก็ฎๅ้ช่ฏ\n\nๅจๆๅฎ่ฐๅบฆไนๅ๏ผๅฏไปฅๅฐๅ
ถ็ผ่ฏไธบ TVM ๅฝๆฐใไฟๅญๆจกๅ๏ผ่ฟๆ ทๅฐฑๅฏไปฅ้่ฟ RPC ๅ้ๅฎใ่ฟ่ก่ฏฅๅฝๆฐๅนถๅฏน numpy ๅฎ็ฐ่ฟ่ก้ช่ฏ๏ผไปฅ็กฎไฟๅ
ถๆญฃ็กฎๆงใ",
"_____no_output_____"
]
],
[
[
"# Compile the TVM module\nmy_gemm = vta.build(\n s, [data, weight, res], tvm.target.Target(\"ext_dev\", host=env.target_host), name=\"my_gemm\"\n)\ntemp = utils.tempdir()\nmy_gemm.save(temp.relpath(\"gemm.o\"))\nremote.upload(temp.relpath(\"gemm.o\"))\nf = remote.load_module(\"gemm.o\")\n\n# Get the remote device context\nctx = remote.ext_dev(0)\n\n# Initialize the data and weight arrays randomly in the int range of (-128, 128]\ndata_np = np.random.randint(-128, 128, size=(batch_size, in_channels)).astype(data.dtype)\nweight_np = np.random.randint(-128, 128, size=(out_channels, in_channels)).astype(weight.dtype)\n\n# Apply packing to the data and weight arrays from a 2D to a 4D packed layout\ndata_packed = data_np.reshape(\n batch_size // env.BATCH, env.BATCH, in_channels // env.BLOCK_IN, env.BLOCK_IN\n).transpose((0, 2, 1, 3))\nweight_packed = weight_np.reshape(\n out_channels // env.BLOCK_OUT, env.BLOCK_OUT, in_channels // env.BLOCK_IN, env.BLOCK_IN\n).transpose((0, 2, 1, 3))\n\n# Format the input/output arrays with tvm.nd.array to the DLPack standard\ndata_nd = tvm.nd.array(data_packed, ctx)\nweight_nd = tvm.nd.array(weight_packed, ctx)\nres_nd = tvm.nd.array(np.zeros(output_shape).astype(res.dtype), ctx)\n\n# Clear stats\nif env.TARGET in [\"sim\", \"tsim\"]:\n simulator.clear_stats()\n\n# Invoke the module to perform the computation\nf(data_nd, weight_nd, res_nd)\n\n# Verify against numpy implementation\nres_ref = np.dot(data_np.astype(env.acc_dtype), weight_np.T.astype(env.acc_dtype))\nres_ref = res_ref >> env.INP_WIDTH\nres_ref = np.clip(res_ref, 0, inp_max)\nres_ref = res_ref.astype(res.dtype)\nres_ref = res_ref.reshape(\n batch_size // env.BATCH, env.BATCH, out_channels // env.BLOCK_OUT, env.BLOCK_OUT\n).transpose((0, 2, 1, 3))\nnp.testing.assert_equal(res_ref, res_nd.numpy())\n\n# Print stats\nif env.TARGET in [\"sim\", \"tsim\"]:\n sim_stats = simulator.stats()\n print(\"Execution statistics:\")\n for k, v in sim_stats.items():\n print(\"\\t{:<16}: 
{:>16}\".format(k, v))\n\nprint(\"Successful blocked matrix multiply test!\")",
"[21:37:07] /media/pc/data/4tb/lxw/books/tvm/src/tir/transforms/arg_binder.cc:95: Warning: Trying to bind buffer to another one with lower alignment requirement required_alignment=256, provided_alignment=128\n"
]
],
[
[
"## ๅฐ็ป\n\nๆฌๆ็จๆผ็คบไบ TVM ่ฐๅบฆๅ่ฏญๅฆไฝไธบ็ฉ้ตไนๆณ็คบไพๅฎ็ฐๅๅ่ฎก็ฎใ่ฟๅ
่ฎธๅฐไปปๆๅคง็่ฎก็ฎๆ ๅฐๅฐๆ้็็กฌไปถๅ ้ๅจ่ตๆบไธใ",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
eca78ae7c93511dba6f3d7d0d6d81fb2f4a02e0e | 4,150 | ipynb | Jupyter Notebook | zad1.ipynb | StPluto/CP7 | 365a8dc275f487c78f79d164d520fad0100fccaa | [
"MIT"
] | null | null | null | zad1.ipynb | StPluto/CP7 | 365a8dc275f487c78f79d164d520fad0100fccaa | [
"MIT"
] | null | null | null | zad1.ipynb | StPluto/CP7 | 365a8dc275f487c78f79d164d520fad0100fccaa | [
"MIT"
] | null | null | null | 20.44335 | 182 | 0.443614 | [
[
[
"<p>Cะพะทะดะฐะนัะต ะพะฑัะตะบั Series, ัะฒัะทะฐะฒ ะตะณะพ ั ะฟะตัะตะผะตะฝะฝะพะน school , ะธ ะฝะฐะฟะพะปะฝะธัะต ะดะฐะฝะฝัะผะธ, ะบะพัะพััะต ะฑั ะพััะฐะถะฐะปะธ ะบะพะปะธัะตััะฒะพ ััะฐัะธั
ัั ะฒ ัะฐะทะฝัั
ะบะปะฐััะฐั
(1ะฐ, 1ะฑ, 2ะฑ, 6ะฐ, 7ะฒ ะธ ั. ะฟ.).</p> \n<p>ะะฝะตัะธัะต ะธะทะผะตะฝะตะฝะธั ะฒ ะพะฑัะตะบั Series ัะพะณะปะฐัะฝะพ ัะปะตะดัััะตะผั:</p> \n<ol>\n <li>ะฒ ะพะดะฝะพะผ ะธะท ะบะปะฐััะพะฒ ะธะทะผะตะฝะธะปะพัั ะบะพะปะธัะตััะฒะพ ััะฐัะธั
ัั, \n <li>ะฒ ัะบะพะปะต ะฟะพัะฒะธะปัั ะฝะพะฒัะน ะบะปะฐัั, \n <li>ะฒ ัะบะพะปะต ะฑัะป ัะฐััะพัะผะธัะพะฒะฐะฝ (ัะดะฐะปะตะฝ) ะดััะณะพะน ะบะปะฐัั.\n</ol>\n<p>ะััะธัะปะธัะต ะพะฑัะตะต ะบะพะปะธัะตััะฒะพ ััะฐัะธั
ัั ะฒ ัะบะพะปะต.</p>\n",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np",
"_____no_output_____"
],
[
"school = {'1ะฐ':19, '1ะฑ':18, '2ะฑ':21,'6ะฐ':16, '7ะฒ':15, '11':6}\nschool_ser = pd.Series(school)\nschool_ser.index.name = 'classes'\nprint(school_ser)",
"classes\n1ะฐ 19\n1ะฑ 18\n2ะฑ 21\n6ะฐ 16\n7ะฒ 15\n11 6\ndtype: int64\n"
]
],
[
[
"<p><b>1.</b> ะ ะบะปะฐัั 2ะฑ ะดะพะฑะฐะฒะธะปะธ ััะตั
ััะตะฝะธะบะพะฒ</p>",
"_____no_output_____"
]
],
[
[
"school_ser['2ะฑ'] += 3\nprint(school_ser)",
"classes\n1ะฐ 19\n1ะฑ 18\n2ะฑ 24\n6ะฐ 16\n7ะฒ 15\n11 6\ndtype: int64\n"
]
],
[
[
"<p><b>2.</b> ะ ัะบะพะปะต ะฟะพัะฒะธะปัั ะฝะพะฒัะน 8ะฐ ะบะปะฐัั</p>",
"_____no_output_____"
]
],
[
[
"school_ser = school_ser.append(pd.Series({'8ะฐ':17}))\nprint(school_ser)",
"1ะฐ 19\n1ะฑ 18\n2ะฑ 24\n6ะฐ 16\n7ะฒ 15\n11 6\n8ะฐ 17\ndtype: int64\n"
]
],
[
[
"<p><b>3.</b> ะ ัะบะพะปะต ะฑัะป ัะฐััะพัะผะธัะพะฒะฐะฝ 11 ะบะปะฐัั, ัะฐะบ ะบะฐะบ ะฑัะปะพ ะผะฐะปะพ ะปัะดะตะน</p>",
"_____no_output_____"
]
],
[
[
"del school_ser['11']\nprint(school_ser)",
"1ะฐ 19\n1ะฑ 18\n2ะฑ 24\n6ะฐ 16\n7ะฒ 15\n8ะฐ 17\ndtype: int64\n"
]
],
[
[
"ะะฑัะตะต ะบะพะปะธัะตััะฒะพ ััะฐัะธั
ัั ะฒ ัะบะพะปะต",
"_____no_output_____"
]
],
[
[
"print(school_ser.sum())",
"109\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
eca79e4fe3db118260818e1132189e39c36f71f9 | 553,259 | ipynb | Jupyter Notebook | Neural Networks/Logistic_Regression/Logistic_Regression_from_Scratch_for_cats_vs_non-cats.ipynb | abraarsyed/Deep-Learning-Training | 853dd4fa5d03f5b9e3a36e483f19b9ae037d64d9 | [
"Apache-2.0"
] | 3 | 2019-12-08T15:15:08.000Z | 2020-06-09T02:04:42.000Z | Neural Networks/Logistic_Regression/Logistic_Regression_from_Scratch_for_cats_vs_non-cats.ipynb | abraarsyed/Deep-Learning-Training | 853dd4fa5d03f5b9e3a36e483f19b9ae037d64d9 | [
"Apache-2.0"
] | 16 | 2018-06-13T05:39:53.000Z | 2018-07-03T03:05:13.000Z | Neural Networks/Logistic_Regression/Logistic_Regression_from_Scratch_for_cats_vs_non-cats.ipynb | abraarsyed/Deep-Learning-Training | 853dd4fa5d03f5b9e3a36e483f19b9ae037d64d9 | [
"Apache-2.0"
] | 6 | 2018-06-14T09:22:44.000Z | 2018-08-20T08:36:12.000Z | 66.005607 | 178,420 | 0.642397 | [
[
[
"[View in Colaboratory](https://colab.research.google.com/github/hackintoshrao/Deep-Learning-Training/blob/master/Logistic+Regression+with+a+Neural+Network+mindset+v4.ipynb)",
"_____no_output_____"
]
],
[
[
"!wget https://play.minio.io:9000/rao/train_catvnoncat.h5 -P /tmp/\n!wget https://play.minio.io:9000/rao/test_catvnoncat.h5 -P /tmp/\n!wget https://play.minio.io:9000/rao/my_image.jpg -P /tmp/\n!wget https://play.minio.io:9000/rao/my_image2.jpg -P /tmp/\n",
"--2018-06-24 17:20:12-- https://play.minio.io:9000/rao/train_catvnoncat.h5\nResolving play.minio.io (play.minio.io)... 147.75.201.93\nConnecting to play.minio.io (play.minio.io)|147.75.201.93|:9000... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 2572022 (2.5M) [application/octet-stream]\nSaving to: โ/tmp/train_catvnoncat.h5โ\n\ntrain_catvnoncat.h5 100%[===================>] 2.45M 4.92MB/s in 0.5s \n\n2018-06-24 17:20:12 (4.92 MB/s) - โ/tmp/train_catvnoncat.h5โ saved [2572022/2572022]\n\n--2018-06-24 17:20:14-- https://play.minio.io:9000/rao/test_catvnoncat.h5\nResolving play.minio.io (play.minio.io)... 147.75.201.93\nConnecting to play.minio.io (play.minio.io)|147.75.201.93|:9000... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 616958 (602K) [application/octet-stream]\nSaving to: โ/tmp/test_catvnoncat.h5โ\n\ntest_catvnoncat.h5 100%[===================>] 602.50K 1.67MB/s in 0.4s \n\n2018-06-24 17:20:14 (1.67 MB/s) - โ/tmp/test_catvnoncat.h5โ saved [616958/616958]\n\n--2018-06-24 17:20:15-- https://play.minio.io:9000/rao/my_image.jpg\nResolving play.minio.io (play.minio.io)... 147.75.201.93\nConnecting to play.minio.io (play.minio.io)|147.75.201.93|:9000... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 636273 (621K) [image/jpeg]\nSaving to: โ/tmp/my_image.jpgโ\n\nmy_image.jpg 100%[===================>] 621.36K 1.73MB/s in 0.4s \n\n2018-06-24 17:20:16 (1.73 MB/s) - โ/tmp/my_image.jpgโ saved [636273/636273]\n\n--2018-06-24 17:20:17-- https://play.minio.io:9000/rao/my_image2.jpg\nResolving play.minio.io (play.minio.io)... 147.75.201.93\nConnecting to play.minio.io (play.minio.io)|147.75.201.93|:9000... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 94439 (92K) [image/jpeg]\nSaving to: โ/tmp/my_image2.jpgโ\n\nmy_image2.jpg 100%[===================>] 92.23K --.-KB/s in 0.1s \n\n2018-06-24 17:20:18 (644 KB/s) - โ/tmp/my_image2.jpgโ saved [94439/94439]\n\n"
]
],
[
[
"# Logistic Regression with a Neural Network mindset\n\nWelcome to your first (required) programming assignment! You will build a logistic regression classifier to recognize cats. This assignment will step you through how to do this with a Neural Network mindset, and so will also hone your intuitions about deep learning.\n\n**Instructions:**\n- Do not use loops (for/while) in your code, unless the instructions explicitly ask you to do so.\n\n**You will learn to:**\n- Build the general architecture of a learning algorithm, including:\n - Initializing parameters\n - Calculating the cost function and its gradient\n - Using an optimization algorithm (gradient descent) \n- Gather all three functions above into a main model function, in the right order.",
"_____no_output_____"
],
[
"## 1 - Packages ##\n\nFirst, let's run the cell below to import all the packages that you will need during this assignment. \n- [numpy](www.numpy.org) is the fundamental package for scientific computing with Python.\n- [h5py](http://www.h5py.org) is a common package to interact with a dataset that is stored on an H5 file.\n- [matplotlib](http://matplotlib.org) is a famous library to plot graphs in Python.\n- [PIL](http://www.pythonware.com/products/pil/) and [scipy](https://www.scipy.org/) are used here to test your model with your own picture at the end.",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport h5py\n\n \ndef load_dataset():\n train_dataset = h5py.File('/tmp/train_catvnoncat.h5', \"r\")\n print(\"Train dataset h5 type: \", type(train_dataset))\n print(\"train dataset extract type: \",type(train_dataset[\"train_set_x\"]))\n print(\": Extract type: \", type(train_dataset[\"train_set_x\"][:]))\n \n train_set_x_orig = np.array(train_dataset[\"train_set_x\"][:]) # your train set features\n train_set_y_orig = np.array(train_dataset[\"train_set_y\"][:]) # your train set labels\n print(\"train dataset X shape: \",train_set_x_orig.shape)\n print(\"train dataset Y shape: \",train_set_y_orig.shape)\n \n test_dataset = h5py.File('/tmp/test_catvnoncat.h5', \"r\")\n test_set_x_orig = np.array(test_dataset[\"test_set_x\"][:]) # your test set features\n test_set_y_orig = np.array(test_dataset[\"test_set_y\"][:]) # your test set labels\n\n print(\"Test dataset X shape: \",test_set_x_orig.shape)\n classes = np.array(test_dataset[\"list_classes\"][:]) # the list of classes\n \n print(\"classes type: \", type(classes))\n print(classes)\n \n train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))\n test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))\n \n return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes",
"_____no_output_____"
],
[
"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport h5py\nimport scipy\nfrom PIL import Image\nfrom scipy import ndimage\n\n\n%matplotlib inline",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
],
[
[
"## 2 - Overview of the Problem set ##\n\n**Problem Statement**: You are given a dataset (\"data.h5\") containing:\n - a training set of m_train images labeled as cat (y=1) or non-cat (y=0)\n - a test set of m_test images labeled as cat or non-cat\n - each image is of shape (num_px, num_px, 3) where 3 is for the 3 channels (RGB). Thus, each image is square (height = num_px) and (width = num_px).\n\nYou will build a simple image-recognition algorithm that can correctly classify pictures as cat or non-cat.\n\nLet's get more familiar with the dataset. Load the data by running the following code.",
"_____no_output_____"
]
],
[
[
"# Loading the data (cat/non-cat)\ntrain_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset()",
"Train dataset h5 type: <class 'h5py._hl.files.File'>\ntrain dataset extract type: <class 'h5py._hl.dataset.Dataset'>\n: Extract type: <class 'numpy.ndarray'>\ntrain dataset X shape: (209, 64, 64, 3)\ntrain dataset Y shape: (209,)\nTest dataset X shape: (50, 64, 64, 3)\nclasses type: <class 'numpy.ndarray'>\n[b'non-cat' b'cat']\n"
]
],
[
[
"We added \"_orig\" at the end of image datasets (train and test) because we are going to preprocess them. After preprocessing, we will end up with train_set_x and test_set_x (the labels train_set_y and test_set_y don't need any preprocessing).\n\nEach line of your train_set_x_orig and test_set_x_orig is an array representing an image. You can visualize an example by running the following code. Feel free also to change the `index` value and re-run to see other images. ",
"_____no_output_____"
]
],
[
[
"# Example of a picture\nindex = 25\nprint(test_set_x_orig.shape)\nprint(train_set_y.shape)\nprint(np.squeeze(train_set_y[:, index]))\nprint(type(train_set_y[:, index]))\nplt.imshow(train_set_x_orig[index])\nprint (\"y = \" + str(train_set_y[:, index]) + \", it's a '\" + classes[np.squeeze(train_set_y[:, index])].decode(\"utf-8\") + \"' picture.\")",
"(50, 64, 64, 3)\n(1, 209)\n1\n<class 'numpy.ndarray'>\ny = [1], it's a 'cat' picture.\n"
]
],
[
[
"Many software bugs in deep learning come from having matrix/vector dimensions that don't fit. If you can keep your matrix/vector dimensions straight you will go a long way toward eliminating many bugs. \n\n**Exercise:** Find the values for:\n - m_train (number of training examples)\n - m_test (number of test examples)\n - num_px (= height = width of a training image)\nRemember that `train_set_x_orig` is a numpy-array of shape (m_train, num_px, num_px, 3). For instance, you can access `m_train` by writing `train_set_x_orig.shape[0]`.",
"_____no_output_____"
]
],
[
[
"### START CODE HERE ### (โ 3 lines of code)\nm_train = train_set_x_orig.shape[0]\nm_test = test_set_x_orig.shape[0]\nnum_px = train_set_x_orig.shape[1]\n### END CODE HERE ###\n\nprint (\"Number of training examples: m_train = \" + str(m_train))\nprint (\"Number of testing examples: m_test = \" + str(m_test))\nprint (\"Height/Width of each image: num_px = \" + str(num_px))\nprint (\"Each image is of size: (\" + str(num_px) + \", \" + str(num_px) + \", 3)\")\nprint (\"train_set_x shape: \" + str(train_set_x_orig.shape))\nprint (\"train_set_y shape: \" + str(train_set_y.shape))\nprint (\"test_set_x shape: \" + str(test_set_x_orig.shape))\nprint (\"test_set_y shape: \" + str(test_set_y.shape))",
"Number of training examples: m_train = 209\nNumber of testing examples: m_test = 50\nHeight/Width of each image: num_px = 64\nEach image is of size: (64, 64, 3)\ntrain_set_x shape: (209, 64, 64, 3)\ntrain_set_y shape: (1, 209)\ntest_set_x shape: (50, 64, 64, 3)\ntest_set_y shape: (1, 50)\n"
]
],
[
[
"**Expected Output for m_train, m_test and num_px**: \n<table style=\"width:15%\">\n <tr>\n <td>**m_train**</td>\n <td> 209 </td> \n </tr>\n \n <tr>\n <td>**m_test**</td>\n <td> 50 </td> \n </tr>\n \n <tr>\n <td>**num_px**</td>\n <td> 64 </td> \n </tr>\n \n</table>\n",
"_____no_output_____"
],
[
"For convenience, you should now reshape images of shape (num_px, num_px, 3) in a numpy-array of shape (num_px $*$ num_px $*$ 3, 1). After this, our training (and test) dataset is a numpy-array where each column represents a flattened image. There should be m_train (respectively m_test) columns.\n\n**Exercise:** Reshape the training and test data sets so that images of size (num_px, num_px, 3) are flattened into single vectors of shape (num\\_px $*$ num\\_px $*$ 3, 1).\n\nA trick when you want to flatten a matrix X of shape (a,b,c,d) to a matrix X_flatten of shape (b$*$c$*$d, a) is to use: \n```python\nX_flatten = X.reshape(X.shape[0], -1).T # X.T is the transpose of X\n```",
"_____no_output_____"
]
],
[
[
"# Reshape the training and test examples\n# https://docs.scipy.org/doc/numpy-1.14.0/reference/generated/numpy.reshape.html\n### START CODE HERE ### (โ 2 lines of code)\ntrain_set_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0],-1).T\ntest_set_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0],-1).T\n### END CODE HERE ###\n\nprint (\"train_set_x_flatten shape: \" + str(train_set_x_flatten.shape))\nprint (\"train_set_y shape: \" + str(train_set_y.shape))\nprint (\"test_set_x_flatten shape: \" + str(test_set_x_flatten.shape))\nprint (\"test_set_y shape: \" + str(test_set_y.shape))\nprint (\"sanity check after reshaping: \" + str(train_set_x_flatten[0:5,0]))",
"train_set_x_flatten shape: (12288, 209)\ntrain_set_y shape: (1, 209)\ntest_set_x_flatten shape: (12288, 50)\ntest_set_y shape: (1, 50)\nsanity check after reshaping: [17 31 56 22 33]\n"
]
],
[
[
"**Expected Output**: \n\n<table style=\"width:35%\">\n <tr>\n <td>**train_set_x_flatten shape**</td>\n <td> (12288, 209)</td> \n </tr>\n <tr>\n <td>**train_set_y shape**</td>\n <td>(1, 209)</td> \n </tr>\n <tr>\n <td>**test_set_x_flatten shape**</td>\n <td>(12288, 50)</td> \n </tr>\n <tr>\n <td>**test_set_y shape**</td>\n <td>(1, 50)</td> \n </tr>\n <tr>\n <td>**sanity check after reshaping**</td>\n <td>[17 31 56 22 33]</td> \n </tr>\n</table>",
"_____no_output_____"
],
[
"To represent color images, the red, green and blue channels (RGB) must be specified for each pixel, and so the pixel value is actually a vector of three numbers ranging from 0 to 255.\n\nOne common preprocessing step in machine learning is to center and standardize your dataset, meaning that you substract the mean of the whole numpy array from each example, and then divide each example by the standard deviation of the whole numpy array. But for picture datasets, it is simpler and more convenient and works almost as well to just divide every row of the dataset by 255 (the maximum value of a pixel channel).\n\n<!-- During the training of your model, you're going to multiply weights and add biases to some initial inputs in order to observe neuron activations. Then you backpropogate with the gradients to train the model. But, it is extremely important for each feature to have a similar range such that our gradients don't explode. You will see that more in detail later in the lectures. !--> \n\nLet's standardize our dataset.",
"_____no_output_____"
]
],
[
[
"train_set_x = train_set_x_flatten/255.\ntest_set_x = test_set_x_flatten/255.",
"_____no_output_____"
]
],
[
[
"<font color='blue'>\n**What you need to remember:**\n\nCommon steps for pre-processing a new dataset are:\n- Figure out the dimensions and shapes of the problem (m_train, m_test, num_px, ...)\n- Reshape the datasets such that each example is now a vector of size (num_px \\* num_px \\* 3, 1)\n- \"Standardize\" the data",
"_____no_output_____"
],
[
"## 3 - General Architecture of the learning algorithm ##\n\nIt's time to design a simple algorithm to distinguish cat images from non-cat images.\n\nYou will build a Logistic Regression, using a Neural Network mindset. The following Figure explains why **Logistic Regression is actually a very simple Neural Network!**\n\n<img src=\"images/LogReg_kiank.png\" style=\"width:650px;height:400px;\">\n\n**Mathematical expression of the algorithm**:\n\nFor one example $x^{(i)}$:\n$$z^{(i)} = w^T x^{(i)} + b \\tag{1}$$\n$$\\hat{y}^{(i)} = a^{(i)} = sigmoid(z^{(i)})\\tag{2}$$ \n$$ \\mathcal{L}(a^{(i)}, y^{(i)}) = - y^{(i)} \\log(a^{(i)}) - (1-y^{(i)} ) \\log(1-a^{(i)})\\tag{3}$$\n\nThe cost is then computed by summing over all training examples:\n$$ J = \\frac{1}{m} \\sum_{i=1}^m \\mathcal{L}(a^{(i)}, y^{(i)})\\tag{6}$$\n\n**Key steps**:\nIn this exercise, you will carry out the following steps: \n - Initialize the parameters of the model\n - Learn the parameters for the model by minimizing the cost \n - Use the learned parameters to make predictions (on the test set)\n - Analyse the results and conclude",
"_____no_output_____"
],
[
"## 4 - Building the parts of our algorithm ## \n\nThe main steps for building a Neural Network are:\n1. Define the model structure (such as number of input features) \n2. Initialize the model's parameters\n3. Loop:\n - Calculate current loss (forward propagation)\n - Calculate current gradient (backward propagation)\n - Update parameters (gradient descent)\n\nYou often build 1-3 separately and integrate them into one function we call `model()`.\n\n### 4.1 - Helper functions\n\n**Exercise**: Using your code from \"Python Basics\", implement `sigmoid()`. As you've seen in the figure above, you need to compute $sigmoid( w^T x + b) = \\frac{1}{1 + e^{-(w^T x + b)}}$ to make predictions. Use np.exp().",
"_____no_output_____"
]
],
[
[
"\n\ndef sigmoid(z):\n \"\"\"\n Compute the sigmoid of z\n\n Arguments:\n z -- A scalar or numpy array of any size.\n\n Return:\n s -- sigmoid(z)\n \"\"\"\n\n ### START CODE HERE ### (โ 1 line of code)\n s = 1/(1+np.exp(-z))\n ### END CODE HERE ###\n \n return s",
"_____no_output_____"
],
[
"print (\"sigmoid([0, 2]) = \" + str(sigmoid(np.array([0,2]))))",
"sigmoid([0, 2]) = [0.5 0.88079708]\n"
]
],
[
[
"**Expected Output**: \n\n<table>\n <tr>\n <td>**sigmoid([0, 2])**</td>\n <td> [ 0.5 0.88079708]</td> \n </tr>\n</table>",
"_____no_output_____"
],
[
"### 4.2 - Initializing parameters\n\n**Exercise:** Implement parameter initialization in the cell below. You have to initialize w as a vector of zeros. If you don't know what numpy function to use, look up np.zeros() in the Numpy library's documentation.",
"_____no_output_____"
]
],
[
[
"\n\ndef initialize_with_zeros(dim):\n \"\"\"\n This function creates a vector of zeros of shape (dim, 1) for w and initializes b to 0.\n \n Argument:\n dim -- size of the w vector we want (or number of parameters in this case)\n \n Returns:\n w -- initialized vector of shape (dim, 1)\n b -- initialized scalar (corresponds to the bias)\n \"\"\"\n \n ### START CODE HERE ### (โ 1 line of code)\n w = np.zeros((dim,1))\n b = 0.0\n ### END CODE HERE ###\n\n assert(w.shape == (dim, 1))\n assert(isinstance(b, float) or isinstance(b, int))\n \n return w, b",
"_____no_output_____"
],
[
"dim = 2\nw, b = initialize_with_zeros(dim)\nprint (\"w = \" + str(w))\nprint (\"b = \" + str(b))",
"w = [[0.]\n [0.]]\nb = 0.0\n"
]
],
[
[
"**Expected Output**: \n\n\n<table style=\"width:15%\">\n <tr>\n <td> ** w ** </td>\n <td> [[ 0.]\n [ 0.]] </td>\n </tr>\n <tr>\n <td> ** b ** </td>\n <td> 0 </td>\n </tr>\n</table>\n\nFor image inputs, w will be of shape (num_px $\\times$ num_px $\\times$ 3, 1).",
"_____no_output_____"
],
[
"### 4.3 - Forward and Backward propagation\n\nNow that your parameters are initialized, you can do the \"forward\" and \"backward\" propagation steps for learning the parameters.\n\n**Exercise:** Implement a function `propagate()` that computes the cost function and its gradient.\n\n**Hints**:\n\nForward Propagation:\n- You get X\n- You compute $A = \\sigma(w^T X + b) = (a^{(0)}, a^{(1)}, ..., a^{(m-1)}, a^{(m)})$\n- You calculate the cost function: $J = -\\frac{1}{m}\\sum_{i=1}^{m}y^{(i)}\\log(a^{(i)})+(1-y^{(i)})\\log(1-a^{(i)})$\n\nHere are the two formulas you will be using: \n\n$$ \\frac{\\partial J}{\\partial w} = \\frac{1}{m}X(A-Y)^T\\tag{7}$$\n$$ \\frac{\\partial J}{\\partial b} = \\frac{1}{m} \\sum_{i=1}^m (a^{(i)}-y^{(i)})\\tag{8}$$",
"_____no_output_____"
]
],
[
[
"\n\ndef propagate(w, b, X, Y):\n \"\"\"\n Implement the cost function and its gradient for the propagation explained above\n\n Arguments:\n w -- weights, a numpy array of size (num_px * num_px * 3, 1)\n b -- bias, a scalar\n X -- data of size (num_px * num_px * 3, number of examples)\n Y -- true \"label\" vector (containing 0 if non-cat, 1 if cat) of size (1, number of examples)\n\n Return:\n cost -- negative log-likelihood cost for logistic regression\n dw -- gradient of the loss with respect to w, thus same shape as w\n db -- gradient of the loss with respect to b, thus same shape as b\n \n Tips:\n - Write your code step by step for the propagation. np.log(), np.dot()\n \"\"\"\n \n m = X.shape[1]\n # FORWARD PROPAGATION (FROM X TO COST)\n ### START CODE HERE ### (โ 2 lines of code)\n A = sigmoid(np.dot(w.T,X)+b) # compute activation\n cost = -1/m * np.sum(Y * np.log(A) + (1-Y) * (np.log(1-A))) # compute cost\n ### END CODE HERE ###\n print(\"Cost Shape: \", cost.shape)\n # BACKWARD PROPAGATION (TO FIND GRAD)\n ### START CODE HERE ### (โ 2 lines of code)\n dz= (1/m)*(A - Y)\n dw = np.dot(X,dz.T)\n db = np.sum(dz)\n ### END CODE HERE ###\n\n assert(dw.shape == w.shape)\n assert(db.dtype == float)\n cost = np.squeeze(cost)\n assert(cost.shape == ())\n \n grads = {\"dw\": dw,\n \"db\": db}\n \n return grads, cost",
"_____no_output_____"
],
[
"w, b, X, Y = np.array([[1.],[2.]]), 2., np.array([[1.,2.,-1.],[3.,4.,-3.2]]), np.array([[1,0,1]])\ngrads, cost = propagate(w, b, X, Y)\nprint (\"dw = \" + str(grads[\"dw\"]))\nprint (\"db = \" + str(grads[\"db\"]))\nprint (\"cost = \" + str(cost))",
"Cost Shape: ()\ndw = [[0.99845601]\n [2.39507239]]\ndb = 0.001455578136784208\ncost = 5.801545319394553\n"
]
],
[
[
"**Expected Output**:\n\n<table style=\"width:50%\">\n <tr>\n <td> ** dw ** </td>\n <td> [[ 0.99845601]\n [ 2.39507239]]</td>\n </tr>\n <tr>\n <td> ** db ** </td>\n <td> 0.00145557813678 </td>\n </tr>\n <tr>\n <td> ** cost ** </td>\n <td> 5.801545319394553 </td>\n </tr>\n\n</table>",
"_____no_output_____"
],
[
"### d) Optimization\n- You have initialized your parameters.\n- You are also able to compute a cost function and its gradient.\n- Now, you want to update the parameters using gradient descent.\n\n**Exercise:** Write down the optimization function. The goal is to learn $w$ and $b$ by minimizing the cost function $J$. For a parameter $\\theta$, the update rule is $ \\theta = \\theta - \\alpha \\text{ } d\\theta$, where $\\alpha$ is the learning rate.",
"_____no_output_____"
]
],
[
[
"\n\ndef optimize(w, b, X, Y, num_iterations, learning_rate, print_cost = False):\n \"\"\"\n This function optimizes w and b by running a gradient descent algorithm\n \n Arguments:\n w -- weights, a numpy array of size (num_px * num_px * 3, 1)\n b -- bias, a scalar\n X -- data of shape (num_px * num_px * 3, number of examples)\n Y -- true \"label\" vector (containing 0 if non-cat, 1 if cat), of shape (1, number of examples)\n num_iterations -- number of iterations of the optimization loop\n learning_rate -- learning rate of the gradient descent update rule\n print_cost -- True to print the loss every 100 steps\n \n Returns:\n params -- dictionary containing the weights w and bias b\n grads -- dictionary containing the gradients of the weights and bias with respect to the cost function\n costs -- list of all the costs computed during the optimization, this will be used to plot the learning curve.\n \n Tips:\n You basically need to write down two steps and iterate through them:\n 1) Calculate the cost and the gradient for the current parameters. Use propagate().\n 2) Update the parameters using gradient descent rule for w and b.\n \"\"\"\n \n costs = []\n \n for i in range(num_iterations):\n \n \n # Cost and gradient calculation (โ 1-4 lines of code)\n ### START CODE HERE ### \n grads, cost = propagate(w, b, X, Y)\n ### END CODE HERE ###\n \n # Retrieve derivatives from grads\n dw = grads[\"dw\"]\n db = grads[\"db\"]\n \n # update rule (โ 2 lines of code)\n ### START CODE HERE ###\n w = w - (learning_rate*dw)\n b = b - (learning_rate*db)\n ### END CODE HERE ###\n \n # Record the costs\n if i % 100 == 0:\n costs.append(cost)\n \n # Print the cost every 100 training examples\n if print_cost and i % 100 == 0:\n print (\"Cost after iteration %i: %f\" %(i, cost))\n \n params = {\"w\": w,\n \"b\": b}\n \n grads = {\"dw\": dw,\n \"db\": db}\n \n return params, grads, costs",
"_____no_output_____"
],
[
"params, grads, costs = optimize(w, b, X, Y, num_iterations= 100, learning_rate = 0.009, print_cost = False)\n\nprint (\"w = \" + str(params[\"w\"]))\nprint (\"b = \" + str(params[\"b\"]))\nprint (\"dw = \" + str(grads[\"dw\"]))\nprint (\"db = \" + str(grads[\"db\"]))",
"Cost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nw = [[0.19033591]\n [0.12259159]]\nb = 1.9253598300845747\ndw = [[0.67752042]\n [1.41625495]]\ndb = 0.21919450454067654\n"
]
],
[
[
"**Expected Output**: \n\n<table style=\"width:40%\">\n <tr>\n <td> **w** </td>\n <td>[[ 0.19033591]\n [ 0.12259159]] </td>\n </tr>\n \n <tr>\n <td> **b** </td>\n <td> 1.92535983008 </td>\n </tr>\n <tr>\n <td> **dw** </td>\n <td> [[ 0.67752042]\n [ 1.41625495]] </td>\n </tr>\n <tr>\n <td> **db** </td>\n <td> 0.219194504541 </td>\n </tr>\n\n</table>",
"_____no_output_____"
],
[
"**Exercise:** The previous function will output the learned w and b. We are able to use w and b to predict the labels for a dataset X. Implement the `predict()` function. There is two steps to computing predictions:\n\n1. Calculate $\\hat{Y} = A = \\sigma(w^T X + b)$\n\n2. Convert the entries of a into 0 (if activation <= 0.5) or 1 (if activation > 0.5), stores the predictions in a vector `Y_prediction`. If you wish, you can use an `if`/`else` statement in a `for` loop (though there is also a way to vectorize this). ",
"_____no_output_____"
]
],
[
[
"\n\ndef predict(w, b, X):\n '''\n Predict whether the label is 0 or 1 using learned logistic regression parameters (w, b)\n \n Arguments:\n w -- weights, a numpy array of size (num_px * num_px * 3, 1)\n b -- bias, a scalar\n X -- data of size (num_px * num_px * 3, number of examples)\n \n Returns:\n Y_prediction -- a numpy array (vector) containing all predictions (0/1) for the examples in X\n '''\n \n m = X.shape[1]\n Y_prediction = np.zeros((1,m))\n w = w.reshape(X.shape[0], 1)\n \n # Compute vector \"A\" predicting the probabilities of a cat being present in the picture\n ### START CODE HERE ### (โ 1 line of code)\n A = sigmoid(np.dot(w.T,X)+ b)\n \n ### END CODE HERE ### \n for i in range(A.shape[1]):\n \n # Convert probabilities A[0,i] to actual predictions p[0,i]\n ### START CODE HERE ### (โ 4 lines of code)\n ''''\n x_exp = np.exp(A)\n print(x_exp)\n x_sum = np.sum(x_exp,axis=1,keepdims=True)\n print(x_sum)\n s = np.divide(x_exp,x_sum)\n '''\n \n Y_prediction = 1. * (A > 0.5)\n ### END CODE HERE ###\n \n assert(Y_prediction.shape == (1, m))\n \n return Y_prediction",
"_____no_output_____"
],
[
"w = np.array([[0.1124579],[0.23106775]])\nb = -0.3\nX = np.array([[1.,-1.1,-3.2],[1.2,2.,0.1]])\nprint (\"predictions = \" + str(predict(w, b, X)))",
"predictions = [[1. 1. 0.]]\n"
]
],
[
[
"**Expected Output**: \n\n<table style=\"width:30%\">\n <tr>\n <td>\n **predictions**\n </td>\n <td>\n [[ 1. 1. 0.]]\n </td> \n </tr>\n\n</table>\n",
"_____no_output_____"
],
[
"<font color='blue'>\n**What to remember:**\nYou've implemented several functions that:\n- Initialize (w,b)\n- Optimize the loss iteratively to learn parameters (w,b):\n - computing the cost and its gradient \n - updating the parameters using gradient descent\n- Use the learned (w,b) to predict the labels for a given set of examples",
"_____no_output_____"
],
[
"## 5 - Merge all functions into a model ##\n\nYou will now see how the overall model is structured by putting together all the building blocks (functions implemented in the previous parts) together, in the right order.\n\n**Exercise:** Implement the model function. Use the following notation:\n - Y_prediction for your predictions on the test set\n - Y_prediction_train for your predictions on the train set\n - w, costs, grads for the outputs of optimize()",
"_____no_output_____"
]
],
[
[
"\n\ndef model(X_train, Y_train, X_test, Y_test, num_iterations = 2000, learning_rate = 0.5, print_cost = False):\n \"\"\"\n Builds the logistic regression model by calling the function you've implemented previously\n \n Arguments:\n X_train -- training set represented by a numpy array of shape (num_px * num_px * 3, m_train)\n Y_train -- training labels represented by a numpy array (vector) of shape (1, m_train)\n X_test -- test set represented by a numpy array of shape (num_px * num_px * 3, m_test)\n Y_test -- test labels represented by a numpy array (vector) of shape (1, m_test)\n num_iterations -- hyperparameter representing the number of iterations to optimize the parameters\n learning_rate -- hyperparameter representing the learning rate used in the update rule of optimize()\n print_cost -- Set to true to print the cost every 100 iterations\n \n Returns:\n d -- dictionary containing information about the model.\n \"\"\"\n \n ### START CODE HERE ###\n \n # initialize parameters with zeros (โ 1 line of code)\n w, b = initialize_with_zeros(X_train.shape[0])\n\n # Gradient descent (โ 1 line of code)\n parameters, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost = False)\n \n # Retrieve parameters w and b from dictionary \"parameters\"\n w = parameters[\"w\"]\n b = parameters[\"b\"]\n \n # Predict test/train set examples (โ 2 lines of code)\n Y_prediction_test = predict(w,b,X_test)\n Y_prediction_train = predict(w,b,X_train)\n\n ### END CODE HERE ###\n\n # Print train/test Errors\n print(\"train accuracy: {} %\".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))\n print(\"test accuracy: {} %\".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))\n\n \n d = {\"costs\": costs,\n \"Y_prediction_test\": Y_prediction_test, \n \"Y_prediction_train\" : Y_prediction_train, \n \"w\" : w, \n \"b\" : b,\n \"learning_rate\" : learning_rate,\n \"num_iterations\": num_iterations}\n \n return d",
"_____no_output_____"
]
],
[
[
"Run the following cell to train your model.",
"_____no_output_____"
]
],
[
[
"d = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 2000, learning_rate = 0.005, print_cost = True)",
"Cost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: 
()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: 
()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: 
()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: 
()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: 
()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: 
()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: 
()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: 
()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: 
()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: "
]
],
[
[
"**Expected Output**: \n\n<table style=\"width:40%\"> \n\n <tr>\n <td> **Cost after iteration 0 ** </td> \n <td> 0.693147 </td>\n </tr>\n <tr>\n <td> <center> $\\vdots$ </center> </td> \n <td> <center> $\\vdots$ </center> </td> \n </tr> \n <tr>\n <td> **Train Accuracy** </td> \n <td> 99.04306220095694 % </td>\n </tr>\n\n <tr>\n <td>**Test Accuracy** </td> \n <td> 70.0 % </td>\n </tr>\n</table> \n\n\n",
"_____no_output_____"
],
[
"**Comment**: Training accuracy is close to 100%. This is a good sanity check: your model is working and has high enough capacity to fit the training data. Test error is 68%. It is actually not bad for this simple model, given the small dataset we used and that logistic regression is a linear classifier. But no worries, you'll build an even better classifier next week!\n\nAlso, you see that the model is clearly overfitting the training data. Later in this specialization you will learn how to reduce overfitting, for example by using regularization. Using the code below (and changing the `index` variable) you can look at predictions on pictures of the test set.",
"_____no_output_____"
]
],
[
[
"# Example of a picture that was wrongly classified.\nindex = 1\nplt.imshow(test_set_x[:,index].reshape((num_px, num_px, 3)))\nprint (\"y = \" + str(test_set_y[0,index]) + \", you predicted that it is a \\\"\" + str(d[\"Y_prediction_test\"][0,index]) + \"\\\" picture.\")",
"y = 1, you predicted that it is a \"1.0\" picture.\n"
]
],
[
[
"Let's also plot the cost function and the gradients.",
"_____no_output_____"
]
],
[
[
"# Plot learning curve (with costs)\ncosts = np.squeeze(d['costs'])\nplt.plot(costs)\nplt.ylabel('cost')\nplt.xlabel('iterations (per hundreds)')\nplt.title(\"Learning rate =\" + str(d[\"learning_rate\"]))\nplt.show()",
"_____no_output_____"
]
],
[
[
"**Interpretation**:\nYou can see the cost decreasing. It shows that the parameters are being learned. However, you see that you could train the model even more on the training set. Try to increase the number of iterations in the cell above and rerun the cells. You might see that the training set accuracy goes up, but the test set accuracy goes down. This is called overfitting. ",
"_____no_output_____"
],
[
"\n\nCongratulations on building your first image classification model. Let's analyze it further, and examine possible choices for the learning rate $\\alpha$. ",
"_____no_output_____"
],
[
"#### Choice of learning rate ####\n\n**Reminder**:\nIn order for Gradient Descent to work you must choose the learning rate wisely. The learning rate $\\alpha$ determines how rapidly we update the parameters. If the learning rate is too large we may \"overshoot\" the optimal value. Similarly, if it is too small we will need too many iterations to converge to the best values. That's why it is crucial to use a well-tuned learning rate.\n\nLet's compare the learning curve of our model with several choices of learning rates. Run the cell below. This should take about 1 minute. Feel free also to try different values than the three we have initialized the `learning_rates` variable to contain, and see what happens. ",
"_____no_output_____"
]
],
[
[
"learning_rates = [0.01, 0.001, 0.0001]\nmodels = {}\nfor i in learning_rates:\n print (\"learning rate is: \" + str(i))\n models[str(i)] = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 1500, learning_rate = i, print_cost = False)\n print ('\\n' + \"-------------------------------------------------------\" + '\\n')\n\nfor i in learning_rates:\n plt.plot(np.squeeze(models[str(i)][\"costs\"]), label= str(models[str(i)][\"learning_rate\"]))\n\nplt.ylabel('cost')\nplt.xlabel('iterations')\n\nlegend = plt.legend(loc='upper center', shadow=True)\nframe = legend.get_frame()\nframe.set_facecolor('0.90')\nplt.show()",
"learning rate is: 0.01\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost 
Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost 
Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost 
Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost 
Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost 
Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost 
Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost 
Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: ()\nCost Shape: "
]
],
[
[
"**Interpretation**: \n- Different learning rates give different costs and thus different predictions results.\n- If the learning rate is too large (0.01), the cost may oscillate up and down. It may even diverge (though in this example, using 0.01 still eventually ends up at a good value for the cost). \n- A lower cost doesn't mean a better model. You have to check if there is possibly overfitting. It happens when the training accuracy is a lot higher than the test accuracy.\n- In deep learning, we usually recommend that you: \n - Choose the learning rate that better minimizes the cost function.\n - If your model overfits, use other techniques to reduce overfitting. (We'll talk about this in later videos.) \n",
"_____no_output_____"
],
[
"\n\nCongratulations on finishing this assignment. You can use your own image and see the output of your model. To do that:\n 1. Click on \"File\" in the upper bar of this notebook, then click \"Open\" to go on your Coursera Hub.\n 2. Add your image to this Jupyter Notebook's directory, in the \"images\" folder\n 3. Change your image's name in the following code\n 4. Run the code and check if the algorithm is right (1 = cat, 0 = non-cat)!",
"_____no_output_____"
]
],
[
[
"## START CODE HERE ## (PUT YOUR IMAGE NAME) \nmy_image = \"/tmp/my_image2.jpg\" # change this to the name of your image file \n## END CODE HERE ##\n\n# We preprocess the image to fit your algorithm.\nfname = my_image\nimage = np.array(ndimage.imread(fname, flatten=False))\nmy_image = scipy.misc.imresize(image, size=(num_px,num_px)).reshape((1, num_px*num_px*3)).T\nmy_predicted_image = predict(d[\"w\"], d[\"b\"], my_image)\n\nplt.imshow(image)\nprint(\"y = \" + str(np.squeeze(my_predicted_image)) + \", your algorithm predicts a \\\"\" + classes[int(np.squeeze(my_predicted_image)),].decode(\"utf-8\") + \"\\\" picture.\")",
"/usr/local/lib/python3.6/dist-packages/scipy/misc/pilutil.py:482: FutureWarning: Conversion of the second argument of issubdtype from `int` to `np.signedinteger` is deprecated. In future, it will be treated as `np.int64 == np.dtype(int).type`.\n if issubdtype(ts, int):\n/usr/local/lib/python3.6/dist-packages/scipy/misc/pilutil.py:485: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n elif issubdtype(type(size), float):\n"
]
],
[
[
"<font color='blue'>\n**What to remember from this assignment:**\n1. Preprocessing the dataset is important.\n2. You implemented each function separately: initialize(), propagate(), optimize(). Then you built a model().\n3. Tuning the learning rate (which is an example of a \"hyperparameter\") can make a big difference to the algorithm. You will see more examples of this later in this course!",
"_____no_output_____"
],
[
"Finally, if you'd like, we invite you to try different things on this Notebook. Make sure you submit before trying anything. Once you submit, things you can play with include:\n - Play with the learning rate and the number of iterations\n - Try different initialization methods and compare the results\n - Test other preprocessings (center the data, or divide each row by its standard deviation)",
"_____no_output_____"
],
[
"Bibliography:\n- http://www.wildml.com/2015/09/implementing-a-neural-network-from-scratch/\n- https://stats.stackexchange.com/questions/211436/why-do-we-normalize-images-by-subtracting-the-datasets-image-mean-and-not-the-c",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
]
] |
eca7a60c52a0a18ed7aa95942f9c5973abc7d832 | 30,781 | ipynb | Jupyter Notebook | research/etc/ds.ipynb | Oh-Donggyu/mrc-level2-nlp-01 | ffe9c555b5702404ff4ff90ccd60ac22c30f0579 | [
"MIT"
] | 1 | 2021-11-25T04:30:51.000Z | 2021-11-25T04:30:51.000Z | research/etc/ds.ipynb | Oh-Donggyu/mrc-level2-nlp-01 | ffe9c555b5702404ff4ff90ccd60ac22c30f0579 | [
"MIT"
] | null | null | null | research/etc/ds.ipynb | Oh-Donggyu/mrc-level2-nlp-01 | ffe9c555b5702404ff4ff90ccd60ac22c30f0579 | [
"MIT"
] | 5 | 2021-11-21T22:53:40.000Z | 2022-02-23T09:22:25.000Z | 38.47625 | 369 | 0.480101 | [
[
[
"from datasets import load_from_disk\n\nwiki_datasets = load_from_disk(\"/home/ubuntu/workspace/data/wiki_preprocessed_droped\")\ntrain_dataset = load_from_disk(\"/home/ubuntu/workspace/data/new_train_doc\")\nwiki_datasets.load_elasticsearch_index(\"text\", host=\"localhost\", port=\"9200\", es_index_name=\"wikipedia_contexts\")",
"_____no_output_____"
],
[
"train_dataset",
"_____no_output_____"
],
[
"import pandas as pd\n\ndata = train_dataset['train'].to_pandas()",
"_____no_output_____"
],
[
"answers = pd.read_csv('d.csv')\nanswers",
"_____no_output_____"
],
[
"data",
"_____no_output_____"
],
[
"\nfrom sklearn.model_selection import StratifiedKFold\n\ndef get_stratified_K_fold(dataset: pd.DataFrame, num):\n \"\"\"\n stratified_K_fold๋ฅผ ๊ตฌํํ์์ต๋๋ค.\n n_splits์ ๋ช๊ฐ๋ก ๋๋๊ฒ์ธ์ง๋ฅผ ์๋ฏธํฉ๋๋ค.\n num์ n_splits์ผ๋ก ๋๋ด์๋ ๋ช๋ฒ์งธ ๋ฐ์ดํฐ๋ฅผ ๊ฐ์ ธ์ฌ์ง๋ฅผ ์๋ฏธํฉ๋๋ค.\n shuffle์ ๋๋คํ๊ฒ ๋๋๋๋ค. ์ด๋๋ num์ ์ฌ์ฉํ์ง์๊ณ ๋ฌด์กฐ๊ฑด 1๋ฒ์งธ๊ฒ์ ๊ฐ์ ธ์ต๋๋ค.\n \"\"\"\n skf = StratifiedKFold(4)\n k_fold_data = list(skf.split(dataset, dataset[\"total\"]))\n train_index, valid_index = (\n k_fold_data[num][0],\n k_fold_data[num][1],\n )\n return dataset.iloc[train_index], dataset.iloc[valid_index]\n",
"_____no_output_____"
],
[
"_, result1 = get_stratified_K_fold(answers, 0)\n_, result2 = get_stratified_K_fold(answers, 1)\n_, result3 = get_stratified_K_fold(answers, 2)\n_, result4 = get_stratified_K_fold(answers, 3)",
"_____no_output_____"
],
[
"valid1_id = list(result1['id'])\nvalid2_id = list(result2['id'])\nvalid3_id = list(result3['id'])\nvalid4_id = list(result4['id'])",
"_____no_output_____"
],
[
"def split_train_valid(datasets, valid, valid_id):\n valid_dataset = datasets.filter(lambda x: x['id'] in valid_id)\n train_dataset = datasets.filter(lambda x: x['id'] not in valid_id)\n columns = ['context', 'question', 'id', 'title', 'document_id', 'answers']\n train_datadict = {}\n for col in columns:\n train_datadict[col] = []\n for data in train_dataset:\n for column in columns:\n train_datadict[column].append(data[column])\n for data in valid:\n for column in columns:\n train_datadict[column].append(data[column])\n return train_dataset, valid_dataset",
"_____no_output_____"
],
[
"result1 = split_train_valid(train_dataset['train'], train_dataset['validation'], valid1_id)\nresult2 = split_train_valid(train_dataset['train'], train_dataset['validation'], valid2_id)\nresult3 = split_train_valid(train_dataset['train'], train_dataset['validation'], valid3_id)\nresult4 = split_train_valid(train_dataset['train'], train_dataset['validation'], valid4_id)",
"Loading cached processed dataset at /home/ubuntu/workspace/data/new_train_doc/train/cache-1d272fa7b1452e64.arrow\nLoading cached processed dataset at /home/ubuntu/workspace/data/new_train_doc/train/cache-d3a2c29680fa3668.arrow\nLoading cached processed dataset at /home/ubuntu/workspace/data/new_train_doc/train/cache-ad62ca5e359eb235.arrow\nLoading cached processed dataset at /home/ubuntu/workspace/data/new_train_doc/train/cache-2e89f64faf8f2c44.arrow\nLoading cached processed dataset at /home/ubuntu/workspace/data/new_train_doc/train/cache-355f3de0445e0323.arrow\nLoading cached processed dataset at /home/ubuntu/workspace/data/new_train_doc/train/cache-78ed65fdd091fc56.arrow\nLoading cached processed dataset at /home/ubuntu/workspace/data/new_train_doc/train/cache-18d85f4f62118564.arrow\nLoading cached processed dataset at /home/ubuntu/workspace/data/new_train_doc/train/cache-e192557a06c1bebe.arrow\n"
],
[
"result3",
"_____no_output_____"
],
[
"from datasets import DatasetDict\nresult1 = DatasetDict({\n 'train': result1[0],\n 'validation': result1[1]\n})\nresult2 = DatasetDict({\n 'train': result2[0],\n 'validation': result2[1]\n})\nresult3 = DatasetDict({\n 'train': result3[0],\n 'validation': result3[1]\n})\nresult4 = DatasetDict({\n 'train': result4[0],\n 'validation': result4[1]\n})",
"_____no_output_____"
],
[
"train_dataset.save_to_disk('new_train1')\nresult1.save_to_disk('new_train2')\nresult2.save_to_disk('new_train3')\nresult3.save_to_disk('new_train4')\nresult4.save_to_disk('new_train5')",
"_____no_output_____"
],
[
"import random\n\nhard_sample = []\n\ndef ds_data_function(data):\n query = data['question']\n negative_contexts = []\n _, retrieved_examples = wiki_datasets.get_nearest_examples(\"text\", query, k=100)\n if data['document_id'] not in retrieved_examples['document_id']:\n hard_sample.append([data, retrieved_examples])\n for index in range(50):\n if retrieved_examples['document_id'][index] == data['document_id']:\n continue\n negative_contexts.append(retrieved_examples['text'][index])\n if len(negative_contexts) == 9:\n break\n random.shuffle(negative_contexts)\n index = random.randint(0, 9)\n negative_contexts.insert(index, data['context'])\n answer_index = data['answers']['answer_start'][0]\n for negative_index in range(index):\n answer_index += len(negative_contexts[negative_index])\n data['answers']['answer_start'][0] = answer_index + index\n data['context'] = \" \".join(negative_contexts)\n return data",
"_____no_output_____"
],
[
"new_train_dataset = result[0].map(ds_data_function, num_proc=4)\nnew_valid_dataset = result[1].map(ds_data_function, num_proc=4)",
"/home/ubuntu/workspace/mrc_venv/lib/python3.8/site-packages/elasticsearch/connection/base.py:209: ElasticsearchWarning: Elasticsearch built-in security features are not enabled. Without authentication, your cluster could be accessible to anyone. See https://www.elastic.co/guide/en/elasticsearch/reference/7.15/security-minimal-setup.html to enable security.\n warnings.warn(message, category=ElasticsearchWarning)\n/home/ubuntu/workspace/mrc_venv/lib/python3.8/site-packages/elasticsearch/connection/base.py:209: ElasticsearchWarning: Elasticsearch built-in security features are not enabled. Without authentication, your cluster could be accessible to anyone. See https://www.elastic.co/guide/en/elasticsearch/reference/7.15/security-minimal-setup.html to enable security.\n warnings.warn(message, category=ElasticsearchWarning)\n/home/ubuntu/workspace/mrc_venv/lib/python3.8/site-packages/elasticsearch/connection/base.py:209: ElasticsearchWarning: Elasticsearch built-in security features are not enabled. Without authentication, your cluster could be accessible to anyone. See https://www.elastic.co/guide/en/elasticsearch/reference/7.15/security-minimal-setup.html to enable security.\n warnings.warn(message, category=ElasticsearchWarning)\n/home/ubuntu/workspace/mrc_venv/lib/python3.8/site-packages/elasticsearch/connection/base.py:209: ElasticsearchWarning: Elasticsearch built-in security features are not enabled. Without authentication, your cluster could be accessible to anyone. See https://www.elastic.co/guide/en/elasticsearch/reference/7.15/security-minimal-setup.html to enable security.\n warnings.warn(message, category=ElasticsearchWarning)\n/home/ubuntu/workspace/mrc_venv/lib/python3.8/site-packages/elasticsearch/connection/base.py:209: ElasticsearchWarning: Elasticsearch built-in security features are not enabled. Without authentication, your cluster could be accessible to anyone. 
See https://www.elastic.co/guide/en/elasticsearch/reference/7.15/security-minimal-setup.html to enable security.\n warnings.warn(message, category=ElasticsearchWarning)\n/home/ubuntu/workspace/mrc_venv/lib/python3.8/site-packages/elasticsearch/connection/base.py:209: ElasticsearchWarning: Elasticsearch built-in security features are not enabled. Without authentication, your cluster could be accessible to anyone. See https://www.elastic.co/guide/en/elasticsearch/reference/7.15/security-minimal-setup.html to enable security.\n warnings.warn(message, category=ElasticsearchWarning)\n/home/ubuntu/workspace/mrc_venv/lib/python3.8/site-packages/elasticsearch/connection/base.py:209: ElasticsearchWarning: Elasticsearch built-in security features are not enabled. Without authentication, your cluster could be accessible to anyone. See https://www.elastic.co/guide/en/elasticsearch/reference/7.15/security-minimal-setup.html to enable security.\n warnings.warn(message, category=ElasticsearchWarning)\n/home/ubuntu/workspace/mrc_venv/lib/python3.8/site-packages/elasticsearch/connection/base.py:209: ElasticsearchWarning: Elasticsearch built-in security features are not enabled. Without authentication, your cluster could be accessible to anyone. See https://www.elastic.co/guide/en/elasticsearch/reference/7.15/security-minimal-setup.html to enable security.\n warnings.warn(message, category=ElasticsearchWarning)\n"
],
[
"len(hard_sample)",
"_____no_output_____"
],
[
"search_error = new_train_dataset.filter(lambda example: example['context'][example['answers']['answer_start'][0]:example['answers']['answer_start'][0]+len(example['answers']['text'][0])] != example['answers']['text'][0])",
"_____no_output_____"
],
[
"search_error",
"_____no_output_____"
],
[
"from datasets import DatasetDict\n\ndata = {\n 'train': new_train_dataset,\n 'validation': train_dataset['validation']\n}\ntrain = DatasetDict(data)\ntrain.save_to_disk('new_ds_train_datasets5') # ์ ์ฅ์์น",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
eca7aaae41aaf37ed96f1f393b444df08d8a0e15 | 71,519 | ipynb | Jupyter Notebook | examples/connected_tanks.ipynb | OpenRTDynamics/openrtdynamics2 | 1b7a114110089bc7721da604c5e344854ed555c3 | [
"MIT"
] | null | null | null | examples/connected_tanks.ipynb | OpenRTDynamics/openrtdynamics2 | 1b7a114110089bc7721da604c5e344854ed555c3 | [
"MIT"
] | null | null | null | examples/connected_tanks.ipynb | OpenRTDynamics/openrtdynamics2 | 1b7a114110089bc7721da604c5e344854ed555c3 | [
"MIT"
] | null | null | null | 496.659722 | 67,612 | 0.943861 | [
[
[
"import math\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport openrtdynamics2.lang as dy\nfrom openrtdynamics2.ORTDtoNumpy import ORTDtoNumpy",
"_____no_output_____"
]
],
[
[
"# Connected tanks\n\nThis model describes four connected tanks filled with liquid to a level described by $h_i$. Herein, $i$ describes the respective tank. The tanks are pairwise connected with a pipe and the flow through this pipe is proportional to the level difference (and pressure difference) between the two neighboring tanks. Further, the first tank has a controllable inlet $u$ and the 4th tank has an outlet pipe.\n\nThe initial level of liquid is zero in tanks 1, 2, and 4. In tank 4, the initial level is 1.",
"_____no_output_____"
]
],
[
[
"@ORTDtoNumpy()\ndef connected_tanks( u ):\n \n Ts = 1\n \n # constants\n c12 = 0.05\n c23 = 0.05\n c34 = 0.05\n \n c_4_out = 0.1\n \n flow_into_1 = u\n \n \n # hx: level of liquid in tank x\n h1 = dy.signal()\n h2 = dy.signal()\n h3 = dy.signal()\n h4 = dy.signal()\n \n # system equations describing the flow in-between two connected tanks based \n # on the difference of the liquid level.\n flow_1_to_2 = c12 * ( h1 - h2 )\n flow_2_to_3 = c23 * ( h2 - h3 )\n flow_3_to_4 = c34 * ( h3 - h4 )\n \n flow_out_of_4 = c_4_out * h4\n \n # integrate in-/outflow balance\n h1 << dy.euler_integrator( flow_into_1 - flow_1_to_2, Ts, initial_state=0.0 )\n h2 << dy.euler_integrator( flow_1_to_2 - flow_2_to_3, Ts, initial_state=0.0 )\n h3 << dy.euler_integrator( flow_2_to_3 - flow_3_to_4, Ts, initial_state=1.0 ) # inital level h3(0) = 1.0\n h4 << dy.euler_integrator( flow_3_to_4 - flow_out_of_4, Ts, initial_state=0.0 )\n \n \n return h1, h2, h3, h4\n\n",
"_____no_output_____"
],
[
"# Apply a step-wise change to the inflow\nh1, h2, h3, h4 = connected_tanks( np.concatenate( (0.05 * np.ones(50), np.zeros(50)) ) )\n\nplt.figure(figsize=(10,6), dpi=100)\nplt.plot(h1)\nplt.plot(h2)\nplt.plot(h3)\nplt.plot(h4)\nplt.legend(['liquid level tank 1 (h1)', 'liquid level tank 2 (h2)', 'liquid level tank 3 (h3)', 'liquid level tank 1 (h4)'])\n\nplt.show()",
"compiling system simulation (level 0)... \n"
]
]
] | [
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
eca7bd582c18beae2f8d1407074847d7e207d04d | 677,063 | ipynb | Jupyter Notebook | Amenities_Niyati/Amazon_nearby_amenities_Refreshments.ipynb | gvo34/BC_Project1 | 324d84aac1cc147f68382922c1ab8b73ac2c2070 | [
"MIT"
] | 1 | 2018-03-24T17:42:15.000Z | 2018-03-24T17:42:15.000Z | Amenities_Niyati/Amazon_nearby_amenities_Refreshments.ipynb | gvo34/BC_Project1 | 324d84aac1cc147f68382922c1ab8b73ac2c2070 | [
"MIT"
] | 15 | 2018-03-24T21:13:14.000Z | 2022-03-11T23:18:33.000Z | Amenities_Niyati/Amazon_nearby_amenities_Refreshments.ipynb | indranik/BC_Project1 | 0766a7fddebf0f7c0c19415a62990c9f06200169 | [
"MIT"
] | null | null | null | 56.520828 | 85,416 | 0.572019 | [
[
[
"## Search for nearby Amenities for all site locations of each city\n List of Amenities by Categories:\n Categories: \n A. Emergency Facilities \n '''How accesible are these facilities in case of mass emergency on/around \n sites for containing the situation and resuming business asap''' \n \n 1. Hospital\n 2. Fire Station\n 3. Doctor\n \n B. Accomodation \n '''Many executive employees travel from outside the city for fews days a week and might need lodging\n facility as close to the company as possible. Also lot of local/not-local employees use vehicle\n for commute and will need parking nearby'''\n \n 1. Lodging\n 2. Parking\n \n C. Recreation \n '''MNC(s) often arrange for team building activities on and near site. Also employees might look for group \n recreational activities close to work''' \n \n 1. Movies Theatres\n 2. Parks\n 3. Malls\n 4. Amusement park\n 5. Cafe/Restaurants\n \n D. Basic Errands/appointments\n '''By large employees tend to take care of daily errands and appoinments in lunch breaks or before/after\n work hours and prefer it to be as close to work as possible for obvious reasons''' \n \n 1. Super markets\n 2. Post Office\n 3. Doctor\n \n E. Fitness\n '''Fitness is top on priority list for a significant amount of employees now a days may be in gym or a \n jog/walk in park.'''\n \n 1. Gym\n 2. Parks\n ",
"_____no_output_____"
]
],
[
[
"# Dependencies\nimport requests\nimport json\nimport pandas as pd\nfrom pprint import pprint\nimport time\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# Google developer API key\nfrom config import g_key",
"_____no_output_____"
],
[
"# read places \nxls = pd.ExcelFile('BC_Project1/Project1_AmazonSites.xlsx') \nplaces_df=xls.parse('AmazonSites', dtype=str)\nplaces_df = places_df[['Amazon City','Site','Site Name','Latitude','Longitude']]\nplaces_df.head()\n#len(places_df)",
"_____no_output_____"
],
[
"# eating_out = {'restaurant':'restaurant', 'cafe':'cafe'}\n# all_eatingout_rating = []\n\n# for key in eating_out.keys():\n# eatingout_rating = []\n# for site in places_df.values:\n\n# # geocoordinates\n# target_coordinates = str(site[3]) + ',' + str(site[4])\n# target_search = eating_out[key]\n# target_radius = 2500\n# target_type = key\n# print(\"{} For {}: {}, {}\".format(key, site[0], site[1], target_coordinates))\n# print(\"----------\")\n# # set up a parameters dictionary\n# params = {\n# \"location\": target_coordinates,\n# \"keyword\": target_search,\n# \"radius\": target_radius,\n# \"type\": target_type,\n# \"key\": g_key\n# }\n\n# # base url\n# base_url = \"https://maps.googleapis.com/maps/api/place/nearbysearch/json\"\n\n# # run a request using our params dictionary\n# response = requests.get(base_url, params=params).json()\n# results = response.get('results')\n# total_counts = len(results)\n# print(total_counts)\n\n# # Print the name and address of the first restaurant that appears\n# for x in range(total_counts):\n# if \"rating\" in results[x].keys():\n# rating = results[x][\"rating\"]\n# else:\n# rating = 'NAN'\n# if \"price_level\" in results[x].keys():\n# price_level = results[x][\"price_level\"]\n# else:\n# price_level = 'NAN'\n# eatingout_rating.append({\"Site Name\":site[2],\n# key+\" Total Count\":total_counts,\n# \"Latitude\":site[3],\n# \"Longitude\":site[4],\n# \"Facility \"+key:results[x][\"name\"],\n# \"Rating\":rating,\n# \"price_level\":price_level})\n \n# time.sleep(2)\n# time.sleep(2)\n# all_eatingout_rating.append(eatingout_rating)\n# print(\"ALL Done!!!!!\")\n# #all_eatingout_rating",
"restaurant For Northern Virginia Area: Site1, 38.96,-77.42\n----------\n20\nrestaurant For Northern Virginia Area: Site2, 38.84,-77.05\n----------\n20\nrestaurant For Washington DC: Site1, 38.86,-77.01\n----------\n20\nrestaurant For Washington DC: Site2, 38.89,-77\n----------\n20\nrestaurant For Washington DC: Site3, 38.88,-76.97\n----------\n20\nrestaurant For Washington DC: Site4, 38.91,-77.02\n----------\n20\nrestaurant For Los Angeles: Site1, 34.1802,-118.6028\n----------\n20\nrestaurant For Los Angeles: Site2, 34.4318,-118.5584\n----------\n20\nrestaurant For Los Angeles: Site3, 34.0885,-117.7702\n----------\n20\nrestaurant For Boston: Site1, 42.39,-71\n----------\n20\nrestaurant For Boston: Site2, 42.34,-71.03\n----------\n20\nrestaurant For Boston: Site3, 42.35,-71.05\n----------\n20\nrestaurant For Boston: Site4, 42.34,-71.06\n----------\n20\nrestaurant For New York: Site1, 40.75,-74\n----------\n20\nrestaurant For New York: Site2, 40.74,-73.94\n----------\n20\nrestaurant For New York: Site3, 40.7,-73.98\n----------\n20\nrestaurant For New York: Site4, 40.7,-74.01\n----------\n20\nrestaurant For Chicago: Site1, 41.91,-87.64\n----------\n20\nrestaurant For Chicago: Site2, 41.89,-87.64\n----------\n20\nrestaurant For Chicago: Site3, 41.88,-87.65\n----------\n20\nrestaurant For Chicago: Site4, 41.87,-87.63\n----------\n20\nrestaurant For Chicago: Site5, 41.87,-87.62\n----------\n20\nrestaurant For Chicago: Site6, 41.86,-87.67\n----------\n20\nrestaurant For Chicago: Site7, 41.85,-87.66\n----------\n20\nrestaurant For Chicago: Site8, 41.83,-87.6\n----------\n18\nrestaurant For Raleigh: Site1, 35.9,-78.87\n----------\n20\nrestaurant For Raleigh: Site2, 35.85,-79.08\n----------\n6\nrestaurant For Raleigh: Site3, 35.77,-78.64\n----------\n20\nrestaurant For Atlanta: Site1, 33.7615,-84.3856\n----------\n20\nrestaurant For Atlanta: Site2, 34.5129,-93.0431\n----------\n20\nrestaurant For Atlanta: Site3, 33.904,-84.2843\n----------\n20\nrestaurant For Atlanta: 
Site4, 33.9133,-84.3888\n----------\n20\nrestaurant For Austin: Site1, 30.2586,-97.7444\n----------\n20\nrestaurant For Austin: Site2, 30.2714,-97.6647\n----------\n1\nrestaurant For Austin: Site3, 30.199,-97.8276\n----------\n20\nrestaurant For Austin: Site4, 30.4015,-97.7268\n----------\n20\nrestaurant For Austin: Site5, 30.4014,-97.7147\n----------\n20\nrestaurant For Austin: Site6, 30.4704,-97.7701\n----------\n18\nrestaurant For Austin: Site7, 30.1338,-97.6411\n----------\n1\nALL Done!!!!!\ncafe For Northern Virginia Area: Site1, 38.96,-77.42\n----------\n13\ncafe For Northern Virginia Area: Site2, 38.84,-77.05\n----------\n17\ncafe For Washington DC: Site1, 38.86,-77.01\n----------\n14\ncafe For Washington DC: Site2, 38.89,-77\n----------\n20\ncafe For Washington DC: Site3, 38.88,-76.97\n----------\n13\ncafe For Washington DC: Site4, 38.91,-77.02\n----------\n20\ncafe For Los Angeles: Site1, 34.1802,-118.6028\n----------\n18\ncafe For Los Angeles: Site2, 34.4318,-118.5584\n----------\n19\ncafe For Los Angeles: Site3, 34.0885,-117.7702\n----------\n6\ncafe For Boston: Site1, 42.39,-71\n----------\n9\ncafe For Boston: Site2, 42.34,-71.03\n----------\n20\ncafe For Boston: Site3, 42.35,-71.05\n----------\n20\ncafe For Boston: Site4, 42.34,-71.06\n----------\n20\ncafe For New York: Site1, 40.75,-74\n----------\n20\ncafe For New York: Site2, 40.74,-73.94\n----------\n20\ncafe For New York: Site3, 40.7,-73.98\n----------\n20\ncafe For New York: Site4, 40.7,-74.01\n----------\n20\ncafe For Chicago: Site1, 41.91,-87.64\n----------\n20\ncafe For Chicago: Site2, 41.89,-87.64\n----------\n20\ncafe For Chicago: Site3, 41.88,-87.65\n----------\n20\ncafe For Chicago: Site4, 41.87,-87.63\n----------\n20\ncafe For Chicago: Site5, 41.87,-87.62\n----------\n20\ncafe For Chicago: Site6, 41.86,-87.67\n----------\n20\ncafe For Chicago: Site7, 41.85,-87.66\n----------\n20\ncafe For Chicago: Site8, 41.83,-87.6\n----------\n2\ncafe For Raleigh: Site1, 35.9,-78.87\n----------\n4\ncafe 
For Raleigh: Site2, 35.85,-79.08\n----------\n1\ncafe For Raleigh: Site3, 35.77,-78.64\n----------\n20\ncafe For Atlanta: Site1, 33.7615,-84.3856\n----------\n20\ncafe For Atlanta: Site2, 34.5129,-93.0431\n----------\n8\ncafe For Atlanta: Site3, 33.904,-84.2843\n----------\n8\ncafe For Atlanta: Site4, 33.9133,-84.3888\n----------\n10\ncafe For Austin: Site1, 30.2586,-97.7444\n----------\n20\ncafe For Austin: Site2, 30.2714,-97.6647\n----------\n0\ncafe For Austin: Site3, 30.199,-97.8276\n----------\n9\ncafe For Austin: Site4, 30.4015,-97.7268\n----------\n16\ncafe For Austin: Site5, 30.4014,-97.7147\n----------\n13\ncafe For Austin: Site6, 30.4704,-97.7701\n----------\n4\ncafe For Austin: Site7, 30.1338,-97.6411\n----------\n0\nALL Done!!!!!\n"
],
[
"all_eatingout_rating",
"_____no_output_____"
],
[
"all_restaurant_rating_df = pd.DataFrame(all_eatingout_rating[0])\nall_restaurant_rating_df",
"_____no_output_____"
],
[
"all_cafe_rating_df = pd.DataFrame(all_eatingout_rating[1])\nall_cafe_rating_df",
"_____no_output_____"
],
[
"all_cafe_rating_df.to_csv(\"Cafe_Rating.csv\")",
"_____no_output_____"
],
[
"all_restaurant_rating_df.to_csv(\"Restaurant_Rating.csv\")",
"_____no_output_____"
],
[
"# geocoordinates\ntarget_coordinates = '38.96,-77.42'\ntarget_search = 'restaurant'\ntarget_radius = 2500\ntarget_type = 'Restaurant'\n#print(\"{} For {}: {}, {}\".format(key, site[0], site[1], target_coordinates))\nprint(\"----------\")\n# set up a parameters dictionary\nparams = {\n \"location\": target_coordinates,\n \"keyword\": target_search,\n \"radius\": target_radius,\n \"type\": target_type,\n \"key\": g_key\n}\n\n# base url\nbase_url = \"https://maps.googleapis.com/maps/api/place/nearbysearch/json\"\n\n# run a request using our params dictionary\nresponse = requests.get(base_url, params=params).json()\nresults = response.get('results')",
"----------\n"
],
[
"results",
"_____no_output_____"
],
[
"restaurant = pd.read_csv('Restaurant_Rating.csv')\ndel restaurant['Unnamed: 0']\nrestaurant['Rating']=restaurant['Rating'].astype(float)\nrestaurant.replace('NAN', value=0, inplace=True)\nrestaurant.head()",
"_____no_output_____"
],
[
"restaurant_grouped = restaurant.groupby('City Name')\nsite_avg_count = restaurant_grouped['restaurant Total Count'].median()\nsite_avg_rating = restaurant_grouped['Rating'].median()\nsite_avg_count_df = pd.DataFrame(site_avg_rating)\n# site_avg_rating_df =pd.DataFrame({'City Name': restaurant['City Name'],\n# 'Median Rating':site_avg_rating})\n\nsite_avg_count_df['Restaurant Median Count']=site_avg_count\nsite_avg_count_df = site_avg_count_df.rename(columns={'Rating':'Median Rating'})\nsite_avg_count_df['Median Rating'] = site_avg_count_df['Median Rating'].astype(float)\nsite_avg_count_df = site_avg_count_df.reset_index()\nsite_avg_count_df['Restaurant Median Count'] = site_avg_count_df['Restaurant Median Count'].astype(int)\nsite_avg_count_df",
"_____no_output_____"
],
[
"# Create legend for colors\ncolors = ['lightblue', 'green', 'red', 'blue', 'yellow']\n\n# Use seaborn to make the scatter plot\nax = sns.lmplot(x='City Name', y='Median Rating', data=site_avg_count_df, fit_reg=False, aspect=2.5, \n hue='City Name', legend=False, size=8,\n scatter_kws={\"s\":site_avg_count_df['Median Rating']*500,'alpha':1, 'edgecolors':'black', 'linewidths':1})\n\n# Make the grid, set x-limit and y-limit\nplt.grid()\nplt.ylim(3.5,4.5)\n\n# Set scale for all the fonts of the plot\nsns.set(font_scale=1.4)\n\n# Make x-axis, y-axis & title labels\n# plt.title(\"SENTIMENT ANALYSIS OF MEDIA TWEETS (03/25/2018)\")\n# plt.xlabel(\"Tweets Ago\")\n# plt.ylabel(\"Tweets Polarity\")\n\n# Set the plot baclground color\nsns.set_style(\"dark\")\n\n# Format the legend and plot\nplt.legend(loc='upper right', title='City Types')\n# Put the legend out of the figure\nplt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\nplt.show()",
"_____no_output_____"
],
[
"x=site_avg_count_df['City Name']\ny=site_avg_count_df['Median Rating']\nz=site_avg_count_df['Median Rating']\nplt.scatter(x, y, s=z*1000, alpha=0.4, edgecolors=\"grey\", linewidth=2, c=y, cmap=\"PuBuGn\")\nplt.grid()\n# Get current size\nfig_size = plt.rcParams[\"figure.figsize\"]\n \n# Prints: [8.0, 6.0]\nprint (\"Current size:\", fig_size)\n \n# Set figure width to 12 and height to 9\nfig_size[0] = 20\nfig_size[1] = 9\nplt.rcParams[\"figure.figsize\"] = fig_size\nplt.show()\n",
"Current size: [20.0, 9.0]\n"
],
[
"sns.boxplot(x='City Name', y='Rating', data=restaurant )\nplt.grid()\n\nplt.show()",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
eca7bfcd0aae7142466c214bbaea112aab2153bb | 219,694 | ipynb | Jupyter Notebook | PrepareRtTrainingFile/Test_Other_Model-ForPaperSCXLumos_ThermoUltimate3000_130m.ipynb | YeonChoi/EPIQ | 4d3aea0c466b179cfe04520b2bdfb4a3b2f18639 | [
"MIT"
] | null | null | null | PrepareRtTrainingFile/Test_Other_Model-ForPaperSCXLumos_ThermoUltimate3000_130m.ipynb | YeonChoi/EPIQ | 4d3aea0c466b179cfe04520b2bdfb4a3b2f18639 | [
"MIT"
] | null | null | null | PrepareRtTrainingFile/Test_Other_Model-ForPaperSCXLumos_ThermoUltimate3000_130m.ipynb | YeonChoi/EPIQ | 4d3aea0c466b179cfe04520b2bdfb4a3b2f18639 | [
"MIT"
] | null | null | null | 54.541708 | 287 | 0.406065 | [
[
[
"import pickle\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import stats\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import KFold\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn import linear_model\nfrom sklearn import neighbors\nfrom sklearn import neural_network\nfrom sklearn import tree\nfrom sklearn import kernel_ridge\nfrom sklearn import gaussian_process\nfrom sklearn import svm",
"_____no_output_____"
],
[
"dfIn = pd.io.parsers.read_csv(\"./LibSVM_data/D-shifts_UniquePepSampled_ForPaperSCXLumos_ThermoUltimate3000_130m.standardized.csv\")\ndfIn.head()",
"_____no_output_____"
]
],
[
[
"# Rescaling ",
"_____no_output_____"
]
],
[
[
"seY = dfIn['NormedApexEtDiff']\ndfX = dfIn['Dnum NormedRepreEt PeptideLength [K]Ratio'.split()]",
"_____no_output_____"
],
[
"min_max_scaler = preprocessing.MinMaxScaler(feature_range=(-1, 1), copy=True)\narrXScaled = min_max_scaler.fit_transform(dfX)\narrXScaled",
"_____no_output_____"
],
[
"print(arrXScaled.max(axis=0), arrXScaled.min(axis=0))",
"[ 1. 1. 1. 1.] [-1. -1. -1. -1.]\n"
]
],
[
[
"# Read Standardized",
"_____no_output_____"
]
],
[
[
"dfStand = dfIn['Dnum_Standardized NormedRepreEt_Standardized PeptideLength_Standardized [K]Ratio_Standardized'.split()]\narrXStand = np.array(dfStand)",
"_____no_output_____"
],
[
"arrXStand",
"_____no_output_____"
]
],
[
[
"# Cross Validation of Linear Model",
"_____no_output_____"
],
[
"Rescaled Data",
"_____no_output_____"
]
],
[
[
"lPredicted = []\nlTestValue = []\n\nkf = KFold(n_splits=10, shuffle=True)\ntotalSqrError = 0\nfor train_idx, test_idx in kf.split(arrXScaled):\n reg = linear_model.LinearRegression()\n linearModel = reg.fit(arrXScaled[train_idx], seY[train_idx])\n arrTestPredicted = reg.predict(arrXScaled[test_idx])\n totalSqrError += np.sum((arrTestPredicted - seY[test_idx]) ** 2)\n \n lPredicted.append(arrTestPredicted)\n lTestValue.append(seY[test_idx])\n \narrPredictedLMrs = np.concatenate(lPredicted)\narrTestValueLMrs = np.concatenate(lTestValue)\n \nprint(\"Mean squared error: %.10f\"%(totalSqrError/len(seY)))\nprint(\"Root Mean squared error: %.10f\"% np.sqrt(totalSqrError/len(seY)))\nnp.sqrt(totalSqrError/len(seY)) * 62",
"Mean squared error: 0.0000032540\nRoot Mean squared error: 0.0018038858\n"
]
],
[
[
"Standardized Data",
"_____no_output_____"
]
],
[
[
"lPredicted = []\nlTestValue = []\n\nkf = KFold(n_splits=10, shuffle=True)\ntotalSqrError = 0\nfor train_idx, test_idx in kf.split(arrXStand):\n reg = linear_model.LinearRegression()\n linearModel = reg.fit(arrXStand[train_idx], seY[train_idx])\n arrTestPredicted = reg.predict(arrXStand[test_idx])\n totalSqrError += np.sum((arrTestPredicted - seY[test_idx]) ** 2)\n \n lPredicted.append(arrTestPredicted)\n lTestValue.append(seY[test_idx])\n \narrPredictedLMstd = np.concatenate(lPredicted)\narrTestValueLMstd = np.concatenate(lTestValue)\n \nprint(\"Mean squared error: %.10f\"%(totalSqrError/len(seY)))\nprint(\"Root Mean squared error: %.10f\"% np.sqrt(totalSqrError/len(seY)))\nnp.sqrt(totalSqrError/len(seY)) * 62",
"Mean squared error: 0.0000032535\nRoot Mean squared error: 0.0018037567\n"
]
],
[
[
"# Cross Validation of Polynomial Linear Model",
"_____no_output_____"
],
[
"Rescaled Data",
"_____no_output_____"
]
],
[
[
"poly = preprocessing.PolynomialFeatures(2)\narrXPoly = poly.fit_transform(arrXScaled)\narrXPoly",
"_____no_output_____"
],
[
"lPredicted = []\nlTestValue = []\n\nkf = KFold(n_splits=10, shuffle=True)\ntotalSqrError = 0\nfor train_idx, test_idx in kf.split(arrXPoly):\n reg = linear_model.LinearRegression()\n linearModel = reg.fit(arrXPoly[train_idx], seY[train_idx])\n arrTestPredicted = reg.predict(arrXPoly[test_idx])\n totalSqrError += np.sum((arrTestPredicted - seY[test_idx]) ** 2)\n \n lPredicted.append(arrTestPredicted)\n lTestValue.append(seY[test_idx])\n \narrPredictedPLMrs = np.concatenate(lPredicted)\narrTestValuePLMrs = np.concatenate(lTestValue)\nprint(\"Mean squared error: %.10f\"%(totalSqrError/len(seY)))\nprint(\"Root Mean squared error: %.10f\"% np.sqrt(totalSqrError/len(seY)))\nnp.sqrt(totalSqrError/len(seY)) * 62",
"Mean squared error: 0.0000030030\nRoot Mean squared error: 0.0017329048\n"
]
],
[
[
"Standardized Data",
"_____no_output_____"
]
],
[
[
"poly = preprocessing.PolynomialFeatures(2)\narrXPoly = poly.fit_transform(dfStand)\narrXPoly",
"_____no_output_____"
],
[
"lPredicted = []\nlTestValue = []\n\nkf = KFold(n_splits=10, shuffle=True)\ntotalSqrError = 0\nfor train_idx, test_idx in kf.split(arrXPoly):\n reg = linear_model.LinearRegression()\n linearModel = reg.fit(arrXPoly[train_idx], seY[train_idx])\n arrTestPredicted = reg.predict(arrXPoly[test_idx])\n totalSqrError += np.sum((arrTestPredicted - seY[test_idx]) ** 2)\n \n lPredicted.append(arrTestPredicted)\n lTestValue.append(seY[test_idx])\n \narrPredictedPLMstd = np.concatenate(lPredicted)\narrTestValuePLMstd = np.concatenate(lTestValue)\n\nprint(\"Mean squared error: %.10f\"%(totalSqrError/len(seY)))\nprint(\"Root Mean squared error: %.10f\"% np.sqrt(totalSqrError/len(seY)))\nprint(np.sqrt(totalSqrError/len(seY)) * 62)",
"Mean squared error: 0.0000030027\nRoot Mean squared error: 0.0017328164\n0.107434619756\n"
]
],
[
[
"# Cross Validation of KNN",
"_____no_output_____"
],
[
"Rescaled Data",
"_____no_output_____"
]
],
[
[
"lPredicted = []\nlTestValue = []\n\nkf = KFold(n_splits=10, shuffle=True)\ntotalSqrError = 0\nfor train_idx, test_idx in kf.split(arrXScaled):\n reg = neighbors.KNeighborsRegressor(n_neighbors=30)\n linearModel = reg.fit(arrXScaled[train_idx], seY[train_idx])\n arrTestPredicted = reg.predict(arrXScaled[test_idx])\n totalSqrError += np.sum((arrTestPredicted - seY[test_idx]) ** 2)\n \n lPredicted.append(arrTestPredicted)\n lTestValue.append(seY[test_idx])\n \narrPredictedKNNrs = np.concatenate(lPredicted)\narrTestValueKNNrs = np.concatenate(lTestValue)\n\nprint(\"Mean squared error: %.10f\"%(totalSqrError/len(seY)))\nprint(\"Root Mean squared error: %.10f\"% np.sqrt(totalSqrError/len(seY)))\nprint(np.sqrt(totalSqrError/len(seY)) * 62)",
"Mean squared error: 0.0000027854\nRoot Mean squared error: 0.0016689379\n0.103474149613\n"
]
],
[
[
"Standardized Data",
"_____no_output_____"
]
],
[
[
"lPredicted = []\nlTestValue = []\n\nkf = KFold(n_splits=10, shuffle=True)\ntotalSqrError = 0\nfor train_idx, test_idx in kf.split(arrXStand):\n reg = neighbors.KNeighborsRegressor(n_neighbors=30)\n linearModel = reg.fit(arrXStand[train_idx], seY[train_idx])\n arrTestPredicted = reg.predict(arrXStand[test_idx])\n totalSqrError += np.sum((arrTestPredicted - seY[test_idx]) ** 2)\n \n lPredicted.append(arrTestPredicted)\n lTestValue.append(seY[test_idx])\n \narrPredictedKNNstd = np.concatenate(lPredicted)\narrTestValueKNNstd = np.concatenate(lTestValue)\n\n\nprint(\"Mean squared error: %.10f\"%(totalSqrError/len(seY)))\nprint(\"Root Mean squared error: %.10f\"% np.sqrt(totalSqrError/len(seY)))\nprint(np.sqrt(totalSqrError/len(seY)) * 62)",
"Mean squared error: 0.0000028241\nRoot Mean squared error: 0.0016804913\n0.104190458853\n"
]
],
[
[
"Rescaled, Polynomial Data",
"_____no_output_____"
]
],
[
[
"poly = preprocessing.PolynomialFeatures(2)\narrXPoly = poly.fit_transform(arrXScaled)\n\nlPredicted = []\nlTestValue = []\n\nkf = KFold(n_splits=10, shuffle=True)\ntotalSqrError = 0\nfor train_idx, test_idx in kf.split(arrXPoly):\n reg = neighbors.KNeighborsRegressor(n_neighbors=30)\n linearModel = reg.fit(arrXPoly[train_idx], seY[train_idx])\n arrTestPredicted = reg.predict(arrXPoly[test_idx])\n totalSqrError += np.sum((arrTestPredicted - seY[test_idx]) ** 2)\n \n lPredicted.append(arrTestPredicted)\n lTestValue.append(seY[test_idx])\n \narrPredictedKNNrsP = np.concatenate(lPredicted)\narrTestValueKNNrsP = np.concatenate(lTestValue)\n\nprint(\"Mean squared error: %.10f\"%(totalSqrError/len(seY)))\nprint(\"Root Mean squared error: %.10f\"% np.sqrt(totalSqrError/len(seY)))\nprint(np.sqrt(totalSqrError/len(seY)) * 62)",
"Mean squared error: 0.0000027888\nRoot Mean squared error: 0.0016699777\n0.10353861967\n"
]
],
[
[
"Standardized, Polynomial Data",
"_____no_output_____"
]
],
[
[
"poly = preprocessing.PolynomialFeatures(2)\narrXPoly = poly.fit_transform(dfStand)\n\nlPredicted = []\nlTestValue = []\n\nkf = KFold(n_splits=10, shuffle=True)\ntotalSqrError = 0\nfor train_idx, test_idx in kf.split(arrXPoly):\n reg = neighbors.KNeighborsRegressor(n_neighbors=30)\n linearModel = reg.fit(arrXPoly[train_idx], seY[train_idx])\n arrTestPredicted = reg.predict(arrXPoly[test_idx])\n totalSqrError += np.sum((arrTestPredicted - seY[test_idx]) ** 2)\n \n lPredicted.append(arrTestPredicted)\n lTestValue.append(seY[test_idx])\n \narrPredictedKNNstdP = np.concatenate(lPredicted)\narrTestValueKNNstdP = np.concatenate(lTestValue)\n\nprint(\"Mean squared error: %.10f\"%(totalSqrError/len(seY)))\nprint(\"Root Mean squared error: %.10f\"% np.sqrt(totalSqrError/len(seY)))\nprint(np.sqrt(totalSqrError/len(seY)) * 62)",
"Mean squared error: 0.0000028514\nRoot Mean squared error: 0.0016886215\n0.104694534165\n"
]
],
[
[
"# Cross Validation of RadiusNeighborsRegressor",
"_____no_output_____"
],
[
"Rescaled Data",
"_____no_output_____"
]
],
[
[
"lPredicted = []\nlTestValue = []\n\nkf = KFold(n_splits=10, shuffle=True)\ntotalSqrError = 0\nfor train_idx, test_idx in kf.split(arrXScaled):\n reg = neighbors.RadiusNeighborsRegressor()\n linearModel = reg.fit(arrXScaled[train_idx], seY[train_idx])\n arrTestPredicted = reg.predict(arrXScaled[test_idx])\n totalSqrError += np.sum((arrTestPredicted - seY[test_idx]) ** 2)\n \n lPredicted.append(arrTestPredicted)\n lTestValue.append(seY[test_idx])\n \narrPredictedRNRrs = np.concatenate(lPredicted)\narrTestValueRNRrs = np.concatenate(lTestValue)\n\nprint(\"Mean squared error: %.10f\"%(totalSqrError/len(seY)))\nprint(\"Root Mean squared error: %.10f\"% np.sqrt(totalSqrError/len(seY)))\nprint(np.sqrt(totalSqrError/len(seY)) * 62)",
"Mean squared error: 0.0000038895\nRoot Mean squared error: 0.0019721919\n0.122275896585\n"
]
],
[
[
"Standardized Data",
"_____no_output_____"
]
],
[
[
"lPredicted = []\nlTestValue = []\n\nkf = KFold(n_splits=10, shuffle=True)\ntotalSqrError = 0\nfor train_idx, test_idx in kf.split(arrXStand):\n reg = neighbors.RadiusNeighborsRegressor(radius=40)\n linearModel = reg.fit(arrXStand[train_idx], seY[train_idx])\n arrTestPredicted = reg.predict(arrXStand[test_idx])\n totalSqrError += np.sum((arrTestPredicted - seY[test_idx]) ** 2)\n \n lPredicted.append(arrTestPredicted)\n lTestValue.append(seY[test_idx])\n \narrPredictedRNRstd = np.concatenate(lPredicted)\narrTestValueRNRstd = np.concatenate(lTestValue)\n\n\nprint(\"Mean squared error: %.10f\"%(totalSqrError/len(seY)))\nprint(\"Root Mean squared error: %.10f\"% np.sqrt(totalSqrError/len(seY)))\nprint(np.sqrt(totalSqrError/len(seY)) * 62)",
"Mean squared error: 0.0000046668\nRoot Mean squared error: 0.0021602678\n0.133936604344\n"
]
],
[
[
"# Cross Validation of Multi-Layer Perceptron",
"_____no_output_____"
],
[
"Rescaled Data",
"_____no_output_____"
]
],
[
[
"lPredicted = []\nlTestValue = []\n\nkf = KFold(n_splits=10, shuffle=True)\ntotalSqrError = 0\nfor train_idx, test_idx in kf.split(arrXScaled):\n reg = neural_network.MLPRegressor(hidden_layer_sizes=(400, 400, 400),)\n linearModel = reg.fit(arrXScaled[train_idx], seY[train_idx])\n arrTestPredicted = reg.predict(arrXScaled[test_idx])\n totalSqrError += np.sum((arrTestPredicted - seY[test_idx]) ** 2)\n \n lPredicted.append(arrTestPredicted)\n lTestValue.append(seY[test_idx])\n \narrPredictedMLPrs = np.concatenate(lPredicted)\narrTestValueMLPrs = np.concatenate(lTestValue)\n\n\nprint(\"Mean squared error: %.10f\"%(totalSqrError/len(seY)))\nprint(\"Root Mean squared error: %.10f\"% np.sqrt(totalSqrError/len(seY)))\nprint(np.sqrt(totalSqrError/len(seY)) * 62)",
"Mean squared error: 0.0000035179\nRoot Mean squared error: 0.0018755969\n0.116287007385\n"
]
],
[
[
"Standardized Data",
"_____no_output_____"
]
],
[
[
"lPredicted = []\nlTestValue = []\n\nkf = KFold(n_splits=10, shuffle=True)\ntotalSqrError = 0\nfor train_idx, test_idx in kf.split(arrXStand):\n reg = neural_network.MLPRegressor(hidden_layer_sizes=(400, 400, 400),)\n linearModel = reg.fit(arrXStand[train_idx], seY[train_idx])\n arrTestPredicted = reg.predict(arrXStand[test_idx])\n totalSqrError += np.sum((arrTestPredicted - seY[test_idx]) ** 2)\n \n lPredicted.append(arrTestPredicted)\n lTestValue.append(seY[test_idx])\n \narrPredictedMLPstd = np.concatenate(lPredicted)\narrTestValueMLPstd = np.concatenate(lTestValue)\n\n\nprint(\"Mean squared error: %.10f\"%(totalSqrError/len(seY)))\nprint(\"Root Mean squared error: %.10f\"% np.sqrt(totalSqrError/len(seY)))\nprint(np.sqrt(totalSqrError/len(seY)) * 62)",
"Mean squared error: 0.0000059064\nRoot Mean squared error: 0.0024303055\n0.150678939276\n"
]
],
[
[
"# Cross Validation of Decision Tree",
"_____no_output_____"
],
[
"Rescaled Data",
"_____no_output_____"
]
],
[
[
"lPredicted = []\nlTestValue = []\n\nkf = KFold(n_splits=10, shuffle=True)\ntotalSqrError = 0\nfor train_idx, test_idx in kf.split(arrXScaled):\n reg = tree.DecisionTreeRegressor()\n linearModel = reg.fit(arrXScaled[train_idx], seY[train_idx])\n arrTestPredicted = reg.predict(arrXScaled[test_idx])\n totalSqrError += np.sum((arrTestPredicted - seY[test_idx]) ** 2)\n \n lPredicted.append(arrTestPredicted)\n lTestValue.append(seY[test_idx])\n \narrPredictedDTrs = np.concatenate(lPredicted)\narrTestValueDTrs = np.concatenate(lTestValue)\n\nprint(\"Mean squared error: %.10f\"%(totalSqrError/len(seY)))\nprint(\"Root Mean squared error: %.10f\"% np.sqrt(totalSqrError/len(seY)))\nprint(np.sqrt(totalSqrError/len(seY)) * 62)",
"Mean squared error: 0.0000048596\nRoot Mean squared error: 0.0022044592\n0.136676469855\n"
]
],
[
[
"Standardized Data",
"_____no_output_____"
]
],
[
[
"lPredicted = []\nlTestValue = []\n\nkf = KFold(n_splits=10, shuffle=True)\ntotalSqrError = 0\nfor train_idx, test_idx in kf.split(arrXStand):\n reg = tree.DecisionTreeRegressor()\n linearModel = reg.fit(arrXStand[train_idx], seY[train_idx])\n arrTestPredicted = reg.predict(arrXStand[test_idx])\n totalSqrError += np.sum((arrTestPredicted - seY[test_idx]) ** 2)\n \n lPredicted.append(arrTestPredicted)\n lTestValue.append(seY[test_idx])\n \narrPredictedDTstd = np.concatenate(lPredicted)\narrTestValueDTstd = np.concatenate(lTestValue)\n\nprint(\"Mean squared error: %.10f\"%(totalSqrError/len(seY)))\nprint(\"Root Mean squared error: %.10f\"% np.sqrt(totalSqrError/len(seY)))\nprint(np.sqrt(totalSqrError/len(seY)) * 62)",
"Mean squared error: 0.0000050021\nRoot Mean squared error: 0.0022365272\n0.138664684443\n"
]
],
[
[
"# Cross Validation of Gaussian Process",
"_____no_output_____"
],
[
"Rescaled Data",
"_____no_output_____"
]
],
[
[
"lPredicted = []\nlTestValue = []\n\nkf = KFold(n_splits=10, shuffle=True)\ntotalSqrError = 0\nfor train_idx, test_idx in kf.split(arrXScaled):\n reg = gaussian_process.GaussianProcessRegressor()\n linearModel = reg.fit(arrXScaled[train_idx], seY[train_idx])\n arrTestPredicted = reg.predict(arrXScaled[test_idx])\n totalSqrError += np.sum((arrTestPredicted - seY[test_idx]) ** 2)\n \n lPredicted.append(arrTestPredicted)\n lTestValue.append(seY[test_idx])\n \narrPredictedGPrs = np.concatenate(lPredicted)\narrTestValueGPrs = np.concatenate(lTestValue)\n\nprint(\"Mean squared error: %.10f\"%(totalSqrError/len(seY)))\nprint(\"Root Mean squared error: %.10f\"% np.sqrt(totalSqrError/len(seY)))\nprint(np.sqrt(totalSqrError/len(seY)) * 62)",
"Mean squared error: 0.0000472519\nRoot Mean squared error: 0.0068740045\n0.42618828147\n"
]
],
[
[
"Standardized Data",
"_____no_output_____"
]
],
[
[
"lPredicted = []\nlTestValue = []\n\nkf = KFold(n_splits=10, shuffle=True)\ntotalSqrError = 0\nfor train_idx, test_idx in kf.split(arrXStand):\n reg = gaussian_process.GaussianProcessRegressor()\n linearModel = reg.fit(arrXStand[train_idx], seY[train_idx])\n arrTestPredicted = reg.predict(arrXStand[test_idx])\n totalSqrError += np.sum((arrTestPredicted - seY[test_idx]) ** 2)\n \n lPredicted.append(arrTestPredicted)\n lTestValue.append(seY[test_idx])\n \narrPredictedGPstd = np.concatenate(lPredicted)\narrTestValueGPstd = np.concatenate(lTestValue)\n\nprint(\"Mean squared error: %.10f\"%(totalSqrError/len(seY)))\nprint(\"Root Mean squared error: %.10f\"% np.sqrt(totalSqrError/len(seY)))\nprint(np.sqrt(totalSqrError/len(seY)) * 62)",
"Mean squared error: 0.1657577260\nRoot Mean squared error: 0.4071335482\n25.242279986\n"
]
],
[
[
"# Cross Validation of KernelRidgeRegression",
"_____no_output_____"
],
[
"Rescaled Data",
"_____no_output_____"
]
],
[
[
"lPredicted = []\nlTestValue = []\n\nkf = KFold(n_splits=10, shuffle=True)\ntotalSqrError = 0\nfor train_idx, test_idx in kf.split(arrXScaled):\n reg = kernel_ridge.KernelRidge(kernel='rbf', gamma='auto')\n linearModel = reg.fit(arrXScaled[train_idx], seY[train_idx])\n arrTestPredicted = reg.predict(arrXScaled[test_idx])\n totalSqrError += np.sum((arrTestPredicted - seY[test_idx]) ** 2)\n \n lPredicted.append(arrTestPredicted)\n lTestValue.append(seY[test_idx])\n \narrPredictedKRRrs = np.concatenate(lPredicted)\narrTestValueKRRrs = np.concatenate(lTestValue)\n\n\nprint(\"Mean squared error: %.10f\"%(totalSqrError/len(seY)))\nprint(\"Root Mean squared error: %.10f\"% np.sqrt(totalSqrError/len(seY)))\nprint(np.sqrt(totalSqrError/len(seY)) * 62)",
"_____no_output_____"
]
],
[
[
"Standardized Data",
"_____no_output_____"
]
],
[
[
"lPredicted = []\nlTestValue = []\n\nkf = KFold(n_splits=10, shuffle=True)\ntotalSqrError = 0\nfor train_idx, test_idx in kf.split(arrXStand):\n reg = kernel_ridge.KernelRidge(kernel='rbf', gamma='auto')\n linearModel = reg.fit(arrXStand[train_idx], seY[train_idx])\n arrTestPredicted = reg.predict(arrXStand[test_idx])\n totalSqrError += np.sum((arrTestPredicted - seY[test_idx]) ** 2)\n \n lPredicted.append(arrTestPredicted)\n lTestValue.append(seY[test_idx])\n \narrPredictedKRRstd = np.concatenate(lPredicted)\narrTestValueKRRstd = np.concatenate(lTestValue)\n\nprint(\"Mean squared error: %.10f\"%(totalSqrError/len(seY)))\nprint(\"Root Mean squared error: %.10f\"% np.sqrt(totalSqrError/len(seY)))\nprint(np.sqrt(totalSqrError/len(seY)) * 62)",
"_____no_output_____"
]
],
[
[
"# Cross Validation of Nu-SVR",
"_____no_output_____"
],
[
"Rescaled Data",
"_____no_output_____"
]
],
[
[
"lPredicted = []\nlTestValue = []\n\nkf = KFold(n_splits=10, shuffle=True)\ntotalSqrError = 0\nfor train_idx, test_idx in kf.split(arrXScaled):\n reg = svm.NuSVR(kernel='rbf', tol=0.00005, nu=0.5, C=1.0, gamma='auto', cache_size=20000)\n linearModel = reg.fit(arrXScaled[train_idx], seY[train_idx])\n arrTestPredicted = reg.predict(arrXScaled[test_idx])\n totalSqrError += np.sum((arrTestPredicted - seY[test_idx]) ** 2)\n \n lPredicted.append(arrTestPredicted)\n lTestValue.append(seY[test_idx])\n \narrPredictedNuSVRrs = np.concatenate(lPredicted)\narrTestValueNuSVRrs = np.concatenate(lTestValue)\n\nprint(\"Mean squared error: %.10f\"%(totalSqrError/len(seY)))\nprint(\"Root Mean squared error: %.10f\"% np.sqrt(totalSqrError/len(seY)))\nprint(np.sqrt(totalSqrError/len(seY)) * 62)",
"_____no_output_____"
]
],
[
[
"Standardized Data",
"_____no_output_____"
]
],
[
[
"lPredicted = []\nlTestValue = []\n\nkf = KFold(n_splits=10, shuffle=True)\ntotalSqrError = 0\nfor train_idx, test_idx in kf.split(arrXStand):\n \n reg = svm.NuSVR(kernel='rbf', tol=0.00005, nu=0.5, C=0.1, gamma='auto', cache_size=20000)\n linearModel = reg.fit(arrXStand[train_idx], seY[train_idx])\n arrTestPredicted = reg.predict(arrXStand[test_idx])\n totalSqrError += np.sum((arrTestPredicted - seY[test_idx]) ** 2)\n \n lPredicted.append(arrTestPredicted)\n lTestValue.append(seY[test_idx])\n \narrPredictedNuSVRstdNu = np.concatenate(lPredicted)\narrTestValueNuSVRstdNu = np.concatenate(lTestValue)\n\nprint(\"Mean squared error: %.10f\"%(totalSqrError/len(seY)))\nprint(\"Root Mean squared error: %.10f\"% np.sqrt(totalSqrError/len(seY)))\nprint(np.sqrt(totalSqrError/len(seY)) * 62)\n",
"_____no_output_____"
]
],
[
[
"# Dump to pickle",
"_____no_output_____"
]
],
[
[
"with open('CV_results_Regression_models.pk', 'wb') as pkfile:\n pickle.dump({'arrPredictedLMrs': arrPredictedLMrs,\n 'arrTestValueLMrs': arrTestValueLMrs,\n 'arrPredictedLMstd': arrPredictedLMstd,\n 'arrTestValueLMstd': arrTestValueLMstd,\n \n 'arrPredictedPLMrs': arrPredictedPLMrs,\n 'arrTestValuePLMrs': arrTestValuePLMrs,\n 'arrPredictedPLMstd': arrPredictedPLMstd,\n 'arrTestValuePLMstd': arrTestValuePLMstd,\n \n 'arrPredictedKNNrs': arrPredictedKNNrs,\n 'arrTestValueKNNrs': arrTestValueKNNrs,\n 'arrPredictedKNNstd': arrPredictedKNNstd,\n 'arrTestValueKNNstd': arrTestValueKNNstd,\n \n 'arrPredictedKNNrsP': arrPredictedKNNrsP,\n 'arrTestValueKNNrsP': arrTestValueKNNrsP,\n 'arrPredictedKNNstdP': arrPredictedKNNstdP,\n 'arrTestValueKNNstdP': arrTestValueKNNstdP,\n \n 'arrPredictedRNRrs': arrPredictedRNRrs,\n 'arrTestValueRNRrs': arrTestValueRNRrs,\n 'arrPredictedRNRstd': arrPredictedRNRstd,\n 'arrTestValueRNRstd': arrTestValueRNRstd,\n \n 'arrPredictedMLPrs': arrPredictedMLPrs,\n 'arrTestValueMLPrs': arrTestValueMLPrs,\n 'arrPredictedMLPstd': arrPredictedMLPstd,\n 'arrTestValueMLPstd': arrTestValueMLPstd,\n \n 'arrPredictedDTrs': arrPredictedDTrs,\n 'arrTestValueDTrs': arrTestValueDTrs,\n 'arrPredictedDTstd': arrPredictedDTstd,\n 'arrTestValueDTstd': arrTestValueDTstd,\n \n 'arrPredictedGPrs': arrPredictedGPrs,\n 'arrTestValueGPrs': arrTestValueGPrs,\n 'arrPredictedGPstd': arrPredictedGPstd,\n 'arrTestValueGPstd': arrTestValueGPstd,\n \n 'arrPredictedKRRrs': arrPredictedKRRrs,\n 'arrTestValueKRRrs': arrTestValueKRRrs,\n 'arrPredictedKRRstd': arrPredictedKRRstd,\n 'arrTestValueKRRstd': arrTestValueKRRstd,\n \n 'arrPredictedNuSVRrs': arrPredictedNuSVRrs,\n 'arrTestValueNuSVRrs': arrTestValueNuSVRrs,\n 'arrPredictedNuSVRstd': arrPredictedNuSVRstd,\n 'arrTestValueNuSVRstd': arrTestValueNuSVRstd,\n \n },\n pkfile)\n \n",
"_____no_output_____"
]
],
[
[
"# Load From Pickle",
"_____no_output_____"
]
],
[
[
"with open('CV_results_Regression_models.pk', 'rb') as pkfile:\n dArr = pickle.load(pkfile)",
"_____no_output_____"
]
],
[
[
"# Benchmark of Each Model",
"_____no_output_____"
]
],
[
[
"def draw_benchmark(model):\n arrPredicted = dArr['arrPredicted' + model]\n arrTestValue = dArr['arrTestValue' + model]\n draw_benchmark_fromArr(arrPredicted, arrTestValue, model)\n \ndef draw_benchmark_fromArr(arrPredicted, arrTestValue, model=None):\n rmse = np.sqrt(np.mean(((arrPredicted - arrTestValue) ** 2)))\n corr = stats.pearsonr(arrPredicted, arrTestValue)[0]\n\n print('RMSE: %.5e'%rmse)\n print('Corr: %.5f'%corr)\n\n left = 0.2\n bottom = 0.2\n width = 0.7\n height = 0.7\n fig = plt.figure(figsize=(4, 4))\n ax = fig.add_axes([left, bottom, width, height]) \n\n \n ax.set_xlim((-0.003, 0.008))\n ax.set_ylim((-0.003, 0.008))\n ax.set_xlabel('Predicted RT shift')\n ax.set_ylabel('Real RT shift')\n ax.grid()\n\n ax.scatter(arrPredicted, arrTestValue,\n alpha=0.02, color='k', marker='o', rasterized=True)\n\n if model is not None:\n plt.savefig('Predicted_RT_shift_vs_Real_RT_shift_{}.pdf'.format(model), format='pdf', dpi=500)",
"_____no_output_____"
],
[
"#Linear Model, Rescaled\ndraw_benchmark('LMrs')",
"_____no_output_____"
],
[
"#Linear Model, Standardized\ndraw_benchmark('LMstd')",
"_____no_output_____"
],
[
"#Polynomial Linear Model, Rescaled\ndraw_benchmark('PLMrs')",
"_____no_output_____"
],
[
"#Polynomial Linear Model, standardized\ndraw_benchmark('PLMstd')",
"_____no_output_____"
],
[
"#KNN, Rescaled\ndraw_benchmark('KNNrs')",
"_____no_output_____"
],
[
"#KNN, Standardized\ndraw_benchmark('KNNstd')",
"_____no_output_____"
],
[
"#KNN, Rescaled, Polynomial Data\ndraw_benchmark('KNNrsP')",
"_____no_output_____"
],
[
"#KNN, Standardized, Polynomial Data\ndraw_benchmark('KNNstdP')",
"_____no_output_____"
],
[
"#Radius Neighbor Regressor, rescaled\ndraw_benchmark('RNRrs')",
"_____no_output_____"
],
[
"#Radius Neighbor Regressor, standardized\ndraw_benchmark('RNRstd')",
"_____no_output_____"
],
[
"#Multi Layer Perceptron, rescaled\ndraw_benchmark('MLPrs')",
"_____no_output_____"
],
[
"#Multi Layer Perceptron, std\ndraw_benchmark('MLPstd')",
"_____no_output_____"
],
[
"#Decision Tree, Rescaled\ndraw_benchmark('DTrs')",
"_____no_output_____"
],
[
"#Decision Tree, Standardized\ndraw_benchmark('DTstd')",
"_____no_output_____"
],
[
"#Gaussian Process, Rescaled\ndraw_benchmark('GPrs')",
"_____no_output_____"
],
[
"#Gaussian Process, Standardized\ndraw_benchmark('GPstd')",
"_____no_output_____"
],
[
"#KernalRidgeRegression, Rescaled\ndraw_benchmark('KRRrs')",
"_____no_output_____"
],
[
"#KernalRidgeRegression, Rescaled\ndraw_benchmark('KRRstd')",
"_____no_output_____"
],
[
"#NuSVR, Rescaled\ndraw_benchmark('NuSVRrs')",
"_____no_output_____"
],
[
"#NuSVR, Standardized\ndraw_benchmark('NuSVRstd')",
"_____no_output_____"
],
[
"#NuSVR, Rescaled, gamma=1\n#draw_benchmark('NuSVRrsg1')",
"_____no_output_____"
],
[
"#NuSVR, Standardized, gamma=1\n#draw_benchmark('NuSVRstdg1')",
"_____no_output_____"
],
[
"svr = svm.NuSVR(kernel='rbf', cache_size=20000)\nparams = {\"tol\": [0.0005, 0.0001, 0.00005],\n \"nu\":[0.7, 0.5, 0.3], \n \"C\":[0.01, 0.1, 1, 10],\n \"gamma\": [0.001, 0.01, 0.1, 0.2, 1][::-1]}\n\ngrid_search = GridSearchCV(svr, params, n_jobs=32, scoring='r2', verbose=2, cv=5)\ngrid_search.fit(arrXStand, seY)\n\nprint(\"Best parameters set found on development set:\")\nprint()\nprint(grid_search.best_params_)\nprint()\nprint(\"Grid scores on development set:\")\nprint()\nfor params, mean_score, scores in grid_search.grid_scores_:\n print(\"%0.3f (+/-%0.03f) for %r\"\n % (mean_score, scores.std() * 2, params))\nprint()",
"Fitting 5 folds for each of 180 candidates, totalling 900 fits\n[CV] nu=0.7, tol=0.0005, C=0.01, gamma=1 .............................\n[CV] nu=0.7, tol=0.0005, C=0.01, gamma=1 .............................\n[CV] nu=0.7, tol=0.0005, C=0.01, gamma=1 .............................\n[CV] nu=0.7, tol=0.0001, C=0.01, gamma=1 .............................\n[CV] nu=0.7, tol=0.0005, C=0.01, gamma=1 .............................\n[CV] nu=0.7, tol=0.0001, C=0.01, gamma=1 .............................\n[CV] nu=0.7, tol=0.0001, C=0.01, gamma=1 .............................\n[CV] nu=0.7, tol=0.0005, C=0.01, gamma=1 .............................\n[CV] nu=0.7, tol=0.0001, C=0.01, gamma=1 .............................\n[CV] nu=0.7, tol=5e-05, C=0.01, gamma=1 ..............................\n[CV] nu=0.7, tol=0.0001, C=0.01, gamma=1 .............................\n[CV] nu=0.7, tol=5e-05, C=0.01, gamma=1 ..............................\n[CV] nu=0.7, tol=5e-05, C=0.01, gamma=1 ..............................\n[CV] nu=0.7, tol=5e-05, C=0.01, gamma=1 ..............................\n[CV] nu=0.5, tol=0.0005, C=0.01, gamma=1 .............................\n[CV] nu=0.7, tol=5e-05, C=0.01, gamma=1 ..............................\n[CV] nu=0.5, tol=0.0005, C=0.01, gamma=1 .............................\n[CV] nu=0.5, tol=0.0005, C=0.01, gamma=1 .............................\n[CV] nu=0.5, tol=0.0005, C=0.01, gamma=1 .............................\n[CV] nu=0.5, tol=0.0005, C=0.01, gamma=1 .............................\n[CV] nu=0.5, tol=0.0001, C=0.01, gamma=1 .............................\n[CV] nu=0.5, tol=0.0001, C=0.01, gamma=1 .............................\n[CV] nu=0.5, tol=0.0001, C=0.01, gamma=1 .............................\n[CV] nu=0.5, tol=0.0001, C=0.01, gamma=1 .............................\n[CV] nu=0.5, tol=0.0001, C=0.01, gamma=1 .............................\n[CV] nu=0.5, tol=5e-05, C=0.01, gamma=1 ..............................\n[CV] nu=0.5, tol=5e-05, C=0.01, gamma=1 
..............................\n[CV] nu=0.5, tol=5e-05, C=0.01, gamma=1 ..............................\n[CV] nu=0.5, tol=5e-05, C=0.01, gamma=1 ..............................\n[CV] nu=0.5, tol=5e-05, C=0.01, gamma=1 ..............................\n[CV] nu=0.3, tol=0.0005, C=0.01, gamma=1 .............................\n[CV] nu=0.3, tol=0.0005, C=0.01, gamma=1 .............................\n[CV] .............. nu=0.3, tol=0.0005, C=0.01, gamma=1, total= 10.9s\n[CV] nu=0.3, tol=0.0005, C=0.01, gamma=1 .............................\n[CV] .............. nu=0.3, tol=0.0005, C=0.01, gamma=1, total= 11.8s\n[CV] nu=0.3, tol=0.0005, C=0.01, gamma=1 .............................\n[CV] .............. nu=0.5, tol=0.0005, C=0.01, gamma=1, total= 14.7s\n[CV] nu=0.3, tol=0.0005, C=0.01, gamma=1 .............................\n[CV] .............. nu=0.5, tol=0.0005, C=0.01, gamma=1, total= 16.6s\n[CV] nu=0.3, tol=0.0001, C=0.01, gamma=1 .............................\n[CV] .............. nu=0.5, tol=0.0005, C=0.01, gamma=1, total= 16.2s\n[CV] nu=0.3, tol=0.0001, C=0.01, gamma=1 .............................\n[CV] .............. nu=0.5, tol=0.0005, C=0.01, gamma=1, total= 16.1s\n[CV] nu=0.3, tol=0.0001, C=0.01, gamma=1 .............................\n[CV] .............. nu=0.5, tol=0.0001, C=0.01, gamma=1, total= 17.1s\n[CV] nu=0.3, tol=0.0001, C=0.01, gamma=1 .............................\n[CV] .............. nu=0.3, tol=0.0005, C=0.01, gamma=1, total= 9.8s\n[CV] ............... nu=0.5, tol=5e-05, C=0.01, gamma=1, total= 18.2s\n[CV] nu=0.3, tol=0.0001, C=0.01, gamma=1 .............................\n[CV] nu=0.3, tol=5e-05, C=0.01, gamma=1 ..............................\n[CV] ............... nu=0.5, tol=5e-05, C=0.01, gamma=1, total= 18.1s\n[CV] nu=0.3, tol=5e-05, C=0.01, gamma=1 ..............................\n[CV] .............. nu=0.5, tol=0.0001, C=0.01, gamma=1, total= 18.9s\n[CV] nu=0.3, tol=5e-05, C=0.01, gamma=1 ..............................\n[CV] .............. 
nu=0.5, tol=0.0005, C=0.01, gamma=1, total= 18.1s\n[CV] nu=0.3, tol=5e-05, C=0.01, gamma=1 ..............................\n[CV] ............... nu=0.5, tol=5e-05, C=0.01, gamma=1, total= 18.9s\n[CV] nu=0.3, tol=5e-05, C=0.01, gamma=1 ..............................\n[CV] .............. nu=0.5, tol=0.0001, C=0.01, gamma=1, total= 19.3s\n[CV] nu=0.7, tol=0.0005, C=0.01, gamma=0.2 ...........................\n[CV] ............... nu=0.5, tol=5e-05, C=0.01, gamma=1, total= 18.8s\n[CV] nu=0.7, tol=0.0005, C=0.01, gamma=0.2 ...........................\n[CV] .............. nu=0.3, tol=0.0005, C=0.01, gamma=1, total= 9.8s\n[CV] nu=0.7, tol=0.0005, C=0.01, gamma=0.2 ...........................\n[CV] .............. nu=0.5, tol=0.0001, C=0.01, gamma=1, total= 19.9s\n[CV] nu=0.7, tol=0.0005, C=0.01, gamma=0.2 ...........................\n[CV] .............. nu=0.5, tol=0.0001, C=0.01, gamma=1, total= 19.3s\n[CV] nu=0.7, tol=0.0005, C=0.01, gamma=0.2 ...........................\n[CV] ............... nu=0.5, tol=5e-05, C=0.01, gamma=1, total= 20.2s\n[CV] nu=0.7, tol=0.0001, C=0.01, gamma=0.2 ...........................\n[CV] .............. nu=0.7, tol=0.0005, C=0.01, gamma=1, total= 21.6s\n[CV] nu=0.7, tol=0.0001, C=0.01, gamma=0.2 ...........................\n[CV] .............. nu=0.7, tol=0.0005, C=0.01, gamma=1, total= 21.8s\n[CV] nu=0.7, tol=0.0001, C=0.01, gamma=0.2 ...........................\n[CV] .............. nu=0.7, tol=0.0005, C=0.01, gamma=1, total= 22.5s\n[CV] nu=0.7, tol=0.0001, C=0.01, gamma=0.2 ...........................\n[CV] .............. nu=0.7, tol=0.0005, C=0.01, gamma=1, total= 22.5s\n[CV] nu=0.7, tol=0.0001, C=0.01, gamma=0.2 ...........................\n[CV] .............. nu=0.7, tol=0.0005, C=0.01, gamma=1, total= 22.4s\n[CV] nu=0.7, tol=5e-05, C=0.01, gamma=0.2 ............................\n[CV] .............. 
nu=0.7, tol=0.0001, C=0.01, gamma=1, total= 23.6s\n[CV] nu=0.7, tol=5e-05, C=0.01, gamma=0.2 ............................\n[CV] .............. nu=0.3, tol=0.0005, C=0.01, gamma=1, total= 11.0s\n[CV] nu=0.7, tol=5e-05, C=0.01, gamma=0.2 ............................\n[CV] ............... nu=0.7, tol=5e-05, C=0.01, gamma=1, total= 24.4s\n[CV] nu=0.7, tol=5e-05, C=0.01, gamma=0.2 ............................\n[CV] ............... nu=0.7, tol=5e-05, C=0.01, gamma=1, total= 24.2s\n[CV] nu=0.7, tol=5e-05, C=0.01, gamma=0.2 ............................\n[CV] ............... nu=0.7, tol=5e-05, C=0.01, gamma=1, total= 24.1s\n[CV] nu=0.5, tol=0.0005, C=0.01, gamma=0.2 ...........................\n[CV] ............... nu=0.7, tol=5e-05, C=0.01, gamma=1, total= 25.2s\n[CV] .............. nu=0.7, tol=0.0001, C=0.01, gamma=1, total= 25.1s\n[CV] nu=0.5, tol=0.0005, C=0.01, gamma=0.2 ...........................\n[CV] nu=0.5, tol=0.0005, C=0.01, gamma=0.2 ...........................\n[CV] .............. nu=0.7, tol=0.0001, C=0.01, gamma=1, total= 25.2s\n[CV] nu=0.5, tol=0.0005, C=0.01, gamma=0.2 ...........................\n[CV] .............. nu=0.7, tol=0.0001, C=0.01, gamma=1, total= 25.6s\n[CV] nu=0.5, tol=0.0005, C=0.01, gamma=0.2 ...........................\n[CV] .............. nu=0.7, tol=0.0001, C=0.01, gamma=1, total= 26.7s\n[CV] nu=0.5, tol=0.0001, C=0.01, gamma=0.2 ...........................\n[CV] .............. nu=0.3, tol=0.0001, C=0.01, gamma=1, total= 11.8s\n[CV] nu=0.5, tol=0.0001, C=0.01, gamma=0.2 ...........................\n[CV] .............. nu=0.3, tol=0.0001, C=0.01, gamma=1, total= 11.4s\n[CV] nu=0.5, tol=0.0001, C=0.01, gamma=0.2 ...........................\n[CV] .............. nu=0.3, tol=0.0001, C=0.01, gamma=1, total= 12.1s\n[CV] nu=0.5, tol=0.0001, C=0.01, gamma=0.2 ...........................\n[CV] ............... 
nu=0.7, tol=5e-05, C=0.01, gamma=1, total= 26.9s\n[CV] nu=0.5, tol=0.0001, C=0.01, gamma=0.2 ...........................\n[CV] .............. nu=0.3, tol=0.0001, C=0.01, gamma=1, total= 13.3s\n[CV] nu=0.5, tol=5e-05, C=0.01, gamma=0.2 ............................\n[CV] .............. nu=0.3, tol=0.0001, C=0.01, gamma=1, total= 11.7s\n[CV] nu=0.5, tol=5e-05, C=0.01, gamma=0.2 ............................\n[CV] ............... nu=0.3, tol=5e-05, C=0.01, gamma=1, total= 12.2s\n[CV] nu=0.5, tol=5e-05, C=0.01, gamma=0.2 ............................\n[CV] ............... nu=0.3, tol=5e-05, C=0.01, gamma=1, total= 11.1s\n"
],
[
"grid_search.best_params_",
"_____no_output_____"
],
[
"(grid_search.best_score_)**0.5",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
eca7cbc98302e9647d8a7da56f713394b3fde933 | 9,419 | ipynb | Jupyter Notebook | jerry/resnet/resnet152_self_train.ipynb | tychen5/Audio_Tagging_Challenge | 4602400433d37958d95ebf40a3c0798d17cc53c6 | [
"MIT"
] | 3 | 2019-01-22T03:14:32.000Z | 2019-08-17T02:22:06.000Z | jerry/resnet/resnet152_self_train.ipynb | tychen5/Audio_Tagging_Challenge | 4602400433d37958d95ebf40a3c0798d17cc53c6 | [
"MIT"
] | null | null | null | jerry/resnet/resnet152_self_train.ipynb | tychen5/Audio_Tagging_Challenge | 4602400433d37958d95ebf40a3c0798d17cc53c6 | [
"MIT"
] | null | null | null | 33.759857 | 147 | 0.539442 | [
[
[
"import os\nimport shutil\nimport numpy as np\nimport pickle as pk\nimport pandas as pd\nfrom keras.utils import to_categorical ,Sequence\nfrom keras import losses, models, optimizers\nfrom keras.models import load_model\nfrom keras.models import Sequential\nfrom keras.activations import relu, softmax\nfrom keras.callbacks import (EarlyStopping, LearningRateScheduler,\n ModelCheckpoint, TensorBoard, ReduceLROnPlateau)\nfrom keras.layers import Activation, LeakyReLU\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras import backend as K\nfrom sklearn.model_selection import KFold\nfrom random_eraser import get_random_eraser\nfrom keras.optimizers import Adam\nfrom os.path import join\nimport resnet\nfrom sklearn.utils import shuffle\n\nmap_dict = pk.load(open('data/map.pkl' , 'rb'))\n",
"_____no_output_____"
],
[
"semi = pd.read_csv('data/cotrain/Y_selftrain_ens_verified.csv')",
"_____no_output_____"
],
[
"semi_map = {}\n\nsemi_name = semi['fname'].values\nsemi_label_verified = semi['label_verified'].values\n\nfor idx ,d in enumerate( semi_name):\n semi_map[d] = semi_label_verified[idx]\n",
"_____no_output_____"
],
[
"unverified_df = pd.read_csv('data/train_label.csv')\ntest_df = pd.read_csv('data/sample_submission.csv')\n\nunverified_df = unverified_df[unverified_df['fname'].isin(semi_name)]\nunverified_df = unverified_df.drop(columns=['manually_verified'])\nunverified_df['label_verified'] = unverified_df['fname'].map(semi_map)\n\ntest_df = test_df[test_df['fname'].isin(semi_name)]\ntest_df['label_verified'] = test_df['fname'].map(semi_map)\n\nunverified_idx = unverified_df.index.values\ntest_idx = test_df.index.values\n\ndf = pd.concat([unverified_df , test_df])\ndf = df.drop(columns=['label'])\ndf['trans'] = df['label_verified'].map(map_dict)\ndf['onehot'] = df['trans'].apply(lambda x: to_categorical(x,num_classes=41))",
"_____no_output_____"
],
[
"X_unverified = np.load('data/train/mfcc3/X_train.npy')[unverified_idx]\nX_test = np.load('data/test/mfcc3/X_test.npy')[test_idx]\n\nX_semi = np.append(X_unverified,X_test , axis=0)\nY_semi = np.array(df['onehot'].tolist()).reshape(-1,41)\n\nprint(X_semi.shape)\nprint(Y_semi.shape)",
"_____no_output_____"
],
[
" # data generator ====================================================================================\nclass MixupGenerator():\n def __init__(self, X_train, y_train, batch_size=32, alpha=0.2, shuffle=True, datagen=None):\n self.X_train = X_train\n self.y_train = y_train\n self.batch_size = batch_size\n self.alpha = alpha\n self.shuffle = shuffle\n self.sample_num = len(X_train)\n self.datagen = datagen\n\n def __call__(self):\n while True:\n indexes = self.__get_exploration_order()\n itr_num = int(len(indexes) // (self.batch_size * 2))\n\n for i in range(itr_num):\n batch_ids = indexes[i * self.batch_size * 2:(i + 1) * self.batch_size * 2]\n X, y = self.__data_generation(batch_ids)\n\n yield X, y\n\n def __get_exploration_order(self):\n indexes = np.arange(self.sample_num)\n\n if self.shuffle:\n np.random.shuffle(indexes)\n\n return indexes\n\n def __data_generation(self, batch_ids):\n _, h, w, c = self.X_train.shape\n l = np.random.beta(self.alpha, self.alpha, self.batch_size)\n X_l = l.reshape(self.batch_size, 1, 1, 1)\n y_l = l.reshape(self.batch_size, 1)\n\n X1 = self.X_train[batch_ids[:self.batch_size]]\n X2 = self.X_train[batch_ids[self.batch_size:]]\n X = X1 * X_l + X2 * (1 - X_l)\n\n if self.datagen:\n for i in range(self.batch_size):\n X[i] = self.datagen.random_transform(X[i])\n X[i] = self.datagen.standardize(X[i])\n\n if isinstance(self.y_train, list):\n y = []\n\n for y_train_ in self.y_train:\n y1 = y_train_[batch_ids[:self.batch_size]]\n y2 = y_train_[batch_ids[self.batch_size:]]\n y.append(y1 * y_l + y2 * (1 - y_l))\n else:\n y1 = self.y_train[batch_ids[:self.batch_size]]\n y2 = self.y_train[batch_ids[self.batch_size:]]\n y = y1 * y_l + y2 * (1 - y_l)\n\n return X, y",
"_____no_output_____"
]
],
[
[
"# Training Semi Data",
"_____no_output_____"
]
],
[
[
"\nmodel_path = 'model_full_resnet152'\nrefine_path = 'model_full_resnet152_refine_co'\n\nall_x = np.concatenate( (np.load('data/train/mfcc3/X_train.npy') , np.load('data/test/mfcc3/X_test.npy')))\n\nif not os.path.exists(refine_path):\n os.mkdir(refine_path)\n\nfor i in range(1,11):\n X_train = np.load('data/ten_fold_data/X_train_{}.npy'.format(i)) \n Y_train = np.load('data/ten_fold_data/Y_train_{}.npy'.format(i)) \n X_test = np.load('data/ten_fold_data/X_valid_{}.npy'.format(i))\n Y_test = np.load('data/ten_fold_data/Y_valid_{}.npy'.format(i))\n \n X_train = np.append(X_train,X_semi , axis=0)\n Y_train = np.append(Y_train,Y_semi , axis=0)\n \n X_train , Y_train = shuffle(X_train, Y_train, random_state=5)\n \n print(X_train.shape)\n print(Y_train.shape)\n print(X_test.shape)\n print(Y_test.shape)\n \n model = load_model(join(model_path,'best_{}.h5'.format(i)))\n \n checkpoint = ModelCheckpoint(join(refine_path , 'semi_self_%d_{val_acc:.5f}.h5'%i), monitor='val_acc', verbose=1, save_best_only=True)\n early = EarlyStopping(monitor=\"val_acc\", mode=\"max\", patience=30)\n callbacks_list = [checkpoint, early]\n \n datagen = ImageDataGenerator(\n featurewise_center=True, # set input mean to 0 over the dataset\n width_shift_range=0.2,\n height_shift_range=0.2,\n horizontal_flip=True,\n preprocessing_function=get_random_eraser(v_l=np.min(all_x), v_h=np.max(all_x)) # Trainset's boundaries.\n )\n \n mygenerator = MixupGenerator(X_train, Y_train, alpha=1.0, batch_size=128, datagen=datagen)\n \n model.compile(loss='categorical_crossentropy',\n optimizer=Adam(lr=0.0001),\n metrics=['accuracy'])\n # mixup\n history = model.fit_generator(mygenerator(),\n steps_per_epoch= X_train.shape[0] // 128,\n epochs=10000,\n validation_data=(X_test,Y_test), callbacks=callbacks_list)\n # normalize\n# history = model.fit(X_train, Y_train, validation_data=(X_test, Y_test), callbacks=callbacks_list,\n# batch_size=32, epochs=10000)\n\n \n# break",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
eca7cd47ccfd301d4ecd7e45f071a50ab7c34632 | 36,714 | ipynb | Jupyter Notebook | Final_Model_Notebook.ipynb | buildweek-saltiest-hacker/data-science | 8e682a18fc1175b946e8870afc12bf1d65cfd01d | [
"MIT"
] | null | null | null | Final_Model_Notebook.ipynb | buildweek-saltiest-hacker/data-science | 8e682a18fc1175b946e8870afc12bf1d65cfd01d | [
"MIT"
] | null | null | null | Final_Model_Notebook.ipynb | buildweek-saltiest-hacker/data-science | 8e682a18fc1175b946e8870afc12bf1d65cfd01d | [
"MIT"
] | 1 | 2020-06-18T01:47:57.000Z | 2020-06-18T01:47:57.000Z | 27.235905 | 143 | 0.417743 | [
[
[
"Import Things needed for the project",
"_____no_output_____"
]
],
[
[
"%%capture\nimport pandas as pd\n!pip install vaderSentiment\nimport vaderSentiment",
"_____no_output_____"
]
],
[
[
"**Loading the data**",
"_____no_output_____"
]
],
[
[
"cleaned = pd.read_csv(\"https://raw.githubusercontent.com/buildweek-saltiest-hacker/data-engineering-api/master/hacker-comments.csv\")\n\ncleaned.head()",
"_____no_output_____"
]
],
[
[
"**Using Vader Sentiment Analysis**",
"_____no_output_____"
]
],
[
[
"from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\n\nscore = SentimentIntensityAnalyzer()",
"_____no_output_____"
]
],
[
[
"Rober Notebook https://github.com/BrokenShell/SaltyHacker/blob/master/nlp.py\n\nVader Documentation\n\nhttps://pypi.org/project/vaderSentiment/",
"_____no_output_____"
],
[
"Creating a the ranking for each comment.",
"_____no_output_____"
]
],
[
[
"# creating a new dataframe that just has the information needed\n\ntext = cleaned['hacker_comment']\nname = cleaned['hacker_name']\n\nsalty_hackers = pd.DataFrame({\n 'Name':name,\n 'Comment': text\n})\n\nsalty_hackers.head()",
"_____no_output_____"
],
[
"comment = salty_hackers['Comment']\n\nranking = []\n\nfor i in comment:\n scores = score.polarity_scores(i)\n final_score = scores['compound']\n rounded_score = round(final_score*10, 2)\n ranking.append(rounded_score)",
"_____no_output_____"
],
[
"salty_hackers['comment_ranking'] = ranking",
"_____no_output_____"
],
[
"salty_hackers.head()",
"_____no_output_____"
],
[
"sample_data = salty_hackers.iloc[:10]\nsample_data",
"_____no_output_____"
],
[
"salty_hackers['comment_ranking'].describe()",
"_____no_output_____"
],
[
"average = salty_hackers.groupby(by='Name').mean()\naverage",
"_____no_output_____"
],
[
"average[:10]",
"_____no_output_____"
],
[
"average['comment_ranking']",
"_____no_output_____"
],
[
"average_dict = average['comment_ranking'].to_dict()\nall_users = average_dict.keys()\nuser_list = list(all_users)",
"_____no_output_____"
],
[
"users = salty_hackers['Name']\nuser_ranking = []\nfor user in users:\n user_rank = average_dict[user]\n round_user_rank = round(user_rank, 2)\n user_ranking.append(round_user_rank)",
"_____no_output_____"
],
[
"user_ranking[:10]",
"_____no_output_____"
],
[
"salty_hackers['user_ranking'] = user_ranking",
"_____no_output_____"
],
[
"salty_hackers.head()",
"_____no_output_____"
],
[
"salty_hackers['user_ranking'].describe()",
"_____no_output_____"
]
],
[
[
"## Exporting Final Data Set",
"_____no_output_____"
]
],
[
[
"compression_opts = dict(method='zip',archive_name='salty_hackers.csv') \n\nsalty_hackers.to_csv('salty_hackers.zip', index=False, compression=compression_opts) ",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
eca7ff48fae37aa99d9dd58f9b713a1de0ce2dc0 | 7,489 | ipynb | Jupyter Notebook | Assessments/.ipynb_checkpoints/Artificial Intelligence 2 - CA 2 Q2-checkpoint.ipynb | lavishthomas/AICourse | 0b630330a65a96a89a4d727570abbd47de97b632 | [
"MIT"
] | null | null | null | Assessments/.ipynb_checkpoints/Artificial Intelligence 2 - CA 2 Q2-checkpoint.ipynb | lavishthomas/AICourse | 0b630330a65a96a89a4d727570abbd47de97b632 | [
"MIT"
] | null | null | null | Assessments/.ipynb_checkpoints/Artificial Intelligence 2 - CA 2 Q2-checkpoint.ipynb | lavishthomas/AICourse | 0b630330a65a96a89a4d727570abbd47de97b632 | [
"MIT"
] | null | null | null | 27.232727 | 122 | 0.446522 | [
[
[
"import numpy as np\nimport pandas as pd\nimport scipy.sparse\n\nfrom sklearn import metrics\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.model_selection import train_test_split",
"_____no_output_____"
],
[
"data_frame = pd.read_csv(\"QuoraWithTopic.csv\", encoding='utf-8')\ndata_frame.head(5)",
"_____no_output_____"
],
[
"# max_df is between 0-1 or an INT\ncount_vectorizer = CountVectorizer(max_df=0.90, min_df=4, stop_words=\"english\")\ndoc_term_matrix = count_vectorizer.fit_transform(data_frame[\"question\"])\nlen(count_vectorizer.get_feature_names())\n",
"_____no_output_____"
],
[
"doc_term_matrix = count_vectorizer.fit_transform(data_frame[\"question\"])\ntarget_topic = data_frame['Topic number']",
"_____no_output_____"
],
[
"X_train, X_test, y_train, y_test = train_test_split(doc_term_matrix,target_topic, test_size = 0.3, random_state = 1)",
"_____no_output_____"
],
[
"mnc_classifier = MultinomialNB()\nmnc_classifier.fit(X_train, y_train)",
"_____no_output_____"
],
[
"mnc_model_predictions = mnc_classifier.predict(X_test)",
"_____no_output_____"
],
[
"print(metrics.confusion_matrix(y_test, mnc_model_predictions))",
"[[5387 241 175 411 227 135 197 157]\n [ 227 5551 182 368 133 230 205 186]\n [ 181 128 6540 333 155 155 176 178]\n [ 92 108 146 8550 140 144 253 122]\n [ 156 71 98 208 6341 148 180 133]\n [ 152 152 160 331 166 5984 202 159]\n [ 184 175 170 530 231 262 5590 205]\n [ 123 209 226 250 176 223 177 5215]]\n"
],
[
"print(metrics.classification_report(y_test, mnc_model_predictions))",
" precision recall f1-score support\n\n 0 0.83 0.78 0.80 6930\n 1 0.84 0.78 0.81 7082\n 2 0.85 0.83 0.84 7846\n 3 0.78 0.89 0.83 9555\n 4 0.84 0.86 0.85 7335\n 5 0.82 0.82 0.82 7306\n 6 0.80 0.76 0.78 7347\n 7 0.82 0.79 0.81 6599\n\n accuracy 0.82 60000\n macro avg 0.82 0.82 0.82 60000\nweighted avg 0.82 0.82 0.82 60000\n\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
eca81db674199ea9c770c7ca09b28089769cf4d0 | 999 | ipynb | Jupyter Notebook | notebook/2_get_data/0_overview.ipynb | Kanbc/stock-analysis | d773313a1bee1883a1eff4695e62ec0f1c124bac | [
"Apache-2.0"
] | null | null | null | notebook/2_get_data/0_overview.ipynb | Kanbc/stock-analysis | d773313a1bee1883a1eff4695e62ec0f1c124bac | [
"Apache-2.0"
] | null | null | null | notebook/2_get_data/0_overview.ipynb | Kanbc/stock-analysis | d773313a1bee1883a1eff4695e62ec0f1c124bac | [
"Apache-2.0"
] | null | null | null | 23.232558 | 198 | 0.575576 | [
[
[
"เธเธฒเธ [เนเธเธงเธเธฒเธเธเธฒเธฃเธซเธฒเธซเธธเนเธเธเธต](https://docs.google.com/spreadsheets/d/1zcPrkgBKzuZTNmR8287a-5mzu2yuAYNUavkUUu_-dbA/edit#gid=1256847632) เธเธตเนเนเธเธขเธงเธดเนเธเธฃเธฒเธฐเธซเนเนเธงเน เนเธเธทเนเธญเธเธณเธฃเธฐเธเธเธเธถเนเธ automate เธเนเธญเธเธเธฒเธฃเธเนเธญเธกเธนเธฅเธเธฑเธเธเธตเน",
"_____no_output_____"
],
[
"- เธเธทเนเธญเธซเธธเนเธ + เธเธฅเธธเนเธก เธญเธธเธเธชเธฒเธซเธเธฃเธฃเธก\n- เธเธเธเธฒเธฃเนเธเธดเธเธขเนเธญเธเธซเธฅเธฑเธ 10 เธเธต (เธเนเธญเธกเธนเธฅเนเธเนเธฒเนเธญเธฒเนเธ excel เธเนเธญเธกเธนเธฅเนเธซเธกเน scrape เธเธฒเธเธซเธเนเธฒเนเธงเนเธ)\n- เธฃเธฒเธเธฒเธซเธธเนเธ Currently\n- เธเธถเธเธเธเธเธฒเธฃเนเธเธดเธเธเธตเนเธซเธกเนเน เธเธฒเธเธซเธเนเธฒเนเธงเนเธ Settrade",
"_____no_output_____"
]
]
] | [
"markdown"
] | [
[
"markdown",
"markdown"
]
] |
eca82417c0f878d77c88a2ffcc73b56e1613a66d | 4,573 | ipynb | Jupyter Notebook | example-notebooks/crew-scheduling.ipynb | rafaeldelrey/pyschedule | 96ed5abc05fdad5d7e93393d627c5316e90102fe | [
"Apache-2.0"
] | null | null | null | example-notebooks/crew-scheduling.ipynb | rafaeldelrey/pyschedule | 96ed5abc05fdad5d7e93393d627c5316e90102fe | [
"Apache-2.0"
] | null | null | null | example-notebooks/crew-scheduling.ipynb | rafaeldelrey/pyschedule | 96ed5abc05fdad5d7e93393d627c5316e90102fe | [
"Apache-2.0"
] | null | null | null | 29.694805 | 567 | 0.510387 | [
[
[
"# Crew Scheduling\n\n",
"_____no_output_____"
],
[
"We consider 6 days with 4 places and want to schedule a bunch of flights connecting the places. One crew starts and ends at each place. The goal is to maximize the number of scheduled flights. All gaps in the schedule of a crew have to be filled with stays at the respective places. We give the gaps a higher negative completion time cost (-2) than the flights (-1) to ensure that scheduling flights has priority compared to scheduling stays. Because all completion time costs are negative, all non-scheduled tasks are pushed to the end of the planning horizon.",
"_____no_output_____"
]
],
[
[
"import sys;sys.path.append('../src')\n\nn_days = 6\ndays = range(n_days)\nplaces = ['A','B','C','D']\ncrews = [ 'Crew_%s'%place for place in places ]\n\n# Number of flights for each orig-dest pair\nflights =\\\n{\n('A','B') : 2,\n('B','C') : 2,\n('C','D') : 2,\n('D','A') : 3,\n('A','C') : 2,\n('B','D') : 2\n}\n\n# Stays have to fill gaps, at most n_days-2 gaps can happen\nstays =\\\n{\n'A' : n_days-2,\n'B' : n_days-2,\n'C' : n_days-2,\n'D' : n_days-2\n}",
"_____no_output_____"
]
],
[
[
"# Solving",
"_____no_output_____"
]
],
[
[
"from pyschedule import Scenario, solvers, plotters, alt\n\n# Create employee scheduling scenari\nS = Scenario('crew_scheduling',horizon=2*n_days)\n\n# Create crew resources\nfor crew in crews:\n S.Resource(crew)\n\n# Create flight tasks\nfor orig,dest in flights.keys():\n for i in range(flights[orig,dest]):\n flight = S.Task('%s_%s_%i'%(orig,dest,i))\n flight.orig = orig\n flight.dest = dest\n flight += alt( S.resources() )\n # Push flight to end of planning horizon\n S += flight*-1\n\n# Create start and end tasks of crews\nfor crew in crews:\n # Start task\n start = S.Task('first_%s'%crew[-1])\n start.dest = crew[-1]\n start.orig = None\n start += S[crew]\n S += start >= 0\n # End task\n end = S.Task('last_%s'%crew[-1])\n end.orig = crew[-1]\n end.dest = None\n end += S[crew]\n S += end <= n_days\n\n# Create stay tasks\nfor place in stays:\n for i in range(stays[place]):\n stay = S.Task('stay_%s_%i'%(place,i))\n stay.orig = place\n stay.dest = place\n stay += alt( S.resources() )\n # Push stay to end of planning horizon (stronger than flights)\n S += stay*-2\n\n# Connection constraints\nfor crew in crews:\n for place in places:\n for day in days:\n # Ensure that dest and orig are the same of consecutive tasks\n select = lambda T,t,place=place,day=day : T.dest==place and t==day or T.orig==place and t==day+1\n C = S[crew][select].diff <= 0\n # Select a customized name for capacity constraint\n C.name = 'Capacity_%s_%s_%i'%(crew,place,day)\n S += C\n\n# solve and plot problem\nif solvers.mip.solve(S,msg=1):\n %matplotlib inline\n plotters.matplotlib.plot(S,fig_size=(12,5))\nelse:\n print('no solution found')",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
eca825d84b5233b9ee698184abdcd8d1a6149aa5 | 5,107 | ipynb | Jupyter Notebook | notebooks/ruta-training/Chapter 2 - Specific tasks/Exercise 5 - Undesirable Effect Relations in Tables.ipynb | averbis/IRuta | 437488b1bd88ac5c3436d7858fb67d262dabcea3 | [
"Apache-2.0"
] | 6 | 2021-05-21T13:41:31.000Z | 2021-12-10T10:17:59.000Z | notebooks/ruta-training/Chapter 2 - Specific tasks/Exercise 5 - Undesirable Effect Relations in Tables.ipynb | averbis/IRuta | 437488b1bd88ac5c3436d7858fb67d262dabcea3 | [
"Apache-2.0"
] | 12 | 2021-04-12T13:59:07.000Z | 2022-01-03T17:02:43.000Z | notebooks/ruta-training/Chapter 2 - Specific tasks/Exercise 5 - Undesirable Effect Relations in Tables.ipynb | averbis/IRuta | 437488b1bd88ac5c3436d7858fb67d262dabcea3 | [
"Apache-2.0"
] | null | null | null | 30.951515 | 249 | 0.605835 | [
[
[
"# Exercise 5: Undesirable Effect Relations in Tables\n\nThe goal of this exercise is to create a simple rule script for extracting undesirable effect information from tables. Declare a new annotation type named `UndesirableEffect` with three features of the type `Annotation`. \nThe first feature named `class` represents the symptom class of the undesirable effect. The second feature named `effect` represents the actual effect of the relation. The third feature named `frequency` represents the frequency of the effect.",
"_____no_output_____"
],
[
"First, we take a look at the two example tables.",
"_____no_output_____"
]
],
[
[
"%inputDir data/ex5_table1\n%displayMode RUTA_COLORING",
"_____no_output_____"
],
[
"%inputDir data/ex5_table2\n%displayMode RUTA_COLORING",
"_____no_output_____"
]
],
[
[
"Now, we write some rules to extract the triples.",
"_____no_output_____"
]
],
[
[
"%inputDir data/ex5_table1\n%outputDir temp/\n%displayMode CSV\n%csvConfig UndesirableEffect frequency class \n\n// We write the output of this script in a temporary directory so that we can apply it to to the other table, too\n%writescript temp/UndesirableEffect.ruta\n%saveTypeSystem temp/UndesirableEffectTypeSystem.xml\n\n// We want to use the old DefaultSeeder for obtaining MARKUP annotations\n%configParams --seeders=org.apache.uima.ruta.seed.DefaultSeeder\n\nTYPESYSTEM org.apache.uima.ruta.engine.HtmlTypeSystem;\nUIMAFIT org.apache.uima.ruta.engine.HtmlAnnotator;\nEXEC(HtmlAnnotator, {TAG});\n\nRETAINTYPE(WS, MARKUP);\nTAG{-> TRIM(WS, MARKUP)};\nRETAINTYPE;\n\n// The targeted relation\nDECLARE UndesirableEffect (Annotation class, Annotation effect, Annotation frequency);\n\n// We define a macro action just for shorter rules later\nACTION UE(ANNOTATION class, ANNOTATION effect, ANNOTATION frequency) = \n CREATE(UndesirableEffect, \"class\"= class, \"effect\" = effect, \"frequency\" = frequency) ;\n\n// Annotate frequencies like \"common\" from an external Wordlist\nDECLARE FrequencyInd;\nWORDLIST FrequencyList = 'resources/Frequencies.txt';\nMARKFAST(FrequencyInd, FrequencyList, true);\nFrequencyInd->{ANY f:FrequencyInd{-> UNMARK(f)};};\n\n// Some useful annotations\nINT index;\nDECLARE Cell(INT column);\nDECLARE FirstRow, FirstColumn, FrequencyCell;\nTR{STARTSWITH(TABLE)-> FirstRow};\nTD{STARTSWITH(TR)-> FirstColumn};\nTD{CONTAINS(FrequencyInd)-> FrequencyCell};\n\n// Create Cell annoation with index representing the column number\nTR{->index=0}->{\n TD{->index = index+1, CREATE(Cell,\"column\"=index)};\n};\n\n// Candidates for the effect\nDECLARE Chunk;\nTD{-CONTAINS(FrequencyInd), -PARTOF(FirstColumn), -REGEXP(\"-\") -> Chunk};\nChunk{CONTAINS(COMMA)-> SPLIT(COMMA)};\n\nDECLARE Header;\n\"System organ class\"-> Header;\n\n// the actual rules\nc:TD{PARTOF(FirstColumn),-PARTOF(Header), -PARTOF(FrequencyCell)} \n # f:FrequencyCell \n # 
e:@Chunk{-PARTOF(UndesirableEffect) -> UE(c,e,f)};\n\n// a rule for format 2\nfc:Cell{PARTOF(FirstRow),PARTOF(FrequencyCell),fc.column==c.column}\n # cc:Cell{PARTOF(FirstColumn), -PARTOF(FrequencyCell)}\n # c:@Cell{CONTAINS(Chunk),-PARTOF(UndesirableEffect)}\n ->{e:@Chunk{-PARTOF(UndesirableEffect)-> UE(cc,e,fc)};};",
"_____no_output_____"
]
],
[
[
"We apply the rules above on the second example.",
"_____no_output_____"
]
],
[
[
"%inputDir data/ex5_table2\n%displayMode CSV\n%scriptDir temp/\n%typeSystemDir temp/\n%csvConfig UndesirableEffect frequency class \n\nSCRIPT UndesirableEffect;\nCALL(UndesirableEffect);",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
eca83a8de385c261a94da57ac7808c011a9d6add | 680,418 | ipynb | Jupyter Notebook | EDA_joy/SARC/sarc_EDA.ipynb | suhasgupta791/mids-w251-final-project | aa1ef80685c6d9b5fc8a444e438078150cc0d96c | [
"Apache-2.0"
] | null | null | null | EDA_joy/SARC/sarc_EDA.ipynb | suhasgupta791/mids-w251-final-project | aa1ef80685c6d9b5fc8a444e438078150cc0d96c | [
"Apache-2.0"
] | null | null | null | EDA_joy/SARC/sarc_EDA.ipynb | suhasgupta791/mids-w251-final-project | aa1ef80685c6d9b5fc8a444e438078150cc0d96c | [
"Apache-2.0"
] | 1 | 2020-02-14T01:10:43.000Z | 2020-02-14T01:10:43.000Z | 206.249773 | 292,632 | 0.871476 | [
[
[
"import string\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom wordcloud import WordCloud, STOPWORDS\nfrom matplotlib import pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer",
"_____no_output_____"
]
],
[
[
"## The Exploratory Data Analysis on 1M SARC dataset",
"_____no_output_____"
],
[
"There are 1010826 rows in this balanced dataset, 505413 are sarcastic comments, the other 505413 are not.",
"_____no_output_____"
]
],
[
[
"df = pd.read_csv('train-balanced-sarcasm.csv')\nprint(df.shape)\ndf.head()",
"(1010826, 10)\n"
],
[
"df['label'].value_counts()",
"_____no_output_____"
]
],
[
[
"There are 53 NaN values in the our target variable, comment. 45 are labeled as sarcastic comments, 8 are labeled as normal comments. We want to take these rows out from the dataset. After removing 53 rows, 505405 are sarcastic comments, 505368 are normal comments. Since we only remove a small portion of the data, the dataset is still balanced.",
"_____no_output_____"
]
],
[
[
"df[df['comment'].isna()]",
"_____no_output_____"
],
[
"df[df['comment'].isna()]['label'].value_counts()",
"_____no_output_____"
],
[
"df.dropna(subset=['comment'], inplace=True)",
"_____no_output_____"
],
[
"df['label'].value_counts()",
"_____no_output_____"
]
],
[
[
"Adding features to the dataset that are computed from the comment text:\n\n- Length of the comment\n- Number of capitals \n- Proportion of capitals\n- Number of exclamation marks\n- Number of question marks\n- Number of punctuation symbols\n- Number of symbols\n- Number of words\n- Number of unique words\n- Proportion of unique words \n- Number of (happy) smilies",
"_____no_output_____"
]
],
[
[
"df['total_length'] = df['comment'].apply(len)\ndf['capitals'] = df['comment'].apply(\n lambda comment: sum(1 for c in comment if c.isupper()))\ndf['caps_vs_length'] = df.apply(\n lambda row: float(row['capitals'])/float(row['total_length']), axis=1)\ndf['num_exclamation_marks'] = df['comment'].apply(\n lambda comment: comment.count('!'))\ndf['num_question_marks'] = df['comment'].apply(\n lambda comment: comment.count('?'))\ndf['num_punctuation'] = df['comment'].apply(\n lambda comment: sum(comment.count(w) for w in '.,;:'))\ndf['num_symbols'] = df['comment'].apply(\n lambda comment: sum(comment.count(w) for w in '*&$%'))\ndf['num_words'] = df['comment'].apply(\n lambda comment: len(comment.split()))\ndf['num_unique_words'] = df['comment'].apply(\n lambda comment: len(set(w for w in comment.split())))\ndf['words_vs_unique'] = df['num_unique_words'] / df['num_words']\ndf['num_smilies'] = df['comment'].apply(\n lambda comment: sum(comment.count(w) for w in (':-)', ':)', ';-)', ';)')))",
"_____no_output_____"
],
[
"features = ('total_length', 'capitals', 'caps_vs_length', 'num_exclamation_marks',\n 'num_question_marks', 'num_punctuation', 'num_words', 'num_unique_words',\n 'words_vs_unique', 'num_smilies', 'num_symbols')\ncolumns = ('label')\n\nrows = [{columns:df[f].corr(df[columns])} for f in features]\ndf_correlations = pd.DataFrame(rows, index=features)",
"_____no_output_____"
]
],
[
[
"Some of the feature ideas make sense: They correlate with the target variable, so a model should be able to use them. Other feature ideas don't correlate - so they look less promising.\n\nFor now this feature seem the best candidate: Number of exclamation marks. \"num_exclamation_marks\" has positive relationship with label.",
"_____no_output_____"
]
],
[
[
"df_correlations",
"_____no_output_____"
],
[
"ax = sns.heatmap(df_correlations, vmin=-0.2, vmax=0.2, center=0.0)",
"_____no_output_____"
]
],
[
[
"We will keep only \"num_exclamation_marks\" feature and clean the rest of the features.",
"_____no_output_____"
]
],
[
[
"df['comment'] = df['comment'].apply(lambda x: x.lower())\ndf['parent_comment'] = df['parent_comment'].apply(lambda x: x.lower())",
"_____no_output_____"
],
[
"for i in string.punctuation[1:]:\n df['comment'] = df['comment'].apply(lambda x: x.replace(i, \"\"))\n df['parent_comment'] = df['parent_comment'].apply(lambda x: x.replace(i, \"\"))",
"_____no_output_____"
]
],
[
[
"Distribution of lengths for sarcastic and normal comments is almost the same.\n",
"_____no_output_____"
]
],
[
[
"df.loc[df['label'] == 1, 'comment'].str.len().apply(np.log1p).hist(label='sarcastic', alpha=.5)\ndf.loc[df['label'] == 0, 'comment'].str.len().apply(np.log1p).hist(label='normal', alpha=.5)\nplt.legend();",
"_____no_output_____"
],
[
"wordcloud = WordCloud(background_color='black', stopwords = STOPWORDS,\n max_words = 200, max_font_size = 100, \n random_state = 17, width=800, height=400)",
"_____no_output_____"
]
],
[
[
"Word cloud are nice, but not very useful\n",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(16, 12))\nwordcloud.generate(str(df.loc[df['label'] == 1, 'comment']))\nplt.imshow(wordcloud);",
"_____no_output_____"
],
[
"plt.figure(figsize=(16, 12))\nwordcloud.generate(str(df.loc[df['label'] == 0, 'comment']))\nplt.imshow(wordcloud);",
"_____no_output_____"
]
],
[
[
"Let's analyze whether some subreddits are more \"sarcastic\" on average than others\n",
"_____no_output_____"
]
],
[
[
"sub_df = df.groupby('subreddit')['label'].agg([np.size, np.mean, np.sum])\nsub_df.sort_values(by='sum', ascending=False).head(10)",
"_____no_output_____"
],
[
"sub_df[sub_df['size'] > 1000].sort_values(by='mean', ascending=False).head(10)",
"_____no_output_____"
]
],
[
[
"The same for authors doesn't yield much insight. Except for the fact that somebody's comments were sampled - we can see the same amounts of sarcastic and non-sarcastic comments.\n",
"_____no_output_____"
]
],
[
[
"sub_df = df.groupby('author')['label'].agg([np.size, np.mean, np.sum])\nsub_df[sub_df['size'] > 300].sort_values(by='mean', ascending=False).head(10)",
"_____no_output_____"
],
[
"sub_df = df[df['score'] >= 0].groupby('score')['label'].agg([np.size, np.mean, np.sum])\nsub_df[sub_df['size'] > 300].sort_values(by='mean', ascending=False).head(10)",
"_____no_output_____"
],
[
"sub_df = df[df['score'] < 0].groupby('score')['label'].agg([np.size, np.mean, np.sum])\nsub_df[sub_df['size'] > 300].sort_values(by='mean', ascending=False).head(10)",
"_____no_output_____"
],
[
"#https://buhrmann.github.io/tfidf-analysis.html\ndef top_tfidf_feats(row, features, top_n=25):\n ''' Get top n tfidf values in row and return them with their corresponding feature names.'''\n topn_ids = np.argsort(row)[::-1][:top_n]\n top_feats = [(features[i], row[i]) for i in topn_ids]\n df = pd.DataFrame(top_feats)\n df.columns = ['feature', 'tfidf']\n return df\n\ndef top_feats_in_doc(Xtr, features, row_id, top_n=25):\n ''' Top tfidf features in specific document (matrix row) '''\n row = np.squeeze(Xtr[row_id].toarray())\n return top_tfidf_feats(row, features, top_n)\n\ndef top_mean_feats(Xtr, features, grp_ids, min_tfidf=0.1, top_n=25):\n ''' Return the top n features that on average are most important amongst documents in rows\n indentified by indices in grp_ids. '''\n \n D = Xtr[grp_ids].toarray()\n\n D[D < min_tfidf] = 0\n tfidf_means = np.mean(D, axis=0)\n return top_tfidf_feats(tfidf_means, features, top_n)\n\ndef top_feats_by_class(Xtr, features, min_tfidf=0.1, top_n=20):\n ''' Return a list of dfs, where each df holds top_n features and their mean tfidf value\n calculated across documents with the same class label. '''\n sar = df.index[df['label']==1]\n norm = df.index[df['label']==0]\n sar_df = top_mean_feats(Xtr, features, sar, min_tfidf=min_tfidf, top_n=top_n)\n norm_df = top_mean_feats(Xtr, features, norm, min_tfidf=min_tfidf, top_n=top_n)\n return sar_df, norm_df",
"_____no_output_____"
],
[
"df = df.reset_index(drop=True)",
"_____no_output_____"
],
[
"vectorizer = TfidfVectorizer(min_df=200, max_features=10000, \n strip_accents='unicode', analyzer='word',ngram_range=(1,1),\n use_idf=1,smooth_idf=1,sublinear_tf=1,\n stop_words = 'english')\nvectorizer.fit(df['comment'])\nfeatures = np.array(vectorizer.get_feature_names())\nunigrams = vectorizer.transform(df['comment'])",
"_____no_output_____"
],
[
"sar_df, norm_df = top_feats_by_class(unigrams,features)",
"_____no_output_____"
],
[
"sar_df",
"_____no_output_____"
],
[
"norm_df",
"_____no_output_____"
],
[
"#temp settings to min=150 to facilitate top features section to run in kernals\n#change back to min=10 to get better results\nvectorizer = TfidfVectorizer(min_df=150, max_features=30000, \n strip_accents='unicode', analyzer='word',ngram_range=(2,2),\n use_idf=1,smooth_idf=1,sublinear_tf=1,\n stop_words = 'english')\n\nvectorizer.fit(df['comment'])\nfeatures = np.array(vectorizer.get_feature_names())\nbigrams = vectorizer.transform(df['comment'])",
"_____no_output_____"
],
[
"sar_df, norm_df = top_feats_by_class(bigrams,features)",
"_____no_output_____"
],
[
"sar_df",
"_____no_output_____"
],
[
"norm_df",
"_____no_output_____"
],
[
"vectorizer = TfidfVectorizer(min_df=100, max_features=30000, \n strip_accents='unicode', analyzer='char',ngram_range=(1,4),\n use_idf=1,smooth_idf=1,sublinear_tf=1,\n stop_words = 'english')\n\nvectorizer.fit(df['comment'])\nfeatures = np.array(vectorizer.get_feature_names())\nchargram = vectorizer.transform(df['comment'])",
"/home/jchiang/.local/lib/python3.6/site-packages/sklearn/feature_extraction/text.py:520: UserWarning: The parameter 'stop_words' will not be used since 'analyzer' != 'word'\n warnings.warn(\"The parameter 'stop_words' will not be used\"\n"
],
[
"sar_df, norm_df = top_feats_by_class(chargram,features)\nsar_df",
"_____no_output_____"
],
[
"norm_df",
"_____no_output_____"
],
[
"train_texts, valid_texts, y_train, y_valid = \\\n train_test_split(df['comment'], df['label'], random_state=17)",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
eca844befc184c05ada9c0657f7176914ea175ed | 17,198 | ipynb | Jupyter Notebook | HCA_AllWeatherOptimizeVolatility/HCA_AllWeatherOptimizeVolatility.ipynb | hotchilianalytics/hca-resources | 051fcad7bf94ff0b7543adb227a769f0b9cead67 | [
"Apache-2.0"
] | 2 | 2022-02-22T12:46:48.000Z | 2022-03-28T21:58:13.000Z | HCA_AllWeatherOptimizeVolatility/HCA_AllWeatherOptimizeVolatility.ipynb | hotchilianalytics/hca-resources | 051fcad7bf94ff0b7543adb227a769f0b9cead67 | [
"Apache-2.0"
] | null | null | null | HCA_AllWeatherOptimizeVolatility/HCA_AllWeatherOptimizeVolatility.ipynb | hotchilianalytics/hca-resources | 051fcad7bf94ff0b7543adb227a769f0b9cead67 | [
"Apache-2.0"
] | 6 | 2021-05-26T14:56:40.000Z | 2022-02-14T15:56:27.000Z | 36.747863 | 275 | 0.588092 | [
[
[
"## HotChili Analytics trading notebook template\n#### Configure by setting ALGO_NAME in cell below.\n#### Run various options (backtest, ingest, live) by uncommenting one cell ",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\n%load_ext zipline\n\n# %reload_ext zipline # Uncomment and use this when already loaded zipline extension for magic cell usage.",
"/home/hca-r2-001/miniconda3/envs/hca/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n from ._conv import register_converters as _register_converters\n/home/hca-r2-001/miniconda3/envs/hca/lib/python3.6/site-packages/odo/backends/pandas.py:94: FutureWarning: pandas.tslib is deprecated and will be removed in a future version.\nYou can access NaTType as type(pandas.NaT)\n @convert.register((pd.Timestamp, pd.Timedelta), (pd.tslib.NaTType, type(None)))\n"
],
[
"import pandas as pd\n\n# Options you can uncomment and set:\n\n# pd.set_option(\"max_colwidth\", 300)\n# pd.set_option(\"display.max_rows\", 300)\n# pd.set_option(\"display.max_columns\", 50)\n# pd.set_option('precision', 2)\n# pd.options.display.float_format = '{:20,.2f}'.format",
"_____no_output_____"
],
[
"import os\n\nhca_root_path = os.environ['HCA_ROOT']\nprint(f\"hca_root_path = {hca_root_path}\")",
"hca_root_path = /home/hca-ws2004/hca\n"
]
],
[
[
"# Construct algorithm strategy path names\n\nAssumptions:\n\n- the strategy is in a directory with the same name as the strategy in `ALGO_NAME` below\n- the strategy is located in the hca-resources directory, which is located relative to `hca_root_path`, found above",
"_____no_output_____"
]
],
[
[
"ALGO_NAME = \"HCA_AllWeatherOptimizeVolatility\" # <--- Supply name here",
"_____no_output_____"
]
],
[
[
"Other variables are derived from `ALGO_NAME`:",
"_____no_output_____"
]
],
[
[
"HCA_RESOURCES_PATH = hca_root_path + \"/hca-resources/\" \nALGO_PATH = HCA_RESOURCES_PATH + ALGO_NAME + \"/\" \nALGO_BT = ALGO_PATH + ALGO_NAME + \".py\"\nALGO_BT_OUT = ALGO_PATH + ALGO_NAME + \".pkl\"\nALGO_LIVE = ALGO_PATH + ALGO_NAME + \"_Live\" + \".py\"\n\nprint(f\"\"\"\nALGO_NAME = {ALGO_NAME}\nHCA_RESOURCES_PATH = {HCA_RESOURCES_PATH}\nALGO_PATH = {ALGO_PATH}\nALGO_BT = {ALGO_BT}\nALGO_BT_OUT = {ALGO_BT_OUT}\nALGO_LIVE = {ALGO_LIVE}\n\nContents of algo directory:\n\"\"\")\n\n!ls $ALGO_PATH",
"\nALGO_NAME = HCA_AllWeatherOptimizeVolatility\nHCA_RESOURCES_PATH = /home/hca-ws2004/hca/hca-resources/\nALGO_PATH = /home/hca-ws2004/hca/hca-resources/HCA_AllWeatherOptimizeVolatility/\nALGO_BT = /home/hca-ws2004/hca/hca-resources/HCA_AllWeatherOptimizeVolatility/HCA_AllWeatherOptimizeVolatility.py\nALGO_BT_OUT = /home/hca-ws2004/hca/hca-resources/HCA_AllWeatherOptimizeVolatility/HCA_AllWeatherOptimizeVolatility.pkl\nALGO_LIVE = /home/hca-ws2004/hca/hca-resources/HCA_AllWeatherOptimizeVolatility/HCA_AllWeatherOptimizeVolatility_Live.py\n\nContents of algo directory:\n\nHCA_AllWeatherOptimizeVolatility.ipynb\nHCA_AllWeatherOptimizeVolatility_Live.py\nHCA_AllWeatherOptimizeVolatility.pkl\nHCA_AllWeatherOptimizeVolatility.py\nHCA_AllWeatherOptimizeVolatility_tearsheet.ipynb\n"
]
],
[
[
"## Zipline backtest: \n\n- Method: Jupyter magic cell (%%) \n- Execution of zipline code in cell containing command line command\n- Uncomment first line and hit (shift-enter) inside the cell to run simulation backtest",
"_____no_output_____"
]
],
[
[
"# %%zipline --start=2018-1-1 --end=2020-8-10 -b sharadar-eqfd -o $ALGO_BT_OUT\n\n# Source: adapted from various algos on quantopian\n# HCA Conversion Date: 09-05-2020\n# Conversion Author: Anthony Garner\n\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\n\nfrom zipline.api import (order, cancel_order, get_open_orders, symbol, symbols, \n date_rules, time_rules, order_target_percent, \n record, schedule_function, get_datetime)\nfrom trading_calendars import get_calendar\n\n\ndef initialize(context):\n schedule_function(func=trade, date_rule=date_rules.every_day(),\n time_rule=time_rules.market_open(),half_days=True)\n context.asserts = symbols('SPY','IEF')\n\n context.rebalance_date = 0\n context.fired = False\n context.rebalance_inteval = 'M'#'Q', #'D', #'M' #'Q' #'Y'\n\n context.asserts_position = [0.5, 0.5]\n context.volatility_policy = True\n #unused if volatility_policy is false\n context.volatility_days = 252\n context.volatility_price_history = 66\n #set at less than 1 to ensure no leverage\n context.leverage_buffer=0.90\n\n\ndef handle_data(context, data):\n record(SPY=data[symbol('SPY')].price)\n\n\ndef is_new_day(context, now):\n return ( (now.year > context.rebalance_date.year) or (now.month > context.rebalance_date.month) or((now.month == context.rebalance_date.month) and (now.day > context.rebalance_date.day))) \n\n\ndef is_new_month(context, now):\n return ((now.year > context.rebalance_date.year) or ((now.year == context.rebalance_date.year) and (now.month > context.rebalance_date.month)))\n\n\ndef is_new_quarter(context, now):\n return ((now.year > context.rebalance_date.year) or ((now.year == context.rebalance_date.year) and (now.month == context.rebalance_date.month + 3)))\n\n\ndef is_new_year(context, now):\n return (now.year > context.rebalance_date.year)\n\n\ndef need_rebalance(context, now):\n return ((context.rebalance_inteval == 'Y' and is_new_year(context, now))or \n (context.rebalance_inteval == 'Q' and 
is_new_quarter(context, now)) or \n (context.rebalance_inteval == 'M' and is_new_month(context, now)) or \n (context.rebalance_inteval == 'D' and is_new_day(context, now)))\n\n\n# Compute historical volatility \ndef compute_volatility(price_history, days): \n # Compute daily returns \n daily_returns = price_history.pct_change().dropna().values \n # Compute daily volatility \n historical_vol_daily = np.std(daily_returns,axis=0) \n # Convert daily volatility to annual volatility, assuming 252 trading days \n historical_vol_annually = historical_vol_daily*math.sqrt(days) \n # Return estimate of annual volatility \n return historical_vol_annually\n\n\ndef compute_asserts_volatility(context, data):\n price_history = data.history(context.asserts, \"price\", context.volatility_price_history, \"1d\")\n vol = 1.0/(compute_volatility(price_history, context.volatility_days))\n #print(\"vol: \" + str(vol))\n sum = np.sum(vol)\n context.asserts_position = vol / sum\n #print(\"asserts_position: \" + str(context.asserts_position))\n\n\ndef init_portfolio(context, data):\n if context.volatility_policy:\n compute_asserts_volatility(context, data)\n for i in range(0, len(context.asserts)):\n #print(\"rebalance \" + context.asserts[i].symbol + \" to:\" + str(context.asserts_position[i]*100) + \"%\")\n order_target_percent(context.asserts[i], context.asserts_position[i]* context.leverage_buffer) \n\n\ndef rebalance(context, data):\n if context.volatility_policy:\n compute_asserts_volatility(context, data)\n for i in range(0, len(context.asserts)):\n if data.can_trade(context.asserts[i]):\n #print(\"rebalance \" + context.asserts[i].symbol + \" to:\" + str(context.asserts_position[i]*100) + \"%\")\n order_target_percent(context.asserts[i], context.asserts_position[i]* context.leverage_buffer)\n\n\ndef trade(context, data):\n if not context.fired:\n context.rebalance_date = get_datetime()\n #print(\"build portfolio at \" + str(context.rebalance_date))\n init_portfolio(context, data)\n 
context.fired = True\n else:\n now = get_datetime()\n if (need_rebalance(context, now)):\n #print(\"new rebalance arrivid:\" + str(now))\n context.rebalance_date = now\n rebalance(context, data)\n\n\ndef analyze(context, perf):\n ax1 = plt.subplot(211)\n perf.portfolio_value.plot(ax=ax1)\n ax2 = plt.subplot(212, sharex=ax1)\n perf.SPY.plot(ax=ax2)\n plt.gcf().set_size_inches(18, 8)\n plt.show()",
"_____no_output_____"
]
],
[
[
"## Display your current bundles",
"_____no_output_____"
]
],
[
[
"#!zipline bundles # Finds all bundles",
"_____no_output_____"
]
],
[
[
"## Ingest Sharadar funds assets for today",
"_____no_output_____"
]
],
[
[
"# Ingest Sharadar funds assets for today, if needed.\n\n# Only need to ingest Funds for this algo, and this takes less processing time and system memory than ingesting\n# all of Sharadar Equities plus Funds bundle (sharadar-eqfd)\n# !zipline ingest -b sharadar-funds",
"_____no_output_____"
]
],
[
[
"## Zipline backtest, alternative method\n\n- Method: command line\n- Execution of zipline code, located in a file, using below command line execution with magic (`!`) invocation\n- This line can also be run in a terminal by copying everything past the `!` and pasting (shift-insert) it into the target terminal",
"_____no_output_____"
]
],
[
[
"!zipline run -f $ALGO_BT --start=2018-1-1 --end=2021-04-09 -b sharadar-funds -o $ALGO_BT_OUT",
"/home/hca-r2-001/miniconda3/envs/hca/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n from ._conv import register_converters as _register_converters\n/home/hca-r2-001/miniconda3/envs/hca/lib/python3.6/site-packages/odo/backends/pandas.py:94: FutureWarning: pandas.tslib is deprecated and will be removed in a future version.\nYou can access NaTType as type(pandas.NaT)\n @convert.register((pd.Timestamp, pd.Timedelta), (pd.tslib.NaTType, type(None)))\nextension: hca_root_path = /home/hca-ws2004/hca\nextension:TODAY_STR = 2021-04-18\nextension:TWO_YR_AGO_STR = 2019-04-18\nextension: start_date=2019-04-18 end_date = 2021-04-18\n[2021-04-18 22:15:31.985454] INFO: Loader: Cache at /home/hca-ws2004/zipline-broker/data/SPY_benchmark.csv does not have data from 1990-01-02 00:00:00+00:00 to 2021-04-16 00:00:00+00:00.\n\n[2021-04-18 22:15:31.985820] INFO: Loader: Downloading benchmark data for 'SPY' from 1989-12-29 00:00:00+00:00 to 2021-04-16 00:00:00+00:00\n[2021-04-18 22:15:33.060481] WARNING: Loader: Still don't have expected benchmark data for 'SPY' from 1989-12-29 00:00:00+00:00 to 2021-04-16 00:00:00+00:00 after redownload!\n[2021-04-18 22:15:33.107476] INFO: Loader: Cache at /home/hca-ws2004/zipline-broker/data/treasury_curves.csv does not have data from 1990-01-02 00:00:00+00:00 to 2021-04-16 00:00:00+00:00.\n\n[2021-04-18 22:15:33.107689] INFO: Loader: Downloading treasury data for 'SPY' from 1990-01-02 00:00:00+00:00 to 2021-04-16 00:00:00+00:00\n[2021-04-18 22:15:35.315080] WARNING: Loader: Still don't have expected treasury data for 'SPY' from 1990-01-02 00:00:00+00:00 to 2021-04-16 00:00:00+00:00 after redownload!\n[2021-04-18 22:15:37.156377] INFO: zipline.finance.metrics.tracker: Simulated 823 trading days\nfirst open: 2018-01-02 14:31:00+00:00\nlast close: 2021-04-09 
20:00:00+00:00\n"
]
],
[
[
"## Run Zipline live on IB-TWS via command line\n\n- Method: command line\n- Execution of zipline code using below command line execution using magic (`!`) invocation\n\n**Notes:** \n- IB-TWS or IB-Gateway must be running, with `IB_ACCT` and `IB_URI` port being correct to live trade\n- Change `I_WANT_TO_RUN_THIS_CODE` to `True` below to run zipline live on IB-TWS/IB-Gateway",
"_____no_output_____"
]
],
[
[
"TODAY = pd.datetime.today().strftime(\"%Y-%m-%d\")\nprint(\"TODAY = {}\".format(TODAY))",
"TODAY = 2021-04-18\n"
],
[
"ALGO_STATE = ALGO_PATH + \"strategy.state\" \nALGO_RTB = ALGO_PATH + \"realtime-bars/\"\n\n# Edit the following URI to match your IB account and port info.\nIB_ACCT = \"DU1568488\"\nIB_URI = \"127.0.0.1:7497:1301\"\n\n# Change following to 'True' and run cell (control-enter) to execute live run.\nI_WANT_TO_RUN_THIS_CODE = False\n#I_WANT_TO_RUN_THIS_CODE = True\n\nif I_WANT_TO_RUN_THIS_CODE:\n \n !zipline run \\\n -s $TODAY \\\n -f $ALGO_LIVE \\\n --bundle hca-symbol \\\n --broker ib \\\n --broker-uri $IB_URI \\\n --broker-acct $IB_ACCT \\\n --data-frequency daily \\\n --state-file $ALGO_STATE \\\n --realtime-bar-target $ALGO_RTB ",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
eca84b597efc1dd718bd9d697c5706b6d74403b8 | 143,017 | ipynb | Jupyter Notebook | docs/mindspore/programming_guide/source_zh_cn/quick_start/quick_start.ipynb | bwcsswcx/docs | e54b179bb8ca020a9bf0c83926822048057e9536 | [
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | docs/mindspore/programming_guide/source_zh_cn/quick_start/quick_start.ipynb | bwcsswcx/docs | e54b179bb8ca020a9bf0c83926822048057e9536 | [
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | docs/mindspore/programming_guide/source_zh_cn/quick_start/quick_start.ipynb | bwcsswcx/docs | e54b179bb8ca020a9bf0c83926822048057e9536 | [
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | 120.486099 | 29,856 | 0.868107 | [
[
[
"# ๅฎ็ฐไธไธชๅพ็ๅ็ฑปๅบ็จ\n\n`Linux` `Windows` `Ascend` `GPU` `CPU` `ๅ
จๆต็จ` `ๅ็บง` `ไธญ็บง` `้ซ็บง`\n\n[](https://gitee.com/mindspore/docs/blob/master/docs/mindspore/programming_guide/source_zh_cn/quick_start/quick_start.ipynb) [](https://obs.dualstack.cn-north-4.myhuaweicloud.com/mindspore-website/notebook/master/programming_guide/zh_cn/mindspore_quick_start.ipynb) [](https://authoring-modelarts-cnnorth4.huaweicloud.com/console/lab?share-url-b64=aHR0cHM6Ly9vYnMuZHVhbHN0YWNrLmNuLW5vcnRoLTQubXlodWF3ZWljbG91ZC5jb20vbWluZHNwb3JlLXdlYnNpdGUvbm90ZWJvb2svbW9kZWxhcnRzL21pbmRzcG9yZV9xdWlja19zdGFydC5pcHluYg==&imageid=65f636a0-56cf-49df-b941-7d2a07ba8c8c) [](https://ascend.huawei.com/zh/#/college/onlineExperiment/codeLabMindSpore/mindSpore)",
"_____no_output_____"
],
[
"## ๆฆ่ฟฐ\n\nไธ้ขๆไปฌ้่ฟไธไธชๅฎ้
ๆ ทไพ๏ผๅธฆ้ขๅคงๅฎถไฝ้ชMindSporeๅบ็ก็ๅ่ฝ๏ผๅฏนไบไธ่ฌ็็จๆท่่จ๏ผๅฎๆๆดไธชๆ ทไพๅฎ่ทตไผๆ็ปญ20~30ๅ้ใ\n\nๆฌไพๅญไผๅฎ็ฐไธไธช็ฎๅ็ๅพ็ๅ็ฑป็ๅ่ฝ๏ผๆดไฝๆต็จๅฆไธ๏ผ\n\n1. ๅค็้่ฆ็ๆฐๆฎ้๏ผ่ฟ้ไฝฟ็จไบMNISTๆฐๆฎ้ใ\n2. ๅฎไนไธไธช็ฝ็ป๏ผ่ฟ้ๆไปฌไฝฟ็จLeNet็ฝ็ปใ\n3. ่ชๅฎไนๅ่ฐๅฝๆฐๆถ้ๆจกๅ็ๆๅคฑๅผๅ็ฒพๅบฆๅผใ\n4. ๅฎไนๆๅคฑๅฝๆฐๅไผๅๅจใ\n5. ๅ ่ฝฝๆฐๆฎ้ๅนถ่ฟ่ก่ฎญ็ป๏ผ่ฎญ็ปๅฎๆๅ๏ผๆฅ็็ปๆๅไฟๅญๆจกๅๆไปถใ\n6. ๅ ่ฝฝไฟๅญ็ๆจกๅ๏ผ่ฟ่กๆจ็ใ\n7. ้ช่ฏๆจกๅ๏ผๅ ่ฝฝๆต่ฏๆฐๆฎ้ๅ่ฎญ็ปๅ็ๆจกๅ๏ผ้ช่ฏ็ปๆ็ฒพๅบฆใ\n\n่ฟๆฏ็ฎๅใๅบ็ก็ๅบ็จๆต็จ๏ผๅ
ถไป้ซ็บงใๅคๆ็ๅบ็จๅฏไปฅๅบไบ่ฟไธชๅบๆฌๆต็จ่ฟ่กๆฉๅฑใ\n\n> ๆฌๆๆกฃ้็จไบCPUใGPUๅAscend็ฏๅขใ \n> ไฝ ๅฏไปฅๅจ่ฟ้ๆพๅฐๅฎๆดๅฏ่ฟ่ก็ๆ ทไพไปฃ็ ๏ผ<https://gitee.com/mindspore/docs/tree/master/docs/sample_code/lenet> ใ",
"_____no_output_____"
],
[
"## ๅๅค็ฏ่\n\nๅจๅจๆ่ฟ่กๅฎ่ทตไนๅ๏ผ็กฎไฟ๏ผไฝ ๅทฒ็ปๆญฃ็กฎๅฎ่ฃ
ไบMindSporeใๅฆๆๆฒกๆ๏ผๅฏไปฅ้่ฟ[MindSporeๅฎ่ฃ
้กต้ข](https://www.mindspore.cn/install)ๅฐMindSporeๅฎ่ฃ
ๅจไฝ ็็ต่ๅฝไธญใ \n\nๅๆถๅธๆไฝ ๆฅๆPython็ผ็ ๅบ็กๅๆฆ็ใ็ฉ้ต็ญๅบ็กๆฐๅญฆ็ฅ่ฏใ\n\n้ฃไนๆฅไธๆฅ๏ผๅฐฑๅผๅงMindSpore็ไฝ้ชไนๆ
ๅงใ\n\n### ไธ่ฝฝๆฐๆฎ้\n\nๆไปฌ็คบไพไธญ็จๅฐ็`MNIST`ๆฐๆฎ้ๆฏ็ฑ10็ฑป$28*28$็็ฐๅบฆๅพ็็ปๆ๏ผ่ฎญ็ปๆฐๆฎ้ๅ
ๅซ60000ๅผ ๅพ็๏ผๆต่ฏๆฐๆฎ้ๅ
ๅซ10000ๅผ ๅพ็ใ\n\n> MNISTๆฐๆฎ้ไธ่ฝฝ้กต้ข๏ผ<http://yann.lecun.com/exdb/mnist/>ใ้กต้ขๆไพ4ไธชๆฐๆฎ้ไธ่ฝฝ้พๆฅ๏ผๅ
ถไธญๅ2ไธชๆไปถๆฏ่ฎญ็ปๆฐๆฎ้่ฆ๏ผๅ2ไธชๆไปถๆฏๆต่ฏ็ปๆ้่ฆใ\n\nๅจJupyter Notebookไธญๆง่กๅฆไธๅฝไปคไธ่ฝฝMNISTๆฐๆฎ้ใ",
"_____no_output_____"
]
],
[
[
"!mkdir -p ./datasets/MNIST_Data/train ./datasets/MNIST_Data/test\n!wget -NP ./datasets/MNIST_Data/train https://mindspore-website.obs.myhuaweicloud.com/notebook/datasets/mnist/train-labels-idx1-ubyte --no-check-certificate \n!wget -NP ./datasets/MNIST_Data/train https://mindspore-website.obs.myhuaweicloud.com/notebook/datasets/mnist/train-images-idx3-ubyte --no-check-certificate\n!wget -NP ./datasets/MNIST_Data/test https://mindspore-website.obs.myhuaweicloud.com/notebook/datasets/mnist/t10k-labels-idx1-ubyte --no-check-certificate\n!wget -NP ./datasets/MNIST_Data/test https://mindspore-website.obs.myhuaweicloud.com/notebook/datasets/mnist/t10k-images-idx3-ubyte --no-check-certificate\n!tree ./datasets/MNIST_Data",
"./datasets/MNIST_Data\nโโโ test\nโย ย โโโ t10k-images-idx3-ubyte\nโย ย โโโ t10k-labels-idx1-ubyte\nโโโ train\n โโโ train-images-idx3-ubyte\n โโโ train-labels-idx1-ubyte\n\n2 directories, 4 files\n"
]
],
[
[
"### ๅฏผๅ
ฅPythonๅบ&ๆจกๅ\n\nๅจไฝฟ็จๅ๏ผ้่ฆๅฏผๅ
ฅ้่ฆ็Pythonๅบใ\n\n็ฎๅไฝฟ็จๅฐ`os`ๅบ๏ผไธบๆนไพฟ็่งฃ๏ผๅ
ถไป้่ฆ็ๅบ๏ผๆไปฌๅจๅ
ทไฝไฝฟ็จๅฐๆถๅ่ฏดๆใ",
"_____no_output_____"
]
],
[
[
"import os",
"_____no_output_____"
]
],
[
[
"่ฏฆ็ป็MindSpore็ๆจกๅ่ฏดๆ๏ผๅฏไปฅๅจ[MindSpore API้กต้ข](https://www.mindspore.cn/docs/api/zh-CN/master/index.html)ไธญๆ็ดขๆฅ่ฏขใ",
"_____no_output_____"
],
[
"### ้
็ฝฎ่ฟ่กไฟกๆฏ\n\nๅจๆญฃๅผ็ผๅไปฃ็ ๅ๏ผ้่ฆไบ่งฃMindSpore่ฟ่กๆ้่ฆ็็กฌไปถใๅ็ซฏ็ญๅบๆฌไฟกๆฏใ\n\nๅฏไปฅ้่ฟ`context.set_context`ๆฅ้
็ฝฎ่ฟ่ก้่ฆ็ไฟกๆฏ๏ผ่ญฌๅฆ่ฟ่กๆจกๅผใๅ็ซฏไฟกๆฏใ็กฌไปถ็ญไฟกๆฏใ\n\nๅฏผๅ
ฅ`context`ๆจกๅ๏ผ้
็ฝฎ่ฟ่ก้่ฆ็ไฟกๆฏใ",
"_____no_output_____"
]
],
[
[
"from mindspore import context\n\ncontext.set_context(mode=context.GRAPH_MODE, device_target=\"CPU\")",
"_____no_output_____"
]
],
[
[
"ๅจๆ ทไพไธญๆไปฌ้
็ฝฎๆ ทไพ่ฟ่กไฝฟ็จๅพๆจกๅผใๆ นๆฎๅฎ้
ๆ
ๅต้
็ฝฎ็กฌไปถไฟกๆฏ๏ผ่ญฌๅฆไปฃ็ ่ฟ่กๅจAscend AIๅค็ๅจไธ๏ผๅ`--device_target`้ๆฉ`Ascend`๏ผไปฃ็ ่ฟ่กๅจCPUใGPUๅ็ใ่ฏฆ็ปๅๆฐ่ฏดๆ๏ผ่ฏทๅ่ง`context.set_context`ๆฅๅฃ่ฏดๆใ",
"_____no_output_____"
],
[
"## ๆฐๆฎๅค็\nๆฐๆฎ้ๅฏนไบ่ฎญ็ป้ๅธธ้่ฆ๏ผๅฅฝ็ๆฐๆฎ้ๅฏไปฅๆๆๆ้ซ่ฎญ็ป็ฒพๅบฆๅๆ็๏ผๅจๅ ่ฝฝๆฐๆฎ้ๅ๏ผ้ๅธธไผๅฏนๆฐๆฎ้่ฟ่กไธไบๅค็ใ\n\n็ฑไบๅ้ขไผ้็จLeNet่ฟๆ ท็ๅท็งฏ็ฅ็ป็ฝ็ปๅฏนๆฐๆฎ้่ฟ่ก่ฎญ็ป๏ผ่้็จๅจ่ฎญ็ปๆฐๆฎๆถ๏ผๅฏนๆฐๆฎๆ ผๅผๆฏๆๆ่ฆๆฑ็๏ผๆไปฅๆฅไธๆฅ้่ฆๅ
ๆฅ็ๆฐๆฎ้ๅ
็ๆฐๆฎๆฏไปไนๆ ท็๏ผ่ฟๆ ทๆ่ฝๆ้ ไธไธช้ๅฏนๆง็ๆฐๆฎ่ฝฌๆขๅฝๆฐ๏ผๅฐๆฐๆฎ้ๆฐๆฎ่ฝฌๆขๆ็ฌฆๅ่ฎญ็ป่ฆๆฑ็ๆฐๆฎๅฝขๅผใ\n\nๆง่กๅฆไธไปฃ็ ๆฅ็ๅๅงๆฐๆฎ้ๆฐๆฎใ",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\nimport matplotlib\nimport numpy as np\nimport mindspore.dataset as ds\n\ntrain_data_path = \"./datasets/MNIST_Data/train\"\ntest_data_path = \"./datasets/MNIST_Data/test\"\nmnist_ds = ds.MnistDataset(train_data_path)\nprint('The type of mnist_ds:', type(mnist_ds))\nprint(\"Number of pictures contained in the mnist_ds๏ผ\", mnist_ds.get_dataset_size())\n\ndic_ds = mnist_ds.create_dict_iterator()\nitem = next(dic_ds)\nimg = item[\"image\"].asnumpy()\nlabel = item[\"label\"].asnumpy()\n\nprint(\"The item of mnist_ds:\", item.keys())\nprint(\"Tensor of image in item:\", img.shape) \nprint(\"The label of item:\", label)\n\nplt.imshow(np.squeeze(img))\nplt.title(\"number:%s\"% item[\"label\"].asnumpy())\nplt.show()",
"The type of mnist_ds: <class 'mindspore.dataset.engine.datasets.MnistDataset'>\nNumber of pictures contained in the mnist_ds๏ผ 60000\nThe item of mnist_ds: dict_keys(['image', 'label'])\nTensor of image in item: (28, 28, 1)\nThe label of item: 8\n"
]
],
[
[
"ไปไธ้ข็่ฟ่กๆ
ๅตๆไปฌๅฏไปฅ็ๅฐ๏ผ่ฎญ็ปๆฐๆฎ้`train-images-idx3-ubyte`ๅ`train-labels-idx1-ubyte`ๅฏนๅบ็ๆฏ6ไธๅผ ๅพ็ๅ6ไธไธชๆฐๅญๆ ็ญพ๏ผ่ฝฝๅ
ฅๆฐๆฎๅ็ป่ฟ`create_dict_iterator`่ฝฌๆขๅญๅ
ธๅ็ๆฐๆฎ้๏ผๅๅ
ถไธญ็ไธไธชๆฐๆฎๆฅ็๏ผ่ฟๆฏไธไธชkeyไธบ`image`ๅ`label`็ๅญๅ
ธ๏ผๅ
ถไธญ็`image`็ๅผ ้(้ซๅบฆ28๏ผๅฎฝๅบฆ28๏ผ้้1)ๅ`label`ไธบๅฏนๅบๅพ็็ๆฐๅญใ",
"_____no_output_____"
],
[
"### ๅฎไนๆฐๆฎ้ๅๆฐๆฎๆไฝ\n\nๆไปฌๅฎไนไธไธชๅฝๆฐ`create_dataset`ๆฅๅๅปบๆฐๆฎ้ใๅจ่ฟไธชๅฝๆฐไธญ๏ผๆไปฌๅฎไนๅฅฝ้่ฆ่ฟ่ก็ๆฐๆฎๅขๅผบๅๅค็ๆไฝ๏ผ\n\n1. ๅฎไนๆฐๆฎ้ใ\n2. ๅฎไน่ฟ่กๆฐๆฎๅขๅผบๅๅค็ๆ้่ฆ็ไธไบๅๆฐใ\n3. ๆ นๆฎๅๆฐ๏ผ็ๆๅฏนๅบ็ๆฐๆฎๅขๅผบๆไฝใ\n4. ไฝฟ็จ`map`ๆ ๅฐๅฝๆฐ๏ผๅฐๆฐๆฎๆไฝๅบ็จๅฐๆฐๆฎ้ใ\n5. ๅฏน็ๆ็ๆฐๆฎ้่ฟ่กๅค็ใ\n\nๅฎไนๅฎๆๅ๏ผไฝฟ็จ`create_datasets`ๅฏนๅๅงๆฐๆฎ่ฟ่กๅขๅผบๆไฝ๏ผๅนถๆฝๅไธไธช`batch`็ๆฐๆฎ๏ผๆฅ็ๆฐๆฎๅขๅผบๅ็ๅๅใ",
"_____no_output_____"
]
],
[
[
"import mindspore.dataset.vision.c_transforms as CV\nimport mindspore.dataset.transforms.c_transforms as C\nfrom mindspore.dataset.vision import Inter\nfrom mindspore import dtype as mstype\n\n\ndef create_dataset(data_path, batch_size=32, repeat_size=1,\n num_parallel_workers=1):\n \"\"\" \n create dataset for train or test\n \n Args:\n data_path (str): Data path\n batch_size (int): The number of data records in each group\n repeat_size (int): The number of replicated data records\n num_parallel_workers (int): The number of parallel workers\n \"\"\"\n # define dataset\n mnist_ds = ds.MnistDataset(data_path)\n\n # define some parameters needed for data enhancement and rough justification\n resize_height, resize_width = 32, 32\n rescale = 1.0 / 255.0\n shift = 0.0\n rescale_nml = 1 / 0.3081\n shift_nml = -1 * 0.1307 / 0.3081\n\n # according to the parameters, generate the corresponding data enhancement method\n resize_op = CV.Resize((resize_height, resize_width), interpolation=Inter.LINEAR)\n rescale_nml_op = CV.Rescale(rescale_nml, shift_nml)\n rescale_op = CV.Rescale(rescale, shift)\n hwc2chw_op = CV.HWC2CHW()\n type_cast_op = C.TypeCast(mstype.int32)\n\n # using map to apply operations to a dataset\n mnist_ds = mnist_ds.map(operations=type_cast_op, input_columns=\"label\", num_parallel_workers=num_parallel_workers)\n mnist_ds = mnist_ds.map(operations=resize_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n mnist_ds = mnist_ds.map(operations=rescale_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n mnist_ds = mnist_ds.map(operations=rescale_nml_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n mnist_ds = mnist_ds.map(operations=hwc2chw_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n \n # process the generated dataset\n buffer_size = 10000\n mnist_ds = mnist_ds.shuffle(buffer_size=buffer_size)\n mnist_ds = mnist_ds.batch(batch_size, drop_remainder=True)\n mnist_ds = 
mnist_ds.repeat(repeat_size)\n\n return mnist_ds\n\nms_dataset = create_dataset(train_data_path)\nprint('Number of groups in the dataset:', ms_dataset.get_dataset_size())",
"Number of groups in the dataset: 1875\n"
]
],
[
[
"่ฐ็จๆฐๆฎๅขๅผบๅฝๆฐๅ๏ผๆฅ็ๆฐๆฎ้`size`็ฑ60000ๅๆไบ1875๏ผ็ฌฆๅๆไปฌ็ๆฐๆฎๅขๅผบไธญ`mnist_ds.batch`ๆไฝ็้ขๆ๏ผ$60000/32=1875$๏ผใ",
"_____no_output_____"
],
[
"ไธ่ฟฐๅขๅผบ่ฟ็จไธญ๏ผ\n\n- ๆฐๆฎ้ไธญ็`label`ๆฐๆฎๅขๅผบๆไฝ๏ผ\n\n - `C.TypeCast`๏ผๅฐๆฐๆฎ็ฑปๅ่ฝฌๅไธบ`int32`ใ\n\n- ๆฐๆฎ้ไธญ็`image`ๆฐๆฎๅขๅผบๆไฝ๏ผ \n\n - `datasets.MnistDataset`๏ผๅฐๆฐๆฎ้่ฝฌๅไธบMindSporeๅฏ่ฎญ็ป็ๆฐๆฎใ \n - `CV.Resize`๏ผๅฏนๅพๅๆฐๆฎๅ็ด ่ฟ่ก็ผฉๆพ๏ผ้ๅบLeNet็ฝ็ปๅฏนๆฐๆฎ็ๅฐบๅฏธ่ฆๆฑใ \n - `CV.Rescale`๏ผๅฏนๅพๅๆฐๆฎ่ฟ่กๆ ๅๅใๅฝไธๅๆไฝ๏ผไฝฟๅพๆฏไธชๅ็ด ็ๆฐๅผๅคงๅฐๅจ๏ผ0,1๏ผ่ๅดไธญ๏ผๅฏไปฅๆๅ่ฎญ็ปๆ็ใ \n - `CV.HWC2CHW`๏ผๅฏนๅพๅๆฐๆฎๅผ ้่ฟ่กๅๆข๏ผๅผ ้ๅฝขๅผ็ฑ`้ซxๅฎฝx้้`๏ผHWC๏ผๅไธบ`้้x้ซxๅฎฝ`๏ผCHW๏ผ๏ผๆนไพฟ่ฟ่กๆฐๆฎ่ฎญ็ปใ\n \n- ๅ
ถไปๅขๅผบๆไฝ๏ผ\n\n - `mnist_ds.shuffle`๏ผ้ๆบๅฐๆฐๆฎๅญๆพๅจๅฏๅฎน็บณ10000ๅผ ๅพ็ๅฐๅ็ๅ
ๅญไธญ่ฟ่กๆททๆดใ \n - `mnist_ds.batch`๏ผไปๆททๆด็10000ๅผ ๅพ็ๅฐๅไธญๆฝๅ32ๅผ ๅพ็็ปๆไธไธช`batch`๏ผๅๆฐ`batch_size`่กจ็คบๆฏ็ปๅ
ๅซ็ๆฐๆฎไธชๆฐ๏ผ็ฐ่ฎพ็ฝฎๆฏ็ปๅ
ๅซ32ไธชๆฐๆฎใ \n - `mnist_ds.repeat`๏ผๅฐ`batch`ๆฐๆฎ่ฟ่กๅคๅถๅขๅผบ๏ผๅๆฐ`repeat_size`่กจ็คบๆฐๆฎ้ๅคๅถ็ๆฐ้ใ\n\nๅ
่ฟ่ก`shuffle`ใ`batch`ๆไฝ๏ผๅ่ฟ่ก`repeat`ๆไฝ๏ผ่ฟๆ ท่ฝไฟ่ฏ1ไธช`epoch`ๅ
ๆฐๆฎไธ้ๅคใ\n\n> MindSporeๆฏๆ่ฟ่กๅค็งๆฐๆฎๅค็ๅๅขๅผบ็ๆไฝ๏ผๅ็งๆไฝๅพๅพ็ปๅไฝฟ็จ๏ผๅ
ทไฝๅฏไปฅๅ่[ๆฐๆฎๅค็](https://www.mindspore.cn/docs/programming_guide/zh-CN/master/pipeline.html)ๅไธ[ๆฐๆฎๅขๅผบ](https://www.mindspore.cn/docs/programming_guide/zh-CN/master/augmentation.html)็ซ ่ใ",
"_____no_output_____"
],
[
"### ๆฅ็ๅขๅผบๅ็ๆฐๆฎ",
"_____no_output_____"
],
[
"ไป1875็ปๆฐๆฎไธญๅๅบไธ็ปๆฐๆฎ๏ผๆฅ็ๅ
ถๆฐๆฎๅผ ้ๅ`label`ใ",
"_____no_output_____"
]
],
[
[
"data = next(ms_dataset.create_dict_iterator(output_numpy=True))\nimages = data[\"image\"]\nlabels = data[\"label\"]\nprint('Tensor of image:', images.shape)\nprint('Labels:', labels)",
"Tensor of image: (32, 1, 32, 32)\nLabels: [9 8 5 5 1 2 3 5 7 0 6 1 0 3 8 1 2 1 5 1 5 2 8 4 4 6 4 5 5 5 7 8]\n"
]
],
[
[
"ๅฐๅผ ้ๆฐๆฎๅ`label`ๅฏนๅบ็ๅผ่ฟ่กๅฏ่งๅใ",
"_____no_output_____"
]
],
[
[
"count = 1\nfor i in images:\n plt.subplot(4, 8, count) \n plt.imshow(np.squeeze(i))\n plt.title('num:%s'%labels[count-1])\n plt.xticks([])\n count += 1\n plt.axis(\"off\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"้่ฟไธ่ฟฐๆฅ่ฏขๆไฝ๏ผ็ๅฐ็ป่ฟๅๆขๅ็ๅพ็๏ผๆฐๆฎ้ๅ
ๅๆไบ1875็ปๆฐๆฎ๏ผๆฏ็ปๆฐๆฎไธญๅซๆ32ๅผ ๅพ็๏ผๆฏๅผ ๅพ็ๅๆฐๅผไธบ32ร32๏ผๆฐๆฎๅ
จ้จๅๅคๅฅฝๅ๏ผๅฐฑๅฏไปฅ่ฟ่กไธไธๆญฅ็ๆฐๆฎ่ฎญ็ปไบใ",
"_____no_output_____"
],
[
"## ๅฎไน็ฝ็ป",
"_____no_output_____"
],
[
"ๆไปฌ้ๆฉ็ธๅฏน็ฎๅ็LeNet็ฝ็ปใLeNet็ฝ็ปไธๅ
ๆฌ่พๅ
ฅๅฑ็ๆ
ๅตไธ๏ผๅ
ฑๆ7ๅฑ๏ผ2ไธชๅท็งฏๅฑใ2ไธชไธ้ๆ ทๅฑ๏ผๆฑ ๅๅฑ๏ผใ3ไธชๅ
จ่ฟๆฅๅฑใๆฏๅฑ้ฝๅ
ๅซไธๅๆฐ้็่ฎญ็ปๅๆฐ๏ผๅฆไธๅพๆ็คบ๏ผ",
"_____no_output_____"
],
[
"<img src=\"https://gitee.com/mindspore/docs/raw/master/docs/mindspore/programming_guide/source_zh_cn/quick_start/images/LeNet_5.png\" alt=\"LeNet5\">",
"_____no_output_____"
],
[
"> ๆดๅค็LeNet็ฝ็ป็ไป็ปไธๅจๆญค่ต่ฟฐ๏ผๅธๆ่ฏฆ็ปไบ่งฃLeNet็ฝ็ป๏ผๅฏไปฅๆฅ่ฏข<http://yann.lecun.com/exdb/lenet/>ใ",
"_____no_output_____"
],
[
"ๅจๆๅปบLeNetๅ๏ผๆไปฌๅฏนๅ
จ่ฟๆฅๅฑไปฅๅๅท็งฏๅฑ้็จNormal่ฟ่กๅๆฐๅๅงๅใ\n\nMindSporeๆฏๆ`TruncatedNormal`ใ`Normal`ใ`Uniform`็ญๅค็งๅๆฐๅๅงๅๆนๆณ๏ผๅ
ทไฝๅฏไปฅๅ่MindSpore API็`mindspore.common.initializer`ๆจกๅ่ฏดๆใ",
"_____no_output_____"
],
[
"ไฝฟ็จMindSporeๅฎไน็ฅ็ป็ฝ็ป้่ฆ็ปงๆฟ`mindspore.nn.Cell`๏ผ`Cell`ๆฏๆๆ็ฅ็ป็ฝ็ป๏ผ`Conv2d`็ญ๏ผ็ๅบ็ฑปใ\n\n็ฅ็ป็ฝ็ป็ๅๅฑ้่ฆ้ขๅ
ๅจ`__init__`ๆนๆณไธญๅฎไน๏ผ็ถๅ้่ฟๅฎไน`construct`ๆนๆณๆฅๅฎๆ็ฅ็ป็ฝ็ป็ๅๅๆ้ ๏ผๆ็
งLeNet็็ฝ็ป็ปๆ๏ผๅฎไน็ฝ็ปๅๅฑๅฆไธ๏ผ",
"_____no_output_____"
]
],
[
[
"import mindspore.nn as nn\nfrom mindspore.common.initializer import Normal\n\nclass LeNet5(nn.Cell):\n \"\"\"Lenet network structure.\"\"\"\n # define the operator required\n def __init__(self, num_class=10, num_channel=1):\n super(LeNet5, self).__init__()\n self.conv1 = nn.Conv2d(num_channel, 6, 5, pad_mode='valid')\n self.conv2 = nn.Conv2d(6, 16, 5, pad_mode='valid')\n self.fc1 = nn.Dense(16 * 5 * 5, 120, weight_init=Normal(0.02))\n self.fc2 = nn.Dense(120, 84, weight_init=Normal(0.02))\n self.fc3 = nn.Dense(84, num_class, weight_init=Normal(0.02))\n self.relu = nn.ReLU()\n self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)\n self.flatten = nn.Flatten()\n\n # use the preceding operators to construct networks\n def construct(self, x):\n x = self.max_pool2d(self.relu(self.conv1(x)))\n x = self.max_pool2d(self.relu(self.conv2(x)))\n x = self.flatten(x)\n x = self.relu(self.fc1(x))\n x = self.relu(self.fc2(x))\n x = self.fc3(x) \n return x\n \nnetwork = LeNet5()\nprint(\"layer conv1:\", network.conv1)\nprint(\"*\"*40)\nprint(\"layer fc1:\", network.fc1)",
"layer conv1: Conv2d<input_channels=1, output_channels=6, kernel_size=(5, 5),stride=(1, 1), pad_mode=valid, padding=0, dilation=(1, 1), group=1, has_bias=Falseweight_init=normal, bias_init=zeros, format=NCHW>\n****************************************\nlayer fc1: Dense<input_channels=400, output_channels=120, has_bias=True>\n"
]
],
[
[
"ๆๅปบๅฎๆๅ๏ผๅฏไปฅไฝฟ็จ`print(LeNet5())`ๅฐ็ฅ็ป็ฝ็ปไธญ็ๅๅฑๅๆฐๅ
จ้จๆๅฐๅบๆฅ๏ผไนๅฏไปฅไฝฟ็จ`LeNet().{layerๅ็งฐ}`ๆๅฐ็ธๅบ็ๅๆฐไฟกๆฏใๆฌไพ้ๆฉๆๅฐ็ฌฌไธไธชๅท็งฏๅฑๅ็ฌฌไธไธชๅ
จ่ฟๆฅๅฑ็็ธๅบๅๆฐใ",
"_____no_output_____"
],
[
"## ่ชๅฎไนๅ่ฐๅฝๆฐๆถ้ๆจกๅ็ๆๅคฑๅผๅ็ฒพๅบฆๅผ",
"_____no_output_____"
],
[
"่ชๅฎไนไธไธชๆฐๆฎๆถ้็ๅ่ฐ็ฑป`StepLossAccInfo`๏ผ็จไบๆถ้ไธค็ฑปไฟกๆฏ๏ผ\n\n1. ่ฎญ็ป่ฟ็จไธญ`step`ๅ`loss`ๅผไน้ดๅ
ณ็ณป็ไฟกๆฏ๏ผ\n2. ๆฏ่ฎญ็ป125ไธช`step`ๅๅฏนๅบๆจกๅ็ฒพๅบฆๅผ`accuracy`็ไฟกๆฏใ\n\n่ฏฅ็ฑป็ปงๆฟไบ`Callback`็ฑป๏ผๅฏไปฅ่ชๅฎไน่ฎญ็ป่ฟ็จไธญ็ๆไฝ๏ผ็ญ่ฎญ็ปๅฎๆๅ๏ผๅฏๅฐๆฐๆฎ็ปๆๅพๆฅ็`step`ไธ`loss`็ๅๅๆ
ๅต๏ผไปฅๅ`step`ไธ`accuracy`็ๅๅๆ
ๅตใ",
"_____no_output_____"
],
[
"ไปฅไธไปฃ็ ไผไฝไธบๅ่ฐๅฝๆฐ๏ผๅจๆจกๅ่ฎญ็ปๅฝๆฐ`model.train`ไธญ่ฐ็จ๏ผๆฌๆ้ช่ฏๆจกๅ้ถๆฎตไผๅฐๆถ้ๅฐ็ไฟกๆฏ๏ผ่ฟ่กๅฏ่งๅๅฑ็คบใ",
"_____no_output_____"
]
],
[
[
"from mindspore.train.callback import Callback\n\n# custom callback function\nclass StepLossAccInfo(Callback):\n def __init__(self, model, eval_dataset, steps_loss, steps_eval):\n self.model = model\n self.eval_dataset = eval_dataset\n self.steps_loss = steps_loss\n self.steps_eval = steps_eval\n \n def step_end(self, run_context):\n cb_params = run_context.original_args()\n cur_epoch = cb_params.cur_epoch_num\n cur_step = (cur_epoch-1)*1875 + cb_params.cur_step_num\n self.steps_loss[\"loss_value\"].append(str(cb_params.net_outputs))\n self.steps_loss[\"step\"].append(str(cur_step))\n if cur_step % 125 == 0:\n acc = self.model.eval(self.eval_dataset, dataset_sink_mode=False)\n self.steps_eval[\"step\"].append(cur_step)\n self.steps_eval[\"acc\"].append(acc[\"Accuracy\"])",
"_____no_output_____"
]
],
[
[
"ๅ
ถไธญ๏ผ\n\n- `model`๏ผ่ฎก็ฎๅพๆจกๅModelใ\n- `eval_dataset`๏ผ้ช่ฏๆฐๆฎ้ใ\n- `steps_loss`๏ผๆถ้stepๅlossๅผไน้ด็ๅ
ณ็ณป๏ผๆฐๆฎๆ ผๅผ`{\"step\": [], \"loss_value\": []}`ใ\n- `steps_eval`๏ผๆถ้stepๅฏนๅบๆจกๅ็ฒพๅบฆๅผ`accuracy`็ไฟกๆฏ๏ผๆฐๆฎๆ ผๅผไธบ`{\"step\": [], \"acc\": []}`ใ",
"_____no_output_____"
],
[
"## ๅฎไนๆๅคฑๅฝๆฐๅไผๅๅจ\n\nๅจ่ฟ่กๅฎไนไนๅ๏ผๅ
็ฎๅไป็ปๆๅคฑๅฝๆฐๅไผๅๅจ็ๆฆๅฟตใ\n\n- ๆๅคฑๅฝๆฐ๏ผๅๅซ็ฎๆ ๅฝๆฐ๏ผ็จไบ่กก้้ขๆตๅผไธๅฎ้
ๅผๅทฎๅผ็็จๅบฆใๆทฑๅบฆๅญฆไน ้่ฟไธๅๅฐ่ฟญไปฃๆฅ็ผฉๅฐๆๅคฑๅฝๆฐ็ๅผใๅฎไนไธไธชๅฅฝ็ๆๅคฑๅฝๆฐ๏ผๅฏไปฅๆๆๆ้ซๆจกๅ็ๆง่ฝใ\n\n- ไผๅๅจ๏ผ็จไบๆๅฐๅๆๅคฑๅฝๆฐ๏ผไป่ๅจ่ฎญ็ป่ฟ็จไธญๆน่ฟๆจกๅใ\n\nๅฎไนไบๆๅคฑๅฝๆฐๅ๏ผๅฏไปฅๅพๅฐๆๅคฑๅฝๆฐๅ
ณไบๆ้็ๆขฏๅบฆใๆขฏๅบฆ็จไบๆ็คบไผๅๅจไผๅๆ้็ๆนๅ๏ผไปฅๆ้ซๆจกๅๆง่ฝใ\n\nMindSporeๆฏๆ็ๆๅคฑๅฝๆฐๆ`SoftmaxCrossEntropyWithLogits`ใ`L1Loss`ใ`MSELoss`็ญใ่ฟ้ไฝฟ็จ`SoftmaxCrossEntropyWithLogits`ๆๅคฑๅฝๆฐใ\n\nMindSporeๆฏๆ็ไผๅๅจๆ`Adam`ใ`AdamWeightDecay`ใ`Momentum`็ญใ่ฟ้ไฝฟ็จๆต่ก็`Momentum`ไผๅๅจใ",
"_____no_output_____"
]
],
[
[
"import mindspore.nn as nn\nfrom mindspore.nn import SoftmaxCrossEntropyWithLogits\n\nlr = 0.01\nmomentum = 0.9 \n\n# create the network\nnetwork = LeNet5()\n\n# define the optimizer\nnet_opt = nn.Momentum(network.trainable_params(), lr, momentum)\n\n# define the loss function\nnet_loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')",
"_____no_output_____"
]
],
[
[
"## ่ฎญ็ป็ฝ็ป",
"_____no_output_____"
],
[
"ๅฎๆ็ฅ็ป็ฝ็ป็ๆๅปบๅ๏ผๅฐฑๅฏไปฅ็ๆ่ฟ่ก็ฝ็ป่ฎญ็ปไบ๏ผ้่ฟMindSporeๆไพ็`Model.train`ๆฅๅฃๅฏไปฅๆนไพฟๅฐ่ฟ่ก็ฝ็ป็่ฎญ็ป๏ผๅๆฐไธป่ฆๅ
ๅซ๏ผ\n\n1. ๆฏไธช`epoch`้่ฆ้ๅๅฎๆๅพ็็batchๆฐ๏ผ`epoch_size`๏ผ \n2. ่ฎญ็ปๆฐๆฎ้`ds_train`๏ผ \n3. MindSporeๆไพไบcallbackๆบๅถ๏ผๅ่ฐๅฝๆฐ`callbacks`๏ผๅ
ๅซ`ModelCheckpoint`ใ`LossMonitor`ๅ`Callback`ๆจกๅๆฃๆตๅๆฐ๏ผๅ
ถไธญ`ModelCheckpoint`ๅฏไปฅไฟๅญ็ฝ็ปๆจกๅๅๅๆฐ๏ผไปฅไพฟ่ฟ่กๅ็ปญ็fine-tuning๏ผๅพฎ่ฐ๏ผๆไฝ๏ผ \n4. ๆฐๆฎไธๆฒๆจกๅผ`dataset_sink_mode`๏ผๆญคๅๆฐ้ป่ฎค`True`้่ฎพ็ฝฎๆ`False`๏ผๅ ไธบๆญคๆจกๅผไธๆฏๆCPU่ฎก็ฎๅนณๅฐใ",
"_____no_output_____"
]
],
[
[
"import os\nfrom mindspore import Tensor, Model\nfrom mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor\nfrom mindspore.nn import Accuracy\n\nepoch_size = 1\nmnist_path = \"./datasets/MNIST_Data\"\nmodel_path = \"./models/ckpt/mindspore_quick_start/\"\n\nrepeat_size = 1\nds_train = create_dataset(os.path.join(mnist_path, \"train\"), 32, repeat_size)\neval_dataset = create_dataset(os.path.join(mnist_path, \"test\"), 32)\n\n# clean up old run files before in Linux\nos.system('rm -f {0}*.ckpt {0}*.meta {0}*.pb'.format(model_path))\n\n# define the model\nmodel = Model(network, net_loss, net_opt, metrics={\"Accuracy\": Accuracy()} )\n\n# save the network model and parameters for subsequence fine-tuning\nconfig_ck = CheckpointConfig(save_checkpoint_steps=375, keep_checkpoint_max=16)\n# group layers into an object with training and evaluation features\nckpoint_cb = ModelCheckpoint(prefix=\"checkpoint_lenet\", directory=model_path, config=config_ck)\n\nsteps_loss = {\"step\": [], \"loss_value\": []}\nsteps_eval = {\"step\": [], \"acc\": []}\n# collect the steps,loss and accuracy information\nstep_loss_acc_info = StepLossAccInfo(model , eval_dataset, steps_loss, steps_eval)\n\nmodel.train(epoch_size, ds_train, callbacks=[ckpoint_cb, LossMonitor(125), step_loss_acc_info], dataset_sink_mode=False)",
"epoch: 1 step: 125, loss is 2.2961428\nepoch: 1 step: 250, loss is 2.2972755\nepoch: 1 step: 375, loss is 2.2992194\nepoch: 1 step: 500, loss is 2.3089285\nepoch: 1 step: 625, loss is 2.304193\nepoch: 1 step: 750, loss is 2.3023324\nepoch: 1 step: 875, loss is 0.69262105\nepoch: 1 step: 1000, loss is 0.23356618\nepoch: 1 step: 1125, loss is 0.35567114\nepoch: 1 step: 1250, loss is 0.2065609\nepoch: 1 step: 1375, loss is 0.19551893\nepoch: 1 step: 1500, loss is 0.1836512\nepoch: 1 step: 1625, loss is 0.028234977\nepoch: 1 step: 1750, loss is 0.1124336\nepoch: 1 step: 1875, loss is 0.026502304\n"
]
],
[
[
"่ฎญ็ปๅฎๆๅ๏ผไผๅจ่ฎพ็ฝฎ็ๆจกๅไฟๅญ่ทฏๅพไธ็ๆๅคไธชๆจกๅๆไปถใ",
"_____no_output_____"
]
],
[
[
"!tree $model_path",
"./models/ckpt/mindspore_quick_start/\nโโโ checkpoint_lenet-1_1125.ckpt\nโโโ checkpoint_lenet-1_1500.ckpt\nโโโ checkpoint_lenet-1_1875.ckpt\nโโโ checkpoint_lenet-1_375.ckpt\nโโโ checkpoint_lenet-1_750.ckpt\nโโโ checkpoint_lenet-graph.meta\n\n0 directories, 6 files\n"
]
],
[
[
"ๆไปถๅ็งฐๅ
ทไฝๅซไน`{ModelCheckpointไธญ่ฎพ็ฝฎ็่ชๅฎไนๅ็งฐ}-{็ฌฌๅ ไธชepoch}_{็ฌฌๅ ไธชstep}.ckpt`ใ\n\n> ไฝฟ็จ่ช็ฑๆงๅถๅพช็ฏ็่ฟญไปฃๆฌกๆฐใ้ๅๆฐๆฎ้็ญ๏ผๅฏไปฅๅ็
งๅฎ็ฝ็ผ็จๆๅใ[่ฎญ็ป](https://www.mindspore.cn/docs/programming_guide/zh-CN/master/train.html#%E8%87%AA%E5%AE%9A%E4%B9%89%E8%AE%AD%E7%BB%83%E5%BE%AA%E7%8E%AF)ใ็่ชๅฎไนๅพช็ฏ่ฎญ็ป้จๅใ\n",
"_____no_output_____"
],
[
"### ๆฅ็ๆจกๅๆๅคฑๅผ้็่ฎญ็ปๆญฅๆฐ็ๅๅๆ
ๅต",
"_____no_output_____"
]
],
[
[
"steps = steps_loss[\"step\"]\nloss_value = steps_loss[\"loss_value\"]\nsteps = list(map(int, steps))\nloss_value = list(map(float, loss_value))\nplt.plot(steps, loss_value, color=\"red\")\nplt.xlabel(\"Steps\")\nplt.ylabel(\"Loss_value\")\nplt.title(\"Change chart of model loss value\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"ไปไธ้ขๅฏไปฅ็ๅบๆฅๅคง่ดๅไธบไธไธช้ถๆฎต๏ผ\n\n้ถๆฎตไธ๏ผ่ฎญ็ปๅผๅงๆถ๏ผlossๅผๅจ2.2ไธไธๆตฎๅจ๏ผ่ฎญ็ปๆถ็ๆ่งๅนถไธๆๆพใ\n\n้ถๆฎตไบ๏ผ่ฎญ็ปๅฐๆไธๆถๅป๏ผlossๅผ่ฟ
้ๅๅฐ๏ผ่ฎญ็ปๆถ็ๅคงๅน
ๅขๅ ใ\n\n้ถๆฎตไธ๏ผlossๅผๆถๆๅฐไธๅฎๅฐ็ๅผๅ๏ผๅผๅงๆฏ่กๅจไธไธชๅฐ็ๅบ้ดไธๆ ๆณ่ถ0๏ผๅ็ปง็ปญๅขๅ ่ฎญ็ปๅนถๆ ๆๆพๆถ็๏ผ่ณๆญค่ฎญ็ป็ปๆใ",
"_____no_output_____"
],
[
"## ้ช่ฏๆจกๅ",
"_____no_output_____"
],
[
"ๅพๅฐๆจกๅๆไปถๅ๏ผ้่ฟ่ฟ่กๆต่ฏๆฐๆฎ้ๅพๅฐ็็ปๆ๏ผ้ช่ฏๆจกๅ็ๆณๅ่ฝๅใ\n\nๆญๅปบๆต่ฏ็ฝ็ปๆฅ้ช่ฏๆจกๅ็่ฟ็จไธป่ฆไธบ๏ผ\n\n1. ่ฝฝๅ
ฅๆจกๅ`.ckpt`ๆไปถไธญ็ๅๆฐ`param_dict`๏ผ\n2. ๅฐๅๆฐ`param_dict`่ฝฝๅ
ฅๅฐ็ฅ็ป็ฝ็ปLeNetไธญ๏ผ\n3. ่ฝฝๅ
ฅๆต่ฏๆฐๆฎ้๏ผ\n4. ่ฐ็จๅฝๆฐ`model.eval`ไผ ๅ
ฅๅๆฐๆต่ฏๆฐๆฎ้`ds_eval`๏ผ็ๆๆจกๅ`checkpoint_lenet-{epoch}_1875.ckpt`็็ฒพๅบฆๅผใ",
"_____no_output_____"
]
],
[
[
"from mindspore import load_checkpoint, load_param_into_net\n\n# testing relate modules \ndef test_net(network, model, mnist_path):\n \"\"\"Define the evaluation method.\"\"\"\n print(\"============== Starting Testing ==============\")\n # load the saved model for evaluation\n param_dict = load_checkpoint(\"./models/ckpt/mindspore_quick_start/checkpoint_lenet-1_1875.ckpt\")\n # load parameter to the network\n load_param_into_net(network, param_dict)\n # load testing dataset\n ds_eval = create_dataset(os.path.join(mnist_path, \"test\"))\n acc = model.eval(ds_eval, dataset_sink_mode=False)\n print(\"============== Accuracy:{} ==============\".format(acc))\n\ntest_net(network, model, mnist_path)",
"============== Starting Testing ==============\n============== Accuracy:{'Accuracy': 0.9697516025641025} ==============\n"
]
],
[
[
"ๅ
ถไธญ๏ผ\n\n- `load_checkpoint`๏ผ้่ฟ่ฏฅๆฅๅฃๅ ่ฝฝCheckPointๆจกๅๅๆฐๆไปถ๏ผ่ฟๅไธไธชๅๆฐๅญๅ
ธใ\n\n- `checkpoint_lenet-1_1875.ckpt`๏ผไนๅไฟๅญ็CheckPointๆจกๅๆไปถๅ็งฐใ\n\n- `load_param_into_net`๏ผ้่ฟ่ฏฅๆฅๅฃๆๅๆฐๅ ่ฝฝๅฐ็ฝ็ปไธญใ\n\n็ป่ฟ1875ๆญฅ่ฎญ็ปๅ็ๆ็ๆจกๅ็ฒพๅบฆ่ถ
่ฟ95%๏ผๆจกๅไผ่ฏใ\n\nๆไปฌๅฏไปฅ็ไธไธๆจกๅ้็่ฎญ็ปๆญฅๆฐๅๅ๏ผ็ฒพๅบฆ้ไนๅๅ็ๆ
ๅตใ",
"_____no_output_____"
],
[
"`eval_show`ๅฐ็ปๅถๆฏ25ไธช`step`ไธๆจกๅ็ฒพๅบฆๅผ็ๆ็บฟๅพ๏ผๅ
ถไธญ`steps_eval`ๅญๅจ็ๆจกๅ็stepๆฐๅๅฏนๅบๆจกๅ็ฒพๅบฆๅผไฟกๆฏใ",
"_____no_output_____"
]
],
[
[
"def eval_show(steps_eval):\n plt.xlabel(\"step number\")\n plt.ylabel(\"Model accuracy\")\n plt.title(\"Model accuracy variation chart\")\n plt.plot(steps_eval[\"step\"], steps_eval[\"acc\"], \"red\")\n plt.show()\n\neval_show(steps_eval)",
"_____no_output_____"
]
],
[
[
"ไปๅพไธญๅฏไปฅ็ๅบ่ฎญ็ปๅพๅฐ็ๆจกๅ็ฒพๅบฆๅๅๅไธบไธไธช้ถๆฎต๏ผ\n\n้ถๆฎตไธ๏ผ่ฎญ็ปๅผๅงๆถ๏ผๆจกๅ็ฒพๅบฆ็ผๆ
ข้่กไธๅใ\n\n้ถๆฎตไบ๏ผ่ฎญ็ปๅฐๆไธๆถๅป๏ผๆจกๅ็ฒพๅบฆ่ฟ
้ไธๅใ\n\n้ถๆฎตไธ๏ผ็ผๆ
ขไธๅ่ถ่ฟไบไธๅฐ1็ๆไธชๅผๆถ้่ฟๆฏ่กใ\n\nๆดไธช่ฎญ็ป่ฟ็จ๏ผ้็่ฎญ็ปๆฐๆฎ็ๅขๅ ๏ผไผๅฏนๆจกๅ็ฒพๅบฆๆ็ๆญฃ็ธๅ
ณ็ๅฝฑๅ๏ผไฝๆฏ้็็ฒพๅบฆๅฐ่พพไธๅฎ็จๅบฆ๏ผ่ฎญ็ปๆถ็ไผ้ไฝใ",
"_____no_output_____"
],
[
"## ๆจ็้ขๆต",
"_____no_output_____"
],
[
"ๆไปฌไฝฟ็จ็ๆ็ๆจกๅๅบ็จๅฐๅ็ฑป้ขๆตๅไธชๆ่
ๅ็ปๅพ็ๆฐๆฎไธ๏ผๅ
ทไฝๆญฅ้ชคๅฆไธ๏ผ",
"_____no_output_____"
],
[
"1. ๅฐ่ฆๆต่ฏ็ๆฐๆฎ่ฝฌๆขๆ้ๅบLeNet็ๆฐๆฎ็ฑปๅใ\n2. ๆๅๅบ`image`็ๆฐๆฎใ\n3. ไฝฟ็จๅฝๆฐ`model.predict`้ขๆต`image`ๅฏนๅบ็ๆฐๅญใ้่ฆ่ฏดๆ็ๆฏ`predict`่ฟๅ็ๆฏ`image`ๅฏนๅบ0-9็ๆฆ็ๅผใ\n4. ่ฐ็จ`plot_pie`ๅฐ้ขๆตๅบ็ๅๆฐๅญ็ๆฆ็ๆพ็คบๅบๆฅใ่ดๆฆ็็ๆฐๅญไผ่ขซๅปๆใ",
"_____no_output_____"
],
[
"่ฝฝๅ
ฅ่ฆ้ขๆต็ๆฐๆฎ้๏ผๅนถ่ฐ็จ`create_dataset`่ฝฌๆขๆ็ฌฆๅๆ ผๅผ่ฆๆฑ็ๆฐๆฎ้๏ผๅนถ้ๅๅ
ถไธญไธ็ป32ๅผ ๅพ็่ฟ่กๆจ็้ขๆตใ",
"_____no_output_____"
]
],
[
[
"ds_test = create_dataset(test_data_path).create_dict_iterator()\ndata = next(ds_test)\nimages = data[\"image\"].asnumpy()\nlabels = data[\"label\"].asnumpy()\n\noutput = model.predict(Tensor(data['image']))\npred = np.argmax(output.asnumpy(), axis=1)\nerr_num = []\nindex = 1\nfor i in range(len(labels)):\n plt.subplot(4, 8, i+1)\n color = 'blue' if pred[i] == labels[i] else 'red'\n plt.title(\"pre:{}\".format(pred[i]), color=color)\n plt.imshow(np.squeeze(images[i]))\n plt.axis(\"off\")\n if color == 'red':\n index = 0\n print(\"Row {}, column {} is incorrectly identified as {}, the correct value should be {}\".format(int(i/8)+1, i%8+1, pred[i], labels[i]), '\\n')\nif index:\n print(\"All the figures in this group are predicted correctly!\")\nprint(pred, \"<--Predicted figures\") \nprint(labels, \"<--The right number\")\nplt.show()",
"Row 1, column 2 is incorrectly identified as 8, the correct value should be 2 \n\nRow 3, column 7 is incorrectly identified as 9, the correct value should be 4 \n\n[5 8 0 2 7 4 1 7 8 6 6 8 7 9 5 8 7 2 0 4 5 9 9 3 9 1 3 9 7 6 3 4] <--Predicted figures\n[5 2 0 2 7 4 1 7 8 6 6 8 7 9 5 8 7 2 0 4 5 9 4 3 9 1 3 9 7 6 3 4] <--The right number\n"
]
],
[
[
"ๆๅปบไธไธชๆฆ็ๅๆ็้ฅผๅพๅฝๆฐ๏ผๆฌไพๅฑ็คบไบๅฝๅ`batch`ไธญ็็ฌฌไธๅผ ๅพ็็ๅๆ้ฅผๅพใ\n\n`prb`ๅญๅจไบไธ้ข่ฟ็ป32ๅผ ้ขๆตๆฐๅญๅๅฏนๅบ็่พๅบ็ปๆ๏ผๅๅบ็ฌฌไธๅผ ๅพ็ๅฏนๅบ[0-9]ๅ็ฑป็ปๆ`prb[0]`๏ผๅธฆๅ
ฅsigmolๅ
ฌๅผ$\\frac{1}{1+e^{-x}}$๏ผๅพๅฐ่ฏฅๅพ็ๅฏนๅบ[0-9]็ๆฆ็๏ผๅฐๆฆ็ๅผ0.5ไปฅไธ็ๆฐๅญ็ปๆ้ฅผๅพๅๆใ",
"_____no_output_____"
]
],
[
[
"import numpy as np\n# define the pie drawing function of probability analysis\n\nprb = output.asnumpy()\n\ndef plot_pie(prbs):\n dict1 = {}\n # remove the negative number and build the dictionary dict1. The key is the number and the value is the probability value\n for i in range(10):\n if prbs[i] > 0:\n dict1[str(i)] = prbs[i]\n\n label_list = dict1.keys()\n size = dict1.values()\n colors = [\"red\", \"green\", \"pink\", \"blue\", \"purple\", \"orange\", \"gray\"] \n color = colors[: len(size)]\n plt.pie(size, colors=color, labels=label_list, labeldistance=1.1, autopct=\"%1.1f%%\", shadow=False, startangle=90, pctdistance=0.6)\n plt.axis(\"equal\")\n plt.legend()\n plt.title(\"Image classification\")\n plt.show()\n\n\nprint(\"The probability of corresponding numbers [0-9] in Figure 1:\\n\", list(map(lambda x:1/(1+np.exp(-x)), prb[0])))\nplot_pie(prb[0])",
"The probability of corresponding numbers [0-9] in Figure 1:\n [0.16316024637045334, 0.04876983802517727, 0.02261393383191808, 0.9963960715325838, 0.037634749376478496, 0.998856840107891, 0.1612087582052347, 0.08714517716531343, 0.6207903209907534, 0.9653037548477632]\n"
]
],
[
[
"ไปฅไธๅฐฑๆฏ่ฟๆฌกๆๅๆฐๅญๅ็ฑปๅบ็จ็ๅ
จ้จไฝ้ช่ฟ็จใ",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
eca8609661a908d5f4567599ac04f40f7cb2ba78 | 58,474 | ipynb | Jupyter Notebook | data/.ipynb_checkpoints/data_statistic-checkpoint.ipynb | YefanZhou/seuthesis2020 | 9b4558fda9f1f68003490aa9a57399f7e04718c6 | [
"MIT"
] | null | null | null | data/.ipynb_checkpoints/data_statistic-checkpoint.ipynb | YefanZhou/seuthesis2020 | 9b4558fda9f1f68003490aa9a57399f7e04718c6 | [
"MIT"
] | null | null | null | data/.ipynb_checkpoints/data_statistic-checkpoint.ipynb | YefanZhou/seuthesis2020 | 9b4558fda9f1f68003490aa9a57399f7e04718c6 | [
"MIT"
] | null | null | null | 423.724638 | 34,264 | 0.938964 | [
[
[
"import os\nimport sys\nimport glob\nimport time\nimport argparse\n\nimport numpy as np\nfrom matplotlib import pyplot as plt",
"_____no_output_____"
],
[
"address_0 = 'ae_label_baseline'\nprint(address_0)\nresults_list = ['stats_finecd_epochval.npz', 'stats_finecd_itertrain.npz']\nresults_list = [results_list[1]]\n\n# logs_list = ['logs_aebatch10_0', 'logs_aebatch10_1', 'logs_aebatch10_2', 'logs_aebatch128_0'] #'logs',logs_adam_0 logs_adam_1\nlogs_list = ['logs', 'logs_adam_0', 'logs_adam_1']\nlogs_list = logs_list[0:2]\nprint(logs_list)",
"ae_label_baseline\n['logs', 'logs_adam_0']\n"
],
[
"fig = plt.figure(figsize=(10,10))\nfor row, log in enumerate(logs_list): \n for col, result in enumerate(results_list): \n file = os.path.join(address_0, '{}/{}'.format(log,result))\n stats = np.load(file)\n plt.plot(stats[\"iter_loss\"][:,0], stats[\"iter_loss\"][:,1],'-', label = '{}'.format('Auto-encoder'))\n# plt.tick_params(labelsize=33)\n# plt.legend(fontsize=20)\nplt.show() \n",
"_____no_output_____"
],
[
"results_list = ['stats_finecd_epochval.npz', 'stats_finecd_itertrain.npz']\nresults_list = [results_list[0]]\n\nfig = plt.figure(figsize=(10,10))\nfor row, log in enumerate(logs_list): \n for col, result in enumerate(results_list): \n file = os.path.join(address_0, '{}/{}'.format(log,result))\n stats = np.load(file)\n plt.plot(stats[\"iter_loss\"][:,0], stats[\"iter_loss\"][:,1],'-', label = '{}'.format('Auto-encoder'))\n# plt.tick_params(labelsize=33)\n# plt.legend(fontsize=20)\nplt.show() ",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code"
]
] |
eca868e88b02785e4ed430d47ef0096077e7b4eb | 22,034 | ipynb | Jupyter Notebook | Word2Vec_Embeddings.ipynb | Andres361/itba-dpl | 949ad55e982097e3bea5c3a16eaefae26a26d6bb | [
"MIT"
] | null | null | null | Word2Vec_Embeddings.ipynb | Andres361/itba-dpl | 949ad55e982097e3bea5c3a16eaefae26a26d6bb | [
"MIT"
] | null | null | null | Word2Vec_Embeddings.ipynb | Andres361/itba-dpl | 949ad55e982097e3bea5c3a16eaefae26a26d6bb | [
"MIT"
] | null | null | null | 37.794168 | 987 | 0.468367 | [
[
[
"from keras.datasets import imdb\nfrom keras.preprocessing.sequence import pad_sequences\nimport numpy as np\nimport gensim",
"_____no_output_____"
]
],
[
[
"# Dataset: IMDB Movie reviews sentiment classification",
"_____no_output_____"
]
],
[
[
"num_words=30000\nINDEX_FROM=3 # idx 0 => PAD, idx 1 => START, idx 2 => OOV (out of vocab.)\n(training_data, training_targets), (testing_data, testing_targets) = imdb.load_data(num_words=num_words+2,)\ndata = np.concatenate((training_data, testing_data), axis=0)\ntargets = np.concatenate((training_targets, testing_targets), axis=0)",
"Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/imdb.npz\n17465344/17464789 [==============================] - 0s 0us/step\n17473536/17464789 [==============================] - 0s 0us/step\n"
],
[
"num_words=len(np.unique(np.hstack(data)))\nprint(\"Categories:\", np.unique(targets))\nprint(\"Number of unique words:\", num_words)",
"Categories: [0 1]\nNumber of unique words: 30000\n"
]
],
[
[
"Agregar el siguiente archivo al Google Drive\n\nhttps://drive.google.com/open?id=1K5r423yMxBb1Yz2uDT7lto60lu1jqEjl",
"_____no_output_____"
]
],
[
[
"from google.colab import drive\ndrive.mount('/content/drive')",
"Mounted at /content/drive\n"
],
[
"length = [len(i) for i in data]\nprint(\"Average Review length:\", np.mean(length))\nprint(\"Standard Deviation:\", round(np.std(length)))",
"Average Review length: 234.75892\nStandard Deviation: 173\n"
],
[
"print(\"Label:\", targets[0])\nprint(data[0])",
"Label: 1\n[1, 14, 22, 16, 43, 530, 973, 1622, 1385, 65, 458, 4468, 66, 3941, 4, 173, 36, 256, 5, 25, 100, 43, 838, 112, 50, 670, 22665, 9, 35, 480, 284, 5, 150, 4, 172, 112, 167, 21631, 336, 385, 39, 4, 172, 4536, 1111, 17, 546, 38, 13, 447, 4, 192, 50, 16, 6, 147, 2025, 19, 14, 22, 4, 1920, 4613, 469, 4, 22, 71, 87, 12, 16, 43, 530, 38, 76, 15, 13, 1247, 4, 22, 17, 515, 17, 12, 16, 626, 18, 19193, 5, 62, 386, 12, 8, 316, 8, 106, 5, 4, 2223, 5244, 16, 480, 66, 3785, 33, 4, 130, 12, 16, 38, 619, 5, 25, 124, 51, 36, 135, 48, 25, 1415, 33, 6, 22, 12, 215, 28, 77, 52, 5, 14, 407, 16, 82, 10311, 8, 4, 107, 117, 5952, 15, 256, 4, 2, 7, 3766, 5, 723, 36, 71, 43, 530, 476, 26, 400, 317, 46, 7, 4, 12118, 1029, 13, 104, 88, 4, 381, 15, 297, 98, 32, 2071, 56, 26, 141, 6, 194, 7486, 18, 4, 226, 22, 21, 134, 476, 26, 480, 5, 144, 30, 5535, 18, 51, 36, 28, 224, 92, 25, 104, 4, 226, 65, 16, 38, 1334, 88, 12, 16, 283, 5, 16, 4472, 113, 103, 32, 15, 16, 5345, 19, 178, 32]\n"
]
],
[
[
"# Traemos el vocabulario y armamos indice reverso",
"_____no_output_____"
]
],
[
[
"index = imdb.get_word_index()\nreverse_index = dict([(value, key) for (key, value) in index.items()]) \ndecoded = \" \".join( [reverse_index.get(i - INDEX_FROM, \"#\") for i in data[1]] )\nprint(decoded)",
"Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/imdb_word_index.json\n1646592/1641221 [==============================] - 0s 0us/step\n1654784/1641221 [==============================] - 0s 0us/step\n# big hair big boobs bad music and a giant safety pin these are the words to best describe this terrible movie i love cheesy horror movies and i've seen hundreds but this had got to be on of the worst ever made the plot is paper thin and ridiculous the acting is an abomination the script is completely laughable the best is the end showdown with the cop and how he worked out who the killer is it's just so damn terribly written the clothes are sickening and funny in equal measures the hair is big lots of boobs bounce men wear those cut tee shirts that show off their stomachs sickening that men actually wore them and the music is just # trash that plays over and over again in almost every scene there is trashy music boobs and paramedics taking away bodies and the gym still doesn't close for # all joking aside this is a truly bad film whose only charm is to look back on the disaster that was the 80's and have a good old laugh at how bad everything was back then\n"
],
[
"w2v = gensim.models.KeyedVectors.load_word2vec_format(\"/content/drive/My Drive/GoogleNews-vectors-negative300.bin\", binary=True) ",
"_____no_output_____"
],
[
"w2v.wv[\"car\"]",
"/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:1: DeprecationWarning: Call to deprecated `wv` (Attribute will be removed in 4.0.0, use self instead).\n \"\"\"Entry point for launching an IPython kernel.\n"
]
],
[
[
"# Armamos la matriz de embeddings",
"_____no_output_____"
]
],
[
[
"embed_dim=300\nembedding_matrix=np.zeros([num_words+4,embed_dim])\nfor word, idx in index.items():\n if idx <= num_words and word in w2v.wv:\n embedding_matrix[idx+INDEX_FROM,:]=w2v.wv[word]\n \nembedding_matrix.shape",
"/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:4: DeprecationWarning: Call to deprecated `wv` (Attribute will be removed in 4.0.0, use self instead).\n after removing the cwd from sys.path.\n/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:5: DeprecationWarning: Call to deprecated `wv` (Attribute will be removed in 4.0.0, use self instead).\n \"\"\"\n"
]
],
[
[
"# Hacemos que todos los reviews tengan el mismo largo",
"_____no_output_____"
]
],
[
[
"maxlen=1000",
"_____no_output_____"
],
[
"data = pad_sequences(data, maxlen=maxlen, value=0.0)",
"_____no_output_____"
],
[
"len(data[0])",
"_____no_output_____"
],
[
"len(data[1])",
"_____no_output_____"
],
[
"data=np.array(data)",
"_____no_output_____"
],
[
"data.shape",
"_____no_output_____"
]
],
[
[
"# Armamos el modelo con una Conv1D",
"_____no_output_____"
]
],
[
[
"from keras.layers import Embedding, Conv1D, MaxPooling1D, GlobalMaxPooling1D, Dropout, Dense\nfrom keras.models import Sequential\nfrom keras import optimizers\n\n",
"_____no_output_____"
],
[
"model = Sequential()\nmodel.add(Embedding(30000, embed_dim, input_length=maxlen, trainable=True))\nmodel.add(Conv1D(filters=64, kernel_size=1, activation='relu'))\nmodel.add(MaxPooling1D(pool_size=2))\nmodel.add(GlobalMaxPooling1D())\nmodel.add(Dropout(0.5))\nmodel.add(Dense(300, activation='softmax'))\nmodel.summary()",
"Model: \"sequential_12\"\n_________________________________________________________________\n Layer (type) Output Shape Param # \n=================================================================\n embedding_11 (Embedding) (None, 1000, 300) 9000000 \n \n conv1d_11 (Conv1D) (None, 1000, 64) 19264 \n \n max_pooling1d_11 (MaxPoolin (None, 500, 64) 0 \n g1D) \n \n global_max_pooling1d_1 (Glo (None, 64) 0 \n balMaxPooling1D) \n \n dropout_8 (Dropout) (None, 64) 0 \n \n dense_6 (Dense) (None, 300) 19500 \n \n=================================================================\nTotal params: 9,038,764\nTrainable params: 9,038,764\nNon-trainable params: 0\n_________________________________________________________________\n"
],
[
"",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
eca87ae224c4092c6b5351d604365d1b0ccc81b6 | 161,044 | ipynb | Jupyter Notebook | 02 - Estruturas de Dados/Grafos - Estruturas de Dados.ipynb | RafaelPereira56/Let-s-Code | aad1b5e268fc88ce2f615800f60350d1500bf033 | [
"MIT"
] | null | null | null | 02 - Estruturas de Dados/Grafos - Estruturas de Dados.ipynb | RafaelPereira56/Let-s-Code | aad1b5e268fc88ce2f615800f60350d1500bf033 | [
"MIT"
] | null | null | null | 02 - Estruturas de Dados/Grafos - Estruturas de Dados.ipynb | RafaelPereira56/Let-s-Code | aad1b5e268fc88ce2f615800f60350d1500bf033 | [
"MIT"
] | null | null | null | 161,044 | 161,044 | 0.941749 | [
[
[
"Questรฃo 1\n\n> Bloco com recuo\n\n> Bloco com recuo\n\n\n\n\nImplemente o grafo ilustrado pela figura abaixo, utilizando a representaรงรฃo baseada em uma matriz de adjacรชncias. Em seguida, exiba a matriz que representa o grafo.\n",
"_____no_output_____"
]
],
[
[
"class Grafo:\n def __init__(self):\n self.adjacente_matriz = {}\n \n def add_vertice(self, indice):\n self.adjacente_matriz[indice] = {}\n \n def add_aresta(self, vertice1, vertice2):\n self.adjacente_matriz[vertice1][vertice2] = 1\n self.adjacente_matriz[vertice2][vertice1] = 1\n\n def remover_aresta(self, vertice1, vertice2):\n if self.adjacente_matriz.get(vertice1).get(vertice2):\n self.adjacente_matriz[vertice1][vertice2] = 0\n \n def __repr__(self):\n matriz = ' | '\n\n for vertice1 in self.adjacente_matriz:\n matriz += str(vertice1) + ' '\n \n matriz += '\\n'\n\n for vertice1 in self.adjacente_matriz:\n matriz += str(vertice1) + ' | '\n for vertice2 in self.adjacente_matriz:\n matriz += str(self.adjacente_matriz[vertice1].get(vertice2, 0)) + ' '\n matriz += '\\n'\n \n return matriz\n\no_grafo = Grafo()\n\no_grafo.add_vertice(0)\no_grafo.add_vertice(1)\no_grafo.add_vertice(2)\no_grafo.add_vertice(3)\no_grafo.add_vertice(4)\no_grafo.add_vertice(5)\no_grafo.add_vertice(6)\n\no_grafo.add_aresta(1, 3)\no_grafo.add_aresta(2, 3)\no_grafo.add_aresta(2, 5)\no_grafo.add_aresta(3, 1)\no_grafo.add_aresta(4, 1)\no_grafo.add_aresta(4, 3)\no_grafo.add_aresta(4, 6)\no_grafo.add_aresta(4, 6)\no_grafo.add_aresta(5, 2)\no_grafo.add_aresta(5, 4)\no_grafo.add_aresta(5, 5)\no_grafo.add_aresta(5, 6)\no_grafo.add_aresta(6, 4)\no_grafo.add_aresta(6, 5)\n\nprint(o_grafo)",
"_____no_output_____"
]
],
[
[
"\nQuestรฃo 2\nImplemente o grafo ilustrado pela figura abaixo, utilizando a representaรงรฃo baseada em uma lista de adjacรชncias. Em seguida, observe a lista de adjacรชncias que representa o grafo.\n",
"_____no_output_____"
]
],
[
[
"class Grafo:\n def __init__ (self):\n self.adjacente_matriz = {}\n\n def add_vertice(self, indice):\n if not self.adjacente_matriz.get(indice):\n self.adjacente_matriz[indice] = {}\n \n def add_aresta(self, origem, destino):\n #if self.adjacente_matriz.get(origem):\n self.adjacente_matriz[origem][destino] = 1\n\n def __repr__(self):\n matriz = ' | '\n\n for vertice in self.adjacente_matriz:\n matriz += str(vertice) + ' '\n\n matriz += '\\n'\n\n for vertice1 in self.adjacente_matriz.keys():\n matriz += str(vertice) + '| '\n\n for vertice2 in self.adjacente_matriz.keys():\n matriz += str(self.adjacente_matriz[vertice1].get(vertice2, 0)) + ' '\n \n matriz += '\\n'\n \n return matriz\n\ngrafo = Grafo ()\n\ngrafo.add_vertice(1)\ngrafo.add_vertice(2)\ngrafo.add_vertice(3)\ngrafo.add_vertice(4)\ngrafo.add_vertice(5)\n\ngrafo.add_aresta(1, 2)\ngrafo.add_aresta(1, 3)\ngrafo.add_aresta(1, 4)\ngrafo.add_aresta(2, 3)\ngrafo.add_aresta(3, 2)\ngrafo.add_aresta(4, 4)\ngrafo.add_aresta(5, 2)\n\nprint(grafo)\n",
"_____no_output_____"
]
],
[
[
"Questรฃo 3\nConsidere um determinado nรบmero de cidades (N) e uma determinada quantidade de rotas (M) que vocรช sabe que existem entre aquelas cidades. Com base nisso, vocรช deve criar um script em Python que recebe um nรบmero N de cidades e um nรบmero M de caminhos que existem entre elas. Em seguida, seu programa vai ler quais sรฃo esses M caminhos e a distรขncia deles.\n\nPara exemplificar, vamos analisar a entrada abaixo:\n\n3 2 \n0 1 10 \n1 2 23\nO exemplo acima รฉ bem simples. Os dois primeiros nรบmeros sรฃo o N e o M; ou seja, existem 3 cidades e 2 caminhos conhecidos entre elas. Em seguida, temos os dois caminhos:\n\nO primeiro vai da cidade 0 para a cidade 1, cuja distรขncia รฉ de 10.\nO segundo vai da cidade 1 para a cidade 2, cuja distรขncia รฉ de 23.\nTeste o seu programa para a entrada acima e, tambรฉm, para a entrada abaixo:\n\n7 11\n0 1 7\n0 3 5\n1 2 8\n1 3 9\n1 4 7\n2 4 5\n3 4 15\n3 5 6\n4 5 8\n4 6 9\n5 6 11",
"_____no_output_____"
]
],
[
[
"class Aresta:\n def __init__(self, no, peso):\n self.no = no\n self.peso = peso\n\nclass No:\n def __init__(self, indice):\n self.indice = indice\n self._distancia = float('inf')\n self._parente = None\n self._vizinhos = []\n \n @property\n def distancia(self):\n return self._distancia\n \n @distancia.setter\n def distancia(self, nova_distancia):\n self._distancia = nova_distancia\n \n @property\n def parente(self):\n return self._parente\n \n @parente.setter\n def parente(self, no):\n self._parente = no\n \n @property\n def vizinhos(self):\n return self._vizinhos\n \n @vizinhos.setter\n def vizinhos(self, dados):\n no, peso = dados\n\n self._vizinhos.append(Aresta(no, peso))\n\nclass Grafo:\n def __init__(self):\n self._nos = {}\n \n @property\n def nos(self):\n return self.nos\n \n @nos.setter\n def nos(self, indice):\n self.nos[indice] = No(indice)\n\n def pegar_no_pelo_indice(self, indice):\n return self.no.get(indice)\n \n def add_aresta(sef, indice1, indice2, peso):\n no1 = self.pegar_no_pelo_indice(indice1)\n\n no2 = self.pegar_no_pelo_indice(indice2)\n\n if no1 and no2:\n no1.vizinhos = no2, peso\n no2.vizinhos = no1, peso\n\n @staticmethod\n def extrair_no_de_menor_distancia(no_lista):\n no_lista.sort(key=lambda no: no.distancia)\n\n no_de_menor_distancia = no_lista[0]\n\n no_lista = no_lista[1:]\n\n return no_de_menor_distancia, no_lista\n\n @staticmethod\n def relaxar(no, vizinho, peso):\n if vizinho.distancia > no.distancia + peso:\n vizinho.distancia = no.distancia + peso\n\n vizinho.parente = no\n \n def dijkstra(self, recurso):\n recurso_no = self.pegar_no_pelo_indice(recurso)\n\n recurso_no.distancia = 0\n\n distancias = []\n\n fila = list(self.nos.valores())\n\n while fila:\n no, fila = self.extrair_no_de_menor_distancia(fila)\n\n distancias.append(no)\n\n for vizinho in no.vizinhos:\n self.relaxar(no, vizinho.no, vizinho.peso)\n \n for no in distancias:\n print(f'No: {no.indice} | Distancia: {no.distancia}')\n\ndef percorrer():\n grafo = 
Grafo()\n\n dados = input()\n\n dados = dados.split(' ')\n\n numeros_de_nos = int(dados[0])\n\n numeros_de_aresta = int(dados[1])\n\n for i in range(numeros_de_nos):\n grafo.nos = i\n \n for j in range(numeros_de_aresta):\n dados = input()\n\n dados = dados.split(' ')\n\n indice1 = int(dados[0])\n\n indice2 = int(dados[1])\n\n peso = int(dados[2])\n\n grafo.add_aresta(indice1, indice2, peso)\n \n grafo.dijkstra(0)\n\npercorrer()",
"_____no_output_____"
]
],
[
[
"\nQuestรฃo 4\nImplemente o grafo ponderado ilustrado pela figura abaixo, utilizando as representaรงรตes baseadas em:\n\nmatriz de adjacรชncias;\nlista de adjacรชncias.\nEm seguida, exiba a matriz e a lista que representam o grafo.\n",
"_____no_output_____"
]
],
[
[
"class Grafo:\n def __init__(self):\n self.adjacente_matriz = {}\n self.adjacente_lista = {}\n\n def add_vertice(self, indice):\n self.adjacente_matriz[indice] = {}\n self.adjacente_lista[indice] = []\n\n def add_aresta(self, indice_1, indice_2, peso):\n self.adjacente_matriz[indice_1][indice_2] = peso\n self.adjacente_matriz[indice_2][indice_1] = peso\n\n self.adjacente_lista[indice_1].append((indice_2, peso))\n self.adjacente_lista[indice_2].append((indice_1, peso))\n\n def mostrar_adjacente_matriz(self):\n representacao = ' |' \n\n for i in self.adjacente_matriz:\n representacao += f' {i}'\n\n representacao += '\\n'\n\n for i in self.adjacente_matriz:\n representacao += f' {i} |'\n\n for j in self.adjacente_matriz:\n representacao += f' {0 if not (self.adjacente_matriz.get(i) and self.adjacente_matriz.get(i).get(j)) else self.adjacente_matriz.get(i).get(j)}'\n\n representacao += '\\n'\n\n return representacao\n\n def mostrar_adjacente_lista(self):\n representacao = ''\n\n for i in self.adjacente_lista:\n representacao += f'{i}: {self.adjacente_lista.get(i)}\\n'\n\n return representacao\n\ngrafo = Grafo()\n\ngrafo.add_vertice(0)\ngrafo.add_vertice(1)\ngrafo.add_vertice(2)\ngrafo.add_vertice(3)\ngrafo.add_vertice(4)\n\ngrafo.add_aresta(0, 1, 3)\ngrafo.add_aresta(0, 3, 7)\ngrafo.add_aresta(0, 4, 8)\ngrafo.add_aresta(1, 2, 1)\ngrafo.add_aresta(1, 3, 4)\ngrafo.add_aresta(2, 3, 2)\ngrafo.add_aresta(3, 4, 3)\n\nprint(grafo.mostrar_adjacente_matriz())\nprint(grafo.mostrar_adjacente_lista())",
" | 0 1 2 3 4\n 0 | 0 3 0 7 8\n 1 | 3 0 1 4 0\n 2 | 0 1 0 2 0\n 3 | 7 4 2 0 3\n 4 | 8 0 0 3 0\n\n0: [(1, 3), (3, 7), (4, 8)]\n\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
eca87cc693d731691f7c4984c815a209dafb8c03 | 20,908 | ipynb | Jupyter Notebook | notebooks/06_scrapeAnimeList.ipynb | alancmathew/anime-recommendation-engine | 43ef08d761eb787f49fad28076d926d0af13fba0 | [
"MIT"
] | null | null | null | notebooks/06_scrapeAnimeList.ipynb | alancmathew/anime-recommendation-engine | 43ef08d761eb787f49fad28076d926d0af13fba0 | [
"MIT"
] | null | null | null | notebooks/06_scrapeAnimeList.ipynb | alancmathew/anime-recommendation-engine | 43ef08d761eb787f49fad28076d926d0af13fba0 | [
"MIT"
] | null | null | null | 34.163399 | 127 | 0.470872 | [
[
[
"import pickle\nimport numpy as np\nimport pandas as pd\nimport json\nimport sqlalchemy as sql\nfrom sqlalchemy import create_engine\nfrom tqdm import tqdm\nimport requests\nfrom bs4 import BeautifulSoup\nfrom io import StringIO \nimport time\nimport re\nfrom concurrent.futures import ThreadPoolExecutor\nfrom multiprocessing import Pool\nimport random\nfrom urllib.parse import quote",
"_____no_output_____"
],
[
"with open('../tools/credentials.json') as file:\n credentials = json.load(file)\n \nusername = credentials[\"dblogin\"][\"username\"]\npassword = credentials[\"dblogin\"][\"password\"]",
"_____no_output_____"
],
[
"db_string = f\"postgresql://{username}:{password}@192.168.0.3:5432/animeplanet\"\ndb = create_engine(db_string)",
"_____no_output_____"
],
[
"def chunker(seq, size):\n return (seq[pos:pos + size] for pos in range(0, len(seq), size))",
"_____no_output_____"
]
],
[
[
"### Get Anime List",
"_____no_output_____"
]
],
[
[
"print('scraping anime list...')",
"_____no_output_____"
],
[
"base_url = 'https://www.anime-planet.com/anime/top-anime?page='\n\nurl = f'{base_url}{1}'\nresp = requests.get(f'http://192.168.0.3:5000/special-requests?url={quote(url)}')\nsoup = BeautifulSoup(resp.text, 'html.parser')\nul = soup.find('ul', attrs={'class':'nav'})",
"_____no_output_____"
],
[
"page_nums = []\nfor tag in ul.find_all('a'):\n try:\n page_nums.append(int(tag.text))\n except:\n continue\n \nnum_pages = max(page_nums)\n\nurls = [f'{base_url}{i}' for i in range(1, num_pages+1)]",
"_____no_output_____"
],
[
"def scrapeTable(url):\n resp = requests.get(f'http://192.168.0.3:5000/special-requests?url={quote(url)}')\n if resp.text != '':\n soup = BeautifulSoup(resp.text, 'html.parser')\n table = soup.find('table')\n chunk = pd.read_html(StringIO(str(table)), index_col='Rank')[0][['Title', 'Type', 'Year']]\n chunk['url'] = [np.where(tag.has_attr('href'), \n 'https://www.anime-planet.com' + tag.get('href'), \n 'no link') for tag in table.find_all('a')]\n chunk.columns = [col.lower() for col in chunk.columns]\n chunk['url'] = chunk['url'].astype('string')\n return chunk\n else:\n return scrapeTable(url)",
"_____no_output_____"
],
[
"chunksize = 10\ndf = pd.DataFrame()\n\nurl_chunks = chunker(urls, chunksize)\n\nfor idx, url_chunk in enumerate(tqdm(url_chunks, total=int(len(urls)/chunksize)+1), 1):\n with ThreadPoolExecutor(max_workers=chunksize) as executor:\n chunk = pd.concat(list(executor.map(scrapeTable, url_chunk)), ignore_index=True)\n \n df = pd.concat([df, chunk], ignore_index=True)\n \n time.sleep(max(min(np.random.poisson(2), 5), 1))",
"_____no_output_____"
],
[
"df = df.drop_duplicates(['url'], ignore_index=True)",
"_____no_output_____"
],
[
"print('saving data to file...')\ndf.to_csv('../data/anime_list.csv.xz', index=False)\n\nwith db.connect() as con:\n print('removing from db...')\n query = f\"\"\"DELETE FROM anime;\"\"\"\n con.execute(sql.text(query))\n \n print('saving data to db...')\n df.to_sql('anime', con, if_exists='append', index=False, method='multi')",
"_____no_output_____"
]
],
[
[
"### Scrape Anime Pages",
"_____no_output_____"
]
],
[
[
"print('scraping anime pages...')",
"_____no_output_____"
],
[
"df = pd.read_sql('anime', db)",
"_____no_output_____"
],
[
"def getPage(url, attempt=1):\n if attempt == 4:\n return (url, '')\n resp = requests.get(f'http://192.168.0.3:5000/special-requests?url={quote(url)}')\n return (url, resp.text) if resp.text != '' else getPage(url, attempt+1)",
"_____no_output_____"
],
[
"chunksize = 10\n\nurl_list = df['url'].to_list()\nurl_chunks = chunker(url_list, chunksize)\n\nurl_html_dict = {}\nfor url_chunk in tqdm(url_chunks, total=int(len(url_list)/chunksize)+1):\n with ThreadPoolExecutor(max_workers=chunksize) as executor:\n list_of_tup = list(executor.map(getPage, url_chunk))\n for tup in list_of_tup:\n url_html_dict[tup[0]] = tup[1]\n \n time.sleep(max(min(np.random.poisson(10), 30), 4))",
"_____no_output_____"
],
[
"df['html_text'] = df['url'].map(url_html_dict)",
"_____no_output_____"
],
[
"print('saving data to file...')\ndf.to_csv('../data/anime_list_html.csv.xz', index=False)\n\nwith db.connect() as con:\n print('removing from db...')\n query = f\"\"\"DELETE FROM web_scrape \n WHERE url in ({str(df['url'].to_list())[1:-1]})\"\"\"\n con.execute(sql.text(query))\n print('saving data to db...')\n chunks = chunker(df[['url', 'html_text']], 1000)\n for chunk in tqdm(chunks):\n chunk.to_sql('web_scrape', con, if_exists='append', index=False, method='multi')",
"_____no_output_____"
]
],
[
[
"### Extracting addition info",
"_____no_output_____"
]
],
[
[
"df = pd.read_csv('../data/anime_list_html.csv.xz')",
"_____no_output_____"
],
[
"df",
"_____no_output_____"
],
[
"def email(string):\n r = int(string[:2], 16)\n email = ''.join([chr(int(string[i:i+2], 16) ^ r)\n for i in range(2, len(string), 2)])\n return email",
"_____no_output_____"
],
[
"def parseInfo(html):\n soup = BeautifulSoup(html)\n title = soup.find('h1', {'itemprop':'name'}).text\n if '[email\\xa0protected]' in title:\n real_text = email(soup.find('a', attrs={'href': '/cdn-cgi/l/email-protection'})['data-cfemail'])\n title = title.replace('[email\\xa0protected]', real_text)\n\n section = soup.find(attrs={'class': 'pure-g entryBar'})\n num_eps = section.find('span', {'class':'type'})\n if num_eps:\n num_eps = num_eps.text.replace('\\n', ' ').strip()\n else:\n num_eps = None\n \n studio = section.find('a', {'href': re.compile(r'/anime/studios/.*')})\n if studio:\n studio = studio.text\n else:\n studio = None\n \n start_end_years = section.find('span', {'class': 'iconYear'})\n if start_end_years:\n start_end_years = start_end_years.text\n else:\n start_end_years = None\n \n season_year = section.find('a', {'href': re.compile(r'/anime/seasons/.*')})\n if season_year:\n season_year = season_year.text\n else:\n season_year = None\n \n rating = section.find('div', {'class': 'avgRating'}).text.replace('\\n', ' ').strip()\n \n tags_section = soup.find('div', {'class':'tags'})\n if tags_section:\n tags = tags_section.find_all('a', {'href': re.compile(r'/anime/tags/.*')})\n tags = [tag.text.replace('\\n', ' ').strip() for tag in tags]\n else:\n tags = None\n \n cw_section = soup.find('div', {'class':'tags tags--plain'})\n if cw_section:\n content_warnings = [cw.text.replace('\\n', ' ').replace(',', '').strip() for cw in cw_section.find_all('li')]\n else:\n content_warnings = None\n \n synopsis = soup.find('p').text\n url = soup.find('link', {'href': re.compile(r'https://www.anime-planet.com/anime/')})['href']\n \n return (title, num_eps, studio, start_end_years, season_year, rating, synopsis, tags, content_warnings, url)",
"_____no_output_____"
],
[
"with Pool(14) as p:\n list_of_tups = list(p.map(parseInfo, df['html_text']))",
"_____no_output_____"
],
[
"anime = pd.DataFrame(list_of_tups, columns=['title', 'num_eps', 'studio', 'start_end_years', 'season_year', 'rating', \n 'synopsis', 'tags', 'content_warnings', 'url'])",
"_____no_output_____"
],
[
"anime.to_csv('../data/anime_raw.csv.xz', index=False)\nanime.to_pickle('../data/anime_raw.pkl.xz')",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
eca89067fe6a86962b898d78d2a49f75440faf25 | 49,931 | ipynb | Jupyter Notebook | examples/example2-2.ipynb | jychoi-hpc/fluctana | a8950c347d422e471a43034e05214e3fec58d428 | [
"MIT"
] | null | null | null | examples/example2-2.ipynb | jychoi-hpc/fluctana | a8950c347d422e471a43034e05214e3fec58d428 | [
"MIT"
] | 1 | 2020-02-12T22:08:39.000Z | 2020-02-12T22:08:39.000Z | examples/example2-2.ipynb | jychoi-hpc/fluctana | a8950c347d422e471a43034e05214e3fec58d428 | [
"MIT"
] | 1 | 2020-02-12T21:40:13.000Z | 2020-02-12T21:40:13.000Z | 213.380342 | 40,262 | 0.882999 | [
[
[
"### Example #1-3\n\nAuthor : Minjun J. Choi ([email protected])\n\nWe will calculate coherence mean image similar with Fig.6 in Choi NF 2017\n<img src=\"files/16150_coh.png\">",
"_____no_output_____"
]
],
[
[
"%matplotlib inline \nimport sys, os\nsys.path.append(os.pardir)\nfrom fluctana import *",
"_____no_output_____"
],
[
"A = FluctAna()\nA.add_data(KstarEcei(shot=16150, clist=['ECEI_G0201-2308']), trange=[7.57, 7.62])",
"data is normalized by trange average\n"
],
[
"A.fftbins(nfft=512,window='hann',overlap=0.5,detrend=1)",
"---- DATA SET # 0 for [7.57, 7.62] s ----\n{000:ECEI_G0201} {001:ECEI_G0202} {002:ECEI_G0203} {003:ECEI_G0204} \n{004:ECEI_G0205} {005:ECEI_G0206} {006:ECEI_G0207} {007:ECEI_G0208} \n{008:ECEI_G0301} {009:ECEI_G0302} {010:ECEI_G0303} {011:ECEI_G0304} \n{012:ECEI_G0305} {013:ECEI_G0306} {014:ECEI_G0307} {015:ECEI_G0308} \n{016:ECEI_G0401} {017:ECEI_G0402} {018:ECEI_G0403} {019:ECEI_G0404} \n{020:ECEI_G0405} {021:ECEI_G0406} {022:ECEI_G0407} {023:ECEI_G0408} \n{024:ECEI_G0501} {025:ECEI_G0502} {026:ECEI_G0503} {027:ECEI_G0504} \n{028:ECEI_G0505} {029:ECEI_G0506} {030:ECEI_G0507} {031:ECEI_G0508} \n{032:ECEI_G0601} {033:ECEI_G0602} {034:ECEI_G0603} {035:ECEI_G0604} \n{036:ECEI_G0605} {037:ECEI_G0606} {038:ECEI_G0607} {039:ECEI_G0608} \n{040:ECEI_G0701} {041:ECEI_G0702} {042:ECEI_G0703} {043:ECEI_G0704} \n{044:ECEI_G0705} {045:ECEI_G0706} {046:ECEI_G0707} {047:ECEI_G0708} \n{048:ECEI_G0801} {049:ECEI_G0802} {050:ECEI_G0803} {051:ECEI_G0804} \n{052:ECEI_G0805} {053:ECEI_G0806} {054:ECEI_G0807} {055:ECEI_G0808} \n{056:ECEI_G0901} {057:ECEI_G0902} {058:ECEI_G0903} {059:ECEI_G0904} \n{060:ECEI_G0905} {061:ECEI_G0906} {062:ECEI_G0907} {063:ECEI_G0908} \n{064:ECEI_G1001} {065:ECEI_G1002} {066:ECEI_G1003} {067:ECEI_G1004} \n{068:ECEI_G1005} {069:ECEI_G1006} {070:ECEI_G1007} {071:ECEI_G1008} \n{072:ECEI_G1101} {073:ECEI_G1102} {074:ECEI_G1103} {075:ECEI_G1104} \n{076:ECEI_G1105} {077:ECEI_G1106} {078:ECEI_G1107} {079:ECEI_G1108} \n{080:ECEI_G1201} {081:ECEI_G1202} {082:ECEI_G1203} {083:ECEI_G1204} \n{084:ECEI_G1205} {085:ECEI_G1206} {086:ECEI_G1207} {087:ECEI_G1208} \n{088:ECEI_G1301} {089:ECEI_G1302} {090:ECEI_G1303} {091:ECEI_G1304} \n{092:ECEI_G1305} {093:ECEI_G1306} {094:ECEI_G1307} {095:ECEI_G1308} \n{096:ECEI_G1401} {097:ECEI_G1402} {098:ECEI_G1403} {099:ECEI_G1404} \n{100:ECEI_G1405} {101:ECEI_G1406} {102:ECEI_G1407} {103:ECEI_G1408} \n{104:ECEI_G1501} {105:ECEI_G1502} {106:ECEI_G1503} {107:ECEI_G1504} \n{108:ECEI_G1505} {109:ECEI_G1506} {110:ECEI_G1507} 
{111:ECEI_G1508} \n{112:ECEI_G1601} {113:ECEI_G1602} {114:ECEI_G1603} {115:ECEI_G1604} \n{116:ECEI_G1605} {117:ECEI_G1606} {118:ECEI_G1607} {119:ECEI_G1608} \n{120:ECEI_G1701} {121:ECEI_G1702} {122:ECEI_G1703} {123:ECEI_G1704} \n{124:ECEI_G1705} {125:ECEI_G1706} {126:ECEI_G1707} {127:ECEI_G1708} \n{128:ECEI_G1801} {129:ECEI_G1802} {130:ECEI_G1803} {131:ECEI_G1804} \n{132:ECEI_G1805} {133:ECEI_G1806} {134:ECEI_G1807} {135:ECEI_G1808} \n{136:ECEI_G1901} {137:ECEI_G1902} {138:ECEI_G1903} {139:ECEI_G1904} \n{140:ECEI_G1905} {141:ECEI_G1906} {142:ECEI_G1907} {143:ECEI_G1908} \n{144:ECEI_G2001} {145:ECEI_G2002} {146:ECEI_G2003} {147:ECEI_G2004} \n{148:ECEI_G2005} {149:ECEI_G2006} {150:ECEI_G2007} {151:ECEI_G2008} \n{152:ECEI_G2101} {153:ECEI_G2102} {154:ECEI_G2103} {155:ECEI_G2104} \n{156:ECEI_G2105} {157:ECEI_G2106} {158:ECEI_G2107} {159:ECEI_G2108} \n{160:ECEI_G2201} {161:ECEI_G2202} {162:ECEI_G2203} {163:ECEI_G2204} \n{164:ECEI_G2205} {165:ECEI_G2206} {166:ECEI_G2207} {167:ECEI_G2208} \n{168:ECEI_G2301} {169:ECEI_G2302} {170:ECEI_G2303} {171:ECEI_G2304} \n{172:ECEI_G2305} {173:ECEI_G2306} {174:ECEI_G2307} {175:ECEI_G2308} \n\ndnum 0 fftbins 193 with hann size 512 overlap 0.5 detrend 1\n"
]
],
[
[
"Calculate with 'dc=-8' option, which means that the calculation is done between (c)-th channel in dtwo and (c+dc)-th channel in done.",
"_____no_output_____"
]
],
[
[
"A.coherence(done=0,dtwo=0,dc=-8)",
"_____no_output_____"
],
[
"A.cplot(dnum=0,snum=124,frange=[0,60],vlimits=[0,0.4])",
"/home/users/ymjeon.ikstar/anaconda2/lib/python2.7/site-packages/matplotlib/collections.py:590: FutureWarning: elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison\n if self._edgecolors == str('face'):\n"
],
[
"A.Dlist[0].pdata",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
eca8a3dced89b5d2b6a308d38689dc9941cf19d1 | 33,881 | ipynb | Jupyter Notebook | notebooks/lecture_03_intro_python_jupyter.ipynb | hua-mike/CYPLAN255 | 03493a41f51bcd56f587cc088468eccbeafadc05 | [
"CNRI-Python"
] | null | null | null | notebooks/lecture_03_intro_python_jupyter.ipynb | hua-mike/CYPLAN255 | 03493a41f51bcd56f587cc088468eccbeafadc05 | [
"CNRI-Python"
] | null | null | null | notebooks/lecture_03_intro_python_jupyter.ipynb | hua-mike/CYPLAN255 | 03493a41f51bcd56f587cc088468eccbeafadc05 | [
"CNRI-Python"
] | null | null | null | 28.399832 | 742 | 0.593637 | [
[
[
"\n*******\n# Introduction to Jupyter Notebooks and Python\n*******",
"_____no_output_____"
],
[
"## Python\n\nPython is an interpreted programming language, also referred to as a *high-level language*, or as a scripting language. What this means is that when you write some commands, or statements that are meaningful in the Python language, the Python 'interpreter' reads the command, figures out what the intended computation is, and then executes it. This differs from *low-level* languages like C or C++, in which you generally have to *compile* code before you can run it, and if you find errors they have to be diagnosed and then the code re-compiled before you can run it. Interpreted languages skip the compile step, and just execute code directly, and if there are errors, they are seen at run-time. ",
"_____no_output_____"
],
[
"### Installing Anaconda Python Bundle with Jupyter on Your Own Computer\n\nYou should install Python on your own computer for this class to gain experience managing a Python installation and supporting libraries, and to give you more compute resources than you would get using Datahub. Please use the following installer to make things as consistent as possible with the environment in class.\n\n- https://www.anaconda.com/products/individual\n\nYou will find installers for each operating system, and the current default version is 3.9. Please do not install an earlier version, especially not one below Python 3, since the syntax is very different and many libraries no longer support it.\n\nPython and the Anaconda distribution is free. In fact, all the software we will use in this class is open source and free (as in no cost). Python runs on Windows, OSX, and Linux, so regardless of what computer you are using, it will most likely run on it. ",
"_____no_output_____"
],
[
"\n## Jupyter Notebooks\n\nThis first session will cover the basics of Python, and introduce elements that will help you get familiar with Python as an interactive computational environment for exploring data. The material is presented in an interactive environment that runs within your web browser, called a Jupyter Notebook. It allows presentation of text and graphics to be combined with Python code that can be run interactively, with the results appearing inline. We are looking at a Jupyter notebook now. Note that Jupyter is a relatively recent name for this so sometimes you may still see it referred to as an IPython noteboook. Jupyter is just the new version of IPython notebooks, but now also supports a variety of other languages and tools. \n\nLet's start by getting familiar with the Jupyter Notebook and how it works.\n\n\n",
"_____no_output_____"
],
[
"### Launching a Jupyter Notebook at the Command Prompt\n\nTo begin using a Jupyter Notebook on your own computer, you need to launch a command prompt (or command shell). If you don't know what this is, you'll need to get familiar with the command prompt and navigating around on your computer using change directory commands, in order to be able to launch the notebook.\n\nOn Windows 10, there are multiple ways to open a command prompt - nicely explained [here](http://www.howtogeek.com/235101/10-ways-to-open-the-command-prompt-in-windows-10/). If you haven't learned how to use a command prompt in windows, [this](http://dosprompt.info/) is a good reference for the command prompt in Windows.\n\nOn a mac, you can launch the terminal app. On a mac, the syntax is a bit different since it is based on Linux. [Here](http://ss64.com/osx/) is a reference for commands on the mac, or a simpler version [here](http://www.dummies.com/how-to/content/how-to-use-basic-unix-commands-to-work-in-terminal.html).\n\n",
"_____no_output_____"
],
[
"Once you have a shell, use `cd` (change directory) to navigate to whatever directory you want to work in. You'll need to use the command prompt from the beginning of this course, so get comfortable with basic commands this week if you are not already.\n\nAt the command prompt, `cd` to the location of this notebook, and run the following command:\n- `jupyter notebook`\n\nThis command does two things:\n1. launches a Jupyter Notebook Server in the terminal\n2. launches the [Notebook Dashboard](https://jupyter-notebook.readthedocs.io/en/stable/ui_components.html#notebook-dashboard) in the browser\n\n",
"_____no_output_____"
],
[
"### Using Jupyter Notebooks\n\nFrom the Notebook Dashboard, you can either load an existing notebook if you see one, create a new one (or open a terminal). If you started the server from the right place, you should see the name of this notebook listed there: **lecture_03_intro_python_jupyter.ipynb**. If you click on this notebook, another tab will open in your browser, containing this notebook, ready to use. Go ahead and do that.\n\nA notebook is made of cells. So in this notebook you've only seen cells that contain text. These are markdown cells. Notice the pulldown list for the cell type contains:\n\n* Code -- which we will use for Python code mainly, though it could use other languages\n* Markdown -- like this cell, using a flavor of structured text like that is used in Wikipedia and many other platforms\n* Other options will appear depending on what else is installed for use with Jupyter, like kernels for Scala, R, Octave, etc.",
"_____no_output_____"
],
[
"#### Edit Mode + Markdown\nYou can edit the contents of a cell by double-clicking on it. The border of the cell will turn from blue to green. You are now in \"edit mode\". Try it on this cell. When you are ready to save the cell or exit edit mode, just use `<shift> + <enter>`. That's one way to run a cell. We will see how the code cells work next. Before going on to that, read a little bit about how you can format your text cells with Markdown [here](https://jupyter-notebook.readthedocs.io/en/stable/examples/Notebook/Working%20With%20Markdown%20Cells.html)\n\nFor example, you can render LaTeX equations in your Markdown cells:\n\n- $y = \\alpha + \\beta X $\n- $c = \\sqrt{a^2 + b^2}$\n\nOr use the backtick \"\\`\" character to style text like lines of code:\n- `math.sqrt(98)`",
"_____no_output_____"
],
[
"Cells can also contain lines of interactive code. The next cell contains a Python command. You can run that command in two ways:\n1. Select the cell in \"command mode\" (blue) and type `<shift> + <enter>`\n2. Clicking on the Run icon (left of the black square), on the toolbar. \n\nTry running the next cell and notice that it executes the command and writes the output below the cell:",
"_____no_output_____"
]
],
[
[
"import math\nmath.sqrt(49)",
"_____no_output_____"
]
],
[
[
"### Python Interpreter Environments\n\nWhen we write and execute Python code, we generally do that within an environment call as an **interpreter**. Python interpreters and editing environments can be quite varied. Some options include:\n\n1. Start the default Python interpreter from a terminal with `python`\n2. Start an IPython interpreter from the terminal with `ipython`\n2. With an integrated development environment (e.g. PyCharm)\n3. Jupyter Notebooks like this one, that provide a Python environment that runs in your web browser. This is the environment you are looking at now, with a mixture of headings, text, and code embedded in a Jupyter Notebook.",
"_____no_output_____"
],
[
"## Hello World!\n\nThe first programming command demonstrated when you are learning a programming language is usually to make the computer print 'Hello World!'. In Python, doing this is pretty simple:",
"_____no_output_____"
],
[
"As you can see, there is not much code involved in making this happen. The word 'print' is a command that Python knows how to process, and the text string 'Hello World!' in quotations is an **argument** being passes to the print command. You can of course pass any kind of argument to the Python print command, and it will try to *do the right thing* without you having to micro-manage the process.",
"_____no_output_____"
],
[
"## Python as an Interactive Calculator\n\nPython can be used as a simple interactive calculator, by just typing in a mathematical expression as you might on a regular or scientific calculator:",
"_____no_output_____"
]
],
[
[
"2 - 4",
"_____no_output_____"
]
],
[
[
"What happened above is that Python interpreted the line `2 - 4` to parse that it should understand the first object it encountered as an integer, the second object as a mathematical operator for addition, and the third as another integer. Python's interpreter mostly just tries to figure out what you mean when you write statements like this, and as long as it is unambiguous and feasible to compute, it just does it without you having to explain things in detail.",
"_____no_output_____"
],
[
"You can of course use any kinds of numbers (e.g. integers or decimals), and any standard mathematical operators, and most of the time you get what you expect:",
"_____no_output_____"
]
],
[
[
"3.2 * 4",
"_____no_output_____"
],
[
"3 ** 4",
"_____no_output_____"
]
],
[
[
"### A Note on Calculating with Different Data Types\n\nWhat happens if we perform computations with mixed types?",
"_____no_output_____"
]
],
[
[
"print(12 + 3)\ntype(12 + 3)",
"_____no_output_____"
],
[
"print(12. + 3)\ntype(12. + 3)",
"_____no_output_____"
]
],
[
[
"## The Pylab Interactive Plotting Environment\n\nOK, so maybe using Python as an interactive calculator is not the most compelling case for using Python, even if it does demonstrate that Python has a very shallow learning curve for someone completely new to programming. You can actually begin using it productively even before learning how to program in it!\n\nTo give a preview of somewhat more advanced topics, let's look at the interactive plotting mode in IPython that we can invoke by using 'magic' commands, and importing some modules: ",
"_____no_output_____"
]
],
[
[
"### magic command to display matplotlib plots inline within the Jupyter notebook webpage\n%matplotlib inline\n\n#import necessary modules\nimport pandas as pd, numpy as np, matplotlib.pyplot as plt",
"_____no_output_____"
]
],
[
[
"This loads pandas and numpy and the matplotlib plotting environment. We'll come back to these libraries in more detail later, but now let's look at how they allow us to extend the range of things we can do. Let's assign 100 sequential numbers to a variable labeled x, and create another variable, y, that has some transformation of x, and then plot y against x:",
"_____no_output_____"
]
],
[
[
"x = range(100)\nprint(x)\nprint(list(x))",
"_____no_output_____"
],
[
"y = np.sin(x)\nprint(\"The first 10 entries in y: {0}\".format(y[:10])) ",
"_____no_output_____"
],
[
"plt.plot(x, x * y)",
"_____no_output_____"
]
],
[
[
"Or here is how we could draw 1,000 random numbers from a normal distribution, and plot the results as a frequency histogram:",
"_____no_output_____"
]
],
[
[
"x = np.random.randn(1000)\n_ = plt.hist(x, bins=30)",
"_____no_output_____"
]
],
[
[
"## Getting Some Help\n\nA couple of things that IPython does to help you be more productive are useful to introduce here. \n\nOne is called **tab-completion**. If you can't quite remember the full name of a function, or it is really long and you don't like to type much, you can type the first few characters, and hit the `<tab>` key, and the options that begin with those first few characters show up on a menu.\n\nYou can also use `<shift> + <tab>` to display a **tool-tip** for a function you may have forgotten how to use.\n\nTry these in the cell below:",
"_____no_output_____"
]
],
[
[
"plt.plot(x=x, y=x + y)",
"_____no_output_____"
]
],
[
[
"The other thing you can make a lot of use of is **help**! If you want to know more about how a method or function works, type the name of the function followed by `?`. For example, if we wanted to see how to configure the hist command, we could do:",
"_____no_output_____"
]
],
[
[
"plt.hist()",
"_____no_output_____"
]
],
[
[
"This brings up help text for this command, in a split window in the IPython Notebook. After you read the help, you can minimize the help window by dragging the divider down to the bottom of the Notebook window.",
"_____no_output_____"
],
[
"Another interactive feature I use all the time is the `dir()` function. With no argument, it will tell you about all of the Python objects you have access to in your **namespace**. Or, if you pass in a Python object, it will tell you all of the attributes of that object. Give it a try:",
"_____no_output_____"
]
],
[
[
"dir()",
"_____no_output_____"
],
[
"dir(x)",
"_____no_output_____"
],
[
"x.max()",
"_____no_output_____"
]
],
[
[
"## Useful Notebook shortcuts\n\nFor more shortcuts see `Help > Keyboard Shortcuts` in the file menu at the top of this page\n\n### Command mode\n- `00` -- Restart the kernel\n- `<shift> + m` -- Merge the contents of a cell with the cell below it\n- `a` -- Create a new blank cell above this one\n- `b` -- Create a new blank cell below this one\n- `dd` -- Delete this cell\n- `y` -- Convert cell to code\n- `m` -- Convert cell to markdown",
"_____no_output_____"
],
[
"## What is a Program?\n\nAs Allen Downey explains in _Think Python_ the main elements of a program are:\n\n- **input**: Get data from the keyboard, a ๏ฌle, or some other device.\n- **output**: Display data on the screen or send data to a ๏ฌle or other device.\n- **math**: Perform basic mathematical operations like addition and multiplication.\n- **conditional execution**: Check for certain conditions and execute the appropriate code.\n- **repetition**: Perform some action repeatedly, usually with some variation.\n\nThese are common steps that you will find to be a generic recipe for many programs, whether written in Python or any other language.",
"_____no_output_____"
],
[
"## The Basic Data Types\n\nData in Python is interpreted as having a **type**. In low-level, compiled languages like C or C++, the programmer has to explicitly declare the type of each variable before actually using it. In Python, the type is inferred at run time, and you can always ask Python what the type of an object is:",
"_____no_output_____"
]
],
[
[
"a = 13\ntype(a)",
"_____no_output_____"
],
[
"a = a * 1.1\ntype(a)",
"_____no_output_____"
],
[
"a = 'Hello World!'\ntype(a)",
"_____no_output_____"
]
],
[
[
"Notice that when we multiply `a`, which was initially an integer, by a floating point (decimal number) the result is **cast** as a float. This is like the integer divide problem earlier -- using a floating point number in the calculation causes the result of the calculation to become a floating point number.\n\nNotice also that we can reassign any value or type to a variable. We began with `a` being an integer, then changed its value to a float, and then to a string (text). Variables are dynamically updated in this way based on values assigned to them. That's what people mean when they say Python is a _dynamically typed_ language, instead of a _statically typed_ language like C++.",
"_____no_output_____"
],
[
"### Lists\nIn Python, you can also make lists of numbers. A Python **list** is enclosed in square brackets. Items inside the list are separated by commas.",
"_____no_output_____"
]
],
[
[
"# a list\n[7.0, 6.24, 9.98, 4]",
"_____no_output_____"
]
],
[
[
"Lists can have names too, which is handy for when you want to want to save a set of items without writing them out over and over again.",
"_____no_output_____"
]
],
[
[
"my_list = [4, 8, 15, 16, 23, 42]\nmy_list",
"_____no_output_____"
]
],
[
[
"## Variables\n\nVariables are named objects that we use to store a value. They can be of any type: ",
"_____no_output_____"
]
],
[
[
"city = 'San Francisco'\nprint(city, 'is a ', type(city))",
"_____no_output_____"
],
[
"x = 345\nprint(x, 'is a ', type(x))",
"_____no_output_____"
],
[
"y = 2.324\nprint(y, 'is a ', type(y))",
"_____no_output_____"
]
],
[
[
"You can use a lot of names for a variable, but there are exceptions (another word for error!). Some rules apply. You can't use Python reserved words, or start with a number, or use nonstandard characters like a copyright symbol. You'll get an **exception** if you do:",
"_____no_output_____"
]
],
[
[
"2x = 24",
"_____no_output_____"
]
],
[
[
"And here are the 31 keywords reserved by Python (in version 2), that are ineligible for use as variable names:\n\n`and`, `as`, `assert`, `break`, `class`, `continue`, `def`, `del`, `elif`, `else`, `except`, `exec`, `finally`, `for`, `from`, `global`, `if`, `import`, `in`, `is`, `lambda`, `not`, `or`, `pass`, `print`, `raise`, `return`, `try`, `while`, `with`, `yield`.",
"_____no_output_____"
],
[
"## Operators, Equations, and Expressions\n\nOperators are symbols used to indicate different operations, mostly mathematical, but some operate on strings also. Many basic arithmetic operations are built into Python, like:\n- `+` -- addition\n- `-` -- subtraction\n- `*` -- multiplication\n- `/` -- division\n- `**` or `^` -- exponentiation\n- `%` -- modulo\n\nThere are many others, which you can find information about [here](http://www.inferentialthinking.com/chapters/03/1/expressions.html). \n",
"_____no_output_____"
],
[
"The computer evaluates arithmetic according to the PEMDAS order of operations (just like you probably learned in middle school): anything in parentheses is done first, followed by exponents, then multiplication and division, and finally addition and subtraction.\nSome basic operations:",
"_____no_output_____"
]
],
[
[
"5 * 5",
"_____no_output_____"
],
[
"x = 5\nx = x / 2.1\nprint (x)",
"_____no_output_____"
],
[
"y = x ** 2\nprint (y)",
"_____no_output_____"
]
],
[
[
"NOTE: Proper Python style requires using whitespace (\" \") on either side of an operator in an expression",
"_____no_output_____"
],
[
"Some of these operators also work on strings, but the behavior is different:",
"_____no_output_____"
]
],
[
[
"city = 'San Francisco'\nsep = ', '\nstate = 'California'\nlocation = city + sep + state\nprint (location)",
"_____no_output_____"
],
[
"city * 4",
"_____no_output_____"
]
],
[
[
"### Expressions\n\n**Expressions** are combinations of values, variables, and operators, like most of the lines of code that we've just seen.\n",
"_____no_output_____"
]
],
[
[
"# an example of expression\n14 + 20",
"_____no_output_____"
]
],
[
[
"When you run the cell, the computer evaluates the expression and prints the result. Note that only the last line in a code cell will be printed, unless you explicitly tell the computer you want to print the result.",
"_____no_output_____"
]
],
[
[
"# more expressions. what gets printed and what doesn't?\n100 / 10\n\nprint(4.3 + 10.98)\n\n33 - 9 * (40000 + 1)\n\n884",
"_____no_output_____"
]
],
[
[
"You can also assign names to expressions. The computer will compute the expression and assign the name to the result of the computation.",
"_____no_output_____"
]
],
[
[
"y = 50 * 2 + 1\ny",
"_____no_output_____"
]
],
[
[
"We can then use these names as if they were whatever they stand for (in this case, numbers).",
"_____no_output_____"
]
],
[
[
"x - 42",
"_____no_output_____"
],
[
"x + y",
"_____no_output_____"
],
[
"# before you run this cell, can you say what it should print?\n4 - 2 * (1 + 6 / 3)",
"_____no_output_____"
]
],
[
[
"### Statements\nStatements often include expressions, but unlike expressions they do not always have a value (e.g. print statement), and represent a line of code that Python can execute.\n\n### Scripts\nA script is a text file which stores a bunch of Python statements to be executed in sequential order. Python scripts will normally use the \".py\" file extension.\n\nJupyter makes it easy for you to convert a .ipynb notebook to a .py script from the file menu: `File > Download as > Python (.py)`. Python scripts can be run at the command line by typing `python <filename>` where `<filename>` is name of the .py script you want to run. ",
"_____no_output_____"
],
[
"### Comments\nA good thing to add to code, both in notebooks and scripts, to remind yourself of its intended use or to document it for someone else who may want to run it in the future. Use liberally! They won't slow anything down.",
"_____no_output_____"
]
],
[
[
"# This is a comment explaining the code below which, if my code is complex,\n# I might not remember in detail later without comments.\n# Below I create an array of 10 numbers by adding a normally distributed\n# random number (mean 0, standard deviation 10000) to a base of 50000 and\n# then taking the natural logarithm of the result\nincome = 50000.0 + 10000 * np.random.randn(10)\ny = np.log(income)\ny",
"_____no_output_____"
]
],
[
[
"### Before going on, make sure you have Anaconda Python installed and working correctly\n\nOnce you have Anaconda Python installed you should be able to launch a Jupyter Notebook and experiment with creating some cells with Markdown text, some code cells with simple calculations and creating variables, and execute those cells using `<shift> + <enter>` or using the Run icon at the top of the notebook.",
"_____no_output_____"
],
[
"# Sources\n\nThis notebook was heavily adapted from previous course material by Prof. Paul Waddell and Samuel Maurer.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
eca8a76035e635b016da67210f96943e4dc09645 | 484,707 | ipynb | Jupyter Notebook | 0010/On chi-squared goodness of fits tests.ipynb | genkuroki/public | 339ea5dfd424492a6b21d1df299e52d48902de18 | [
"MIT"
] | 10 | 2021-06-06T00:33:49.000Z | 2022-01-24T06:56:08.000Z | 0010/On chi-squared goodness of fits tests.ipynb | genkuroki/public | 339ea5dfd424492a6b21d1df299e52d48902de18 | [
"MIT"
] | null | null | null | 0010/On chi-squared goodness of fits tests.ipynb | genkuroki/public | 339ea5dfd424492a6b21d1df299e52d48902de18 | [
"MIT"
] | 3 | 2021-08-02T11:58:34.000Z | 2021-12-11T11:46:05.000Z | 171.032816 | 41,521 | 0.668049 | [
[
[
"See https://discourse.julialang.org/t/whats-wrong-with-my-chi-squared-goodness-of-fits-tests/64334",
"_____no_output_____"
]
],
[
[
"using Distributions, StatsPlots\nP = plot(DiscreteUniform(0, 100); label=\"DiscreteUniform(0, 100)\", xtick=0:10:100, ylim=(0, 0.013))\nQ = plot(Binomial(100, 0.7); label=\"Binomial(100, 0.7)\", legend=:topleft, xtick=0:10:100)\nplot(P, Q; size=(800, 250))",
"_____no_output_____"
],
[
"using Distributions, Random, StatsPlots\n\n# The test definitions.....\nfunction computeDensity(data, supp)\n counts = [count(i -> i==s,data) for s in supp]\n if length(data) > sum(counts)\n error(\"There are some data not in the support !\")\n end\n return counts\nend\n\n\"\"\"\nModified version of `goodnessOfFitDiscrete` function in\nhttps://discourse.julialang.org/t/whats-wrong-with-my-chi-squared-goodness-of-fits-tests/64334\n\n* The old argument `support` is deleted.\n* The new local variable `supp` is defined to be `support(fโ)`\n\"\"\"\nfunction goodnessOfFitDiscrete(data, fโ; compressedData=true, ฮฑ=0.05, d=0)\n supp = support(fโ)\n if !compressedData\n data = computeDensity(data, supp)\n end\n K = length(supp)\n N = sum(data)\n pฬ = data ./ N\n df = K - d - 1\n p0 = pdf.(fโ,supp)\n T = N * sum((pฬ[k] - p0[k])^2/p0[k] for k in 1:K)\n ฯDist = Chisq(df)\n rejectedHโ = T > quantile(ฯDist, 1-ฮฑ)\n p_value = 1 - cdf(ฯDist, T)\n return (testValue=T, threashold=quantile(ฯDist,1-ฮฑ), rejectedHโ=rejectedHโ, p_value=p_value)\nend\n\nfunction repeat_tests(fโ; datasize = 10000, repetitions = 10000, ฮฑ = 0.05, d = 0)\n testValue = zeros(repetitions)\n rejectedHโ = falses(repetitions)\n data = rand(fโ, datasize)\n for rep in 1:repetitions\n rand!(fโ, data)\n out = goodnessOfFitDiscrete(data, fโ; compressedData=false, ฮฑ, d)\n testValue[rep] = out.testValue\n rejectedHโ[rep] = out.rejectedHโ\n end\n ฮฑ_real = sum(rejectedHโ)/repetitions\n (; ฮฑ_real, testValue, rejectedHโ)\nend\n\nfunction plot_testValue_dist(fโ; datasize = 10000, repetitions = 10000, ฮฑ = 0.05, d=0)\n ฮฑ_real, testValue, rejectedHโ = repeat_tests(fโ; datasize, repetitions, ฮฑ, d)\n\n title = \"$fโ (real ฮฑ = $ฮฑ_real / nominal ฮฑ = $ฮฑ)\"\n plot(; title, titlefontsize=10)\n\n xlim=(0, 2length(support(fโ)))\n a, b = xlim\n histogram!(testValue[a .โค testValue .โค b]; norm=true, alpha=0.3, label=\"testValue\", xlim)\n\n df = length(support(fโ)) - d - 1\n plot!(Chisq(df), a, b; label=\"Chisq(df = $df)\", 
ls=:dash, lw=2)\nend",
"_____no_output_____"
],
[
"plot_testValue_dist(DiscreteUniform(0, 100))",
"_____no_output_____"
],
[
"plot_testValue_dist(Binomial(100, 0.7))",
"_____no_output_____"
],
[
"plot_testValue_dist(Binomial(100, 0.7); d = 40)",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
eca8c7c56085a3952e81ac5f557907d5d6c707ef | 96,193 | ipynb | Jupyter Notebook | how-to/single-group-walkthrough.ipynb | FredericBr/MRS-voxel-plot | 5e628d724cb987e440a98ace129d575b6964a1f1 | [
"MIT"
] | 3 | 2020-05-21T00:23:58.000Z | 2022-02-22T12:08:52.000Z | how-to/single-group-walkthrough.ipynb | FredericBr/MRS-voxel-plot | 5e628d724cb987e440a98ace129d575b6964a1f1 | [
"MIT"
] | 2 | 2020-07-20T13:13:58.000Z | 2020-07-21T03:42:28.000Z | how-to/single-group-walkthrough.ipynb | FredericBr/MRS-voxel-plot | 5e628d724cb987e440a98ace129d575b6964a1f1 | [
"MIT"
] | 3 | 2020-05-21T00:24:47.000Z | 2022-02-21T19:20:11.000Z | 226.336471 | 37,424 | 0.911116 | [
[
[
"# Creating figures for data from a single group\n\n## Required data \n\nEach participant should have a separate folder where their spectra and MRS voxels should be stored. The folders should be arranged as follows:\n\n```\nproject_folder\n| participants.tsv\n|\nโโโโdata_folder\n | \n โโโโsub-01\n | | spectrum.tsv\n | | spectrum_frequencies.tsv\n | | mrs-voxel_mni-space.nii.gz\n | | ...\n |\n โโโโsub-02\n | | spectrum.tsv\n | | spectrum_frequencies.tsv\n | | mrs-voxel_mni-space.nii.gz\n | | ...\n |\n |...\n```\n\nParticipant IDs should be contained in the participants.tsv file. They should be in a column entitled \"participant_id\". \n\nSpectrum files should contain a single column with the preprocessed MRS spectra that you wish to display. The related frequencies file should contain a single column with the corresponding frequencies in PPM. \n\nIn this example we use a MEGA-PRESS difference spectra created with the Gannet toolbox (http://www.gabamrs.com/). In the case of Gannet, these files must be written out from the data file created during the analysis. This step will differ depending on the analysis tools that you are using. \n\nMRS voxels should be in compressed NIFTI format. These should have been aligned to the MNI152 standard space with a 2mm resolution. \n\n\n## Make a voxel overlap image\n\nFirst define the directories and files to be used and load in the participant IDs.",
"_____no_output_____"
]
],
[
[
"import os\nimport numpy as np\nimport pandas as pd\n\n# Project directory\ndata_dir = '/home/niall/work/MRS-voxel-plot/how-to/'\n\n# Directory to create figures in\nfig_dir = os.path.join(data_dir,'figures')\n\n# Check if this figure directory exists, and if not create it\nif not os.path.isdir(fig_dir):\n os.mkdir(fig_dir)\n \n# Define the column in which participant IDs are stored in the tsv file\nID_header = 'participant_id'\n\n# Load in the participant IDs\nsubjects = pd.read_csv(data_dir+'participants.tsv', delimiter='\\t')[ID_header]\nn_subs = len(subjects)\n\n# Define the voxel mask filename\nmask_name = 'mrs-voxel_mni-space.nii.gz'",
"_____no_output_____"
]
],
[
[
"To create the overlap image we first need to establish the image dimensions and the affine matrix that defines its orientation. ",
"_____no_output_____"
]
],
[
[
"import nibabel as ni\n\n# Use the first subject as an examplar for the dimensions and matrix\ndef get_mask_info(fpath):\n tmp = ni.load(fpath)\n aff = tmp.affine\n dims = tmp.shape\n return(aff,dims)\n\nmask_file = os.path.join(data_dir,'example-data',subjects[0],mask_name)\nmask_aff, mask_dims = get_mask_info(mask_file)",
"_____no_output_____"
]
],
[
[
"We now load the masks for all participants into a single array and calculate the percent overlap.",
"_____no_output_____"
]
],
[
[
"# Load data\nall_mask_data = np.zeros(np.hstack((n_subs,mask_dims)))\nfor i,sub in enumerate(subjects):\n mask_file = os.path.join(data_dir,'example-data',sub,mask_name)\n all_mask_data[i,:,:,:] = ni.load(mask_file).get_fdata()\n\n# Calculate the overlap between voxels\ndensity = np.sum(all_mask_data, axis = 0)\ndensity = (density/n_subs)*100\n\n# Convert this to nifti image format\ndensity_map = ni.Nifti1Image(density,mask_aff)",
"_____no_output_____"
]
],
[
[
"The overlap image can now be plotted on the glass brain provided in nilearn (https://nilearn.github.io). \n\nPlotting is done with matplotlib. This uses inches to define the image dimensions and so we define a helper function so we can input dimensions in cm. \n\nHere we use the \"autumn\" colourmap for the plotting but any matplotlib compatible colourmap can be used (see https://matplotlib.org/3.1.1/tutorials/colors/colormaps.html)",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\nfrom nilearn import plotting\nfrom matplotlib import ticker\n\n# cm to inch convertor\ndef cm2inch(*tupl):\n inch = 2.54\n if isinstance(tupl[0], tuple):\n return tuple(i/inch for i in tupl[0])\n else:\n return tuple(i/inch for i in tupl)\n\n# Define the filename for the image\nfig_file = os.path.join(fig_dir,'voxel-density-map_single-group.png')\n \n# Plot the figure\nfig = plt.figure()\nfig.set_size_inches(cm2inch(12,6)) # width, height\nax1 = plt.subplot(111)\nplotting.plot_glass_brain(density_map, threshold=0, colorbar=True, axes=ax1, cmap='autumn', \n display_mode='xz',annotate=False)\nfor ax in plt.gcf().axes: # Adjust the colourbar to percentages\n ax.yaxis.set_major_formatter(ticker.PercentFormatter(1))\n\n# Save the figure\nfig.savefig(fig_file,bbox_inches='tight',dpi=300)\n",
"_____no_output_____"
]
],
[
[
"It can also be useful to have the nifti file of this overlap map to use in other ways so, finally, we create this.",
"_____no_output_____"
]
],
[
[
"# Save density map NIFTI file\nmap_file = os.path.join(fig_dir,'voxel_density_map_single-group.nii.gz')\ndensity_map.to_filename(map_file)",
"_____no_output_____"
]
],
[
[
"## Make a voxel centroid image\n\nWe now make a figure showing the voxel centroids for each participant's MRS voxel.\n\nCentroid coordinates are calculated using a helper function from nilearn. This requires separate images in a list structure so we need to reload all the masks.",
"_____no_output_____"
]
],
[
[
"# Load all masks into a list\nall_masks = []\nfor i, sub in enumerate(subjects):\n mask_file = os.path.join(data_dir,'example-data',sub,mask_name)\n all_masks.append(ni.load(mask_file))\n \n# Calculate centroids\nall_centres = np.zeros((n_subs,3))\nfor i in range(n_subs):\n all_centres[i,:] = plotting.find_xyz_cut_coords(all_masks[i])",
"_____no_output_____"
]
],
[
[
"These centroids can be plotted onto the glass brain. \n\nAny colour definition that matplotlib can understand can be used. Here we use the basic \"red\" colour.\n\nPlotting is done through the nilearn plot_connectome function. This expects an adjacency matrix for the points being plotted and so we create a blank one.",
"_____no_output_____"
]
],
[
[
"# Create a dummy adjacency matrix\nadjacency_matrix = np.zeros((n_subs,n_subs))\n\n# Define the filename for the image\nfig_file = os.path.join(fig_dir,'mask-centroids_single-group.png')\n\n# Set colour for points\nnode_colour = 'red'\n\n# Plot the figure\nfig = plt.figure()\nfig.set_size_inches(cm2inch(12,6)) # width, height\nax1 = plt.subplot(111)\nplotting.plot_connectome(adjacency_matrix=adjacency_matrix, node_coords=all_centres,\n node_size=50, node_color=node_colour, display_mode='xz', \n node_kwargs={'alpha':0.3},axes=ax1,annotate=False)\n\n# Save the figure\nfig.savefig(fig_file,bbox_inches='tight',dpi=300)\n",
"_____no_output_____"
]
],
[
[
"## Make an image showing the spectrum\n\nThe spectrum obtained from the analysis includes frequencies that we may not wish to plot. The frequency range to show must be identified in the frequencies file and the indices for the highest and lowest point entered. \n\nIn this example we wish to display between approximately 1.5 and 4.2 ppm to cover the GABA and Glx peaks.\n\nThe highest value index comes first when plotting Gannet data. This may vary between analysis software.",
"_____no_output_____"
]
],
[
[
"# Spectra filename\nspec_name = 'spectrum.tsv'\n\n# Spectra frequency filename\nfreq_name = 'spectrum_frequencies.tsv'\n\n# Load in the spectum frequencies - use first participant as exemplar\nfreq_file = os.path.join(data_dir,'example-data',subjects[0],freq_name)\nfreq = np.loadtxt(freq_file)\nfreq_len = freq.shape[-1]\n\n# Load in spectra\nall_spec = np.zeros((n_subs,freq_len))\nfor i,sub in enumerate(subjects):\n spec_file = os.path.join(data_dir,'example-data',sub,spec_name)\n all_spec[i,:] = np.loadtxt(spec_file)\n\n# Calculate group mean\nspec_mean = np.mean(all_spec, axis=0)\n\n# Identify the display range \ndef find_nearest(x,value):\n idx = (abs(x-value)).argmin()\n return idx\n\ndisp_range = [find_nearest(freq,4.2),find_nearest(freq,1.5)]",
"_____no_output_____"
]
],
[
[
"We wish to show the mean across participants along with the individual spectra. These are plotted in different colours. In this case we use black for the individual spectra and red for the mean, but any colour that matplotlib can interpret can be used.",
"_____no_output_____"
]
],
[
[
"# Colours to use\nspec_colour = 'black'\nspec_color_mean = 'red'\n\n# Define the filename for the figure\nfig_file = os.path.join(fig_dir,'mrs-spectra_single-group.png')\n\n# Plot the figure\nfig = plt.figure()\nfig.set_size_inches(cm2inch(8,5))\nax1 = plt.subplot(111)\nax1.spines['top'].set_visible(False) # Some cosmetic commands\nax1.spines['right'].set_visible(False)\nax1.spines['left'].set_visible(False)\nax1.get_xaxis().tick_bottom()\nax1.get_yaxis().tick_left()\nax1.tick_params(axis='x', direction='out')\nax1.tick_params(axis='y', length=0)\nax1.grid(axis='y', color=\"0.9\", linestyle='-', linewidth=1)\nax1.set_axisbelow(True)\nax1.plot(freq[disp_range[0]:disp_range[1]], spec_mean[disp_range[0]:disp_range[1]],\n linewidth=0.8, alpha=0.9, color=spec_color_mean)\nfor i in range(n_subs): # Plot individual spectra\n ax1.plot(freq[disp_range[0]:disp_range[1]], all_spec[i,disp_range[0]:disp_range[1]],\n linewidth=0.2, alpha=0.3, color=spec_colour)\nax1.set_xlabel('ppm',fontsize=8) # Label for x-axis - assumed to be in ppm here\nax1.tick_params(axis='x',labelsize=8)\nax1.set_yticklabels(('')) # Remove the tick labels from the y-axis\nplt.gca().invert_xaxis() # Invert the x-axis \n\n# Save the figure\nfig.savefig(fig_file,bbox_inches='tight',dpi=300)\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
eca8d64610612c6cc7a5d551c3c6f0341495fa39 | 2,893 | ipynb | Jupyter Notebook | 459. Repeated Substring Pattern.ipynb | kkoo1122/Leetcode_Practice | f9500d561d4747dc0df3472bbc5e21b51431cac8 | [
"BSD-2-Clause"
] | null | null | null | 459. Repeated Substring Pattern.ipynb | kkoo1122/Leetcode_Practice | f9500d561d4747dc0df3472bbc5e21b51431cac8 | [
"BSD-2-Clause"
] | null | null | null | 459. Repeated Substring Pattern.ipynb | kkoo1122/Leetcode_Practice | f9500d561d4747dc0df3472bbc5e21b51431cac8 | [
"BSD-2-Clause"
] | null | null | null | 24.108333 | 262 | 0.434843 | [
[
[
"Given a non-empty string check if it can be constructed by taking a substring of it and appending multiple copies of the substring together. You may assume the given string consists of lowercase English letters only and its length will not exceed 10000.\n\n>**Example 1:**\n\n> Input: \"abab\"\n> Output: True\n\n>**Explanation:** It's the substring \"ab\" twice.\n\n>**Example 2:**\n\n> Input: \"aba\"\n> Output: False\n\n>**Example 3:**\n\n> Input: \"abcabcabcabc\"\n> Output: True\n\n>**Explanation:** It's the substring \"abc\" four times. (And the substring \"abcabc\" twice.)",
"_____no_output_____"
],
[
"### Thought\n1. KMP (still need to think about it)\n2. using str.find",
"_____no_output_____"
]
],
[
[
"#KMP\ndef repeatedSubstringPattern(self, str):\n \"\"\"\n :type str: str\n :rtype: bool\n \"\"\"\n def computeLPS(str):\n lps=[0]*len(str)\n i=1\n length=0\n\n while i<len(str):\n if str[i]==str[length]:\n length+=1\n lps[i]=length\n i+=1\n else:\n if length:\n length=lps[length-1]\n else:\n lps[i]=0\n i+=1\n return lps \n\n lps = computeLPS(str)\n n = len(str)\n lenn = lps[-1]\n if lenn and n%(n-lenn)==0:\n return True \n else:\n return False ",
"_____no_output_____"
],
[
"def repeatedSubstringPattern(self, s):\n \"\"\"\n :type s: str\n :rtype: bool\n \"\"\"\n ss=(s+s)[1:-1]\n\n return ss.find(s)!=-1",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code"
]
] |
eca8d7d2c009d970a70bafa53fa13cd3f9f34e6c | 5,927 | ipynb | Jupyter Notebook | fu_2012.ipynb | carjed/primeval | 9fa2442ad7a28cbf75af83aa73e24007c3d17abb | [
"MIT"
] | 3 | 2018-06-17T14:34:23.000Z | 2019-04-04T21:35:02.000Z | fu_2012.ipynb | carjed/primeval | 9fa2442ad7a28cbf75af83aa73e24007c3d17abb | [
"MIT"
] | 1 | 2020-06-02T11:08:19.000Z | 2020-10-29T19:09:19.000Z | fu_2012.ipynb | carjed/primeval | 9fa2442ad7a28cbf75af83aa73e24007c3d17abb | [
"MIT"
] | null | null | null | 34.864706 | 92 | 0.517969 | [
[
[
"import msprime\nimport numpy as np\nimport pandas as pd\nimport math\nfrom random import shuffle\nfrom primeval import *",
"_____no_output_____"
]
],
[
[
"### Code for the Fu et al. (2012) model\n\n### Actual function is found in primeval.py file\n\n```python\ndef fu_model(mu=1.5e-8, phi=0, length=1e4, n_afr=0, n_eur=0, debug=False):\n\n generation_time = 25\n \n # 220kya:\n # African population constant with Ne~7300\n N_A = 7310\n \n # 148kya:\n # instantaneous growth to Ne~14000\n T_AF = 148e3 / generation_time\n N_AF = 14474\n \n # 51kya:\n # non-AFR pops migrate OOA; bottlenecks to Ne~1800\n # migration between AFR occurs\n N_B = 1861\n T_SPLIT = 51e3 / generation_time\n m_AF_B = 15e-5\n \n # 23kya:\n # 2nd EUR bottlenecks to Ne~1000 & starts growing with rate 0.307%\n # migration rate slows between AFR-EUR\n N_EU0 = 1032\n T_EU_B = 23e3 / generation_time\n m_AF_EU = 2.5e-5\n r_EU0 = 0.00307\n N_EU1 = N_EU0 / math.exp(-r_EU0 * T_EU_B)\n \n # 5.1kya:\n # explosive growth in both AFR & EUR\n T_EG = 5.1e3 / generation_time\n r_EU = 0.0195\n r_AF = 0.0166\n N_EU_start = N_EU1 / math.exp(-r_EU * T_EG)\n m_EG = 0\n N_AF_start = N_AF / math.exp(-r_AF * T_EG)\n \n # Population IDs correspond to their indexes in the population\n # configuration array. 
Therefore, we have 0=YRI, 1=CEU initially.\n population_configurations = [\n msprime.PopulationConfiguration(\n sample_size=n_afr, initial_size=N_AF_start, growth_rate=r_AF),\n msprime.PopulationConfiguration(\n sample_size=n_eur, initial_size=N_EU_start, growth_rate=r_EU)#,\n ]\n\n # up to 5.1kya, no migration\n migration_matrix = [\n [0, 0],\n [0, 0],\n ]\n \n demographic_events = [\n # at 5.1kya, change to slow growth rate in EUR & stop growth in AFR;\n # add migration rate\n msprime.MigrationRateChange(\n time=T_EG, rate=m_AF_EU, matrix_index=(0, 1)),\n msprime.MigrationRateChange(\n time=T_EG, rate=m_AF_EU, matrix_index=(1, 0)),\n msprime.PopulationParametersChange(\n time=T_EG, growth_rate=r_EU0, initial_size=N_EU1, population_id=1),\n msprime.PopulationParametersChange(\n time=T_EG, growth_rate=0, population_id=0),\n \n # at 23kya, EUR growth stops and migration rates increase\n msprime.MigrationRateChange(\n time=T_EU_B, rate=m_AF_B, matrix_index=(0, 1)),\n msprime.MigrationRateChange(\n time=T_EU_B, rate=m_AF_B, matrix_index=(1, 0)),\n msprime.PopulationParametersChange(\n time=T_EU_B, initial_size=N_EU0, growth_rate=0, population_id=1),\n \n # at 51kya, population B merges into AFR\n msprime.MassMigration(\n time=T_SPLIT, source=1, destination=0, proportion=1.0),\n msprime.PopulationParametersChange(\n time=T_SPLIT, initial_size=N_B, population_id=1),\n \n # At 148kya, instantaneous growth in AFR\n msprime.PopulationParametersChange(\n time=T_AF, initial_size=N_A, population_id=0)\n ]\n \n if(debug):\n # Use the demography debugger to print out the demographic history\n # that we have just described.\n dd = msprime.DemographyDebugger(\n population_configurations=population_configurations,\n migration_matrix=migration_matrix,\n demographic_events=demographic_events)\n dd.print_history()\n else:\n sim = msprime.simulate(population_configurations=population_configurations,\n migration_matrix=migration_matrix, \n demographic_events=demographic_events,\n 
mutation_rate=mu, \n recombination_rate=phi, \n length=length,\n random_seed=30)\n return sim\n```",
"_____no_output_____"
],
[
"### Run with debugging to check correct parameters at each epoch",
"_____no_output_____"
]
],
[
[
"fu_model(debug=True)\n# fu_model(mu=1.5e-8, phi=2e-8, length=5e7, n_afr=0, n_eur=2000, debug=True)",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
]
] |
eca8e81341e3336b524218559c95afc8644026b5 | 39,039 | ipynb | Jupyter Notebook | PDA/jupyter/jupyterNotebooks/assignments/04ControlAndRepetitionStatements_Assignments.ipynb | EMbeDS-education/StatsAndComputing20212022 | 971e418882b206a1b5606d15d222cef1a5a04834 | [
"MIT"
] | 2 | 2022-02-24T09:35:15.000Z | 2022-03-14T20:34:33.000Z | PDA/jupyter/jupyterNotebooks/assignments/04ControlAndRepetitionStatements_Assignments.ipynb | GeorgiosArg/StatsAndComputing20212022 | 798d39af6aa5ef5eef49d5d6f43191351e8a49f3 | [
"MIT"
] | null | null | null | PDA/jupyter/jupyterNotebooks/assignments/04ControlAndRepetitionStatements_Assignments.ipynb | GeorgiosArg/StatsAndComputing20212022 | 798d39af6aa5ef5eef49d5d6f43191351e8a49f3 | [
"MIT"
] | 2 | 2022-03-15T21:40:35.000Z | 2022-03-26T14:51:31.000Z | 25.632961 | 1,169 | 0.490407 | [
[
[
"<center><font size=\"+4\">Programming and Data Analytics 1 2021/2022</font></center>\n<center><font size=\"+2\">Sant'Anna School of Advanced Studies, Pisa, Italy</font></center>\n<center><img src=\"https://github.com/EMbeDS-education/StatsAndComputing20212022/raw/main/PDA/jupyter/jupyterNotebooks/images/SSSA.png\" width=\"700\" alt=\"The extensible parallel architecture of MultiVeStA\"></center>\n\n<center><font size=\"+2\">Course responsible</font></center>\n<center><font size=\"+2\">Andrea Vandin [email protected]</font></center>\n\n<center><font size=\"+2\">Co-lecturer </font></center>\n<center><font size=\"+2\">Daniele Licari [email protected]</font></center>\n\n---",
"_____no_output_____"
],
[
"<center><font size=\"+4\">Assignments for</font></center>\n<center><font size=\"+4\">Lecture 4: Control and Repetition Statements</font><br/></center>\n<center><font size=\"+2\"> and CSV manipulation/visualization applied on COVID-19 data</font></center>\n\n---\n",
"_____no_output_____"
]
],
[
[
"#@title RUN, BUT DO NOT MODIFY\n!curl -O https://raw.githubusercontent.com/EMbeDS-education/StatsAndComputing20212022/main/PDA/jupyter/jupyterNotebooks/assignments/auto_testing.py\n!curl -O https://raw.githubusercontent.com/EMbeDS-education/StatsAndComputing20212022/main/PDA/jupyter/jupyterNotebooks/assignments/dpc-covid19-ita-andamento-nazionale.csv\n%reload_ext autoreload\n%autoreload 2\nfrom auto_testing import *",
"_____no_output_____"
]
],
[
[
"# Assignment 04.01: If-Linear equation\n## Statement\n\nWrite a program that solves a linear equation _ax = b_ in **integers**. \n\nGiven two integers _a_ and _b_ (_a_ may be zero), \n- print a single integer root if it exists or\n- print `no solution` if no integer solution exists\n- print `many solutions` if many integer solutions exist.\n\n## Example input #1\n\n```\n1\n```\n\n```\n-2\n```\n\n## Example output #1\n\n```\n-2\n```\n\n## Example input #2\n\n```\n2\n```\n\n```\n-1\n```\n\n## Example output #2\n\n```\nno solution\n```\n\n",
"_____no_output_____"
],
[
"## Write your solution here\n\n\n* Do not change the first line (`def ...():`)\n* Maintain the given indentation\n* You can run some tests by yourself by decommenting the last line\n\n",
"_____no_output_____"
]
],
[
[
"def asgn04_01If_Linear_equation():\n # This program prints 'Hello, world!':\n print('Hello, world!')\n\n # Change it to solve the assignment\n\n \n#You can test independently your solution by executing the following line\n#asgn04_01If_Linear_equation()",
"_____no_output_____"
]
],
[
[
"## Run the following cells to perform the provided tests",
"_____no_output_____"
]
],
[
[
"#@title RUN and TEST ALL\n\nfrom IPython.display import display, Markdown \n\n\n\ninputs=[[1,-2],[2,-1],[0,0],[5,0],[0,7],[10,11],[1,30000]]\nexpected_outputs=[[\"-2\"],[\"no solution\"],[\"many solutions\"],['0'],\\\n [\"no solution\"],[\"no solution\"],[\"30000\"]]\n \nfor k in range(len(inputs)):\n display(Markdown(f'{k+1}. TEST {inputs[k]} = {\",\".join(expected_outputs[k])}'))\n print('-'*60)\n run_and_test(inputs[k],expected_outputs[k],asgn04_01If_Linear_equation)\n",
"_____no_output_____"
]
],
[
[
"# Assignment 04.02: If-Queen move\n## Statement\n\nChess queen moves horizontally, vertically or diagonally in any number of squares. <br/>\nGiven two different squares of the chessboard, determine whether a queen can go from the first square to the second one in a single move. <br/>\nThe chessboard is assumed to not contain further pieces.\n\nThe program receives four numbers from 1 to 8 each specifying the column and the row number (the first two refer to the starting square, the last two refer to the target one). <br/>\nThe program should output \n- `YES` if a queen can go from the first square to the second one in a single move or \n- `NO` otherwise.\n\n\n## Example input\n\n```\n1\n1\n2\n2\n```\n\n## Example output\n\n```\nYES\n```\n\nWe suggest you to use intelligible (self-commenting) names for the variables...\n",
"_____no_output_____"
],
[
"## Write your solution here\n\n\n* Do not change the first line (`def ...():`)\n* Maintain the given indentation\n* You can run some tests by yourself by decommenting the last line\n\n",
"_____no_output_____"
]
],
[
[
"def asgn04_02If_Queen_move():\n # This program reads a string and prints it\n str1=input()\n print(str1)\n\n # Can you change it to solve the assignment?\n \n \n#You can test independently your solution by executing the following line\n#asgn04_02If_Queen_move()",
"_____no_output_____"
]
],
[
[
"## Run the following cells to perform the provided tests",
"_____no_output_____"
]
],
[
[
"#@title RUN and TEST ALL\n\nfrom IPython.display import display, Markdown \n\n\n\ninputs=[[1,1,2,2],[1,1,2,3],[5,6,3,3],[3,3,1,1],[6,5,2,5],\\\n [7,6,5,2],[2,7,6,7],[2,7,4,6],[7,4,2,5],[7,5,1,1],\\\n [2,4,5,7],[3,5,7,1],[5,2,5,8],[1,2,3,1],[2,1,1,3]]\nexpected_outputs=[[\"YES\"],[\"NO\"],[\"NO\"],[\"YES\"],[\"YES\"],\\\n [\"NO\"],[\"YES\"],[\"NO\"],[\"NO\"],[\"NO\"],\\\n [\"YES\"],[\"YES\"],[\"YES\"],[\"NO\"],[\"NO\"]]\n \nfor k in range(len(inputs)):\n display(Markdown(f'{k+1}. TEST {inputs[k]} = {\",\".join(expected_outputs[k])}'))\n print('-'*60)\n run_and_test(inputs[k],expected_outputs[k],asgn04_02If_Queen_move)\n",
"_____no_output_____"
]
],
[
[
"# Assignment 04.03: Read integers\n## Statement\n\nWrite a program that reads positive integer numbers from the keyboard as long as the user provides them. <br/>\nThe program should terminate as soon as the user provides an input that is not numeric (e.g. `'end'`, `'ciao'`, ...), which is considered as a terminating token.\n\nUpon reading the termination token, the program prints in separate lines:\n\n- how many numbers were given\n- how many odd numbers were given\n- the mean of the given numbers\n- the maximum given number\n\n**Untested extra challenge:** some of you might be tempted to solve this assignment using a list to store all numbers, and then iterate it. This is problematic in case of big data, or live data, as there might too many numbers to store. Also, it would be inefficient, as you would have to iterate the list of numbers twice. \n\n- Try to solve this assignment without using a list. You should define a variable per quantity we want to observe (or more if necessary), and update them every time a new number arrives.\n\n## Example input 1\n\n```\n1\n```\n\n```\n2\n```\n\n```\n3\n```\n\n```\nend\n```\n\n## Example output 1\n\n```\n3\n```\n\n```\n2\n```\n\n```\n2.0\n```\n\n```\n3\n```\n\n## Hints\n\nHere a suggestion on how to compute the maximum:\n\n- Define a variable `m` with value smaller than any value that might be given. \n- Every time a new number `n` is given, compare `m` with `n`, and update `m` if `n` is bigger than `m`. \n\nWhenever you read with `input()`, you read a string. Strings have a method to check if they represent a number...",
"_____no_output_____"
],
[
"## Write your solution here\n\n\n* Do not change the first line (`def ...():`)\n* Maintain the given indentation\n* You can run some tests by yourself by decommenting the last line\n\n",
"_____no_output_____"
]
],
[
[
"def asgn04_03Read_integers():\n # This program reads a numbwe and prints it\n a = int(input())\n print(a)\n\n # Change it according to the assignment description\n\n\n#You can test independently your solution by executing the following line\n#asgn04_03Read_integers()",
"_____no_output_____"
]
],
[
[
"## Run the following cells to perform the provided tests",
"_____no_output_____"
]
],
[
[
"#@title RUN and TEST ALL\n\nfrom IPython.display import display, Markdown \n\n\n\ninputs=[['1', '2', '3', 'end'],['1', '2', '3', 'fine'],['1', '3', '5', '7', '9', '11', 'ciao'],\\\n ['11', '9', '7', '5', '3', '1', 'ciao']]\nexpected_outputs=[[\"3\",\"2\",\"2.0\",\"3\"],[\"3\",\"2\",\"2.0\",\"3\"],['6', '6', '6.0', '11'],\\\n ['6', '6', '6.0', '11']]\n \nfor k in range(len(inputs)):\n display(Markdown(f'{k+1}. TEST {inputs[k]} = {\",\".join(expected_outputs[k])}'))\n print('-'*60)\n run_and_test(inputs[k],expected_outputs[k],asgn04_03Read_integers)\n",
"_____no_output_____"
]
],
[
[
"# Assignment 04.04: While-Average of sequence\n## Statement\n\nRead a sequence of non-negative integers, where each number is written on a separate line. The sequence ends with 0. Upon termination, print the average of the sequence. \n\nThis assignment should be solved using while loops\n\n## Example input\n\n```\n10\n```\n\n```\n30\n```\n\n```\n0\n```\n\n## Example output\n\n```\n20.0\n```\n\n",
"_____no_output_____"
],
[
"## Write your solution here\n\n\n* Do not change the first line (`def ...():`)\n* Maintain the given indentation\n* You can run some tests by yourself by decommenting the last line\n\n",
"_____no_output_____"
]
],
[
[
"def asgn04_04While_Average_of_sequence():\n # This program reads a numbwe and prints it\n a = int(input())\n print(a)\n\n # Change it according to the assignment description\n\n\n#You can test independently your solution by executing the following line\n#asgn04_04While_Average_of_sequence()",
"_____no_output_____"
]
],
[
[
"## Run the following cells to perform the provided tests",
"_____no_output_____"
]
],
[
[
"#@title RUN and TEST ALL\n\nfrom IPython.display import display, Markdown \n\n\n\ninputs=[['10', '30', '0'],['1', '1', '1', '1', '0'],['1', '2', '0'],\\\n ['1', '2', '3', '4', '0'],['1234', '0'],['1', '2', '3', '4', '5', '6', '7', '0']]\nexpected_outputs=[[\"20.0\"],[\"1.0\"],[\"1.5\"],[\"2.5\"],[\"1234.0\"],[\"4.0\"]]\n \nfor k in range(len(inputs)):\n display(Markdown(f'{k+1}. TEST {inputs[k]} = {\",\".join(expected_outputs[k])}'))\n print('-'*60)\n run_and_test(inputs[k],expected_outputs[k],asgn04_04While_Average_of_sequence)\n",
"_____no_output_____"
]
],
[
[
"# Assignment 04.05: While-Fibonacci\n## Statement\n\nFibonacci numbers are the numbers in the [integer sequence ](https://en.wikipedia.org/wiki/Integer_sequence)starting with 1, 1 where every number after the first two is the sum of the two preceding ones:\n\n1, 1, 2, 3, 5, 8, 13, 21, 34, ...\n\nGiven a positive integer **n**, print the **n**th Fibonacci number.\n\nThis assignment should be solved using while loops\n\n## Example input\n\n```\n6\n```\n\n## Example output\n\n```\n8\n```",
"_____no_output_____"
],
[
"## Write your solution here\n\n\n* Do not change the first line (`def ...():`)\n* Maintain the given indentation\n* You can run some tests by yourself by decommenting the last line\n\n",
"_____no_output_____"
]
],
[
[
"def asgn04_05While_Fibonacci():\n # This program reads a numbwe and prints it\n a = int(input())\n print(a)\n\n # Change it according to the assignment description\n\n\n#You can test independently your solution by executing the following line\n#asgn04_05While_Fibonacci()",
"_____no_output_____"
]
],
[
[
"## Run the following cells to perform the provided tests",
"_____no_output_____"
]
],
[
[
"#@title RUN and TEST ALL\n\nfrom IPython.display import display, Markdown \n\n\n\ninputs=[[6],[1],[2],[3],[4],[7],[14],[18]]\nexpected_outputs=[[\"8\"],[\"1\"],[\"1\"],[\"2\"],[\"3\"],[\"13\"],[\"377\"],[\"2584\"]]\n \nfor k in range(len(inputs)):\n display(Markdown(f'{k+1}. TEST {inputs[k]} = {\",\".join(expected_outputs[k])}'))\n print('-'*60)\n run_and_test(inputs[k],expected_outputs[k],asgn04_05While_Fibonacci)\n",
"_____no_output_____"
]
],
[
[
"# Assignment 04.06: For-Sum of N numbers\n## Statement\n\n`N` numbers are given in the input. Read them and print their sum. \n\nThe first line of input contains the integer `N`, which is the number of integers to follow. Each of the next `N` lines contains one integer. Print the sum of these `N` integers.\n\nThis assignment should be solved using for loops\n\n## Example input\n\n```\n10\n1\n2\n1\n1\n1\n1\n3\n1\n1\n1\n```\n\n## Example output\n\n```\n13\n```\n\n",
"_____no_output_____"
],
[
"## Write your solution here\n\n\n* Do not change the first line (`def ...():`)\n* Maintain the given indentation\n* You can run some tests by yourself by decommenting the last line\n\n",
"_____no_output_____"
]
],
[
[
"def asgn04_06For_Sum_of_N_numbers():\n # This program reads a numbwe and prints it\n a = int(input())\n print(a)\n\n # Change it according to the assignment description\n\n\n#You can test independently your solution by executing the following line\n#asgn04_06For_Sum_of_N_numbers()",
"_____no_output_____"
]
],
[
[
"## Run the following cells to perform the provided tests",
"_____no_output_____"
]
],
[
[
"#@title RUN and TEST ALL\n\nfrom IPython.display import display, Markdown \n\n\n\ninputs=[['1', '891'],['0'],['10', '1', '2', '1', '1', '1', '1', '3', '1', '1', '1'],\\\n ['10', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10'],['2', '235', '56'],\\\n ['4', '4', '4', '4', '4']]\nexpected_outputs=[[\"891\"],[\"0\"],[\"13\"],[\"55\"],[\"291\"],[\"16\"]]\n \nfor k in range(len(inputs)):\n display(Markdown(f'{k+1}. TEST {inputs[k]} = {\",\".join(expected_outputs[k])}'))\n print('-'*60)\n run_and_test(inputs[k],expected_outputs[k],asgn04_06For_Sum_of_N_numbers)\n",
"_____no_output_____"
]
],
[
[
"# Assignment 04.07: For-Series\n## Statement\n\nGiven two integers `A` and `B`. Print all numbers from `A` to `B` inclusively, all in the same line separated by a space.\nIn particular, the numbers should be printed\n- in increasing order, if `A < B`, or \n- in decreasing order, if `A >= B`.\n\nThis assignment should be solved using for loops\n\n## Example input 1\n\n```\n8\n```\n\n```\n5\n```\n\n## Example output 1\n\n```\n8 7 6 5\n```\n\n## Example input 2\n\n```\n5\n```\n\n```\n8\n```\n\n## Example output 2\n\n```\n5 6 7 8\n```\n",
"_____no_output_____"
],
[
"## Write your solution here\n\n\n* Do not change the first line (`def ...():`)\n* Maintain the given indentation\n* You can run some tests by yourself by decommenting the last line\n\n",
"_____no_output_____"
]
],
[
[
"def asgn04_07For_Series():\n # This program reads a numbwe and prints it\n a = int(input())\n print(a)\n\n # Change it according to the assignment description\n\n\n#You can test independently your solution by executing the following line\n#asgn04_07For_Series()",
"_____no_output_____"
]
],
[
[
"## Run the following cells to perform the provided tests",
"_____no_output_____"
]
],
[
[
"#@title RUN and TEST ALL\n\nfrom IPython.display import display, Markdown \n\n\n\ninputs=[['8', '5'],['5', '8'],['1', '10'],['179', '179'],['-14', '7'],['12', '-5']]\nexpected_outputs=[[\"8 7 6 5\"],[\"5 6 7 8\"],[\"1 2 3 4 5 6 7 8 9 10\"],[\"179\"],[\"-14 -13 -12 -11 -10 -9 -8 -7 -6 -5 -4 -3 -2 -1 0 1 2 3 4 5 6 7\"],[\"12 11 10 9 8 7 6 5 4 3 2 1 0 -1 -2 -3 -4 -5\"]]\n \nfor k in range(len(inputs)):\n display(Markdown(f'{k+1}. TEST {inputs[k]} = {\",\".join(expected_outputs[k])}'))\n print('-'*60)\n run_and_test(inputs[k],expected_outputs[k],asgn04_07For_Series)\n",
"_____no_output_____"
]
],
[
[
"# Assignment 04.08: For-Sum of cubes\n## Statement\n\nFor the given integer `N`, calculate the following sum:\n\n$1^3$ + $2^3$ + ... + $N^3$\n\nThis assignment should be solved using for loops\n\n## Example input\n\n```\n3\n```\n\n## Example output\n\n```\n36\n```\n\n",
"_____no_output_____"
],
[
"## Write your solution here\n\n\n* Do not change the first line (`def ...():`)\n* Maintain the given indentation\n* You can run some tests by yourself by decommenting the last line\n\n",
"_____no_output_____"
]
],
[
[
"def asgn04_08For_Sum_of_cubes():\n # This program reads a numbwe and prints it\n a = int(input())\n print(a)\n\n # Change it according to the assignment description\n\n\n#You can test independently your solution by executing the following line\n#asgn04_08For_Sum_of_cubes()",
"_____no_output_____"
]
],
[
[
"## Run the following cells to perform the provided tests",
"_____no_output_____"
]
],
[
[
"#@title RUN and TEST ALL\n\nfrom IPython.display import display, Markdown \n\n\n\ninputs=[['1'],['2'],['3'],['4'],['9'],['20']]\nexpected_outputs=[[\"1\"],[\"9\"],[\"36\"],[\"100\"],[\"2025\"],[\"44100\"]]\n \nfor k in range(len(inputs)):\n display(Markdown(f'{k+1}. TEST {inputs[k]} = {\",\".join(expected_outputs[k])}'))\n print('-'*60)\n run_and_test(inputs[k],expected_outputs[k],asgn04_08For_Sum_of_cubes)\n",
"_____no_output_____"
]
],
[
[
"# Assignment 04.09: For-Sum of cubes\n## Statement\n\nWrite a program that \n- reads the CSV file **dpc-covid19-ita-andamento-nazionale.csv** provided here \n - It contains Italian official COVID'19 data (downloaded from [here](https://github.com/pcm-dpc/COVID-19) on 2020-05-08)\n- computes some quantities on the data in the CSV file.\n\nFor this assignment, we are interested only in \n- the number of currently infected individuals (label totale_positivi), and\n- how they divide among being 'hospitalized' or in 'home isolation'. \n\nIn particular, the infected individuals can be either\n\n- 'hospitalized' (label totale_ospedalizzati), or\n- in 'home isolation' (label isolamento_domiciliare)\n\nYour program should compute two lists with one entry per row in the CSV file:\n\n- `currently_hosp_perc` containing the percentage of infected individuals hospitalized\n- `currently_home_perc` containing the percentage of infected individuals in home isolation\n\nAfter computing these two lists, you should:\n\n- print the minimum value in `currently_hosp_perc`\n- print the maximum value in `currently_home_perc`\n- for each day for each we have measurements, print the corresponding entries in `currently_hosp_perc` and `currently_home_perc` in the same line but separated by a space. Each pair of numbers should be printed in a different row\n\nIn all the cases, **you should print rounding up to the second decimal digit, using 4 digits in total.** \n\nWe have shown in class that this can be obtained by doing:\n\n- `print( \"%4.2f\" % n ). # old way of doing this`\n- `print(\"{:4.2f}\".format(n) ) # new way of doing this`\n\nwhere `n` is the float we want to print\n\n## Hint\n\nWhat you are required to di is not so different from what we saw in class. 
\n\nWe suggest you to first create a list per label of interest mentioned above:\n\n- currently_infected\n- currently_hosp\n- currently_home\n\nAfter the data has been loaded in the three lists, you should just properly iterate such lists to compute the required quantities, and print the required results.\n\nThe function zip might help you in iterating the elements of `currently_hosp_perc` and `currently_home_perc` at the same time\n\n## Expected output\n\n```\n17.97\n```\n\n```\n82.03\n```\n\n```\n57.47 42.53 \n```\n\n```\n48.08 51.92 \n```\n\n```\n42.60 57.40 \n```\n\n```\n51.70 48.30 \n```\n\n```\n49.82 50.18 \n```\n\n```\n48.24 51.76 \n```\n\n```\n49.40 50.60 \n```\n\n```\n49.48 50.52 \n```\n\n```\n55.81 44.19 \n```\n\n```\n60.64 39.36 \n```\n\n```\n64.96 35.04 \n```\n\n```\n72.93 27.07 \n```\n\n```\n63.58 36.42 \n```\n\n```\n65.87 34.13 \n```\n\n```\n63.23 36.77 \n```\n\n```\n69.47 30.53 \n```\n\n```\n64.83 35.17 \n```\n\n```\n60.78 39.22 \n```\n\n```\n58.54 41.46 \n```\n\n```\n55.72 44.28 \n```\n\n```\n55.02 44.98 \n```\n\n```\n55.81 44.19 \n```\n\n```\n57.38 42.62 \n```\n\n```\n57.89 42.11 \n```\n\n```\n55.00 45.00 \n```\n\n```\n49.33 50.67 \n```\n\n```\n48.18 51.82 \n```\n\n```\n49.01 50.99 \n```\n\n```\n47.40 52.60 \n```\n\n```\n46.89 53.11 \n```\n\n```\n46.25 53.75 \n```\n\n```\n45.74 54.26 \n```\n\n```\n44.81 55.19 \n```\n\n```\n43.58 56.42 \n```\n\n```\n42.36 57.64 \n```\n\n```\n42.07 57.93 \n```\n\n```\n41.50 58.50 \n```\n\n```\n40.26 59.74 \n```\n\n```\n39.25 60.75 \n```\n\n```\n38.42 61.58 \n```\n\n```\n37.39 62.61 \n```\n\n```\n36.08 63.92 \n```\n\n```\n35.28 64.72 \n```\n\n```\n34.56 65.44 \n```\n\n```\n33.78 66.22 \n```\n\n```\n33.04 66.96 \n```\n\n```\n32.30 67.70 \n```\n\n```\n31.44 68.56 \n```\n\n```\n30.50 69.50 \n```\n\n```\n30.19 69.81 \n```\n\n```\n29.91 70.09 \n```\n\n```\n29.14 70.86 \n```\n\n```\n27.98 72.02 \n```\n\n```\n26.74 73.26 \n```\n\n```\n25.74 74.26 \n```\n\n```\n25.56 74.44 \n```\n\n```\n25.39 74.61 \n```\n\n```\n24.70 75.30 
\n```\n\n```\n24.32 75.68 \n```\n\n```\n23.53 76.47 \n```\n\n```\n22.76 77.24 \n```\n\n```\n22.33 77.67 \n```\n\n```\n22.04 77.96 \n```\n\n```\n21.08 78.92 \n```\n\n```\n20.52 79.48 \n```\n\n```\n20.07 79.93 \n```\n\n```\n19.54 80.46 \n```\n\n```\n18.97 81.03 \n```\n\n```\n18.76 81.24 \n```\n\n```\n18.71 81.29 \n```\n\n```\n18.31 81.69 \n```\n\n```\n17.97 82.03 \n```\n\n```\n18.68 81.32 \n```\n\n```\n18.39 81.61 \n```\n\n```\n17.97 82.03 \n```\n\n",
"_____no_output_____"
],
[
"## Write your solution here\n\n\n* Do not change the first line (`def ...():`)\n* Maintain the given indentation\n* You can run some tests by yourself by decommenting the last line\n\n",
"_____no_output_____"
]
],
[
[
"import csv\n\ndef asgn04_09COVID_19():\n #This program reads the csv file used and\n # stores its header in the list header, \n # then it prints all its rows\n file_name='dpc-covid19-ita-andamento-nazionale.csv'\n\n with open(file_name, 'r') as csvfile:\n rows = csv.reader(csvfile, delimiter=',')\n header = next(rows,None)\n r=0\n for row in rows :\n print(r)\n print(row)\n r=r+1\n\n\n\n\n#You can test independently your solution by executing the following line\n#asgn04_09COVID_19()",
"_____no_output_____"
]
],
[
[
"## Run the following cells to perform the provided tests",
"_____no_output_____"
]
],
[
[
"#@title RUN AND TEST\ninputs=[]\nexpected_outputs=['17.97', '82.03', '57.47 42.53', '48.08 51.92', '42.60 57.40', '51.70 48.30', '49.82 50.18', '48.24 51.76', '49.40 50.60', '49.48 50.52', '55.81 44.19', '60.64 39.36', '64.96 35.04', '72.93 27.07', '63.58 36.42', '65.87 34.13', '63.23 36.77', '69.47 30.53', '64.83 35.17', '60.78 39.22', '58.54 41.46', '55.72 44.28', '55.02 44.98', '55.81 44.19', '57.38 42.62', '57.89 42.11', '55.00 45.00', '49.33 50.67', '48.18 51.82', '49.01 50.99', '47.40 52.60', '46.89 53.11', '46.25 53.75', '45.74 54.26', '44.81 55.19', '43.58 56.42', '42.36 57.64', '42.07 57.93', '41.50 58.50', '40.26 59.74', '39.25 60.75', '38.42 61.58', '37.39 62.61', '36.08 63.92', '35.28 64.72', '34.56 65.44', '33.78 66.22', '33.04 66.96', '32.30 67.70', '31.44 68.56', '30.50 69.50', '30.19 69.81', '29.91 70.09', '29.14 70.86', '27.98 72.02', '26.74 73.26', '25.74 74.26', '25.56 74.44', '25.39 74.61', '24.70 75.30', '24.32 75.68', '23.53 76.47', '22.76 77.24', '22.33 77.67', '22.04 77.96', '21.08 78.92', '20.52 79.48', '20.07 79.93', '19.54 80.46', '18.97 81.03', '18.76 81.24', '18.71 81.29', '18.31 81.69', '17.97 82.03', '18.68 81.32', '18.39 81.61', '17.97 82.03']\nrun_and_test(inputs,expected_outputs,asgn04_09COVID_19)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
eca8ea6b49407b77c3eebb5065a5ec00701260da | 3,889 | ipynb | Jupyter Notebook | demo/demo.ipynb | monniert/docExtractor | 30bfe41977e98394b4590bfc2ec9ff424b71f48a | [
"MIT"
] | 56 | 2020-07-08T07:39:52.000Z | 2022-01-29T09:15:16.000Z | demo/demo.ipynb | monniert/docExtractor | 30bfe41977e98394b4590bfc2ec9ff424b71f48a | [
"MIT"
] | 17 | 2020-10-20T21:54:38.000Z | 2022-01-21T08:53:52.000Z | demo/demo.ipynb | monniert/docExtractor | 30bfe41977e98394b4590bfc2ec9ff424b71f48a | [
"MIT"
] | 5 | 2020-10-16T15:35:01.000Z | 2022-03-28T20:39:50.000Z | 26.455782 | 183 | 0.573412 | [
[
[
"# Demo",
"_____no_output_____"
]
],
[
[
"# Add code to sys.path\nimport sys\nsys.path.append('../src')\n\n# Display\nfrom IPython.display import display, HTML\ndisplay(HTML(\"<style>.container { width:95% !important; }</style>\"))",
"_____no_output_____"
]
],
[
[
"## 1. Load default model",
"_____no_output_____"
]
],
[
[
"# Select GPU ID\nimport os\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"3\"",
"_____no_output_____"
],
[
"import torch\nfrom models import load_model_from_path\nfrom utils import coerce_to_path_and_check_exist\nfrom utils.path import MODELS_PATH\nfrom utils.constant import MODEL_FILE\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\nTAG = 'default'\nmodel_path = coerce_to_path_and_check_exist(MODELS_PATH / TAG / MODEL_FILE)\nmodel, (img_size, restricted_labels, normalize) = load_model_from_path(model_path, device=device, attributes_to_return=['train_resolution', 'restricted_labels', 'normalize'])\n_ = model.eval()",
"_____no_output_____"
]
],
[
[
"## 2. Load and pre-process an input image ",
"_____no_output_____"
]
],
[
[
"from PIL import Image\nimport numpy as np\nfrom utils.image import resize\n\nimg = Image.open('img.jpg')\n\n# Resize \nimg = resize(img, img_size)\nprint(f'image size is: {img.size}')\n\n# Normalize and convert to Tensor\ninp = np.array(img, dtype=np.float32) / 255\nif normalize:\n inp = ((inp - inp.mean(axis=(0, 1))) / (inp.std(axis=(0, 1)) + 10**-7))\ninp = torch.from_numpy(inp.transpose(2, 0, 1)).float().to(device)",
"_____no_output_____"
]
],
[
[
"## 3. Predict segmentation maps and show results",
"_____no_output_____"
]
],
[
[
"from utils.constant import LABEL_TO_COLOR_MAPPING\nfrom utils.image import LabeledArray2Image\n\n# compute prediction\npred = model(inp.reshape(1, *inp.shape))[0].max(0)[1].cpu().numpy()\n\n# Retrieve good color mapping and transform to image\nrestricted_colors = [LABEL_TO_COLOR_MAPPING[l] for l in restricted_labels]\nlabel_idx_color_mapping = {restricted_labels.index(l) + 1: c for l, c in zip(restricted_labels, restricted_colors)}\npred_img = LabeledArray2Image.convert(pred, label_idx_color_mapping)\n\n# Blend predictions with original image\nmask = Image.fromarray((np.array(pred_img) == (0, 0, 0)).all(axis=-1).astype(np.uint8) * 127 + 128)\nblend_img = Image.composite(img, pred_img, mask)\nblend_img",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
eca8ef63f45e70446e54cc62b7921e48766448ee | 89,463 | ipynb | Jupyter Notebook | pytorch_ipynb/gan/gan.ipynb | NeoGitCrt1/deeplearning-models | 35aba5dc03c43bc29af5304ac248fc956e1361bf | [
"MIT"
] | 16,182 | 2019-06-05T17:56:01.000Z | 2022-03-31T16:07:10.000Z | pytorch_ipynb/gan/gan.ipynb | ShirleyGao1023/deeplearning-models | 6816908a3567ff7da539d9a2931047313c3f20dc | [
"MIT"
] | 41 | 2019-06-06T09:42:38.000Z | 2022-02-27T23:59:04.000Z | pytorch_ipynb/gan/gan.ipynb | ShirleyGao1023/deeplearning-models | 6816908a3567ff7da539d9a2931047313c3f20dc | [
"MIT"
] | 3,917 | 2019-06-05T18:03:40.000Z | 2022-03-31T06:07:34.000Z | 85.446991 | 23,000 | 0.734896 | [
[
[
"Deep Learning Models -- A collection of various deep learning architectures, models, and tips for TensorFlow and PyTorch in Jupyter Notebooks.\n- Author: Sebastian Raschka\n- GitHub Repository: https://github.com/rasbt/deeplearning-models",
"_____no_output_____"
]
],
[
[
"%load_ext watermark\n%watermark -a 'Sebastian Raschka' -v -p torch",
"Sebastian Raschka \n\nCPython 3.7.3\nIPython 7.6.1\n\ntorch 1.2.0\n"
]
],
[
[
"- Runs on CPU or GPU (if available)",
"_____no_output_____"
],
[
"# Model Zoo -- Generative Adversarial Networks (GAN)",
"_____no_output_____"
],
[
"Implementation of a standard GAN.",
"_____no_output_____"
],
[
"## Imports",
"_____no_output_____"
]
],
[
[
"import time\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom torchvision import datasets\nfrom torchvision import transforms\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\n\n\nif torch.cuda.is_available():\n torch.backends.cudnn.deterministic = True",
"_____no_output_____"
]
],
[
[
"## Settings and Dataset",
"_____no_output_____"
]
],
[
[
"##########################\n### SETTINGS\n##########################\n\n# Device\ndevice = torch.device(\"cuda:2\" if torch.cuda.is_available() else \"cpu\")\n\n# Hyperparameters\nrandom_seed = 123\ngenerator_learning_rate = 0.001\ndiscriminator_learning_rate = 0.001\nNUM_EPOCHS = 100\nBATCH_SIZE = 128\nLATENT_DIM = 75\nIMG_SHAPE = (1, 28, 28)\nIMG_SIZE = 1\nfor x in IMG_SHAPE:\n IMG_SIZE *= x\n\n\n\n##########################\n### MNIST DATASET\n##########################\n\n# Note transforms.ToTensor() scales input images\n# to 0-1 range\ntrain_dataset = datasets.MNIST(root='data', \n train=True, \n transform=transforms.ToTensor(),\n download=True)\n\ntest_dataset = datasets.MNIST(root='data', \n train=False, \n transform=transforms.ToTensor())\n\n\ntrain_loader = DataLoader(dataset=train_dataset, \n batch_size=BATCH_SIZE, \n shuffle=True)\n\ntest_loader = DataLoader(dataset=test_dataset, \n batch_size=BATCH_SIZE, \n shuffle=False)\n\n# Checking the dataset\nfor images, labels in train_loader: \n print('Image batch dimensions:', images.shape)\n print('Image label dimensions:', labels.shape)\n break",
"Image batch dimensions: torch.Size([128, 1, 28, 28])\nImage label dimensions: torch.Size([128])\n"
]
],
[
[
"## Model",
"_____no_output_____"
]
],
[
[
"##########################\n### MODEL\n##########################\n\n\nclass GAN(torch.nn.Module):\n\n def __init__(self):\n super(GAN, self).__init__()\n \n \n self.generator = nn.Sequential(\n nn.Linear(LATENT_DIM, 128),\n nn.LeakyReLU(inplace=True),\n nn.Dropout(p=0.5),\n nn.Linear(128, IMG_SIZE),\n nn.Tanh()\n )\n \n self.discriminator = nn.Sequential(\n nn.Linear(IMG_SIZE, 128),\n nn.LeakyReLU(inplace=True),\n nn.Dropout(p=0.5),\n nn.Linear(128, 1),\n nn.Sigmoid()\n )\n\n \n def generator_forward(self, z):\n img = self.generator(z)\n return img\n \n def discriminator_forward(self, img):\n pred = model.discriminator(img)\n return pred.view(-1)",
"_____no_output_____"
],
[
"torch.manual_seed(random_seed)\n\nmodel = GAN()\nmodel = model.to(device)\n\noptim_gener = torch.optim.Adam(model.generator.parameters(), lr=generator_learning_rate)\noptim_discr = torch.optim.Adam(model.discriminator.parameters(), lr=discriminator_learning_rate)",
"_____no_output_____"
]
],
[
[
"## Training",
"_____no_output_____"
]
],
[
[
"start_time = time.time() \n\ndiscr_costs = []\ngener_costs = []\nfor epoch in range(NUM_EPOCHS):\n model = model.train()\n for batch_idx, (features, targets) in enumerate(train_loader):\n\n \n \n features = (features - 0.5)*2.\n features = features.view(-1, IMG_SIZE).to(device) \n targets = targets.to(device)\n\n valid = torch.ones(targets.size(0)).float().to(device)\n fake = torch.zeros(targets.size(0)).float().to(device)\n \n\n ### FORWARD AND BACK PROP\n \n \n # --------------------------\n # Train Generator\n # --------------------------\n \n # Make new images\n z = torch.zeros((targets.size(0), LATENT_DIM)).uniform_(-1.0, 1.0).to(device)\n generated_features = model.generator_forward(z)\n \n # Loss for fooling the discriminator\n discr_pred = model.discriminator_forward(generated_features)\n \n gener_loss = F.binary_cross_entropy(discr_pred, valid)\n \n optim_gener.zero_grad()\n gener_loss.backward()\n optim_gener.step()\n \n # --------------------------\n # Train Discriminator\n # -------------------------- \n \n discr_pred_real = model.discriminator_forward(features.view(-1, IMG_SIZE))\n real_loss = F.binary_cross_entropy(discr_pred_real, valid)\n \n discr_pred_fake = model.discriminator_forward(generated_features.detach())\n fake_loss = F.binary_cross_entropy(discr_pred_fake, fake)\n \n discr_loss = 0.5*(real_loss + fake_loss)\n\n optim_discr.zero_grad()\n discr_loss.backward()\n optim_discr.step() \n \n discr_costs.append(discr_loss)\n gener_costs.append(gener_loss)\n \n \n ### LOGGING\n if not batch_idx % 100:\n print ('Epoch: %03d/%03d | Batch %03d/%03d | Gen/Dis Loss: %.4f/%.4f' \n %(epoch+1, NUM_EPOCHS, batch_idx, \n len(train_loader), gener_loss, discr_loss))\n\n print('Time elapsed: %.2f min' % ((time.time() - start_time)/60))\n \nprint('Total Training Time: %.2f min' % ((time.time() - start_time)/60))",
"Epoch: 001/100 | Batch 000/469 | Gen/Dis Loss: 0.6576/0.7134\nEpoch: 001/100 | Batch 100/469 | Gen/Dis Loss: 5.1797/0.0280\nEpoch: 001/100 | Batch 200/469 | Gen/Dis Loss: 1.8944/0.0933\nEpoch: 001/100 | Batch 300/469 | Gen/Dis Loss: 1.5018/0.1451\nEpoch: 001/100 | Batch 400/469 | Gen/Dis Loss: 2.0884/0.1026\nTime elapsed: 0.27 min\nEpoch: 002/100 | Batch 000/469 | Gen/Dis Loss: 2.8803/0.0496\nEpoch: 002/100 | Batch 100/469 | Gen/Dis Loss: 3.4923/0.0483\nEpoch: 002/100 | Batch 200/469 | Gen/Dis Loss: 2.9812/0.1615\nEpoch: 002/100 | Batch 300/469 | Gen/Dis Loss: 2.2371/0.1658\nEpoch: 002/100 | Batch 400/469 | Gen/Dis Loss: 1.7027/0.2905\nTime elapsed: 0.51 min\nEpoch: 003/100 | Batch 000/469 | Gen/Dis Loss: 1.2188/0.3533\nEpoch: 003/100 | Batch 100/469 | Gen/Dis Loss: 1.8254/0.2083\nEpoch: 003/100 | Batch 200/469 | Gen/Dis Loss: 1.9774/0.2238\nEpoch: 003/100 | Batch 300/469 | Gen/Dis Loss: 1.9323/0.2806\nEpoch: 003/100 | Batch 400/469 | Gen/Dis Loss: 1.9518/0.2712\nTime elapsed: 0.77 min\nEpoch: 004/100 | Batch 000/469 | Gen/Dis Loss: 1.2785/0.3455\nEpoch: 004/100 | Batch 100/469 | Gen/Dis Loss: 1.3979/0.3208\nEpoch: 004/100 | Batch 200/469 | Gen/Dis Loss: 1.4295/0.3638\nEpoch: 004/100 | Batch 300/469 | Gen/Dis Loss: 1.2798/0.3620\nEpoch: 004/100 | Batch 400/469 | Gen/Dis Loss: 1.1321/0.4751\nTime elapsed: 1.04 min\nEpoch: 005/100 | Batch 000/469 | Gen/Dis Loss: 1.1786/0.3932\nEpoch: 005/100 | Batch 100/469 | Gen/Dis Loss: 1.1437/0.4343\nEpoch: 005/100 | Batch 200/469 | Gen/Dis Loss: 1.0105/0.4453\nEpoch: 005/100 | Batch 300/469 | Gen/Dis Loss: 1.3987/0.4194\nEpoch: 005/100 | Batch 400/469 | Gen/Dis Loss: 1.3960/0.4005\nTime elapsed: 1.28 min\nEpoch: 006/100 | Batch 000/469 | Gen/Dis Loss: 1.3119/0.4792\nEpoch: 006/100 | Batch 100/469 | Gen/Dis Loss: 1.6029/0.4045\nEpoch: 006/100 | Batch 200/469 | Gen/Dis Loss: 1.6302/0.3768\nEpoch: 006/100 | Batch 300/469 | Gen/Dis Loss: 0.9141/0.4838\nEpoch: 006/100 | Batch 400/469 | Gen/Dis Loss: 0.9891/0.4810\nTime elapsed: 1.56 
min\nEpoch: 007/100 | Batch 000/469 | Gen/Dis Loss: 1.3198/0.4820\nEpoch: 007/100 | Batch 100/469 | Gen/Dis Loss: 1.1527/0.4620\nEpoch: 007/100 | Batch 200/469 | Gen/Dis Loss: 1.3668/0.3967\nEpoch: 007/100 | Batch 300/469 | Gen/Dis Loss: 1.6183/0.4676\nEpoch: 007/100 | Batch 400/469 | Gen/Dis Loss: 1.0077/0.4841\nTime elapsed: 1.85 min\nEpoch: 008/100 | Batch 000/469 | Gen/Dis Loss: 1.2245/0.5437\nEpoch: 008/100 | Batch 100/469 | Gen/Dis Loss: 1.0142/0.4928\nEpoch: 008/100 | Batch 200/469 | Gen/Dis Loss: 0.8817/0.4939\nEpoch: 008/100 | Batch 300/469 | Gen/Dis Loss: 1.0748/0.4967\nEpoch: 008/100 | Batch 400/469 | Gen/Dis Loss: 2.1265/0.4329\nTime elapsed: 2.11 min\nEpoch: 009/100 | Batch 000/469 | Gen/Dis Loss: 0.9277/0.4871\nEpoch: 009/100 | Batch 100/469 | Gen/Dis Loss: 1.1624/0.4473\nEpoch: 009/100 | Batch 200/469 | Gen/Dis Loss: 1.1869/0.4800\nEpoch: 009/100 | Batch 300/469 | Gen/Dis Loss: 1.9998/0.4295\nEpoch: 009/100 | Batch 400/469 | Gen/Dis Loss: 1.6921/0.5037\nTime elapsed: 2.34 min\nEpoch: 010/100 | Batch 000/469 | Gen/Dis Loss: 1.3091/0.4358\nEpoch: 010/100 | Batch 100/469 | Gen/Dis Loss: 1.2604/0.5375\nEpoch: 010/100 | Batch 200/469 | Gen/Dis Loss: 1.1491/0.4537\nEpoch: 010/100 | Batch 300/469 | Gen/Dis Loss: 1.3843/0.5068\nEpoch: 010/100 | Batch 400/469 | Gen/Dis Loss: 1.3413/0.5051\nTime elapsed: 2.60 min\nEpoch: 011/100 | Batch 000/469 | Gen/Dis Loss: 1.2368/0.5161\nEpoch: 011/100 | Batch 100/469 | Gen/Dis Loss: 1.3715/0.4692\nEpoch: 011/100 | Batch 200/469 | Gen/Dis Loss: 1.1182/0.5274\nEpoch: 011/100 | Batch 300/469 | Gen/Dis Loss: 1.2770/0.4649\nEpoch: 011/100 | Batch 400/469 | Gen/Dis Loss: 1.1847/0.5504\nTime elapsed: 2.84 min\nEpoch: 012/100 | Batch 000/469 | Gen/Dis Loss: 0.9930/0.5509\nEpoch: 012/100 | Batch 100/469 | Gen/Dis Loss: 1.1921/0.5310\nEpoch: 012/100 | Batch 200/469 | Gen/Dis Loss: 0.9925/0.6062\nEpoch: 012/100 | Batch 300/469 | Gen/Dis Loss: 1.1246/0.5170\nEpoch: 012/100 | Batch 400/469 | Gen/Dis Loss: 1.0432/0.4437\nTime elapsed: 
3.07 min\nEpoch: 013/100 | Batch 000/469 | Gen/Dis Loss: 1.1419/0.5287\nEpoch: 013/100 | Batch 100/469 | Gen/Dis Loss: 1.0053/0.5152\nEpoch: 013/100 | Batch 200/469 | Gen/Dis Loss: 1.1308/0.5384\nEpoch: 013/100 | Batch 300/469 | Gen/Dis Loss: 1.1822/0.5124\nEpoch: 013/100 | Batch 400/469 | Gen/Dis Loss: 1.4501/0.5495\nTime elapsed: 3.32 min\nEpoch: 014/100 | Batch 000/469 | Gen/Dis Loss: 1.1417/0.5364\nEpoch: 014/100 | Batch 100/469 | Gen/Dis Loss: 0.9595/0.5884\nEpoch: 014/100 | Batch 200/469 | Gen/Dis Loss: 0.9887/0.5216\nEpoch: 014/100 | Batch 300/469 | Gen/Dis Loss: 1.0332/0.5686\nEpoch: 014/100 | Batch 400/469 | Gen/Dis Loss: 1.5268/0.4554\nTime elapsed: 3.60 min\nEpoch: 015/100 | Batch 000/469 | Gen/Dis Loss: 1.1181/0.4960\nEpoch: 015/100 | Batch 100/469 | Gen/Dis Loss: 1.2722/0.4632\nEpoch: 015/100 | Batch 200/469 | Gen/Dis Loss: 0.9523/0.6012\nEpoch: 015/100 | Batch 300/469 | Gen/Dis Loss: 0.9905/0.5274\nEpoch: 015/100 | Batch 400/469 | Gen/Dis Loss: 1.0448/0.5855\nTime elapsed: 3.82 min\nEpoch: 016/100 | Batch 000/469 | Gen/Dis Loss: 1.0641/0.5432\nEpoch: 016/100 | Batch 100/469 | Gen/Dis Loss: 0.9587/0.5636\nEpoch: 016/100 | Batch 200/469 | Gen/Dis Loss: 1.3602/0.5691\nEpoch: 016/100 | Batch 300/469 | Gen/Dis Loss: 1.1294/0.5564\nEpoch: 016/100 | Batch 400/469 | Gen/Dis Loss: 1.0727/0.5042\nTime elapsed: 4.04 min\nEpoch: 017/100 | Batch 000/469 | Gen/Dis Loss: 0.9285/0.6045\nEpoch: 017/100 | Batch 100/469 | Gen/Dis Loss: 1.0024/0.6384\nEpoch: 017/100 | Batch 200/469 | Gen/Dis Loss: 1.5662/0.4652\nEpoch: 017/100 | Batch 300/469 | Gen/Dis Loss: 1.3644/0.4632\nEpoch: 017/100 | Batch 400/469 | Gen/Dis Loss: 1.2681/0.5238\nTime elapsed: 4.22 min\nEpoch: 018/100 | Batch 000/469 | Gen/Dis Loss: 1.2578/0.5151\nEpoch: 018/100 | Batch 100/469 | Gen/Dis Loss: 1.6475/0.4929\nEpoch: 018/100 | Batch 200/469 | Gen/Dis Loss: 1.0610/0.5496\nEpoch: 018/100 | Batch 300/469 | Gen/Dis Loss: 1.0613/0.5634\nEpoch: 018/100 | Batch 400/469 | Gen/Dis Loss: 1.4675/0.4589\nTime 
elapsed: 4.40 min\nEpoch: 019/100 | Batch 000/469 | Gen/Dis Loss: 1.1211/0.5027\nEpoch: 019/100 | Batch 100/469 | Gen/Dis Loss: 1.1444/0.5655\nEpoch: 019/100 | Batch 200/469 | Gen/Dis Loss: 1.2471/0.5716\nEpoch: 019/100 | Batch 300/469 | Gen/Dis Loss: 1.0223/0.5106\nEpoch: 019/100 | Batch 400/469 | Gen/Dis Loss: 1.0361/0.5805\nTime elapsed: 4.58 min\nEpoch: 020/100 | Batch 000/469 | Gen/Dis Loss: 0.9195/0.5428\nEpoch: 020/100 | Batch 100/469 | Gen/Dis Loss: 1.3110/0.4955\nEpoch: 020/100 | Batch 200/469 | Gen/Dis Loss: 1.2449/0.4973\nEpoch: 020/100 | Batch 300/469 | Gen/Dis Loss: 1.3258/0.4992\nEpoch: 020/100 | Batch 400/469 | Gen/Dis Loss: 1.2196/0.5279\nTime elapsed: 4.77 min\nEpoch: 021/100 | Batch 000/469 | Gen/Dis Loss: 1.5621/0.5584\nEpoch: 021/100 | Batch 100/469 | Gen/Dis Loss: 1.1148/0.5888\nEpoch: 021/100 | Batch 200/469 | Gen/Dis Loss: 1.5108/0.4636\nEpoch: 021/100 | Batch 300/469 | Gen/Dis Loss: 1.0957/0.4912\nEpoch: 021/100 | Batch 400/469 | Gen/Dis Loss: 1.0342/0.5184\nTime elapsed: 4.92 min\nEpoch: 022/100 | Batch 000/469 | Gen/Dis Loss: 1.9312/0.4366\nEpoch: 022/100 | Batch 100/469 | Gen/Dis Loss: 1.2312/0.5260\nEpoch: 022/100 | Batch 200/469 | Gen/Dis Loss: 1.1939/0.5075\nEpoch: 022/100 | Batch 300/469 | Gen/Dis Loss: 1.1393/0.5692\nEpoch: 022/100 | Batch 400/469 | Gen/Dis Loss: 1.0390/0.5261\nTime elapsed: 5.05 min\nEpoch: 023/100 | Batch 000/469 | Gen/Dis Loss: 1.3148/0.4902\nEpoch: 023/100 | Batch 100/469 | Gen/Dis Loss: 1.2077/0.6129\nEpoch: 023/100 | Batch 200/469 | Gen/Dis Loss: 1.0886/0.5545\nEpoch: 023/100 | Batch 300/469 | Gen/Dis Loss: 1.0762/0.4948\nEpoch: 023/100 | Batch 400/469 | Gen/Dis Loss: 1.5361/0.5476\nTime elapsed: 5.17 min\nEpoch: 024/100 | Batch 000/469 | Gen/Dis Loss: 1.1752/0.5881\nEpoch: 024/100 | Batch 100/469 | Gen/Dis Loss: 1.3408/0.5339\nEpoch: 024/100 | Batch 200/469 | Gen/Dis Loss: 1.2613/0.4555\nEpoch: 024/100 | Batch 300/469 | Gen/Dis Loss: 1.0707/0.5099\nEpoch: 024/100 | Batch 400/469 | Gen/Dis Loss: 
1.1063/0.5695\nTime elapsed: 5.32 min\nEpoch: 025/100 | Batch 000/469 | Gen/Dis Loss: 1.2911/0.5084\nEpoch: 025/100 | Batch 100/469 | Gen/Dis Loss: 1.1280/0.5151\nEpoch: 025/100 | Batch 200/469 | Gen/Dis Loss: 1.3799/0.5784\nEpoch: 025/100 | Batch 300/469 | Gen/Dis Loss: 1.1675/0.6001\nEpoch: 025/100 | Batch 400/469 | Gen/Dis Loss: 0.9834/0.6158\nTime elapsed: 5.48 min\nEpoch: 026/100 | Batch 000/469 | Gen/Dis Loss: 1.2713/0.5475\nEpoch: 026/100 | Batch 100/469 | Gen/Dis Loss: 1.3814/0.5652\nEpoch: 026/100 | Batch 200/469 | Gen/Dis Loss: 1.1782/0.4850\nEpoch: 026/100 | Batch 300/469 | Gen/Dis Loss: 0.9917/0.5888\nEpoch: 026/100 | Batch 400/469 | Gen/Dis Loss: 1.0909/0.5825\nTime elapsed: 5.64 min\nEpoch: 027/100 | Batch 000/469 | Gen/Dis Loss: 1.0873/0.5579\nEpoch: 027/100 | Batch 100/469 | Gen/Dis Loss: 0.9639/0.5860\nEpoch: 027/100 | Batch 200/469 | Gen/Dis Loss: 1.0458/0.5526\nEpoch: 027/100 | Batch 300/469 | Gen/Dis Loss: 1.3373/0.5140\nEpoch: 027/100 | Batch 400/469 | Gen/Dis Loss: 1.2790/0.5223\nTime elapsed: 5.79 min\nEpoch: 028/100 | Batch 000/469 | Gen/Dis Loss: 0.9300/0.5869\nEpoch: 028/100 | Batch 100/469 | Gen/Dis Loss: 1.0022/0.6056\nEpoch: 028/100 | Batch 200/469 | Gen/Dis Loss: 1.0688/0.5447\nEpoch: 028/100 | Batch 300/469 | Gen/Dis Loss: 1.0161/0.5702\nEpoch: 028/100 | Batch 400/469 | Gen/Dis Loss: 0.8731/0.5543\nTime elapsed: 5.92 min\nEpoch: 029/100 | Batch 000/469 | Gen/Dis Loss: 0.8719/0.5524\nEpoch: 029/100 | Batch 100/469 | Gen/Dis Loss: 1.3005/0.5179\nEpoch: 029/100 | Batch 200/469 | Gen/Dis Loss: 1.2986/0.5312\nEpoch: 029/100 | Batch 300/469 | Gen/Dis Loss: 1.1084/0.5207\nEpoch: 029/100 | Batch 400/469 | Gen/Dis Loss: 1.0591/0.5577\nTime elapsed: 6.07 min\nEpoch: 030/100 | Batch 000/469 | Gen/Dis Loss: 1.0231/0.6170\nEpoch: 030/100 | Batch 100/469 | Gen/Dis Loss: 0.9142/0.6046\nEpoch: 030/100 | Batch 200/469 | Gen/Dis Loss: 1.2140/0.5290\nEpoch: 030/100 | Batch 300/469 | Gen/Dis Loss: 0.8784/0.5804\nEpoch: 030/100 | Batch 400/469 | Gen/Dis 
Loss: 1.1178/0.5165\nTime elapsed: 6.20 min\nEpoch: 031/100 | Batch 000/469 | Gen/Dis Loss: 0.9555/0.5921\nEpoch: 031/100 | Batch 100/469 | Gen/Dis Loss: 0.9644/0.5432\nEpoch: 031/100 | Batch 200/469 | Gen/Dis Loss: 0.9531/0.5465\nEpoch: 031/100 | Batch 300/469 | Gen/Dis Loss: 1.3496/0.5550\nEpoch: 031/100 | Batch 400/469 | Gen/Dis Loss: 1.2137/0.5672\nTime elapsed: 6.32 min\nEpoch: 032/100 | Batch 000/469 | Gen/Dis Loss: 1.0849/0.5020\nEpoch: 032/100 | Batch 100/469 | Gen/Dis Loss: 0.9098/0.5481\nEpoch: 032/100 | Batch 200/469 | Gen/Dis Loss: 1.2349/0.5024\nEpoch: 032/100 | Batch 300/469 | Gen/Dis Loss: 0.9468/0.5599\nEpoch: 032/100 | Batch 400/469 | Gen/Dis Loss: 1.4531/0.4928\nTime elapsed: 6.45 min\nEpoch: 033/100 | Batch 000/469 | Gen/Dis Loss: 1.3397/0.5521\nEpoch: 033/100 | Batch 100/469 | Gen/Dis Loss: 1.0106/0.5472\nEpoch: 033/100 | Batch 200/469 | Gen/Dis Loss: 0.9787/0.5606\nEpoch: 033/100 | Batch 300/469 | Gen/Dis Loss: 1.1434/0.5388\nEpoch: 033/100 | Batch 400/469 | Gen/Dis Loss: 1.0476/0.5259\nTime elapsed: 6.57 min\nEpoch: 034/100 | Batch 000/469 | Gen/Dis Loss: 1.3847/0.5294\nEpoch: 034/100 | Batch 100/469 | Gen/Dis Loss: 0.8550/0.5800\nEpoch: 034/100 | Batch 200/469 | Gen/Dis Loss: 1.0220/0.5527\nEpoch: 034/100 | Batch 300/469 | Gen/Dis Loss: 0.9255/0.5751\nEpoch: 034/100 | Batch 400/469 | Gen/Dis Loss: 1.0400/0.5554\nTime elapsed: 6.72 min\nEpoch: 035/100 | Batch 000/469 | Gen/Dis Loss: 0.9723/0.5789\nEpoch: 035/100 | Batch 100/469 | Gen/Dis Loss: 1.4414/0.4769\nEpoch: 035/100 | Batch 200/469 | Gen/Dis Loss: 0.9431/0.5898\nEpoch: 035/100 | Batch 300/469 | Gen/Dis Loss: 0.8252/0.6573\nEpoch: 035/100 | Batch 400/469 | Gen/Dis Loss: 0.9694/0.5427\nTime elapsed: 6.84 min\nEpoch: 036/100 | Batch 000/469 | Gen/Dis Loss: 1.3664/0.5839\nEpoch: 036/100 | Batch 100/469 | Gen/Dis Loss: 1.0854/0.5739\nEpoch: 036/100 | Batch 200/469 | Gen/Dis Loss: 1.0429/0.5457\nEpoch: 036/100 | Batch 300/469 | Gen/Dis Loss: 0.8601/0.6151\nEpoch: 036/100 | Batch 400/469 | 
Gen/Dis Loss: 1.2785/0.5850\nTime elapsed: 6.97 min\nEpoch: 037/100 | Batch 000/469 | Gen/Dis Loss: 1.0251/0.5933\nEpoch: 037/100 | Batch 100/469 | Gen/Dis Loss: 1.2177/0.5053\nEpoch: 037/100 | Batch 200/469 | Gen/Dis Loss: 0.8804/0.5925\nEpoch: 037/100 | Batch 300/469 | Gen/Dis Loss: 1.2797/0.6173\nEpoch: 037/100 | Batch 400/469 | Gen/Dis Loss: 0.9189/0.6238\nTime elapsed: 7.10 min\nEpoch: 038/100 | Batch 000/469 | Gen/Dis Loss: 1.3463/0.5419\nEpoch: 038/100 | Batch 100/469 | Gen/Dis Loss: 1.0166/0.6045\nEpoch: 038/100 | Batch 200/469 | Gen/Dis Loss: 0.9895/0.6320\nEpoch: 038/100 | Batch 300/469 | Gen/Dis Loss: 0.9749/0.5621\nEpoch: 038/100 | Batch 400/469 | Gen/Dis Loss: 1.0448/0.5945\nTime elapsed: 7.24 min\nEpoch: 039/100 | Batch 000/469 | Gen/Dis Loss: 0.9662/0.5669\nEpoch: 039/100 | Batch 100/469 | Gen/Dis Loss: 1.1476/0.5462\nEpoch: 039/100 | Batch 200/469 | Gen/Dis Loss: 0.9662/0.5554\nEpoch: 039/100 | Batch 300/469 | Gen/Dis Loss: 1.0850/0.6031\nEpoch: 039/100 | Batch 400/469 | Gen/Dis Loss: 1.1491/0.6014\nTime elapsed: 7.41 min\nEpoch: 040/100 | Batch 000/469 | Gen/Dis Loss: 0.9942/0.5999\nEpoch: 040/100 | Batch 100/469 | Gen/Dis Loss: 0.9034/0.5979\nEpoch: 040/100 | Batch 200/469 | Gen/Dis Loss: 1.1880/0.5693\nEpoch: 040/100 | Batch 300/469 | Gen/Dis Loss: 1.0893/0.5933\nEpoch: 040/100 | Batch 400/469 | Gen/Dis Loss: 1.0711/0.5501\nTime elapsed: 7.59 min\nEpoch: 041/100 | Batch 000/469 | Gen/Dis Loss: 0.9100/0.5957\nEpoch: 041/100 | Batch 100/469 | Gen/Dis Loss: 0.7538/0.5947\nEpoch: 041/100 | Batch 200/469 | Gen/Dis Loss: 0.9743/0.5999\nEpoch: 041/100 | Batch 300/469 | Gen/Dis Loss: 0.8305/0.6395\nEpoch: 041/100 | Batch 400/469 | Gen/Dis Loss: 1.1106/0.6419\nTime elapsed: 7.73 min\nEpoch: 042/100 | Batch 000/469 | Gen/Dis Loss: 1.1241/0.5890\nEpoch: 042/100 | Batch 100/469 | Gen/Dis Loss: 0.8509/0.6164\nEpoch: 042/100 | Batch 200/469 | Gen/Dis Loss: 1.2024/0.5684\nEpoch: 042/100 | Batch 300/469 | Gen/Dis Loss: 0.9708/0.6378\nEpoch: 042/100 | Batch 
400/469 | Gen/Dis Loss: 1.1171/0.5501\nTime elapsed: 7.85 min\nEpoch: 043/100 | Batch 000/469 | Gen/Dis Loss: 1.0931/0.5653\nEpoch: 043/100 | Batch 100/469 | Gen/Dis Loss: 1.0468/0.5782\nEpoch: 043/100 | Batch 200/469 | Gen/Dis Loss: 1.0359/0.6329\nEpoch: 043/100 | Batch 300/469 | Gen/Dis Loss: 1.1976/0.6114\nEpoch: 043/100 | Batch 400/469 | Gen/Dis Loss: 0.8817/0.6200\nTime elapsed: 7.98 min\nEpoch: 044/100 | Batch 000/469 | Gen/Dis Loss: 0.9911/0.6061\nEpoch: 044/100 | Batch 100/469 | Gen/Dis Loss: 1.0196/0.6435\nEpoch: 044/100 | Batch 200/469 | Gen/Dis Loss: 1.0005/0.6266\nEpoch: 044/100 | Batch 300/469 | Gen/Dis Loss: 0.8342/0.6092\nEpoch: 044/100 | Batch 400/469 | Gen/Dis Loss: 0.8342/0.5589\nTime elapsed: 8.10 min\nEpoch: 045/100 | Batch 000/469 | Gen/Dis Loss: 0.7638/0.6289\nEpoch: 045/100 | Batch 100/469 | Gen/Dis Loss: 0.9049/0.5920\nEpoch: 045/100 | Batch 200/469 | Gen/Dis Loss: 1.0077/0.5975\nEpoch: 045/100 | Batch 300/469 | Gen/Dis Loss: 0.9315/0.6066\nEpoch: 045/100 | Batch 400/469 | Gen/Dis Loss: 0.7719/0.6624\nTime elapsed: 8.23 min\nEpoch: 046/100 | Batch 000/469 | Gen/Dis Loss: 1.0064/0.5672\nEpoch: 046/100 | Batch 100/469 | Gen/Dis Loss: 0.8730/0.6217\nEpoch: 046/100 | Batch 200/469 | Gen/Dis Loss: 1.2217/0.5859\nEpoch: 046/100 | Batch 300/469 | Gen/Dis Loss: 1.1649/0.5878\nEpoch: 046/100 | Batch 400/469 | Gen/Dis Loss: 0.9912/0.5882\nTime elapsed: 8.35 min\nEpoch: 047/100 | Batch 000/469 | Gen/Dis Loss: 0.8579/0.6209\nEpoch: 047/100 | Batch 100/469 | Gen/Dis Loss: 1.0072/0.5908\nEpoch: 047/100 | Batch 200/469 | Gen/Dis Loss: 0.8694/0.6285\nEpoch: 047/100 | Batch 300/469 | Gen/Dis Loss: 0.9354/0.6087\nEpoch: 047/100 | Batch 400/469 | Gen/Dis Loss: 0.8800/0.6521\nTime elapsed: 8.48 min\nEpoch: 048/100 | Batch 000/469 | Gen/Dis Loss: 0.8513/0.6051\nEpoch: 048/100 | Batch 100/469 | Gen/Dis Loss: 0.8803/0.6090\nEpoch: 048/100 | Batch 200/469 | Gen/Dis Loss: 1.0930/0.6115\nEpoch: 048/100 | Batch 300/469 | Gen/Dis Loss: 0.7406/0.6692\nEpoch: 048/100 | 
Batch 400/469 | Gen/Dis Loss: 0.8551/0.6188\nTime elapsed: 8.62 min\nEpoch: 049/100 | Batch 000/469 | Gen/Dis Loss: 0.8792/0.5986\nEpoch: 049/100 | Batch 100/469 | Gen/Dis Loss: 0.8424/0.6277\nEpoch: 049/100 | Batch 200/469 | Gen/Dis Loss: 0.7973/0.6320\nEpoch: 049/100 | Batch 300/469 | Gen/Dis Loss: 0.9188/0.5828\nEpoch: 049/100 | Batch 400/469 | Gen/Dis Loss: 0.9253/0.6013\nTime elapsed: 8.80 min\nEpoch: 050/100 | Batch 000/469 | Gen/Dis Loss: 1.3241/0.5689\nEpoch: 050/100 | Batch 100/469 | Gen/Dis Loss: 1.0220/0.5922\nEpoch: 050/100 | Batch 200/469 | Gen/Dis Loss: 0.9210/0.6024\nEpoch: 050/100 | Batch 300/469 | Gen/Dis Loss: 0.8139/0.6578\nEpoch: 050/100 | Batch 400/469 | Gen/Dis Loss: 1.0371/0.5987\nTime elapsed: 8.93 min\nEpoch: 051/100 | Batch 000/469 | Gen/Dis Loss: 0.9253/0.6002\nEpoch: 051/100 | Batch 100/469 | Gen/Dis Loss: 0.8154/0.5774\nEpoch: 051/100 | Batch 200/469 | Gen/Dis Loss: 0.9697/0.6240\nEpoch: 051/100 | Batch 300/469 | Gen/Dis Loss: 1.1185/0.5541\nEpoch: 051/100 | Batch 400/469 | Gen/Dis Loss: 0.8016/0.6642\nTime elapsed: 9.06 min\nEpoch: 052/100 | Batch 000/469 | Gen/Dis Loss: 0.8716/0.6364\nEpoch: 052/100 | Batch 100/469 | Gen/Dis Loss: 0.9636/0.5944\nEpoch: 052/100 | Batch 200/469 | Gen/Dis Loss: 0.9511/0.6204\nEpoch: 052/100 | Batch 300/469 | Gen/Dis Loss: 0.9293/0.5901\nEpoch: 052/100 | Batch 400/469 | Gen/Dis Loss: 1.1139/0.5535\nTime elapsed: 9.18 min\nEpoch: 053/100 | Batch 000/469 | Gen/Dis Loss: 0.8345/0.6399\nEpoch: 053/100 | Batch 100/469 | Gen/Dis Loss: 1.0420/0.5847\nEpoch: 053/100 | Batch 200/469 | Gen/Dis Loss: 0.8887/0.6183\nEpoch: 053/100 | Batch 300/469 | Gen/Dis Loss: 1.1280/0.5869\nEpoch: 053/100 | Batch 400/469 | Gen/Dis Loss: 0.8391/0.6031\nTime elapsed: 9.30 min\nEpoch: 054/100 | Batch 000/469 | Gen/Dis Loss: 1.0584/0.5659\nEpoch: 054/100 | Batch 100/469 | Gen/Dis Loss: 0.8722/0.5991\nEpoch: 054/100 | Batch 200/469 | Gen/Dis Loss: 0.8416/0.6067\nEpoch: 054/100 | Batch 300/469 | Gen/Dis Loss: 0.9295/0.5910\nEpoch: 
054/100 | Batch 400/469 | Gen/Dis Loss: 0.7705/0.6145\nTime elapsed: 9.43 min\nEpoch: 055/100 | Batch 000/469 | Gen/Dis Loss: 0.9697/0.6207\nEpoch: 055/100 | Batch 100/469 | Gen/Dis Loss: 1.3702/0.5782\nEpoch: 055/100 | Batch 200/469 | Gen/Dis Loss: 0.8874/0.6034\nEpoch: 055/100 | Batch 300/469 | Gen/Dis Loss: 0.9273/0.6095\nEpoch: 055/100 | Batch 400/469 | Gen/Dis Loss: 1.0736/0.5893\nTime elapsed: 9.57 min\nEpoch: 056/100 | Batch 000/469 | Gen/Dis Loss: 0.9631/0.5959\nEpoch: 056/100 | Batch 100/469 | Gen/Dis Loss: 0.8657/0.6398\nEpoch: 056/100 | Batch 200/469 | Gen/Dis Loss: 0.8120/0.6027\nEpoch: 056/100 | Batch 300/469 | Gen/Dis Loss: 1.1529/0.6493\nEpoch: 056/100 | Batch 400/469 | Gen/Dis Loss: 0.9172/0.5788\nTime elapsed: 9.77 min\nEpoch: 057/100 | Batch 000/469 | Gen/Dis Loss: 0.9197/0.6090\nEpoch: 057/100 | Batch 100/469 | Gen/Dis Loss: 0.9413/0.6255\nEpoch: 057/100 | Batch 200/469 | Gen/Dis Loss: 0.9020/0.5870\nEpoch: 057/100 | Batch 300/469 | Gen/Dis Loss: 0.9947/0.5586\nEpoch: 057/100 | Batch 400/469 | Gen/Dis Loss: 0.9077/0.6454\nTime elapsed: 10.03 min\nEpoch: 058/100 | Batch 000/469 | Gen/Dis Loss: 0.8899/0.6106\nEpoch: 058/100 | Batch 100/469 | Gen/Dis Loss: 0.8154/0.6554\nEpoch: 058/100 | Batch 200/469 | Gen/Dis Loss: 0.9307/0.5997\nEpoch: 058/100 | Batch 300/469 | Gen/Dis Loss: 0.8293/0.5881\nEpoch: 058/100 | Batch 400/469 | Gen/Dis Loss: 0.9434/0.6448\nTime elapsed: 10.31 min\nEpoch: 059/100 | Batch 000/469 | Gen/Dis Loss: 0.9638/0.6325\nEpoch: 059/100 | Batch 100/469 | Gen/Dis Loss: 0.9374/0.6304\nEpoch: 059/100 | Batch 200/469 | Gen/Dis Loss: 0.8452/0.6464\nEpoch: 059/100 | Batch 300/469 | Gen/Dis Loss: 1.0170/0.6210\nEpoch: 059/100 | Batch 400/469 | Gen/Dis Loss: 0.8808/0.5950\nTime elapsed: 10.56 min\nEpoch: 060/100 | Batch 000/469 | Gen/Dis Loss: 0.9076/0.5969\nEpoch: 060/100 | Batch 100/469 | Gen/Dis Loss: 1.1195/0.6040\nEpoch: 060/100 | Batch 200/469 | Gen/Dis Loss: 0.9015/0.6149\nEpoch: 060/100 | Batch 300/469 | Gen/Dis Loss: 
0.8414/0.5804\nEpoch: 060/100 | Batch 400/469 | Gen/Dis Loss: 0.8220/0.6557\nTime elapsed: 10.83 min\nEpoch: 061/100 | Batch 000/469 | Gen/Dis Loss: 0.8411/0.6360\nEpoch: 061/100 | Batch 100/469 | Gen/Dis Loss: 0.8431/0.6304\nEpoch: 061/100 | Batch 200/469 | Gen/Dis Loss: 0.7740/0.6395\nEpoch: 061/100 | Batch 300/469 | Gen/Dis Loss: 0.8840/0.5987\nEpoch: 061/100 | Batch 400/469 | Gen/Dis Loss: 0.8510/0.6232\nTime elapsed: 11.07 min\nEpoch: 062/100 | Batch 000/469 | Gen/Dis Loss: 1.0286/0.6151\nEpoch: 062/100 | Batch 100/469 | Gen/Dis Loss: 1.0516/0.5767\nEpoch: 062/100 | Batch 200/469 | Gen/Dis Loss: 0.8182/0.5654\nEpoch: 062/100 | Batch 300/469 | Gen/Dis Loss: 0.8658/0.6156\nEpoch: 062/100 | Batch 400/469 | Gen/Dis Loss: 0.9674/0.6434\nTime elapsed: 11.33 min\nEpoch: 063/100 | Batch 000/469 | Gen/Dis Loss: 0.6952/0.6601\nEpoch: 063/100 | Batch 100/469 | Gen/Dis Loss: 0.8180/0.6041\nEpoch: 063/100 | Batch 200/469 | Gen/Dis Loss: 0.8224/0.6683\nEpoch: 063/100 | Batch 300/469 | Gen/Dis Loss: 0.9604/0.5938\nEpoch: 063/100 | Batch 400/469 | Gen/Dis Loss: 0.7969/0.6561\nTime elapsed: 11.54 min\nEpoch: 064/100 | Batch 000/469 | Gen/Dis Loss: 0.8544/0.6290\nEpoch: 064/100 | Batch 100/469 | Gen/Dis Loss: 0.8685/0.5925\nEpoch: 064/100 | Batch 200/469 | Gen/Dis Loss: 1.4746/0.5992\nEpoch: 064/100 | Batch 300/469 | Gen/Dis Loss: 0.8570/0.6417\nEpoch: 064/100 | Batch 400/469 | Gen/Dis Loss: 0.8588/0.6461\nTime elapsed: 11.78 min\nEpoch: 065/100 | Batch 000/469 | Gen/Dis Loss: 0.8579/0.6151\nEpoch: 065/100 | Batch 100/469 | Gen/Dis Loss: 0.9720/0.5867\nEpoch: 065/100 | Batch 200/469 | Gen/Dis Loss: 0.8870/0.6215\nEpoch: 065/100 | Batch 300/469 | Gen/Dis Loss: 0.8184/0.6506\nEpoch: 065/100 | Batch 400/469 | Gen/Dis Loss: 0.9247/0.6219\nTime elapsed: 12.03 min\nEpoch: 066/100 | Batch 000/469 | Gen/Dis Loss: 0.9073/0.6157\nEpoch: 066/100 | Batch 100/469 | Gen/Dis Loss: 0.8459/0.6364\nEpoch: 066/100 | Batch 200/469 | Gen/Dis Loss: 1.0687/0.5647\nEpoch: 066/100 | Batch 300/469 | 
Gen/Dis Loss: 0.9213/0.6136\nEpoch: 066/100 | Batch 400/469 | Gen/Dis Loss: 0.7895/0.6409\nTime elapsed: 12.30 min\nEpoch: 067/100 | Batch 000/469 | Gen/Dis Loss: 0.8258/0.6246\nEpoch: 067/100 | Batch 100/469 | Gen/Dis Loss: 0.9616/0.5776\nEpoch: 067/100 | Batch 200/469 | Gen/Dis Loss: 0.9039/0.6012\nEpoch: 067/100 | Batch 300/469 | Gen/Dis Loss: 0.9857/0.5949\nEpoch: 067/100 | Batch 400/469 | Gen/Dis Loss: 1.1779/0.5773\nTime elapsed: 12.58 min\nEpoch: 068/100 | Batch 000/469 | Gen/Dis Loss: 0.9631/0.6006\nEpoch: 068/100 | Batch 100/469 | Gen/Dis Loss: 0.7157/0.6103\nEpoch: 068/100 | Batch 200/469 | Gen/Dis Loss: 0.8400/0.6223\nEpoch: 068/100 | Batch 300/469 | Gen/Dis Loss: 1.0586/0.5840\nEpoch: 068/100 | Batch 400/469 | Gen/Dis Loss: 0.9487/0.6224\nTime elapsed: 12.84 min\nEpoch: 069/100 | Batch 000/469 | Gen/Dis Loss: 1.0124/0.5248\nEpoch: 069/100 | Batch 100/469 | Gen/Dis Loss: 0.8849/0.6481\nEpoch: 069/100 | Batch 200/469 | Gen/Dis Loss: 0.9250/0.6130\nEpoch: 069/100 | Batch 300/469 | Gen/Dis Loss: 0.9207/0.6420\nEpoch: 069/100 | Batch 400/469 | Gen/Dis Loss: 0.8661/0.6100\nTime elapsed: 13.11 min\nEpoch: 070/100 | Batch 000/469 | Gen/Dis Loss: 1.0647/0.6247\nEpoch: 070/100 | Batch 100/469 | Gen/Dis Loss: 0.8877/0.6254\nEpoch: 070/100 | Batch 200/469 | Gen/Dis Loss: 0.8151/0.6462\nEpoch: 070/100 | Batch 300/469 | Gen/Dis Loss: 0.8807/0.6079\nEpoch: 070/100 | Batch 400/469 | Gen/Dis Loss: 0.9690/0.6432\nTime elapsed: 13.34 min\nEpoch: 071/100 | Batch 000/469 | Gen/Dis Loss: 0.8764/0.6338\nEpoch: 071/100 | Batch 100/469 | Gen/Dis Loss: 0.9052/0.5937\nEpoch: 071/100 | Batch 200/469 | Gen/Dis Loss: 1.0023/0.5866\nEpoch: 071/100 | Batch 300/469 | Gen/Dis Loss: 0.7945/0.6066\nEpoch: 071/100 | Batch 400/469 | Gen/Dis Loss: 0.8566/0.6092\nTime elapsed: 13.57 min\nEpoch: 072/100 | Batch 000/469 | Gen/Dis Loss: 1.0826/0.5474\nEpoch: 072/100 | Batch 100/469 | Gen/Dis Loss: 0.9077/0.6232\nEpoch: 072/100 | Batch 200/469 | Gen/Dis Loss: 1.0860/0.6291\nEpoch: 072/100 | Batch 
300/469 | Gen/Dis Loss: 0.9009/0.6444\nEpoch: 072/100 | Batch 400/469 | Gen/Dis Loss: 0.9546/0.6265\nTime elapsed: 13.82 min\nEpoch: 073/100 | Batch 000/469 | Gen/Dis Loss: 0.9126/0.5977\nEpoch: 073/100 | Batch 100/469 | Gen/Dis Loss: 1.0169/0.6357\nEpoch: 073/100 | Batch 200/469 | Gen/Dis Loss: 0.8760/0.6333\nEpoch: 073/100 | Batch 300/469 | Gen/Dis Loss: 0.8972/0.5929\nEpoch: 073/100 | Batch 400/469 | Gen/Dis Loss: 0.9535/0.6609\nTime elapsed: 14.05 min\nEpoch: 074/100 | Batch 000/469 | Gen/Dis Loss: 0.8905/0.6017\nEpoch: 074/100 | Batch 100/469 | Gen/Dis Loss: 0.9040/0.6458\nEpoch: 074/100 | Batch 200/469 | Gen/Dis Loss: 0.8277/0.6424\nEpoch: 074/100 | Batch 300/469 | Gen/Dis Loss: 1.6138/0.5738\nEpoch: 074/100 | Batch 400/469 | Gen/Dis Loss: 0.9943/0.6718\nTime elapsed: 14.31 min\nEpoch: 075/100 | Batch 000/469 | Gen/Dis Loss: 1.0839/0.6357\nEpoch: 075/100 | Batch 100/469 | Gen/Dis Loss: 0.8858/0.6300\nEpoch: 075/100 | Batch 200/469 | Gen/Dis Loss: 0.9034/0.6045\nEpoch: 075/100 | Batch 300/469 | Gen/Dis Loss: 0.8336/0.5991\nEpoch: 075/100 | Batch 400/469 | Gen/Dis Loss: 0.8414/0.6642\nTime elapsed: 14.54 min\nEpoch: 076/100 | Batch 000/469 | Gen/Dis Loss: 0.8422/0.6506\nEpoch: 076/100 | Batch 100/469 | Gen/Dis Loss: 0.8560/0.5884\nEpoch: 076/100 | Batch 200/469 | Gen/Dis Loss: 0.8066/0.6215\nEpoch: 076/100 | Batch 300/469 | Gen/Dis Loss: 0.7987/0.6537\nEpoch: 076/100 | Batch 400/469 | Gen/Dis Loss: 0.8784/0.5854\nTime elapsed: 14.82 min\nEpoch: 077/100 | Batch 000/469 | Gen/Dis Loss: 0.9845/0.6067\nEpoch: 077/100 | Batch 100/469 | Gen/Dis Loss: 0.8514/0.6269\nEpoch: 077/100 | Batch 200/469 | Gen/Dis Loss: 1.0448/0.6637\nEpoch: 077/100 | Batch 300/469 | Gen/Dis Loss: 0.9325/0.5811\nEpoch: 077/100 | Batch 400/469 | Gen/Dis Loss: 0.9169/0.5837\nTime elapsed: 15.08 min\nEpoch: 078/100 | Batch 000/469 | Gen/Dis Loss: 0.9746/0.6398\nEpoch: 078/100 | Batch 100/469 | Gen/Dis Loss: 0.8518/0.6321\nEpoch: 078/100 | Batch 200/469 | Gen/Dis Loss: 0.9485/0.5925\nEpoch: 
078/100 | Batch 300/469 | Gen/Dis Loss: 0.8646/0.6530\nEpoch: 078/100 | Batch 400/469 | Gen/Dis Loss: 0.8851/0.6056\nTime elapsed: 15.33 min\nEpoch: 079/100 | Batch 000/469 | Gen/Dis Loss: 0.9215/0.6184\nEpoch: 079/100 | Batch 100/469 | Gen/Dis Loss: 0.8766/0.5987\nEpoch: 079/100 | Batch 200/469 | Gen/Dis Loss: 0.9273/0.6339\nEpoch: 079/100 | Batch 300/469 | Gen/Dis Loss: 1.0428/0.6016\nEpoch: 079/100 | Batch 400/469 | Gen/Dis Loss: 0.8676/0.6156\nTime elapsed: 15.63 min\nEpoch: 080/100 | Batch 000/469 | Gen/Dis Loss: 0.8753/0.6354\nEpoch: 080/100 | Batch 100/469 | Gen/Dis Loss: 0.7689/0.6156\nEpoch: 080/100 | Batch 200/469 | Gen/Dis Loss: 0.9524/0.5874\nEpoch: 080/100 | Batch 300/469 | Gen/Dis Loss: 1.1452/0.5870\nEpoch: 080/100 | Batch 400/469 | Gen/Dis Loss: 0.9418/0.5921\nTime elapsed: 15.87 min\nEpoch: 081/100 | Batch 000/469 | Gen/Dis Loss: 0.9341/0.5982\nEpoch: 081/100 | Batch 100/469 | Gen/Dis Loss: 0.9412/0.6336\nEpoch: 081/100 | Batch 200/469 | Gen/Dis Loss: 0.8976/0.6561\nEpoch: 081/100 | Batch 300/469 | Gen/Dis Loss: 0.8531/0.6544\nEpoch: 081/100 | Batch 400/469 | Gen/Dis Loss: 0.8658/0.6275\nTime elapsed: 16.14 min\nEpoch: 082/100 | Batch 000/469 | Gen/Dis Loss: 0.8624/0.6454\nEpoch: 082/100 | Batch 100/469 | Gen/Dis Loss: 0.8182/0.5911\nEpoch: 082/100 | Batch 200/469 | Gen/Dis Loss: 0.8794/0.6080\nEpoch: 082/100 | Batch 300/469 | Gen/Dis Loss: 0.9631/0.6111\nEpoch: 082/100 | Batch 400/469 | Gen/Dis Loss: 1.0426/0.6404\nTime elapsed: 16.39 min\nEpoch: 083/100 | Batch 000/469 | Gen/Dis Loss: 1.0449/0.6439\nEpoch: 083/100 | Batch 100/469 | Gen/Dis Loss: 0.9290/0.6319\nEpoch: 083/100 | Batch 200/469 | Gen/Dis Loss: 0.8768/0.6186\nEpoch: 083/100 | Batch 300/469 | Gen/Dis Loss: 0.8202/0.6050\nEpoch: 083/100 | Batch 400/469 | Gen/Dis Loss: 0.8840/0.6135\nTime elapsed: 16.63 min\nEpoch: 084/100 | Batch 000/469 | Gen/Dis Loss: 1.0632/0.6157\nEpoch: 084/100 | Batch 100/469 | Gen/Dis Loss: 0.8863/0.5954\nEpoch: 084/100 | Batch 200/469 | Gen/Dis Loss: 
1.0618/0.6428\nEpoch: 084/100 | Batch 300/469 | Gen/Dis Loss: 1.0627/0.5874\nEpoch: 084/100 | Batch 400/469 | Gen/Dis Loss: 0.9114/0.6118\nTime elapsed: 16.90 min\nEpoch: 085/100 | Batch 000/469 | Gen/Dis Loss: 0.8453/0.6248\nEpoch: 085/100 | Batch 100/469 | Gen/Dis Loss: 1.0609/0.6182\nEpoch: 085/100 | Batch 200/469 | Gen/Dis Loss: 0.8899/0.6170\nEpoch: 085/100 | Batch 300/469 | Gen/Dis Loss: 0.9211/0.6023\nEpoch: 085/100 | Batch 400/469 | Gen/Dis Loss: 0.8161/0.6840\nTime elapsed: 17.21 min\nEpoch: 086/100 | Batch 000/469 | Gen/Dis Loss: 0.9190/0.5845\nEpoch: 086/100 | Batch 100/469 | Gen/Dis Loss: 1.0762/0.6450\nEpoch: 086/100 | Batch 200/469 | Gen/Dis Loss: 1.0070/0.6302\nEpoch: 086/100 | Batch 300/469 | Gen/Dis Loss: 0.8805/0.6313\nEpoch: 086/100 | Batch 400/469 | Gen/Dis Loss: 0.8568/0.6320\nTime elapsed: 17.47 min\nEpoch: 087/100 | Batch 000/469 | Gen/Dis Loss: 0.9597/0.6527\nEpoch: 087/100 | Batch 100/469 | Gen/Dis Loss: 0.8664/0.6339\nEpoch: 087/100 | Batch 200/469 | Gen/Dis Loss: 1.0466/0.6181\nEpoch: 087/100 | Batch 300/469 | Gen/Dis Loss: 0.8645/0.6272\nEpoch: 087/100 | Batch 400/469 | Gen/Dis Loss: 0.8296/0.6125\nTime elapsed: 17.71 min\nEpoch: 088/100 | Batch 000/469 | Gen/Dis Loss: 0.8497/0.6134\nEpoch: 088/100 | Batch 100/469 | Gen/Dis Loss: 0.7984/0.6551\nEpoch: 088/100 | Batch 200/469 | Gen/Dis Loss: 0.7777/0.6737\nEpoch: 088/100 | Batch 300/469 | Gen/Dis Loss: 0.8157/0.6250\nEpoch: 088/100 | Batch 400/469 | Gen/Dis Loss: 0.7993/0.6446\nTime elapsed: 17.96 min\nEpoch: 089/100 | Batch 000/469 | Gen/Dis Loss: 0.8526/0.6219\nEpoch: 089/100 | Batch 100/469 | Gen/Dis Loss: 0.9565/0.6241\nEpoch: 089/100 | Batch 200/469 | Gen/Dis Loss: 1.0437/0.6488\nEpoch: 089/100 | Batch 300/469 | Gen/Dis Loss: 0.8082/0.6521\nEpoch: 089/100 | Batch 400/469 | Gen/Dis Loss: 0.9082/0.6187\nTime elapsed: 18.20 min\nEpoch: 090/100 | Batch 000/469 | Gen/Dis Loss: 0.8507/0.6127\nEpoch: 090/100 | Batch 100/469 | Gen/Dis Loss: 0.8370/0.6160\nEpoch: 090/100 | Batch 200/469 | 
Gen/Dis Loss: 0.8270/0.6310\nEpoch: 090/100 | Batch 300/469 | Gen/Dis Loss: 0.9313/0.6230\nEpoch: 090/100 | Batch 400/469 | Gen/Dis Loss: 0.9462/0.6391\nTime elapsed: 18.46 min\nEpoch: 091/100 | Batch 000/469 | Gen/Dis Loss: 0.9294/0.6189\nEpoch: 091/100 | Batch 100/469 | Gen/Dis Loss: 1.0533/0.6279\nEpoch: 091/100 | Batch 200/469 | Gen/Dis Loss: 0.9623/0.6491\nEpoch: 091/100 | Batch 300/469 | Gen/Dis Loss: 0.8521/0.6031\nEpoch: 091/100 | Batch 400/469 | Gen/Dis Loss: 0.8233/0.6487\nTime elapsed: 18.70 min\nEpoch: 092/100 | Batch 000/469 | Gen/Dis Loss: 0.9691/0.6357\nEpoch: 092/100 | Batch 100/469 | Gen/Dis Loss: 0.8876/0.6303\nEpoch: 092/100 | Batch 200/469 | Gen/Dis Loss: 0.9333/0.6201\nEpoch: 092/100 | Batch 300/469 | Gen/Dis Loss: 0.8813/0.5981\nEpoch: 092/100 | Batch 400/469 | Gen/Dis Loss: 0.9026/0.6128\nTime elapsed: 18.94 min\nEpoch: 093/100 | Batch 000/469 | Gen/Dis Loss: 0.8874/0.6373\nEpoch: 093/100 | Batch 100/469 | Gen/Dis Loss: 0.8537/0.6204\nEpoch: 093/100 | Batch 200/469 | Gen/Dis Loss: 0.7982/0.6342\nEpoch: 093/100 | Batch 300/469 | Gen/Dis Loss: 0.9005/0.6010\nEpoch: 093/100 | Batch 400/469 | Gen/Dis Loss: 1.0532/0.6091\nTime elapsed: 19.20 min\nEpoch: 094/100 | Batch 000/469 | Gen/Dis Loss: 0.9877/0.6426\nEpoch: 094/100 | Batch 100/469 | Gen/Dis Loss: 0.8308/0.6501\nEpoch: 094/100 | Batch 200/469 | Gen/Dis Loss: 0.9217/0.6269\nEpoch: 094/100 | Batch 300/469 | Gen/Dis Loss: 0.9183/0.6632\nEpoch: 094/100 | Batch 400/469 | Gen/Dis Loss: 0.8859/0.6128\nTime elapsed: 19.46 min\nEpoch: 095/100 | Batch 000/469 | Gen/Dis Loss: 0.9032/0.6331\nEpoch: 095/100 | Batch 100/469 | Gen/Dis Loss: 0.8298/0.6976\nEpoch: 095/100 | Batch 200/469 | Gen/Dis Loss: 1.0004/0.6347\nEpoch: 095/100 | Batch 300/469 | Gen/Dis Loss: 0.9161/0.6169\nEpoch: 095/100 | Batch 400/469 | Gen/Dis Loss: 0.7622/0.6884\nTime elapsed: 19.71 min\nEpoch: 096/100 | Batch 000/469 | Gen/Dis Loss: 0.8816/0.5997\nEpoch: 096/100 | Batch 100/469 | Gen/Dis Loss: 0.9499/0.5969\nEpoch: 096/100 | Batch 
200/469 | Gen/Dis Loss: 0.8974/0.6214\nEpoch: 096/100 | Batch 300/469 | Gen/Dis Loss: 0.8853/0.6259\nEpoch: 096/100 | Batch 400/469 | Gen/Dis Loss: 0.8107/0.6027\nTime elapsed: 19.95 min\nEpoch: 097/100 | Batch 000/469 | Gen/Dis Loss: 0.9242/0.6189\nEpoch: 097/100 | Batch 100/469 | Gen/Dis Loss: 0.8917/0.6491\nEpoch: 097/100 | Batch 200/469 | Gen/Dis Loss: 0.8729/0.6375\nEpoch: 097/100 | Batch 300/469 | Gen/Dis Loss: 0.8848/0.5950\nEpoch: 097/100 | Batch 400/469 | Gen/Dis Loss: 0.8502/0.6296\nTime elapsed: 20.21 min\nEpoch: 098/100 | Batch 000/469 | Gen/Dis Loss: 0.9020/0.6453\nEpoch: 098/100 | Batch 100/469 | Gen/Dis Loss: 1.1077/0.5882\nEpoch: 098/100 | Batch 200/469 | Gen/Dis Loss: 0.9468/0.6364\nEpoch: 098/100 | Batch 300/469 | Gen/Dis Loss: 0.8636/0.6313\nEpoch: 098/100 | Batch 400/469 | Gen/Dis Loss: 0.9089/0.6911\nTime elapsed: 20.45 min\nEpoch: 099/100 | Batch 000/469 | Gen/Dis Loss: 0.9101/0.6386\nEpoch: 099/100 | Batch 100/469 | Gen/Dis Loss: 0.8036/0.6396\nEpoch: 099/100 | Batch 200/469 | Gen/Dis Loss: 0.9393/0.6060\nEpoch: 099/100 | Batch 300/469 | Gen/Dis Loss: 0.8776/0.6242\nEpoch: 099/100 | Batch 400/469 | Gen/Dis Loss: 0.8244/0.6278\nTime elapsed: 20.68 min\nEpoch: 100/100 | Batch 000/469 | Gen/Dis Loss: 0.8623/0.6496\nEpoch: 100/100 | Batch 100/469 | Gen/Dis Loss: 0.9965/0.5964\nEpoch: 100/100 | Batch 200/469 | Gen/Dis Loss: 0.8666/0.6306\nEpoch: 100/100 | Batch 300/469 | Gen/Dis Loss: 1.1555/0.6634\nEpoch: 100/100 | Batch 400/469 | Gen/Dis Loss: 0.9071/0.6545\nTime elapsed: 20.94 min\nTotal Training Time: 20.94 min\n"
]
],
[
[
"## Evaluation",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"ax1 = plt.subplot(1, 1, 1)\nax1.plot(range(len(gener_costs)), gener_costs, label='Generator loss')\nax1.plot(range(len(discr_costs)), discr_costs, label='Discriminator loss')\nax1.set_xlabel('Iterations')\nax1.set_ylabel('Loss')\nax1.legend()\n\n###################\n# Set scond x-axis\nax2 = ax1.twiny()\nnewlabel = list(range(NUM_EPOCHS+1))\niter_per_epoch = len(train_loader)\nnewpos = [e*iter_per_epoch for e in newlabel]\n\nax2.set_xticklabels(newlabel[::10])\nax2.set_xticks(newpos[::10])\n\nax2.xaxis.set_ticks_position('bottom')\nax2.xaxis.set_label_position('bottom')\nax2.spines['bottom'].set_position(('outward', 45))\nax2.set_xlabel('Epochs')\nax2.set_xlim(ax1.get_xlim())\n###################\n\nplt.show()",
"_____no_output_____"
],
[
"##########################\n### VISUALIZATION\n##########################\n\n\nmodel.eval()\n# Make new images\nz = torch.zeros((5, LATENT_DIM)).uniform_(-1.0, 1.0).to(device)\ngenerated_features = model.generator_forward(z)\nimgs = generated_features.view(-1, 28, 28)\n\nfig, axes = plt.subplots(nrows=1, ncols=5, figsize=(20, 2.5))\n\n\nfor i, ax in enumerate(axes):\n axes[i].imshow(imgs[i].to(torch.device('cpu')).detach(), cmap='binary')",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
eca8f5fc7bdaee5152961ddeb8566571cd3c5f15 | 5,493 | ipynb | Jupyter Notebook | text_file_information.ipynb | juhyunson/data_preprocessing | 3a94fbfa7b9b14e9b4fce8d78ef65861766b2768 | [
"MIT"
] | null | null | null | text_file_information.ipynb | juhyunson/data_preprocessing | 3a94fbfa7b9b14e9b4fce8d78ef65861766b2768 | [
"MIT"
] | null | null | null | text_file_information.ipynb | juhyunson/data_preprocessing | 3a94fbfa7b9b14e9b4fce8d78ef65861766b2768 | [
"MIT"
] | null | null | null | 26.665049 | 85 | 0.43965 | [
[
[
"### ํ
์คํธ ํ์ผ์ ์ ๋ณด ์ถ์ถ\n- ๋ฌธ์ฅ์\n- ๊ณต๋ฐฑ์\n- ๋จ์ด์-๊ณต๋ฐฑํฌํจ\n- ๋จ์ด์-๊ณต๋ฐฑ์ ์ธ\n- ๊ธ์์ \n\n### Get some information from the text file\n- The number of lines\n- The number of blanks\n- The number of words(including blanks)\n- The number of words\n- The numver of characters",
"_____no_output_____"
]
],
[
[
"import os \nimport sys \nimport pandas as pd",
"_____no_output_____"
],
[
"def text_data_information(file):\n count = 0\n spaces = 0 \n char_count = 0 \n word_count=0 \n word_count_without = 0\n\n for line_count, line in enumerate(file): \n count = line_count + 1\n\n spaces += line.count(' ') \n\n word_count += len(line.split(\" \"))\n\n split_line=line.split(\" \")\n without= [word for word in split_line if word!='\\n']\n word_count_without+=len((without))\n\n char_count += line.__len__() \n\n df=pd.DataFrame({'๋ฌธ์ฅ์':count, '๊ณต๋ฐฑ์':spaces, '๋จ์ด์-๊ณต๋ฐฑํฌํจ': word_count, \\\n '๋จ์ด์-๊ณต๋ฐฑ์ ์ธ':word_count_without, '๊ธ์์': char_count},\n index=['{}'.format(file.name[7:])])\n \n return df",
"_____no_output_____"
]
],
[
[
"#### ํด๋ ```data``` ์์ ๋ชจ๋ ํ์ผ์ ์ ์ฉ\n- ๋ชจ๋ ํ์ผ์ ์ ๋ณด ์ถ์ถ\n- ์ถ์ถํ ์ ๋ณด๋ ์์
ํ์ผ๋ก ์ ์ฅ\n\n#### Apply all files in folder ```data```\n- Get some information of all files from the folder\n- Save the information as an excel file at the folder ",
"_____no_output_____"
]
],
[
[
"listdir=os.listdir('./data/')\nall_information=pd.DataFrame()\ninformation_list=[]\n\nfor list in listdir[:3]:\n file = open(\"./data/{}\".format(list)) \n information=text_data_information(file)\n information_list.append(information)\n\nall_information=pd.concat(information_list,ignore_index=False)\nall_information.to_excel('./test_data_information.xlsx')",
"_____no_output_____"
],
[
"a=pd.read_excel('./test_data_information.xlsx')\na.head(2)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
eca909d353bc08e4c87ad5969c2cf702695a0952 | 16,805 | ipynb | Jupyter Notebook | how-to-use-azureml/automated-machine-learning/experimental/classification-credit-card-fraud-local-managed/auto-ml-classification-credit-card-fraud-local-managed.ipynb | 1789291/MachineLearningNotebooks | 700ab2d78264aa09f868a8379bf0ad514b17a34d | [
"MIT"
] | 3 | 2021-02-04T00:22:23.000Z | 2021-02-11T05:16:34.000Z | how-to-use-azureml/automated-machine-learning/experimental/classification-credit-card-fraud-local-managed/auto-ml-classification-credit-card-fraud-local-managed.ipynb | swanderz/MachineLearningNotebooks | aab15bc7a8302d3f62370d3939bdedd1af96adb0 | [
"MIT"
] | null | null | null | how-to-use-azureml/automated-machine-learning/experimental/classification-credit-card-fraud-local-managed/auto-ml-classification-credit-card-fraud-local-managed.ipynb | swanderz/MachineLearningNotebooks | aab15bc7a8302d3f62370d3939bdedd1af96adb0 | [
"MIT"
] | 3 | 2020-02-06T22:37:35.000Z | 2021-02-11T22:06:24.000Z | 40.011905 | 429 | 0.592681 | [
[
[
"Copyright (c) Microsoft Corporation. All rights reserved.\n\nLicensed under the MIT License.",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"# Automated Machine Learning\n_**Classification of credit card fraudulent transactions on local managed compute **_\n\n## Contents\n1. [Introduction](#Introduction)\n1. [Setup](#Setup)\n1. [Train](#Train)\n1. [Results](#Results)\n1. [Test](#Test)\n1. [Acknowledgements](#Acknowledgements)",
"_____no_output_____"
],
[
"## Introduction\n\nIn this example we use the associated credit card dataset to showcase how you can use AutoML for a simple classification problem. The goal is to predict if a credit card transaction is considered a fraudulent charge.\n\nThis notebook is using local managed compute to train the model.\n\nIf you are using an Azure Machine Learning Compute Instance, you are all set. Otherwise, go through the [configuration](../../../configuration.ipynb) notebook first if you haven't already to establish your connection to the AzureML Workspace. \n\nIn this notebook you will learn how to:\n1. Create an experiment using an existing workspace.\n2. Configure AutoML using `AutoMLConfig`.\n3. Train the model using local managed compute.\n4. Explore the results.\n5. Test the fitted model.",
"_____no_output_____"
],
[
"## Setup\n\nAs part of the setup you have already created an Azure ML `Workspace` object. For Automated ML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments.",
"_____no_output_____"
]
],
[
[
"import logging\n\nimport pandas as pd\n\nimport azureml.core\nfrom azureml.core.compute_target import LocalTarget\nfrom azureml.core.experiment import Experiment\nfrom azureml.core.workspace import Workspace\nfrom azureml.core.dataset import Dataset\nfrom azureml.train.automl import AutoMLConfig",
"_____no_output_____"
]
],
[
[
"This sample notebook may use features that are not available in previous versions of the Azure ML SDK.",
"_____no_output_____"
]
],
[
[
"print(\"This notebook was created using version 1.29.0 of the Azure ML SDK\")\nprint(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")",
"_____no_output_____"
],
[
"ws = Workspace.from_config()\n\n# choose a name for experiment\nexperiment_name = 'automl-local-managed'\n\nexperiment=Experiment(ws, experiment_name)\n\noutput = {}\noutput['Subscription ID'] = ws.subscription_id\noutput['Workspace'] = ws.name\noutput['Resource Group'] = ws.resource_group\noutput['Location'] = ws.location\noutput['Experiment Name'] = experiment.name\npd.set_option('display.max_colwidth', -1)\noutputDf = pd.DataFrame(data = output, index = [''])\noutputDf.T",
"_____no_output_____"
]
],
[
[
"### Determine if local docker is configured for Linux images\n\nLocal managed runs will leverage a Linux docker container to submit the run to. Due to this, the docker needs to be configured to use Linux containers.",
"_____no_output_____"
]
],
[
[
"# Check if Docker is installed and Linux containers are enabled\nimport subprocess\nfrom subprocess import CalledProcessError\ntry:\n assert subprocess.run(\"docker -v\", shell=True).returncode == 0, 'Local Managed runs require docker to be installed.'\n out = subprocess.check_output(\"docker system info\", shell=True).decode('ascii')\n assert \"OSType: linux\" in out, 'Docker engine needs to be configured to use Linux containers.' \\\n 'https://docs.docker.com/docker-for-windows/#switch-between-windows-and-linux-containers'\nexcept CalledProcessError as ex:\n raise Exception('Local Managed runs require docker to be installed.') from ex",
"_____no_output_____"
]
],
[
[
"# Data",
"_____no_output_____"
],
[
"### Load Data\n\nLoad the credit card dataset from a csv file containing both training features and labels. The features are inputs to the model, while the training labels represent the expected output of the model. Next, we'll split the data using random_split and extract the training data for the model.",
"_____no_output_____"
]
],
[
[
"data = \"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/creditcard.csv\"\ndataset = Dataset.Tabular.from_delimited_files(data)\ntraining_data, validation_data = dataset.random_split(percentage=0.8, seed=223)\nlabel_column_name = 'Class'",
"_____no_output_____"
]
],
[
[
"## Train\n\nInstantiate a AutoMLConfig object. This defines the settings and data used to run the experiment.\n\n|Property|Description|\n|-|-|\n|**task**|classification or regression|\n|**primary_metric**|This is the metric that you want to optimize. Classification supports the following primary metrics: <br><i>accuracy</i><br><i>AUC_weighted</i><br><i>average_precision_score_weighted</i><br><i>norm_macro_recall</i><br><i>precision_score_weighted</i>|\n|**enable_early_stopping**|Stop the run if the metric score is not showing improvement.|\n|**n_cross_validations**|Number of cross validation splits.|\n|**training_data**|Input dataset, containing both features and label column.|\n|**label_column_name**|The name of the label column.|\n|**enable_local_managed**|Enable the experimental local-managed scenario.|\n\n**_You can find more information about primary metrics_** [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train#primary-metric)",
"_____no_output_____"
]
],
[
[
"automl_settings = {\n \"n_cross_validations\": 3,\n \"primary_metric\": 'average_precision_score_weighted',\n \"enable_early_stopping\": True,\n \"experiment_timeout_hours\": 0.3, #for real scenarios we recommend a timeout of at least one hour \n \"verbosity\": logging.INFO,\n}\n\nautoml_config = AutoMLConfig(task = 'classification',\n debug_log = 'automl_errors.log',\n compute_target = LocalTarget(),\n enable_local_managed = True,\n training_data = training_data,\n label_column_name = label_column_name,\n **automl_settings\n )",
"_____no_output_____"
]
],
[
[
"Call the `submit` method on the experiment object and pass the run configuration. Depending on the data and the number of iterations this can run for a while. Validation errors and current status will be shown when setting `show_output=True` and the execution will be synchronous.",
"_____no_output_____"
]
],
[
[
"parent_run = experiment.submit(automl_config, show_output = True)",
"_____no_output_____"
],
[
"# If you need to retrieve a run that already started, use the following code\n#from azureml.train.automl.run import AutoMLRun\n#parent_run = AutoMLRun(experiment = experiment, run_id = '<replace with your run id>')",
"_____no_output_____"
],
[
"parent_run",
"_____no_output_____"
]
],
[
[
"## Results",
"_____no_output_____"
],
[
"#### Explain model\n\nAutomated ML models can be explained and visualized using the SDK Explainability library. ",
"_____no_output_____"
],
[
"## Analyze results\n\n### Retrieve the Best Child Run\n\nBelow we select the best pipeline from our iterations. The `get_best_child` method returns the best run. Overloads on `get_best_child` allow you to retrieve the best run for *any* logged metric.",
"_____no_output_____"
]
],
[
[
"best_run = parent_run.get_best_child()\n",
"_____no_output_____"
]
],
[
[
"## Test the fitted model\n\nNow that the model is trained, split the data in the same way the data was split for training (The difference here is the data is being split locally) and then run the test data through the trained model to get the predicted values.",
"_____no_output_____"
]
],
[
[
"X_test_df = validation_data.drop_columns(columns=[label_column_name])\ny_test_df = validation_data.keep_columns(columns=[label_column_name], validate=True)",
"_____no_output_____"
]
],
[
[
"#### Creating ModelProxy for submitting prediction runs to the training environment.\nWe will create a ModelProxy for the best child run, which will allow us to submit a run that does the prediction in the training environment. Unlike the local client, which can have different versions of some libraries, the training environment will have all the compatible libraries for the model already.",
"_____no_output_____"
]
],
[
[
"from azureml.train.automl.model_proxy import ModelProxy\nbest_model_proxy = ModelProxy(best_run)",
"_____no_output_____"
],
[
"# call the predict functions on the model proxy\ny_pred = best_model_proxy.predict(X_test_df).to_pandas_dataframe()\ny_pred",
"_____no_output_____"
]
],
[
[
"## Acknowledgements",
"_____no_output_____"
],
[
"This Credit Card fraud Detection dataset is made available under the Open Database License: http://opendatacommons.org/licenses/odbl/1.0/. Any rights in individual contents of the database are licensed under the Database Contents License: http://opendatacommons.org/licenses/dbcl/1.0/ and is available at: https://www.kaggle.com/mlg-ulb/creditcardfraud\n\n\nThe dataset has been collected and analysed during a research collaboration of Worldline and the Machine Learning Group (http://mlg.ulb.ac.be) of ULB (Universitรฦรยฉ Libre de Bruxelles) on big data mining and fraud detection. More details on current and past projects on related topics are available on https://www.researchgate.net/project/Fraud-detection-5 and the page of the DefeatFraud project\nPlease cite the following works: \nรยขรขโยฌรยข\tAndrea Dal Pozzolo, Olivier Caelen, Reid A. Johnson and Gianluca Bontempi. Calibrating Probability with Undersampling for Unbalanced Classification. In Symposium on Computational Intelligence and Data Mining (CIDM), IEEE, 2015\nรยขรขโยฌรยข\tDal Pozzolo, Andrea; Caelen, Olivier; Le Borgne, Yann-Ael; Waterschoot, Serge; Bontempi, Gianluca. Learned lessons in credit card fraud detection from a practitioner perspective, Expert systems with applications,41,10,4915-4928,2014, Pergamon\nรยขรขโยฌรยข\tDal Pozzolo, Andrea; Boracchi, Giacomo; Caelen, Olivier; Alippi, Cesare; Bontempi, Gianluca. Credit card fraud detection: a realistic modeling and a novel learning strategy, IEEE transactions on neural networks and learning systems,29,8,3784-3797,2018,IEEE\no\tDal Pozzolo, Andrea Adaptive Machine learning for credit card fraud detection ULB MLG PhD thesis (supervised by G. Bontempi)\nรยขรขโยฌรยข\tCarcillo, Fabrizio; Dal Pozzolo, Andrea; Le Borgne, Yann-Aรฦรยซl; Caelen, Olivier; Mazzer, Yannis; Bontempi, Gianluca. 
Scarff: a scalable framework for streaming credit card fraud detection with Spark, Information fusion,41, 182-194,2018,Elsevier\nรยขรขโยฌรยข\tCarcillo, Fabrizio; Le Borgne, Yann-Aรฦรยซl; Caelen, Olivier; Bontempi, Gianluca. Streaming active learning strategies for real-life credit card fraud detection: assessment and visualization, International Journal of Data Science and Analytics, 5,4,285-300,2018,Springer International Publishing",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
]
] |
eca90caa23ec75259444f0254a2eaed85b127ccf | 11,119 | ipynb | Jupyter Notebook | esppy/airport_realtime_detection.ipynb | Mentos05/SAS_AskTheExpert | 623d10f1822e2fbf398dc7c2af1c6abb5d832668 | [
"Apache-2.0"
] | null | null | null | esppy/airport_realtime_detection.ipynb | Mentos05/SAS_AskTheExpert | 623d10f1822e2fbf398dc7c2af1c6abb5d832668 | [
"Apache-2.0"
] | null | null | null | esppy/airport_realtime_detection.ipynb | Mentos05/SAS_AskTheExpert | 623d10f1822e2fbf398dc7c2af1c6abb5d832668 | [
"Apache-2.0"
] | null | null | null | 34.317901 | 147 | 0.500585 | [
[
[
"## Import SAS & Open Source Packages\nPostprocessing done in SAS Micro Analytics Services.",
"_____no_output_____"
]
],
[
[
"# Import Open Source packages\nimport threading\nimport time\nimport websocket\nimport json\nimport numpy as np\nimport base64\nimport cv2\n# Import SAS Packages\nimport esppy\n# Import helper\nfrom helper.helpers import create_scoring_schema",
"_____no_output_____"
]
],
[
[
"### Connect to SAS Event Stream Processing",
"_____no_output_____"
]
],
[
[
"esp = esppy.ESP(hostname='http://localhost:9900') # Connect to SAS ESP\nesp_project = esp.create_project('object_detection', n_threads=10) # Create a SAS ESP project\nesp_project.add_continuous_query('contquery') # Add a Query to project",
"_____no_output_____"
]
],
[
[
"### Add a Source Window",
"_____no_output_____"
]
],
[
[
"# Window: Video Capture\nvid_capture = esp.SourceWindow(\n autogen_key = True, # create key automatically\n schema = ('id*:int64', 'image:blob'), # window schema (columns)\n index_type = 'empty', # window index type\n insert_only = True, # window accepts inserts only\n pubsub = True # window can be used for publishing data\n)\n\nesp_project.windows['w_input_image'] = vid_capture # add source window to project\nesp_project",
"_____no_output_____"
]
],
[
[
"### Add a Resize Window",
"_____no_output_____"
]
],
[
[
"# Window: Video Resize\nvid_capture_resize = esp.CalculateWindow(\n schema = ('id*:int64','image:blob','_image_:blob'), # window schema\n algorithm = 'ImageProcessing', # algorithm to be used in calculation window\n function = 'resize', # function to use\n height = 416, # resize to height = 416 pixels\n width = 416, # resize to width = 416 pixels\n input_map = dict(imageInput='image'), # define column of the input image\n output_map = dict(imageOutput='_image_') # define column of the resized image\n)\n\nesp_project.windows['w_resize_image'] = vid_capture_resize # add resize window to project\n\nvid_capture.add_target(vid_capture_resize, role='data') # connect source window to resize window\nesp_project",
"_____no_output_____"
]
],
[
[
"### Add Model Request and Model Reader Windows",
"_____no_output_____"
]
],
[
[
"# Window: Model Reader\nmodel_reader = esp.ModelReaderWindow()\n\nesp_project.windows['w_read_model'] = model_reader # add window to project\n\n# Window: Model Request\nmodel_request = esp.SourceWindow(\n schema = ('req_id*:int64', 'req_key:string', 'req_val:string'), # window schema\n index_type = 'empty', # window index type\n insert_only = True # window accepts inserts only\n)\n\nesp_project.windows['w_request_model'] = model_request # add window to project\nmodel_request.add_target(model_reader, role='request') # connect request window to reader window\nesp_project",
"_____no_output_____"
]
],
[
[
"### Create a Scoring Window",
"_____no_output_____"
]
],
[
[
"# Window: Model Score\nmodel_score = esp.ScoreWindow(\n schema=create_scoring_schema(number_objects=20), # window schema created programmatically\n)\nmodel_score.add_offline_model(\n model_type='astore' # window receives an offline model (model is not trained during stream)\n)\nesp_project.windows['w_score_image'] = model_score # add window to project\nmodel_reader.add_target(model_score, role='model') # connect model window to score window\nvid_capture_resize.add_target(model_score, role='data') # connect resize window to score window\nesp_project",
"_____no_output_____"
]
],
[
[
"### Create an Annotation Window",
"_____no_output_____"
]
],
[
[
"annotator = esp.ProceduralWindow(\n schema=(create_scoring_schema(number_objects=20)+',image_annotated:blob'), # window schema created programmatically\n pubsub=True # window can be used for subscribing data\n)\n# Use annotator plugin to visualize bounding boxes\nannotator.add_cxx_plugin(source='astore', \n name='annotator_plugin', \n function='annotateImage')\nannotator.set_cxx_plugin_context(cxx_name=\"annotator_plugin\", \n cxx_function=\"initAnnotator\", \n coord_source='astore', \n coord_type='yolo', \n in_image_field='image', \n out_image_field='image_annotated', \n tracker_prefix='Object', \n frame_number_field='id', \n scale_x='1280.0', \n scale_y='720.0', \n offset_x='1.0', \n offset_y='1.0', \n out_format='jpg', \n show_text='true')\nannotator.set_finalized_callback(name='annotator_plugin', \n function='finalizeAnnotator')\nesp_project.windows['w_annotator'] = annotator\nmodel_score.add_target(annotator, role='data') # connect scoring window with annotator window\nesp_project",
"_____no_output_____"
]
],
[
[
"### Load the project",
"_____no_output_____"
]
],
[
[
"esp.load_project(esp_project)",
"_____no_output_____"
]
],
[
[
"### Publish the model into the ESP project",
"_____no_output_____"
]
],
[
[
"# Defines a simple CSV publisher\npub = model_request.create_publisher(\n blocksize=1, \n rate=0, \n pause=0, \n opcode='insert', \n format='csv'\n)\npub.send('i,n,1,\"usegpuesp\",\"1\"\\n') # Enable GPU usage\npub.send('i,n,2,\"ndevices\",\"1\"\\n') # Define number of used GPUs\npub.send('i,n,3,\"action\",\"load\"\\n') # Call load action\npub.send('i,n,4,\"type\",\"astore\"\\n') # model type is astore\npub.send('i,n,5,\"reference\",\"/data/notebooks/esppy/git_ignore/Tiny-Yolov2.astore\"\\n') # path to astore file\npub.send('i,n,6,,\\n')\npub.close()",
"_____no_output_____"
]
],
[
[
"## Define Video-Subscriber & -Publisher",
"_____no_output_____"
]
],
[
[
"def on_message(_, message):\n try:\n data = json.loads(message)\n imageBufferBase64 = data['events'][0]['event']['image_annotated']['image_annotated']\n nparr = np.frombuffer(base64.b64decode(imageBufferBase64), dtype=np.uint8)\n frame = cv2.imdecode(nparr, cv2.IMREAD_COLOR)\n cv2.imshow('frame',frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n exit()\n except Exception as e:\n print(e)\n \nannotator_subscriber = annotator.create_subscriber(format='json', mode='streaming', pagesize=1, on_message=on_message)\nannotator_subscriber.start()",
"_____no_output_____"
],
[
"video_publisher = vid_capture.create_publisher(format='csv', opcode='insert', pause=0, separator=',')\nvideo_file = 'git_ignore/turnaround.mp4'\nvideo_fps = 25\ncap = cv2.VideoCapture(video_file)\nprev = 0\nwhile True:\n time_elapsed = time.time() - prev\n if time_elapsed > 1./video_fps:\n prev = time.time()\n ret, frame = cap.read()\n frame = cv2.resize(frame, (1280, 720))\n _, buffer = cv2.imencode('.jpg', frame)\n encoded_string = base64.b64encode(buffer)\n strToSend = 'i, n, ' + str(10) + ',' + encoded_string.decode() + ',' + '\\n'\n video_publisher.send(strToSend)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
eca914aacf05e6cc7074d566907b212bded1a1e9 | 140,555 | ipynb | Jupyter Notebook | dataset/ctwdataset/CTW_data_munger.ipynb | jinmingteo/mmocr | 10c49cca6b010dd92b683ccc8aa4923598c882e0 | [
"Apache-2.0"
] | null | null | null | dataset/ctwdataset/CTW_data_munger.ipynb | jinmingteo/mmocr | 10c49cca6b010dd92b683ccc8aa4923598c882e0 | [
"Apache-2.0"
] | null | null | null | dataset/ctwdataset/CTW_data_munger.ipynb | jinmingteo/mmocr | 10c49cca6b010dd92b683ccc8aa4923598c882e0 | [
"Apache-2.0"
] | null | null | null | 93.891116 | 81,880 | 0.775647 | [
[
[
"import json\nimport mmcv\nimport matplotlib.pyplot as plt\nfrom mmocr.datasets.pipelines.crop import crop_img\n\njsonl_file = ['train.jsonl', 'val.jsonl', 'test_cls.jsonl']",
"_____no_output_____"
]
],
[
[
"## Convert to LineStrParse (Recognition)",
"_____no_output_____"
]
],
[
[
"def line_str_parse_file(input_file):\n new_dir = f\"line_str_parse/{input_file.replace('.jsonl', '')}\"\n img_dir = new_dir + '/images'\n mmcv.mkdir_or_exist(new_dir)\n mmcv.mkdir_or_exist(img_dir)\n f = open(input_file)\n gt_list = []\n for inputz in f:\n line = json.loads(inputz)\n img = mmcv.imread(f'../images/{line[\"file_name\"]}')\n num_of_label = 0\n for _ in line['annotations']:\n for ann in _:\n poly, text = ann['polygon'], ann['text']\n compressed_poly = []\n for item in poly:\n compressed_poly.append(item[0])\n compressed_poly.append(item[1])\n out = crop_img(img, compressed_poly)\n cropped_file_name = f'{line[\"file_name\"].strip(\".jpg\")}_{num_of_label}.jpg'\n mmcv.imwrite(out, f\"{img_dir}/{cropped_file_name}\")\n num_of_label += 1\n gt_list.append(f'{cropped_file_name} {text}' + \"\\n\")\n\n f = open(f'{new_dir}/labels.txt', 'w')\n f.writelines(gt_list)\n f.close()\n print ('done')\n return\n\nline_str_parse_file(jsonl_file[0]) ",
"done\n"
],
[
"# testing of the label\nlabel_f = open('line_str_parse/val/label.txt')\n#label_f = open('/mmocr/tests/data/ocr_toy_dataset/label.txt')\nfor line in label_f:\n x = line",
"_____no_output_____"
],
[
"x",
"_____no_output_____"
],
[
"sample_img = mmcv.imread('line_str_parse/val/images/3045638_11.jpg')\nplt.imshow(mmcv.bgr2rgb(sample_img))",
"_____no_output_____"
],
[
"import json\nfrom collections import defaultdict\nf = open('val.jsonl')\nattribute_counter = defaultdict(lambda: 0)\nann_counter = 0\nfor inputz in f:\n line = json.loads(inputz)\n for _ in line['annotations']:\n for ann in _:\n ann_counter += 1\n for item in ann['attributes']:\n attribute_counter[item] +=1",
"_____no_output_____"
],
[
"print (f\"attributes of annotations: {dict(attribute_counter)}\")\nprint (f\"total annotations: {ann_counter}\")",
"attributes of annotations: {'distorted': 16115, 'raised': 15741, 'occluded': 8250, 'wordart': 4587, 'bgcomplex': 12454, 'handwritten': 192}\ntotal annotations: 53884\n"
]
],
[
[
"## Convert to LineJsonParser (Detection)",
"_____no_output_____"
]
],
[
[
"def line_json_parse_file(input_file):\n new_dir = f\"line_json_parse/{input_file.replace('.jsonl', '')}\"\n mmcv.mkdir_or_exist(new_dir)\n f = open(input_file)\n gt_lines = []\n for inputz in f:\n line = json.loads(inputz)\n merged_annotations = []\n for _ in line['annotations']:\n for ann in _:\n compressed_poly = []\n for item in ann['polygon']:\n compressed_poly.append(item[0])\n compressed_poly.append(item[1])\n ann['segmentation'] = [compressed_poly]\n ann['category_id'] = 1\n ann['bbox'] = ann['adjusted_bbox']\n merged_annotations.append(ann)\n line['annotations'] = merged_annotations\n gt_lines.append(json.dumps(line)+'\\n')\n \n f = open(f'{new_dir}/labels.txt', 'w')\n f.writelines(gt_lines)\n f.close()\n print ('done')\n return\n\nline_json_parse_file(jsonl_file[0]) ",
"done\n"
],
[
"f = open('line_json_parse/train/labels.txt')\nfor item in f:\n x = json.loads(item)\n break",
"_____no_output_____"
],
[
"x",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
eca91e18c55ba7b9ac2f527cd5bc43cfaa7c67e4 | 547,612 | ipynb | Jupyter Notebook | Microsoft Malware Prediction/code/eda/01-5 EDA - Clustering Feature - AV products.ipynb | choco9966/kaggle | 253c089625c67f34dc8868d97842ecf9a479d617 | [
"MIT"
] | 36 | 2019-12-26T13:07:44.000Z | 2022-03-27T09:59:19.000Z | Microsoft Malware Prediction/code/eda/01-5 EDA - Clustering Feature - AV products.ipynb | min0355/Kaggle | f4a3b931e72c65cf398afb66997f9e155a52028e | [
"MIT"
] | null | null | null | Microsoft Malware Prediction/code/eda/01-5 EDA - Clustering Feature - AV products.ipynb | min0355/Kaggle | f4a3b931e72c65cf398afb66997f9e155a52028e | [
"MIT"
] | 8 | 2020-04-15T10:26:11.000Z | 2021-04-05T11:27:54.000Z | 40.129855 | 14,380 | 0.280666 | [
[
[
"# EDA - Clustering Feature - AV products\n\n* ๋ฐฑ์ ์ ํ์ ๋ํด ๋ถ์ํ ์์ !",
"_____no_output_____"
],
[
"## ๋ผ์ด๋ธ๋ฌ๋ฆฌ",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\nimport warnings\nimport gc\nwarnings.filterwarnings(\"ignore\")",
"_____no_output_____"
],
[
"pd.set_option('max_rows', 500)\npd.set_option('max_colwidth', 500)\npd.set_option('max_columns', 500)",
"_____no_output_____"
]
],
[
[
"## ๋ฐ์ดํฐ ๋ก๋",
"_____no_output_____"
]
],
[
[
"dtypes = {\n 'MachineIdentifier': 'object',\n 'ProductName': 'category',\n 'EngineVersion': 'category',\n 'AppVersion': 'category',\n 'AvSigVersion': 'category',\n 'IsBeta': 'int8',\n 'RtpStateBitfield': 'float16',\n 'IsSxsPassiveMode': 'int8',\n 'DefaultBrowsersIdentifier': 'float16',\n 'AVProductStatesIdentifier': 'float32',\n 'AVProductsInstalled': 'float16',\n 'AVProductsEnabled': 'float16',\n 'HasTpm': 'int8',\n 'CountryIdentifier': 'int16',\n 'CityIdentifier': 'float32',\n 'OrganizationIdentifier': 'float16',\n 'GeoNameIdentifier': 'float16',\n 'LocaleEnglishNameIdentifier': 'int8',\n 'Platform': 'category',\n 'Processor': 'category',\n 'OsVer': 'category',\n 'OsBuild': 'int16',\n 'OsSuite': 'int16',\n 'OsPlatformSubRelease': 'category',\n 'OsBuildLab': 'category',\n 'SkuEdition': 'category',\n 'IsProtected': 'float16',\n 'AutoSampleOptIn': 'int8',\n 'PuaMode': 'category',\n 'SMode': 'float16',\n 'IeVerIdentifier': 'float16',\n 'SmartScreen': 'category',\n 'Firewall': 'float16',\n 'UacLuaenable': 'float32',\n 'Census_MDC2FormFactor': 'category',\n 'Census_DeviceFamily': 'category',\n 'Census_OEMNameIdentifier': 'float16',\n 'Census_OEMModelIdentifier': 'float32',\n 'Census_ProcessorCoreCount': 'float16',\n 'Census_ProcessorManufacturerIdentifier': 'float16',\n 'Census_ProcessorModelIdentifier': 'float16',\n 'Census_ProcessorClass': 'category',\n 'Census_PrimaryDiskTotalCapacity': 'float32',\n 'Census_PrimaryDiskTypeName': 'category',\n 'Census_SystemVolumeTotalCapacity': 'float32',\n 'Census_HasOpticalDiskDrive': 'int8',\n 'Census_TotalPhysicalRAM': 'float32',\n 'Census_ChassisTypeName': 'category',\n 'Census_InternalPrimaryDiagonalDisplaySizeInInches': 'float16',\n 'Census_InternalPrimaryDisplayResolutionHorizontal': 'float16',\n 'Census_InternalPrimaryDisplayResolutionVertical': 'float16',\n 'Census_PowerPlatformRoleName': 'category',\n 'Census_InternalBatteryType': 'category',\n 'Census_InternalBatteryNumberOfCharges': 'float32',\n 'Census_OSVersion': 
'category',\n 'Census_OSArchitecture': 'category',\n 'Census_OSBranch': 'category',\n 'Census_OSBuildNumber': 'int16',\n 'Census_OSBuildRevision': 'int32',\n 'Census_OSEdition': 'category',\n 'Census_OSSkuName': 'category',\n 'Census_OSInstallTypeName': 'category',\n 'Census_OSInstallLanguageIdentifier': 'float16',\n 'Census_OSUILocaleIdentifier': 'int16',\n 'Census_OSWUAutoUpdateOptionsName': 'category',\n 'Census_IsPortableOperatingSystem': 'int8',\n 'Census_GenuineStateName': 'category',\n 'Census_ActivationChannel': 'category',\n 'Census_IsFlightingInternal': 'float16',\n 'Census_IsFlightsDisabled': 'float16',\n 'Census_FlightRing': 'category',\n 'Census_ThresholdOptIn': 'float16',\n 'Census_FirmwareManufacturerIdentifier': 'float16',\n 'Census_FirmwareVersionIdentifier': 'float32',\n 'Census_IsSecureBootEnabled': 'int8',\n 'Census_IsWIMBootEnabled': 'float16',\n 'Census_IsVirtualDevice': 'float16',\n 'Census_IsTouchEnabled': 'int8',\n 'Census_IsPenCapable': 'int8',\n 'Census_IsAlwaysOnAlwaysConnectedCapable': 'float16',\n 'Wdft_IsGamer': 'float16',\n 'Wdft_RegionIdentifier': 'float16',\n 'HasDetections': 'int8'\n }",
"_____no_output_____"
],
[
"%%time\ntrain = pd.read_csv('./data/train.csv', dtype=dtypes)\ntest = pd.read_csv('./data/test.csv', dtype=dtypes)",
"Wall time: 7min 48s\n"
],
[
"data = train.append(test)",
"_____no_output_____"
]
],
[
[
"## AV ๊ด๋ จ ํผ์ฒ",
"_____no_output_____"
],
[
"* ProductName\n* EngineVersion\n* AppVersion\n* AvSigVersion\n* AVProductStatesIdentifier\n* AVProductsInstalled\n* AVProductsEnabled\n* Platform\n* OsVer\n* OsBuild\n* OsSuite\n* OsPlatformSubRelease\n* OsBuildLab\n* IsProtected\n* Census_OSVersion\n* Census_OSArchitecture (์ญ์ )\n* Census_OSBranch (์ญ์ )\n* Census_OSBuildNumber\n* Census_OSBuildRevision",
"_____no_output_____"
],
[
"### AVProductStatesIdentifier\n* ID for the specific configuration of a user's antivirus software (์ฌ์ฉ์์ ๋ฐฑ์ ์ ํน์ ์ค์ ์ ๋ํ ๊ณ ์ ๊ฐ)\n* ๋ฐฑ์ ์์ฒด์ ๋ํ ID๋ ์๋ ๊ฒ์ผ๋ก ์ถ์ (๊ณ ์ ๊ฐ์๊ฐ 39832๊ฐ, ๋ฐ๋ฉด ๋ฐ์ด๋ฌ์ค ํ ํ์ ํตํด ํ์ธํ ์ ์๋ ๋ฐฑ์ ์ ๊ฐ์๋ ์ฝ 130 ๋ด์ธ)\n* ๊ทธ๋ ๋ค๋ฉด ๋ฐฑ์ ์ ํน์ ๋ฒ์ ๊น์ง ID๋ก ์นํํ์ง ์์์๊น?\n* ์ฌ๊ธฐ์ ์๊ฐํด๋ณผ์ ์ด ์๋์ฐ ๋ํ๋๋ฅผ AV ์ ํ์ผ๋ก ํฌํจํ์๊น? (์์์ ์ผ๋ก ์๋์ฐ ๋ํ๋์๋ AV ๊ธฐ๋ฅ์ด ์๊ธฐ ๋๋ฌธ์ ํฌํจํ๋ ๊ฒ์ด ๋ง์)\n* ๋๋ ํฌํจํ๋ค๊ณ ์๊ฐ\n > ๊ทผ๊ฑฐ1) \n - AVProductStatesIdentifier์ ๊ณ ์ ๊ฐ ๊ฐ์๋ฅผ ๋ณด๋ฉด 53447๊ฐ์ด 67%์ ๋ ์ฐจ์ง.\n - MS๊ฐ ์์งํ ๊ธฐ๊ณ๊ฐ ํน์ ํ์ฌ์ ๋ฐฑ์ ์ ์๋์ ์ผ๋ก ๋ง์ด ์ฌ์ฉํ ๊ฒ์ผ๋ก ์๊ฐํ๊ธฐ ์ด๋ ค์.\n - ๋, 53447์ธ์ ๋๋จธ์ง ๋ฐฑ์ ์ ๊ฐ์๊ฐ ํฌ๊ฒ ๋ค๋ฅด์ง ์์.\n - ๋ฐ๋ผ์, 53447๋ ๋ํํธ ๋ฐฑ์ (์๋์ฐ ๋ํ๋)๋ผ๊ณ ์๊ฐํ ์ ์์.\n\n* 29199 mse์ผ ํ๋ฅ ์ด ๋๋ค.\n* ์ด ์ปฌ๋ผ์ ๋ฐฑ์ ์ ์กฐํฉ๋ณ ๊ณ ์ ํ ์ธ๋ฑ์ค๋ฅผ ๊ฐ๋๋ค. (๋ํ๋ + ์นด์คํผ์คํค => ๊ณ ์ ์ธ๋ฑ์ค)",
"_____no_output_____"
]
],
[
[
"print(\"๊ณ ์ ๊ฐ์: \", len(data.AVProductStatesIdentifier.unique()))",
"๊ณ ์ ๊ฐ์: 39832\n"
],
[
"col = 'AVProductStatesIdentifier'\ntemp = data[col].value_counts(dropna=False).to_frame()\ntemp['rate'] = np.around(100 * data[col].value_counts(dropna=False) / data.shape[0], 2)\ntemp.sort_values('AVProductStatesIdentifier', ascending=False)",
"_____no_output_____"
],
[
"temp = temp.sort_values('AVProductStatesIdentifier', ascending=False)",
"_____no_output_____"
],
[
"idx = temp[temp.AVProductStatesIdentifier >= 1000]['AVProductStatesIdentifier'].index\nvalues = temp[temp.AVProductStatesIdentifier >= 1000]['AVProductStatesIdentifier'].values",
"_____no_output_____"
],
[
"t = {}\nfor i in range(len(list(idx))):\n t[idx[i]] = idx[i]",
"_____no_output_____"
],
[
"data.AVProductStatesIdentifier = data.AVProductStatesIdentifier.map(t).fillna(-1)",
"_____no_output_____"
]
],
[
[
"### AVProductsInstalled\n* ์ํฐ๋ฐ์ด๋ฌ์ค ์ ํ์ด ์ค์น๋๋๊ฐ?\n* ์ซ์๋ก ์ ๊ณต => ์ค์น๋ ๊ฐ์๊ฐ ์๋๊น?\n* Installed๊ฐ 1์ด๋ผ๋ ๊ฒ์ 1๊ฐ์ ๋ฐฑ์ ์ด ์ค์น ๋ผ ์๋ค. ์ฌ๊ธฐ์ ์๊ฐํด๋ณผ ์ ์ ์ด๊ฒ ์๋์ฐ ๋ํ๋๋? ์ฆ, ์๋์ฐ ๋ํ๋๊ฐ ์๋ ์ํ๋ก ๋ค๋ฅธ ๋ฐฑ์ ์ ์ค์นํ๋ฉด ๋ช๊ฐ๋ก ์กํ๊น?\n* 3๊ฐ์ ์ปฌ๋ผ ๋ชจ๋ NaN์ ๋น์จ์ด ๊ฐ์. ์ฆ ๋์ผํ NaN Value",
"_____no_output_____"
]
],
[
[
"col = 'AVProductsInstalled'\ntemp = data[col].value_counts(dropna=False).to_frame()\ntemp['rate'] = np.around(100 * data[col].value_counts(dropna=False) / data.shape[0], 2)\ntemp.sort_values('AVProductsInstalled', ascending=False)",
"_____no_output_____"
],
[
"data[data.AVProductsInstalled == 1].groupby(['AVProductStatesIdentifier', 'AVProductsInstalled']).size()",
"_____no_output_____"
],
[
"data[data.AVProductsInstalled == 1].groupby(['ProductName', 'AVProductStatesIdentifier']).size()",
"_____no_output_____"
],
[
"data[data.HasDetections.isna()].index",
"_____no_output_____"
],
[
"data.HasDetections.value_counts(dropna=False)",
"_____no_output_____"
],
[
"train[train.AVProductsInstalled == 1].groupby(['AVProductStatesIdentifier', 'AVProductsInstalled']).size()",
"_____no_output_____"
],
[
"data['is_train'] = 1",
"_____no_output_____"
],
[
"data.loc[data.HasDetections.isna(), 'is_train'] = 0",
"_____no_output_____"
],
[
"train = data[data.is_train == 1]\ntest = data[data.is_train == 0]",
"_____no_output_____"
],
[
"values[1]",
"_____no_output_____"
],
[
"train[train.AVProductsInstalled.isna()].HasDetections.value_counts() / (22588 + 13633)",
"_____no_output_____"
]
],
[
[
"### AVProductsEnabled",
"_____no_output_____"
]
],
[
[
"col = 'AVProductsEnabled'\ntemp = data[col].value_counts(dropna=False).to_frame()\ntemp['rate'] = np.around(100 * data[col].value_counts(dropna=False) / data.shape[0], 2)\ntemp.sort_values('AVProductsEnabled', ascending=False)",
"_____no_output_____"
],
[
"data.ProductName.value_counts(dropna=False)",
"_____no_output_____"
],
[
"data[data.AVProductsEnabled == 0].AVProductStatesIdentifier.value_counts()",
"_____no_output_____"
],
[
"train.groupby(['AVProductsInstalled', 'AVProductsEnabled']).HasDetections.sum()",
"_____no_output_____"
],
[
"train.groupby(['AVProductsEnabled', 'AVProductStatesIdentifier']).size()",
"_____no_output_____"
],
[
"data.groupby(['AVProductsEnabled', 'IsProtected']).size()",
"_____no_output_____"
],
[
"data.groupby(['AVProductsEnabled', 'AVProductStatesIdentifier']).size()",
"_____no_output_____"
],
[
"data.groupby(['AVProductsInstalled', 'IsProtected']).size()",
"_____no_output_____"
],
[
"tt = data[data.AVProductsInstalled == 2].groupby(['AVProductStatesIdentifier'])['CountryIdentifier'].unique()\nttt = data[data.AVProductsInstalled == 2].groupby(['AVProductStatesIdentifier']).size()",
"_____no_output_____"
],
[
"pd.DataFrame({\"t\": tt.apply(lambda v: len(v)), \"tt\":(tt.apply(lambda v: len(v)) / ttt) * 100, \"ttt\":ttt})",
"_____no_output_____"
],
[
"col = 'AVProductStatesIdentifier'\ntable = train.groupby(col)['HasDetections'].sum().to_frame()\ntable['cnt'] = train.groupby(col)['HasDetections'].count()\ntable['rate'] = np.around((table.HasDetections / table.cnt) * 100, 2)\ntable.sort_values('HasDetections', ascending=False)",
"_____no_output_____"
],
[
"a = data.groupby(['IsProtected', 'AVProductStatesIdentifier']).size()",
"_____no_output_____"
],
[
"tt = set(a.loc[0].index)",
"_____no_output_____"
],
[
"ttt = set(a.loc[1].index)",
"_____no_output_____"
],
[
"tt.intersection(ttt)",
"_____no_output_____"
],
[
"a = a.to_frame()",
"_____no_output_____"
],
[
"a.apply(lambda v: v[0],axis=1)#np.sum(train[train.AVProductStatesIdentifier == v[0]].HasDetections) / len(train[train.AVProductStatesIdentifier == v[0]].HasDetections), axis=1)",
"_____no_output_____"
],
[
"a = data.groupby(['IsProtected', 'AVProductsInstalled', 'AVProductsEnabled', 'AVProductStatesIdentifier']).size()",
"_____no_output_____"
],
[
"a = a.to_frame()",
"_____no_output_____"
],
[
"a.columns.tolist()",
"_____no_output_____"
],
[
"for v in list(a.index.get_level_values(3)):\n result.append(np.sum(train[train.AVProductStatesIdentifier == v].HasDetections) / len(train[train.AVProductStatesIdentifier == v].HasDetections))",
"_____no_output_____"
],
[
"a['rate'] = result",
"_____no_output_____"
],
[
"a",
"_____no_output_____"
],
[
"data[data.AVProductStatesIdentifier != 53447.0].HasDetections.value_counts()",
"_____no_output_____"
],
[
"a.loc[0,1]",
"_____no_output_____"
],
[
"result = []",
"_____no_output_____"
],
[
"t = list(a.loc[0].index) + list(a.loc[1].index)",
"_____no_output_____"
],
[
"for v in t:\n result.append(np.sum(train[train.AVProductStatesIdentifier == v].HasDetections) / len(train[train.AVProductStatesIdentifier == v].HasDetections))",
"_____no_output_____"
],
[
"a['rate'] = result",
"_____no_output_____"
],
[
"a",
"_____no_output_____"
],
[
"import seaborn as sns",
"_____no_output_____"
],
[
"sns.distplot(a.loc[0].rate)",
"_____no_output_____"
],
[
"sns.distplot(a.loc[1].rate)",
"_____no_output_____"
],
[
"a.loc.apply(lambda v: print(v))",
"_____no_output_____"
],
[
"av = 9471 \nnp.sum(train[train.AVProductStatesIdentifier == av].HasDetections) / len(train[train.AVProductStatesIdentifier == av].HasDetections)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
eca96a6249a4f4d31e311d30da6a1499a993fa6a | 41,765 | ipynb | Jupyter Notebook | examples/TCN-examples.ipynb | guillaumeraille/darts | b82b5109e19917342349b98d975ae2518de84a28 | [
"Apache-2.0"
] | null | null | null | examples/TCN-examples.ipynb | guillaumeraille/darts | b82b5109e19917342349b98d975ae2518de84a28 | [
"Apache-2.0"
] | null | null | null | examples/TCN-examples.ipynb | guillaumeraille/darts | b82b5109e19917342349b98d975ae2518de84a28 | [
"Apache-2.0"
] | null | null | null | 240.028736 | 37,692 | 0.928074 | [
[
[
"# Temporal Convolutional Network\nIn this notebook, we show an example of how TCNs can be used with darts.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom darts import TimeSeries\nfrom darts.models.tcn_model import TCNModel\nfrom darts.preprocessing import ScalerWrapper\nfrom darts.metrics import mape\nfrom darts.utils.missing_values import auto_fillna\nfrom darts.backtesting import backtest_forecasting",
"_____no_output_____"
],
[
"# Read data:\ndf = pd.read_csv('AirPassengers.csv', delimiter=\",\")\nts = TimeSeries.from_dataframe(df, 'Month', '#Passengers')\n\n# Create training and validation sets:\ntrain, val = ts.split_after(pd.Timestamp('19580801'))",
"_____no_output_____"
],
[
"transformer = ScalerWrapper()\ntrain_transformed = transformer.fit_transform(train)\nval_transformed = transformer.transform(val)\nts_transformed = transformer.transform(ts)",
"_____no_output_____"
],
[
"my_model = TCNModel(\n n_epochs=1000, \n input_length=20, \n output_length=5, \n dropout=0.1, \n dilation_base=2, \n weight_norm=True,\n kernel_size=5,\n num_filters=3\n)",
"[2020-06-16 12:11:34,988] INFO | darts.models.tcn_model | Number of layers chosen: 2\nINFO:darts.models.tcn_model:Number of layers chosen: 2\n"
],
[
"my_model.fit(train_transformed, val_series=val_transformed, verbose=True)",
"_____no_output_____"
],
[
"def eval_model(model):\n pred_series = model.predict(n=26)\n print(len(pred_series))\n plt.figure(figsize=(8,5))\n ts_transformed.plot(label='actual')\n pred_series.plot(label='forecast')\n plt.title('MAPE: {}'.format(mape(pred_series.slice_intersect(val_transformed), val_transformed)))\n plt.legend();\n \neval_model(my_model)",
"26\n"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.