hexsha (stringlengths 40-40) | size (int64 6-14.9M) | ext (stringclasses 1 value) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 6-260) | max_stars_repo_name (stringlengths 6-119) | max_stars_repo_head_hexsha (stringlengths 40-41) | max_stars_repo_licenses (list) | max_stars_count (int64 1-191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24-24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24-24 ⌀) | max_issues_repo_path (stringlengths 6-260) | max_issues_repo_name (stringlengths 6-119) | max_issues_repo_head_hexsha (stringlengths 40-41) | max_issues_repo_licenses (list) | max_issues_count (int64 1-67k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24-24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24-24 ⌀) | max_forks_repo_path (stringlengths 6-260) | max_forks_repo_name (stringlengths 6-119) | max_forks_repo_head_hexsha (stringlengths 40-41) | max_forks_repo_licenses (list) | max_forks_count (int64 1-105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24-24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24-24 ⌀) | avg_line_length (float64 2-1.04M) | max_line_length (int64 2-11.2M) | alphanum_fraction (float64 0-1) | cells (list) | cell_types (list) | cell_type_groups (list) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ec66d8567b6734a812af00050df5ca08a9d28562 | 54,641 | ipynb | Jupyter Notebook | Deep Learning/Tensorflow/House Pricing.ipynb | shkhaider2015/PIAIC-QUARTER-2 | 2b6ef1c8d75f9f52b9da8e735751f5f80c76b227 | [
"Unlicense"
]
| null | null | null | Deep Learning/Tensorflow/House Pricing.ipynb | shkhaider2015/PIAIC-QUARTER-2 | 2b6ef1c8d75f9f52b9da8e735751f5f80c76b227 | [
"Unlicense"
]
| null | null | null | Deep Learning/Tensorflow/House Pricing.ipynb | shkhaider2015/PIAIC-QUARTER-2 | 2b6ef1c8d75f9f52b9da8e735751f5f80c76b227 | [
"Unlicense"
]
| null | null | null | 48.397697 | 81 | 0.34615 | [
[
[
"from tensorflow import keras\nimport numpy as np",
"_____no_output_____"
],
[
"model = keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])])\nmodel.compile(optimizer='sgd', loss='mean_squared_error')",
"_____no_output_____"
],
[
"xs = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], dtype=float)\nys = np.array([1.0, 1.5, 2.0, 2.5, 3.0, 3.5], dtype=float)\nmodel.fit(xs, ys, epochs=500)",
"Train on 6 samples\nEpoch 1/500\n6/6 [==============================] - 1s 162ms/sample - loss: 16.0051\nEpoch 2/500\n6/6 [==============================] - 0s 0s/sample - loss: 7.4615\nEpoch 3/500\n6/6 [==============================] - 0s 0s/sample - loss: 3.5069\nEpoch 4/500\n6/6 [==============================] - 0s 666us/sample - loss: 1.6762\nEpoch 5/500\n6/6 [==============================] - 0s 668us/sample - loss: 0.8285\nEpoch 6/500\n6/6 [==============================] - 0s 665us/sample - loss: 0.4358\nEpoch 7/500\n6/6 [==============================] - 0s 668us/sample - loss: 0.2536\nEpoch 8/500\n6/6 [==============================] - 0s 665us/sample - loss: 0.1689\nEpoch 9/500\n6/6 [==============================] - 0s 667us/sample - loss: 0.1294\nEpoch 10/500\n6/6 [==============================] - 0s 667us/sample - loss: 0.1107\nEpoch 11/500\n6/6 [==============================] - 0s 665us/sample - loss: 0.1017\nEpoch 12/500\n6/6 [==============================] - 0s 0s/sample - loss: 0.0971\nEpoch 13/500\n6/6 [==============================] - 0s 666us/sample - loss: 0.0947\nEpoch 14/500\n6/6 [==============================] - 0s 666us/sample - loss: 0.0932\nEpoch 15/500\n6/6 [==============================] - 0s 667us/sample - loss: 0.0921\nEpoch 16/500\n6/6 [==============================] - 0s 0s/sample - loss: 0.0913\nEpoch 17/500\n6/6 [==============================] - 0s 0s/sample - loss: 0.0905\nEpoch 18/500\n6/6 [==============================] - 0s 0s/sample - loss: 0.0898\nEpoch 19/500\n6/6 [==============================] - 0s 0s/sample - loss: 0.0891\nEpoch 20/500\n6/6 [==============================] - 0s 0s/sample - loss: 0.0885\nEpoch 21/500\n6/6 [==============================] - 0s 0s/sample - loss: 0.0878\nEpoch 22/500\n6/6 [==============================] - 0s 668us/sample - loss: 0.0872\nEpoch 23/500\n6/6 [==============================] - 0s 0s/sample - loss: 0.0866\nEpoch 24/500\n6/6 [==============================] - 0s 0s/sample - loss: 0.0859\nEpoch 25/500\n6/6 [==============================] - 0s 667us/sample - loss: 0.0853\nEpoch 26/500\n6/6 [==============================] - 0s 0s/sample - loss: 0.0847\nEpoch 27/500\n6/6 [==============================] - 0s 0s/sample - loss: 0.0841\nEpoch 28/500\n6/6 [==============================] - 0s 666us/sample - loss: 0.0835\nEpoch 29/500\n6/6 [==============================] - 0s 666us/sample - loss: 0.0828\nEpoch 30/500\n6/6 [==============================] - 0s 665us/sample - loss: 0.0822\nEpoch 31/500\n6/6 [==============================] - 0s 666us/sample - loss: 0.0816\nEpoch 32/500\n6/6 [==============================] - 0s 667us/sample - loss: 0.0810\nEpoch 33/500\n6/6 [==============================] - 0s 668us/sample - loss: 0.0805\nEpoch 34/500\n6/6 [==============================] - 0s 664us/sample - loss: 0.0799\nEpoch 35/500\n6/6 [==============================] - 0s 668us/sample - loss: 0.0793\nEpoch 36/500\n6/6 [==============================] - 0s 666us/sample - loss: 0.0787\nEpoch 37/500\n6/6 [==============================] - 0s 665us/sample - loss: 0.0781\nEpoch 38/500\n6/6 [==============================] - 0s 667us/sample - loss: 0.0776\nEpoch 39/500\n6/6 [==============================] - 0s 666us/sample - loss: 0.0770\nEpoch 40/500\n6/6 [==============================] - 0s 667us/sample - loss: 0.0764\nEpoch 41/500\n6/6 [==============================] - 0s 0s/sample - loss: 0.0759\nEpoch 42/500\n6/6 [==============================] - 0s 0s/sample - loss: 0.0753\nEpoch 
43/500\n6/6 [==============================] - 0s 667us/sample - loss: 0.0748\nEpoch 44/500\n6/6 [==============================] - 0s 666us/sample - loss: 0.0742\nEpoch 45/500\n6/6 [==============================] - 0s 0s/sample - loss: 0.0737\nEpoch 46/500\n6/6 [==============================] - 0s 668us/sample - loss: 0.0732\nEpoch 47/500\n6/6 [==============================] - 0s 664us/sample - loss: 0.0726\nEpoch 48/500\n6/6 [==============================] - 0s 667us/sample - loss: 0.0721\nEpoch 49/500\n6/6 [==============================] - 0s 668us/sample - loss: 0.0716\nEpoch 50/500\n6/6 [==============================] - 0s 665us/sample - loss: 0.0711\nEpoch 51/500\n6/6 [==============================] - 0s 0s/sample - loss: 0.0705\nEpoch 52/500\n6/6 [==============================] - 0s 0s/sample - loss: 0.0700\nEpoch 53/500\n6/6 [==============================] - 0s 665us/sample - loss: 0.0695\nEpoch 54/500\n6/6 [==============================] - 0s 666us/sample - loss: 0.0690\nEpoch 55/500\n6/6 [==============================] - 0s 667us/sample - loss: 0.0685\nEpoch 56/500\n6/6 [==============================] - 0s 0s/sample - loss: 0.0680\nEpoch 57/500\n6/6 [==============================] - 0s 667us/sample - loss: 0.0675\nEpoch 58/500\n6/6 [==============================] - 0s 666us/sample - loss: 0.0670\nEpoch 59/500\n6/6 [==============================] - 0s 666us/sample - loss: 0.0665\nEpoch 60/500\n6/6 [==============================] - 0s 0s/sample - loss: 0.0660\nEpoch 61/500\n6/6 [==============================] - 0s 0s/sample - loss: 0.0656\nEpoch 62/500\n6/6 [==============================] - 0s 0s/sample - loss: 0.0651\nEpoch 63/500\n6/6 [==============================] - 0s 668us/sample - loss: 0.0646\nEpoch 64/500\n6/6 [==============================] - 0s 0s/sample - loss: 0.0641\nEpoch 65/500\n6/6 [==============================] - 0s 669us/sample - loss: 0.0637\nEpoch 66/500\n6/6 [==============================] - 0s 664us/sample - loss: 0.0632\nEpoch 67/500\n6/6 [==============================] - 0s 668us/sample - loss: 0.0627\nEpoch 68/500\n6/6 [==============================] - 0s 667us/sample - loss: 0.0623\nEpoch 69/500\n6/6 [==============================] - 0s 664us/sample - loss: 0.0618\nEpoch 70/500\n6/6 [==============================] - 0s 669us/sample - loss: 0.0614\nEpoch 71/500\n6/6 [==============================] - 0s 665us/sample - loss: 0.0609\nEpoch 72/500\n6/6 [==============================] - 0s 666us/sample - loss: 0.0605\nEpoch 73/500\n6/6 [==============================] - 0s 666us/sample - loss: 0.0601\nEpoch 74/500\n6/6 [==============================] - 0s 668us/sample - loss: 0.0596\nEpoch 75/500\n6/6 [==============================] - 0s 665us/sample - loss: 0.0592\nEpoch 76/500\n6/6 [==============================] - 0s 669us/sample - loss: 0.0588\nEpoch 77/500\n6/6 [==============================] - 0s 667us/sample - loss: 0.0583\nEpoch 78/500\n6/6 [==============================] - 0s 666us/sample - loss: 0.0579\nEpoch 79/500\n6/6 [==============================] - 0s 665us/sample - loss: 0.0575\nEpoch 80/500\n6/6 [==============================] - 0s 667us/sample - loss: 0.0571\nEpoch 81/500\n6/6 [==============================] - 0s 668us/sample - loss: 0.0566\nEpoch 82/500\n6/6 [==============================] - 0s 667us/sample - loss: 0.0562\nEpoch 83/500\n6/6 [==============================] - 0s 0s/sample - loss: 0.0558\nEpoch 84/500\n6/6 [==============================] - 0s 0s/sample - loss: 0.0554\nEpoch 85/500\n6/6 
[==============================] - 0s 0s/sample - loss: 0.0550\nEpoch 86/500\n6/6 [==============================] - 0s 0s/sample - loss: 0.0546\nEpoch 87/500\n6/6 [==============================] - 0s 666us/sample - loss: 0.0542\nEpoch 88/500\n6/6 [==============================] - 0s 667us/sample - loss: 0.0538\nEpoch 89/500\n6/6 [==============================] - 0s 668us/sample - loss: 0.0534\nEpoch 90/500\n6/6 [==============================] - 0s 665us/sample - loss: 0.0530\nEpoch 91/500\n6/6 [==============================] - 0s 667us/sample - loss: 0.0526\nEpoch 92/500\n6/6 [==============================] - 0s 666us/sample - loss: 0.0523\nEpoch 93/500\n6/6 [==============================] - 0s 666us/sample - loss: 0.0519\nEpoch 94/500\n6/6 [==============================] - 0s 667us/sample - loss: 0.0515\nEpoch 95/500\n6/6 [==============================] - 0s 669us/sample - loss: 0.0511\nEpoch 96/500\n6/6 [==============================] - 0s 0s/sample - loss: 0.0508\nEpoch 97/500\n6/6 [==============================] - 0s 0s/sample - loss: 0.0504\nEpoch 98/500\n6/6 [==============================] - 0s 0s/sample - loss: 0.0500\nEpoch 99/500\n6/6 [==============================] - 0s 0s/sample - loss: 0.0497\nEpoch 100/500\n6/6 [==============================] - 0s 0s/sample - loss: 0.0493\n"
],
[
"print(model.predict([7.0]))",
"[[4.0741925]]\n"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code",
"code"
]
]
|
ec66eac78e5cc03c62c8586765ed1458b5994d97 | 862 | ipynb | Jupyter Notebook | template_container_human/labels/.ipynb_checkpoints/test-checkpoint.ipynb | lkondratova/Brainplot | 3c8a88c1995dedeaa5cbd88ee71499c7cf9c571d | [
"MIT"
]
| null | null | null | template_container_human/labels/.ipynb_checkpoints/test-checkpoint.ipynb | lkondratova/Brainplot | 3c8a88c1995dedeaa5cbd88ee71499c7cf9c571d | [
"MIT"
]
| null | null | null | template_container_human/labels/.ipynb_checkpoints/test-checkpoint.ipynb | lkondratova/Brainplot | 3c8a88c1995dedeaa5cbd88ee71499c7cf9c571d | [
"MIT"
]
| null | null | null | 16.901961 | 49 | 0.511601 | [
[
[
"from slice_151 import coordinates_00EBFF\nprint(len(coordinates_00EBFF))",
"942\n"
]
]
]
| [
"code"
]
| [
[
"code"
]
]
|
ec66f308a785ba3b8b7bb57e9c626fbc90b9afb9 | 25,290 | ipynb | Jupyter Notebook | Explore.ipynb | bnord01/alpha-zero-general-sogo | 76e08d2fc1f0d331018ee1b07bdd33128fbd6d83 | [
"MIT"
]
| null | null | null | Explore.ipynb | bnord01/alpha-zero-general-sogo | 76e08d2fc1f0d331018ee1b07bdd33128fbd6d83 | [
"MIT"
]
| null | null | null | Explore.ipynb | bnord01/alpha-zero-general-sogo | 76e08d2fc1f0d331018ee1b07bdd33128fbd6d83 | [
"MIT"
]
| null | null | null | 25.938462 | 572 | 0.346342 | [
[
[
"from MCTS import MCTS\n\nfrom sogo.SogoGame import SogoGame, display as display_board\nimport numpy as np\nfrom sogo.keras.NNet import NNetWrapper as NNet\nfrom Timer import Timer\n\nfrom Config import Config\nfrom sogo.keras.NNet import NNArgs\n# nnet players\nconfig = Config(\n load_folder_file=('./save/', 'mcts1024_eps40_iter17.h5'),\n num_mcts_sims=150,\n root_dirichlet_alpha=0.3,\n root_exploration_fraction=0.0,\n mcts_discount=0.9,\n pb_c_base=19652,\n pb_c_init=1.25)\nconfig.nnet_args = NNArgs(lr=0.001, \n batch_size=1024, \n epochs=20)\n\ngame = SogoGame(4)\n\ndef setup_board(plays,verbose=True): \n board = game.init_board()\n player = 1 \n for play in plays:\n board, player = game.next_state(board, player,play)\n if verbose:\n display_board(board)\n return board, player\n\ndef display_probs(pi, prefix=\"Probs\"):\n print(f\"{prefix}: {np.array2string(np.array(pi), precision=2, separator=',', suppress_small=True, max_line_width=200)}\")",
"_____no_output_____"
],
[
"from NeuralNet import NeuralNet\nfrom Game import Game\n\nclass NN(NeuralNet):\n def __init__(self,game:Game):\n self.game = game\n def predict(self, board):\n return np.ones(self.game.action_size())/self.game.action_size(), 0\n\n\ndummy_nn = NN(game)\ndummy_mcts = MCTS(game, dummy_nn, config)\n\ndef mcts_only_pred(plays, root=None, verbose=False):\n b,p = setup_board(plays, verbose = verbose)\n with Timer(\"MCTS only prediction\"):\n pi, root = dummy_mcts.get_action_prob(b, p, root)\n display_probs(pi)\n return root ",
"_____no_output_____"
],
[
"nn = NNet(game, config)\nnn.load_checkpoint(*(config.load_folder_file))\n\ndef nn_pred(plays, verbose=False):\n b,p = setup_board(plays, verbose = verbose)\n b = game.canonical_board(b,p)\n with Timer(\"NN prediction\"):\n pi, v = nn.predict(b)\n display_probs(pi,\"NNet\")\n \nmcts = MCTS(game, nn, config)\n\ndef mcts_player(x, player):\n canonical_board = game.canonical_board(x, player)\n return np.argmax(pi), root\n\ndef test_mcts(plays, expected, verbose=False):\n board, player = setup_board(plays, verbose = verbose) \n with Timer() as t:\n pi, root = mcts.get_action_prob(board,player)\n play = np.argmax(pi)\n new_board, new_player = game.next_state(board, player, play)\n if verbose:\n display_board(new_board) \n nn_pred(plays)\n display_probs(pi, \"MCTS\")\n valid = play in expected if isinstance(expected,list) else play == expected\n print(f\"MCTS made {'valid' if valid else 'bad' } play: {play} (expected: {expected}) in {t.interval:0.3f} sec\")\n return root\n \ndef mcts_pred(plays, root=None, verbose=False):\n b,p = setup_board(plays, verbose = verbose)\n with Timer(\"MCTS prediction\"):\n pi, root = mcts.get_action_prob(b, p, root)\n display_probs(pi,\"MCTS\")\n return root\n\n ",
"_____no_output_____"
],
[
"def pref_path(r):\n if len(r.children) == 0:\n return []\n _, a, c = max((c.visit_count, a, c) for a,c in r.children.items())\n return [a] + pref_path(c)",
"_____no_output_____"
]
],
[
[
"### Play with sure win, doesn't favor fast win.",
"_____no_output_____"
]
],
[
[
"pl = [x['i'] + 4*x['j'] for x in [{'i': 3, 'j': 0}, {'i': 0, 'j': 3}, {'i': 2, 'j': 0}, {'i': 2, 'j': 0}, {'i': 2, 'j': 0}, {'i': 1, 'j': 2}, {'i': 2, 'j': 0}, {'i': 0, 'j': 0}, {'i': 3, 'j': 0}, {'i': 3, 'j': 3}, {'i': 3, 'j': 1}, {'i': 3, 'j': 1}, {'i': 3, 'j': 1}, {'i': 3, 'j': 0}, {'i': 3, 'j': 0}, {'i': 2, 'j': 1}, {'i': 3, 'j': 1}, {'i': 3, 'j': 2}, {'i': 3, 'j': 2}, {'i': 3, 'j': 2}, {'i': 3, 'j': 3}, {'i': 2, 'j': 1}, {'i': 3, 'j': 3}, {'i': 3, 'j': 3}, {'i': 3, 'j': 2}, {'i': 0, 'j': 2}, {'i': 1, 'j': 3}]]",
"_____no_output_____"
],
[
"setup_board(pl)\nnn_pred(pl)\nr = mcts_pred(pl)",
"z3+--------+\n3 |- - - O |\n2 |- - - X |\n1 |- - - X |\n0 |- - X X |\nz3+--------+\n 0 1 2 3 \nz2+--------+\n3 |- - - X |\n2 |- - - O |\n1 |- - - X |\n0 |- - X O |\nz2+--------+\n 0 1 2 3 \nz1+--------+\n3 |- - - X |\n2 |- - - X |\n1 |- - O O |\n0 |- - O X |\nz1+--------+\n 0 1 2 3 \nz0+--------+\n3 |O X - O |\n2 |O O - O |\n1 |- - O X |\n0 |O - X X |\nz0+--------+\n 0 1 2 3 \n--\nNN prediction took 0.534 sec\nNNet: [0.01,0.03,0. ,0. ,0.12,0.02,0.11,0. ,0.3 ,0.03,0.07,0.07,0.03,0.04,0.18,0. ]\nMCTS prediction took 0.819 sec\nMCTS: [0. ,0. ,0. ,0. ,0.3 ,0. ,0.11,0. ,0.22,0. ,0.18,0. ,0. ,0.04,0.15,0. ]\n"
],
[
"mcts_only_pred(pl)",
"MCTS only prediction took 0.199 sec\nProbs: [0.01,0.01,0. ,0. ,0.44,0.01,0.01,0. ,0.01,0.01,0.44,0. ,0.01,0.01,0.01,0. ]\n"
],
[
"r.print(3)",
" -> v:-0.81 n:150 p:1.0 tp:1\n 4 -> v:1.0 n:44 p:0.13 tp:-1\n 6 -> v:0.88 n:16 p:0.12 tp:-1\n 0 -> v:-1.0 n:1 p:0.014 tp:1\n 4 -> v:-0.92 n:6 p:0.62 tp:1\n 5 -> v:-1.0 n:1 p:0.0098 tp:1\n 6 -> v:-1.0 n:1 p:0.11 tp:1\n 8 -> v:-1.0 n:1 p:0.07 tp:1\n 9 -> v:-1.0 n:1 p:0.057 tp:1\n 10 -> v:-1.0 n:1 p:0.07 tp:1\n 12 -> v:-1.0 n:1 p:0.0074 tp:1\n 13 -> v:-1.0 n:1 p:0.022 tp:1\n 14 -> v:-1.0 n:1 p:0.021 tp:1\n 8 -> v:0.79 n:33 p:0.32 tp:-1\n 0 -> v:-1.0 n:1 p:0.018 tp:1\n 1 -> v:-1.0 n:1 p:0.0043 tp:1\n 4 -> v:-0.82 n:13 p:0.35 tp:1\n 5 -> v:-1.0 n:1 p:0.009 tp:1\n 6 -> v:-0.82 n:6 p:0.17 tp:1\n 8 -> v:-0.88 n:4 p:0.15 tp:1\n 9 -> v:-1.0 n:1 p:0.074 tp:1\n 10 -> v:-0.95 n:2 p:0.1 tp:1\n 12 -> v:-1.0 n:1 p:0.015 tp:1\n 13 -> v:-1.0 n:1 p:0.034 tp:1\n 14 -> v:-1.0 n:1 p:0.074 tp:1\n 10 -> v:1.0 n:27 p:0.08 tp:-1\n 13 -> v:0.9 n:6 p:0.04 tp:-1\n 4 -> v:-0.95 n:2 p:0.5 tp:1\n 6 -> v:-1.0 n:1 p:0.085 tp:1\n 8 -> v:-1.0 n:1 p:0.12 tp:1\n 10 -> v:-1.0 n:1 p:0.14 tp:1\n 14 -> v:0.83 n:23 p:0.2 tp:-1\n 0 -> v:-1.0 n:1 p:0.019 tp:1\n 1 -> v:-1.0 n:1 p:0.0031 tp:1\n 4 -> v:-0.88 n:5 p:0.29 tp:1\n 5 -> v:-1.0 n:1 p:0.0046 tp:1\n 6 -> v:-1.0 n:1 p:0.025 tp:1\n 8 -> v:-1.0 n:1 p:0.087 tp:1\n 9 -> v:-1.0 n:1 p:0.051 tp:1\n 10 -> v:-0.83 n:8 p:0.4 tp:1\n 12 -> v:-1.0 n:1 p:0.012 tp:1\n 13 -> v:-1.0 n:1 p:0.014 tp:1\n 14 -> v:-1.0 n:1 p:0.093 tp:1\n"
]
],
[
[
"### State requires defense against 2 step win as player 2",
"_____no_output_____"
]
],
[
[
"play, valid = [0,7,3,11,5,15,13], [1,2,9]\ntest_mcts(play, valid);",
"NN prediction took 0.022 sec\nNNet: [0.1 ,0.29,0.05,0.27,0. ,0. ,0.01,0.01,0.03,0.19,0.01,0. ,0.01,0.02,0. ,0. ]\nMCTS: [0.35,0.41,0.02,0.12,0. ,0. ,0. ,0. ,0. ,0.1 ,0. ,0. ,0. ,0. ,0. ,0. ]\nMCTS made valid play: 1 (expected: [1, 2, 9]) in 3.281 sec\n"
],
[
"config.num_mcts_sims = 40000\nplay = [0,7,3,11,5,15,13]\n\nr1 = mcts_only_pred(play) # differs from test_mcts, direct canonical vs this\nrc = mcts_c_only_pred(play)",
"MCTS only prediction took 97.231 sec\nProbs: [0.05,0.24,0.05,0.05,0.05,0.05,0.05,0.05,0.05,0.09,0.05,0.05,0.05,0.05,0.05,0.05]\nMCTS canonical only prediction took 95.508 sec\nProbs: [0.05,0.24,0.05,0.05,0.05,0.05,0.05,0.05,0.05,0.09,0.05,0.05,0.05,0.05,0.05,0.05]\n"
]
],
[
[
"### Required defense against 1 step win",
"_____no_output_____"
]
],
[
[
"config.num_mcts_sims = 1000\nplay, valid = [0,1,0,1,0], 1\nnn_pred(play)\nmcts_pred(play)",
"z3+--------+\n3 |- - - - |\n2 |- - - - |\n1 |- - - - |\n0 |- - - - |\nz3+--------+\n 0 1 2 3 \nz2+--------+\n3 |- - - - |\n2 |- - - - |\n1 |- - - - |\n0 |O - - - |\nz2+--------+\n 0 1 2 3 \nz1+--------+\n3 |- - - - |\n2 |- - - - |\n1 |- - - - |\n0 |O X - - |\nz1+--------+\n 0 1 2 3 \nz0+--------+\n3 |- - - - |\n2 |- - - - |\n1 |- - - - |\n0 |O X - - |\nz0+--------+\n 0 1 2 3 \n--\nNN prediction took 0.699 sec\nProbs: [0.9 ,0. ,0. ,0.01,0. ,0.01,0.01,0. ,0.01,0. ,0.01,0.01,0. ,0. ,0.02,0.01] Value: -0.64\nMCTS prediction took 22.706 sec\nProbs: [0.95,0.02,0. ,0. ,0. ,0. ,0. ,0. ,0. ,0. ,0. ,0. ,0. ,0. ,0. ,0. ]\nMCTS only prediction took 2.476 sec\nProbs: [0.68,0.02,0.02,0.02,0.02,0.02,0.02,0.02,0.02,0.02,0.02,0.02,0.02,0.02,0.02,0.02]\n"
],
[
"config.num_mcts_sims = 100\nplay = [2,1,2,1,2]\nnn_pred(play)\nmcts_pred(play)\nmcts_only_pred(play)",
"z3+--------+\n3 |- - - - |\n2 |- - - - |\n1 |- - - - |\n0 |- - - - |\nz3+--------+\n 0 1 2 3 \nz2+--------+\n3 |- - - - |\n2 |- - - - |\n1 |- - - - |\n0 |- - O - |\nz2+--------+\n 0 1 2 3 \nz1+--------+\n3 |- - - - |\n2 |- - - - |\n1 |- - - - |\n0 |- X O - |\nz1+--------+\n 0 1 2 3 \nz0+--------+\n3 |- - - - |\n2 |- - - - |\n1 |- - - - |\n0 |- X O - |\nz0+--------+\n 0 1 2 3 \n--\nNN prediction took 0.040 sec\nProbs: [0.01,0. ,0.91,0.01,0.01,0.01,0. ,0. ,0. ,0.01,0. ,0.01,0.01,0.01,0. ,0.01] Value: 0.81\nMCTS prediction took 2.208 sec\nProbs: [0.01,0. ,0.85,0.02,0.02,0.01,0.01,0. ,0. ,0.02,0. ,0.01,0.02,0.02,0. ,0.01]\nMCTS only prediction took 0.215 sec\nProbs: [0.06,0.06,0.06,0.06,0.06,0.06,0.06,0.06,0.06,0.06,0.06,0.06,0.06,0.07,0.07,0.07]\n"
],
[
"config.num_mcts_sims = 100\nplay = [2,1,2,1,2]\nnn_pred(play)\nr1 = mcts_pred(play)\nr0 = mcts_only_pred(play)",
"z3+--------+\n3 |- - - - |\n2 |- - - - |\n1 |- - - - |\n0 |- - - - |\nz3+--------+\n 0 1 2 3 \nz2+--------+\n3 |- - - - |\n2 |- - - - |\n1 |- - - - |\n0 |- - O - |\nz2+--------+\n 0 1 2 3 \nz1+--------+\n3 |- - - - |\n2 |- - - - |\n1 |- - - - |\n0 |- X O - |\nz1+--------+\n 0 1 2 3 \nz0+--------+\n3 |- - - - |\n2 |- - - - |\n1 |- - - - |\n0 |- X O - |\nz0+--------+\n 0 1 2 3 \n--\nNN prediction took 0.013 sec\nProbs: [0.04,0.05,0.44,0.02,0.04,0.04,0.04,0.02,0.02,0.04,0.05,0.04,0.03,0.05,0.05,0.04] Value: 0.90\nMCTS prediction took 1.370 sec\nProbs: [0.01,0.15,0.41,0.01,0.05,0.01,0.01,0.01,0.03,0.04,0.11,0.03,0.01,0.05,0.01,0.06]\nMCTS only prediction took 0.109 sec\nProbs: [0.06,0.06,0.06,0.06,0.06,0.06,0.06,0.06,0.06,0.06,0.06,0.06,0.07,0.07,0.07,0.07]\n"
],
[
"config.num_mcts_sims = 100\nplay = [2,1,2,1,2,2]\nnn_pred(play)\nmcts_pred(play)\nmcts_only_pred(play)",
"z3+--------+\n3 |- - - - |\n2 |- - - - |\n1 |- - - - |\n0 |- - X - |\nz3+--------+\n 0 1 2 3 \nz2+--------+\n3 |- - - - |\n2 |- - - - |\n1 |- - - - |\n0 |- - O - |\nz2+--------+\n 0 1 2 3 \nz1+--------+\n3 |- - - - |\n2 |- - - - |\n1 |- - - - |\n0 |- X O - |\nz1+--------+\n 0 1 2 3 \nz0+--------+\n3 |- - - - |\n2 |- - - - |\n1 |- - - - |\n0 |- X O - |\nz0+--------+\n 0 1 2 3 \n--\nNN prediction took 0.013 sec\nProbs: [0.06,0.06,0. ,0.07,0.06,0.11,0.08,0.06,0.05,0.06,0.06,0.05,0.08,0.04,0.11,0.06] Value: -0.27\nMCTS prediction took 1.953 sec\nProbs: [0.22,0.02,0. ,0.02,0.02,0.07,0.09,0.06,0.04,0.02,0.09,0.01,0.02,0.01,0.29,0.02]\nMCTS only prediction took 0.111 sec\nProbs: [0.06,0.06,0. ,0.06,0.06,0.06,0.07,0.07,0.07,0.07,0.07,0.07,0.07,0.07,0.07,0.07]\n"
]
],
[
[
"### States that requires defense against 1 step win",
"_____no_output_____"
]
],
[
[
"test_mcts([0,8,0,8,0],0);",
"z3+--------+\n3 |- - - - |\n2 |- - - - |\n1 |- - - - |\n0 |- - - - |\nz3+--------+\n 0 1 2 3 \nz2+--------+\n3 |- - - - |\n2 |- - - - |\n1 |- - - - |\n0 |O - - - |\nz2+--------+\n 0 1 2 3 \nz1+--------+\n3 |- - - - |\n2 |X - - - |\n1 |- - - - |\n0 |O - - - |\nz1+--------+\n 0 1 2 3 \nz0+--------+\n3 |- - - - |\n2 |X - - - |\n1 |- - - - |\n0 |O - - - |\nz0+--------+\n 0 1 2 3 \n--\nz3+--------+\n3 |- - - - |\n2 |- - - - |\n1 |- - - - |\n0 |X - - - |\nz3+--------+\n 0 1 2 3 \nz2+--------+\n3 |- - - - |\n2 |- - - - |\n1 |- - - - |\n0 |O - - - |\nz2+--------+\n 0 1 2 3 \nz1+--------+\n3 |- - - - |\n2 |X - - - |\n1 |- - - - |\n0 |O - - - |\nz1+--------+\n 0 1 2 3 \nz0+--------+\n3 |- - - - |\n2 |X - - - |\n1 |- - - - |\n0 |O - - - |\nz0+--------+\n 0 1 2 3 \n--\nMCTS made correct play in 11.332 sec\n"
]
],
[
[
"### State requires defense against 2 step win as player 1",
"_____no_output_____"
]
],
[
[
"test_mcts([12,0,7,3,11,5,15,13],1);",
"z3+--------+\n3 |- - - - |\n2 |- - - - |\n1 |- - - - |\n0 |- - - - |\nz3+--------+\n 0 1 2 3 \nz2+--------+\n3 |- - - - |\n2 |- - - - |\n1 |- - - - |\n0 |- - - - |\nz2+--------+\n 0 1 2 3 \nz1+--------+\n3 |- - - - |\n2 |- - - - |\n1 |- - - - |\n0 |- - - - |\nz1+--------+\n 0 1 2 3 \nz0+--------+\n3 |O X - O |\n2 |- - - O |\n1 |- X - O |\n0 |X - - X |\nz0+--------+\n 0 1 2 3 \n--\nz3+--------+\n3 |- - - - |\n2 |- - - - |\n1 |- - - - |\n0 |- - - - |\nz3+--------+\n 0 1 2 3 \nz2+--------+\n3 |- - - - |\n2 |- - - - |\n1 |- - - - |\n0 |- - - - |\nz2+--------+\n 0 1 2 3 \nz1+--------+\n3 |- - - - |\n2 |- - - - |\n1 |- - - - |\n0 |- - - - |\nz1+--------+\n 0 1 2 3 \nz0+--------+\n3 |O X - O |\n2 |- O - O |\n1 |- X - O |\n0 |X - - X |\nz0+--------+\n 0 1 2 3 \n--\nMCTS made incorrect play in 11.554 sec\n"
]
],
[
[
"### State with 2 step win as player 1",
"_____no_output_____"
]
],
[
[
"test_mcts([0,7,3,11,5,15,13,12],1);",
"z3+--------+\n3 |- - - - |\n2 |- - - - |\n1 |- - - - |\n0 |- - - - |\nz3+--------+\n 0 1 2 3 \nz2+--------+\n3 |- - - - |\n2 |- - - - |\n1 |- - - - |\n0 |- - - - |\nz2+--------+\n 0 1 2 3 \nz1+--------+\n3 |- - - - |\n2 |- - - - |\n1 |- - - - |\n0 |- - - - |\nz1+--------+\n 0 1 2 3 \nz0+--------+\n3 |X O - X |\n2 |- - - X |\n1 |- O - X |\n0 |O - - O |\nz0+--------+\n 0 1 2 3 \n--\nz3+--------+\n3 |- - - - |\n2 |- - - - |\n1 |- - - - |\n0 |- - - - |\nz3+--------+\n 0 1 2 3 \nz2+--------+\n3 |- - - - |\n2 |- - - - |\n1 |- - - - |\n0 |- - - - |\nz2+--------+\n 0 1 2 3 \nz1+--------+\n3 |- - - - |\n2 |- - - - |\n1 |- - - - |\n0 |- - - - |\nz1+--------+\n 0 1 2 3 \nz0+--------+\n3 |X O - X |\n2 |- - - X |\n1 |- O - X |\n0 |O O - O |\nz0+--------+\n 0 1 2 3 \n--\nMCTS made correct play in 5.456 sec\n"
]
],
[
[
"### State with 2 step win as player 2",
"_____no_output_____"
]
],
[
[
"test_mcts([14,0,7,3,11,5,15,13,12],1);",
"z3+--------+\n3 |- - - - |\n2 |- - - - |\n1 |- - - - |\n0 |- - - - |\nz3+--------+\n 0 1 2 3 \nz2+--------+\n3 |- - - - |\n2 |- - - - |\n1 |- - - - |\n0 |- - - - |\nz2+--------+\n 0 1 2 3 \nz1+--------+\n3 |- - - - |\n2 |- - - - |\n1 |- - - - |\n0 |- - - - |\nz1+--------+\n 0 1 2 3 \nz0+--------+\n3 |O X O O |\n2 |- - - O |\n1 |- X - O |\n0 |X - - X |\nz0+--------+\n 0 1 2 3 \n--\nz3+--------+\n3 |- - - - |\n2 |- - - - |\n1 |- - - - |\n0 |- - - - |\nz3+--------+\n 0 1 2 3 \nz2+--------+\n3 |- - - - |\n2 |- - - - |\n1 |- - - - |\n0 |- - - - |\nz2+--------+\n 0 1 2 3 \nz1+--------+\n3 |- - - - |\n2 |- - - - |\n1 |- - - - |\n0 |- - - - |\nz1+--------+\n 0 1 2 3 \nz0+--------+\n3 |O X O O |\n2 |- - - O |\n1 |- X - O |\n0 |X X - X |\nz0+--------+\n 0 1 2 3 \n--\nMCTS made correct play in 5.127 sec\n"
]
],
[
[
"#### Other Stuff",
"_____no_output_____"
]
],
[
[
"for filter in [\n list(range(16)),\n {1:{a: [2,9] for a in range(16)}},\n {9:list(range(16))},\n {10:None}\n ]:\n for d,r in [('canoical', rc), ('usual',r1)]:\n print(d, filter)\n r.print(filter=filter)",
"_____no_output_____"
]
]
]
| [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"raw"
]
| [
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"raw"
]
]
|
ec6700fe3e5515fa38904898d4f9523f9aa3c123 | 21,287 | ipynb | Jupyter Notebook | sagemaker-python-sdk/chainer_cifar10/chainermn_distributed_cifar10.ipynb | P15241328/amazon-sagemaker-examples | 00cba545be0822474f070321a62d22865187e09b | [
"Apache-2.0"
]
| 5 | 2019-01-19T23:53:35.000Z | 2022-01-29T14:04:31.000Z | sagemaker-python-sdk/chainer_cifar10/chainermn_distributed_cifar10.ipynb | P15241328/amazon-sagemaker-examples | 00cba545be0822474f070321a62d22865187e09b | [
"Apache-2.0"
]
| 4 | 2020-09-26T01:30:01.000Z | 2022-02-10T02:20:35.000Z | sagemaker-python-sdk/chainer_cifar10/chainermn_distributed_cifar10.ipynb | P15241328/amazon-sagemaker-examples | 00cba545be0822474f070321a62d22865187e09b | [
"Apache-2.0"
]
| 7 | 2020-03-04T22:23:51.000Z | 2021-07-13T14:05:46.000Z | 45.680258 | 563 | 0.641471 | [
[
[
"## Distributed Training with Chainer and ChainerMN\n\nChainer can train in two modes: single-machine, and distributed. Unlike the single-machine notebook example that trains an image classification model on the CIFAR-10 dataset, we will write a Chainer script that uses `chainermn` to distribute training to multiple instances.\n\n[VGG](https://arxiv.org/pdf/1409.1556v6.pdf) is an architecture for deep convolution networks. In this example, we train a convolutional network to perform image classification using the CIFAR-10 dataset on multiple instances. CIFAR-10 consists of 60000 32x32 colour images in 10 classes, with 6000 images per class. There are 50000 training images and 10000 test images. We'll train a model on SageMaker, deploy it to Amazon SageMaker, and then classify images using the deployed model.\n\nThe Chainer script runs inside of a Docker container running on SageMaker. For more information about the Chainer container, see the sagemaker-chainer-containers repository and the sagemaker-python-sdk repository:\n\n* https://github.com/aws/sagemaker-chainer-containers\n* https://github.com/aws/sagemaker-python-sdk\n\nFor more on Chainer and ChainerMN, please visit the Chainer and ChainerMN repositories:\n\n* https://github.com/chainer/chainer\n* https://github.com/chainer/chainermn\n\nThis notebook is adapted from the [CIFAR-10](https://github.com/chainer/chainer/tree/master/examples/cifar) example in the Chainer repository.",
"_____no_output_____"
]
],
[
[
"# Setup\nfrom sagemaker import get_execution_role\nimport sagemaker\n\nsagemaker_session = sagemaker.Session()\n\n# This role retrieves the SageMaker-compatible role used by this Notebook Instance.\nrole = get_execution_role()",
"_____no_output_____"
]
],
[
[
"## Downloading training and test data\n\nWe use helper functions provided by `chainer` to download and preprocess the CIFAR10 data. ",
"_____no_output_____"
]
],
[
[
"import chainer\n\nfrom chainer.datasets import get_cifar10\n\ntrain, test = get_cifar10()",
"_____no_output_____"
]
],
[
[
"## Uploading the data\n\nWe save the preprocessed data to the local filesystem, and then use the `sagemaker.Session.upload_data` function to upload our datasets to an S3 location. The return value `inputs` identifies the S3 location, which we will use when we start the Training Job.",
"_____no_output_____"
]
],
[
[
"import os\nimport shutil\n\nimport numpy as np\n\ntrain_data = [element[0] for element in train]\ntrain_labels = [element[1] for element in train]\n\ntest_data = [element[0] for element in test]\ntest_labels = [element[1] for element in test]\n\n\ntry:\n os.makedirs('/tmp/data/distributed_train_cifar')\n os.makedirs('/tmp/data/distributed_test_cifar')\n np.savez('/tmp/data/distributed_train_cifar/train.npz', data=train_data, labels=train_labels)\n np.savez('/tmp/data/distributed_test_cifar/test.npz', data=test_data, labels=test_labels)\n train_input = sagemaker_session.upload_data(path=os.path.join('/tmp', 'data', 'distributed_train_cifar'),\n key_prefix='notebook/distributed_chainer_cifar/train')\n test_input = sagemaker_session.upload_data(path=os.path.join('/tmp', 'data', 'distributed_test_cifar'),\n key_prefix='notebook/distributed_chainer_cifar/test')\nfinally:\n shutil.rmtree('/tmp/data')\nprint('training data at ', train_input)\nprint('test data at ', test_input)",
"_____no_output_____"
]
],
[
[
"## Writing the Chainer script to run on Amazon SageMaker\n\n### Training\n\nWe need to provide a training script that can run on the SageMaker platform. The training script is very similar to a training script you might run outside of SageMaker, but you can access useful properties about the training environment through various environment variables, such as:\n\n* `SM_MODEL_DIR`: A string representing the path to the directory to write model artifacts to.\n These artifacts are uploaded to S3 for model hosting.\n* `SM_NUM_GPUS`: An integer representing the number of GPUs available to the host.\n* `SM_OUTPUT_DIR`: A string representing the filesystem path to write output artifacts to. Output artifacts may\n include checkpoints, graphs, and other files to save, not including model artifacts. These artifacts are compressed\n and uploaded to S3 to the same S3 prefix as the model artifacts.\n\nSupposing two input channels, 'train' and 'test', were used in the call to the Chainer estimator's ``fit()`` method,\nthe following will be set, following the format `SM_CHANNEL_[channel_name]`:\n\n* `SM_CHANNEL_TRAIN`: A string representing the path to the directory containing data in the 'train' channel\n* `SM_CHANNEL_TEST`: Same as above, but for the 'test' channel.\n\nA typical training script loads data from the input channels, configures training with hyperparameters, trains a model, and saves a model to `model_dir` so that it can be hosted later. Hyperparameters are passed to your script as arguments and can be retrieved with an `argparse.ArgumentParser` instance. For example, the script run by this notebook starts with the following:\n\n```python\nimport argparse\nimport os\n\nif __name__ =='__main__':\n training_env = sagemaker_containers.training_env()\n \n num_gpus = int(os.environ['SM_NUM_GPUS'])\n \n parser = argparse.ArgumentParser()\n\n # retrieve the hyperparameters we set from the client in the notebook (with some defaults)\n parser.add_argument('--epochs', type=int, default=30)\n parser.add_argument('--batch-size', type=int, default=256)\n parser.add_argument('--learning-rate', type=float, default=0.05)\n parser.add_argument('--communicator', type=str, default='pure_nccl' if num_gpus > 0 else 'naive')\n\n # Data, model, and output directories. These are required.\n parser.add_argument('--output-data-dir', type=str, default=os.environ['SM_OUTPUT_DATA_DIR'])\n parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])\n parser.add_argument('--train', type=str, default=os.environ['SM_CHANNEL_TRAIN'])\n parser.add_argument('--test', type=str, default=os.environ['SM_OUTPUT_DATA_DIR'])\n \n args, _ = parser.parse_known_args()\n \n # ... load from args.train and args.test, train a model, write model to args.model_dir.\n```\n\nBecause the Chainer container imports your training script, you should always put your training code in a main guard (`if __name__=='__main__':`) so that the container does not inadvertently run your training code at the wrong point in execution.\n\nFor more information about training environment variables, please visit https://github.com/aws/sagemaker-containers.\n\n### Hosting and Inference\n\nWe use a single script to train and host the Chainer model. You can also write separate scripts for training and hosting. 
In contrast with the training script, the hosting script requires you to implement functions with particular function signatures (or rely on defaults for those functions).\n\nThese functions load your model, deserialize data sent by a client, obtain inferences from your hosted model, and serialize predictions back to a client:\n\n* **`model_fn(model_dir)` (always required for hosting)**: This function is invoked to load model artifacts from those that were written into `model_dir` during training.\n\nThe script that this notebook runs uses the following `model_fn` function for hosting:\n```python\ndef model_fn(model_dir):\n chainer.config.train = False\n model = L.Classifier(net.VGG(10))\n serializers.load_npz(os.path.join(model_dir, 'model.npz'), model)\n return model.predictor\n```\n\n* `input_fn(input_data, content_type)`: This function is invoked to deserialize prediction data when a prediction request is made. The return value is passed to predict_fn. `input_data` is the serialized input data in the body of the prediction request, and `content_type`, the MIME type of the data.\n \n \n* `predict_fn(input_data, model)`: This function accepts the return value of `input_fn` as the `input_data` parameter and the return value of `model_fn` as the `model` parameter and returns inferences obtained from the model.\n \n \n* `output_fn(prediction, accept)`: This function is invoked to serialize the return value from `predict_fn`, which is passed in as the `prediction` parameter, back to the SageMaker client in response to prediction requests.\n\n\n`model_fn` is always required, but default implementations exist for the remaining functions. These default implementations can deserialize a NumPy array, invoking the model's `__call__` method on the input data, and serialize a NumPy array back to the client.\n\nThis notebook relies on the default `input_fn`, `predict_fn`, and `output_fn` implementations. See the Chainer sentiment analysis notebook for an example of how one can implement these hosting functions.\n\nPlease examine the script below, reproduced in its entirety. Training occurs behind the main guard, which prevents the function from being run when the script is imported, and `model_fn` loads the model saved into `model_dir` during training.\n\nThe script uses a chainermn Communicator to distribute training to multiple nodes. The Communicator depends on MPI (Message Passing Interface), so the Chainer container running on SageMaker runs this script with mpirun if the Chainer Estimator specifies a train_instance_count of two or greater, or if use_mpi in the Chainer estimator is true.\n\nBy default, one process is created per GPU (on GPU instances), or one per host (on CPU instances, which are not recommended for this notebook).\n\nFor more on writing Chainer scripts to run on SageMaker, or for more on the Chainer container itself, please see the following repositories: \n\n* For writing Chainer scripts to run on SageMaker: https://github.com/aws/sagemaker-python-sdk\n* For more on the Chainer container and default hosting functions: https://github.com/aws/sagemaker-chainer-containers\n",
"_____no_output_____"
]
],
[
[
"!pygmentize 'src/chainer_cifar_vgg_distributed.py'",
"_____no_output_____"
]
],
[
[
"## Running the training script on SageMaker\n\nTo train a model with a Chainer script, we construct a ```Chainer``` estimator using the [sagemaker-python-sdk](https://github.com/aws/sagemaker-python-sdk). We pass in an `entry_point`, the name of a script that contains a couple of functions with certain signatures (`train` and `model_fn`), and a `source_dir`, a directory containing all code to run inside the Chainer container. This script will be run on SageMaker in a container that invokes these functions to train and load Chainer models. \n\nThe ```Chainer``` class allows us to run our training function as a training job on SageMaker infrastructure. We need to configure it with our training script, an IAM role, the number of training instances, and the training instance type. In this case we will run our training job on two `ml.p2.xlarge` instances, but you may need to request a service limit increase on the number of training instances in order to train.\n\nThis script uses the `chainermn` package, which distributes training with MPI. Your script is run with `mpirun`, so a ChainerMN Communicator object can be used to distribute training. Arguments to `mpirun` are set to sensible defaults, but you can configure how your script is run in distributed mode. See the ```Chainer``` class documentation for more on configuring MPI.",
"_____no_output_____"
]
],
[
[
"from sagemaker.chainer.estimator import Chainer\n\nchainer_estimator = Chainer(entry_point='chainer_cifar_vgg_distributed.py',\n source_dir=\"src\",\n role=role,\n sagemaker_session=sagemaker_session,\n use_mpi=True,\n train_instance_count=2,\n train_instance_type='ml.p3.2xlarge',\n hyperparameters={'epochs': 30, 'batch-size': 256})\n\nchainer_estimator.fit({'train': train_input, 'test': test_input})",
"_____no_output_____"
]
],
[
[
"Our Chainer script writes various artifacts, such as plots, to a directory `output_data_dir`, the contents of which which SageMaker uploads to S3. Now we download and extract these artifacts.",
"_____no_output_____"
]
],
[
[
"from s3_util import retrieve_output_from_s3\n\nchainer_training_job = chainer_estimator.latest_training_job.name\n\ndesc = sagemaker_session.sagemaker_client. \\\n describe_training_job(TrainingJobName=chainer_training_job)\noutput_data = desc['ModelArtifacts']['S3ModelArtifacts'].replace('model.tar.gz', 'output.tar.gz')\n\nretrieve_output_from_s3(output_data, 'output/distributed_cifar')",
"_____no_output_____"
]
],
[
[
"These plots show the accuracy and loss over epochs:",
"_____no_output_____"
]
],
[
[
"from IPython.display import Image\nfrom IPython.display import display\n\naccuracy_graph = Image(filename=\"output/distributed_cifar/accuracy.png\",\n width=800,\n height=800)\nloss_graph = Image(filename=\"output/distributed_cifar/loss.png\",\n width=800,\n height=800)\n\ndisplay(accuracy_graph, loss_graph)",
"_____no_output_____"
]
],
[
[
"## Deploying the Trained Model\n\nAfter training, we use the Chainer estimator object to create and deploy a hosted prediction endpoint. We can use a CPU-based instance for inference (in this case an `ml.m4.xlarge`), even though we trained on GPU instances.\n\nThe predictor object returned by `deploy` lets us call the new endpoint and perform inference on our sample images. ",
"_____no_output_____"
]
],
[
[
"predictor = chainer_estimator.deploy(initial_instance_count=1, instance_type='ml.m4.xlarge')",
"_____no_output_____"
]
],
[
[
"### CIFAR10 sample images\n\nWe'll use these CIFAR10 sample images to test the service:\n\n<img style=\"display: inline; height: 32px; margin: 0.25em\" src=\"images/airplane1.png\" />\n<img style=\"display: inline; height: 32px; margin: 0.25em\" src=\"images/automobile1.png\" />\n<img style=\"display: inline; height: 32px; margin: 0.25em\" src=\"images/bird1.png\" />\n<img style=\"display: inline; height: 32px; margin: 0.25em\" src=\"images/cat1.png\" />\n<img style=\"display: inline; height: 32px; margin: 0.25em\" src=\"images/deer1.png\" />\n<img style=\"display: inline; height: 32px; margin: 0.25em\" src=\"images/dog1.png\" />\n<img style=\"display: inline; height: 32px; margin: 0.25em\" src=\"images/frog1.png\" />\n<img style=\"display: inline; height: 32px; margin: 0.25em\" src=\"images/horse1.png\" />\n<img style=\"display: inline; height: 32px; margin: 0.25em\" src=\"images/ship1.png\" />\n<img style=\"display: inline; height: 32px; margin: 0.25em\" src=\"images/truck1.png\" />\n\n",
"_____no_output_____"
],
[
"## Predicting using SageMaker Endpoint\n\nWe batch the images together into a single NumPy array to obtain multiple inferences with a single prediction request.",
"_____no_output_____"
]
],
[
[
"from skimage import io\nimport numpy as np\n\ndef read_image(filename):\n img = io.imread(filename)\n img = np.array(img).transpose(2, 0, 1)\n img = np.expand_dims(img, axis=0)\n img = img.astype(np.float32)\n img *= 1. / 255.\n img = img.reshape(3, 32, 32)\n return img\n\n\ndef read_images(filenames):\n return np.array([read_image(f) for f in filenames])\n\nfilenames = ['images/airplane1.png',\n 'images/automobile1.png',\n 'images/bird1.png',\n 'images/cat1.png',\n 'images/deer1.png',\n 'images/dog1.png',\n 'images/frog1.png',\n 'images/horse1.png',\n 'images/ship1.png',\n 'images/truck1.png']\n\nimage_data = read_images(filenames)",
"_____no_output_____"
]
],
[
[
"The predictor runs inference on our input data and returns a list of predictions whose argmax gives the predicted label of the input data. ",
"_____no_output_____"
]
],
[
[
"response = predictor.predict(image_data)\n\nfor i, prediction in enumerate(response):\n print('image {}: prediction: {}'.format(i, prediction.argmax(axis=0)))",
"_____no_output_____"
]
],
[
[
"## Cleanup\n\nAfter you have finished with this example, remember to delete the prediction endpoint to release the instance(s) associated with it.",
"_____no_output_____"
]
],
[
[
"chainer_estimator.delete_endpoint()",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
]
|
ec6702cdbb87973d588067f9d304756a4244fd3d | 1,806 | ipynb | Jupyter Notebook | _notebooks/2022-03-01-better-charts.ipynb | vdenoise/portfolio-quant-tech | 9e47fc112f15b64093cffc65e6c515641bd3dbc4 | [
"Apache-2.0"
]
| 1 | 2019-10-11T07:13:49.000Z | 2019-10-11T07:13:49.000Z | _notebooks/2022-03-01-better-charts.ipynb | vdenoise/portfolio-geek | 9e47fc112f15b64093cffc65e6c515641bd3dbc4 | [
"Apache-2.0"
]
| 1 | 2022-03-05T17:59:11.000Z | 2022-03-05T17:59:11.000Z | _notebooks/2022-03-01-better-charts.ipynb | vdenoise/portfolio-quant-tech | 9e47fc112f15b64093cffc65e6c515641bd3dbc4 | [
"Apache-2.0"
]
| null | null | null | 25.8 | 269 | 0.599668 | [
[
[
"# Better charts with Pandas and Python\n\n> \"In this article, we provide a bird's eye perspective on how to use Python and a great quantitative library, PyPortfolioOpt to implement Modern Portfolio Optimisation techniques. In particular, we look at the approach developed in 1956 by Harry Markowitz.\"\n\n- toc: true\n- hide: true\n- badges: true\n- comments: false\n- author: Vincent D.\n- categories: [charts]\n- tags: [pandas, charts]\n- image: images/portfolio-optimisation.png",
"_____no_output_____"
],
[
"## How to improve charts with Pandas\nhttps://www.shanelynn.ie/bar-plots-in-python-using-pandas-dataframes/",
"_____no_output_____"
],
[
"## Categorical Data and Sorting\n\nhttps://technology.amis.nl/data-analytics/ordering-rows-in-pandas-data-frame-and-bars-in-plotly-bar-chart-by-day-of-the-week-or-any-other-user-defined-order/\n",
"_____no_output_____"
]
]
]
| [
"markdown"
]
| [
[
"markdown",
"markdown",
"markdown"
]
]
|
ec67112628cb685dba1691f4326b505e7f872066 | 152,570 | ipynb | Jupyter Notebook | Results.ipynb | Shingirai98/Xhosa_English_Translation | 340e5ecd70ebcef4520e6abeca539f49c416f7a2 | [
"MIT"
]
| null | null | null | Results.ipynb | Shingirai98/Xhosa_English_Translation | 340e5ecd70ebcef4520e6abeca539f49c416f7a2 | [
"MIT"
]
| null | null | null | Results.ipynb | Shingirai98/Xhosa_English_Translation | 340e5ecd70ebcef4520e6abeca539f49c416f7a2 | [
"MIT"
]
| null | null | null | 124.750613 | 16,014 | 0.706305 | [
[
[
"<a href=\"https://colab.research.google.com/github/Shingirai98/Xhosa_English_Translation/blob/main/Results.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"# Libraries\nfrom google.colab import drive\ndrive.mount('/content/drive')",
"Mounted at /content/drive\n"
],
[
"# library imports\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport os\nimport seaborn as sns\nfrom IPython.display import HTML\nfrom google.colab import data_table",
"_____no_output_____"
],
[
"def var_init(tag):\n source_language = \"en\"\n target_language = \"xh\"\n os.environ[\"src\"] = source_language # Sets them in bash as well, since we often use bash scripts\n os.environ[\"tgt\"] = target_language\n os.environ[\"tag\"] = tag\n tag = tag\n os.environ[\"gdrive_path\"] = \"/content/drive/My Drive/m/%s-%s-%s\" % (target_language, source_language, tag)\n",
"_____no_output_____"
],
[
"vars = [\"noam_0.65lr\",\"baseline-noam\", \"baseline_10epochs\", \"baseline\", \"noam_0.35lr\",\"noam_df.9\"]",
"_____no_output_____"
],
[
"var_init(vars[1])\n!echo $gdrive_path",
"/content/drive/My Drive/m/xh-en-baseline-noam\n"
],
[
"def check_float(potential_float):\n try:\n float(potential_float)\n return True\n except ValueError:\n return False\n",
"_____no_output_____"
],
[
"def toDataFrame(val):\n vals = []\n steps = []\n loss = []\n ppl = []\n bleu_score = []\n with open(val) as f:\n for i, line in enumerate(f):\n for word in line.split():\n if check_float(word):\n vals.append(float(word))\n elif word.isdigit():\n vals.append(int(word))\n\n for i in range(0, len(vals)//5):\n steps.append(vals[5*i])\n loss.append(vals[(5*i)+1])\n ppl.append(vals[(5*i)+2])\n bleu_score.append(vals[(5*i)+3])\n\n data = {'Steps': steps, 'Loss': loss, 'PPL': ppl, 'bleu':bleu_score}\n df = pd.DataFrame(data) \n return df",
"_____no_output_____"
],
[
"def color_negative_red(value):\n \"\"\"\n Colors elements in a dateframe\n green if positive and red if\n negative. Does not color NaN\n values.\n \"\"\"\n\n if value < 1:\n color = 'red'\n elif value >= 1 and value < 5:\n color = 'yellow'\n else:\n color = 'green'\n\n return 'color: %s' % color",
"_____no_output_____"
],
[
"def disp_tab(current_var):\n val = \"drive/My Drive/m/xh-en-\"+current_var+\"/models/xhen_reverse_transformer/validations.txt\"\n table1 = toDataFrame(val)\n \n data_table.enable_dataframe_formatter()\n return table1",
"_____no_output_____"
],
[
"def disp_plot(current_var):\n ax = plt.gca()\n val = \"drive/My Drive/m/xh-en-\"+current_var+\"/models/xhen_reverse_transformer/validations.txt\"\n table1 = toDataFrame(val)\n table1.plot(kind='line',x='Steps',y='bleu',ax=ax)\n plt.xlabel('Number of Steps')\n plt.ylabel('BLEU score')\n plt.title(current_var)\n\n plt.legend()",
"_____no_output_____"
],
[
"# source_file = 'xhosanavy.' + source_language\n# target_file = 'xhosanavy.' + target_language\n\n# lr = []\ncurrent_var = vars[1]\ndisp_tab(current_var)",
"_____no_output_____"
],
[
"disp_plot(current_var)",
"_____no_output_____"
],
[
"df = disp_tab(vars[2])\ndf.head(5)\n",
"_____no_output_____"
],
[
"df.tail(5)",
"_____no_output_____"
],
[
"disp_plot(vars[2])",
"_____no_output_____"
],
[
"disp_tab(vars[3])",
"_____no_output_____"
],
[
"disp_plot(vars[3])",
"_____no_output_____"
],
[
"disp_tab(vars[0])",
"_____no_output_____"
],
[
"disp_plot(vars[0])",
"_____no_output_____"
],
[
"disp_tab(vars[4])",
"_____no_output_____"
],
[
"disp_plot(vars[4])",
"_____no_output_____"
],
[
"disp_tab(vars[5])",
"_____no_output_____"
],
[
"disp_plot(vars[5])",
"_____no_output_____"
]
]
]
| [
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
ec672c82f3237e502a497b12db666fa5de832603 | 173,196 | ipynb | Jupyter Notebook | notebooks/cdk2_analysis.ipynb | AstraZeneca/jazzy | d06a5848165d2a256b52b75c3365715da0d36c4d | [
"Apache-2.0"
]
| null | null | null | notebooks/cdk2_analysis.ipynb | AstraZeneca/jazzy | d06a5848165d2a256b52b75c3365715da0d36c4d | [
"Apache-2.0"
]
| null | null | null | notebooks/cdk2_analysis.ipynb | AstraZeneca/jazzy | d06a5848165d2a256b52b75c3365715da0d36c4d | [
"Apache-2.0"
]
| null | null | null | 205.208531 | 1,059 | 0.638306 | [
[
[
"import os\nimport sys\nimport rdkit\nimport svgutils.transform as sg\n\nfrom jazzy import core, visualisation, helpers\n# from cairosvg import svg2png",
"_____no_output_____"
],
[
"# Import config\nopt_path = os.path.abspath(os.path.join(os.getcwd(), '..', 'optimisation'))\nsys.path.insert(0, opt_path)\nimport config\n\n# Open the compressed data\ndata_path = os.path.abspath(os.path.join(os.getcwd(), '..', config.DATA_PATH))",
"_____no_output_____"
],
[
"# Get all molecules in folder\ncdk2_path = os.path.join(data_path, \"cdk2_analysis\")\nfiles = os.listdir(cdk2_path)\nfiles = [f for f in files if f.endswith(\"sdf\") or f.endswith(\"mol\")]\nfiles",
"_____no_output_____"
],
[
"def atomistic_strength_from_file(file_path):\n \"\"\"Accepts a file path to an SDF/MOL file and generates an HB strength visualisation.\n \"\"\"\n rdkit_mol = rdkit.Chem.MolFromMolFile(file_path)\n rdkit_mol = rdkit.Chem.AddHs(rdkit_mol, addCoords=True)\n kallisto_mol = core.kallisto_molecule_from_rdkit_molecule(rdkit_mol)\n atoms_and_nbrs = core.get_covalent_atom_idxs(rdkit_mol)\n charges = core.get_charges_from_kallisto_molecule(kallisto_mol, charge=0)\n atomic_map = core.calculate_polar_strength_map(rdkit_mol, kallisto_mol, atoms_and_nbrs, charges)\n img_text = visualisation.depict_strengths(rdkit_mol, \n atomic_map, \n fig_size=(500, 500),\n flatten_molecule=True, \n highlight_atoms=True, \n ignore_sdc=True, \n ignore_sdx=False,\n ignore_sa=True,\n sdc_threshold=0.0, \n sdx_threshold=0.0,\n sa_threshold=0.0,\n rounding_digits=2)\n\n # Add ligand name as a title\n img_text = img_text.replace('svg:','')\n fig = sg.fromstring(img_text)\n label = sg.TextElement(250, 15, file_path.split(\"/\")[-1], size=14, \n font='sans-serif', anchor='middle', color='#000000')\n fig.append(label)\n img_text = fig.to_str().decode(\"utf-8\")\n return img_text",
"_____no_output_____"
],
[
"# Build strength visualisations\nstrengths = list()\nfor f in files:\n file_path = os.path.join(cdk2_path, f)\n strengths.append(atomistic_strength_from_file(file_path))",
"_____no_output_____"
],
[
"class HorizontalDisplay:\n \"\"\"\n Accepts a list of SVGs, concatenates them, and returns a horizonal rendering.\n \"\"\"\n def __init__(self, *args):\n self.args = args\n\n def _repr_html_(self):\n concat_svgs = ''.join(self.args[0])\n template = '<div style=\"\">{}</div>'\n return template.format(concat_svgs)",
"_____no_output_____"
],
[
"HorizontalDisplay(strengths)",
"_____no_output_____"
],
[
"def mol_vector_from_file(file_path):\n \"\"\"Accepts a file path to an SDF/MOL file and generates a mol vector.\n \"\"\"\n rdkit_mol = rdkit.Chem.MolFromMolFile(file_path)\n rdkit_mol = rdkit.Chem.AddHs(rdkit_mol, addCoords=True)\n kallisto_mol = core.kallisto_molecule_from_rdkit_molecule(rdkit_mol)\n atoms_and_nbrs = core.get_covalent_atom_idxs(rdkit_mol)\n charges = core.get_charges_from_kallisto_molecule(kallisto_mol, charge=0)\n atomic_map = core.calculate_polar_strength_map(rdkit_mol, kallisto_mol, atoms_and_nbrs, charges)\n mol_vector = helpers.sum_atomic_map(atomic_map)\n mol_vector[\"name\"] = file_path.split(\"/\")[-1]\n return mol_vector",
"_____no_output_____"
],
[
"# Build molecular vectors\nvects = list()\nfor f in files:\n file_path = os.path.join(cdk2_path, f)\n vects.append(mol_vector_from_file(file_path))\nvects",
"_____no_output_____"
],
[
"# Export renderings\n# for f in files:\n# file_path = os.path.join(cdk2_path, f)\n# output_filepath = f.split(\".\")[0] + \".png\"\n# svg2png(bytestring=atomistic_strength_from_file(file_path), write_to=output_filepath)\n",
"_____no_output_____"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
ec672e3e11c99440371f71b44d271e9b7c52fa64 | 11,688 | ipynb | Jupyter Notebook | notebooks/senzing-addRecord.ipynb | Senzing/poc-notebook | 6608154a6f1293ec829540d3aa2770d46f66be1c | [
"Apache-2.0"
]
| null | null | null | notebooks/senzing-addRecord.ipynb | Senzing/poc-notebook | 6608154a6f1293ec829540d3aa2770d46f66be1c | [
"Apache-2.0"
]
| 1 | 2019-04-09T16:21:51.000Z | 2019-04-09T16:21:51.000Z | notebooks/senzing-addRecord.ipynb | Senzing/poc-notebook | 6608154a6f1293ec829540d3aa2770d46f66be1c | [
"Apache-2.0"
]
| null | null | null | 29.147132 | 404 | 0.585815 | [
[
[
"# Adding records\n\nThe `addRecord()` method creates a JSON string with information about your Senzing version.",
"_____no_output_____"
],
[
"## Prepare environment",
"_____no_output_____"
],
[
"### Initialize python environment",
"_____no_output_____"
]
],
[
[
"import os\nimport sys\nimport json\n\n# For RenderJSON\n\nimport uuid\nfrom IPython.display import display_javascript, display_html, display",
"_____no_output_____"
]
],
[
[
"### Helper class for JSON rendering\n\nA class for pretty-printing JSON.\nNot required by Senzing, \nbut helps visualize JSON.",
"_____no_output_____"
]
],
[
[
"class RenderJSON(object):\n def __init__(self, json_data):\n if isinstance(json_data, dict):\n self.json_str = json.dumps(json_data)\n elif isinstance(json_data, bytearray):\n self.json_str = json_data.decode()\n else:\n self.json_str = json_data\n self.uuid = str(uuid.uuid4())\n\n def _ipython_display_(self):\n display_html('<div id=\"{}\" style=\"height:100%; width:100%; background-color: LightCyan\"></div>'.format(self.uuid), raw=True)\n display_javascript(\"\"\"\n require([\"https://rawgit.com/caldwell/renderjson/master/renderjson.js\"], function() {\n document.getElementById('%s').appendChild(renderjson(%s))\n });\n \"\"\" % (self.uuid, self.json_str), raw=True)",
"_____no_output_____"
]
],
[
[
"### System path\n\nUpdate system path.",
"_____no_output_____"
]
],
[
[
"python_path = \"{0}/python\".format(\n os.environ.get(\"SENZING_G2_DIR\", \"/opt/senzing/g2\"))\nsys.path.append(python_path)",
"_____no_output_____"
]
],
[
[
"### Initialize variables\n\nCreate variables used for G2Engine.",
"_____no_output_____"
]
],
[
[
"%run senzing-init-config.ipynb",
"_____no_output_____"
],
[
"%store -r senzing_config_json\n%store -r config_id_bytearray\nconfig_id=config_id_bytearray.decode()",
"_____no_output_____"
],
[
"RenderJSON(senzing_config_json)",
"_____no_output_____"
]
],
[
[
"## G2Engine\n\nThe G2Engine API...",
"_____no_output_____"
]
],
[
[
"from senzing import G2Engine, G2Exception",
"_____no_output_____"
]
],
[
[
"## Initialization\n\nTo start using Senzing G2Engine, create and initialize an instance.\nThis should be done once per process.\nThe `init()` method accepts the following parameters:\n\n- **module_name:** A short name given to this instance of the G2 engine (i.e. your G2Module object)\n- **g2module_ini_pathname:** A fully qualified path to the G2 engine INI file (often /opt/senzing/g2/python/G2Module.ini)\n- **verbose_logging:** A boolean which enables diagnostic logging - this will print a massive amount of information to stdout (default = False)\n- **config_id:** (optional) The identifier value for the engine configuration can be returned here.\n\nCalling this function will return \"0\" upon success.",
"_____no_output_____"
]
],
[
[
"g2_engine = G2Engine()\ng2_engine.init(module_name, senzing_config_json, verbose_logging, config_id)",
"_____no_output_____"
]
],
[
[
"## Prime Engine\n\nThe `primeEngine()` method may optionally be called to pre-initialize some of the heavier weight internal resources of the G2 engine.",
"_____no_output_____"
]
],
[
[
"response = g2_engine.primeEngine()",
"_____no_output_____"
]
],
[
[
"## addRecord()\n\nOnce the Senzing engine is initialized, use addRecord() to load a record into the Senzing repository -- addRecord() can be called as many times as desired and from multiple threads at the same time. The addRecord() function returns \"0\" upon success, and accepts four parameters as input:\n\n- **datasource_code:** The name of the data source the record is associated with. This value is configurable to the system\n- **record_id:** The record ID, used to identify distinct records\n- **data_string:** A JSON document with the attribute data for the record\n- **load_id:** The observation load ID for the record; value can be null and will default to data_source\n",
"_____no_output_____"
]
],
[
[
"datasource_code = \"TEST\"\nrecord_id = \"1\"\nload_id = None\ndata = {\n\t\"NAMES\": [{\n\t\t\"NAME_TYPE\": \"PRIMARY\",\n\t\t\"NAME_LAST\": \"Smith\",\n\t\t\"NAME_FIRST\": \"John\",\n\t\t\"NAME_MIDDLE\": \"M\"\n\t}],\n\t\"PASSPORT_NUMBER\": \"PP11111\",\n\t\"PASSPORT_COUNTRY\": \"US\",\n\t\"DRIVERS_LICENSE_NUMBER\": \"DL11111\",\n\t\"SSN_NUMBER\": \"111-11-1111\"\n}\ndata_string = json.dumps(data)\n\ntry:\n g2_engine.addRecord(datasource_code, record_id, data_string, load_id)\n\nexcept G2Exception as err:\n print(g2_engine.getLastException())",
"_____no_output_____"
]
],
[
[
"## getRecord()\n\nUse `getRecord()` to retrieve a single record from the data repository; the record is assigned in JSON form to a user-designated buffer, and the function itself returns \"0\" upon success. Once the Senzing engine is initialized, `getRecord()` can be called as many times as desired and from multiple threads at the same time. The `getRecord()` function accepts the following parameters as input:\n\n- **data_source:** The name of the data source the record is associated with. This value is configurable to the system\n- **record_id:** The record ID, used to identify the record for retrieval\n- **response:** A memory buffer for returning the response document; if an error occurred, an error response is stored here",
"_____no_output_____"
]
],
[
[
"response_bytearray = bytearray()\n\ntry:\n g2_engine.getRecord(\n datasource_code,\n record_id,\n response_bytearray)\n\nexcept G2Exception as err:\n print(g2_engine.getLastException())\nRenderJSON(response_bytearray)",
"_____no_output_____"
]
],
[
[
"## getEntityByRecordID()\n\nUse `getEntityByRecordID()` to retrieve entity data based on the record ID of a particular data record. This function accepts the following parameters as input:\n\n- **datasource_code:** The name of the data source the record is associated with. This value is configurable to the system\n- **record_id:** The record ID for a particular data record\n- **response:** A memory buffer for returning the response document; if an error occurred, an error response is stored here.",
"_____no_output_____"
]
],
[
[
"response_list = []\nresult = g2_engine.getEntityByRecordID(datasource_code, record_id, response_list)\n\nresponse_string = \"\".join(response_list)\nresponse_dictionary = json.loads(response_string)\nresponse = json.dumps(response_dictionary, sort_keys=True, indent=4)\nprint(\"Result: {0}\\n{1}\".format(result, response))",
"_____no_output_____"
]
],
[
[
"## getEntityByEntityID()\n\nUse `getEntityByEntityID()` to retrieve entity data based on the ID of a resolved identity. This function accepts the following parameters as input:\n\n- **entity_id:** The numeric ID of a resolved entity\n- **response:** A memory buffer for returning the response document; if an error occurred, an error response is stored here.",
"_____no_output_____"
]
],
[
[
"entity_id = 1\nresponse_bytearray = bytearray()\n\ntry:\n g2_engine.getEntityByEntityID(entity_id, response_bytearray)\n\nexcept G2Exception as err:\n print(g2_engine.getLastException())\nRenderJSON(response_bytearray)",
"_____no_output_____"
]
],
[
[
"## searchByAttributes()\n\nUse `searchByAttributes()` to retrieve entity data based on a user-specified set of entity attributes. This function accepts the following parameters as input:\n\n- **data_string:** A JSON document with the attribute data to search for\n- **response:** A memory buffer for returning the response document; if an error occurred, an error response is stored here.",
"_____no_output_____"
]
],
[
[
"response_bytearray = bytearray()\n\ntry:\n g2_engine.searchByAttributes(data_string, response_bytearray)\n\nexcept G2Exception as err:\n print(g2_engine.getLastException())\nRenderJSON(response_bytearray)",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
]
|
ec675c6dce2ecf4260b54de11baf9cd39e083057 | 6,930 | ipynb | Jupyter Notebook | notebooks/pipeline/pipeline_11.ipynb | GTDev87/tpt-hackathon | c6ce8bb970d59a8cac8e55137829cf014c98124a | [
"MIT"
]
| 2 | 2018-07-02T17:20:35.000Z | 2021-02-12T16:30:35.000Z | notebooks/pipeline/pipeline_11.ipynb | qinwf-nuan/keras-js | dafc91e6f58bd663656872014cf1d1bd5359c97d | [
"MIT"
]
| 1 | 2017-09-27T18:41:18.000Z | 2017-09-27T18:41:18.000Z | notebooks/pipeline/pipeline_11.ipynb | GTDev87/tpt-hackathon | c6ce8bb970d59a8cac8e55137829cf014c98124a | [
"MIT"
]
| 1 | 2018-02-02T17:35:01.000Z | 2018-02-02T17:35:01.000Z | 55 | 3,794 | 0.635065 | [
[
[
"import numpy as np\nimport json\nfrom keras.models import Model\nfrom keras.layers import Input\nfrom keras.layers.convolutional import Conv2D\nfrom keras.layers.pooling import MaxPooling2D, AveragePooling2D\nfrom keras.layers.normalization import BatchNormalization\nfrom keras import backend as K",
"Using TensorFlow backend.\n"
],
[
"def format_decimal(arr, places=8):\n return [round(x * 10**places) / 10**places for x in arr]",
"_____no_output_____"
]
],
[
[
"### pipeline 11",
"_____no_output_____"
]
],
[
[
"data_in_shape = (8, 8, 2)\n\nconv_0 = Conv2D(4, 3, 3, activation='relu', border_mode='valid', subsample=(1, 1), dim_ordering='tf', bias=True)\npool_0 = AveragePooling2D(pool_size=(2, 2), strides=(1, 1), border_mode='valid', dim_ordering='tf')\n\ninput_layer = Input(shape=data_in_shape)\nx = conv_0(input_layer)\noutput_layer = pool_0(x)\nmodel = Model(input=input_layer, output=output_layer)\n\nnp.random.seed(12000)\ndata_in = 2 * np.random.random(data_in_shape) - 1\n\n# set weights to random (use seed for reproducibility)\nweights = []\nfor i, w in enumerate(model.get_weights()):\n np.random.seed(12000 + i)\n weights.append(2 * np.random.random(w.shape) - 1)\nmodel.set_weights(weights)\n\nresult = model.predict(np.array([data_in]))\n\nprint({\n 'input': {'data': format_decimal(data_in.ravel().tolist()), 'shape': list(data_in_shape)},\n 'weights': [{'data': format_decimal(weights[i].ravel().tolist()), 'shape': list(weights[i].shape)} for i in range(len(weights))],\n 'expected': {'data': format_decimal(result[0].ravel().tolist()), 'shape': list(result[0].shape)}\n})",
"{'weights': [{'data': [-0.19388144, -0.19001575, -0.09731586, -0.20392132, 0.52195872, -0.83362812, -0.41735846, -0.4328975, 0.43380002, -0.20766614, -0.7679554, -0.13498828, 0.81221727, -0.7898045, -0.40313372, -0.7062533, -0.36001102, -0.51930514, -0.55812459, -0.95621084, -0.7941376, -0.99856347, -0.60933887, -0.94050465, 0.85984897, 0.06789528, -0.53725708, 0.59134801, -0.39666546, 0.89661752, 0.22910667, 0.61376976, -0.93875236, 0.65402795, 0.1428073, -0.33452599, 0.90611209, 0.159331, 0.87139424, 0.19330817, 0.03415369, 0.48841223, -0.21573357, 0.76914577, 0.58694333, -0.75001631, 0.68083019, 0.91907979, -0.47421929, 0.87404362, -0.71564904, 0.8386454, 0.74925525, -0.29101729, -0.79739646, -0.72944405, 0.61943975, 0.63606455, 0.83240359, 0.00771567, -0.08668039, -0.66323566, -0.6568711, -0.08186235, 0.03399537, 0.2039048, 0.90356066, 0.09368177, -0.03195618, -0.61772711, 0.3717567, 0.88043978], 'shape': [3, 3, 2, 4]}, {'data': [-0.2865981, -0.73947835, 0.60869202, -0.19931851], 'shape': [4]}], 'input': {'data': [-0.19388144, -0.19001575, -0.09731586, -0.20392132, 0.52195872, -0.83362812, -0.41735846, -0.4328975, 0.43380002, -0.20766614, -0.7679554, -0.13498828, 0.81221727, -0.7898045, -0.40313372, -0.7062533, -0.36001102, -0.51930514, -0.55812459, -0.95621084, -0.7941376, -0.99856347, -0.60933887, -0.94050465, 0.85984897, 0.06789528, -0.53725708, 0.59134801, -0.39666546, 0.89661752, 0.22910667, 0.61376976, -0.93875236, 0.65402795, 0.1428073, -0.33452599, 0.90611209, 0.159331, 0.87139424, 0.19330817, 0.03415369, 0.48841223, -0.21573357, 0.76914577, 0.58694333, -0.75001631, 0.68083019, 0.91907979, -0.47421929, 0.87404362, -0.71564904, 0.8386454, 0.74925525, -0.29101729, -0.79739646, -0.72944405, 0.61943975, 0.63606455, 0.83240359, 0.00771567, -0.08668039, -0.66323566, -0.6568711, -0.08186235, 0.03399537, 0.2039048, 0.90356066, 0.09368177, -0.03195618, -0.61772711, 0.3717567, 0.88043978, 0.52451349, 0.65639836, -0.11815827, -0.5935923, -0.81520379, -0.11653893, -0.30373012, 0.23333123, -0.76455189, 0.7411041, -0.73334668, -0.64117813, -0.52816925, 0.72175618, -0.20883569, 0.91916448, -0.94918497, -0.73977154, 0.48306794, 0.53576453, 0.42844311, -0.40423223, 0.58140529, -0.80551698, -0.58405683, -0.77108705, -0.5249814, -0.3140284, -0.90075427, -0.27333001, -0.01088696, -0.78774164, -0.11778349, 0.57551081, 0.51202998, 0.30001746, -0.09656189, -0.91341054, -0.45544411, -0.55559633, 0.67950395, 0.85886773, 0.55813851, 0.95827444, -0.69859671, 0.93609409, -0.33106889, 0.85475746, 0.04118771, -0.50402696, -0.75944605, -0.39477802, -0.16001778, 0.02823298, -0.23748051, 0.5115979], 'shape': [8, 8, 2]}, 'expected': {'data': [0.06974199, 1.22076452, 1.82424057, 0.7483421, 0.06974199, 1.20541334, 1.62150133, 1.17863536, 0.30199394, 0.37979802, 1.5488044, 1.49579191, 0.84266365, 0.13482136, 1.24629283, 0.6912452, 1.08216572, 0.37153888, 1.31310272, 0.84929281, 0.27304667, 1.38722563, 1.40454769, 0.7483421, 0.24490587, 1.27858114, 1.15411615, 1.25485075, 0.47715783, 0.37597644, 1.19949126, 1.28506064, 0.42179984, 0.34311506, 0.72356665, 0.32109338, 0.11980587, 0.34311506, 0.0, 0.06555998, 0.55969381, 0.22475854, 0.3414948, 0.82348567, 0.53155297, 0.13528675, 0.13688047, 1.1318965, 0.17516388, 0.0, 0.0, 0.30841088, 0.22057109, 0.20829371, 0.27007097, 0.0, 0.40048063, 0.22537969, 0.59105206, 0.00139774, 0.35638911, 0.0, 0.59383386, 0.8933332, 0.35638911, 0.0, 0.92993259, 0.82348567, 0.17073184, 0.0, 0.60969913, 0.4473317, 0.34426671, 0.0583894, 0.66471696, 0.4473317, 0.35344443, 0.75945652, 
0.86008, 0.05082553, 0.0, 0.0, 0.45695338, 0.06984752, 0.37820315, 0.0, 0.80457485, 0.0, 0.69989324, 0.0, 0.66324008, 0.4473317, 0.32169005, 0.34720582, 0.49270964, 0.4473317, 0.0, 1.03118694, 0.32507342, 0.04942778], 'shape': [5, 5, 4]}}\n"
]
]
]
| [
"code",
"markdown",
"code"
]
| [
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
]
|
ec6762379dbd7242c61ca71ceb5ffa6f24c01673 | 318,566 | ipynb | Jupyter Notebook | physics-statics-problem-sympy/physics-statics-problem-sympy.ipynb | mayanktiwari5225/physics-problems | 1ef50d60e1cdc2b544e4a1cb73628f8529ff9b37 | [
"MIT"
]
| null | null | null | physics-statics-problem-sympy/physics-statics-problem-sympy.ipynb | mayanktiwari5225/physics-problems | 1ef50d60e1cdc2b544e4a1cb73628f8529ff9b37 | [
"MIT"
]
| null | null | null | physics-statics-problem-sympy/physics-statics-problem-sympy.ipynb | mayanktiwari5225/physics-problems | 1ef50d60e1cdc2b544e4a1cb73628f8529ff9b37 | [
"MIT"
]
| null | null | null | 438.192572 | 221,496 | 0.934544 | [
[
[
"# Solving Statics Problem using SymPy\n\n\n## Question\n\n",
"_____no_output_____"
],
[
"## Solution\n\n### Installing [SymPy](https://pypi.org/project/sympy/) using [pip](https://pypi.org/project/pip/)\n\n\n~~~bash\npython3 -m pip install sympy\n~~~\n\n### Importing necessary modules from SymPy\n\n* [printing](https://docs.sympy.org/latest/modules/printing.html) - Useful for printing complex equations\n* [plotting](https://docs.sympy.org/latest/modules/plotting.html) - To print necessary plots in part (b) of question\n\n",
"_____no_output_____"
]
],
[
[
"from sympy.interactive import printing\nprinting.init_printing(use_latex=True)\nfrom sympy.plotting import plot\nfrom sympy import *\nimport sympy as sp",
"_____no_output_____"
]
],
[
[
"### Inserting equations and displaying the result",
"_____no_output_____"
]
],
[
[
"eq1 = sp.Function('eq1')\neq2 = sp.Function('eq2')\neq3 = sp.Function('eq3')\neq4 = sp.Function('eq4')\n\nAx, Ay, A, T, lc, l, d, w = sp.symbols('A_x A_y A T l_c l d w')\n\n#defining the equations\neq1 = Eq(Ax-T*(d/lc),0)\neq2 = Eq(Ay-(T*(sqrt(lc**2 - d**2))/lc)-w, 0)\neq3 = Eq((T*(sqrt(lc**2 - d**2)/lc)*d)-w*l, 0)\n\nprint(\"Given equations are\")\ndisplay(eq1)\ndisplay(eq2)\ndisplay(eq3)",
"Given equations are\n"
]
],
[
[
"### Solving the given equations using ``solve``\n\nSolving the given three equations gives the three forces **$ T $**, **$ {A_x} $** and **$ A_y $** in terms of *$ d, l_c\\ , l $ and $ w $*",
"_____no_output_____"
]
],
[
[
"#----------------------------------------PART (a)----------------------------------------#\n\n#solving the above three equations in terms of constants l, l_c, w and d\nprint(\"The forces in terms of given constants are\")\nsolution = solve([eq1, eq2, eq3], [Ax, Ay, T])\n\n#to display result in proper format\nsolution_dict = {sympify(key): sympify(solution[key], locals={'A_x': Ax, 'A_y' : Ay, 'T' : T}) for key in solution} \nfor key in solution_dict:\n display(Eq(key, dict_parser(key, solution_dict)))",
"The forces in terms of given constants are\n"
]
],
[
[
"### Subsituting given values in above equations\n\nSubstiting *$ l = 10\\,ft\\ , \\, l_c=5.5\\,ft \\ $ and $ w = 200 \\, lbs $* in given equations and displaying the results",
"_____no_output_____"
]
],
[
[
"#----------------------------------------PART (b)----------------------------------------#\n\n#equations after subsituting given values of\n#l = 10 ft\n#l_c = 5.5 ft\n#w = 200 lbs\neq1_s = eq1.subs([(l, 10), (lc, 5.5), (w, 200)])\neq2_s = eq2.subs([(l, 10), (lc, 5.5), (w, 200)])\neq3_s = eq3.subs([(l, 10), (lc, 5.5), (w, 200)])\n\nprint(\"\\n\\nEquations after subsituting given values are\")\ndisplay(eq1_s)\ndisplay(eq2_s)\ndisplay(eq3_s)",
"\n\nEquations after subsituting given values are\n"
]
],
[
[
"### Solving above equations in terms of $ d $\n\n\nOn solving above three equations in terms of $ d $ gives **$ T $**, **$ {A_x} $** and **$ A_y $** as function of a single variable $ d $",
"_____no_output_____"
]
],
[
[
"#solving the above three equations in terms of d\nprint(\"\\nThe quantities in terms of d are\")\nsolution_s = solve([eq1_s, eq2_s, eq3_s], [Ax, Ay, T])\n\n#printing the solved equations\nsolution_s_dict = {sympify(key): sympify(solution_s[key], locals={'A_x': Ax, 'A_y' : Ay, 'T' : T}) for key in solution} \nfor key in solution_s_dict:\n display(Eq(key, dict_parser(key, solution_s_dict)))",
"\nThe quantities in terms of d are\n"
]
],
[
[
"### Obtaining equation for force $ A $\n\nThe resultant of force components **$ A_x $** and **$ A_y $** is obtained as force **$ A $**\n\nBy subsituting values of **$ A_x $** and **$ A_y $** in equation **$ A = \\sqrt {A_x^2 +A_y^2} $** , force **$ A $** is obtained as a function of $ d $ ",
"_____no_output_____"
]
],
[
[
"#equation for A (resultant of Ax and Ay)\neq4 = Eq(A, sqrt(Ax**2 + Ay**2))\ndisplay(eq4)\n\n#getting equations of Ax and Ay after subsitution\nAx_s = dict_parser(Ax, solution_s_dict)\nAy_s = dict_parser(Ay, solution_s_dict)\n\n#equation of A(d) after subsituting the equation of Ax and Ay\nA_s = sqrt(Ax_s**2 + Ay_s**2)\ndisplay(Eq(A, A_s))",
"_____no_output_____"
]
],
[
[
"### Plotting $ A(d) $ and $ T(d) $ in the domain $ d \\in [0.5,5]$ ",
"_____no_output_____"
]
],
[
[
"#plotting A(d) as a function of d\nprint(\"\\nThe the plot of A(d) is\")\nplot(A_s, (d, 0.5, 5), xlabel=\"d\", ylabel=\"A(d)\")\n\n#getting the equation of T(d) after subsitution of known values\nT_s = dict_parser(T, solution_s_dict)\nprint(\"\\nThe the plot of T(d) is\")\nplot(T_s, (d, 0.5, 5), xlabel=\"d\", ylabel=\"T(d)\")",
"\nThe the plot of A(d) is\n"
]
],
[
[
"### Finding minima of $ T(d) $\n\nFor maxima or minima the first derivative of function must be equal to **ZERO**\n\nHence, $T'(d) = 0$\n\nOn solving above equation only those values of $ d $ are to be selected which lie within the given domain $ d \\in [0.5,5]$\n\nFor a minima to occur at above point required condition is that second derivative of $T(d)$ must be greater than zero (_positive_)\n\nHence, for minima, $T''(d) > 0$",
"_____no_output_____"
]
],
[
[
"#----------------------------------------PART (c)----------------------------------------#\n\n#evaluating the derivative of T(d) with respect to d\ndTdd = T_s.diff(d)\n\n#equating the derivative of T(d) to zero and solving for d to give the values of minima\neq5 = Eq(dTdd, 0)\nextremum_values_d_T = solve(eq5, d)\n#evaluating second derivative of T(d) with respect to T\nd2Tdd2 = dTdd.diff(d)\n\nfor d_min in extremum_values_d_T:\n d2Tdd2_d_min = d2Tdd2.subs(d, d_min)\n \n #condition for minima T''(d_min) > 0\n if d2Tdd2_d_min > 0:\n if 0.5 <= d_min <= 5:\n #saving the minimum value of d within the range in d_min_T variable\n d_min_T = d_min\n print(\"\\n\\nThe minima of T(d) occurs at %0.3f\" % d_min_T)\n\n#evaluating the minimun value of T(d)\nmin_T = T_s.subs(d, d_min_T)\nprint(\"The minimum value of T(d) at d = %.3f\" % d_min_T, \" is %.3f\" % min_T)",
"\n\nThe minima of T(d) occurs at 3.889\nThe minimum value of T(d) at d = 3.889 is 727.273\n"
]
],
[
[
"### Finding minima of $ A(d) $\n\nSimiliar to previous part, for maxima or minima the first derivative of function must be equal to **ZERO**\n\nHence, $A'(d) = 0$\n\nOn solving above equation only those values of $ d $ are to be selected which lie within the given domain $ d \\in [0.5,5]$\n\nFor a minima to occur at above point required condition is that second derivative of $A(d)$ must be greater than zero (_positive_)\n\nHence, for minima, $A''(d) > 0$",
"_____no_output_____"
]
],
[
[
"#----------------------------------------PART (d)----------------------------------------#\n\n#evaluating the derivative of A(d) with respect to d\ndAdd = A_s.diff(d)\n\n#equating the derivative of A(d) to zero and solving for d to give the values of minima\neq6 = Eq(dAdd, 0)\nextremum_values_d_A = solve(eq6, d)\n#evaluating second derivative of T(d) with respect to T\nd2Add2 = dAdd.diff(d)\n\nfor d_min in extremum_values_d_A:\n #considering only real values of d_min\n check_if_real = d_min.is_real\n if check_if_real == 1:\n d2Add2_d_min = d2Add2.subs(d, d_min)\n #condition for minima T''(d_min) > 0\n if d2Add2_d_min > 0:\n #considering values only within the desired range\n if 0.5 <= d_min <= 5:\n #saving the minimum value of d within the range in d_min_T variable\n d_min_A = d_min\n print(\"\\n\\nThe minima of A(d) occurs at %0.3f\" % d_min_A)\n\n#evaluating the minimun value of T(d)\nmin_A = A_s.subs(d, d_min_A)\nprint(\"The minimum value of A(d) at d = %.3f\" % d_min_A, \" is %.3f\" % min_A)",
"\n\nThe minima of A(d) occurs at 4.051\nThe minimum value of A(d) at d = 4.051 is 877.643\n"
]
],
[
[
"### Dictionary parser function\n\nA function to parse dictionary obtained after subsituting values in the equations.\n\nThis function makes assigning values to newer variables easier. Also, the function allows the equations to be displayed with and $ = $ 'equal to' sign *(displaying a dictionary as it is uses a $ : $ colon to associate variables with the function)*",
"_____no_output_____"
]
],
[
[
"#function to print the results\ndef dict_parser(expr, dict):\n while True:\n new_expr = expr.subs(dict)\n if new_expr == expr:\n return expr\n else:\n expr = new_expr",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
]
|
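The statics notebook above finds each minimum by solving the first-derivative equation and then checking the sign of the second derivative. A minimal, self-contained sketch of that same solve/diff/subs workflow is shown below; the expression `t` is a made-up stand-in, not the tension or reaction equation from the notebook.

```python
# Hedged sketch of the minimisation pattern used in the statics notebook above.
# The expression t(d) here is illustrative only, not the notebook's tension equation.
import sympy as sp

d = sp.symbols('d', positive=True)
t = d + 16 / d                      # hypothetical smooth function of d

dt = sp.diff(t, d)                  # first derivative t'(d)
candidates = sp.solve(sp.Eq(dt, 0), d)

for c in candidates:
    # second-derivative test: a positive value indicates a minimum
    if sp.diff(t, d, 2).subs(d, c) > 0:
        print("minimum at d =", c, "with value", t.subs(d, c))
```

For this toy expression the sketch prints a minimum at d = 4 with value 8, mirroring how the notebook selects the in-range root of T'(d) = 0 and A'(d) = 0.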
ec6762d1f018028bb0e0aee1539e3f06dc434d5d | 103,129 | ipynb | Jupyter Notebook | python_code/.ipynb_checkpoints/tweet_keywords-checkpoint.ipynb | mateoias/trump_tweet_analysis | bf892cdb1a5938a3dd70245484f9691b21182eab | [
"MIT"
]
| 1 | 2021-01-21T02:53:03.000Z | 2021-01-21T02:53:03.000Z | python_code/.ipynb_checkpoints/tweet_keywords-checkpoint.ipynb | mateoias/trump_tweet_analysis | bf892cdb1a5938a3dd70245484f9691b21182eab | [
"MIT"
]
| null | null | null | python_code/.ipynb_checkpoints/tweet_keywords-checkpoint.ipynb | mateoias/trump_tweet_analysis | bf892cdb1a5938a3dd70245484f9691b21182eab | [
"MIT"
]
| 1 | 2020-11-07T05:41:45.000Z | 2020-11-07T05:41:45.000Z | 41.054538 | 93 | 0.406365 | [
[
[
"import pandas as pd\nimport numpy as np",
"_____no_output_____"
],
[
"dataset = pd.read_csv(\"condensed_dow_and_sentiment.csv\")\ndataset",
"_____no_output_____"
],
[
"dataset[\"Tweet_text\"] = dataset[\"Tweet_text\"].str.lower()",
"_____no_output_____"
],
[
"#use filtered_data for tweets within the desired date range\nstart_date = '2018-01-01 00:00:00+00:00'\nend_date = '2018-12-31 00:00:00+00:00'\n\nbegin = dataset[\"Time\"] >= start_date\nend = dataset[\"Time\"] <= end_date\n\nbetween_dates = begin & end\nfiltered_data = dataset.loc[between_dates]\nfiltered_data",
"_____no_output_____"
],
[
"market_filtered = filtered_data[filtered_data['Tweet_text'].str.contains('market')]\nmarket_filtered\n\n#add a column thats the high minus the low for total variation for the day = volatility",
"_____no_output_____"
],
[
"market = dataset[dataset['Tweet_text'].str.contains('market')]\nmarket",
"_____no_output_____"
],
[
"stock = dataset[dataset['Tweet_text'].str.contains('stock')]\nstock",
"_____no_output_____"
],
[
"dow = dataset[dataset['Tweet_text'].str.contains('dow')]\ndow",
"_____no_output_____"
],
[
"dow = dataset[dataset['Tweet_text'].str.contains('dow ')]\ndow.head()",
"_____no_output_____"
],
[
"economy = dataset[dataset['Tweet_text'].str.contains('economy')]\neconomy",
"_____no_output_____"
],
[
"china = dataset[dataset['Tweet_text'].str.contains('china')]\nchina",
"_____no_output_____"
],
[
"tax_cuts = dataset[dataset['Tweet_text'].str.contains('tax cuts')]\ntax_cuts",
"_____no_output_____"
],
[
"rally = dataset[dataset['Tweet_text'].str.contains('rally')]\nrally",
"_____no_output_____"
],
[
"jobs = dataset[dataset['Tweet_text'].str.contains('jobs')]\njobs",
"_____no_output_____"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
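The tweet-keyword notebook above repeats the same lowercase-then-`str.contains` filter for each keyword. A small standalone sketch of that pattern on made-up rows (the DataFrame below is illustrative, not the condensed Dow/sentiment file used there) is:

```python
# Hedged sketch of the keyword-filtering pattern used in the notebook above,
# run on a tiny made-up DataFrame instead of the real tweet dataset.
import pandas as pd

df = pd.DataFrame({"Tweet_text": ["The STOCK market is up!",
                                  "Great talks with China today",
                                  "Nothing to see here"]})
df["Tweet_text"] = df["Tweet_text"].str.lower()   # lowercase once, filter many times

keywords = ["market", "stock", "china", "jobs"]
counts = {kw: int(df["Tweet_text"].str.contains(kw).sum()) for kw in keywords}
print(counts)   # {'market': 1, 'stock': 1, 'china': 1, 'jobs': 0}
```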
ec676994025b7ac718f4203246539af4ee01f5a6 | 5,421 | ipynb | Jupyter Notebook | parse_gen_posts.ipynb | henrikBjorserud/twitbot | a3931125d69e1785094dbfadb9895b42203b0817 | [
"MIT"
]
| null | null | null | parse_gen_posts.ipynb | henrikBjorserud/twitbot | a3931125d69e1785094dbfadb9895b42203b0817 | [
"MIT"
]
| null | null | null | parse_gen_posts.ipynb | henrikBjorserud/twitbot | a3931125d69e1785094dbfadb9895b42203b0817 | [
"MIT"
]
| null | null | null | 22.873418 | 212 | 0.518908 | [
[
[
"import pickle",
"_____no_output_____"
],
[
"with open('gpt2_gentext_20220331_131430.txt') as f: #generated from gpt2 simple, 1500 epochs, lr = 1e-5 and 0,7 temp\n lines = f.readlines()",
"_____no_output_____"
],
[
" # <|startoftext|> I've just fallen in love med crushbaarn på. I'm glad jag satte på henne det en gång. Hon andas barn och höll p undan. I ❤ me <|endoftext|>\n # searchesd and removed neger\n# <|startoftext|> <|endoftext|> Båda barnen har förseglats mellan två oförtjänta ben. Idag är det Karen. <|endoftext|>\n\n",
"_____no_output_____"
],
[
"print(len(lines[3]))",
"202\n"
],
[
"print(lines[3])",
"<|startoftext|> Alla fuktiga er som jag orkat vänta en roll i smulad sjukdom. Alla fuktiga er som jag har byggt i målat ribbla. Alla fuktiga er som jag bjudit in i smulad hemmets fantasy. <|endoftext|>\n\n"
],
[
"def remove_breakers(line: str) -> str:\n line = line.replace('<|startoftext|>', '')\n line = line.replace('<|endoftext|>', '')\n line = line.replace('endoftext', '')\n line = line.replace('startoftext', '')\n line = line.replace('^', '')\n line = line.replace('0', '')\n line = line.replace('|', '')\n line = line.replace('>', '')\n line = line.replace('<', '')\n line = line.replace('<', '')\n line = line.replace('====================', '')\n line = line.strip()\n line = \" \".join(line.split())\n return line",
"_____no_output_____"
],
[
"def truncate_lines(lines: list) -> list:\n lines = [line for line in lines if len(line) < 250]\n lines = [line for line in lines if len(line) > 10]\n return lines",
"_____no_output_____"
],
[
"lines = [remove_breakers(i) for i in lines]",
"_____no_output_____"
],
[
"print(len(lines))",
"5674\n"
],
[
"lines = truncate_lines(lines)",
"_____no_output_____"
],
[
"print(len(lines))",
"3502\n"
],
[
"#print(lines)",
"_____no_output_____"
],
[
"#for i in lines:\n# print(i)",
"_____no_output_____"
],
[
"pickle.dump(lines, open( \"lines.pickle\", \"wb\" ) )",
"_____no_output_____"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
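The cleanup notebook above ends by pickling the filtered lines. A complementary sketch of how they could be reloaded later (the file name simply mirrors the `lines.pickle` written above; the bot-side usage is an assumption):

```python
# Hedged sketch: reloading the cleaned posts saved by the notebook above.
# Assumes "lines.pickle" exists, i.e. the pickle.dump call shown there has been run.
import pickle
import random

with open("lines.pickle", "rb") as f:
    lines = pickle.load(f)

print(len(lines), "candidate posts loaded")
print(random.choice(lines))   # pick one post at random, e.g. for the next tweet
```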
ec67706e62943c2306429aabcc828020a3d8f2b7 | 5,103 | ipynb | Jupyter Notebook | stats_lessons/TraditionalConfidenceIntervals.ipynb | Zabamund/datasci-nano | ec8d8e4b04670b1827ea7e0e304dde5c3ab11fd7 | [
"MIT"
]
| null | null | null | stats_lessons/TraditionalConfidenceIntervals.ipynb | Zabamund/datasci-nano | ec8d8e4b04670b1827ea7e0e304dde5c3ab11fd7 | [
"MIT"
]
| null | null | null | stats_lessons/TraditionalConfidenceIntervals.ipynb | Zabamund/datasci-nano | ec8d8e4b04670b1827ea7e0e304dde5c3ab11fd7 | [
"MIT"
]
| 1 | 2018-03-11T22:03:02.000Z | 2018-03-11T22:03:02.000Z | 26.169231 | 244 | 0.475407 | [
[
[
"# imports\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\nnp.random.seed(42)\n\ncoffee_full = pd.read_csv('coffee_dataset.csv')\ncoffee_red = coffee_full.sample(200)\ncoffee_full.head(3)",
"_____no_output_____"
],
[
"# bootsrap approach to difference in mean\n# set up empty array\ndiff = []\n\n# run sampling xTimes\nfor _ in range(int(1e4)):\n # set up bootsample replacing sampled value back in 'hat', ie can be picked again\n bootsample = coffee_red.sample(200, replace=True)\n # get means\n mean_coff = bootsample[bootsample['drinks_coffee'] == True]['height'].mean()\n mean_nocoff = bootsample[bootsample['drinks_coffee'] == False]['height'].mean()\n # add diff in mean to empty array\n diff.append(mean_coff - mean_nocoff)\n\n# get 95-percentile\nnp.percentile(diff, 2.5), np.percentile(diff, 97.5)",
"_____no_output_____"
],
[
"# inbuilt t-test method\nimport statsmodels.stats.api as sms\n\nX1 = coffee_red[coffee_red['drinks_coffee'] == True]['height']\nX2 = coffee_red[coffee_red['drinks_coffee'] == False]['height']\n\ncm = sms.CompareMeans(sms.DescrStatsW(X1), sms.DescrStatsW(X2))\ncm.tconfint_diff(usevar='unequal')",
"/Users/geodev/anaconda3/lib/python3.6/site-packages/statsmodels/compat/pandas.py:56: FutureWarning: The pandas.core.datetools module is deprecated and will be removed in a future version. Please use the pandas.tseries module instead.\n from pandas.core import datetools\n"
]
],
[
[
"Percentiles are nearly identical whichever method is used.",
"_____no_output_____"
]
]
]
| [
"code",
"markdown"
]
| [
[
"code",
"code",
"code"
],
[
"markdown"
]
]
|
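The confidence-interval notebook above compares a bootstrap percentile interval with statsmodels' t-based interval on the coffee data. A synthetic-data sketch of the same comparison is shown below; the heights are made up, and each group is resampled independently, which is a simplified variant of the notebook's joint resampling of the 200-row sample.

```python
# Hedged sketch comparing a bootstrap percentile interval with the t-based interval,
# on synthetic heights rather than the coffee dataset used above.
import numpy as np
import statsmodels.stats.api as sms

rng = np.random.default_rng(42)
x1 = rng.normal(68.0, 3.0, 200)   # stand-in for "drinks coffee" heights
x2 = rng.normal(66.5, 3.0, 200)   # stand-in for "does not drink coffee" heights

# bootstrap each group independently (simplified relative to the joint resampling above)
boot_diffs = [rng.choice(x1, x1.size, replace=True).mean() -
              rng.choice(x2, x2.size, replace=True).mean()
              for _ in range(10000)]
print(np.percentile(boot_diffs, 2.5), np.percentile(boot_diffs, 97.5))

# t-based interval for the same difference in means
cm = sms.CompareMeans(sms.DescrStatsW(x1), sms.DescrStatsW(x2))
print(cm.tconfint_diff(usevar='unequal'))
```

With a reasonably large sample, the two pairs of bounds come out very close, which is the point the notebook's closing remark makes.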
ec677f2a1a50fa47e47685a18b652fbfc928efe1 | 72,771 | ipynb | Jupyter Notebook | hackandplay/CP_VTON.ipynb | ZipBomb/cp-vton | 857d8b0b517f86e50182f8691fd8d9b7b08194bd | [
"MIT"
]
| null | null | null | hackandplay/CP_VTON.ipynb | ZipBomb/cp-vton | 857d8b0b517f86e50182f8691fd8d9b7b08194bd | [
"MIT"
]
| null | null | null | hackandplay/CP_VTON.ipynb | ZipBomb/cp-vton | 857d8b0b517f86e50182f8691fd8d9b7b08194bd | [
"MIT"
]
| null | null | null | 60.340796 | 2,326 | 0.461035 | [
[
[
"!nvidia-smi",
"Sun Mar 14 22:40:44 2021 \n+-----------------------------------------------------------------------------+\n| NVIDIA-SMI 460.56 Driver Version: 460.32.03 CUDA Version: 11.2 |\n|-------------------------------+----------------------+----------------------+\n| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |\n| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |\n| | | MIG M. |\n|===============================+======================+======================|\n| 0 Tesla T4 Off | 00000000:00:04.0 Off | 0 |\n| N/A 40C P8 9W / 70W | 0MiB / 15109MiB | 0% Default |\n| | | N/A |\n+-------------------------------+----------------------+----------------------+\n \n+-----------------------------------------------------------------------------+\n| Processes: |\n| GPU GI CI PID Type Process name GPU Memory |\n| ID ID Usage |\n|=============================================================================|\n| No running processes found |\n+-----------------------------------------------------------------------------+\n"
],
[
"!git clone https://github.com/ZipBomb/cp-vton",
"Cloning into 'cp-vton'...\nremote: Enumerating objects: 4, done.\u001b[K\nremote: Counting objects: 100% (4/4), done.\u001b[K\nremote: Compressing objects: 100% (4/4), done.\u001b[K\nremote: Total 127 (delta 0), reused 1 (delta 0), pack-reused 123\u001b[K\nReceiving objects: 100% (127/127), 816.75 KiB | 20.42 MiB/s, done.\nResolving deltas: 100% (64/64), done.\n"
],
[
"!cd cp-vton && python data_download.py",
"[*] Downloading data...\n\n"
],
[
"!pip install tensorboardX",
"Collecting tensorboardX\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/af/0c/4f41bcd45db376e6fe5c619c01100e9b7531c55791b7244815bac6eac32c/tensorboardX-2.1-py2.py3-none-any.whl (308kB)\n\r\u001b[K |█ | 10kB 19.5MB/s eta 0:00:01\r\u001b[K |██▏ | 20kB 15.2MB/s eta 0:00:01\r\u001b[K |███▏ | 30kB 12.9MB/s eta 0:00:01\r\u001b[K |████▎ | 40kB 12.0MB/s eta 0:00:01\r\u001b[K |█████▎ | 51kB 8.1MB/s eta 0:00:01\r\u001b[K |██████▍ | 61kB 8.2MB/s eta 0:00:01\r\u001b[K |███████▍ | 71kB 8.6MB/s eta 0:00:01\r\u001b[K |████████▌ | 81kB 9.5MB/s eta 0:00:01\r\u001b[K |█████████▌ | 92kB 8.6MB/s eta 0:00:01\r\u001b[K |██████████▋ | 102kB 7.7MB/s eta 0:00:01\r\u001b[K |███████████▊ | 112kB 7.7MB/s eta 0:00:01\r\u001b[K |████████████▊ | 122kB 7.7MB/s eta 0:00:01\r\u001b[K |█████████████▉ | 133kB 7.7MB/s eta 0:00:01\r\u001b[K |██████████████▉ | 143kB 7.7MB/s eta 0:00:01\r\u001b[K |████████████████ | 153kB 7.7MB/s eta 0:00:01\r\u001b[K |█████████████████ | 163kB 7.7MB/s eta 0:00:01\r\u001b[K |██████████████████ | 174kB 7.7MB/s eta 0:00:01\r\u001b[K |███████████████████ | 184kB 7.7MB/s eta 0:00:01\r\u001b[K |████████████████████▏ | 194kB 7.7MB/s eta 0:00:01\r\u001b[K |█████████████████████▎ | 204kB 7.7MB/s eta 0:00:01\r\u001b[K |██████████████████████▎ | 215kB 7.7MB/s eta 0:00:01\r\u001b[K |███████████████████████▍ | 225kB 7.7MB/s eta 0:00:01\r\u001b[K |████████████████████████▍ | 235kB 7.7MB/s eta 0:00:01\r\u001b[K |█████████████████████████▌ | 245kB 7.7MB/s eta 0:00:01\r\u001b[K |██████████████████████████▌ | 256kB 7.7MB/s eta 0:00:01\r\u001b[K |███████████████████████████▋ | 266kB 7.7MB/s eta 0:00:01\r\u001b[K |████████████████████████████▋ | 276kB 7.7MB/s eta 0:00:01\r\u001b[K |█████████████████████████████▊ | 286kB 7.7MB/s eta 0:00:01\r\u001b[K |██████████████████████████████▊ | 296kB 7.7MB/s eta 0:00:01\r\u001b[K |███████████████████████████████▉| 307kB 7.7MB/s eta 0:00:01\r\u001b[K |████████████████████████████████| 317kB 7.7MB/s \n\u001b[?25hRequirement already satisfied: numpy in /usr/local/lib/python3.7/dist-packages (from tensorboardX) (1.19.5)\nRequirement already satisfied: six in /usr/local/lib/python3.7/dist-packages (from tensorboardX) (1.15.0)\nRequirement already satisfied: protobuf>=3.8.0 in /usr/local/lib/python3.7/dist-packages (from tensorboardX) (3.12.4)\nRequirement already satisfied: setuptools in /usr/local/lib/python3.7/dist-packages (from protobuf>=3.8.0->tensorboardX) (54.0.0)\nInstalling collected packages: tensorboardX\nSuccessfully installed tensorboardX-2.1\n"
],
[
"!cd cp-vton && python train.py --name gmm_train_new --stage GMM --workers 4 --save_count 5000 --shuffle",
"Namespace(batch_size=4, checkpoint='', checkpoint_dir='checkpoints', data_list='train_pairs.txt', datamode='train', dataroot='data', decay_step=100000, display_count=20, fine_height=256, fine_width=192, gpu_ids='', grid_size=5, keep_step=100000, lr=0.0001, name='gmm_train_new', radius=5, save_count=5000, shuffle=True, stage='GMM', tensorboard_dir='tensorboard', workers=4)\nStart to train stage: GMM, named: gmm_train_new!\n/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 4 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n cpuset_checked))\ninitialization method [normal]\ninitialization method [normal]\n/usr/local/lib/python3.7/dist-packages/torch/nn/functional.py:3826: UserWarning: Default grid_sample and affine_grid behavior has changed to align_corners=False since 1.3.0. Please specify align_corners=True if the old behavior is desired. See the documentation of grid_sample for details.\n \"Default grid_sample and affine_grid behavior has changed \"\nstep: 20, time: 0.608, loss: 0.076038\nstep: 40, time: 0.579, loss: 0.174177\nstep: 60, time: 0.578, loss: 0.086810\nstep: 80, time: 0.728, loss: 0.156248\nstep: 100, time: 0.661, loss: 0.158212\nstep: 120, time: 0.668, loss: 0.132279\nstep: 140, time: 0.632, loss: 0.146852\nstep: 160, time: 0.614, loss: 0.098327\nstep: 180, time: 0.602, loss: 0.053721\nstep: 200, time: 0.629, loss: 0.108041\nstep: 220, time: 0.622, loss: 0.118760\nstep: 240, time: 0.619, loss: 0.147240\nstep: 260, time: 0.581, loss: 0.104840\nstep: 280, time: 0.692, loss: 0.114260\nstep: 300, time: 0.599, loss: 0.134067\nstep: 320, time: 0.610, loss: 0.236127\nstep: 340, time: 0.556, loss: 0.087651\nstep: 360, time: 0.649, loss: 0.186804\nstep: 380, time: 0.682, loss: 0.198278\nstep: 400, time: 0.644, loss: 0.103931\nstep: 420, time: 0.591, loss: 0.128980\nstep: 440, time: 0.607, loss: 0.147639\nstep: 460, time: 0.626, loss: 0.142624\nstep: 480, time: 0.576, loss: 0.079114\nstep: 500, time: 0.547, loss: 0.101477\nstep: 520, time: 0.567, loss: 0.113824\nstep: 540, time: 0.595, loss: 0.135152\nstep: 560, time: 0.636, loss: 0.121002\nstep: 580, time: 0.561, loss: 0.168318\nstep: 600, time: 0.553, loss: 0.069844\nstep: 620, time: 0.604, loss: 0.065273\nstep: 640, time: 0.643, loss: 0.063236\nstep: 660, time: 0.676, loss: 0.121830\nstep: 680, time: 0.687, loss: 0.140832\nstep: 700, time: 0.670, loss: 0.126243\nstep: 720, time: 0.681, loss: 0.085236\nstep: 740, time: 0.561, loss: 0.085914\nstep: 760, time: 0.648, loss: 0.098715\nstep: 780, time: 0.655, loss: 0.151142\nstep: 800, time: 0.640, loss: 0.185962\nstep: 820, time: 0.642, loss: 0.130808\nstep: 840, time: 0.604, loss: 0.102189\nstep: 860, time: 0.652, loss: 0.094681\nstep: 880, time: 0.649, loss: 0.108581\nstep: 900, time: 0.550, loss: 0.073362\nstep: 920, time: 0.607, loss: 0.091619\nstep: 940, time: 0.606, loss: 0.129636\nstep: 960, time: 0.580, loss: 0.111350\nstep: 980, time: 0.695, loss: 0.125975\nstep: 1000, time: 0.600, loss: 0.139405\nstep: 1020, time: 0.642, loss: 0.110915\nstep: 1040, time: 0.573, loss: 0.112727\nstep: 1060, time: 0.607, loss: 0.097495\nstep: 1080, time: 0.577, loss: 0.152796\nstep: 1100, time: 0.583, loss: 0.125268\nstep: 1120, time: 0.606, loss: 0.081837\nstep: 
1140, time: 0.629, loss: 0.049094\nstep: 1160, time: 0.568, loss: 0.076511\nstep: 1180, time: 0.608, loss: 0.129258\nstep: 1200, time: 0.622, loss: 0.083412\nstep: 1220, time: 0.589, loss: 0.154493\nstep: 1240, time: 0.506, loss: 0.181927\nstep: 1260, time: 0.642, loss: 0.153368\nstep: 1280, time: 0.590, loss: 0.106645\nstep: 1300, time: 0.584, loss: 0.086467\nstep: 1320, time: 0.564, loss: 0.055937\nstep: 1340, time: 0.649, loss: 0.118773\nstep: 1360, time: 0.624, loss: 0.153382\nstep: 1380, time: 0.562, loss: 0.137039\nstep: 1400, time: 0.640, loss: 0.133496\nstep: 1420, time: 0.584, loss: 0.128049\nstep: 1440, time: 0.658, loss: 0.108205\nstep: 1460, time: 0.594, loss: 0.104630\nstep: 1480, time: 0.544, loss: 0.077991\nstep: 1500, time: 0.619, loss: 0.176063\nstep: 1520, time: 0.627, loss: 0.068450\nstep: 1540, time: 0.662, loss: 0.084466\nstep: 1560, time: 0.571, loss: 0.038695\nstep: 1580, time: 0.605, loss: 0.159443\nstep: 1600, time: 0.585, loss: 0.076545\nstep: 1620, time: 0.612, loss: 0.111610\nstep: 1640, time: 0.692, loss: 0.100770\nstep: 1660, time: 0.634, loss: 0.068712\nstep: 1680, time: 0.587, loss: 0.122696\nstep: 1700, time: 0.623, loss: 0.141680\nstep: 1720, time: 0.575, loss: 0.062085\nstep: 1740, time: 0.676, loss: 0.088950\nstep: 1760, time: 0.588, loss: 0.121143\nstep: 1780, time: 0.675, loss: 0.104471\nstep: 1800, time: 0.709, loss: 0.114742\nstep: 1820, time: 0.605, loss: 0.058724\nstep: 1840, time: 0.643, loss: 0.239463\nstep: 1860, time: 0.554, loss: 0.124350\nstep: 1880, time: 0.616, loss: 0.126831\nstep: 1900, time: 0.614, loss: 0.216575\nstep: 1920, time: 0.622, loss: 0.123422\nstep: 1940, time: 0.623, loss: 0.127953\nstep: 1960, time: 0.616, loss: 0.131223\nstep: 1980, time: 0.598, loss: 0.112329\nstep: 2000, time: 0.560, loss: 0.098147\nstep: 2020, time: 0.636, loss: 0.169877\nstep: 2040, time: 0.602, loss: 0.093521\nstep: 2060, time: 0.572, loss: 0.112858\nstep: 2080, time: 0.595, loss: 0.074114\nstep: 2100, time: 0.607, loss: 0.072686\nstep: 2120, time: 0.620, loss: 0.136345\nstep: 2140, time: 0.579, loss: 0.131077\nstep: 2160, time: 0.581, loss: 0.110422\nstep: 2180, time: 0.643, loss: 0.097661\nstep: 2200, time: 0.714, loss: 0.092271\nstep: 2220, time: 0.572, loss: 0.088867\nstep: 2240, time: 0.601, loss: 0.062187\nstep: 2260, time: 0.564, loss: 0.094269\nstep: 2280, time: 0.593, loss: 0.261275\nstep: 2300, time: 0.599, loss: 0.176647\nstep: 2320, time: 0.568, loss: 0.064690\nstep: 2340, time: 0.595, loss: 0.068739\nstep: 2360, time: 0.657, loss: 0.149358\nstep: 2380, time: 0.564, loss: 0.090374\nstep: 2400, time: 0.608, loss: 0.075711\nstep: 2420, time: 0.572, loss: 0.147777\nstep: 2440, time: 0.633, loss: 0.084916\nstep: 2460, time: 0.609, loss: 0.104231\nstep: 2480, time: 0.575, loss: 0.092762\nstep: 2500, time: 0.593, loss: 0.077524\nstep: 2520, time: 0.663, loss: 0.281557\nstep: 2540, time: 0.615, loss: 0.099907\nstep: 2560, time: 0.564, loss: 0.078398\nstep: 2580, time: 0.672, loss: 0.118369\nstep: 2600, time: 0.609, loss: 0.138528\nstep: 2620, time: 0.581, loss: 0.059313\nstep: 2640, time: 0.630, loss: 0.107580\nstep: 2660, time: 0.599, loss: 0.099931\nstep: 2680, time: 0.619, loss: 0.143024\nstep: 2700, time: 0.672, loss: 0.093026\nstep: 2720, time: 0.561, loss: 0.075680\nstep: 2740, time: 0.715, loss: 0.117356\nstep: 2760, time: 0.589, loss: 0.108879\nstep: 2780, time: 0.636, loss: 0.120011\nstep: 2800, time: 0.550, loss: 0.091910\nstep: 2820, time: 0.571, loss: 0.070641\nstep: 2840, time: 0.617, loss: 0.093904\nstep: 2860, time: 0.590, loss: 
0.112236\nstep: 2880, time: 0.651, loss: 0.096632\nstep: 2900, time: 0.618, loss: 0.076449\nstep: 2920, time: 0.566, loss: 0.160319\nstep: 2940, time: 0.549, loss: 0.139218\nstep: 2960, time: 0.583, loss: 0.088811\nstep: 2980, time: 0.608, loss: 0.110841\nstep: 3000, time: 0.584, loss: 0.087213\nstep: 3020, time: 0.557, loss: 0.072797\nstep: 3040, time: 0.606, loss: 0.101539\nstep: 3060, time: 0.615, loss: 0.126335\nstep: 3080, time: 0.588, loss: 0.146767\nstep: 3100, time: 0.592, loss: 0.074324\nstep: 3120, time: 0.588, loss: 0.124024\nstep: 3140, time: 0.567, loss: 0.088082\nstep: 3160, time: 0.572, loss: 0.102814\nstep: 3180, time: 0.562, loss: 0.119214\nstep: 3200, time: 0.650, loss: 0.128492\nstep: 3220, time: 0.545, loss: 0.095789\nstep: 3240, time: 0.688, loss: 0.154693\nstep: 3260, time: 0.597, loss: 0.100397\nstep: 3280, time: 0.621, loss: 0.079750\nstep: 3300, time: 0.684, loss: 0.119331\nstep: 3320, time: 0.611, loss: 0.137230\nstep: 3340, time: 0.603, loss: 0.069948\nstep: 3360, time: 0.576, loss: 0.062897\nstep: 3380, time: 0.655, loss: 0.126799\nstep: 3400, time: 0.640, loss: 0.089570\nstep: 3420, time: 0.634, loss: 0.206792\nstep: 3440, time: 0.600, loss: 0.117028\nstep: 3460, time: 0.611, loss: 0.083708\nstep: 3480, time: 0.543, loss: 0.158219\nstep: 3500, time: 0.493, loss: 0.124209\nstep: 3520, time: 0.676, loss: 0.173244\nstep: 3540, time: 0.572, loss: 0.078655\nstep: 3560, time: 0.693, loss: 0.181347\nstep: 3580, time: 0.577, loss: 0.083367\nstep: 3600, time: 0.608, loss: 0.106915\nstep: 3620, time: 0.622, loss: 0.068995\nstep: 3640, time: 0.513, loss: 0.087397\nstep: 3660, time: 0.617, loss: 0.167466\nstep: 3680, time: 0.606, loss: 0.087206\nstep: 3700, time: 0.636, loss: 0.110646\nstep: 3720, time: 0.609, loss: 0.108699\nstep: 3740, time: 0.552, loss: 0.075476\nstep: 3760, time: 0.595, loss: 0.128715\nstep: 3780, time: 0.589, loss: 0.092004\nstep: 3800, time: 0.623, loss: 0.094843\nstep: 3820, time: 0.607, loss: 0.106452\nstep: 3840, time: 0.569, loss: 0.105161\nstep: 3860, time: 0.607, loss: 0.128874\nstep: 3880, time: 0.579, loss: 0.090016\nstep: 3900, time: 0.591, loss: 0.093776\nstep: 3920, time: 0.534, loss: 0.081761\nstep: 3940, time: 0.662, loss: 0.075957\nstep: 3960, time: 0.527, loss: 0.069734\nstep: 3980, time: 0.579, loss: 0.092794\nstep: 4000, time: 0.586, loss: 0.104531\nstep: 4020, time: 0.557, loss: 0.076564\nstep: 4040, time: 0.617, loss: 0.083693\nstep: 4060, time: 0.637, loss: 0.123799\nstep: 4080, time: 0.559, loss: 0.112192\nstep: 4100, time: 0.640, loss: 0.067340\nstep: 4120, time: 0.631, loss: 0.097202\nstep: 4140, time: 0.711, loss: 0.123849\nstep: 4160, time: 0.621, loss: 0.231747\nstep: 4180, time: 0.670, loss: 0.079210\nstep: 4200, time: 0.656, loss: 0.069305\nstep: 4220, time: 0.598, loss: 0.118327\nstep: 4240, time: 0.603, loss: 0.124271\nstep: 4260, time: 0.608, loss: 0.080415\nstep: 4280, time: 0.609, loss: 0.065394\nstep: 4300, time: 0.570, loss: 0.062727\nstep: 4320, time: 0.665, loss: 0.068947\nstep: 4340, time: 0.640, loss: 0.137200\nstep: 4360, time: 0.645, loss: 0.060076\nstep: 4380, time: 0.565, loss: 0.096535\nstep: 4400, time: 0.627, loss: 0.139462\nstep: 4420, time: 0.606, loss: 0.069732\nstep: 4440, time: 0.564, loss: 0.092605\nstep: 4460, time: 0.604, loss: 0.090389\nstep: 4480, time: 0.618, loss: 0.170773\nstep: 4500, time: 0.576, loss: 0.075337\nstep: 4520, time: 0.625, loss: 0.157822\nstep: 4540, time: 0.634, loss: 0.110895\nstep: 4560, time: 0.645, loss: 0.177472\nstep: 4580, time: 0.564, loss: 0.073394\nstep: 4600, time: 
0.595, loss: 0.087848\nstep: 4620, time: 0.614, loss: 0.113988\nstep: 4640, time: 0.590, loss: 0.109485\nstep: 4660, time: 0.575, loss: 0.121046\nstep: 4680, time: 0.672, loss: 0.091678\nstep: 4700, time: 0.623, loss: 0.120950\nstep: 4720, time: 0.599, loss: 0.115212\nstep: 4740, time: 0.594, loss: 0.092843\nstep: 4760, time: 0.611, loss: 0.114516\nstep: 4780, time: 0.587, loss: 0.075708\nstep: 4800, time: 0.594, loss: 0.100598\nstep: 4820, time: 0.623, loss: 0.123149\nstep: 4840, time: 0.631, loss: 0.100982\nstep: 4860, time: 0.621, loss: 0.062567\nstep: 4880, time: 0.618, loss: 0.091983\nstep: 4900, time: 0.611, loss: 0.155478\nstep: 4920, time: 0.603, loss: 0.143546\nstep: 4940, time: 0.617, loss: 0.077756\nstep: 4960, time: 0.699, loss: 0.091094\nstep: 4980, time: 0.604, loss: 0.083642\nstep: 5000, time: 0.612, loss: 0.110401\nstep: 5020, time: 0.564, loss: 0.136788\nstep: 5040, time: 0.603, loss: 0.085355\nstep: 5060, time: 0.597, loss: 0.105273\nstep: 5080, time: 0.653, loss: 0.124732\nstep: 5100, time: 0.564, loss: 0.096806\nstep: 5120, time: 0.577, loss: 0.048575\nstep: 5140, time: 0.577, loss: 0.061327\nstep: 5160, time: 0.599, loss: 0.144667\nstep: 5180, time: 0.644, loss: 0.149798\nstep: 5200, time: 0.580, loss: 0.088900\nstep: 5220, time: 0.641, loss: 0.066809\nstep: 5240, time: 0.584, loss: 0.057015\nstep: 5260, time: 0.621, loss: 0.094206\nstep: 5280, time: 0.595, loss: 0.086662\nstep: 5300, time: 0.569, loss: 0.061961\nstep: 5320, time: 0.596, loss: 0.097542\nstep: 5340, time: 0.563, loss: 0.094400\nstep: 5360, time: 0.658, loss: 0.093974\nstep: 5380, time: 0.656, loss: 0.131508\nstep: 5400, time: 0.606, loss: 0.104483\nstep: 5420, time: 0.604, loss: 0.089551\nstep: 5440, time: 0.548, loss: 0.092826\nstep: 5460, time: 0.641, loss: 0.090420\nstep: 5480, time: 0.550, loss: 0.068213\nstep: 5500, time: 0.572, loss: 0.074776\nstep: 5520, time: 0.625, loss: 0.102403\nstep: 5540, time: 0.542, loss: 0.087980\nstep: 5560, time: 0.649, loss: 0.118660\nstep: 5580, time: 0.569, loss: 0.108490\nstep: 5600, time: 0.611, loss: 0.085490\nstep: 5620, time: 0.596, loss: 0.198017\nstep: 5640, time: 0.566, loss: 0.109385\nstep: 5660, time: 0.649, loss: 0.182793\nstep: 5680, time: 0.703, loss: 0.161536\nstep: 5700, time: 0.596, loss: 0.062058\nstep: 5720, time: 0.662, loss: 0.113313\nstep: 5740, time: 0.560, loss: 0.075641\nstep: 5760, time: 0.577, loss: 0.087457\nstep: 5780, time: 0.620, loss: 0.108664\nstep: 5800, time: 0.642, loss: 0.107574\nstep: 5820, time: 0.614, loss: 0.072963\nstep: 5840, time: 0.672, loss: 0.102756\nstep: 5860, time: 0.606, loss: 0.097710\nstep: 5880, time: 0.626, loss: 0.071795\nstep: 5900, time: 0.618, loss: 0.081987\nstep: 5920, time: 0.580, loss: 0.128279\nstep: 5940, time: 0.596, loss: 0.117876\nstep: 5960, time: 0.614, loss: 0.075307\nstep: 5980, time: 0.594, loss: 0.079513\nstep: 6000, time: 0.633, loss: 0.114692\nstep: 6020, time: 0.568, loss: 0.133627\nstep: 6040, time: 0.638, loss: 0.147060\nstep: 6060, time: 0.584, loss: 0.118504\nstep: 6080, time: 0.554, loss: 0.129379\nstep: 6100, time: 0.635, loss: 0.094543\nstep: 6120, time: 0.551, loss: 0.104092\nstep: 6140, time: 0.619, loss: 0.115607\nstep: 6160, time: 0.593, loss: 0.101980\nstep: 6180, time: 0.613, loss: 0.114507\nstep: 6200, time: 0.616, loss: 0.081464\nstep: 6220, time: 0.662, loss: 0.084683\nstep: 6240, time: 0.648, loss: 0.106942\nstep: 6260, time: 0.556, loss: 0.092090\nstep: 6280, time: 0.622, loss: 0.127494\nstep: 6300, time: 0.584, loss: 0.070667\nstep: 6320, time: 0.584, loss: 0.260092\nstep: 
6340, time: 0.647, loss: 0.043139\nstep: 6360, time: 0.567, loss: 0.120886\nstep: 6380, time: 0.619, loss: 0.110880\nstep: 6400, time: 0.614, loss: 0.096528\nstep: 6420, time: 0.587, loss: 0.123296\nstep: 6440, time: 0.611, loss: 0.129494\nstep: 6460, time: 0.596, loss: 0.151033\nstep: 6480, time: 0.673, loss: 0.066910\nstep: 6500, time: 0.633, loss: 0.102952\nstep: 6520, time: 0.596, loss: 0.092716\nstep: 6540, time: 0.559, loss: 0.127128\nstep: 6560, time: 0.668, loss: 0.100888\nstep: 6580, time: 0.639, loss: 0.102995\nstep: 6600, time: 0.629, loss: 0.057951\nstep: 6620, time: 0.573, loss: 0.124062\nstep: 6640, time: 0.588, loss: 0.097608\nstep: 6660, time: 0.612, loss: 0.075660\nstep: 6680, time: 0.591, loss: 0.058744\nstep: 6700, time: 0.597, loss: 0.055923\nstep: 6720, time: 0.595, loss: 0.104667\nstep: 6740, time: 0.630, loss: 0.122465\nstep: 6760, time: 0.654, loss: 0.043603\nstep: 6780, time: 0.621, loss: 0.156751\nstep: 6800, time: 0.621, loss: 0.094174\nstep: 6820, time: 0.620, loss: 0.135303\nstep: 6840, time: 0.565, loss: 0.069793\nstep: 6860, time: 0.540, loss: 0.099020\nstep: 6880, time: 0.619, loss: 0.111202\nstep: 6900, time: 0.611, loss: 0.104131\nstep: 6920, time: 0.583, loss: 0.105322\nstep: 6940, time: 0.580, loss: 0.092128\nstep: 6960, time: 0.629, loss: 0.069518\nstep: 6980, time: 0.715, loss: 0.084025\nstep: 7000, time: 0.580, loss: 0.105253\nstep: 7020, time: 0.661, loss: 0.116954\nstep: 7040, time: 0.554, loss: 0.106383\nstep: 7060, time: 0.588, loss: 0.097520\nstep: 7080, time: 0.685, loss: 0.083189\nstep: 7100, time: 0.598, loss: 0.089106\nstep: 7120, time: 0.596, loss: 0.083096\nstep: 7140, time: 0.618, loss: 0.134679\nstep: 7160, time: 0.592, loss: 0.094521\nstep: 7180, time: 0.546, loss: 0.086386\nstep: 7200, time: 0.665, loss: 0.116486\nstep: 7220, time: 0.605, loss: 0.096163\nstep: 7240, time: 0.582, loss: 0.114760\nstep: 7260, time: 0.564, loss: 0.071134\nstep: 7280, time: 0.640, loss: 0.139615\nstep: 7300, time: 0.585, loss: 0.153715\nstep: 7320, time: 0.567, loss: 0.044229\nstep: 7340, time: 0.656, loss: 0.105044\nstep: 7360, time: 0.576, loss: 0.063087\nstep: 7380, time: 0.601, loss: 0.072552\nstep: 7400, time: 0.599, loss: 0.073876\nstep: 7420, time: 0.605, loss: 0.076561\nstep: 7440, time: 0.630, loss: 0.102483\nstep: 7460, time: 0.626, loss: 0.112078\nstep: 7480, time: 0.649, loss: 0.196947\nstep: 7500, time: 0.589, loss: 0.070223\nstep: 7520, time: 0.579, loss: 0.072421\nstep: 7540, time: 0.606, loss: 0.094888\nstep: 7560, time: 0.578, loss: 0.093170\nstep: 7580, time: 0.567, loss: 0.066248\nstep: 7600, time: 0.604, loss: 0.068106\nstep: 7620, time: 0.523, loss: 0.099497\nstep: 7640, time: 0.548, loss: 0.054540\nstep: 7660, time: 0.597, loss: 0.096404\nstep: 7680, time: 0.607, loss: 0.123034\nstep: 7700, time: 0.626, loss: 0.117279\nstep: 7720, time: 0.544, loss: 0.144366\nstep: 7740, time: 0.589, loss: 0.092210\nstep: 7760, time: 0.585, loss: 0.075840\nstep: 7780, time: 0.550, loss: 0.057181\nstep: 7800, time: 0.649, loss: 0.124625\nstep: 7820, time: 0.563, loss: 0.089493\nstep: 7840, time: 0.593, loss: 0.106134\nstep: 7860, time: 0.648, loss: 0.086656\nstep: 7880, time: 0.559, loss: 0.105599\nstep: 7900, time: 0.575, loss: 0.076056\nstep: 7920, time: 0.568, loss: 0.100345\nstep: 7940, time: 0.586, loss: 0.056899\nstep: 7960, time: 0.596, loss: 0.105844\nstep: 7980, time: 0.639, loss: 0.093064\nstep: 8000, time: 0.597, loss: 0.113182\nstep: 8020, time: 0.530, loss: 0.078337\nstep: 8040, time: 0.577, loss: 0.113290\nstep: 8060, time: 0.596, loss: 
0.110689\nstep: 8080, time: 0.599, loss: 0.141340\nstep: 8100, time: 0.626, loss: 0.118379\nstep: 8120, time: 0.569, loss: 0.046713\nstep: 8140, time: 0.618, loss: 0.094017\nstep: 8160, time: 0.645, loss: 0.146153\nstep: 8180, time: 0.609, loss: 0.103852\nstep: 8200, time: 0.599, loss: 0.047330\nstep: 8220, time: 0.611, loss: 0.082437\nstep: 8240, time: 0.616, loss: 0.130830\nstep: 8260, time: 0.600, loss: 0.062071\nstep: 8280, time: 0.554, loss: 0.051205\nstep: 8300, time: 0.645, loss: 0.094077\nstep: 8320, time: 0.557, loss: 0.106291\nstep: 8340, time: 0.575, loss: 0.110671\nstep: 8360, time: 0.617, loss: 0.139111\nstep: 8380, time: 0.592, loss: 0.067943\nstep: 8400, time: 0.582, loss: 0.080326\nstep: 8420, time: 0.543, loss: 0.077340\nstep: 8440, time: 0.580, loss: 0.046847\nstep: 8460, time: 0.623, loss: 0.135118\nstep: 8480, time: 0.612, loss: 0.087673\nstep: 8500, time: 0.550, loss: 0.057214\nstep: 8520, time: 0.559, loss: 0.062237\nstep: 8540, time: 0.578, loss: 0.070035\nstep: 8560, time: 0.584, loss: 0.098158\nstep: 8580, time: 0.569, loss: 0.080066\nstep: 8600, time: 0.644, loss: 0.144663\nstep: 8620, time: 0.574, loss: 0.079870\nstep: 8640, time: 0.605, loss: 0.067397\nstep: 8660, time: 0.607, loss: 0.079564\nstep: 8680, time: 0.633, loss: 0.066532\nstep: 8700, time: 0.557, loss: 0.068237\nstep: 8720, time: 0.562, loss: 0.063407\nstep: 8740, time: 0.596, loss: 0.075315\nstep: 8760, time: 0.611, loss: 0.115658\nstep: 8780, time: 0.641, loss: 0.119682\nstep: 8800, time: 0.586, loss: 0.070899\nstep: 8820, time: 0.682, loss: 0.160832\nstep: 8840, time: 0.554, loss: 0.090391\nstep: 8860, time: 0.566, loss: 0.095466\nstep: 8880, time: 0.599, loss: 0.108614\nstep: 8900, time: 0.657, loss: 0.141541\nstep: 8920, time: 0.631, loss: 0.097081\nstep: 8940, time: 0.615, loss: 0.087497\nstep: 8960, time: 0.611, loss: 0.096897\nstep: 8980, time: 0.605, loss: 0.098912\nstep: 9000, time: 0.550, loss: 0.093315\nstep: 9020, time: 0.584, loss: 0.115048\nstep: 9040, time: 0.587, loss: 0.093588\nstep: 9060, time: 0.559, loss: 0.060439\nstep: 9080, time: 0.603, loss: 0.073041\nstep: 9100, time: 0.548, loss: 0.084473\nstep: 9120, time: 0.577, loss: 0.048473\nstep: 9140, time: 0.595, loss: 0.203113\nstep: 9160, time: 0.532, loss: 0.071001\nstep: 9180, time: 0.618, loss: 0.078978\nstep: 9200, time: 0.593, loss: 0.112904\nstep: 9220, time: 0.630, loss: 0.187814\nstep: 9240, time: 0.548, loss: 0.059383\nstep: 9260, time: 0.600, loss: 0.110708\nstep: 9280, time: 0.631, loss: 0.073317\nstep: 9300, time: 0.577, loss: 0.122373\nstep: 9320, time: 0.589, loss: 0.120727\nstep: 9340, time: 0.609, loss: 0.093742\nstep: 9360, time: 0.588, loss: 0.076138\nstep: 9380, time: 0.683, loss: 0.127321\nstep: 9400, time: 0.608, loss: 0.104904\nstep: 9420, time: 0.518, loss: 0.059040\nstep: 9440, time: 0.625, loss: 0.128978\nstep: 9460, time: 0.605, loss: 0.118348\nstep: 9480, time: 0.600, loss: 0.057189\nstep: 9500, time: 0.611, loss: 0.099493\nstep: 9520, time: 0.621, loss: 0.110090\nstep: 9540, time: 0.607, loss: 0.141030\nstep: 9560, time: 0.673, loss: 0.090821\nstep: 9580, time: 0.635, loss: 0.121612\nstep: 9600, time: 0.588, loss: 0.090825\nstep: 9620, time: 0.635, loss: 0.080478\nstep: 9640, time: 0.565, loss: 0.033445\nstep: 9660, time: 0.596, loss: 0.150621\nstep: 9680, time: 0.555, loss: 0.137022\nstep: 9700, time: 0.569, loss: 0.096497\nstep: 9720, time: 0.579, loss: 0.086134\nstep: 9740, time: 0.572, loss: 0.043564\nstep: 9760, time: 0.559, loss: 0.080444\nstep: 9780, time: 0.617, loss: 0.145569\nstep: 9800, time: 
0.600, loss: 0.077059\nstep: 9820, time: 0.652, loss: 0.105195\nstep: 9840, time: 0.595, loss: 0.068541\nstep: 9860, time: 0.524, loss: 0.063875\nstep: 9880, time: 0.579, loss: 0.097691\nstep: 9900, time: 0.641, loss: 0.117452\nstep: 9920, time: 0.566, loss: 0.070628\nstep: 9940, time: 0.536, loss: 0.071096\nstep: 9960, time: 0.588, loss: 0.094206\nstep: 9980, time: 0.630, loss: 0.103229\nstep: 10000, time: 0.588, loss: 0.074630\nstep: 10020, time: 0.535, loss: 0.089370\nstep: 10040, time: 0.582, loss: 0.129188\nstep: 10060, time: 0.588, loss: 0.041457\nstep: 10080, time: 0.596, loss: 0.086282\nstep: 10100, time: 0.551, loss: 0.066962\nstep: 10120, time: 0.619, loss: 0.122424\nstep: 10140, time: 0.594, loss: 0.053947\nstep: 10160, time: 0.594, loss: 0.071305\nstep: 10180, time: 0.597, loss: 0.086735\nstep: 10200, time: 0.607, loss: 0.106270\nstep: 10220, time: 0.580, loss: 0.076010\nstep: 10240, time: 0.617, loss: 0.058532\nstep: 10260, time: 0.575, loss: 0.085000\nstep: 10280, time: 0.594, loss: 0.076551\nstep: 10300, time: 0.629, loss: 0.122044\nstep: 10320, time: 0.612, loss: 0.106880\nstep: 10340, time: 0.594, loss: 0.130323\nstep: 10360, time: 0.597, loss: 0.154000\nstep: 10380, time: 0.622, loss: 0.071711\nstep: 10400, time: 0.554, loss: 0.084535\nstep: 10420, time: 0.576, loss: 0.087363\nstep: 10440, time: 0.612, loss: 0.093138\nstep: 10460, time: 0.610, loss: 0.064324\nstep: 10480, time: 0.589, loss: 0.061873\nstep: 10500, time: 0.564, loss: 0.110942\nstep: 10520, time: 0.572, loss: 0.050025\nstep: 10540, time: 0.604, loss: 0.051525\nstep: 10560, time: 0.601, loss: 0.076305\nstep: 10580, time: 0.593, loss: 0.066960\nstep: 10600, time: 0.624, loss: 0.076521\nstep: 10620, time: 0.612, loss: 0.094082\nstep: 10640, time: 0.598, loss: 0.054849\nstep: 10660, time: 0.505, loss: 0.079380\nstep: 10680, time: 0.707, loss: 0.084510\nstep: 10700, time: 0.552, loss: 0.085732\nstep: 10720, time: 0.587, loss: 0.107476\nstep: 10740, time: 0.605, loss: 0.130173\nstep: 10760, time: 0.567, loss: 0.062359\nstep: 10780, time: 0.572, loss: 0.054751\nstep: 10800, time: 0.562, loss: 0.112471\nstep: 10820, time: 0.558, loss: 0.125083\nstep: 10840, time: 0.600, loss: 0.085349\nstep: 10860, time: 0.631, loss: 0.122488\nstep: 10880, time: 0.640, loss: 0.081272\nstep: 10900, time: 0.607, loss: 0.053585\nstep: 10920, time: 0.616, loss: 0.083675\nstep: 10940, time: 0.573, loss: 0.168788\nstep: 10960, time: 0.624, loss: 0.068696\nstep: 10980, time: 0.680, loss: 0.116835\nstep: 11000, time: 0.579, loss: 0.138941\nstep: 11020, time: 0.619, loss: 0.121079\nstep: 11040, time: 0.633, loss: 0.085167\nstep: 11060, time: 0.586, loss: 0.113649\nstep: 11080, time: 0.578, loss: 0.067576\nstep: 11100, time: 0.551, loss: 0.047255\nstep: 11120, time: 0.603, loss: 0.047487\nstep: 11140, time: 0.707, loss: 0.118235\nstep: 11160, time: 0.648, loss: 0.102524\nstep: 11180, time: 0.567, loss: 0.066837\nstep: 11200, time: 0.624, loss: 0.063947\nstep: 11220, time: 0.618, loss: 0.135324\nstep: 11240, time: 0.555, loss: 0.138968\nstep: 11260, time: 0.535, loss: 0.088398\nstep: 11280, time: 0.599, loss: 0.067696\nstep: 11300, time: 0.622, loss: 0.064046\nstep: 11320, time: 0.603, loss: 0.093133\nstep: 11340, time: 0.570, loss: 0.068633\nstep: 11360, time: 0.572, loss: 0.059673\nstep: 11380, time: 0.641, loss: 0.079358\nstep: 11400, time: 0.588, loss: 0.046740\nstep: 11420, time: 0.602, loss: 0.159024\nstep: 11440, time: 0.621, loss: 0.153081\nstep: 11460, time: 0.610, loss: 0.053772\nstep: 11480, time: 0.562, loss: 0.045636\nstep: 11500, 
time: 0.587, loss: 0.078017\nstep: 11520, time: 0.609, loss: 0.069529\nstep: 11540, time: 0.648, loss: 0.092669\nstep: 11560, time: 0.571, loss: 0.124337\nstep: 11580, time: 0.577, loss: 0.055296\nstep: 11600, time: 0.527, loss: 0.085130\nstep: 11620, time: 0.570, loss: 0.099501\nstep: 11640, time: 0.617, loss: 0.100745\nstep: 11660, time: 0.614, loss: 0.058311\nstep: 11680, time: 0.590, loss: 0.080087\nstep: 11700, time: 0.607, loss: 0.105042\nstep: 11720, time: 0.592, loss: 0.096959\nstep: 11740, time: 0.594, loss: 0.109940\nstep: 11760, time: 0.563, loss: 0.073440\nstep: 11780, time: 0.583, loss: 0.130292\nstep: 11800, time: 0.558, loss: 0.052438\nstep: 11820, time: 0.597, loss: 0.088674\nstep: 11840, time: 0.585, loss: 0.094544\nstep: 11860, time: 0.625, loss: 0.052052\nstep: 11880, time: 0.545, loss: 0.096596\nstep: 11900, time: 0.678, loss: 0.105181\nstep: 11920, time: 0.629, loss: 0.065991\nstep: 11940, time: 0.566, loss: 0.039325\nstep: 11960, time: 0.620, loss: 0.070109\nstep: 11980, time: 0.573, loss: 0.097004\nstep: 12000, time: 0.558, loss: 0.096555\nstep: 12020, time: 0.608, loss: 0.111445\nstep: 12040, time: 0.617, loss: 0.074792\nstep: 12060, time: 0.552, loss: 0.070995\nstep: 12080, time: 0.570, loss: 0.044260\nstep: 12100, time: 0.578, loss: 0.112817\nstep: 12120, time: 0.533, loss: 0.110365\nstep: 12140, time: 0.585, loss: 0.074077\nstep: 12160, time: 0.682, loss: 0.127932\nstep: 12180, time: 0.613, loss: 0.094730\nstep: 12200, time: 0.564, loss: 0.072661\nstep: 12220, time: 0.534, loss: 0.082728\nstep: 12240, time: 0.606, loss: 0.060579\nstep: 12260, time: 0.563, loss: 0.098011\nstep: 12280, time: 0.508, loss: 0.080259\nstep: 12300, time: 0.629, loss: 0.076722\nstep: 12320, time: 0.645, loss: 0.060327\nstep: 12340, time: 0.590, loss: 0.076652\nstep: 12360, time: 0.621, loss: 0.122149\nstep: 12380, time: 0.523, loss: 0.094821\nstep: 12400, time: 0.622, loss: 0.068995\nstep: 12420, time: 0.629, loss: 0.134373\nstep: 12440, time: 0.588, loss: 0.054732\nstep: 12460, time: 0.556, loss: 0.092896\nstep: 12480, time: 0.643, loss: 0.122852\nstep: 12500, time: 0.572, loss: 0.081214\nstep: 12520, time: 0.554, loss: 0.087393\nstep: 12540, time: 0.649, loss: 0.076224\nstep: 12560, time: 0.531, loss: 0.078989\nstep: 12580, time: 0.593, loss: 0.124657\nstep: 12600, time: 0.584, loss: 0.119464\nstep: 12620, time: 0.568, loss: 0.081639\nstep: 12640, time: 0.571, loss: 0.058219\nstep: 12660, time: 0.595, loss: 0.074567\nstep: 12680, time: 0.588, loss: 0.103845\nstep: 12700, time: 0.590, loss: 0.109134\nstep: 12720, time: 0.636, loss: 0.091947\nstep: 12740, time: 0.614, loss: 0.071882\nstep: 12760, time: 0.562, loss: 0.059551\nstep: 12780, time: 0.642, loss: 0.125527\nstep: 12800, time: 0.639, loss: 0.087239\nstep: 12820, time: 0.587, loss: 0.079920\nstep: 12840, time: 0.571, loss: 0.064944\nstep: 12860, time: 0.646, loss: 0.301683\nstep: 12880, time: 0.592, loss: 0.056181\nstep: 12900, time: 0.561, loss: 0.070082\nstep: 12920, time: 0.537, loss: 0.045853\nstep: 12940, time: 0.600, loss: 0.074641\nstep: 12960, time: 0.568, loss: 0.072700\nstep: 12980, time: 0.572, loss: 0.085318\nstep: 13000, time: 0.519, loss: 0.065540\nstep: 13020, time: 0.542, loss: 0.090653\nstep: 13040, time: 0.579, loss: 0.075599\nstep: 13060, time: 0.617, loss: 0.090125\nstep: 13080, time: 0.562, loss: 0.147726\nstep: 13100, time: 0.595, loss: 0.081867\nstep: 13120, time: 0.678, loss: 0.146173\nstep: 13140, time: 0.650, loss: 0.073421\nstep: 13160, time: 0.604, loss: 0.105205\nstep: 13180, time: 0.621, loss: 
0.080602\nstep: 13200, time: 0.656, loss: 0.097620\nstep: 13220, time: 0.578, loss: 0.083541\nstep: 13240, time: 0.631, loss: 0.096677\nstep: 13260, time: 0.607, loss: 0.120011\nstep: 13280, time: 0.601, loss: 0.062295\nstep: 13300, time: 0.552, loss: 0.073227\nstep: 13320, time: 0.644, loss: 0.114902\nstep: 13340, time: 0.578, loss: 0.139477\nstep: 13360, time: 0.539, loss: 0.061147\nstep: 13380, time: 0.578, loss: 0.084772\nstep: 13400, time: 0.546, loss: 0.074606\nstep: 13420, time: 0.638, loss: 0.098129\nstep: 13440, time: 0.606, loss: 0.078602\nstep: 13460, time: 0.699, loss: 0.092459\nstep: 13480, time: 0.571, loss: 0.088115\nstep: 13500, time: 0.553, loss: 0.061659\nstep: 13520, time: 0.584, loss: 0.124131\nstep: 13540, time: 0.591, loss: 0.075353\nstep: 13560, time: 0.656, loss: 0.100751\nstep: 13580, time: 0.579, loss: 0.041603\nstep: 13600, time: 0.646, loss: 0.108821\nstep: 13620, time: 0.623, loss: 0.111376\nstep: 13640, time: 0.537, loss: 0.097061\nstep: 13660, time: 0.613, loss: 0.104478\nstep: 13680, time: 0.609, loss: 0.105952\nstep: 13700, time: 0.602, loss: 0.068551\nstep: 13720, time: 0.565, loss: 0.062857\nstep: 13740, time: 0.585, loss: 0.089643\nstep: 13760, time: 0.574, loss: 0.109471\nstep: 13780, time: 0.656, loss: 0.100219\nstep: 13800, time: 0.535, loss: 0.094374\nstep: 13820, time: 0.629, loss: 0.064106\nstep: 13840, time: 0.548, loss: 0.077693\nstep: 13860, time: 0.612, loss: 0.115179\nstep: 13880, time: 0.579, loss: 0.069608\nstep: 13900, time: 0.561, loss: 0.060801\nstep: 13920, time: 0.604, loss: 0.111769\nstep: 13940, time: 0.617, loss: 0.063988\nstep: 13960, time: 0.619, loss: 0.110482\nstep: 13980, time: 0.562, loss: 0.088223\nstep: 14000, time: 0.600, loss: 0.120326\nstep: 14020, time: 0.583, loss: 0.073327\nstep: 14040, time: 0.687, loss: 0.113107\nstep: 14060, time: 0.536, loss: 0.059650\nstep: 14080, time: 0.590, loss: 0.118854\nstep: 14100, time: 0.623, loss: 0.078474\nstep: 14120, time: 0.565, loss: 0.050918\nstep: 14140, time: 0.652, loss: 0.080335\nstep: 14160, time: 0.662, loss: 0.097436\nstep: 14180, time: 0.640, loss: 0.122751\nstep: 14200, time: 0.624, loss: 0.071578\nstep: 14220, time: 0.452, loss: 0.071860\nstep: 14240, time: 0.550, loss: 0.070118\nstep: 14260, time: 0.606, loss: 0.075814\nstep: 14280, time: 0.591, loss: 0.112603\nstep: 14300, time: 0.572, loss: 0.233158\nstep: 14320, time: 0.581, loss: 0.067179\nstep: 14340, time: 0.552, loss: 0.074944\nstep: 14360, time: 0.514, loss: 0.081799\nstep: 14380, time: 0.622, loss: 0.107719\nstep: 14400, time: 0.687, loss: 0.091418\nstep: 14420, time: 0.720, loss: 0.107818\nstep: 14440, time: 0.630, loss: 0.103100\nstep: 14460, time: 0.598, loss: 0.065247\nstep: 14480, time: 0.550, loss: 0.061461\nstep: 14500, time: 0.627, loss: 0.140614\nstep: 14520, time: 0.618, loss: 0.069752\nstep: 14540, time: 0.579, loss: 0.116397\nstep: 14560, time: 0.584, loss: 0.070214\nstep: 14580, time: 0.629, loss: 0.064414\nstep: 14600, time: 0.591, loss: 0.109499\nstep: 14620, time: 0.580, loss: 0.076469\nstep: 14640, time: 0.574, loss: 0.071018\nstep: 14660, time: 0.607, loss: 0.079824\nstep: 14680, time: 0.578, loss: 0.078585\nstep: 14700, time: 0.533, loss: 0.053515\nstep: 14720, time: 0.548, loss: 0.080104\nstep: 14740, time: 0.607, loss: 0.075069\nstep: 14760, time: 0.593, loss: 0.077433\nstep: 14780, time: 0.599, loss: 0.090426\nstep: 14800, time: 0.647, loss: 0.105957\nstep: 14820, time: 0.554, loss: 0.072182\nstep: 14840, time: 0.609, loss: 0.100647\nstep: 14860, time: 0.613, loss: 0.087363\nstep: 14880, 
time: 0.630, loss: 0.118873\nstep: 14900, time: 0.604, loss: 0.085044\nstep: 14920, time: 0.580, loss: 0.077815\nstep: 14940, time: 0.624, loss: 0.067642\nstep: 14960, time: 0.614, loss: 0.101921\nstep: 14980, time: 0.616, loss: 0.125359\nstep: 15000, time: 0.564, loss: 0.106761\nstep: 15020, time: 0.623, loss: 0.084984\nstep: 15040, time: 0.673, loss: 0.110846\nstep: 15060, time: 0.652, loss: 0.143051\nstep: 15080, time: 0.613, loss: 0.125310\nstep: 15100, time: 0.579, loss: 0.099315\nstep: 15120, time: 0.553, loss: 0.062296\nstep: 15140, time: 0.683, loss: 0.107496\nstep: 15160, time: 0.580, loss: 0.076019\nstep: 15180, time: 0.584, loss: 0.058944\nstep: 15200, time: 0.606, loss: 0.119908\nstep: 15220, time: 0.637, loss: 0.097876\nstep: 15240, time: 0.633, loss: 0.124019\nstep: 15260, time: 0.527, loss: 0.074473\nstep: 15280, time: 0.614, loss: 0.079560\nstep: 15300, time: 0.588, loss: 0.128367\nstep: 15320, time: 0.554, loss: 0.045606\nstep: 15340, time: 0.588, loss: 0.081305\nstep: 15360, time: 0.596, loss: 0.100404\nstep: 15380, time: 0.551, loss: 0.064244\nstep: 15400, time: 0.569, loss: 0.057295\nstep: 15420, time: 0.660, loss: 0.063597\nstep: 15440, time: 0.576, loss: 0.057078\nstep: 15460, time: 0.566, loss: 0.075589\nstep: 15480, time: 0.627, loss: 0.074247\nstep: 15500, time: 0.596, loss: 0.044194\nstep: 15520, time: 0.589, loss: 0.092747\nstep: 15540, time: 0.558, loss: 0.095222\nstep: 15560, time: 0.612, loss: 0.104819\nstep: 15580, time: 0.581, loss: 0.075490\nstep: 15600, time: 0.545, loss: 0.097628\nstep: 15620, time: 0.702, loss: 0.150312\nstep: 15640, time: 0.554, loss: 0.056298\nstep: 15660, time: 0.621, loss: 0.103522\nstep: 15680, time: 0.609, loss: 0.262294\nstep: 15700, time: 0.563, loss: 0.072328\nstep: 15720, time: 0.611, loss: 0.128354\nstep: 15740, time: 0.620, loss: 0.078599\nstep: 15760, time: 0.588, loss: 0.096806\nstep: 15780, time: 0.597, loss: 0.166852\nstep: 15800, time: 0.608, loss: 0.110438\nstep: 15820, time: 0.605, loss: 0.108896\nstep: 15840, time: 0.568, loss: 0.056648\nstep: 15860, time: 0.627, loss: 0.074790\nstep: 15880, time: 0.612, loss: 0.099274\nstep: 15900, time: 0.673, loss: 0.068885\nstep: 15920, time: 0.624, loss: 0.063186\nstep: 15940, time: 0.614, loss: 0.082731\nstep: 15960, time: 0.530, loss: 0.102761\nstep: 15980, time: 0.609, loss: 0.132212\nstep: 16000, time: 0.604, loss: 0.038611\nstep: 16020, time: 0.612, loss: 0.106796\nstep: 16040, time: 0.581, loss: 0.087356\nstep: 16060, time: 0.582, loss: 0.059014\nstep: 16080, time: 0.580, loss: 0.076529\nstep: 16100, time: 0.642, loss: 0.080341\nstep: 16120, time: 0.587, loss: 0.065566\nstep: 16140, time: 0.628, loss: 0.079094\nstep: 16160, time: 0.604, loss: 0.061266\nstep: 16180, time: 0.592, loss: 0.071877\nstep: 16200, time: 0.606, loss: 0.122584\nstep: 16220, time: 0.557, loss: 0.102627\nstep: 16240, time: 0.632, loss: 0.096801\nstep: 16260, time: 0.617, loss: 0.067315\nstep: 16280, time: 0.627, loss: 0.082445\nstep: 16300, time: 0.640, loss: 0.121341\nstep: 16320, time: 0.639, loss: 0.094843\nstep: 16340, time: 0.637, loss: 0.089539\nstep: 16360, time: 0.543, loss: 0.071653\nstep: 16380, time: 0.594, loss: 0.096179\nstep: 16400, time: 0.565, loss: 0.081656\nstep: 16420, time: 0.606, loss: 0.083234\nstep: 16440, time: 0.625, loss: 0.072528\nstep: 16460, time: 0.597, loss: 0.106766\nstep: 16480, time: 0.610, loss: 0.092412\nstep: 16500, time: 0.623, loss: 0.070261\nstep: 16520, time: 0.613, loss: 0.155166\nstep: 16540, time: 0.562, loss: 0.072679\nstep: 16560, time: 0.590, loss: 
0.122983\nstep: 16580, time: 0.576, loss: 0.073782\nstep: 16600, time: 0.629, loss: 0.067122\nstep: 16620, time: 0.587, loss: 0.080837\nstep: 16640, time: 0.613, loss: 0.099545\nstep: 16660, time: 0.546, loss: 0.052053\nstep: 16680, time: 0.589, loss: 0.085861\nstep: 16700, time: 0.556, loss: 0.087967\nstep: 16720, time: 0.543, loss: 0.111542\nstep: 16740, time: 0.643, loss: 0.068417\nstep: 16760, time: 0.566, loss: 0.112367\nstep: 16780, time: 0.554, loss: 0.136428\nstep: 16800, time: 0.597, loss: 0.039483\nstep: 16820, time: 0.600, loss: 0.070096\nstep: 16840, time: 0.565, loss: 0.076294\nstep: 16860, time: 0.568, loss: 0.047382\nstep: 16880, time: 0.587, loss: 0.068178\nstep: 16900, time: 0.566, loss: 0.059107\nstep: 16920, time: 0.612, loss: 0.092516\nstep: 16940, time: 0.571, loss: 0.060882\nstep: 16960, time: 0.628, loss: 0.094115\nstep: 16980, time: 0.566, loss: 0.063202\nstep: 17000, time: 0.619, loss: 0.112568\nstep: 17020, time: 0.636, loss: 0.136606\nstep: 17040, time: 0.575, loss: 0.074515\nstep: 17060, time: 0.582, loss: 0.126808\nstep: 17080, time: 0.595, loss: 0.067961\nstep: 17100, time: 0.632, loss: 0.097229\nstep: 17120, time: 0.619, loss: 0.081037\nstep: 17140, time: 0.585, loss: 0.042698\nstep: 17160, time: 0.596, loss: 0.095514\nstep: 17180, time: 0.571, loss: 0.069243\nstep: 17200, time: 0.587, loss: 0.056538\nstep: 17220, time: 0.565, loss: 0.122375\nstep: 17240, time: 0.587, loss: 0.088658\nstep: 17260, time: 0.528, loss: 0.101123\nstep: 17280, time: 0.558, loss: 0.052541\nstep: 17300, time: 0.587, loss: 0.117216\nstep: 17320, time: 0.545, loss: 0.045768\nstep: 17340, time: 0.576, loss: 0.090242\nstep: 17360, time: 0.614, loss: 0.061771\nstep: 17380, time: 0.582, loss: 0.110963\nstep: 17400, time: 0.671, loss: 0.081876\nstep: 17420, time: 0.589, loss: 0.077150\nstep: 17440, time: 0.628, loss: 0.145071\nstep: 17460, time: 0.585, loss: 0.063312\nstep: 17480, time: 0.579, loss: 0.071826\nstep: 17500, time: 0.655, loss: 0.075659\nstep: 17520, time: 0.618, loss: 0.075213\nstep: 17540, time: 0.695, loss: 0.091582\nstep: 17560, time: 0.565, loss: 0.125385\nstep: 17580, time: 0.589, loss: 0.072466\nstep: 17600, time: 0.587, loss: 0.123690\nstep: 17620, time: 0.575, loss: 0.095976\nstep: 17640, time: 0.583, loss: 0.120686\nstep: 17660, time: 0.553, loss: 0.046498\nstep: 17680, time: 0.595, loss: 0.064157\nstep: 17700, time: 0.597, loss: 0.076651\nstep: 17720, time: 0.593, loss: 0.054791\nstep: 17740, time: 0.667, loss: 0.075528\nstep: 17760, time: 0.591, loss: 0.101433\nstep: 17780, time: 0.132, loss: 0.038395\nstep: 17800, time: 0.759, loss: 0.082588\nstep: 17820, time: 0.568, loss: 0.097675\nstep: 17840, time: 0.617, loss: 0.088461\nstep: 17860, time: 0.505, loss: 0.063409\nstep: 17880, time: 0.582, loss: 0.129548\nstep: 17900, time: 0.668, loss: 0.080707\nstep: 17920, time: 0.571, loss: 0.074347\nstep: 17940, time: 0.557, loss: 0.063779\nstep: 17960, time: 0.598, loss: 0.059273\nstep: 17980, time: 0.593, loss: 0.081647\nstep: 18000, time: 0.529, loss: 0.074554\nstep: 18020, time: 0.574, loss: 0.103666\nstep: 18040, time: 0.593, loss: 0.073562\nstep: 18060, time: 0.675, loss: 0.101420\nstep: 18080, time: 0.544, loss: 0.077836\nstep: 18100, time: 0.584, loss: 0.079679\nstep: 18120, time: 0.589, loss: 0.074056\nstep: 18140, time: 0.580, loss: 0.062818\nstep: 18160, time: 0.549, loss: 0.057219\nstep: 18180, time: 0.524, loss: 0.103844\nstep: 18200, time: 0.573, loss: 0.094279\nstep: 18220, time: 0.600, loss: 0.105577\nstep: 18240, time: 0.585, loss: 0.074203\nstep: 18260, 
time: 0.660, loss: 0.096480\nstep: 18280, time: 0.641, loss: 0.075699\nstep: 18300, time: 0.581, loss: 0.099491\nstep: 18320, time: 0.619, loss: 0.077663\nstep: 18340, time: 0.593, loss: 0.079014\nstep: 18360, time: 0.635, loss: 0.058193\nstep: 18380, time: 0.622, loss: 0.146374\nstep: 18400, time: 0.624, loss: 0.096526\nstep: 18420, time: 0.661, loss: 0.049278\nstep: 18440, time: 0.625, loss: 0.042761\nstep: 18460, time: 0.600, loss: 0.056911\nstep: 18480, time: 0.590, loss: 0.072421\nstep: 18500, time: 0.547, loss: 0.095577\nstep: 18520, time: 0.690, loss: 0.097249\nstep: 18540, time: 0.571, loss: 0.054093\nstep: 18560, time: 0.570, loss: 0.062025\nstep: 18580, time: 0.611, loss: 0.080342\nstep: 18600, time: 0.576, loss: 0.075556\nstep: 18620, time: 0.629, loss: 0.071811\nstep: 18640, time: 0.673, loss: 0.089282\nstep: 18660, time: 0.512, loss: 0.049337\nstep: 18680, time: 0.576, loss: 0.063217\nstep: 18700, time: 0.609, loss: 0.071810\nstep: 18720, time: 0.707, loss: 0.065365\nstep: 18740, time: 0.536, loss: 0.067083\nstep: 18760, time: 0.581, loss: 0.053550\nstep: 18780, time: 0.610, loss: 0.079314\nstep: 18800, time: 0.581, loss: 0.031874\nstep: 18820, time: 0.643, loss: 0.082226\nstep: 18840, time: 0.598, loss: 0.057160\nstep: 18860, time: 0.555, loss: 0.062229\nstep: 18880, time: 0.599, loss: 0.037838\nstep: 18900, time: 0.587, loss: 0.079191\nstep: 18920, time: 0.573, loss: 0.061340\nstep: 18940, time: 0.623, loss: 0.083025\nstep: 18960, time: 0.643, loss: 0.091324\nstep: 18980, time: 0.617, loss: 0.049786\nstep: 19000, time: 0.595, loss: 0.076352\nstep: 19020, time: 0.578, loss: 0.086282\nstep: 19040, time: 0.551, loss: 0.072948\nstep: 19060, time: 0.591, loss: 0.086968\nstep: 19080, time: 0.612, loss: 0.047077\nstep: 19100, time: 0.530, loss: 0.066370\nstep: 19120, time: 0.615, loss: 0.070509\nstep: 19140, time: 0.591, loss: 0.070345\nstep: 19160, time: 0.564, loss: 0.096838\nstep: 19180, time: 0.626, loss: 0.101397\nstep: 19200, time: 0.559, loss: 0.079430\nstep: 19220, time: 0.581, loss: 0.058783\nstep: 19240, time: 0.668, loss: 0.079508\nstep: 19260, time: 0.589, loss: 0.090405\nstep: 19280, time: 0.595, loss: 0.110587\nstep: 19300, time: 0.560, loss: 0.120378\nstep: 19320, time: 0.634, loss: 0.094847\nstep: 19340, time: 0.604, loss: 0.120800\nstep: 19360, time: 0.592, loss: 0.098634\nstep: 19380, time: 0.579, loss: 0.051502\nstep: 19400, time: 0.549, loss: 0.068987\nstep: 19420, time: 0.561, loss: 0.137082\nstep: 19440, time: 0.566, loss: 0.058781\nstep: 19460, time: 0.625, loss: 0.062680\nstep: 19480, time: 0.573, loss: 0.079118\nstep: 19500, time: 0.573, loss: 0.071893\nstep: 19520, time: 0.585, loss: 0.081423\nstep: 19540, time: 0.518, loss: 0.059022\nstep: 19560, time: 0.567, loss: 0.071784\nstep: 19580, time: 0.553, loss: 0.054432\nstep: 19600, time: 0.655, loss: 0.098097\nstep: 19620, time: 0.607, loss: 0.057678\nstep: 19640, time: 0.588, loss: 0.074328\nstep: 19660, time: 0.587, loss: 0.065836\nstep: 19680, time: 0.565, loss: 0.076181\nstep: 19700, time: 0.584, loss: 0.044802\nstep: 19720, time: 0.543, loss: 0.079371\nstep: 19740, time: 0.600, loss: 0.078153\nstep: 19760, time: 0.586, loss: 0.055297\nstep: 19780, time: 0.567, loss: 0.093509\nstep: 19800, time: 0.612, loss: 0.076980\nstep: 19820, time: 0.602, loss: 0.090747\nstep: 19840, time: 0.565, loss: 0.130090\nstep: 19860, time: 0.631, loss: 0.094463\nstep: 19880, time: 0.601, loss: 0.059164\nstep: 19900, time: 0.567, loss: 0.075553\nstep: 19920, time: 0.550, loss: 0.050868\nstep: 19940, time: 0.589, loss: 
0.084589\nstep: 19960, time: 0.626, loss: 0.052663\nstep: 19980, time: 0.591, loss: 0.055979\nstep: 20000, time: 0.586, loss: 0.086868\nstep: 20020, time: 0.575, loss: 0.062772\nstep: 20040, time: 0.650, loss: 0.119759\nstep: 20060, time: 0.593, loss: 0.090858\nstep: 20080, time: 0.596, loss: 0.087746\nstep: 20100, time: 0.612, loss: 0.105859\nstep: 20120, time: 0.561, loss: 0.087564\nstep: 20140, time: 0.572, loss: 0.099794\nstep: 20160, time: 0.582, loss: 0.063342\nstep: 20180, time: 0.569, loss: 0.049336\nstep: 20200, time: 0.654, loss: 0.109735\nstep: 20220, time: 0.623, loss: 0.117426\nstep: 20240, time: 0.617, loss: 0.108655\nstep: 20260, time: 0.604, loss: 0.076593\nstep: 20280, time: 0.582, loss: 0.082062\nstep: 20300, time: 0.649, loss: 0.060598\nstep: 20320, time: 0.590, loss: 0.104444\nstep: 20340, time: 0.552, loss: 0.100352\nstep: 20360, time: 0.550, loss: 0.086690\nstep: 20380, time: 0.607, loss: 0.048075\nstep: 20400, time: 0.565, loss: 0.060025\nstep: 20420, time: 0.602, loss: 0.139161\nstep: 20440, time: 0.591, loss: 0.097283\nstep: 20460, time: 0.613, loss: 0.085102\nstep: 20480, time: 0.643, loss: 0.086903\nstep: 20500, time: 0.558, loss: 0.106012\nstep: 20520, time: 0.583, loss: 0.070796\nstep: 20540, time: 0.627, loss: 0.061531\nstep: 20560, time: 0.523, loss: 0.100856\nstep: 20580, time: 0.609, loss: 0.084064\nstep: 20600, time: 0.583, loss: 0.088058\nstep: 20620, time: 0.619, loss: 0.104270\nstep: 20640, time: 0.586, loss: 0.055439\n"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code",
"code",
"code"
]
]
|
ec6790c3e19f433cb8b55e42db535d387d668485 | 45,485 | ipynb | Jupyter Notebook | Theory-of-Probability-and-Mathematical-Statistics/Lesson_07.ipynb | dmitryrubtsov/Mathematics | b0cdf3c9b7bb1d4a96386ce04e2d50df2635e506 | [
"MIT"
]
| null | null | null | Theory-of-Probability-and-Mathematical-Statistics/Lesson_07.ipynb | dmitryrubtsov/Mathematics | b0cdf3c9b7bb1d4a96386ce04e2d50df2635e506 | [
"MIT"
]
| null | null | null | Theory-of-Probability-and-Mathematical-Statistics/Lesson_07.ipynb | dmitryrubtsov/Mathematics | b0cdf3c9b7bb1d4a96386ce04e2d50df2635e506 | [
"MIT"
]
| 5 | 2020-08-08T14:20:49.000Z | 2022-03-30T05:58:27.000Z | 124.275956 | 12,412 | 0.87996 | [
[
[
"# Lesson 07",
"_____no_output_____"
],
[
"1. Даны значения величины заработной платы заемщиков банка (zp) и значения их поведенческого кредитного скоринга (ks): zp = [35, 45, 190, 200, 40, 70, 54, 150, 120, 110], ks = [401, 574, 874, 919, 459, 739, 653, 902, 746, 832]. Используя математические операции, посчитать коэффициенты линейной регрессии, приняв за X заработную плату (то есть, zp - признак), а за y - значения скорингового балла (то есть, ks - целевая переменная). Произвести расчет как с использованием intercept, так и без.",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n%matplotlib inline\n\ndf = pd.DataFrame({'zp': [35, 45, 190, 200, 40, 70, 54, 150, 120, 110],\\\n 'ks': [401, 574, 874, 919, 459, 739, 653, 902, 746, 832]})",
"_____no_output_____"
]
],
[
[
"$$b = \\frac{\\overline{yx} - \\overline{y} \\cdot \\overline{x}}{\\overline{x^2} - (\\overline{x})^2}$$",
"_____no_output_____"
],
[
"$$a = \\overline{y} - b \\cdot (\\overline{x})^2$$",
"_____no_output_____"
]
],
[
[
"b = (np.mean(df.zp * df.ks) - np.mean(df.zp) * np.mean(df.ks)) / (np.mean(df.zp**2) - np.mean(df.zp) ** 2)\na = np.mean(df.ks) - b * np.mean(df.zp)\nprint(f'y = {a} + {b}x')",
"y = 444.1773573243596 + 2.620538882402765x\n"
],
[
"plt.scatter(df.zp, df.ks)\nt = np.arange(0, 200, 1)\nv = a + b * t\nplt.plot(t, v, color='r')",
"_____no_output_____"
]
],
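The coefficients computed from the mean-based formulas above can be cross-checked independently; a minimal sketch (an addition, not part of the original notebook) that refits the same zp/ks data with `np.polyfit`, which returns the slope and intercept of a degree-1 least-squares fit:

```python
import numpy as np

zp = np.array([35, 45, 190, 200, 40, 70, 54, 150, 120, 110], dtype=float)
ks = np.array([401, 574, 874, 919, 459, 739, 653, 902, 746, 832], dtype=float)

# Slope and intercept from the mean-based formulas
b = (np.mean(zp * ks) - np.mean(zp) * np.mean(ks)) / (np.mean(zp**2) - np.mean(zp) ** 2)
a = np.mean(ks) - b * np.mean(zp)

# Cross-check with a degree-1 polynomial fit (returns [slope, intercept])
slope, intercept = np.polyfit(zp, ks, deg=1)
assert np.isclose(b, slope) and np.isclose(a, intercept)

print(a, b)  # ~444.18 and ~2.62, matching the notebook output
```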
[
[
"$$\\hat{B} = (X^T \\times X)^{-1} \\times X^T \\times Y$$",
"_____no_output_____"
]
],
[
[
"X = df.zp.values.reshape((10,1))\ny = df.ks.values.reshape((10,1))",
"_____no_output_____"
],
[
"np.linalg.inv(X.T @ X) @ X.T @ y",
"_____no_output_____"
],
[
"X = df.zp.values.reshape((10,1))\nX = np.hstack([np.ones((10, 1)), X])\ny = df.ks.values.reshape((10,1))",
"_____no_output_____"
],
[
"np.linalg.inv(X.T @ X) @ X.T @ y",
"_____no_output_____"
]
],
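The matrix form above can also be solved without explicitly inverting X^T X; a small sketch (an addition, not from the original notebook) comparing the normal-equation result with `np.linalg.lstsq`, which is numerically safer for ill-conditioned design matrices:

```python
import numpy as np

zp = np.array([35, 45, 190, 200, 40, 70, 54, 150, 120, 110], dtype=float)
ks = np.array([401, 574, 874, 919, 459, 739, 653, 902, 746, 832], dtype=float)

X = np.hstack([np.ones((10, 1)), zp.reshape(10, 1)])  # design matrix: [1, zp]
y = ks.reshape(10, 1)

# Normal equation, as in the cells above
beta_normal = np.linalg.inv(X.T @ X) @ X.T @ y

# Least-squares solver; avoids forming the explicit inverse
beta_lstsq, residuals, rank, sv = np.linalg.lstsq(X, y, rcond=None)

print(beta_normal.ravel())  # [intercept, slope] ≈ [444.18, 2.62]
print(beta_lstsq.ravel())   # same values
```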
[
[
"2. Посчитать коэффициент линейной регрессии при заработной плате (zp), используя градиентный спуск (без intercept).",
"_____no_output_____"
]
],
[
[
"X = df.zp.values\ny = df.ks.values",
"_____no_output_____"
],
[
"w1 = 0.1\nalpha = 1e-6\nepsilon = 1\nn = X.shape[0]\ni = 0\n\nwhile abs(epsilon) > alpha:\n w_prev = w1\n w1 -= alpha * (2/n) * np.sum((w1 * X - y) * X)\n epsilon = w_prev - w1\n i += 1\n\nprint(f'y = {w1} * x. Коэффициент найден за {i} итераций, с точностью {alpha}.')",
"y = 5.8897853464590435 * x. Коэффициент найден за 430 итераций, с точностью 1e-06.\n"
],
[
"plt.scatter(df.zp, df.ks)\nt = np.arange(0, 200, 1)\nv = w1 * t\nplt.plot(t, v, color='r')",
"_____no_output_____"
]
],
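For the no-intercept model the MSE has a closed-form minimizer, w = Σ(x·y) / Σ(x²), which gives a convenient convergence check for the gradient-descent loop above; a sketch on the same data (an addition, not part of the original notebook):

```python
import numpy as np

X = np.array([35, 45, 190, 200, 40, 70, 54, 150, 120, 110], dtype=float)
y = np.array([401, 574, 874, 919, 459, 739, 653, 902, 746, 832], dtype=float)

# Closed-form minimizer of MSE for y ≈ w * x (no intercept)
w_exact = np.sum(X * y) / np.sum(X**2)

# Gradient descent with the same learning rate as the notebook, fixed iteration count
w1, alpha, n = 0.1, 1e-6, X.shape[0]
for _ in range(10000):
    w1 -= alpha * (2 / n) * np.sum((w1 * X - y) * X)

print(w_exact)  # ≈ 5.8898
print(w1)       # converges to the same value
assert np.isclose(w_exact, w1, atol=1e-3)
```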
[
[
"3. В каких случаях для вычисления доверительных интервалов и проверки статистических гипотез используется таблица значений функции Лапласа, а в каких - таблица критических точек распределения Стьюдента?",
"_____no_output_____"
],
[
"Таблица значений функции Лапласа используется, когда случайная величина распределена нормально и известна дисперсия или среднеквадратичное отклонение генеральной совокупности.\n\nТаблица критических точек распределения Стьюдента используется, когда случайная величина распределена нормально и не известна дисперсия или среднеквадратичное отклонение генеральной совокупности",
"_____no_output_____"
],
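A short illustration of this distinction (an added sketch with made-up sample values, not from the original notebook): with a known population standard deviation the normal (Laplace) quantile is used, otherwise the Student's t quantile with n − 1 degrees of freedom, which yields a slightly wider interval:

```python
import numpy as np
from scipy import stats

sample = np.array([78, 82, 75, 80, 77, 84, 79, 81], dtype=float)  # hypothetical measurements
n = sample.size
mean = sample.mean()

# Case 1: population sigma known -> z (normal / Laplace) quantile
sigma = 3.0                              # assumed known population standard deviation
z = stats.norm.ppf(0.975)                # two-sided 95% interval
ci_z = (mean - z * sigma / np.sqrt(n), mean + z * sigma / np.sqrt(n))

# Case 2: sigma unknown -> Student's t quantile with n - 1 degrees of freedom
s = sample.std(ddof=1)                   # sample standard deviation
t = stats.t.ppf(0.975, df=n - 1)
ci_t = (mean - t * s / np.sqrt(n), mean + t * s / np.sqrt(n))

print(ci_z, ci_t)                        # the t-based interval is slightly wider
```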
[
"4. \\* Произвести вычисления как в пункте 2, но с вычислением intercept. Учесть, что изменение коэффициентов должно производиться на каждом шаге одновременно (то есть изменение одного коэффициента не должно влиять на изменение другого во время одной итерации).",
"_____no_output_____"
]
],
[
[
"w0 = 442\nw1 = 0.1\nalpha = 1e-6\nepsilon0 = 1\nepsilon1 = 1\nn = X.shape[0]\ni = 0\n\nwhile abs(epsilon0) > alpha or abs(epsilon1) > alpha:\n w0_prev = w0\n w1_prev = w1\n w1 -= alpha * (2/n) * np.sum((w0 + w1 * X - y) * X)\n w0 -= alpha * (2/n) * np.sum((w0 + w1 * X - y))\n epsilon0 = w0_prev - w0\n epsilon1 = w1_prev - w1\n i += 1\n\nprint(f'y = {w0} + {w1} * x. Коэффициент найден за {i} итераций, с точностью {alpha}.')",
"y = 442.2061503441162 + 2.635047837298424 * x. Коэффициент найден за 179563 итераций, с точностью 1e-06.\n"
],
[
"plt.scatter(df.zp, df.ks)\nt = np.arange(0, 200, 1)\nv = w0 + w1 * t\nplt.plot(t, v, color='r')",
"_____no_output_____"
]
]
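Note that the task asks for simultaneous updates, while the loop above uses the freshly updated `w1` when computing the `w0` step; a strictly simultaneous variant on the same data and hyperparameters might look like the sketch below (an addition, not from the original notebook):

```python
import numpy as np

X = np.array([35, 45, 190, 200, 40, 70, 54, 150, 120, 110], dtype=float)
y = np.array([401, 574, 874, 919, 459, 739, 653, 902, 746, 832], dtype=float)

w0, w1 = 442.0, 0.1          # same starting point as in the notebook
alpha, n = 1e-6, X.shape[0]

for _ in range(200_000):
    err = w0 + w1 * X - y                     # residuals with the *current* parameters
    grad_w0 = (2 / n) * np.sum(err)
    grad_w1 = (2 / n) * np.sum(err * X)
    w0, w1 = w0 - alpha * grad_w0, w1 - alpha * grad_w1   # strictly simultaneous update

# With such a small learning rate the intercept moves very slowly, so after this many
# steps the result stays close to the notebook's (~442.2, ~2.635) rather than reaching
# the exact OLS solution (~444.18, ~2.62).
print(w0, w1)
```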
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
]
]
|
ec67a4149f0528e0e6792e403140e96f77755398 | 232,037 | ipynb | Jupyter Notebook | starter_code/WeatherPy.ipynb | moumitaghanti/Pythonapi_weatherPy | d8209053d44f76d5d001028889d631a3a9a2b78b | [
"ADSL"
]
| null | null | null | starter_code/WeatherPy.ipynb | moumitaghanti/Pythonapi_weatherPy | d8209053d44f76d5d001028889d631a3a9a2b78b | [
"ADSL"
]
| null | null | null | starter_code/WeatherPy.ipynb | moumitaghanti/Pythonapi_weatherPy | d8209053d44f76d5d001028889d631a3a9a2b78b | [
"ADSL"
]
| null | null | null | 125.901791 | 42,752 | 0.838401 | [
[
[
"# WeatherPy\n----\n\n#### Note\n* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.",
"_____no_output_____"
]
],
[
[
"# Dependencies and Setup\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport requests\nimport time\n\n# Import API key\nfrom api_keys import api_key\n\n# Incorporated citipy to determine city based on latitude and longitude\nfrom citipy import citipy\n\n# Output File (CSV)\noutput_data_file = \"output_data/cities.csv\"\n\n# Range of latitudes and longitudes\nlat_range = (-90, 90)\nlng_range = (-180, 180)\n",
"_____no_output_____"
]
],
[
[
"## Generate Cities List",
"_____no_output_____"
]
],
[
[
"# List for holding lat_lngs and cities\nlat_lngs = []\ncities = []\n\n# Create a set of random lat and lng combinations\nlats = np.random.uniform(low=-90.000, high=90.000, size=1500)\nlngs = np.random.uniform(low=-180.000, high=180.000, size=1500)\nlat_lngs = zip(lats, lngs)\n\n# Identify nearest city for each lat, lng combination\nfor lat_lng in lat_lngs:\n city = citipy.nearest_city(lat_lng[0], lat_lng[1]).city_name\n \n # If the city is unique, then add it to a our cities list\n if city not in cities:\n cities.append(city)\n\n# Print the city count to confirm sufficient count\nlen(cities)\n",
"_____no_output_____"
]
],
[
[
"### Perform API Calls\n* Perform a weather check on each city using a series of successive API calls.\n* Include a print log of each city as it'sbeing processed (with the city number and city name).\n",
"_____no_output_____"
]
],
[
[
"url = \"http://api.openweathermap.org/data/2.5/weather?\"\nunits = \"metric\"\ncity_res =[]\ncloudiness = []\ncountry = []\ndate = []\nhumidity = []\nlat =[]\nlng =[]\nmax_temp = []\nwind_speed = []\n\ncount_set = 1\ncount_rec = 1\nprint(\"Beginning Data Retrieval\")\nprint(\"-----------------------------\")\nfor city in cities:\n target_url = f\"{url}appid={api_key}&q={city}&units={units}\"\n response = requests.get(target_url)\n \n \n if response.status_code == 200:\n weather_response = response.json()\n cloudiness.append(weather_response['clouds']['all'])\n country.append(weather_response['sys']['country'])\n date.append(weather_response['dt'])\n humidity.append(weather_response['main']['humidity'])\n lat.append(weather_response['coord']['lat'])\n lng.append(weather_response['coord']['lon'])\n max_temp.append(weather_response['main']['temp_max'])\n wind_speed.append(weather_response['wind']['speed'])\n city_res.append(weather_response['name'])\n if count_rec < 50:\n print(f\"Processing Record {count_rec} of Set {count_set} |{city}\" )\n count_rec = count_rec+1\n \n else:\n count_rec = 0\n count_set = count_set+1\n \n else:\n print(f\"City not found. Skipping...\" )\n \n \n \nprint(\"-----------------------------\")\nprint(\"Data Retrieval Complete\")\nprint(\"-----------------------------\") \n \n\n ",
"Beginning Data Retrieval\n-----------------------------\nProcessing Record 1 of Set 1 |mecca\nProcessing Record 2 of Set 1 |cape town\nProcessing Record 3 of Set 1 |weiser\nProcessing Record 4 of Set 1 |caravelas\nCity not found. Skipping...\nProcessing Record 5 of Set 1 |chuy\nProcessing Record 6 of Set 1 |airai\nProcessing Record 7 of Set 1 |mataura\nProcessing Record 8 of Set 1 |chapais\nProcessing Record 9 of Set 1 |yellowknife\nProcessing Record 10 of Set 1 |mahebourg\nProcessing Record 11 of Set 1 |busselton\nProcessing Record 12 of Set 1 |lorengau\nProcessing Record 13 of Set 1 |saint-philippe\nProcessing Record 14 of Set 1 |narsaq\nCity not found. Skipping...\nProcessing Record 15 of Set 1 |hasaki\nProcessing Record 16 of Set 1 |tucano\nProcessing Record 17 of Set 1 |broome\nProcessing Record 18 of Set 1 |pisco\nProcessing Record 19 of Set 1 |carnarvon\nProcessing Record 20 of Set 1 |ambulu\nProcessing Record 21 of Set 1 |butaritari\nProcessing Record 22 of Set 1 |opuwo\nProcessing Record 23 of Set 1 |albany\nProcessing Record 24 of Set 1 |ushuaia\nProcessing Record 25 of Set 1 |new norfolk\nProcessing Record 26 of Set 1 |port elizabeth\nProcessing Record 27 of Set 1 |pevek\nCity not found. Skipping...\nProcessing Record 28 of Set 1 |iqaluit\nProcessing Record 29 of Set 1 |nhulunbuy\nCity not found. Skipping...\nProcessing Record 30 of Set 1 |punta arenas\nProcessing Record 31 of Set 1 |saldanha\nProcessing Record 32 of Set 1 |clyde river\nProcessing Record 33 of Set 1 |hithadhoo\nProcessing Record 34 of Set 1 |mount gambier\nProcessing Record 35 of Set 1 |los llanos de aridane\nProcessing Record 36 of Set 1 |tuktoyaktuk\nProcessing Record 37 of Set 1 |piacabucu\nProcessing Record 38 of Set 1 |dikson\nProcessing Record 39 of Set 1 |qaanaaq\nProcessing Record 40 of Set 1 |havelock\nCity not found. Skipping...\nProcessing Record 41 of Set 1 |mar del plata\nCity not found. Skipping...\nCity not found. Skipping...\nProcessing Record 42 of Set 1 |saint-augustin\nProcessing Record 43 of Set 1 |kapaa\nProcessing Record 44 of Set 1 |lompoc\nProcessing Record 45 of Set 1 |kaputa\nProcessing Record 46 of Set 1 |ballina\nProcessing Record 47 of Set 1 |shache\nProcessing Record 48 of Set 1 |guanica\nProcessing Record 49 of Set 1 |hermanus\nProcessing Record 0 of Set 2 |faanui\nProcessing Record 1 of Set 2 |grand gaube\nProcessing Record 2 of Set 2 |port macquarie\nProcessing Record 3 of Set 2 |port hedland\nProcessing Record 4 of Set 2 |morro bay\nProcessing Record 5 of Set 2 |richards bay\nProcessing Record 6 of Set 2 |puerto ayora\nCity not found. Skipping...\nProcessing Record 7 of Set 2 |mabaruma\nProcessing Record 8 of Set 2 |lakatoro\nProcessing Record 9 of Set 2 |rikitea\nProcessing Record 10 of Set 2 |severo-kurilsk\nProcessing Record 11 of Set 2 |poum\nCity not found. Skipping...\nProcessing Record 12 of Set 2 |nikolskoye\nCity not found. 
Skipping...\nProcessing Record 13 of Set 2 |lebu\nProcessing Record 14 of Set 2 |aldan\nProcessing Record 15 of Set 2 |kampot\nProcessing Record 16 of Set 2 |bambamarca\nProcessing Record 17 of Set 2 |kahului\nProcessing Record 18 of Set 2 |cherskiy\nProcessing Record 19 of Set 2 |vaini\nProcessing Record 20 of Set 2 |coihaique\nProcessing Record 21 of Set 2 |lasa\nProcessing Record 22 of Set 2 |ponta do sol\nProcessing Record 23 of Set 2 |san patricio\nProcessing Record 24 of Set 2 |shubarkuduk\nProcessing Record 25 of Set 2 |luderitz\nProcessing Record 26 of Set 2 |marzuq\nProcessing Record 27 of Set 2 |kimry\nProcessing Record 28 of Set 2 |vestmanna\nCity not found. Skipping...\nCity not found. Skipping...\nProcessing Record 29 of Set 2 |nova vicosa\nCity not found. Skipping...\nProcessing Record 30 of Set 2 |kununurra\nProcessing Record 31 of Set 2 |bantogon\nProcessing Record 32 of Set 2 |neuquen\nProcessing Record 33 of Set 2 |libreville\nProcessing Record 34 of Set 2 |port hawkesbury\nCity not found. Skipping...\nProcessing Record 35 of Set 2 |zeya\nProcessing Record 36 of Set 2 |yar-sale\nProcessing Record 37 of Set 2 |tuatapere\nProcessing Record 38 of Set 2 |dauphin\nProcessing Record 39 of Set 2 |dum duma\nProcessing Record 40 of Set 2 |codrington\nProcessing Record 41 of Set 2 |bubaque\nProcessing Record 42 of Set 2 |bambous virieux\nProcessing Record 43 of Set 2 |cabedelo\nProcessing Record 44 of Set 2 |atar\nProcessing Record 45 of Set 2 |khatanga\nProcessing Record 46 of Set 2 |atuona\nCity not found. Skipping...\nProcessing Record 47 of Set 2 |rawson\nProcessing Record 48 of Set 2 |port blair\nProcessing Record 49 of Set 2 |cabo san lucas\nProcessing Record 0 of Set 3 |hobart\nProcessing Record 1 of Set 3 |upernavik\nProcessing Record 2 of Set 3 |liusha\nProcessing Record 3 of Set 3 |san ramon\nProcessing Record 4 of Set 3 |galeana\nProcessing Record 5 of Set 3 |kieta\nProcessing Record 6 of Set 3 |arraial do cabo\nProcessing Record 7 of Set 3 |souillac\nProcessing Record 8 of Set 3 |naberera\nProcessing Record 9 of Set 3 |kostryzhivka\nProcessing Record 10 of Set 3 |kruisfontein\nProcessing Record 11 of Set 3 |saint george\nProcessing Record 12 of Set 3 |phan thiet\nProcessing Record 13 of Set 3 |kaitangata\nProcessing Record 14 of Set 3 |east london\nProcessing Record 15 of Set 3 |katsuura\nProcessing Record 16 of Set 3 |jamestown\nProcessing Record 17 of Set 3 |grivenskaya\nCity not found. Skipping...\nProcessing Record 18 of Set 3 |ilo\nProcessing Record 19 of Set 3 |ndioum\nProcessing Record 20 of Set 3 |porto novo\nProcessing Record 21 of Set 3 |wonthaggi\nProcessing Record 22 of Set 3 |naze\nProcessing Record 23 of Set 3 |cardston\nProcessing Record 24 of Set 3 |yauya\nProcessing Record 25 of Set 3 |semey\nProcessing Record 26 of Set 3 |berlevag\nCity not found. Skipping...\nProcessing Record 27 of Set 3 |roald\nProcessing Record 28 of Set 3 |tornio\nProcessing Record 29 of Set 3 |awbari\nProcessing Record 30 of Set 3 |desdunes\nProcessing Record 31 of Set 3 |honiara\nProcessing Record 32 of Set 3 |nokaneng\nCity not found. Skipping...\nCity not found. Skipping...\nCity not found. 
Skipping...\nProcessing Record 33 of Set 3 |thompson\nProcessing Record 34 of Set 3 |guliston\nProcessing Record 35 of Set 3 |ellensburg\nProcessing Record 36 of Set 3 |bambanglipuro\nProcessing Record 37 of Set 3 |marsa matruh\nProcessing Record 38 of Set 3 |kulhudhuffushi\nProcessing Record 39 of Set 3 |karimganj\nProcessing Record 40 of Set 3 |kodiak\nProcessing Record 41 of Set 3 |nome\nProcessing Record 42 of Set 3 |port alfred\nProcessing Record 43 of Set 3 |chokurdakh\nProcessing Record 44 of Set 3 |kavaratti\nProcessing Record 45 of Set 3 |sorland\nProcessing Record 46 of Set 3 |san joaquin\nProcessing Record 47 of Set 3 |zyryanka\nProcessing Record 48 of Set 3 |bredasdorp\nProcessing Record 49 of Set 3 |giovinazzo\nCity not found. Skipping...\nProcessing Record 0 of Set 4 |guerrero negro\nProcessing Record 1 of Set 4 |yaan\nProcessing Record 2 of Set 4 |nanortalik\nProcessing Record 3 of Set 4 |imeni stepana razina\nProcessing Record 4 of Set 4 |victoria\nCity not found. Skipping...\nProcessing Record 5 of Set 4 |la ronge\nProcessing Record 6 of Set 4 |kachug\nProcessing Record 7 of Set 4 |kijang\nProcessing Record 8 of Set 4 |kyren\nProcessing Record 9 of Set 4 |peace river\nProcessing Record 10 of Set 4 |bluff\nProcessing Record 11 of Set 4 |hay river\nProcessing Record 12 of Set 4 |ust-kulom\nProcessing Record 13 of Set 4 |majene\nProcessing Record 14 of Set 4 |buchanan\nProcessing Record 15 of Set 4 |nortelandia\nProcessing Record 16 of Set 4 |angoram\nProcessing Record 17 of Set 4 |avarua\nProcessing Record 18 of Set 4 |zambezi\nProcessing Record 19 of Set 4 |jiuquan\nProcessing Record 20 of Set 4 |ribeira grande\nProcessing Record 21 of Set 4 |port hardy\nProcessing Record 22 of Set 4 |broken hill\nProcessing Record 23 of Set 4 |georgetown\nProcessing Record 24 of Set 4 |varzea da palma\nProcessing Record 25 of Set 4 |manadhoo\nProcessing Record 26 of Set 4 |fortuna\nProcessing Record 27 of Set 4 |kaeo\nProcessing Record 28 of Set 4 |provideniya\nProcessing Record 29 of Set 4 |hirara\nCity not found. Skipping...\nCity not found. Skipping...\nProcessing Record 30 of Set 4 |erenhot\nProcessing Record 31 of Set 4 |sobolevo\nProcessing Record 32 of Set 4 |bonga\nProcessing Record 33 of Set 4 |khor\nProcessing Record 34 of Set 4 |sola\nProcessing Record 35 of Set 4 |esperance\nProcessing Record 36 of Set 4 |loreto\n"
]
],
[
[
"### Convert Raw Data to DataFrame\n* Export the city data into a .csv.\n* Display the DataFrame",
"_____no_output_____"
]
],
[
[
"#creating the weather dictionary\nweather_dict = {\"City\":city_res,\n \"Cloudiness\":cloudiness,\n \"Country\":country,\n \"Date\":date,\n \"Humidity\":humidity,\n \"Lat\":lat,\n \"Lng\":lng,\n \"Max Temp\": max_temp,\n \"Wind Speed\":wind_speed\n \n } \n\n",
"_____no_output_____"
],
[
"weather_df = pd.DataFrame(weather_dict)\nweather_df.count()",
"_____no_output_____"
],
[
"weather_df.head()",
"_____no_output_____"
]
],
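The section heading above asks to export the city data to a .csv, but no export cell appears here; a minimal sketch of that step, using the `weather_df` DataFrame and the `output_data_file` path defined earlier in this notebook:

```python
import os

# Make sure the output folder exists, then write the assembled DataFrame to disk
# at the path defined at the top of the notebook (output_data/cities.csv).
os.makedirs(os.path.dirname(output_data_file), exist_ok=True)
weather_df.to_csv(output_data_file, index=False)
```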
[
[
"### Plotting the Data\n* Use proper labeling of the plots using plot titles (including date of analysis) and axes labels.\n* Save the plotted figures as .pngs.",
"_____no_output_____"
],
[
"#### Latitude vs. Temperature Plot",
"_____no_output_____"
]
],
[
[
"plt.scatter(weather_df[\"Lat\"],weather_df[\"Max Temp\"],edgecolors=\"black\",facecolors=\"steelblue\")\nplt.title(\"City Latitude vs. Max Temperature (08/22/19)\")\nplt.xlabel(\"Latitude\")\nplt.ylabel(\"Max Temperature (F)\")\nplt.grid (b=True,which=\"major\",axis=\"both\",linestyle=\"-\",color=\"lightgrey\")\nplt.savefig(\"../Images/Latitude_Temperature_Plot.png\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"#### Latitude vs. Humidity Plot",
"_____no_output_____"
]
],
[
[
"plt.scatter(weather_df[\"Lat\"],weather_df[\"Humidity\"],edgecolors=\"black\",facecolors=\"steelblue\")\nplt.title(\"City Latitude vs. Humidity (08/22/19)\")\nplt.xlabel(\"Latitude\")\nplt.ylabel(\"Humidity (%)\")\nplt.grid (b=True,which=\"major\",axis=\"both\",linestyle=\"-\",color=\"lightgrey\")\nplt.savefig(\"../Images/Latitude_Humidity.png\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"#### Latitude vs. Cloudiness Plot",
"_____no_output_____"
]
],
[
[
"plt.scatter(weather_df[\"Lat\"],weather_df[\"Cloudiness\"],edgecolors=\"black\",facecolors=\"steelblue\")\nplt.title(\"City Latitude vs. Cloudiness (08/22/19)\")\nplt.xlabel(\"Latitude\")\nplt.ylabel(\"Cloudiness (%)\")\nplt.grid (b=True,which=\"major\",axis=\"both\",linestyle=\"-\",color=\"lightgrey\")\nplt.savefig(\"../Images/Latitude_Cloudiness.png\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"#### Latitude vs. Wind Speed Plot",
"_____no_output_____"
]
],
[
[
"plt.scatter(weather_df[\"Lat\"],weather_df[\"Wind Speed\"],edgecolors=\"black\",facecolors=\"steelblue\")\nplt.title(\"City Latitude vs. Wind Speed (08/22/19)\")\nplt.xlabel(\"Latitude\")\nplt.ylabel(\"Wind Speed (mph)\")\nplt.grid (b=True,which=\"major\",axis=\"both\",linestyle=\"-\",color=\"lightgrey\")\nplt.savefig(\"../Images/Latitude_WindSpeed.png\")\nplt.show()",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
]
|
ec67b5f3e793d82b8f868b503abaaec625da3027 | 6,965 | ipynb | Jupyter Notebook | neural_network/02-imdb-binary-classification.ipynb | imkaran23/Jupyter | e402d27be7326c3d2032ad4451d37292bdd3fb14 | [
"MIT"
]
| 394 | 2019-10-21T00:47:25.000Z | 2022-03-31T21:06:03.000Z | neural_network/02-imdb-binary-classification.ipynb | imkaran23/Jupyter | e402d27be7326c3d2032ad4451d37292bdd3fb14 | [
"MIT"
]
| 52 | 2021-08-09T22:40:20.000Z | 2022-03-07T16:56:36.000Z | neural_network/02-imdb-binary-classification.ipynb | imkaran23/Jupyter | e402d27be7326c3d2032ad4451d37292bdd3fb14 | [
"MIT"
]
| 205 | 2019-10-08T17:28:46.000Z | 2022-03-31T18:26:23.000Z | 24.875 | 281 | 0.553051 | [
[
[
"#Imports\nfrom keras.datasets import imdb\n\nfrom keras import models\nfrom keras import layers\nfrom keras import optimizers\nfrom keras import losses\nfrom keras import metrics,activations\n\nimport matplotlib.pyplot as plt",
"C:\\Users\\Hussnain\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\h5py\\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n from ._conv import register_converters as _register_converters\nUsing TensorFlow backend.\n"
],
[
"#Downloading data from https://s3.amazonaws.com/text-datasets/imdb.npz\n\n(xtrain,ytrain), (xtest, ytest) = imdb.load_data(num_words=10000)",
"Downloading data from https://s3.amazonaws.com/text-datasets/imdb.npz\n 1048576/17464789 [>.............................] - ETA: 53:49"
],
[
"#Exploring the dataset\n\nprint('xtrain shape', xtrain.shape)\nprint('ytrain shape', ytrain.shape)\nprint()\nprint('xtest shape', xtest.shape)\nprint('ytest shape', ytest.shape)\nprint()\nprint('xtrain first review as dictionary index', xtrain[1])\nprint()\nprint()\nprint('ytrain label', ytrain[0])",
"_____no_output_____"
],
[
"#index to words mapping\nword_index = imdb.get_word_index()",
"_____no_output_____"
],
[
"reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])",
"_____no_output_____"
],
[
"decode_review = ' '.join([reverse_word_index.get(i-3, reverse_word_index.get(i)) for i in xtrain[22]])\ndecode_review",
"_____no_output_____"
],
[
"import numpy as np\n\ndef vectorize_sequences(sequences, dimension=10000):\n results = np.zeros((len(sequences), dimension))\n for i, sequence in enumerate(sequences):\n results[i, sequence] = 1. \n return results\n\nx_train = vectorize_sequences(xtrain)\nx_test = vectorize_sequences(xtest)",
"_____no_output_____"
],
[
"ytrain = np.asarray(ytrain).astype('float32')\nytest = np.asarray(ytest).astype('float32')",
"_____no_output_____"
],
[
"#model\nmodel = models.Sequential()\nmodel.add(layers.Dense(16, activation=activations.relu, input_shape=(10000,)))\nmodel.add(layers.Dense(16, activation=activations.relu))\nmodel.add(layers.Dense(1, activation=activations.sigmoid))",
"_____no_output_____"
],
[
"model.compile(optimizer=optimizers.RMSprop(lr=0.0001), loss=losses.mse, metrics=['acc'])",
"_____no_output_____"
],
[
"x_val = x_train[:10000]\ny_val = ytrain[:10000]\n\nx_train_partial = x_train[10000:]\ny_train_partial = ytrain[10000:]",
"_____no_output_____"
],
[
"history = model.fit(x_train_partial, y_train_partial, epochs=4, batch_size=512, validation_data=(x_val,y_val))\nhistory_dict = history.history\nhistory_dict.keys()\nprint(history.history['acc'][-1])\nprint(history.history['val_acc'][-1])",
"_____no_output_____"
],
[
"print(model.predict(x_train_partial[22:23]))",
"_____no_output_____"
],
[
"loss = history_dict['loss']\nval_loss = history_dict['val_loss']\nepochs = range(0, len(loss)+1)\nepochs",
"_____no_output_____"
],
[
"%matplotlib\nacc = history.history['acc']\nval_acc = history.history['val_acc']\nloss = history.history['loss']\nval_loss = history.history['val_loss']\n\nepochs = range(1, len(acc) + 1)\n\n# \"bo\" is for \"blue dot\"\nplt.plot(epochs, loss, 'ro', label='Training loss')\n# b is for \"solid blue line\"\nplt.plot(epochs, val_loss, 'b', label='Validation loss')\nplt.title('Training and validation loss')\nplt.xlabel('Epochs')\nplt.ylabel('Loss')\nplt.legend()\n\nplt.show()",
"_____no_output_____"
],
[
"plt.clf() # clear figure# clear \nacc_values = history_dict['acc']\nval_acc_values = history_dict['val_acc']\n\nplt.plot(epochs, acc, 'bo', label='Training acc')\nplt.plot(epochs, val_acc, 'b', label='Validation acc')\nplt.title('Training and validation accuracy')\nplt.xlabel('Epochs')\nplt.ylabel('Loss')\nplt.legend()\n\nplt.show()",
"_____no_output_____"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
ec67c73e80df343d4b56fe32b8d04d943cb448a2 | 55,966 | ipynb | Jupyter Notebook | tutorial/part_1_build_dataset_1.ipynb | TuffDev/motion-sense | 5429f8c7db5d4097494f823257f5dcecd3bd52da | [
"MIT"
]
| 226 | 2018-03-01T17:24:36.000Z | 2022-03-21T13:45:47.000Z | tutorial/part_1_build_dataset_1.ipynb | TuffDev/motion-sense | 5429f8c7db5d4097494f823257f5dcecd3bd52da | [
"MIT"
]
| 7 | 2018-05-20T12:33:09.000Z | 2021-03-22T18:17:01.000Z | tutorial/part_1_build_dataset_1.ipynb | TuffDev/motion-sense | 5429f8c7db5d4097494f823257f5dcecd3bd52da | [
"MIT"
]
| 89 | 2018-04-16T17:25:27.000Z | 2022-03-22T11:14:04.000Z | 40.613933 | 218 | 0.367223 | [
[
[
"### Data Info\n(Source: https://bmi.teicrete.gr/en/the-mobifall-and-mobiact-datasets-2/) \n- 4 different types of `falls` performed by 66 participants\n- 11 different types of `ADLs` performed by 19 participants and 9 types of ADLs performed by 59 participants (plus one activity `LYI` which results from the `inactivity period after a fall` by 66 participants)\n- Five sub-scenarios which construct one scenario of daily living, which consists of a sequence of 50 activities and performed by 19 participants.\n\n\n- The annotated data in csv format, separated by each activity\n\n#### File names:\nFilename format:\n```\n<ADL OR FALL OR SCENARIO_CODE>_<SUBJECT_ID>_<TRIAL_NO>.txt\n```\n\n#### Subjects:\n```\n+------+---------+-----------+-------+----------+----------+----------+\n| ID | Name | Surname | Age | Height | Weight | Gender |\n+------+---------+-----------+-------+----------+----------+----------+\n| 1 | sub1 | sub1 | 32 | 180 | 85 | M | \n| 2 | sub2 | sub2 | 26 | 169 | 64 | M |\n| 3 | sub3 | sub3 | 26 | 164 | 55 | F | \n| 4 | sub4 | sub4 | 32 | 186 | 93 | M | \n| 5 | sub5 | sub5 | 36 | 160 | 50 | F | \n| 6 | sub6 | sub6 | 22 | 172 | 62 | F | \n| 7 | sub7 | sub7 | 25 | 189 | 80 | M | \n| 8 | sub8 | sub8 | 22 | 183 | 93 | M | \n| 9 | sub9 | sub9 | 30 | 177 | 102 | M | \n| 10 | sub10 | sub10 | 26 | 170 | 90 | F | \n| 11 | sub11 | sub11 | 26 | 168 | 80 | F | \n| 12 | sub12 | sub12 | 29 | 178 | 83 | M | \n| 13 | sub13 | sub13 | 24 | 177 | 62 | M | \n| 14 | sub14 | sub14 | 24 | 178 | 85 | M | \n| 15 | sub15 | sub15 | 25 | 173 | 82 | M | \n| 16 | sub16 | sub16 | 27 | 172 | 56 | F | \n| 17 | sub17 | sub17 | 25 | 173 | 67 | M | \n| 18 | sub18 | sub18 | 25 | 176 | 73 | M | \n| 19 | sub19 | sub19 | 25 | 161 | 63 | F | \n| 20 | sub20 | sub20 | 26 | 178 | 71 | M | \n| 21 | sub21 | sub21 | 25 | 180 | 70 | M | \n| 22 | sub22 | sub22 | 22 | 187 | 90 | M | \n| 23 | sub23 | sub23 | 23 | 171 | 64 | M | \n| 24 | sub24 | sub24 | 21 | 193 | 112 | M | \n| 25 | sub25 | sub25 | 22 | 170 | 62 | F | \n| 26 | sub26 | sub26 | 25 | 163 | 60 | F | \n| 27 | sub27 | sub27 | 25 | 180 | 82 | M | \n| 28 | sub28 | sub28 | 23 | 178 | 70 | Ã | \n| 29 | sub29 | sub29 | 27 | 186 | 103 | M | \n| 30 | sub30 | sub30 | 47 | 172 | 90 | M | \n| 31 | sub31 | sub31 | 27 | 170 | 75 | M | \n| 32 | sub32 | sub32 | 25 | 190 | 77 | M | \n| 33 | sub33 | sub33 | 27 | 171 | 70 | M | \n| 34 | sub34 | sub34 | 24 | 175 | 85 | Ã | \n| 35 | sub35 | sub35 | 23 | 181 | 76 | M | \n| 36 | sub36 | sub36 | 22 | 164 | 62 | F | \n| 37 | sub37 | sub37 | 25 | 172 | 63 | M | \n| 38 | sub38 | sub38 | 21 | 170 | 88 | F | \n| 39 | sub39 | sub39 | 26 | 174 | 79 | M | \n| 40 | sub40 | sub40 | 23 | 178 | 95 | M | \n| 41 | sub41 | sub41 | 20 | 172 | 67 | F | \n| 42 | sub42 | sub42 | 22 | 173 | 73 | M | \n| 43 | sub43 | sub43 | 24 | 179 | 80 | M | \n| 44 | sub44 | sub44 | 25 | 176 | 80 | M | \n| 45 | sub45 | sub45 | 26 | 175 | 92 | M | \n| 46 | sub46 | sub46 | 23 | 175 | 68 | F | \n| 47 | sub47 | sub47 | 21 | 180 | 85 | M | \n| 48 | sub48 | sub48 | 22 | 180 | 80 | M | \n| 49 | sub49 | sub49 | 23 | 178 | 75 | M | \n| 50 | sub50 | sub50 | 23 | 165 | 50 | F | \n| 51 | sub51 | sub51 | 23 | 171 | 70 | M | \n| 52 | sub52 | sub52 | 20 | 179 | 79 | M | \n| 53 | sub53 | sub53 | 27 | 186 | 120 | M | \n| 54 | sub54 | sub54 | 27 | 164 | 55 | F | \n| 55 | sub55 | sub55 | 28 | 178 | 78 | M | \n| 56 | sub56 | sub56 | 29 | 170 | 75 | M | \n| 57 | sub57 | sub57 | 21 | 187 | 70 | Ã |\n| 58 | sub58 | sub58 | 21 | 158 | 58 | F | \n| 59 | sub59 | sub59 | 26 | 175 | 70 | M | \n| 60 | 
sub60 | sub60 | 24 | 183 | 107 | M | \n| 61 | sub61 | sub61 | 24 | 170 | 70 | M | \n| 62 | sub62 | sub62 | 20 | 180 | 70 | M | \n| 63 | sub63 | sub63 | 24 | 187 | 85 | M | \n| 64 | sub64 | sub64 | 26 | 181 | 70 | M | \n| 65 | sub65 | sub65 | 40 | 170 | 100 | M | \n| 66 | sub66 | sub66 | 20 | 193 | 83 | M | \n| 67 | sub67 | sub67 | 23 | 180 | 67 | M | \n+------+---------+-----------+-------+----------+----------+----------+\n```\n\n#### Activities of Daily Living:\n```\n+----+-------+----------------------------+--------+----------+---------------------------------------------------+\n| No.| Label | Activity | Trials | Duration | Description |\n+----+-------+----------------------------+--------+----------+---------------------------------------------------+\n| 1 | STD | Standing | 1 | 5min | Standing with subtle movements |\n| 2 | WAL | Walking | 1 | 5min | Normal walking |\n| 3 | JOG | Jogging | 3 | 30s | Jogging |\n| 4 | JUM | Jumping | 3 | 30s | Continuous jumping |\n| 5 | STU | Stairs up | 6 | 10s | Stairs up (10 stairs) |\n| 6 | STN | Stairs down | 6 | 10s | Stairs down (10 stairs) |\n| 7 | SCH | Stand to sit(sit on chair) | 6 | 6s | Transition from standing to sitting |\n| 8 | SIT | Sitting on chair | 1 | 1min | Sitting on a chair with subtle movements |\n| 9 | CHU | Sit to stand(chair up) | 6 | 6s | Transition from sitting to standing |\n| 10 | CSI | Car-step in | 6 | 6s | Step in a car |\n| 11 | CSO | Car-step out | 6 | 6s | Step out a car |\n| 12 | LYI | Lying | 12 | - | Activity taken from the lying period after a fall |\n+----+-------+----------------------------+--------+----------+---------------------------------------------------+\n```\n\n\n#### Falls:\n```\n+----+-------+--------------------+--------+----------+---------------------------------------------------------+\n| No.| Label | Activity | Trials | Duration | Description |\n+----+-------+--------------------+--------+----------+---------------------------------------------------------+\n| 10 | FOL | Forward-lying | 3 | 10s | Fall Forward from standing, use of hands to dampen fall |\n| 11 | FKL | Front-knees-lying | 3 | 10s | Fall forward from standing, first impact on knees |\n| 12 | BSC | Back-sitting-chair | 3 | 10s | Fall backward while trying to sit on a chair |\n| 13 | SDL | Sideward-lying | 3 | 10s | Fall sidewards from standing, bending legs |\n+----+-------+--------------------+--------+----------+---------------------------------------------------------+\n```\n\n#### Scenarios:\n```\n+---------------------------------------------------------------------------------------------+\n| 1st Scenario of Leaving the Home (SLH), Total duration 125íí at max (1 trial/participant) |\n+----+-------+--------------------+-----------------------------------------------------------+\n| No.| Label | Activity | Description |\n+----+-------+--------------------+-----------------------------------------------------------+\n| 1 | STD | Standing | The recording starts with the participant standing |\n| 2 | WAL | Walking | outside the door and locking the door. Then walks |\n| 3 | STN | Stairs down | and descent stairs to leave his home. Following, he |\n| 4 | WAL | Walking | riches the parking area where he stands in front of the |\n| 5 | STD | Standing | car, unlocks the lock of the car, opens the door and |\n| 6 | CSI | Car-step in | gets in the car. 
He remains sited for some seconds, |\n| 7 | SIT | Sitting on chair | then he gets out of the car, closes the door and stands |\n| 8 | CSO | Car-step out | in front of the door to lock the car. |\n| 9 | STD | Standing | |\n+-----+------+--------------------+-----------------------------------------------------------+\n| 2nd Scenario of Being at work (SBW), Total duration 185íí at max (1 trial/participant) |\n+----+-------+--------------------+-----------------------------------------------------------+\n| No.| Label | Activity | Description |\n+----+-------+--------------------+-----------------------------------------------------------+\n| 1 | STD | Standing | The recording starts with the participant standing |\n| 2 | WAL | Walking | outside the cars door. Then walks from the parking |\n| 3 | STU | Stairs up | area to his work building. He walks and ascent stairs |\n| 4 | WAL | Walking | till he riches his office where he stops in front of the |\n| 5 | STD | Standing | door. Once he finds the keys he opens the door, gets |\n| 6 | WAL | Walking | in his office and walks to his chair, where he sits. |\n| 7 | SCH | Stand to sit | |\n| 8 | SIT | Sitting on chair | |\n+-----+------+--------------------+-----------------------------------------------------------+\n| 3rd Scenario of Leaving work (SLW), Total duration 185íí at max (1 trial/participant) |\n+----+-------+--------------------+-----------------------------------------------------------+\n| No.| Label | Activity | Description |\n+----+-------+--------------------+-----------------------------------------------------------+\n| 1 | SIT | Sitting on chair | The recording starts with the participant sitting in the |\n| 2 | CHU | Sit to stand | chair in his office area. Then he gets up from the |\n| 3 | WAL | Walking | chair, walks to the door and stands outside the office |\n| 4 | STD | Standing | door. Once he find the keys, he lock the door and |\n| 5 | WAL | Walking | walks and descent stairs till he riches the parking |\n| 6 | STN | Stairs down | area. He walks to his car and stands in front of the |\n| 7 | WAL | Walking | car, unlocks the lock of the car, opens the door and |\n| 8 | STD | Standing | gets in the car. He remains sited for some seconds, |\n| 9 | CSI | Car-step in | then he gets out of the car, closes the door and stands |\n| 10 | SIT | Sitting on chair | in front of the door to lock the car. |\n| 11 | CSO | Car-step out | |\n| 12 | STD | Standing | |\n+----+-------+--------------------+-----------------------------------------------------------+\n| 4th Scenario of Being Exercise (SBE), Total duration 125íí at max (1 trial/participant) |\n+----+-------+--------------------+-----------------------------------------------------------+\n| No.| Label | Activity | Description |\n+----+-------+--------------------+-----------------------------------------------------------+\n| 1 | STD | Standing | The recording starts with the participant standing in |\n| 2 | WAL | Walking | front of the car. He starts his exercise by walking, |\n| 3 | JOG | Jogging | then starts jogging from some seconds and once again |\n| 4 | WAL | Walking | walking. Then he stops for some seconds to get a |\n| 5 | STD | Standing | breath and he starts jumping and once more he |\n| 6 | JUM | Jumping | standing to relax a little. Finally he walks till his |\n| 7 | STD | Standing | car and stands outside the door. 
|\n| 8 | WAL | Walking | |\n| 9 | STD | Standing | |\n+----+-------+--------------------+-----------------------------------------------------------+\n| 5th Scenario of Returning at Home (SRH), Total duration 155íí at max (1 trial/participant) |\n+----+-------+--------------------+-----------------------------------------------------------+\n| No.| Label | Activity | Description |\n+----+-------+--------------------+-----------------------------------------------------------+\n| 1 | STD | Standing | The recording starts with the participant standing |\n| 2 | CSI | Car-step in | outside the cars door. He unlocks the lock of the car, |\n| 3 | SIT | Sitting on chair | opens the door and gets in the car. He remains sited |\n| 4 | CSO | Car-step out | for some seconds, then he gets out of the car, closes |\n| 5 | STD | Standing | the door and stands in front of the door to lock the |\n| 6 | WAL | Walking | car. Then walks from the parking area to his home. |\n| 7 | STU | Stairs up | He walks and ascent stairs till riches his home door, |\n| 8 | WAL | Walking | where he stands to finds the keys. Then he opens the |\n| 9 | STD | Standing | door, gets in his home, walks till a chair and sits. |\n| 10 | WAL | Walking | |\n| 11 | SCH | Stand to sit | |\n+----+-------+--------------------+-----------------------------------------------------------+\n```\n\n#### Sensors:\n```\n+------+---------------+----------------------------------------------------+--------------------------------------------------------------+\n| Code | Type | Values | Description |\n+------+---------------+----------------------------------------------------+--------------------------------------------------------------+\n| acc | accelerometer | timestamp(ns),x,y,z(m/s^2) | Acceleration force along the x y z axes (including gravity). |\n| gyro | gyroscope | timestamp(ns),x,y,z(rad/s) | Rate of rotation around the x y z axes (Angular velocity). |\n| ori | orientation | timestamp(ns),Azimuth,Pitch,Roll(degrees) | Angle around the z x y axes. |\n+------+---------------+----------------------------------------------------+--------------------------------------------------------------+\n```\n\n#### Related work: \n- Chatzaki C., Pediaditis M., Vavoulas G., Tsiknakis M. (2017) \n Human Daily Activity and Fall Recognition Using a Smartphoneís Acceleration Sensor. \n In: Rocker C., O'Donoghue J., Ziefle M., Helfert M., Molloy W. (eds) \n Information and Communication Technologies for Ageing Well and e-Health. ICT4AWE 2016.\n Communications in Computer and Information Science, vol 736, pp 100-118.\n Springer, Cham, DOI 10.1007/978-3-319-62704-5_7\n \n- Chatzaki, C., Pediaditis, M., Vavoulas, G. and Tsiknakis, M., \n \"Estimating normal and abnormal activities using smartphones\",\n In Proceedings of the 13th International Conference on Wearable Micro and Nano Technologies for Personalised Health (pHealth),\n\tv.224, pp 195-200,29-31 May 2016, Heraklion, Crete, Greece, DOI:10.3233/978-1-61499-653-8-195\n\n- Chatzaki Charikleia, \n\t\"Estimating human activity patterns in dynamic environments based on smart, wearable sensors : a feasibility study\",\n\t M.Sc. Thesis , Dept. Informatics Enginnering,Heraklion, Crete, Greece, 2016\n\t\n- Vavoulas, G., Chatzaki, C., Malliotakis, T., Pediaditis, M. 
and Tsiknakis, M., \n \"The MobiAct Dataset: Recognition of Activities of Daily Living using Smartphones\",\n In Proceedings of the International Conference on Information and Communication Technologies for Ageing Well and e-Health (ICT4AWE 2016),\n vol. 1, pp 143-151,ISBN: 978-989-758-180-9, DOI: 10.5220/0005792401430151\n\n- G. Vavoulas, M. Pediaditis, C. Chatzaki, E. G. Spanakis, M. Tsiknakis, \n \"The MobiFall Dataset: Fall Detection and Classification with a Smartphone\", \n invited publication for the International Journal of Monitoring and Surveillance Technologies Research,\n pp 44-56, 2014, DOI:10.4018/ijmstr.2014010103\n\t\n- G. Vavoulas, M. Pediaditis, E. Spanakis, M. Tsiknakis,\n \"The MobiFall Dataset: An Initial Evaluation of Fall Detection Algorithms Using Smartphones\",\n\t6th IEEE International Symposium on Monitoring & Surveillance Research (ISMSR): Healthcare-Bioinformatics, \n\tChania, Greece, 2013, DOI:10.1109/BIBE.2013.6701629\n\t",
"_____no_output_____"
]
],
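Before the bulk processing below, it can help to sanity-check a single annotated recording against the sensor table above. This is only a sketch: it assumes the folder layout and column names used later in this notebook (`Annotated Data/<ACT>/<ACT_CODE>_<SUBJECT_ID>_<TRIAL_NO>_annotated.csv`, columns `acc_x` … `label`), and the specific file name is just an example.

```python
import numpy as np
import pandas as pd

# example path only -- annotated files follow the <ACT_CODE>_<SUBJECT_ID>_<TRIAL_NO> pattern
sample_file = "Annotated Data/WAL/WAL_1_1_annotated.csv"

rec = pd.read_csv(sample_file)
# expected to include timestamp, rel_time, acc_*, gyro_*, azimuth, pitch, roll and label
print(rec.columns.tolist())

# acceleration magnitude per annotated label is a quick sanity check on the three axes
rec["acc_mag"] = np.sqrt(rec["acc_x"] ** 2 + rec["acc_y"] ** 2 + rec["acc_z"] ** 2)
print(rec.groupby("label")["acc_mag"].agg(["mean", "std", "count"]))
```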
[
[
"import numpy as np\nimport os\nimport pandas as pd\nds_inf = pd.read_csv('mobiAct_ds_inf.txt', sep=\"\\s*[|]\\s*\", engine='python')\nds_inf = ds_inf.drop(columns=['Unnamed: 0', 'Name', 'Surname', 'Unnamed: 8'], axis=1)\nds_inf",
"_____no_output_____"
],
[
"dir_name = \"Annotated Data\" \nact_folders = [x for x in os.listdir(dir_name) if len(x)==3]\nact_folders",
"_____no_output_____"
],
[
"columns = [#'timestamp','rel_time',\n 'acc_x','acc_y','acc_z',\n 'gyro_x', 'gyro_y', 'gyro_z',\n 'azimuth','pitch','roll','label']\ncolumns.extend(['act','age', 'height', 'weight','gender', 'pid', 'trial'])\ncolumns",
"_____no_output_____"
],
[
"train_dataset = pd.DataFrame(columns=columns)\ntest_dataset = pd.DataFrame(columns=columns)\ntrain_test_ratio = 0.67\nfor i, act in enumerate(act_folders):\n act_dirs = os.listdir(dir_name+\"/\"+act)\n for act_file in act_dirs:\n print(\"***\",act,round(i/len(act_folders), 2)*100,\"%\", \n act_file, \"train: \",len(train_dataset), \"test: \", len(test_dataset), end = \"\\r\")\n # <ACT_CODE>_<SUBJECT_ID>_<TRIAL_NO> \n pid = int(act_file.split('_')[1])\n trial = int(act_file.split('_')[2])\n tmp_inf = ds_inf[ds_inf[\"ID\"]==pid]\n p_gender = str(tmp_inf.iloc[0][\"Gender\"]).strip()\n p_age = int(tmp_inf.iloc[0][\"Age\"])\n p_height = int(tmp_inf.iloc[0][\"Height\"])\n p_weight = int(tmp_inf.iloc[0][\"Weight\"])\n \n tmp_data = pd.read_csv(dir_name+\"/\"+act+\"/\"+act_file)\n tmp_data = tmp_data.drop(columns=['timestamp','rel_time'], axis=1)\n \n tmp_data['gender'] = p_gender\n tmp_data['age'] = p_age\n tmp_data['height'] = p_height\n tmp_data['weight'] = p_weight\n tmp_data['pid'] = pid-1\n tmp_data['trial'] = trial\n tmp_data['act'] = act\n \n if act in ['STD', 'WAL', 'SIT']:\n tmp_size = len(tmp_data)\n train_dataset = train_dataset.append(tmp_data[:int(train_test_ratio*tmp_size)], ignore_index=True, sort=False)\n test_dataset = test_dataset.append(tmp_data[int(train_test_ratio*tmp_size):], ignore_index=True, sort=False)\n else:\n if trial%3 != 0:\n train_dataset = train_dataset.append(tmp_data, ignore_index=True, sort=False)\n else:\n test_dataset = test_dataset.append(tmp_data, ignore_index=True, sort=False)\n print(\"\\n\",act,round(i/len(act_folders), 2)*100,\"%\",\"\\n\")\ntrain_dataset.shape, test_dataset.shape",
"*** CHU 0.0 % CHU_64_6_annotated.csv train: 86264 test: 42094\n CHU 0.0 % \n\n*** FOL 5.0 % FOL_40_2_annotated.csv train: 334054 test: 1681712713\n FOL 5.0 % \n\n*** CSI 10.0 % CSI_19_2_annotated.csv train: 608091 test: 303661\n CSI 10.0 % \n\n*** JOG 15.0 % JOG_44_2_annotated.csv train: 1327348 test: 665752\n JOG 15.0 % \n\n*** CSO 20.0 % CSO_49_2_annotated.csv train: 1605689 test: 802780\n CSO 20.0 % \n\n*** SBE 25.0 % SBE_60_1_annotated.csv train: 2036696 test: 802780\n SBE 25.0 % \n\n*** WAL 30.0 % WAL_10_1_annotated.csv train: 4466049 test: 1987603\n WAL 30.0 % \n\n*** SBW 35.0 % SBW_66_1_annotated.csv train: 5151192 test: 2007390\n SBW 35.0 % \n\n*** SLW 40.0 % SLW_12_1_annotated.csv train: 5832457 test: 2007390\n SLW 40.0 % \n\n*** SIT 45.0 % SIT_20_1_annotated.csv train: 6011209 test: 2078204\n SIT 45.0 % \n\n*** SCH 50.0 % SCH_1_6_annotated.csv train: 6296705 test: 22204144\n SCH 50.0 % \n\n*** FKL 55.00000000000001 % FKL_35_1_annotated.csv train: 6545531 test: 2346520\n FKL 55.00000000000001 % \n\n*** SRH 60.0 % SRH_60_1_annotated.csv train: 7084473 test: 2346520\n SRH 60.0 % \n\n*** STD 65.0 % STD_33_1_annotated.csv train: 9481177 test: 3512301\n STD 65.0 % \n\n*** BSC 70.0 % BSC_26_2_annotated.csv train: 9768163 test: 3656998\n BSC 70.0 % \n\n*** JUM 75.0 % JUM_39_3_annotated.csv train: 10494130 test: 4013107\n JUM 75.0 % \n\n*** SDL 80.0 % SDL_47_2_annotated.csv train: 10741832 test: 4143888\n SDL 80.0 % \n\n*** SLH 85.0 % SLH_45_1_annotated.csv train: 11173724 test: 4143888\n SLH 85.0 % \n\n*** STN 90.0 % STN_53_6_annotated.csv train: 11671214 test: 4376792\n STN 90.0 % \n\n*** STU 95.0 % STU_19_1_annotated.csv train: 12140715 test: 4613674\n STU 95.0 % \n\n"
],
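The split policy in the cell above is easy to miss inside the bookkeeping, so here is a small self-contained toy that isolates it: static activities (STD/WAL/SIT) are split 67/33 inside every trial, while all other activities keep whole trials together and send every third trial to the test set. The helper name `split_trial` and the fabricated frame are illustrative only.

```python
import pandas as pd

def split_trial(act, trial, frame, ratio=0.67):
    # static activities are split inside the trial, everything else is split by whole trials
    if act in ('STD', 'WAL', 'SIT'):
        cut = int(ratio * len(frame))
        return frame[:cut], frame[cut:]
    if trial % 3 != 0:
        return frame, frame.iloc[0:0]
    return frame.iloc[0:0], frame

demo = pd.DataFrame({'acc_x': range(10)})
for act, trial in [('WAL', 1), ('FOL', 2), ('FOL', 3)]:
    tr, te = split_trial(act, trial, demo)
    print(act, trial, '-> train rows:', len(tr), 'test rows:', len(te))
```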
[
"train_dataset",
"_____no_output_____"
],
[
"train_dataset.to_csv(\"train_dataset.csv\", index=False)\ntest_dataset.to_csv(\"test_dataset.csv\", index=False)",
"_____no_output_____"
],
[
"train_dataset[train_dataset.columns[:9]].head()",
"_____no_output_____"
],
[
"train_dataset[train_dataset.columns[9:]].head()",
"_____no_output_____"
],
[
"x_train = train_dataset[train_dataset.columns[:9]]\ny_train = train_dataset[train_dataset.columns[9:]]\nx_test = test_dataset[test_dataset.columns[:9]]\ny_test = test_dataset[test_dataset.columns[9:]]\nprint(\"Train: x={}, y={}\\nTest: x={}, y={}\".format(x_train.shape, y_train.shape, x_test.shape, y_test.shape))",
"Train: x=(12142651, 9), y=(12142651, 8)\nTest: x=(4613674, 9), y=(4613674, 8)\n"
],
[
"dataset_name = \"MobiAct\" \nx_train.to_csv(dataset_name+\"_x_train.csv\", index=False)\nx_test.to_csv(dataset_name+\"_x_test.csv\", index=False)\ny_train.to_csv(dataset_name+\"_y_train.csv\", index=False)\ny_test.to_csv(dataset_name+\"_y_test.csv\", index=False)",
"_____no_output_____"
],
[
"import pandas as pd\ntrain_dataset = pd.read_csv(\"train_dataset.csv\")",
"_____no_output_____"
],
[
"train_dataset['label'].value_counts()",
"_____no_output_____"
],
[
"train_dataset['act'].value_counts()",
"_____no_output_____"
],
[
"set(train_dataset['act'].unique())-set(train_dataset['label'].unique())",
"_____no_output_____"
],
[
"set(train_dataset['label'].unique())-set(train_dataset['act'].unique())",
"_____no_output_____"
],
[
"test_dataset = pd.read_csv(\"test_dataset.csv\")",
"_____no_output_____"
],
[
"train_dataset = train_dataset.rename(columns={\"act\": \"adl_fall_scenario\"})\ntrain_dataset.head()",
"_____no_output_____"
],
[
"test_dataset = test_dataset.rename(columns={\"act\": \"adl_fall_scenario\"})\ntest_dataset.head()",
"_____no_output_____"
],
[
"x_train = train_dataset[train_dataset.columns[:9]]\ny_train = train_dataset[train_dataset.columns[9:]]\nx_test = test_dataset[test_dataset.columns[:9]]\ny_test = test_dataset[test_dataset.columns[9:]]\nprint(\"Train: x={}, y={}\\nTest: x={}, y={}\".format(x_train.shape, y_train.shape, x_test.shape, y_test.shape))",
"_____no_output_____"
],
[
"dataset_name = \"MobiAct\" \nx_train.to_csv(dataset_name+\"_x_train.csv\", index=False)\nx_test.to_csv(dataset_name+\"_x_test.csv\", index=False)\ny_train.to_csv(dataset_name+\"_y_train.csv\", index=False)\ny_test.to_csv(dataset_name+\"_y_test.csv\", index=False)",
"_____no_output_____"
],
[
"(sorted(train_dataset['label'].unique()))",
"_____no_output_____"
],
[
"ds_inf['Gender'].value_counts()",
"_____no_output_____"
],
[
"ds_inf.columns",
"_____no_output_____"
]
]
]
| [
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
ec67c8c83c15d2ca53dfee59b68b427b256e516b | 19,448 | ipynb | Jupyter Notebook | docs/ulab-tricks.ipynb | RoboticExplorationLab/micropython-ulab | b0679e6d16d87f5acb09dee690a71a54d4c2892b | [
"MIT"
]
| 232 | 2019-10-30T02:47:59.000Z | 2022-03-29T13:35:42.000Z | docs/ulab-tricks.ipynb | RoboticExplorationLab/micropython-ulab | b0679e6d16d87f5acb09dee690a71a54d4c2892b | [
"MIT"
]
| 325 | 2019-10-25T00:27:29.000Z | 2022-03-16T19:47:45.000Z | docs/ulab-tricks.ipynb | RoboticExplorationLab/micropython-ulab | b0679e6d16d87f5acb09dee690a71a54d4c2892b | [
"MIT"
]
| 73 | 2019-11-04T19:31:22.000Z | 2022-03-10T03:11:41.000Z | 33.415808 | 811 | 0.547563 | [
[
[
"%pylab inline",
"Populating the interactive namespace from numpy and matplotlib\n"
]
],
[
[
"## Notebook magic",
"_____no_output_____"
]
],
[
[
"from IPython.core.magic import Magics, magics_class, line_cell_magic\nfrom IPython.core.magic import cell_magic, register_cell_magic, register_line_magic\nfrom IPython.core.magic_arguments import argument, magic_arguments, parse_argstring\nimport subprocess\nimport os",
"_____no_output_____"
],
[
"@magics_class\nclass PyboardMagic(Magics):\n @cell_magic\n @magic_arguments()\n @argument('-skip')\n @argument('-unix')\n @argument('-pyboard')\n @argument('-file')\n @argument('-data')\n @argument('-time')\n @argument('-memory')\n def micropython(self, line='', cell=None):\n args = parse_argstring(self.micropython, line)\n if args.skip: # doesn't care about the cell's content\n print('skipped execution')\n return None # do not parse the rest\n if args.unix: # tests the code on the unix port. Note that this works on unix only\n with open('/dev/shm/micropython.py', 'w') as fout:\n fout.write(cell)\n proc = subprocess.Popen([\"../../micropython/ports/unix/micropython\", \"/dev/shm/micropython.py\"], \n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n print(proc.stdout.read().decode(\"utf-8\"))\n print(proc.stderr.read().decode(\"utf-8\"))\n return None\n if args.file: # can be used to copy the cell content onto the pyboard's flash\n spaces = \" \"\n try:\n with open(args.file, 'w') as fout:\n fout.write(cell.replace('\\t', spaces))\n printf('written cell to {}'.format(args.file))\n except:\n print('Failed to write to disc!')\n return None # do not parse the rest\n if args.data: # can be used to load data from the pyboard directly into kernel space\n message = pyb.exec(cell)\n if len(message) == 0:\n print('pyboard >>>')\n else:\n print(message.decode('utf-8'))\n # register new variable in user namespace\n self.shell.user_ns[args.data] = string_to_matrix(message.decode(\"utf-8\"))\n \n if args.time: # measures the time of executions\n pyb.exec('import utime')\n message = pyb.exec('t = utime.ticks_us()\\n' + cell + '\\ndelta = utime.ticks_diff(utime.ticks_us(), t)' + \n \"\\nprint('execution time: {:d} us'.format(delta))\")\n print(message.decode('utf-8'))\n \n if args.memory: # prints out memory information \n message = pyb.exec('from micropython import mem_info\\nprint(mem_info())\\n')\n print(\"memory before execution:\\n========================\\n\", message.decode('utf-8'))\n message = pyb.exec(cell)\n print(\">>> \", message.decode('utf-8'))\n message = pyb.exec('print(mem_info())')\n print(\"memory after execution:\\n========================\\n\", message.decode('utf-8'))\n\n if args.pyboard:\n message = pyb.exec(cell)\n print(message.decode('utf-8'))\n\nip = get_ipython()\nip.register_magics(PyboardMagic)",
"_____no_output_____"
]
],
[
[
"## pyboard",
"_____no_output_____"
]
],
[
[
"import pyboard\npyb = pyboard.Pyboard('/dev/ttyACM0')\npyb.enter_raw_repl()",
"_____no_output_____"
],
[
"pyb.exit_raw_repl()\npyb.close()",
"_____no_output_____"
],
[
"%%micropython -pyboard 1\n\nimport utime\nimport ulab as np\n\ndef timeit(n=1000):\n def wrapper(f, *args, **kwargs):\n func_name = str(f).split(' ')[1]\n def new_func(*args, **kwargs):\n run_times = np.zeros(n, dtype=np.uint16)\n for i in range(n):\n t = utime.ticks_us()\n result = f(*args, **kwargs)\n run_times[i] = utime.ticks_diff(utime.ticks_us(), t)\n print('{}() execution times based on {} cycles'.format(func_name, n, (delta2-delta1)/n))\n print('\\tbest: %d us'%np.min(run_times))\n print('\\tworst: %d us'%np.max(run_times))\n print('\\taverage: %d us'%np.mean(run_times))\n print('\\tdeviation: +/-%.3f us'%np.std(run_times)) \n return result\n return new_func\n return wrapper\n\ndef timeit(f, *args, **kwargs):\n func_name = str(f).split(' ')[1]\n def new_func(*args, **kwargs):\n t = utime.ticks_us()\n result = f(*args, **kwargs)\n print('execution time: ', utime.ticks_diff(utime.ticks_us(), t), ' us')\n return result\n return new_func",
"\n"
]
],
[
[
"__END_OF_DEFS__",
"_____no_output_____"
],
[
"# Tricks\n\nThis section of the book discusses a couple of tricks that can be exploited to either speed up computations, or save on RAM. However, there is probably no silver bullet, and you have to evaluate your code in terms of execution speed (if the execution is time critical), or RAM used. You should also keep in mind that, if a particular code snippet is optimised on some hardware, there is no guarantee that on another piece of hardware, you will get similar improvements. Hardware implementations are vastly different. Some microcontrollers do not even have an FPU, so you should not be surprised that you get significantly different benchmarks. Just to underline this statement, you can study the [collection of benchmarks](https://github.com/thiagofe/ulab_samples).",
"_____no_output_____"
],
[
"## Use an `ndarray`, if you can\n\nMany functions in `ulab` are implemented in a universal fashion, meaning that both generic `micropython` iterables, and `ndarray`s can be passed as an argument. E.g., both \n\n```python\nfrom ulab import numpy as np\n\nnp.sum([1, 2, 3, 4, 5])\n```\nand\n\n```python\nfrom ulab import numpy as np\n\na = np.array([1, 2, 3, 4, 5])\nnp.sum(a)\n```\n\nwill return the `micropython` variable 15 as the result. Still, `np.sum(a)` is evaluated significantly faster, because in `np.sum([1, 2, 3, 4, 5])`, the interpreter has to fetch 5 `micropython` variables, convert them to `float`, and sum the values, while the C type of `a` is known, thus the interpreter can invoke a single `for` loop for the evaluation of the `sum`. In the `for` loop, there are no function calls, the iteration simply walks through the pointer holding the values of `a`, and adds the values to an accumulator. If the array `a` is already available, then you can gain a factor of 3 in speed by calling `sum` on the array, instead of using the list. Compared to the python implementation of the same functionality, the speed-up is around 40 (again, this might depend on the hardware).\n\nOn the other hand, if the array is not available, then there is not much point in converting the list to an `ndarray` and passing that to the function. In fact, you should expect a slow-down: the constructor has to iterate over the list elements, and has to convert them to a numerical type. On top of that, it also has to reserve RAM for the `ndarray`.",
"_____no_output_____"
],
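As a rough way to see the difference, the sketch below reuses the `utime`-based timing pattern from the setup cells; it assumes a MicroPython build with `ulab`, and the absolute numbers will vary a lot between ports.

```python
from ulab import numpy as np
import utime

lst = [float(i) for i in range(1000)]
arr = np.array(lst)

t = utime.ticks_us()
s_list = np.sum(lst)            # the interpreter fetches and converts each element
t_list = utime.ticks_diff(utime.ticks_us(), t)

t = utime.ticks_us()
s_arr = np.sum(arr)             # a single C loop over the array's data pointer
t_arr = utime.ticks_diff(utime.ticks_us(), t)

print('sum over list: %d us, sum over ndarray: %d us' % (t_list, t_arr))
```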
[
"## Use a reasonable `dtype`\n\nJust as in `numpy`, the default `dtype` is `float`. But this does not mean that that is the most suitable one in all scenarios. If data are streamed from an 8-bit ADC, and you only want to know the maximum, or the sum, then it is quite reasonable to use `uint8` for the `dtype`. Storing the same data in `float` array would cost 4 or 8 times as much RAM, with absolutely no gain. Do not rely on the default value of the constructor's keyword argument, and choose one that fits!",
"_____no_output_____"
],
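A small sketch of the storage difference, assuming a `ulab` build that exposes `ndarray.itemsize`:

```python
from ulab import numpy as np

# the same 8-bit samples stored two ways
adc_uint8 = np.array(range(256), dtype=np.uint8)
adc_float = np.array(range(256), dtype=np.float)

print(np.max(adc_uint8), np.max(adc_float))    # identical answers
print(adc_uint8.itemsize, adc_float.itemsize)  # bytes per element: 1 vs 4 (or 8)
```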
[
"## Beware the axis!\n\nWhenever `ulab` iterates over multi-dimensional arrays, the outermost loop is the first axis, then the second axis, and so on. E.g., when the `sum` of \n\n```python\na = array([[1, 2, 3, 4],\n [5, 6, 7, 8], \n [9, 10, 11, 12]], dtype=uint8)\n```\n\nis being calculated, first the data pointer walks along `[1, 2, 3, 4]` (innermost loop, last axis), then is moved back to the position, where 5 is stored (this is the nesting loop), and traverses `[5, 6, 7, 8]`, and so on. Moving the pointer back to 5 is more expensive, than moving it along an axis, because the position of 5 has to be calculated, whereas moving from 5 to 6 is simply an addition to the address. Thus, while the matrix\n\n```python\nb = array([[1, 5, 9],\n [2, 6, 10], \n [3, 7, 11],\n [4, 8, 12]], dtype=uint8)\n```\n\nholds the same data as `a`, the summation over the entries in `b` is slower, because the pointer has to be re-wound three times, as opposed to twice in `a`. For small matrices the savings are not significant, but you would definitely notice the difference, if you had \n\n```\na = array(range(2000)).reshape((2, 1000))\nb = array(range(2000)).reshape((1000, 2))\n```\n\nThe moral is that, in order to improve on the execution speed, whenever possible, you should try to make the last axis the longest. As a side note, `numpy` can re-arrange its loops, and puts the longest axis in the innermost loop. This is why the longest axis is sometimes referred to as the fast axis. In `ulab`, the order of the axes is fixed. ",
"_____no_output_____"
],
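The effect can be made visible with the two arrays from the paragraph above; again, this assumes a MicroPython build with `ulab` and `utime`, and the measured gap depends on the hardware.

```python
from ulab import numpy as np
import utime

a = np.array(range(2000)).reshape((2, 1000))    # long last axis: few pointer re-winds
b = np.array(range(2000)).reshape((1000, 2))    # short last axis: many pointer re-winds

t = utime.ticks_us()
np.sum(a)
print('(2, 1000):', utime.ticks_diff(utime.ticks_us(), t), 'us')

t = utime.ticks_us()
np.sum(b)
print('(1000, 2):', utime.ticks_diff(utime.ticks_us(), t), 'us')
```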
[
"## Reduce the number of artifacts\n\nBefore showing a real-life example, let us suppose that we want to interpolate uniformly sampled data, and the absolute magnitude is not really important, we only care about the ratios between neighbouring value. One way of achieving this is calling the `interp` functions. However, we could just as well work with slices.",
"_____no_output_____"
]
],
[
[
"a = array([0, 10, 2, 20, 4], dtype=np.uint8)\nb = np.zeros(9, dtype=np.uint8)\n\nb[::2] = 2 * a\nb[1::2] = a[:-1] + a[1:]\n\nb //= 2\nb",
"_____no_output_____"
]
],
[
[
"`b` now has values from `a` at every even position, and interpolates the values on every odd position. If only the relative magnitudes are important, then we can even save the division by 2, and we end up with ",
"_____no_output_____"
]
],
[
[
"a = array([0, 10, 2, 20, 4], dtype=np.uint8)\nb = np.zeros(9, dtype=np.uint8)\n\nb[::2] = 2 * a\nb[1::2] = a[:-1] + a[1:]\n\nb",
"_____no_output_____"
]
],
[
[
"Importantly, we managed to keep the results in the smaller `dtype`, `uint8`. Now, while the two assignments above are terse and pythonic, the code is not the most efficient: the right hand sides are compound statements, generating intermediate results. To store them, RAM has to be allocated. This takes time, and leads to memory fragmentation. Better is to write out the assignments in 4 instructions:",
"_____no_output_____"
]
],
[
[
"b = np.zeros(9, dtype=np.uint8)\n\nb[::2] = a\nb[::2] += a\nb[1::2] = a[:-1]\nb[1::2] += a[1:]\n\nb",
"_____no_output_____"
]
],
[
[
"The results are the same, but no extra RAM is allocated, except for the views `a[:-1]`, and `a[1:]`, but those had to be created even in the origin implementation.",
"_____no_output_____"
],
[
"### Upscaling images\n\nAnd now the example: there are low-resolution thermal cameras out there. Low resolution might mean 8 by 8 pixels. Such a small number of pixels is just not reasonable to plot, no matter how small the display is. If you want to make the camera image a bit more pleasing, you can upscale (stretch) it in both dimensions. This can be done exactly as we up-scaled the linear array:",
"_____no_output_____"
]
],
[
[
"\nb = np.zeros((15, 15), dtype=np.uint8)\n\nb[1::2,::2] = a[:-1,:]\nb[1::2,::2] += a[1:, :]\nb[1::2,::2] //= 2\nb[::,1::2] = a[::,:-1:2]\nb[::,1::2] += a[::,2::2]\nb[::,1::2] //= 2",
"_____no_output_____"
]
],
[
[
"Up-scaling by larger numbers can be done in a similar fashion, you simply have more assignments.",
"_____no_output_____"
],
[
"There are cases, when one cannot do away with the intermediate results. Two prominent cases are the `where` function, and indexing by means of a Boolean array. E.g., in",
"_____no_output_____"
]
],
[
[
"a = array([1, 2, 3, 4, 5])\nb = a[a < 4]\nb",
"_____no_output_____"
]
],
[
[
"the expression `a < 4` produces the Boolean array, ",
"_____no_output_____"
]
],
[
[
"a < 4",
"_____no_output_____"
]
],
[
[
"If you repeatedly have such conditions in a loop, you might have to peridically call the garbage collector to remove the Boolean arrays that are used only once.",
"_____no_output_____"
]
]
]
| [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
]
|
ec67dd5fba7d94457c2ee125b17e89d85b79c724 | 4,839 | ipynb | Jupyter Notebook | notebook/dog-breed-dentification.ipynb | ameya-parab/dog-breed-identification | aaa41ca7aca0e590a6bb16982e431e1cfb040b0c | [
"MIT"
]
| null | null | null | notebook/dog-breed-dentification.ipynb | ameya-parab/dog-breed-identification | aaa41ca7aca0e590a6bb16982e431e1cfb040b0c | [
"MIT"
]
| null | null | null | notebook/dog-breed-dentification.ipynb | ameya-parab/dog-breed-identification | aaa41ca7aca0e590a6bb16982e431e1cfb040b0c | [
"MIT"
]
| null | null | null | 27.494318 | 89 | 0.543294 | [
[
[
"import os\nimport sys\n\nimport optuna\nimport torch\nimport pandas as pd\n\nsys.path.insert(0, os.path.join(os.getcwd(), \"..\"))\n\nfrom config import MODEL_PATH, STORAGE, DATA_DIR, BREED\nfrom src.dataset import fetch_dataset\nfrom src.model import EfficientNet\nfrom src.train import run_training, evaluate",
"_____no_output_____"
],
[
"N_TRIALS = 50\nos.environ[\"http_proxy\"] = \"\"\nos.environ[\"https_proxy\"] = \"\"\nos.environ[\"HTTP_PROXY\"] = \"\"\nos.environ[\"HTTPS_PROXY\"] = \"\"\n",
"_____no_output_____"
],
[
"def objective(trial):\n\n batch_size = trial.suggest_int(\"batch_size\", low=32, high=48)\n epochs = trial.suggest_int(\"epochs\", low=8, high=12)\n random_seed = trial.suggest_int(\"random_seed\", low=0, high=1000_000)\n lr = trial.suggest_loguniform(\"lr\", low=0.001, high=0.01)\n\n train_dataloader, valid_dataloader, _ = fetch_dataset(\n random_seed=random_seed, batch_size=batch_size\n )\n\n print(\n f\"Epoch: {epochs}, Batch: {batch_size}, LR: {lr}, Seed: {random_seed}\"\n )\n\n _, validation_loss = run_training(\n train_dataloader=train_dataloader,\n valid_dataloader=valid_dataloader,\n epochs=epochs,\n lr=lr,\n freeze_layers=True,\n )\n\n return validation_loss\n",
"_____no_output_____"
],
[
"study = optuna.create_study(\n study_name=\"dog-breed\",\n direction=\"minimize\",\n pruner=optuna.pruners.HyperbandPruner(),\n sampler=optuna.samplers.TPESampler(multivariate=True),\n storage=STORAGE,\n load_if_exists=True,\n)\n\nstudy.optimize(\n objective,\n n_trials=N_TRIALS,\n gc_after_trial=True,\n)\n",
"_____no_output_____"
],
[
"study = optuna.load_study(\n study_name=\"dog-breed\",\n pruner=optuna.pruners.HyperbandPruner(),\n sampler=optuna.samplers.TPESampler(multivariate=True),\n storage=STORAGE,\n)\n\ntrain_dataloader, valid_dataloader, test_dataloader = fetch_dataset(\n random_seed=study.best_params[\"random_seed\"],\n batch_size=study.best_params[\"batch_size\"],\n)\n\nrun_training(\n train_dataloader=train_dataloader,\n valid_dataloader=valid_dataloader,\n epochs=study.best_params[\"epochs\"],\n lr=study.best_params[\"lr\"],\n random_seed=study.best_params[\"random_seed\"],\n verbose=True\n)\n\nmodel = EfficientNet().model\nmodel.load_state_dict(torch.load(MODEL_PATH))\n\n_, probabilities, _ = evaluate(\n model=model, dataloader=test_dataloader\n )\n",
"_____no_output_____"
],
[
"ids = os.listdir(os.path.join(DATA_DIR, \"test\"))\nids = [_id.split(\".\")[0] for _id in ids]\nsubmission = pd.DataFrame(ids)\nsubmission.columns = [\"id\"]\n\nresult = pd.DataFrame(torch.vstack(probabilities).numpy())\nresult.columns = BREED\n\nsubmission = submission.merge(result, left_index=True, right_index=True)\nsubmission\n\nsubmission.to_csv(\n os.path.join(DATA_DIR, \"submission.csv\"),\n index=False,\n)\n",
"_____no_output_____"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
ec67def9cb88ee8d2917b8c1ea528f7e5cf573cc | 18,006 | ipynb | Jupyter Notebook | Models/Patient-Level-Model.ipynb | 23tyagit/NeuraHealth | 25eb0efdddf82bcabaf02be253d660ed2accbcd6 | [
"MIT"
]
| null | null | null | Models/Patient-Level-Model.ipynb | 23tyagit/NeuraHealth | 25eb0efdddf82bcabaf02be253d660ed2accbcd6 | [
"MIT"
]
| null | null | null | Models/Patient-Level-Model.ipynb | 23tyagit/NeuraHealth | 25eb0efdddf82bcabaf02be253d660ed2accbcd6 | [
"MIT"
]
| null | null | null | 28.178404 | 155 | 0.539598 | [
[
[
"# \n# Author: Tanish Tyagi\n# \n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pickle\n\n# machine learning libraries\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import linear_model\nfrom sklearn import metrics\nfrom sklearn.model_selection import cross_validate\nfrom sklearn.metrics import confusion_matrix, average_precision_score, classification_report\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.utils import shuffle\nfrom sklearn import preprocessing\nimport seaborn as sns\nfrom sklearn.metrics import roc_auc_score, matthews_corrcoef, accuracy_score\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import KFold\n\n# deep learning libraries\nimport torch\nimport transformers\nfrom sklearn.model_selection import train_test_split\nfrom simpletransformers.classification import ClassificationModel, ClassificationArgs\n\nimport time\nimport math\nimport random\nfrom tqdm import tqdm\nimport regex as re\nfrom collections import Counter\nimport warnings\nwarnings.filterwarnings('ignore')",
"_____no_output_____"
]
],
[
[
"## Loading in Patient Level Dataset with ClinicalBERT Sequence Level Predictions",
"_____no_output_____"
]
],
[
[
"patient_level_preds = pd.read_csv(r\"Storage/Bert/john_hsu_sequence_preds_proba.csv\")",
"_____no_output_____"
],
[
"patient_level_preds = patient_level_preds[patient_level_preds[\"syndromic_dx\"].isna() == False]\npatient_level_preds = patient_level_preds.reset_index(drop = True)",
"_____no_output_____"
],
[
"patient_level_preds.columns",
"_____no_output_____"
]
],
[
[
"## Feature Engineering to Get Patient Level Labels",
"_____no_output_____"
]
],
[
[
"for i in tqdm(range(len(patient_level_preds))):\n if (int(patient_level_preds.at[i, \"syndromic_dx\"]) > 0):\n patient_level_preds.at[i, \"syndromic_dx\"] = 1\n else:\n patient_level_preds.at[i, \"syndromic_dx\"] = 0",
"_____no_output_____"
],
[
"patient_level_preds[\"syndromic_dx\"].value_counts()",
"_____no_output_____"
],
[
"patient_level_features = pd.DataFrame(columns = [\"PatientID\", \"percent_yes\", \"percent_no\", \"percent_neither\", \"sequence_count\", \"label\"])",
"_____no_output_____"
]
],
[
[
"## Percent yes, no, neither Feature Engineering",
"_____no_output_____"
]
],
[
[
"data = []\nfor i in tqdm(range(len(patient_level_preds[\"PatientID\"].unique()))):\n curr = patient_level_preds[patient_level_preds[\"PatientID\"] == str(patient_level_preds[\"PatientID\"].unique()[i])]\n seq_count = len(curr)\n\n if (seq_count <= 10):\n continue\n \n p_yes = len(curr[curr[\"class_pred\"] == 2]) / len(curr)\n p_no = len(curr[curr[\"class_pred\"] == 0]) / len(curr)\n p_ntr = len(curr[curr[\"class_pred\"] == 1]) / len(curr)\n\n no_count = len(curr[curr[\"syndromic_dx\"] == 0])\n yes_count = len(curr[curr[\"syndromic_dx\"] == 1])\n\n label = 0\n if (yes_count > no_count):\n label = 1 \n \n curr_dict = {\n \"PatientID\" : str(patient_level_preds[\"PatientID\"].unique()[i]),\n \"percent_yes\" : p_yes,\n \"percent_no\" : p_no,\n \"percent_neither\" : p_ntr,\n \"sequence_count\" : seq_count,\n \"label\" : label \n }\n\n data.append(curr_dict)\n\npatient_level_features = pd.DataFrame(data)",
"_____no_output_____"
],
[
"x = patient_level_features[\"sequence_count\"].describe()",
"_____no_output_____"
]
],
[
[
"## Converting Sequence Count Feature to a discrete value by bucketing based off quartiles",
"_____no_output_____"
]
],
[
[
"for i in tqdm(range(len(patient_level_features))):\n if (patient_level_features.at[i, \"sequence_count\"] <= x[\"25%\"]):\n patient_level_features.at[i, \"sequence_count\"] = 0\n elif (patient_level_features.at[i, \"sequence_count\"] <= x[\"50%\"]):\n patient_level_features.at[i, \"sequence_count\"] = 1\n elif (patient_level_features.at[i, \"sequence_count\"] <= x[\"75%\"]):\n patient_level_features.at[i, \"sequence_count\"] = 2\n else:\n patient_level_features.at[i, \"sequence_count\"] = 3",
"_____no_output_____"
],
[
"# patient_level_features.to_csv(r\"Storage/Bert/jh_patient_level_features.csv\", index = False)",
"_____no_output_____"
]
],
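For reference, essentially the same quartile bucketing can be written in one call with `pandas.qcut`. This is only a sketch on toy counts, since the real `sequence_count` column is overwritten in place by the loop above.

```python
import pandas as pd

# toy counts standing in for the raw per-patient sequence counts
raw_counts = pd.Series([12, 25, 25, 40, 80, 150, 300], name="sequence_count")

# labels=False returns the bucket index 0-3; duplicates="drop" guards against tied quartile edges
buckets = pd.qcut(raw_counts, q=4, labels=False, duplicates="drop")
print(pd.DataFrame({"sequence_count": raw_counts, "bucket": buckets}))
```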
[
[
"## Splitting into Train, Validation, Test Splits",
"_____no_output_____"
]
],
[
[
"X = patient_level_features[[\"percent_yes\", \"percent_no\", \"percent_neither\", \"sequence_count\"]]\ny = patient_level_features[\"label\"]\n\ny_label = y.to_numpy()\nX_train, X_test_valid, y_train, y_test_valid = train_test_split(X,y,test_size=0.15, stratify=y_label)\n\ny_test_valid_label = y_test_valid.to_numpy()\nX_valid, X_test, y_valid, y_test = train_test_split(X_test_valid, y_test_valid,test_size=0.5, stratify=y_test_valid_label)",
"_____no_output_____"
]
],
[
[
"### Feature Standardization",
"_____no_output_____"
]
],
[
[
"num_cols = [\"percent_yes\",\"percent_no\", \"sequence_count\"]\n\nfor i in num_cols:\n scale = StandardScaler().fit(X_train[[i]])\n\n X_train[i] = scale.transform(X_train[[i]])\n X_valid[i] = scale.transform(X_valid[[i]])\n X_test[i] = scale.transform(X_test[[i]]) ",
"_____no_output_____"
],
[
"len(X_train), len(X_valid), len(X_test)",
"_____no_output_____"
],
[
"X_cross_validation = pd.concat([X_train, X_valid]).to_numpy()\ny_cross_validation = pd.concat([y_train, y_valid]).to_numpy()",
"_____no_output_____"
]
],
[
[
"## K-Fold Cross Validation Training Loop",
"_____no_output_____"
]
],
[
[
"kf = KFold(n_splits = 10, shuffle = True)\nkf.get_n_splits(X_cross_validation)",
"_____no_output_____"
],
[
"def logisitic_regression(X_train, y_train, X_test, y_test, c, want_conf_mat):\n # fitting model\n lr = LogisticRegression(penalty = 'l1', solver = 'liblinear', C = c, random_state = 0, class_weight = 'balanced')\n lr.fit(X_train, y_train)\n \n # predictions\n y_pred = lr.predict(X_test)\n y_prob = lr.predict_proba(X_test)\n\n # collecting results\n acc = metrics.accuracy_score(y_test, y_pred)\n auc = roc_auc_score(y_test, y_prob[:, 1])\n \n # if (save_model == True):\n # pickle.dump(lr, open(\"Storage/Model/\" + name, 'wb'))\n\n \n if (want_conf_mat == True):\n return lr, acc, auc, c, confusion_matrix(y_test, y_pred)\n \n return lr, acc, auc, c",
"_____no_output_____"
],
[
"counter = 0\ndf_list = []\n\nfor train_index, test_index in kf.split(X_cross_validation):\n # print(\"TRAIN:\", train_index, \"TEST:\", test_index)\n X_train_cv, X_test_cv = X.iloc[train_index], X.iloc[test_index]\n y_train_cv, y_test_cv = y.iloc[train_index], y.iloc[test_index]\n\n #X_train_cv = X_train_cv.drop(columns = [\"PatientID\"])\n #X_test_cv = X_test_cv.drop(columns = [\"PatientID\"])\n\n acc_list = []\n auc_list = []\n c_list = []\n\n # tuning for optimal lambda value\n for c in [0.01, 0.1, 1, 10, 100]:\n #name = \"Fold-\" + str((counter + 1)) + \"-Corr-\" + str(corr) + \"-C-\" + str(c) + \".sav\"\n lr, acc, auc, c = logisitic_regression(X_train_cv, y_train_cv, X_test_cv, y_test_cv, c, False)\n acc_list.append(acc)\n auc_list.append(auc)\n c_list.append(c)\n \n # gathering model stats\n acc_df = pd.DataFrame(acc_list, columns=['acc'])\n auc_df = pd.DataFrame(auc_list, columns=['auc'])\n c_df = pd.DataFrame(c_list, columns=['c_value'])\n \n assert len(acc_df) == len(auc_df) == len(c_df)\n \n iter_df = pd.concat([c_df, acc_df, auc_df], axis=1)\n iter_df['fold_number'] = [(counter + 1)] * len(iter_df)\n df_list.append(iter_df)\n \n print(\"Completed Fold #: \", counter + 1)\n counter += 1\n \n print(\"Stats DF has\", len(df_list), \"records\")",
"_____no_output_____"
],
[
"all_df = pd.concat(df_list)",
"_____no_output_____"
]
],
[
[
"## Finding Optimal Hyperparameters",
"_____no_output_____"
]
],
[
[
"average_results_df = []\n\nfor c in [0.01, 0.1, 1, 10, 100]:\n filtered = all_df[(all_df[\"c_value\"] == c)]\n avg_auc = filtered[\"auc\"].mean()\n avg_acc = filtered[\"acc\"].mean()\n\n filler = np.arange(5, 8)**2\n df = pd.DataFrame(filler.reshape(1, 3), columns = [\"c_value\", \"acc\", \"auc\"])\n df.loc[df.index] = [c, avg_acc, avg_auc]\n #print(df)\n \n average_results_df.append(df)",
"_____no_output_____"
],
[
"average_results_df = pd.concat(average_results_df)",
"_____no_output_____"
],
[
"average_results_df[average_results_df['auc'] == max(average_results_df['auc'])]",
"_____no_output_____"
]
],
[
[
"## Model Evaluation on Held Out Test Set",
"_____no_output_____"
]
],
[
[
"c = 100\n\nlr = LogisticRegression(penalty = 'l1', solver = 'liblinear', C = c, class_weight = 'balanced')\nlr.fit(X_train, y_train)",
"_____no_output_____"
],
[
"y_test_preds = lr.predict(X_test)",
"_____no_output_____"
]
],
[
[
"## Accuracy and AUC",
"_____no_output_____"
]
],
[
[
"acc = metrics.accuracy_score(y_test, y_test_preds)\nacc",
"_____no_output_____"
],
[
"y_test_prob = lr.predict_proba(X_test)\nauc = roc_auc_score(y_test, y_test_prob[:, 1])\nauc",
"_____no_output_____"
]
],
[
[
"## Classification Report",
"_____no_output_____"
]
],
[
[
"target_names = ['Negative', 'Positive']\nresults_lgr = classification_report(y_test, y_test_preds, target_names = target_names, output_dict=True)\nresults_lgr = pd.DataFrame(results_lgr).transpose()\nresults_lgr",
"_____no_output_____"
]
],
[
[
"## Saving Model and features plus ROC Curves",
"_____no_output_____"
]
],
[
[
"save = False\n\nif save == True:\n file_name = \"lr_12_26_patient_level.sav\"\n pickle.dump(lr, open(file_name, 'wb'))\n\n X_train_df = pd.concat([X_train, y_train])\n X_valid_df = pd.concat([X_valid, y_valid])\n X_test_df = pd.concat([X_test, y_test])\n\n X_train_df.to_csv(r\"Storage/Bert/patient_level_train_85.csv\", index = False)\n X_valid_df.to_csv(r\"Storage/Bert/patient_level_valid_85.csv\", index = False)\n X_test_df.to_csv(r\"Storage/Bert/patient_level_test_85.csv\", index = False)\n n_class = 2\n\n fpr = {}\n tpr = {}\n thresh = {}\n roc_auc = {}\n\n for i in range(n_class): \n fpr[i], tpr[i], thresh[i] = metrics.roc_curve(y_test, y_test_prob[:,i], pos_label = i)\n roc_auc[i] = metrics.auc(fpr[i], tpr[i])\n \n lw = 2\n\n # plotting \n plt.plot(fpr[0], tpr[0], linestyle='--', color='red', label='L1 Logistic Regression (area = %0.2f)' % roc_auc[0])\n plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive rate')\n plt.legend(loc='best')\n\n plt.plot(fpr[1], tpr[1], linestyle='--',color='green', label='L1 Logistic Regression (area = %0.2f)' % roc_auc[1])\n plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive rate')\n plt.legend(loc='best')\n plt.show()\n\n # plt.savefig(\"Storage/Bert/patient_level_roc_85.svg\")\n",
"_____no_output_____"
]
],
[
[
"## Further Metrics",
"_____no_output_____"
]
],
[
[
"conf_mat = confusion_matrix(y_test, y_test_preds)",
"_____no_output_____"
],
[
"import numpy as np\nFP = conf_mat.sum(axis = 0) - np.diag(conf_mat) \nFN = conf_mat.sum(axis = 1) - np.diag(conf_mat)\nTP = np.diag(conf_mat)\nTN = conf_mat.sum() - (FP + FN + TP)\nFP = FP.astype(float)\nFN = FN.astype(float)\nTP = TP.astype(float)\nTN = TN.astype(float)\n\n# Sensitivity, hit rate, recall, or true positive rate\nTPR = TP/(TP+FN)\n\n# Specificity or true negative rate\nTNR = TN/(TN+FP) \n\n# Precision or positive predictive value\nPPV = TP/(TP+FP)\n\n# Negative predictive value\nNPV = TN/(TN+FN)\n\n# Fall out or false positive rate\nFPR = FP/(FP+TN)\n\n# False negative rate\nFNR = FN/(TP+FN)\n\n# False discovery rate\nFDR = FP/(TP+FP)\n\nprint(\"Sensitivity: \", TPR)\nprint(\"Specificity: \", TNR)\nprint(\"NPV: \", NPV)\nprint(\"PPV: \", PPV)\nprint(\"FPR: \", FPR)",
"_____no_output_____"
]
]
]
| [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
]
|
ec67dfc7674533a74f09dcc270d92c1e707291c6 | 25,439 | ipynb | Jupyter Notebook | Linear_Regression.ipynb | anmoljaiswal076/course_mldata_project | 67da0d65f566f0b904611b4ec73aee8523609857 | [
"MIT"
]
| null | null | null | Linear_Regression.ipynb | anmoljaiswal076/course_mldata_project | 67da0d65f566f0b904611b4ec73aee8523609857 | [
"MIT"
]
| null | null | null | Linear_Regression.ipynb | anmoljaiswal076/course_mldata_project | 67da0d65f566f0b904611b4ec73aee8523609857 | [
"MIT"
]
| null | null | null | 113.566964 | 15,264 | 0.749479 | [
[
[
"# Import libraries necessary for this project\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n%matplotlib inline\nbos1 = pd.read_csv('BostonHousing.txt')\nprint(bos1)",
" crim zn indus chas nox rm age dis rad tax \\\n0 0.00632 18.0 2.31 0 0.538 6.575 65.2 4.0900 1 296 \n1 0.02731 0.0 7.07 0 0.469 6.421 78.9 4.9671 2 242 \n2 0.02729 0.0 7.07 0 0.469 7.185 61.1 4.9671 2 242 \n3 0.03237 0.0 2.18 0 0.458 6.998 45.8 6.0622 3 222 \n4 0.06905 0.0 2.18 0 0.458 7.147 54.2 6.0622 3 222 \n5 0.02985 0.0 2.18 0 0.458 6.430 58.7 6.0622 3 222 \n6 0.08829 12.5 7.87 0 0.524 6.012 66.6 5.5605 5 311 \n7 0.14455 12.5 7.87 0 0.524 6.172 96.1 5.9505 5 311 \n8 0.21124 12.5 7.87 0 0.524 5.631 100.0 6.0821 5 311 \n9 0.17004 12.5 7.87 0 0.524 6.004 85.9 6.5921 5 311 \n10 0.22489 12.5 7.87 0 0.524 6.377 94.3 6.3467 5 311 \n11 0.11747 12.5 7.87 0 0.524 6.009 82.9 6.2267 5 311 \n12 0.09378 12.5 7.87 0 0.524 5.889 39.0 5.4509 5 311 \n13 0.62976 0.0 8.14 0 0.538 5.949 61.8 4.7075 4 307 \n14 0.63796 0.0 8.14 0 0.538 6.096 84.5 4.4619 4 307 \n15 0.62739 0.0 8.14 0 0.538 5.834 56.5 4.4986 4 307 \n16 1.05393 0.0 8.14 0 0.538 5.935 29.3 4.4986 4 307 \n17 0.78420 0.0 8.14 0 0.538 5.990 81.7 4.2579 4 307 \n18 0.80271 0.0 8.14 0 0.538 5.456 36.6 3.7965 4 307 \n19 0.72580 0.0 8.14 0 0.538 5.727 69.5 3.7965 4 307 \n20 1.25179 0.0 8.14 0 0.538 5.570 98.1 3.7979 4 307 \n21 0.85204 0.0 8.14 0 0.538 5.965 89.2 4.0123 4 307 \n22 1.23247 0.0 8.14 0 0.538 6.142 91.7 3.9769 4 307 \n23 0.98843 0.0 8.14 0 0.538 5.813 100.0 4.0952 4 307 \n24 0.75026 0.0 8.14 0 0.538 5.924 94.1 4.3996 4 307 \n25 0.84054 0.0 8.14 0 0.538 5.599 85.7 4.4546 4 307 \n26 0.67191 0.0 8.14 0 0.538 5.813 90.3 4.6820 4 307 \n27 0.95577 0.0 8.14 0 0.538 6.047 88.8 4.4534 4 307 \n28 0.77299 0.0 8.14 0 0.538 6.495 94.4 4.4547 4 307 \n29 1.00245 0.0 8.14 0 0.538 6.674 87.3 4.2390 4 307 \n.. ... ... ... ... ... ... ... ... ... ... \n476 4.87141 0.0 18.10 0 0.614 6.484 93.6 2.3053 24 666 \n477 15.02340 0.0 18.10 0 0.614 5.304 97.3 2.1007 24 666 \n478 10.23300 0.0 18.10 0 0.614 6.185 96.7 2.1705 24 666 \n479 14.33370 0.0 18.10 0 0.614 6.229 88.0 1.9512 24 666 \n480 5.82401 0.0 18.10 0 0.532 6.242 64.7 3.4242 24 666 \n481 5.70818 0.0 18.10 0 0.532 6.750 74.9 3.3317 24 666 \n482 5.73116 0.0 18.10 0 0.532 7.061 77.0 3.4106 24 666 \n483 2.81838 0.0 18.10 0 0.532 5.762 40.3 4.0983 24 666 \n484 2.37857 0.0 18.10 0 0.583 5.871 41.9 3.7240 24 666 \n485 3.67367 0.0 18.10 0 0.583 6.312 51.9 3.9917 24 666 \n486 5.69175 0.0 18.10 0 0.583 6.114 79.8 3.5459 24 666 \n487 4.83567 0.0 18.10 0 0.583 5.905 53.2 3.1523 24 666 \n488 0.15086 0.0 27.74 0 0.609 5.454 92.7 1.8209 4 711 \n489 0.18337 0.0 27.74 0 0.609 5.414 98.3 1.7554 4 711 \n490 0.20746 0.0 27.74 0 0.609 5.093 98.0 1.8226 4 711 \n491 0.10574 0.0 27.74 0 0.609 5.983 98.8 1.8681 4 711 \n492 0.11132 0.0 27.74 0 0.609 5.983 83.5 2.1099 4 711 \n493 0.17331 0.0 9.69 0 0.585 5.707 54.0 2.3817 6 391 \n494 0.27957 0.0 9.69 0 0.585 5.926 42.6 2.3817 6 391 \n495 0.17899 0.0 9.69 0 0.585 5.670 28.8 2.7986 6 391 \n496 0.28960 0.0 9.69 0 0.585 5.390 72.9 2.7986 6 391 \n497 0.26838 0.0 9.69 0 0.585 5.794 70.6 2.8927 6 391 \n498 0.23912 0.0 9.69 0 0.585 6.019 65.3 2.4091 6 391 \n499 0.17783 0.0 9.69 0 0.585 5.569 73.5 2.3999 6 391 \n500 0.22438 0.0 9.69 0 0.585 6.027 79.7 2.4982 6 391 \n501 0.06263 0.0 11.93 0 0.573 6.593 69.1 2.4786 1 273 \n502 0.04527 0.0 11.93 0 0.573 6.120 76.7 2.2875 1 273 \n503 0.06076 0.0 11.93 0 0.573 6.976 91.0 2.1675 1 273 \n504 0.10959 0.0 11.93 0 0.573 6.794 89.3 2.3889 1 273 \n505 0.04741 0.0 11.93 0 0.573 6.030 80.8 2.5050 1 273 \n\n ptratio b lstat medv \n0 15.3 396.90 4.98 24.0 \n1 17.8 396.90 9.14 21.6 \n2 17.8 392.83 4.03 34.7 \n3 18.7 394.63 2.94 33.4 \n4 18.7 396.90 5.33 36.2 
\n5 18.7 394.12 5.21 28.7 \n6 15.2 395.60 12.43 22.9 \n7 15.2 396.90 19.15 27.1 \n8 15.2 386.63 29.93 16.5 \n9 15.2 386.71 17.10 18.9 \n10 15.2 392.52 20.45 15.0 \n11 15.2 396.90 13.27 18.9 \n12 15.2 390.50 15.71 21.7 \n13 21.0 396.90 8.26 20.4 \n14 21.0 380.02 10.26 18.2 \n15 21.0 395.62 8.47 19.9 \n16 21.0 386.85 6.58 23.1 \n17 21.0 386.75 14.67 17.5 \n18 21.0 288.99 11.69 20.2 \n19 21.0 390.95 11.28 18.2 \n20 21.0 376.57 21.02 13.6 \n21 21.0 392.53 13.83 19.6 \n22 21.0 396.90 18.72 15.2 \n23 21.0 394.54 19.88 14.5 \n24 21.0 394.33 16.30 15.6 \n25 21.0 303.42 16.51 13.9 \n26 21.0 376.88 14.81 16.6 \n27 21.0 306.38 17.28 14.8 \n28 21.0 387.94 12.80 18.4 \n29 21.0 380.23 11.98 21.0 \n.. ... ... ... ... \n476 20.2 396.21 18.68 16.7 \n477 20.2 349.48 24.91 12.0 \n478 20.2 379.70 18.03 14.6 \n479 20.2 383.32 13.11 21.4 \n480 20.2 396.90 10.74 23.0 \n481 20.2 393.07 7.74 23.7 \n482 20.2 395.28 7.01 25.0 \n483 20.2 392.92 10.42 21.8 \n484 20.2 370.73 13.34 20.6 \n485 20.2 388.62 10.58 21.2 \n486 20.2 392.68 14.98 19.1 \n487 20.2 388.22 11.45 20.6 \n488 20.1 395.09 18.06 15.2 \n489 20.1 344.05 23.97 7.0 \n490 20.1 318.43 29.68 8.1 \n491 20.1 390.11 18.07 13.6 \n492 20.1 396.90 13.35 20.1 \n493 19.2 396.90 12.01 21.8 \n494 19.2 396.90 13.59 24.5 \n495 19.2 393.29 17.60 23.1 \n496 19.2 396.90 21.14 19.7 \n497 19.2 396.90 14.10 18.3 \n498 19.2 396.90 12.92 21.2 \n499 19.2 395.77 15.10 17.5 \n500 19.2 396.90 14.33 16.8 \n501 21.0 391.99 9.67 22.4 \n502 21.0 396.90 9.08 20.6 \n503 21.0 396.90 5.64 23.9 \n504 21.0 393.45 6.48 22.0 \n505 21.0 396.90 7.88 11.9 \n\n[506 rows x 14 columns]\n"
],
[
"x = bos1.iloc[:,0:13]\ny = bos1[\"medv\"]",
"_____no_output_____"
],
[
"# code to plot correlation\n\n#library to establish correlation\nimport seaborn as sns\nnames = []\n#creating a correlation matrix\ncorrelations = bos1.corr()\nsns.heatmap(correlations, square = True, cmap=\"YlGnBu\")\nplt.yticks(rotation=0)\nplt.xticks(rotation=90)\nplt.show()",
"_____no_output_____"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code"
]
]
|
ec67e8f145cc9425e883c65408ef01aba4a959f8 | 31,927 | ipynb | Jupyter Notebook | tutorials/nlp/Joint_Intent_and_Slot_Classification.ipynb | antrikshmisri/NeMo | 17fc2541172dd3d781bfa0b5a1fa41576d4bfe21 | [
"Apache-2.0"
]
| 1 | 2021-06-23T10:39:53.000Z | 2021-06-23T10:39:53.000Z | tutorials/nlp/Joint_Intent_and_Slot_Classification.ipynb | Oktai15/NeMo | 5b6dd3850129898be47cf0d65587897ec45a5b59 | [
"Apache-2.0"
]
| null | null | null | tutorials/nlp/Joint_Intent_and_Slot_Classification.ipynb | Oktai15/NeMo | 5b6dd3850129898be47cf0d65587897ec45a5b59 | [
"Apache-2.0"
]
| null | null | null | 54.390119 | 694 | 0.514956 | [
[
[
"\"\"\"\nYou can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab.\n\nInstructions for setting up Colab are as follows:\n1. Open a new Python 3 notebook.\n2. Import this notebook from GitHub (File -> Upload Notebook -> \"GITHUB\" tab -> copy/paste GitHub URL)\n3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select \"GPU\" for hardware accelerator)\n4. Run this cell to set up dependencies.\n\"\"\"\n# If you're using Google Colab and not running locally, run this cell\n\n# install NeMo\nBRANCH = 'v1.0.0b2'\n!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[nlp]",
"_____no_output_____"
],
[
"from nemo.collections import nlp as nemo_nlp\nfrom nemo.utils.exp_manager import exp_manager\nfrom nemo.utils import logging\n\nimport os\nimport wget\nimport torch\nimport pytorch_lightning as pl\nfrom omegaconf import OmegaConf",
"_____no_output_____"
]
],
[
[
"# Task Description\n**Joint Intent and Slot classification** - is a task of classifying an Intent and detecting all relevant Slots (Entities)\nfor this Intent in a query.\nFor example, in the query: `What is the weather in Santa Clara tomorrow morning?`, we would like to classify the query\nas a `weather` Intent, and detect `Santa Clara` as a `location` slot and `tomorrow morning` as a `date_time` slot.\nIntents and Slots names are usually task specific and defined as labels in the training data.\nThis is a fundamental step that is executed in any task-driven Conversational Assistant.\n\nOur Bert based model implementation enables to train and then detect both of these tasks together.\n",
"_____no_output_____"
],
[
"# Dataset and NeMo data format\n\nIn this tutorial we are going to use a virtual assistant interaction data set that can be downloaded from here: https://github.com/xliuhw/NLU-Evaluation-Data.\nThere are about 10K training and 1K testing queries which cover 64 various Intents and 55 Slots. \n\nTo work with NeMo NLP classification model, this dataset should be first converted to the NeMo format, which requires next files:\n- **dict.intents.csv** - list of all intent names in the data. One line per an intent name.\n- **dict.slots.csv** - list of all slot names in the data. One line per a slot name. It is possible to use both: B- I- notations, for separating between first and intermediate tokens for multi token slots. Or just use one slot type for each token of multi token slot. Our recommendation is to use later one, since it is simpler and there is no visible degradation in performance.\n- **train.tsv/test.tsv** - contain original queries, one per line, and intent number separated by tab. For example: `what alarms do i have set right now\t0`. Intent numbers are according to the intent line in the intent dictionary file (dict.intents.csv) starting from 0. First line of these files contains a header line: `sentence \\tab label`.\n- **train_slot.tvs/test_slot.tsv** - contain one line per a query, where instead each token there is a number of the token from the slots dictionary file (dict.slots.csv), starting from 0. Last 'out-of scope' token is usually located in the last line of the dictionary. Example: `54 0 0 54 54 12 12` (numbers separated by space). No header line in these files.\n\nNeMo provides **import_dataset.py** converter for few reference datasets (Assistant / Atis / Snips) which converts them to the NeMo data format for the Intent and Slot classification model. If you have your own annotated dataset in a different format, you will need to write a data converter. Possible recommended format for your own annotation, is to have one text file per all examples of one intent. With one line per query in a form like: `did i set an alarm to [alarm_type : wake up] in the [timeofday : morning]`, using brackets to define slot names. This is very similar to the assistant format from this example and you can use its converter to NeMo format with small changes. \n\nYou can run this utility as follows:\n\n**python examples/nlp/intent_slot_classification/data/import_datasets.py --dataset_name=assistant --source_data_dir=source_dir_name --target_data_dir=target_dir_name**\n",
"_____no_output_____"
],
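To make the file layout concrete, here is a minimal sketch that writes a toy example in this format; the query, label names and output directory are made up for illustration and are not part of the assistant dataset.

```python
import os

# toy annotated examples: (query, intent, per-token slot tags); all names here are illustrative
examples = [
    ("what is the weather in santa clara tomorrow morning", "weather_query",
     ["O", "O", "O", "O", "O", "place_name", "place_name", "date", "timeofday"]),
]
intents = ["weather_query"]
slots = ["date", "place_name", "timeofday", "O"]   # out-of-scope slot 'O' kept last

out_dir = "toy_nemo_format"
os.makedirs(out_dir, exist_ok=True)

with open(os.path.join(out_dir, "dict.intents.csv"), "w") as f:
    f.write("\n".join(intents) + "\n")
with open(os.path.join(out_dir, "dict.slots.csv"), "w") as f:
    f.write("\n".join(slots) + "\n")

with open(os.path.join(out_dir, "train.tsv"), "w") as f_int, \
     open(os.path.join(out_dir, "train_slots.tsv"), "w") as f_slot:
    f_int.write("sentence\tlabel\n")                 # header line expected in train.tsv/test.tsv
    for query, intent, slot_tags in examples:
        f_int.write("{}\t{}\n".format(query, intents.index(intent)))
        f_slot.write(" ".join(str(slots.index(tag)) for tag in slot_tags) + "\n")
```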
[
"# Download, preprocess and explore the dataset\n## Download the dataset and convert it to the NeMo format",
"_____no_output_____"
]
],
[
[
"# you can replace DATA_DIR and NEMO_DIR with your own locations\nDATA_DIR = \".\"\nNEMO_DIR = '.'\n\n# download the converter files from github for the purpose of this tutorial\nwget.download(f'https://raw.githubusercontent.com/NVIDIA/NeMo/{BRANCH}/examples/nlp/intent_slot_classification/data/import_datasets.py', NEMO_DIR)\nwget.download(f'https://raw.githubusercontent.com/NVIDIA/NeMo/{BRANCH}/examples/nlp/intent_slot_classification/data/assistant_utils.py', NEMO_DIR)",
"_____no_output_____"
],
[
"# download and unzip the example dataset from github\nprint('Downloading dataset...')\nwget.download('https://github.com/xliuhw/NLU-Evaluation-Data/archive/master.zip', DATA_DIR)\n! unzip {DATA_DIR}/NLU-Evaluation-Data-master.zip -d {DATA_DIR}",
"_____no_output_____"
],
[
"# convert the dataset to the NeMo format\n!python {NEMO_DIR}/import_datasets.py --dataset_name=assistant --source_data_dir={DATA_DIR}/NLU-Evaluation-Data-master --target_data_dir={DATA_DIR}/nemo_format\n",
"_____no_output_____"
]
],
[
[
"## Data exploration\nYou can see the dataset in both the original and NeMo's formats. We have here 65 different Intents and 55 Slots, which could be typical commands for virtual assistants. Out of scope slot has the name 'O' and is the last in the dictionary of Slots. And we can see examples of queries and also format of training intent and slot files. ",
"_____no_output_____"
]
],
[
[
"# list of queries divided by intent files in the original training dataset\n! ls -l {DATA_DIR}/NLU-Evaluation-Data-master/dataset/trainset",
"_____no_output_____"
],
[
"# print all intents from the NeMo format intent dictionary\n!echo 'Intents: ' $(wc -l < {DATA_DIR}/nemo_format/dict.intents.csv)\n! cat {DATA_DIR}/nemo_format/dict.intents.csv",
"_____no_output_____"
],
[
"# print all slots from the NeMo format slot dictionary\n!echo 'Slots: ' $(wc -l < {DATA_DIR}/nemo_format/dict.slots.csv)\n! cat {DATA_DIR}/nemo_format/dict.slots.csv",
"_____no_output_____"
],
[
"# examples from the intent training file\n! head -n 10 {DATA_DIR}/nemo_format/train.tsv",
"_____no_output_____"
],
[
"# examples from the slot training file\n! head -n 10 {DATA_DIR}/nemo_format/train_slots.tsv",
"_____no_output_____"
]
],
[
[
"# Training model",
"_____no_output_____"
],
[
"## Model configuration\n\nOur Joint Intent and Slot classification model is comprised of the pretrained [BERT](https://arxiv.org/pdf/1810.04805.pdf) model with an Intent and Slot Classification layer on top of it.\n\nAll model and training parameters are defined in the **intent_slot_classification_config.yaml** config file. This file is located in the folder **examples/nlp/intent_slot_classification/conf/**. It contains 2 main sections:\n- **model**: All arguments that are related to the Model - language model, token classifier, optimizer and schedulers, datasets and any other related information\n\n- **trainer**: Any argument to be passed to PyTorch Lightning\n\nWe will download the config file from repository for the purpose of the tutorial. If you have a version of NeMo installed locally, you can use it from the above folder.",
"_____no_output_____"
]
],
[
[
"# download the model config file from repository for the purpose of this example\nwget.download(f'https://raw.githubusercontent.com/NVIDIA/NeMo/{BRANCH}/examples/nlp/intent_slot_classification/conf/intent_slot_classification_config.yaml', NEMO_DIR)\n\n# print content of the config file\nconfig_file = \"intent_slot_classification_config.yaml\"\nprint(config_file)\nconfig = OmegaConf.load(config_file)\nprint(OmegaConf.to_yaml(config))",
"_____no_output_____"
]
],
[
[
"## Setting up Data within the config\n\nAmong other things, the config file contains dictionaries called train_ds and validation_ds. These are configurations used to setup the Dataset and DataLoaders of the corresponding config.\n\nThe converter utility creates both training and evaluation files in the same directory, so we need to specify `model.data_dir` parameter to this directory. Also notice that some config lines, including `model.data_dir`, have `???` in place of paths, this means that values for these fields are required to be specified by the user.\n\n`config.model.intent_loss_weight` parameter - is a balance of training loss between Intent and Slot losses, a number between 0 to 1. Its default value is 0.6 which gives slightly higher priority to the Intent loss and it empirically works quite well. You can experiment with this value if you like.\nAlso you can try to change `config.model.class_balancing` parameter to `weighted_loss` and see if you get better accuracy.\n\nLet's now add the data directory path to the config.",
"_____no_output_____"
]
],
[
[
"config.model.data_dir = f'{DATA_DIR}/nemo_format'",
"_____no_output_____"
]
],
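The two knobs mentioned above can be overridden the same way as `data_dir`; the values below are just examples to experiment with, not recommended settings.

```python
# optional experiments with the parameters discussed above (illustrative values)
config.model.intent_loss_weight = 0.6           # balance between the intent loss and the slot loss
config.model.class_balancing = 'weighted_loss'  # re-weight the loss by class frequencies
print(config.model.intent_loss_weight, config.model.class_balancing)
```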
[
[
"## Building the PyTorch Lightning Trainer\n\nNeMo models are primarily PyTorch Lightning modules - and therefore are entirely compatible with the PyTorch Lightning ecosystem. `config.trainer.max_epochs` - param defines number of training epochs. Usually 50-100 epochs or less should be enough to train on your data. Let's instantiate the Trainer object.",
"_____no_output_____"
]
],
[
[
"# lets modify some trainer configs\n# checks if we have GPU available and uses it\ncuda = 1 if torch.cuda.is_available() else 0\nconfig.trainer.gpus = cuda\n\nconfig.trainer.precision = 16 if torch.cuda.is_available() else 32\n\n# for mixed precision training, uncomment the line below (precision should be set to 16 and amp_level to O1):\n# config.trainer.amp_level = O1\n\n# remove distributed training flags\nconfig.trainer.accelerator = None\n\n# setup a small number of epochs for demonstration purposes of this tutorial\nconfig.trainer.max_epochs = 5\n\ntrainer = pl.Trainer(**config.trainer)",
"_____no_output_____"
]
],
[
[
"## Setting up a NeMo Experiment\n\nNeMo has an experiment manager that handles logging and checkpointing for us, so let's use it. Model check points during training will be saved in this directory. ",
"_____no_output_____"
]
],
[
[
"exp_dir = exp_manager(trainer, config.get(\"exp_manager\", None))\n# the exp_dir provides a path to the current experiment for easy access\nprint(str(exp_dir))",
"_____no_output_____"
]
],
[
[
"## Initializing the model and Training\n\nInitial statistics of the dataset will be displayed at the beginning of the training and then Intent and Slot classification report will be displayed after each training epoch.",
"_____no_output_____"
]
],
[
[
"# initialize the model\nmodel = nemo_nlp.models.IntentSlotClassificationModel(config.model, trainer=trainer)\n\n# train\ntrainer.fit(model)",
"_____no_output_____"
]
],
[
[
"After training for 5 epochs, which should take no more than few minutes, you can expect training precision for this data set to be around these numbers (the accuracy will gradually continue to improve for this dataset up to about 50 epochs of training): \n```\nIntents:\n label precision recall f1 support \n alarm_query (label_id: 0) 94.74 94.74 94.74 19\n alarm_remove (label_id: 1) 100.00 100.00 100.00 11\n alarm_set (label_id: 2) 85.71 94.74 90.00 19\n audio_volume_down (label_id: 3) 0.00 0.00 0.00 8\n audio_volume_mute (label_id: 4) 100.00 86.67 92.86 15\n audio_volume_up (label_id: 5) 56.52 100.00 72.22 13\n calendar_query (label_id: 6) 55.00 57.89 56.41 19\n calendar_remove (label_id: 7) 88.89 84.21 86.49 19\n calendar_set (label_id: 8) 81.25 68.42 74.29 19\n cooking_recipe (label_id: 9) 86.36 100.00 92.68 19\n datetime_convert (label_id: 10) 0.00 0.00 0.00 8\n datetime_query (label_id: 11) 65.52 100.00 79.17 19\n email_addcontact (label_id: 12) 100.00 12.50 22.22 8\n email_query (label_id: 13) 83.33 78.95 81.08 19\n email_querycontact (label_id: 14) 62.50 78.95 69.77 19\n email_sendemail (label_id: 15) 70.83 89.47 79.07 19\n general_affirm (label_id: 16) 95.00 100.00 97.44 19\n general_commandstop (label_id: 17) 100.00 100.00 100.00 19\n general_confirm (label_id: 18) 100.00 100.00 100.00 19\n general_dontcare (label_id: 19) 100.00 100.00 100.00 19\n general_explain (label_id: 20) 100.00 94.74 97.30 19\n general_joke (label_id: 21) 100.00 100.00 100.00 12\n general_negate (label_id: 22) 95.00 100.00 97.44 19\n general_praise (label_id: 23) 100.00 94.74 97.30 19\n general_quirky (label_id: 24) 40.00 10.53 16.67 19\n general_repeat (label_id: 25) 100.00 100.00 100.00 19\n iot_cleaning (label_id: 26) 84.21 100.00 91.43 16\n iot_coffee (label_id: 27) 94.74 94.74 94.74 19\n iot_hue_lightchange (label_id: 28) 94.44 89.47 91.89 19\n iot_hue_lightdim (label_id: 29) 100.00 83.33 90.91 12\n iot_hue_lightoff (label_id: 30) 89.47 89.47 89.47 19\n iot_hue_lighton (label_id: 31) 0.00 0.00 0.00 3\n iot_hue_lightup (label_id: 32) 81.25 92.86 86.67 14\n iot_wemo_off (label_id: 33) 60.00 100.00 75.00 9\n iot_wemo_on (label_id: 34) 100.00 14.29 25.00 7\n lists_createoradd (label_id: 35) 78.95 78.95 78.95 19\n lists_query (label_id: 36) 78.95 78.95 78.95 19\n lists_remove (label_id: 37) 90.00 94.74 92.31 19\n music_likeness (label_id: 38) 70.59 66.67 68.57 18\n music_query (label_id: 39) 77.78 73.68 75.68 19\n music_settings (label_id: 40) 0.00 0.00 0.00 7\n news_query (label_id: 41) 77.78 73.68 75.68 19\n play_audiobook (label_id: 42) 90.00 94.74 92.31 19\n play_game (label_id: 43) 80.00 84.21 82.05 19\n play_music (label_id: 44) 53.85 73.68 62.22 19\n play_podcasts (label_id: 45) 89.47 89.47 89.47 19\n play_radio (label_id: 46) 93.75 78.95 85.71 19\n qa_currency (label_id: 47) 95.00 100.00 97.44 19\n qa_definition (label_id: 48) 85.00 89.47 87.18 19\n qa_factoid (label_id: 49) 45.16 73.68 56.00 19\n qa_maths (label_id: 50) 100.00 100.00 100.00 14\n qa_stock (label_id: 51) 95.00 100.00 97.44 19\n recommendation_events (label_id: 52) 94.44 89.47 91.89 19\n recommendation_locations (label_id: 53) 94.74 94.74 94.74 19\n recommendation_movies (label_id: 54) 100.00 100.00 100.00 10\n social_post (label_id: 55) 90.00 94.74 92.31 19\n social_query (label_id: 56) 94.74 100.00 97.30 18\n takeaway_order (label_id: 57) 93.75 78.95 85.71 19\n takeaway_query (label_id: 58) 85.71 94.74 90.00 19\n transport_query (label_id: 59) 83.33 78.95 81.08 19\n transport_taxi (label_id: 60) 100.00 100.00 100.00 18\n 
transport_ticket (label_id: 61) 89.47 89.47 89.47 19\n transport_traffic (label_id: 62) 100.00 100.00 100.00 19\n weather_query (label_id: 63) 100.00 89.47 94.44 19\n -------------------\n micro avg 85.04 85.04 85.04 1076\n macro avg 81.13 80.81 79.36 1076\n weighted avg 84.10 85.04 83.54 1076\n \nSlots:\n label precision recall f1 support \n alarm_type (label_id: 0) 0.00 0.00 0.00 0\n app_name (label_id: 1) 0.00 0.00 0.00 6\n artist_name (label_id: 2) 0.00 0.00 0.00 21\n audiobook_author (label_id: 3) 0.00 0.00 0.00 1\n audiobook_name (label_id: 4) 0.00 0.00 0.00 18\n business_name (label_id: 5) 60.00 56.60 58.25 53\n business_type (label_id: 6) 0.00 0.00 0.00 24\n change_amount (label_id: 7) 0.00 0.00 0.00 25\n coffee_type (label_id: 8) 0.00 0.00 0.00 4\n color_type (label_id: 9) 0.00 0.00 0.00 12\n cooking_type (label_id: 10) 0.00 0.00 0.00 0\n currency_name (label_id: 11) 84.09 75.51 79.57 49\n date (label_id: 12) 57.95 91.07 70.83 112\n definition_word (label_id: 13) 0.00 0.00 0.00 20\n device_type (label_id: 14) 74.55 51.25 60.74 80\n drink_type (label_id: 15) 0.00 0.00 0.00 0\n email_address (label_id: 16) 0.00 0.00 0.00 14\n email_folder (label_id: 17) 0.00 0.00 0.00 1\n event_name (label_id: 18) 100.00 13.24 23.38 68\n food_type (label_id: 19) 51.72 69.77 59.41 43\n game_name (label_id: 20) 60.00 14.29 23.08 21\n game_type (label_id: 21) 0.00 0.00 0.00 0\n general_frequency (label_id: 22) 0.00 0.00 0.00 9\n house_place (label_id: 23) 93.33 42.42 58.33 33\n ingredient (label_id: 24) 0.00 0.00 0.00 6\n joke_type (label_id: 25) 0.00 0.00 0.00 4\n list_name (label_id: 26) 0.00 0.00 0.00 21\n meal_type (label_id: 27) 0.00 0.00 0.00 0\n media_type (label_id: 28) 0.00 0.00 0.00 37\n movie_name (label_id: 29) 0.00 0.00 0.00 0\n movie_type (label_id: 30) 0.00 0.00 0.00 0\n music_album (label_id: 31) 0.00 0.00 0.00 0\n music_descriptor (label_id: 32) 0.00 0.00 0.00 3\n music_genre (label_id: 33) 0.00 0.00 0.00 9\n news_topic (label_id: 34) 0.00 0.00 0.00 17\n order_type (label_id: 35) 0.00 0.00 0.00 17\n person (label_id: 36) 44.86 92.31 60.38 52\n personal_info (label_id: 37) 0.00 0.00 0.00 20\n place_name (label_id: 38) 71.25 77.03 74.03 148\n player_setting (label_id: 39) 0.00 0.00 0.00 1\n playlist_name (label_id: 40) 0.00 0.00 0.00 1\n podcast_descriptor (label_id: 41) 0.00 0.00 0.00 13\n podcast_name (label_id: 42) 0.00 0.00 0.00 4\n radio_name (label_id: 43) 66.67 10.53 18.18 38\n relation (label_id: 44) 0.00 0.00 0.00 17\n song_name (label_id: 45) 0.00 0.00 0.00 22\n time (label_id: 46) 70.27 78.20 74.02 133\n time_zone (label_id: 47) 0.00 0.00 0.00 9\n timeofday (label_id: 48) 0.00 0.00 0.00 28\n transport_agency (label_id: 49) 0.00 0.00 0.00 9\n transport_descriptor (label_id: 50) 0.00 0.00 0.00 0\n transport_name (label_id: 51) 0.00 0.00 0.00 4\n transport_type (label_id: 52) 78.38 82.86 80.56 35\n weather_descriptor (label_id: 53) 0.00 0.00 0.00 17\n O (label_id: 54) 92.42 98.80 95.50 5920\n -------------------\n micro avg 89.10 89.10 89.10 7199\n macro avg 21.86 18.56 18.18 7199\n weighted avg 84.42 89.10 86.01 7199\n```",
"_____no_output_____"
],
[
"## Evaluation\nTo see how the model performs, we can evaluate the performance of the trained model on a test data file. Here we would load the best checkpoint (the one with the lowest validation loss) and create a model (eval_model) from the checkpoint. We will use the same trainer for testing.",
"_____no_output_____"
]
],
[
[
"# extract the path of the best checkpoint from the training, you may update it to any other saved checkpoint file\ncheckpoint_path = trainer.checkpoint_callback.best_model_path\n\n# load the model from this checkpoint\neval_model = nemo_nlp.models.IntentSlotClassificationModel.load_from_checkpoint(checkpoint_path=checkpoint_path)",
"_____no_output_____"
],
[
"# we will setup testing data reusing the same config (test section)\neval_model.setup_test_data(test_data_config=config.model.test_ds)\n\n# run the evaluation on the test dataset\ntrainer.test(model=model, ckpt_path=None, verbose=False)",
"_____no_output_____"
]
],
[
[
"## Inference from Examples\nNext step to see how the trained model will classify Intents and Slots for given queries from this domain. To improve the predictions you may need to train the model for more than 5 epochs.\n",
"_____no_output_____"
]
],
[
[
"queries = [\n 'set alarm for seven thirty am',\n 'lower volume by fifty percent',\n 'what is my schedule for tomorrow',\n]\n\npred_intents, pred_slots = eval_model.predict_from_examples(queries)\n\nlogging.info('The prediction results of some sample queries with the trained model:')\nfor query, intent, slots in zip(queries, pred_intents, pred_slots):\n logging.info(f'Query : {query}')\n logging.info(f'Predicted Intent: {intent}')\n logging.info(f'Predicted Slots: {slots}')",
"_____no_output_____"
]
],
[
[
"## Training Script\n\nIf you have NeMo installed locally (eg. cloned from the Github), you can also train the model with the example script: `examples/nlp/intent_slot_classification/intent_slot_classification.py.`\nThis script contains an example on how to train, evaluate and perform inference with the IntentSlotClassificationModel.\n\nTo run a training script, use:\n\n`cd examples/nlp/intent_slot_classification`\n\n`python intent_slot_classification.py model.data_dir=PATH_TO_DATA_DIR`\n\nBy default, this script uses examples/nlp/intent_slot_classification/conf/intent_slot_classification_config.py config file, and you may update all the params inside of this config file or alternatively providing them in the command line.",
"_____no_output_____"
]
]
]
| [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
]
|
ec67ef4a8a9e68be530ffe6d245c98572d026a2b | 16,838 | ipynb | Jupyter Notebook | notebooks/2b_model_testing.ipynb | nikhilrj/BatchSparkScoringPredictiveMaintenance | 57c2ca10bd789603b094d31c950c80f326beb730 | [
"MIT"
]
| null | null | null | notebooks/2b_model_testing.ipynb | nikhilrj/BatchSparkScoringPredictiveMaintenance | 57c2ca10bd789603b094d31c950c80f326beb730 | [
"MIT"
]
| null | null | null | notebooks/2b_model_testing.ipynb | nikhilrj/BatchSparkScoringPredictiveMaintenance | 57c2ca10bd789603b094d31c950c80f326beb730 | [
"MIT"
]
| null | null | null | 8,419 | 16,837 | 0.726155 | [
[
[
"# Step 2B: Model Testing\n\nThis notebook examines the model created in the `2b_model_building` notebook.\n\nUsing the `2a_feature_engineering` Jupyter notebook, this notebook creates a new test data set and scores the observations using the machine learning model (a decision tree classifier or a random forest classifier) created in the `2b_model_building` to predict when different components within the test machine population will fail. Then using the known labels from the existing data, we calculate a set of evaluation metrics to understand how the model may perform when used in production settings. \n\n**Note:** This notebook will take about 2-4 minutes to execute all cells, depending on the compute configuration you have setup.",
"_____no_output_____"
]
],
[
[
"# import the libraries\n# For some data handling\nimport numpy as np\nimport pandas as pd\n\nfrom collections import OrderedDict\n\nimport pyspark.sql.functions as F\nfrom pyspark.ml import PipelineModel\n# for creating pipelines and model\nfrom pyspark.ml.feature import StringIndexer, VectorAssembler, VectorIndexer\nfrom pyspark.sql import SparkSession\n\nspark = SparkSession.builder.getOrCreate()\n\nimport matplotlib.pyplot as plt\n\n# This is the final feature data file.\ntesting_table = 'testing_data'\nmodel_type = 'RandomForest' # Use 'DecisionTree' or 'GBTClassifier' or 'RandomForest'",
"_____no_output_____"
],
[
"dbutils.widgets.removeAll()\ndbutils.widgets.text(\"Testing_table\",testing_table)\ndbutils.widgets.text(\"Model\", model_type)\n\ndbutils.widgets.text(\"start_date\", '2015-11-30')\n\ndbutils.widgets.text(\"to_date\", '2016-02-01')",
"_____no_output_____"
]
],
[
[
"# Prepare the Training/Testing data",
"_____no_output_____"
],
[
"A fundamental practice in machine learning is to calibrate and test your model parameters on data that has not been used to train the model. Evaluation of the model requires splitting the available data into a training portion, a calibration portion and an evaluation portion. Typically, 80% of data is used to train the model and 10% each to calibrate any parameter selection and evaluate your model.\n\nIn general random splitting can be used, but since time series data have an inherent correlation between observations. For predictive maintenance problems, a time-dependent spliting strategy is often a better approach to estimate performance. For a time-dependent split, a single point in time is chosen, the model is trained on examples up to that point in time, and validated on the examples after that point. This simulates training on current data and score data collected in the future data after the splitting point is not known. However, care must be taken on labels near the split point. In this case, feature records within 7 days of the split point can not be labeled as a failure, since that is unobserved data. \n\nIn the following code blocks, we create a data set to test the model.",
"_____no_output_____"
]
],
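  [
    [
      "The sketch below (not part of the original workflow) makes the time-dependent split concrete on a tiny toy DataFrame; the rows, column names and cutoff date are assumptions chosen only for illustration. The actual test set used in this notebook is produced by the `2a_feature_engineering` notebook via the `start_date`/`to_date` widgets.",
      "_____no_output_____"
    ]
  ],
  [
    [
      "# Illustration only: a time-dependent split on a toy Spark DataFrame.\n# Everything here (rows, column names, cutoff) is an assumption for the sketch.\nfrom datetime import date\n\ntoy = spark.createDataFrame(\n    [(1, date(2015, 10, 1), 0.2),\n     (1, date(2015, 12, 1), 0.4),\n     (2, date(2016, 1, 15), 0.6)],\n    ['machineID', 'dt_truncated', 'feature'])\n\nsplit_point = '2015-11-30'   # assumed cutoff: train on data up to this date\ntrain_toy = toy.filter(F.col('dt_truncated') <= split_point)\ntest_toy = toy.filter(F.col('dt_truncated') > split_point)\nprint(train_toy.count(), test_toy.count())   # expect: 1 2",
      "_____no_output_____"
    ]
  ],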
[
[
"#print(spark.catalog.listDatabases())\nspark.catalog.setCurrentDatabase(\"default\")\nexists = False\nfor tbl in spark.catalog.listTables():\n if tbl.name == dbutils.widgets.get(\"Testing_table\"):\n exists = True\n break",
"_____no_output_____"
],
[
"if not exists:\n dbutils.notebook.run(\"2a_feature_engineering\", 600, {\"features_table\": dbutils.widgets.get(\"Testing_table\"), \n \"start_date\": dbutils.widgets.get(\"start_date\"), \n \"to_date\": dbutils.widgets.get(\"to_date\")})",
"_____no_output_____"
]
],
[
[
"# Classification models\n\nA particular problem in predictive maintenance is machine failures are usually rare occurrences compared to normal operation. This is fortunate for the business as maintenance and saftey issues are few, but causes an imbalance in the label distribution. This imbalance leads to poor performance as algorithms tend to classify majority class examples at the expense of minority class, since the total misclassification error is much improved when majority class is labeled correctly. This causes low recall or precision rates, although accuracy can be high. It becomes a larger problem when the cost of false alarms is very high. To help with this problem, sampling techniques such as oversampling of the minority examples can be used. These methods are not covered in this notebook. Because of this, it is also important to look at evaluation metrics other than accuracy alone.\n\nWe will build and compare two different classification model approaches:\n\n - **Decision Tree Classifier**: Decision trees and their ensembles are popular methods for the machine learning tasks of classification and regression. Decision trees are widely used since they are easy to interpret, handle categorical features, extend to the multiclass classification setting, do not require feature scaling, and are able to capture non-linearities and feature interactions.\n\n - **Random Forest Classifier**: A random forest is an ensemble of decision trees. Random forests combine many decision trees in order to reduce the risk of overfitting. Tree ensemble algorithms such as random forests and boosting are among the top performers for classification and regression tasks.\n\nWe will to compare these models in the AML Workbench _runs_ screen. The next code block creates the model. You can choose between a _DecisionTree_ or _RandomForest_ by setting the 'model_type' variable. We have also included a series of model hyperparameters to guide your exploration of the model space.",
"_____no_output_____"
]
],
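  [
    [
      "Note that the pipeline evaluated here is trained and saved by the `2b_model_building` notebook; in this notebook it is only loaded and applied. For orientation, the next cell is a minimal, hypothetical sketch of how such a Spark ML pipeline is typically assembled. The parameter values are placeholders and are not the settings used to produce the saved model.",
      "_____no_output_____"
    ]
  ],
  [
    [
      "# Hypothetical sketch only -- the saved pipeline loaded below is NOT built here.\nfrom pyspark.ml import Pipeline\nfrom pyspark.ml.classification import RandomForestClassifier, DecisionTreeClassifier\nfrom pyspark.ml.feature import StringIndexer, VectorIndexer\n\n# indexers analogous to the ones applied to the test data further down\nexample_label_indexer = StringIndexer(inputCol=\"label_e\", outputCol=\"indexedLabel\")\nexample_feature_indexer = VectorIndexer(inputCol=\"features\", outputCol=\"indexedFeatures\",\n                                        maxCategories=10)\n\nif dbutils.widgets.get(\"Model\") == 'RandomForest':\n    example_classifier = RandomForestClassifier(labelCol=\"indexedLabel\",\n                                                featuresCol=\"indexedFeatures\",\n                                                numTrees=50)      # placeholder value\nelse:\n    example_classifier = DecisionTreeClassifier(labelCol=\"indexedLabel\",\n                                                featuresCol=\"indexedFeatures\",\n                                                maxDepth=15)      # placeholder value\n\n# the classifier sits at stage index 2, mirroring model_pipeline.stages[2] used later\nexample_pipeline = Pipeline(stages=[example_label_indexer, example_feature_indexer,\n                                    example_classifier])\n# example_pipeline.fit(training_data) would produce a PipelineModel like the one loaded below",
      "_____no_output_____"
    ]
  ],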
[
[
"model_pipeline = PipelineModel.load(\"dbfs:/storage/models/\" + dbutils.widgets.get(\"Model\") + \".pqt\")\n\nprint(\"Model loaded\")\nmodel_pipeline",
"_____no_output_____"
]
],
[
[
"To evaluate this model, we predict the component failures over the test data set. Since the test set has been created from data the model has not been seen before, it simulates future data. The evaluation then can be generalize to how the model could perform when operationalized and used to score the data in real time.",
"_____no_output_____"
]
],
[
[
"test_data = spark.table(dbutils.widgets.get(\"Testing_table\"))\n\n# define list of input columns for downstream modeling\n\n# We'll use the known label, and key variables.\nlabel_var = ['label_e']\nkey_cols =['machineID','dt_truncated']\n\n# Then get the remaing feature names from the data\ninput_features = test_data.columns\n\n# We'll use the known label, key variables and \n# a few extra columns we won't need.\nremove_names = label_var + key_cols + ['failure','model_encoded','model' ]\n\n# Remove the extra names if that are in the input_features list\ninput_features = [x for x in input_features if x not in set(remove_names)]\n\n#input_features\n# assemble features\nva = VectorAssembler(inputCols=(input_features), outputCol='features')\n\n# assemble features\ntest_data = va.transform(test_data).select('machineID','dt_truncated','label_e','features').cache()\n\n# set maxCategories so features with > 10 distinct values are treated as continuous.\nfeatureIndexer = VectorIndexer(inputCol=\"features\", \n outputCol=\"indexedFeatures\", \n maxCategories=10).fit(test_data)\n\n# fit on whole dataset to include all labels in index\nlabelIndexer = StringIndexer(inputCol=\"label_e\", outputCol=\"indexedLabel\").fit(test_data)\n\ntesting = test_data\n\nprint(testing.count())\n\n# make predictions. The Pipeline does all the same operations on the test data\npredictions = model_pipeline.transform(testing)\n\n# Create the confusion matrix for the multiclass prediction results\n# This result assumes a decision boundary of p = 0.5\nconf_table = predictions.stat.crosstab('indexedLabel', 'prediction')\nconfuse = conf_table.toPandas()\nconfuse.head()",
"_____no_output_____"
]
],
[
[
"The confusion matrix lists each true component failure in rows and the predicted value in columns. Labels numbered 0.0 corresponds to no component failures. Labels numbered 1.0 through 4.0 correspond to failures in one of the four components in the machine. As an example, the third number in the top row indicates how many days we predicted component 2 would fail, when no components actually did fail. The second number in the second row, indicates how many days we correctly predicted a component 1 failure within the next 7 days.\n\nWe read the confusion matrix numbers along the diagonal as correctly classifying the component failures. Numbers above the diagonal indicate the model incorrectly predicting a failure when non occured, and those below indicate incorrectly predicting a non-failure for the row indicated component failure.\n\nWhen evaluating classification models, it is convenient to reduce the results in the confusion matrix into a single performance statistic. However, depending on the problem space, it is impossible to always use the same statistic in this evaluation. Below, we calculate four such statistics.\n\n- **Accuracy**: reports how often we correctly predicted the labeled data. Unfortunatly, when there is a class imbalance (a large number of one of the labels relative to others), this measure is biased towards the largest class. In this case non-failure days.\n\nBecause of the class imbalance inherint in predictive maintenance problems, it is better to look at the remaining statistics instead. Here positive predictions indicate a failure.\n\n- **Precision**: Precision is a measure of how well the model classifies the truely positive samples. Precision depends on falsely classifying negative days as positive.\n\n- **Recall**: Recall is a measure of how well the model can find the positive samples. Recall depends on falsely classifying positive days as negative.\n\n- **F1**: F1 considers both the precision and the recall. F1 score is the harmonic average of precision and recall. An F1 score reaches its best value at 1 (perfect precision and recall) and worst at 0.\n\nThese metrics make the most sense for binary classifiers, though they are still useful for comparision in our multiclass setting. Below we calculate these evaluation statistics for the selected classifier, and post them back to the AML workbench run time page for tracking between experiments.",
"_____no_output_____"
]
],
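  [
    [
      "For reference, the statistics computed in the next cell are the standard definitions, with $TP$, $FP$, $TN$ and $FN$ denoting true/false positives and negatives (failure days counted as positives, summed over the four failure classes):\n\n$$Accuracy = \\frac{TP + TN}{TP + TN + FP + FN}, \\quad Precision = \\frac{TP}{TP + FP}, \\quad Recall = \\frac{TP}{TP + FN}, \\quad F1 = \\frac{2 \\cdot Precision \\cdot Recall}{Precision + Recall}$$",
      "_____no_output_____"
    ]
  ],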
[
[
"# select (prediction, true label) and compute test error\n# select (prediction, true label) and compute test error\n# True positives - diagonal failure terms \ntp = confuse['1.0'][1]+confuse['2.0'][2]+confuse['3.0'][3]+confuse['4.0'][4]\n\n# False positves - All failure terms - True positives\nfp = np.sum(np.sum(confuse[['1.0', '2.0','3.0','4.0']])) - tp\n\n# True negatives \ntn = confuse['0.0'][0]\n\n# False negatives total of non-failure column - TN\nfn = np.sum(np.sum(confuse[['0.0']])) - tn\n\n# Accuracy is diagonal/total \nacc_n = tn + tp\nacc_d = np.sum(np.sum(confuse[['0.0','1.0', '2.0','3.0','4.0']]))\nacc = acc_n/acc_d\n\n# Calculate precision and recall.\nprec = tp/(tp+fp)\nrec = tp/(tp+fn)\n\n# Print the evaluation metrics to the notebook\nprint(\"Accuracy = %g\" % acc)\nprint(\"Precision = %g\" % prec)\nprint(\"Recall = %g\" % rec )\nprint(\"F1 = %g\" % (2.0 * prec * rec/(prec + rec)))\nprint(\"\")",
"_____no_output_____"
],
[
"importances = model_pipeline.stages[2]\nx = range(34)\n\nfig = plt.figure(1)\nax = fig.add_subplot(111)\n\nplt.bar(x, list(importances.featureImportances.values))\nplt.xticks(x)\nplt.xlabel('')\nax.set_xticklabels(input_features, rotation = 90, ha=\"left\")\n#plt.gcf().subplots_adjust(bottom=0.50)\nplt.tight_layout()\ndisplay()\n# input_features",
"_____no_output_____"
]
],
[
[
"Remember that this is a simulated data set. We would expect a model built on real world data to behave very differently. The accuracy may still be close to one, but the precision and recall numbers would be much lower.\n\n# Conclusion\n\nThe next step is to build the batch scoreing operations. The `3b_model_scoring` notebook takes parameters to define the data to be scored, and using the model created here, calulates the probability of component failure in the machine population specified.",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
]
|
ec67fa9692cf31892493f53dc61de8227affd805 | 10,605 | ipynb | Jupyter Notebook | networkx_bipartite_weighted_graphs.ipynb | aixpact/networks | 3e5c3cb232e9cff8916d624c7c8433895d760b2c | [
"MIT"
]
| 1 | 2019-04-13T23:30:06.000Z | 2019-04-13T23:30:06.000Z | networkx_bipartite_weighted_graphs.ipynb | aixpact/networks | 3e5c3cb232e9cff8916d624c7c8433895d760b2c | [
"MIT"
]
| null | null | null | networkx_bipartite_weighted_graphs.ipynb | aixpact/networks | 3e5c3cb232e9cff8916d624c7c8433895d760b2c | [
"MIT"
]
| null | null | null | 24.952941 | 306 | 0.533899 | [
[
[
"# Creating and Manipulating Graphs\n\nEight employees at a small company were asked to choose 3 movies that they would most enjoy watching for the upcoming company movie night. These choices are stored in the file `Employee_Movie_Choices.txt`.\n\nA second file, `Employee_Relationships.txt`, has data on the relationships between different coworkers. \n\nThe relationship score has value of `-100` (Enemies) to `+100` (Best Friends). A value of zero means the two employees haven't interacted or are indifferent.\n\nBoth files are tab delimited.",
"_____no_output_____"
],
[
"---\n\n_You are currently looking at **version 1.1** of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the [Jupyter Notebook FAQ](https://www.coursera.org/learn/python-social-network-analysis/resources/yPcBs) course resource._\n\n---",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nimport networkx as nx\nimport pandas as pd\nimport numpy as np\nfrom networkx.algorithms import bipartite\n\n# from itertools import permutations",
"_____no_output_____"
]
],
[
[
"### Data",
"_____no_output_____"
]
],
[
[
"# This is the set of employees\nemployees = set(['Pablo',\n 'Lee',\n 'Georgia',\n 'Vincent',\n 'Andy',\n 'Frida',\n 'Joan',\n 'Claude'])\n\n# This is the set of movies\nmovies = set(['The Shawshank Redemption',\n 'Forrest Gump',\n 'The Matrix',\n 'Anaconda',\n 'The Social Network',\n 'The Godfather',\n 'Monty Python and the Holy Grail',\n 'Snakes on a Plane',\n 'Kung Fu Panda',\n 'The Dark Knight',\n 'Mean Girls'])",
"_____no_output_____"
],
[
"# you can use the following function to plot graphs\n# make sure to comment it out before submitting to the autograder\ndef plot_graph(G, weight_name=None):\n '''\n G: a networkx G\n weight_name: name of the attribute for plotting edge weights (if G is weighted)\n '''\n %matplotlib notebook\n import matplotlib.pyplot as plt\n \n plt.figure()\n pos = nx.spring_layout(G)\n edges = G.edges()\n weights = None\n \n if weight_name:\n weights = [int(G[u][v][weight_name]) for u,v in edges]\n labels = nx.get_edge_attributes(G,weight_name)\n nx.draw_networkx_edge_labels(G,pos,edge_labels=labels)\n nx.draw_networkx(G, pos, edges=edges, width=weights);\n else:\n nx.draw_networkx(G, pos, edges=edges);",
"_____no_output_____"
]
],
[
[
"### Question 1\n\nUsing NetworkX, load in the bipartite graph from `Employee_Movie_Choices.txt` and return that graph.",
"_____no_output_____"
]
],
[
[
"!find .. | grep -i employee_movie",
"_____no_output_____"
],
[
"!head ./_data/Employee_Movie_Choices.txt",
"_____no_output_____"
],
[
"G = nx.read_edgelist('./_data/Employee_Movie_Choices.txt', delimiter='\\t')\nprint(nx.info(G))",
"_____no_output_____"
],
[
"list(G.edges(data=True))[:5]",
"_____no_output_____"
],
[
"list(G.nodes(data=True))[:5]",
"_____no_output_____"
]
],
[
[
"### Bipartite graph\n\nAdd nodes attributes named `'type'='movie'` `'type'='employee'`",
"_____no_output_____"
]
],
[
[
"G_df = pd.read_csv('./_data/Employee_Movie_Choices.txt', delimiter='\\t', skiprows=1, names=['employee', 'movie'])\nG_df.head()",
"_____no_output_____"
],
[
"_ = [G.add_node(G_df.loc[i, 'employee'], type='employee') for i, x in enumerate(G_df.index)]\n_ = [G.add_node(G_df.loc[i, 'movie'], type='movie') for i, x in enumerate(G_df.index)]\nG.nodes(data=True)",
"_____no_output_____"
]
],
[
[
"### Weighted projection of the graph\n\nWhich tells us how many movies different pairs of employees have in common.",
"_____no_output_____"
]
],
[
[
"L = [x for x in G if G.node[x]['type']=='employee']\nR = [x for x in G if G.node[x]['type']=='movie']\nL, R",
"_____no_output_____"
],
[
"B = nx.Graph() \nB.add_nodes_from(L, bipartite=0)\nB.add_nodes_from(R, bipartite=1)\n\nB.add_edges_from(G.edges())\nassert bipartite.is_bipartite(B) # Check if B is bipartite",
"_____no_output_____"
]
],
[
[
"#### Get weighted edges",
"_____no_output_____"
]
],
[
[
"P_employee.edges(data=True)",
"_____no_output_____"
],
[
"P_employee.get_edge_data('Andy', 'Georgia')",
"_____no_output_____"
],
[
"P_movie.edges(data=True)",
"_____no_output_____"
],
[
"P_movie.get_edge_data('Anaconda', 'Snakes on a Plane')",
"_____no_output_____"
],
[
"employee_weights = [attr['weight'] for u, v, attr in P_employee.edges(data=True)]\nemployee_weights",
"_____no_output_____"
],
[
"movie_weights = [attr['weight'] for u, v, attr in P_movie.edges(data=True)]\nmovie_weights",
"_____no_output_____"
]
],
[
[
"#### Visualise graph",
"_____no_output_____"
]
],
[
[
"P_employee = bipartite.weighted_projected_graph(B, L)\nnx.draw_networkx(P_employee, width=employee_weights)",
"_____no_output_____"
],
[
"P_movie = bipartite.weighted_projected_graph(B, R)\nnx.draw_networkx(P_movie, width=movie_weights)",
"_____no_output_____"
]
],
[
[
"### Question 4\n\nSuppose you'd like to find out if people that have a high relationship score also like the same types of movies.\n\nFind the Pearson correlation ( using `DataFrame.corr()` ) between employee relationship scores and the number of movies they have in common. If two employees have no movies in common it should be treated as a 0, not a missing value, and should be included in the correlation calculation.\n\n*This function should return a float.*",
"_____no_output_____"
]
],
[
[
"# \n\nG_df = pd.read_csv('./_data/Employee_Relationships.txt', delimiter='\\t', skiprows=1, names=['emp1', 'emp2', 'score'])\n\nG1 = nx.from_pandas_dataframe(G_df, 'emp1', 'emp2', edge_attr='score')\nG2 = P_employee\nG3 = nx.compose(G1, G2)\nprint(G3.edges(data=True))",
"_____no_output_____"
],
[
"df = pd.DataFrame(list(G3.edges(data=True)), columns=['emp1', 'emp2', 'score_weight'])\ndf.head()",
"_____no_output_____"
],
[
"def extract_dict_value(x, ftr):\n try: return x[ftr]\n except: return 0\n\ndf['score'] = df['score_weight'].map(lambda x: extract_dict_value(x, 'score'))\ndf['weight'] = df['score_weight'].map(lambda x: extract_dict_value(x, 'weight'))",
"_____no_output_____"
],
[
"df.head()",
"_____no_output_____"
]
],
[
[
"#### Correlation between weight and score\n\nDo employees that have a high relationship score also like the same types of movies",
"_____no_output_____"
]
],
[
[
"cor = df['weight'].corr(df['score'])\ncor",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
]
|
ec6800db493d065dd2b9edef8710c475e32c84ac | 10,986 | ipynb | Jupyter Notebook | notebooks/scrapers.ipynb | cvoglewede/boba | 935bded9b65717e0495b7779b4aedd735763d6fb | [
"FTL"
]
| 1 | 2020-08-13T21:38:05.000Z | 2020-08-13T21:38:05.000Z | notebooks/scrapers.ipynb | cvoglewede/boba | 935bded9b65717e0495b7779b4aedd735763d6fb | [
"FTL"
]
| 3 | 2021-03-31T19:48:25.000Z | 2021-12-13T20:41:46.000Z | notebooks/scrapers.ipynb | cvoglewede/boba | 935bded9b65717e0495b7779b4aedd735763d6fb | [
"FTL"
]
| null | null | null | 32.311765 | 1,285 | 0.542054 | [
[
[
"import pandas as pd\nimport logging\nimport boto3\nfrom botocore.exceptions import ClientError\nimport numpy as np\nfrom bs4 import BeautifulSoup\nimport requests\nimport io\nimport re\nfrom lxml import html \nfrom tqdm import tqdm\nimport os\nimport json\nimport warnings\npd.set_option('max_columns',999)\nimport dotenv\nimport psycopg2",
"_____no_output_____"
],
[
"from bs4 import BeautifulSoup\n\nleague = 'all'\nqual = 0\nind = 1\nseason=2020\nurl = 'http://www.fangraphs.com/leaders.aspx?pos=all&stats=bat&lg={}&qual={}&type=c,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255,256,257,258,259,260,261,262,263,264,265,266,267,268,269,270,271,272,273,274,275,276,277,278,279,280,281,282,283,284,285,286,287,288,289,290,291,292,293,294,295,296,297,298,299,300,301,302,303,304,-1&season={}&month=0&season1={}&ind={}&team=&rost=&age=&filter=&players=&page=1_100000'\nurl = url.format(league, qual, season, season, ind)\n",
"_____no_output_____"
],
[
"s = requests.get(url).content\nsoup = BeautifulSoup(s, \"lxml\")\ntable = soup.find('table', {'class': 'rgMasterTable'})\ndata = []\nheadings = [row.text.strip() for row in table.find_all('th')[1:]]+['playerID']+['position']\nFBperc_indices = [i for i,j in enumerate(headings) if j=='FB%']\nheadings[FBperc_indices[1]]='FB% (Pitch)'\ntable_body = table.find('tbody')\nrows = table_body.find_all('tr')\nfor row in rows:\n cols = row.find_all('td')\n cols = [ele.text.strip() for ele in cols]\n s = row.find('a')['href']\n playerid = re.search('playerid=(.*)&', s)\n cols.append(playerid.group(1))\n position = re.search('position=(.*)', s)\n cols.append(position.group(1))\n data.append([ele for ele in cols[1:]])\n\ndata = pd.DataFrame(data=data, columns=headings)\n\ndata.replace(r'^\\s*$', np.nan, regex=True, inplace = True)\npercentages = ['Zone% (pi)','Contact% (pi)','Z-Contact% (pi)','O-Contact% (pi)','Swing% (pi)','Z-Swing% (pi)','O-Swing% (pi)','XX% (pi)','SL% (pi)','SI% (pi)','SB% (pi)','KN% (pi)','FS% (pi)','FC% (pi)','FA% (pi)','CU% (pi)','CS% (pi)','CH% (pi)','TTO%','Hard%','Med%','Soft%','Oppo%','Cent%','Pull%','Zone% (pfx)','Contact% (pfx)','Z-Contact% (pfx)','O-Contact% (pfx)','Swing% (pfx)','Z-Swing% (pfx)','O-Swing% (pfx)','UN% (pfx)','KN% (pfx)','SC% (pfx)','CH% (pfx)','EP% (pfx)','KC% (pfx)','CU% (pfx)','SL% (pfx)','SI% (pfx)','FO% (pfx)','FS% (pfx)','FC% (pfx)','FT% (pfx)','FA% (pfx)','SwStr%','F-Strike%','Zone%','Contact%','Z-Contact%','O-Contact%','Swing%','Z-Swing%','O-Swing%','PO%','XX%','KN%','SF%','CH%','CB%','CT%','SL%','FB%','BUH%','IFH%','HR/FB','IFFB%','FB% (Pitch)','GB%', 'LD%','GB/FB','K%','BB%']\nfor col in percentages:\n if not data[col].empty:\n if pd.api.types.is_string_dtype(data[col]):\n data[col] = data[col].str.strip(' %')\n data[col] = data[col].str.strip('%')\n data[col] = data[col].astype(float)/100.\n else:\n pass\ncols_to_numeric = [col for col in data.columns if col not in ['Name', 'Team', 'Age Rng', 'Dol','playerID','position']]\ndata[cols_to_numeric] = data[cols_to_numeric].astype(float)\ndata = data.sort_values(['WAR', 'OPS'], ascending=False)",
"_____no_output_____"
],
[
"# soup",
"_____no_output_____"
],
[
"# table",
"_____no_output_____"
],
[
"test_df = data[['playerID','Name','Season','position','Team','Age','G','HR','R','RBI','OBP','SLG']]\n\nlist(test_df.columns)\n",
"_____no_output_____"
],
[
"\nPGHOST = dotenv.dotenv_values()['PGHOST']\nPGDATABASE = dotenv.dotenv_values()['PGDATABASE']\nPGUSER = dotenv.dotenv_values()['PGUSER']\nPGPASSWORD = dotenv.dotenv_values()['PGPASSWORD']\nPGPORT = dotenv.dotenv_values()['PGPORT']\n\nconn_string = \"host=\" + PGHOST +\" port=\" + PGPORT +\" dbname=\" + PGDATABASE +\" user=\" + PGUSER +\" password=\"+ PGPASSWORD\nconn = psycopg2.connect(conn_string)\ncursor = conn.cursor()\n",
"_____no_output_____"
],
[
"# conn, cursor = connect()",
"_____no_output_____"
],
[
"create_table = \"\"\"\n CREATE TABLE IF NOT EXISTS test(\n playerID TEXT PRIMARY KEY NOT NULL,\n Name TEXT,\n Season INTEGER,\n position TEXT,\n Team INTEGER,\n Age INTEGER,\n G INTEGER,\n HR INTEGER,\n R INTEGER,\n RBI INTEGER,\n OBP INTEGER,\n SLG INTEGER\n )\n \"\"\"\ncursor.execute(create_table)\nconn.commit()",
"_____no_output_____"
],
[
"table = 'test'\ntuples = [tuple(x) for x in test_df.to_numpy()]\ncols = ','.join(list(test_df.columns))\nquery = \"INSERT INTO %s(%s) VALUES %%s\" % (table, cols)\n",
"_____no_output_____"
],
[
"import psycopg2.extras as extras\nconn_string = \"host=\" + PGHOST +\" port=\" + PGPORT +\" dbname=\" + PGDATABASE +\" user=\" + PGUSER +\" password=\"+ PGPASSWORD\nconn = psycopg2.connect(conn_string)\ncursor = conn.cursor()\n\nextras.execute_values(cursor, query, tuples)\nconn.commit()",
"_____no_output_____"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
ec681c822fb92ca018c354dbcaf079b3e5b7c3fd | 3,991 | ipynb | Jupyter Notebook | cryptocurrency_price_notifier.ipynb | AI-ML4Finance/cryptocurrency_price_notifier | e195b26999d41fd1b668ebb59a653868f456087e | [
"MIT"
]
| 2 | 2021-04-13T18:52:58.000Z | 2021-04-13T18:54:50.000Z | cryptocurrency_price_notifier.ipynb | AI-ML4Finance/cryptocurrency_price_notifier | e195b26999d41fd1b668ebb59a653868f456087e | [
"MIT"
]
| null | null | null | cryptocurrency_price_notifier.ipynb | AI-ML4Finance/cryptocurrency_price_notifier | e195b26999d41fd1b668ebb59a653868f456087e | [
"MIT"
]
| null | null | null | 32.447154 | 701 | 0.509146 | [
[
[
"import requests\nimport schedule\nimport time\nfrom playsound import playsound\nimport os\nimport sys\n\ndef cls():\n os.system('cls' if os.name=='nt' else 'clear')\n print(\"\")\n\ncls()\n\ncoin = input(\"Select a cryptocurrency: BTC | ETH | BCH | ZEC | LTC | XRP | XLM : \")\nif coin.upper() not in [\"BTC\", \"BCH\", \"ETH\", \"LTC\", \"ZEC\", \"XRP\", \"XLM\"]:\n print(\"Cryptocurrency not supported.\")\n exit()\n\ncls()\n\nvalue = input(\"Enter the price you bought your \"+coin.upper()+\" in USD: \")\n\ncls()\n\nprint(\"Notifier launched, you can leave it in background.\")\n\nBELLOW = float(value)\n\ninc = 1\n\ndef req_price():\n r = requests.get('https://api.coinbase.com/v2/exchange-rates?currency='+coin.upper())\n return float(r.json()['data']['rates']['USD'])\n\ndef alert(price):\n if price < float(BELLOW):\n # mac notification\n if sys.platform == 'darwin':\n os.system(\"\"\"\n osascript -e 'display notification \"{}\" with title \"{}\"'\n \"\"\".format(\"Current price: $\"+str(price), coin.upper()+\" price dropped bellow $\"+str(BELLOW)))\n playsound(\"./suffer.mp3\")\n\ndef main():\n global inc\n\n if inc == 1:\n alert(req_price())\n inc += 1\n\n if float(BELLOW) < req_price():\n inc -= 1\n\nschedule.every(30).seconds.do(main)\n\nwhile True:\n schedule.run_pending()\n time.sleep(1)",
"\nSelect a cryptocurrency: BTC | ETH | BCH | ZEC | LTC | XRP | XLM : btc\n\nEnter the price you bought your BTC in USD: 1000\n\nNotifier launched, you can leave it in background.\n"
]
]
]
| [
"code"
]
| [
[
"code"
]
]
|
ec6857d344b9d3599a2b17bc78189aa6ab4d4b73 | 13,048 | ipynb | Jupyter Notebook | notebooks/Wilhelm.ipynb | sys-bio/IntegralControl | 45954551a7bf10b111b1f1b15f53a435839517c5 | [
"MIT"
]
| null | null | null | notebooks/Wilhelm.ipynb | sys-bio/IntegralControl | 45954551a7bf10b111b1f1b15f53a435839517c5 | [
"MIT"
]
| null | null | null | notebooks/Wilhelm.ipynb | sys-bio/IntegralControl | 45954551a7bf10b111b1f1b15f53a435839517c5 | [
"MIT"
]
| null | null | null | 22.851138 | 258 | 0.472486 | [
[
[
"# Analysis of Wilhelm - \"The Smallest Chemical Reaction with Bistability\"",
"_____no_output_____"
],
[
"# Preliminaries",
"_____no_output_____"
],
[
"## Imports",
"_____no_output_____"
]
],
[
[
"import tellurium as te\nimport sympy\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom common_python.sympy import sympyUtil as su\nfrom common_python.ODEModel.ODEModel import ODEModel",
"_____no_output_____"
]
],
[
[
"## Constants",
"_____no_output_____"
]
],
[
[
"su.addSymbols(\"k_1 x k_2 k_3 y k_4\")",
"_____no_output_____"
]
],
[
[
"# Equations",
"_____no_output_____"
]
],
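  [
    [
      "Written out, the state equations defined in the next cell are (this simply restates the `stateDct` code in LaTeX):\n\n$$\\dot{x} = 2 k_1 y - k_2 x^2 - k_3 x y - k_4 x$$\n\n$$\\dot{y} = k_2 x^2 - k_1 y$$",
      "_____no_output_____"
    ]
  ],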
[
[
"stateDct = {\n x: 2 * k_1 * y - k_2 * x**2 - k_3 * x * y - k_4 * x,\n y: k_2 * x **2 - k_1 * y\n}",
"_____no_output_____"
]
],
[
[
"# Analysis of Fixed Points",
"_____no_output_____"
]
],
[
[
"stateDct[x]",
"_____no_output_____"
],
[
"stateDct[y]",
"_____no_output_____"
],
[
"model = ODEModel(stateDct)",
"_____no_output_____"
],
[
"valueDcts = [f.valueDct for f in model.fixedPoints]\nvalueDcts",
"_____no_output_____"
],
[
"sympy.simplify(valueDcts[1][x] - valueDcts[2][x])",
"_____no_output_____"
]
],
[
[
"So, require that $0.25 k_1 k_2 \\geq k_3 k_4$ for real fixed point.",
"_____no_output_____"
],
[
"## Non-zero fixed points",
"_____no_output_____"
]
],
[
[
"fp = valueDcts[1]\nfp[x].expand()",
"_____no_output_____"
],
[
"fp = valueDcts[2]\nfp[x].expand()",
"_____no_output_____"
],
[
"fp = valueDcts[1]\nfp[y].expand()",
"_____no_output_____"
],
[
"fp = valueDcts[2]\nfp[y].expand()",
"_____no_output_____"
]
],
[
[
"## When are fixed points > 0?",
"_____no_output_____"
],
[
"### Value of $x$",
"_____no_output_____"
]
],
[
[
"# Evaluation of x^* for fixed point 1\nfp = valueDcts[1]\nexpandedX = fp[x].expand()\nexpandedX",
"_____no_output_____"
]
],
[
[
"This is equivalent to the following.",
"_____no_output_____"
]
],
[
[
"inequality = (k_2 * k_3 * expandedX.args[0]) **2 / (k_1 * k_2) > (k_2 * k_3 * expandedX.args[1]) ** 2 / (k_1 * k_2)\ninequality",
"_____no_output_____"
]
],
[
[
"Since all $k > 0 $, $x^{\\star} > 0$.",
"_____no_output_____"
],
[
"### Value of $y$",
"_____no_output_____"
]
],
[
[
"# Evaluation of y^* for fixed point 1. This should be greater than 0.\nfp = valueDcts[1]\nexpandedY = fp[y].expand()\nexpandedY > 0",
"_____no_output_____"
],
[
"su.addSymbols(\"k_5\") # k_5 = k_3 * k_4 /(k_1 * k_2)\nterm = k_3 **2 / (k_1 * k_2)\n# lhs = term * expandedY.args[1]\nrhs = term * expandedY.args[0] - term * expandedY.args[2]\nrhs = rhs.subs(k_3 * k_4 /(k_1 * k_2), k_5)\nterm * expandedY.args[1] > - sympy.simplify(term * expandedY.args[0] - term * expandedY.args[2])",
"_____no_output_____"
],
[
"# Re-written\n0.5 > k_5 - sympy.sqrt(0.25 - k_5)",
"_____no_output_____"
],
[
"-sympy.sqrt(0.25 - k_5) < 0.5 - k_5",
"_____no_output_____"
],
[
"k_5 - 0.5 < sympy.sqrt(0.25 - k_5)",
"_____no_output_____"
]
],
[
[
"This is true for $k_5 \\in [0, 0.25]$. If $k_5 > 0.25$, there is no real solution.",
"_____no_output_____"
],
[
"### Evaluation of fixed points",
"_____no_output_____"
],
[
"Plot for values of $k_5$ outside the operating region.",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
]
]
|
ec685bdf4547c12f1c00fbb94259994da8653e36 | 959,099 | ipynb | Jupyter Notebook | Example_1_Individual_estimation.ipynb | glamlab/glambox | 9ed47f896373111f0619999f51cd6176b6aa0084 | [
"MIT"
]
| 16 | 2018-06-04T16:05:54.000Z | 2022-01-17T22:38:34.000Z | Example_1_Individual_estimation.ipynb | glamlab/glambox | 9ed47f896373111f0619999f51cd6176b6aa0084 | [
"MIT"
]
| 22 | 2018-02-21T09:30:43.000Z | 2022-03-18T01:22:34.000Z | Example_1_Individual_estimation.ipynb | glamlab/glambox | 9ed47f896373111f0619999f51cd6176b6aa0084 | [
"MIT"
]
| 6 | 2018-03-15T16:45:17.000Z | 2021-04-09T05:02:44.000Z | 197.833952 | 451,532 | 0.886738 | [
[
[
"import os, errno\n\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\nimport pymc3 as pm\n\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nimport glambox as gb",
"WARNING (theano.configdefaults): install mkl with `conda install mkl-service`: No module named 'mkl'\n"
],
[
"def make_sure_path_exists(path):\n try:\n os.makedirs(path)\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise",
"_____no_output_____"
],
[
"make_sure_path_exists('examples/example_1/figures/')\nmake_sure_path_exists('examples/example_1/results/in_sample/traces/')\nmake_sure_path_exists('examples/example_1/results/in_sample/model_comparison/')\nmake_sure_path_exists('examples/example_1/results/out_of_sample/traces/')\nmake_sure_path_exists('examples/example_1/results/out_of_sample/predictions/')",
"_____no_output_____"
]
],
[
[
"# Example 1: Individual gaze biases",
"_____no_output_____"
],
[
"Our first example is based on the study by Thomas and colleagues (2019). Here, the authors study the association between gaze allocation and choice behaviour on the level of the individual. In particular, they explore whether (1) gaze biases are present on the individual level and (2) the strength of this association varies between individuals. In this example, we replicate this type of individual model-based analysis, including parameter estimation, comparison between multiple model variants, and out-of-sample prediction of choice and RT data.",
"_____no_output_____"
],
[
"### 1. Simulating data",
"_____no_output_____"
],
[
"First, we simulate a dataset containing 30 subjects, each performing 300 simple value-based choice trials. We assume that in each trial participants are asked to choose the item that they like most out of a set of three presented alternatives (e.g., snack food items; similar to the task described in Krajbich & Rangel (2011)). While participants perform the task, their eye movements, choices and RTs are measured. Before completing the choice trials, participants were asked to indicate their liking rating for each of the items used in the choice task on a liking rating scale between 1 and 10 (with 10 indicating strong liking and 1 indicating little liking). The resulting dataset contains a liking value for each item in a trial, the participants' choice and RT, as well as the participant's gaze towards each item in a trial (describing the fraction of trial time that the participant spent looking at each item in the choice set).",
"_____no_output_____"
]
],
[
[
"n_subjects = 30\nsubjects = np.arange(n_subjects)\nn_trials = 300\nn_items = 3",
"_____no_output_____"
]
],
[
[
"To simulate individuals' response behaviour, we utilize the parameter estimates that were obtained by Thomas et al. (2019) for the individuals in the three item choice dataset by Krajbich & Rangel (2011) (for an overview, see Fig. S1 of the manuscript). Importantly, we assume that 10 individuals do not exhibit a gaze bias, meaning that their choices are independent of the time that they spend looking at each item. To this end, we set the $\\gamma$ value of ten randomly selected individuals to 1. We further assume that individuals' gaze is distributed randomly with respect to the values of the items in a choice set. ",
"_____no_output_____"
]
],
[
[
"np.random.seed(1)\n\n# load empirical model parameters (taken from Thomas et al., 2019)\nestimates = pd.read_csv('resources/individual_estimates_sec_nhb2019.csv')\nkr2011 = estimates.loc[estimates['dataset'] == 'krajbich2011']\ngen_parameters = dict(v=kr2011['v'].values,\n gamma=kr2011['gamma'].values,\n s=kr2011['s'].values,\n tau=kr2011['tau'].values,\n t0=np.zeros(len(kr2011)))\n\n# define participants with no association between gaze and choice:\nno_gaze_bias_subjects = np.sort(np.random.choice(n_subjects, 10, replace=False))\ngaze_bias_subjects = np.array([s for s in subjects if s not in no_gaze_bias_subjects])\ngen_parameters['gamma'][no_gaze_bias_subjects] = 1",
"_____no_output_____"
]
],
[
[
"The resulting distribution of generating model parameters looks as follows:",
"_____no_output_____"
]
],
[
[
"fig, axs = plt.subplots(4, 1, figsize=gb.plots._plots_internal.cm2inch(9,10), dpi=110, sharex=True)\n\nfor subject_set, color, label in zip([gaze_bias_subjects,\n no_gaze_bias_subjects],\n ['C0', 'C1'],\n ['gaze-bias', 'no gaze-bias']):\n # v\n axs[0].scatter(subject_set,\n gen_parameters['v'][subject_set],\n color=color,\n s=10)\n axs[0].set_ylabel(r'$v$', fontsize=7)\n \n # sigma\n axs[1].scatter(subject_set,\n gen_parameters['s'][subject_set],\n color=color,\n s=10)\n axs[1].set_ylabel(r'$\\sigma$', fontsize=7)\n\n # gamma\n axs[2].scatter(subject_set,\n gen_parameters['gamma'][subject_set],\n color=color,\n s=10)\n axs[2].set_ylabel(r'$\\gamma$', fontsize=7)\n\n # tau\n axs[3].scatter(subject_set,\n gen_parameters['tau'][subject_set],\n color=color,\n label=label,\n s=10)\n axs[3].set_ylabel(r'$\\tau$', fontsize=7)\n axs[3].set_xlabel('Subject', fontsize=7)\n \n\naxs[0].set_title('Generating GLAM parameters', fontsize=7)\naxs[0].set_ylim(-0.1, 1)\naxs[1].set_ylim(-0.1, 0.5)\naxs[2].set_ylim(-1.5, 1.2)\naxs[3].set_ylim(0, 5.1)\naxs[-1].legend(loc='upper left', frameon=True, fontsize=7)\nfor ax in axs:\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.tick_params(axis='both', which='major', labelsize=7)\n ax.set_xticks([])\n \nfig.tight_layout()\nfig.savefig('examples/example_1/figures/Supplementary_Figure_1_generating_parameters.png', dpi=330)",
"_____no_output_____"
]
],
[
[
"These are the subjects that we defined as having no association of gaze allocation and choice behaviour:",
"_____no_output_____"
]
],
[
[
"no_gaze_bias_subjects",
"_____no_output_____"
]
],
[
[
"We first instantiate a GLAM model instance using `gb.GLAM()`and then use its `simulate_group` method. This method requires us to specify whether the individuals of the group are either simulated individually (and thereby independent of one another) or as part of a group with hierarchical parameter structure (where the individual model parameters are drawn from a group distribution, see below). For the former, the generating model parameters (indicated in the following as `gen_parameters`) are provided as a dictionary, containing a list of the individual participant values for each model parameter:",
"_____no_output_____"
]
],
[
[
"np.random.seed(2)\n\nglam = gb.GLAM()\nglam.simulate_group(kind='individual',\n n_individuals=n_subjects,\n n_trials=n_trials,\n n_items=n_items,\n parameters=gen_parameters,\n value_range=(1, 10))",
"_____no_output_____"
]
],
[
[
"We can then access the simulated data as part of our GLAM model instance.",
"_____no_output_____"
]
],
[
[
"data = glam.data.copy()",
"_____no_output_____"
],
[
"data.head()",
"_____no_output_____"
]
],
[
[
"As this example is focused on the individual level, we can further create a summary table, describing individuals' response behaviour on three behavioural metrics, using the `aggregate_subject_level_data` function from the `analysis` module. The resulting table contains individuals' mean RT, their probability of choosing the item with the highest item value from a choice set and a behavioural measure of the strength of the association between individuals' gaze allocation and choice behaviour (indicating the mean increase in choice probability for an item that was fixated on longer than the others, after correcting for the influence of the item value on choice behaviour; for further details, see Thomas et al. (2019)).",
"_____no_output_____"
]
],
[
[
"np.random.seed(3)\n\nsubject_data_summary = gb.analysis.aggregate_subject_level_data(data, n_items)",
"_____no_output_____"
],
[
"subject_data_summary.head()",
"_____no_output_____"
]
],
[
[
"### 2. Exploring the behavioural data",
"_____no_output_____"
],
[
"In a first step of our analysis, we explore differences in individuals' response behaviour. To this end, we plot the distributions of individuals' scores on the three behavioural metrics, and their associations, using the `plot_behaviour_associations` function implemented in the `plots` module:",
"_____no_output_____"
]
],
[
[
"np.random.seed(4)\n\nfig, axs = gb.plots.plot_behaviour_associations(data=data)\n\nfig.savefig('examples/example_1/figures/Figure_1_individual_differences.png', dpi=330)",
"_____no_output_____"
]
],
[
[
"The resulting plot shows that individuals' probability of choosing the best item, as well as the strength of their behavioural association of gaze and choice, are not associated with their mean RT (D-E). However, individuals' probability of choosing the best item increases with decreasing strength of the behavioural association of gaze and choice (F).",
"_____no_output_____"
],
[
"### 3. Likelihood-based model comparison",
"_____no_output_____"
],
[
"In a second step of our analysis, we want to test whether the response behaviour of each individual is better described by a decision model with or without gaze bias. To this end, we set up the two GLAM variants:",
"_____no_output_____"
],
[
"One GLAM variant that includes the gaze bias mechanism:",
"_____no_output_____"
]
],
[
[
"np.random.seed(5)\n\nglam_bias = gb.GLAM(data=data, name='glam_bias')\nglam_bias.make_model(kind='individual')",
"Generating single subject models for 30 subjects...\n"
]
],
[
[
"And one without a gaze bias (as indicated by `gamma_val=1`):",
"_____no_output_____"
]
],
[
[
"np.random.seed(6)\n\n# for the no-gaze-bias variant, we set the gamma-parameter to 1, indicating no influence of gaze allocation on choice behaviour\nglam_nobias = gb.GLAM(data=data, name='glam_nobias')\nglam_nobias.make_model(kind='individual', gamma_val=1)",
"Generating single subject models for 30 subjects...\n"
]
],
[
[
"Subsequently, we fit both models to the data of each individual and compare their fit by means of the Widely Applicable Information Criterion (WAIC; Vehtari et al., 2017): ",
"_____no_output_____"
],
[
"The `fit` method defaults to Metropolis-Hastings MCMC sampling (for methodological details, see the Methods Section of the manuscript). The `draws` argument sets the number of samples to be drawn. This excludes the tuning (or burn-in) samples, which can be set with the `tune` argument. In addition, the `fit` method accepts the same keyword arguments as the PyMC3 sample function, which it wraps (see the PyMC3 documentation for additional details). The `chains` argument sets the number of MCMC traces (it defaults to four and should be set to at least two, in order to allow convergence diagnostics).",
"_____no_output_____"
]
],
[
[
"n_tune = 5000\nn_draws = 5000\nn_chains = 4",
"_____no_output_____"
],
[
"np.random.seed(7)\n\nglam_bias.fit(method='MCMC',\n tune=n_tune,\n draws=n_draws,\n chains=n_chains)",
"Fitting 30 model(s) using MCMC...\n Fitting model 1 of 30...\n"
],
[
"np.random.seed(8)\n\nglam_nobias.fit(method='MCMC',\n tune=n_tune,\n draws=n_draws,\n chains=n_chains)",
"Fitting 30 model(s) using MCMC...\n Fitting model 1 of 30...\n"
]
],
[
[
"Convergence check:",
"_____no_output_____"
]
],
[
[
"def check_convergence(summary, varnames=['v', 's', 'tau'],\n n_eff_required=100, gelman_rubin_criterion=0.05):\n varnames = [varname + '__0_0' for varname in varnames]\n enough_eff_samples = np.all(summary.loc[varnames]['n_eff'] > n_eff_required)\n good_gelman = np.all(np.abs(summary.loc[varnames]['Rhat'] - 1.0) < gelman_rubin_criterion)\n if not enough_eff_samples or not good_gelman:\n return False\n else:\n return True",
"_____no_output_____"
],
[
"np.all([check_convergence(pm.summary(trace), varnames=['v', 's', 'gamma', 'tau']) for trace in glam_bias.trace])",
"_____no_output_____"
],
[
"np.all([check_convergence(pm.summary(trace), varnames=['v', 's', 'tau']) for trace in glam_nobias.trace])",
"_____no_output_____"
]
],
[
[
"Saving traces and traceplots for visual inspection:",
"_____no_output_____"
]
],
[
[
"for subject, subject_trace in enumerate(glam_bias.trace):\n gb.plots.traceplot(subject_trace)\n plt.savefig('examples/example_1/results/in_sample/traces/sub_{}_gaze_bias_model_trace.png'.format(subject), dpi=330)\n plt.close()\n pm.trace_to_dataframe(subject_trace).to_csv(\n 'examples/example_1/results/in_sample/traces/sub_{}_gaze_bias_model_trace.csv'.format(subject))",
"_____no_output_____"
],
[
"for subject, subject_trace in enumerate(glam_nobias.trace):\n gb.plots.traceplot(subject_trace)\n plt.savefig('examples/example_1/results/in_sample/traces/sub_{}_no_gaze_bias_model_trace.png'.format(subject), dpi=330)\n plt.close()\n pm.trace_to_dataframe(subject_trace).to_csv(\n 'examples/example_1/results/in_sample/traces/sub_{}_no_gaze_bias_model_trace.csv'.format(subject))",
"_____no_output_____"
]
],
[
[
"After convergence has been established for all parameter traces (for details on the suggested convergence criteria, see the Methods Section of the manuscript), we perform a model comparison on the individual level, using the `compare` function:",
"_____no_output_____"
],
[
"The resulting table can be used to identify the best fitting model (indicated by the lowest WAIC score) per individual.",
"_____no_output_____"
]
],
[
[
"comparison_df = gb.analysis.compare_models(models=[glam_bias, glam_nobias])\ncomparison_df",
"_____no_output_____"
]
],
[
[
"Visualising the individual WAIC differences:",
"_____no_output_____"
]
],
[
[
"dWAIC = []\nfor subject in subjects:\n comp_s = comparison_df.loc[comparison_df['subject'] == subject]\n dWAIC_s = comp_s.loc[comp_s['model'] == 'glam_bias', 'WAIC'].values - comp_s.loc[comp_s['model'] == 'glam_nobias', 'WAIC'].values\n dWAIC.append(dWAIC_s[0])\ndWAIC = np.array(dWAIC)\nnp.save('examples/example_1/results/in_sample/model_comparison/dWAIC_in_sample.npy', dWAIC)\ndWAIC",
"_____no_output_____"
],
[
"# identify subjects better described by each model variant\ngaze_bias_idx = dWAIC < 0\nno_gaze_bias_idx = dWAIC > 0",
"_____no_output_____"
],
[
"fig, ax = plt.subplots(1, 1, figsize=gb.plots._plots_internal.cm2inch(18,6), dpi=110)\n\nax.bar(subjects[gaze_bias_idx], dWAIC[gaze_bias_idx], color='C0', label='gaze bias')\nax.bar(subjects[no_gaze_bias_idx], dWAIC[no_gaze_bias_idx], color='C1', label='no gaze bias')\nax.set_xlabel('Subjects', fontsize=7)\nax.set_ylabel('WAIC( gaze bias )'+' -\\n'+'WAIC( no gaze bias )', fontsize=7)\nax.legend(loc='lower left', frameon=False, fontsize=7)\nax.set_xticks([])\nax.tick_params(axis='both', which='major', labelsize=7)\nax.spines['top'].set_visible(False)\nax.spines['right'].set_visible(False)\n\nfig.tight_layout()\n\nfig.savefig('examples/example_1/figures/relative_model_comparison.png', dpi=330)",
"_____no_output_____"
]
],
[
[
"With this comparison, we are able to identify those participants whose response behaviour matches the assumption of gaze-biased evidence accumulation. In particular, we find that we accurately recover whether an individual has a gaze bias or not for 29 out of 30 individuals.",
"_____no_output_____"
]
],
[
[
"no_gaze_bias_subjects",
"_____no_output_____"
],
[
"subjects[no_gaze_bias_idx]",
"_____no_output_____"
],
[
"[s in subjects[no_gaze_bias_idx] for s in no_gaze_bias_subjects]",
"_____no_output_____"
]
],
[
[
"Looking at the individual parameter estimates (defined as MAP of the posterior distributions), we find that the individually fitted $\\gamma$ values cover a wide range between -0.8 and 1 (A), indicating strong variability in the strength of individuals' gaze bias. We also find that $\\gamma$ estimates have a strong negative correlation with individuals' scores on the behavioural gaze bias measure (B).",
"_____no_output_____"
]
],
[
[
"np.random.seed(10)\n\nfig, axs = plt.subplots(1, 2, figsize=gb.plots._plots_internal.cm2inch(9, 4.5), dpi=330)\n\naxs[0].hist(glam_bias.estimates['gamma'], bins=np.linspace(-1., 1, 20))\ngb.plots._plots_internal.plot_correlation(subject_data_summary['gaze_influence'],\n glam_bias.estimates['gamma'],\n ax=axs[1],\n ylim=(-1.2, 1.2))\n\naxs[0].set_ylabel('Frequency', fontsize=7)\naxs[0].set_xlabel(r'$\\gamma$', fontsize=7)\naxs[0].tick_params(axis='both', which='major', labelsize=7)\naxs[0].spines['top'].set_visible(False)\naxs[0].spines['right'].set_visible(False)\naxs[1].set_ylabel(r'$\\gamma$', fontsize=7)\naxs[1].set_xlabel('Gaze influence on P(choice | value)', fontsize=7)\naxs[1].tick_params(axis='both', which='major', labelsize=7)\nfor ax, label in zip(axs.ravel(), list('AB')):\n ax.text(-0.2,\n 1.1,\n label,\n transform=ax.transAxes,\n fontsize=7,\n fontweight='bold',\n va='top')\n\nfig.tight_layout()\n\nfig.savefig('examples/example_1/figures/Figure_3_gaze_bias_estimates.png', dpi=330)",
"_____no_output_____"
]
],
[
[
"### 4. Out-of-sample prediction",
"_____no_output_____"
],
[
"We have identified those participants whose response behaviour is better described by a GLAM variant with gaze-bias than one without. Yet, this analysis does not indicate whether the GLAM is a good model of individuals' response behaviour on an absolute level. To test this, we perform an out-of-sample prediction exercise.",
"_____no_output_____"
],
[
"We divide the data of each subject into even- and odd-numbered experiment trials and use the data of the even-numbered trials to fit both GLAM variants:",
"_____no_output_____"
]
],
[
[
"data_even = data[(data['trial']%2)==0].copy()\ndata_odd = data[(data['trial']%2)!=0].copy()",
"_____no_output_____"
],
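[
"# Quick sanity check (a sketch, not part of the original example): the even/odd split should\n# cover all trials with no overlap.\nassert len(data_even) + len(data_odd) == len(data)\nprint('{} even-numbered and {} odd-numbered trials'.format(len(data_even), len(data_odd)))",
"_____no_output_____"
],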
[
"np.random.seed(11)\n\nglam_bias.exchange_data(data_even)\nglam_bias.fit(method='MCMC',\n tune=n_tune,\n draws=n_draws,\n chains=n_chains)",
"Replaced attached data (9000 trials) with new data (4500 trials)...\nFitting 30 model(s) using MCMC...\n Fitting model 1 of 30...\n"
],
[
"np.random.seed(12)\n\nglam_nobias.exchange_data(data_even)\nglam_nobias.fit(method='MCMC',\n tune=n_tune,\n draws=n_draws,\n chains=n_chains)",
"Replaced attached data (9000 trials) with new data (4500 trials)...\nFitting 30 model(s) using MCMC...\n Fitting model 1 of 30...\n"
]
],
[
[
"Again, we check all parameter traces for convergence, before proceeding further in the anlaysis:",
"_____no_output_____"
]
],
[
[
"np.all([check_convergence(pm.summary(trace), varnames=['v', 's', 'gamma', 'tau'])\n for trace in glam_bias.trace])",
"_____no_output_____"
],
[
"np.all([check_convergence(pm.summary(trace), varnames=['v', 's', 'tau'])\n for trace in glam_nobias.trace])",
"_____no_output_____"
],
[
"for subject, subject_trace in enumerate(glam_bias.trace):\n gb.plots.traceplot(subject_trace)\n plt.savefig('examples/example_1/results/out_of_sample/traces/sub_{}_gaze_bias_model_trace.png'.format(subject), dpi=330)\n plt.close()\n pm.trace_to_dataframe(subject_trace).to_csv(\n 'examples/example_1/results/out_of_sample/traces/sub_{}_gaze_bias_model_trace.csv'.format(subject))",
"_____no_output_____"
],
[
"for subject, subject_trace in enumerate(glam_nobias.trace):\n gb.plots.traceplot(subject_trace)\n plt.savefig('examples/example_1/results/out_of_sample/traces/sub_{}_no_gaze_bias_model_trace.png'.format(subject), dpi=330)\n plt.close()\n pm.trace_to_dataframe(subject_trace).to_csv(\n 'examples/example_1/results/out_of_sample/traces/sub_{}_no_gaze_bias_model_trace.csv'.format(subject))",
"_____no_output_____"
]
],
[
[
"We then evaluate the performance of both models in predicting individuals' response behaviour using the MAP estimates and item value and gaze data from the odd-numbered trials. To predict response behaviour for the odd-numbered trials, we use the `predict` method. We repeat every trial 50 times in the prediction (as specified through the `n_repeats` argument) to obtain a stable pattern of predictions:",
"_____no_output_____"
]
],
[
[
"n_repeats = 50",
"_____no_output_____"
],
[
"np.random.seed(13)\n\nglam_bias.exchange_data(data_odd)\nglam_bias.predict(n_repeats=n_repeats)\nglam_bias.prediction.to_csv('examples/example_1/results/out_of_sample/predictions/gaze_bias_model_predictions.csv')",
"\r 0%| | 0/4500 [00:00<?, ?it/s]"
],
[
"np.random.seed(14)\n\nglam_nobias.exchange_data(data_odd)\nglam_nobias.predict(n_repeats=n_repeats)\nglam_nobias.prediction.to_csv('examples/example_1/results/out_of_sample/predictions/no_gaze_bias_model_predictions.csv')",
"\r 0%| | 0/4500 [00:00<?, ?it/s]"
]
],
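[
[
"# Sketch (added for illustration): with n_repeats=50, every odd-numbered trial should appear\n# 50 times in the prediction. This assumes glam_bias.prediction is a pandas DataFrame, as the\n# to_csv call above suggests.\nprint(len(glam_bias.prediction), 'predicted trials =', n_repeats, 'x', len(data_odd), 'odd trials')",
"_____no_output_____"
]
],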
[
[
"To determine the absolute fit of both model variants to the data, we plot the individually predicted against the individually observed data on all three behavioural metrics. To do this, we use the `plot_individual_fit` function of the `plots` module. This function takes as input the observed data, as well as a list of the predictions of all model variants that ought to be compared. The argument `prediction_labels` specifies the naming used for each model in the resulting figure. For each model variant, the function creates a row of panels, plotting the observed against the predicted data:",
"_____no_output_____"
]
],
[
[
"np.random.seed(15)\n\nfig, axs = gb.plots.plot_individual_fit(observed=data_odd,\n predictions=[glam_bias.prediction,\n glam_nobias.prediction],\n prediction_labels=['gaze-bias', 'no gaze-bias'])\n\n# We'll change the xlabels to \"Simulated observed\", just to be clear that these are simulated data!\nfor ax in axs.ravel():\n xlabel = ax.get_xlabel()\n ax.set_xlabel('Simulated o' + xlabel[1:])\n\nfig.savefig('examples/example_1/figures/Figure_4_absolute_fit.png', dpi=330)",
"_____no_output_____"
]
],
[
[
"Both model variants perform well in capturing individuals' RTs and probability of choosing the best item (A, D, B, E). Importantly, only the GLAM variant with gaze bias is able to also recover the strength of the association between individuals' choice behaviour and gaze allocation (C).",
"_____no_output_____"
],
[
"### 5. References:",
"_____no_output_____"
],
[
"Thomas, A. W., Molter, F., Krajbich, I., Heekeren, H. R., & Mohr, P. N. (2019). Gaze bias differences capture individual choice behaviour. Nature human behaviour, 3(6), 625.\n\nVehtari, A., Gelman, A., & Gabry, J. (2017). Practical Bayesian model evaluation using leave-one-out cross-validation and WAIC. Statistics and computing, 27(5), 1413-1432.",
"_____no_output_____"
]
]
]
| [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
]
]
|
ec687e6b0edc7e1833bf7479d73120b9b2b50578 | 640 | ipynb | Jupyter Notebook | Week2_Math/Day3_Stats2/Untitled.ipynb | bensondaled/PNI_Programming_Statistics_Bootcamp_2018 | 5954b3bedfb98282e4c52910a789a046df2e978c | [
"Apache-2.0"
]
| null | null | null | Week2_Math/Day3_Stats2/Untitled.ipynb | bensondaled/PNI_Programming_Statistics_Bootcamp_2018 | 5954b3bedfb98282e4c52910a789a046df2e978c | [
"Apache-2.0"
]
| null | null | null | Week2_Math/Day3_Stats2/Untitled.ipynb | bensondaled/PNI_Programming_Statistics_Bootcamp_2018 | 5954b3bedfb98282e4c52910a789a046df2e978c | [
"Apache-2.0"
]
| null | null | null | 16.410256 | 34 | 0.504688 | [
[
[
"x\n\nsem = std(x) / sqrt(len(x))",
"_____no_output_____"
]
]
]
| [
"code"
]
| [
[
"code"
]
]
|
ec68831ad8477a00b2f345db7670b6e18f609508 | 474,063 | ipynb | Jupyter Notebook | visualizations/Word embeddings.ipynb | pvzweden/best | 83acdfc445ec122d027f4bd65f27a3836862b02d | [
"MIT"
]
| 21 | 2019-07-02T05:54:22.000Z | 2021-04-07T13:52:50.000Z | visualizations/Word embeddings.ipynb | pvzweden/best | 83acdfc445ec122d027f4bd65f27a3836862b02d | [
"MIT"
]
| 55 | 2019-07-03T18:59:26.000Z | 2020-12-15T08:10:00.000Z | visualizations/Word embeddings.ipynb | pvzweden/best | 83acdfc445ec122d027f4bd65f27a3836862b02d | [
"MIT"
]
| 9 | 2019-09-10T13:38:46.000Z | 2021-09-01T08:02:42.000Z | 833.151142 | 366,288 | 0.950277 | [
[
[
"# Word embeddings\nInspired by http://creatingdata.us/etc/streets/ we try to compute word vectors for each streetname and then visualize the relationship between streetnames.",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nfrom gensim.models import Word2Vec\nimport numpy as np\nimport multiprocessing\nfrom umap import UMAP\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nsns.set()",
"_____no_output_____"
]
],
[
[
"## Creating the Word2Vec models\n### Streets grouped by postcode",
"_____no_output_____"
],
[
"Reading the csv file of all streets into memory, this can be generated by the filter script. This assumes you have the file saved in the following folder",
"_____no_output_____"
]
],
[
[
"streets = pd.read_csv('../data/streets.csv')\n# Group streets by postcode\ngroups = streets.groupby('postcode')",
"/usr/lib/python3.7/site-packages/IPython/core/interactiveshell.py:3057: DtypeWarning: Columns (3,5,6,8,10) have mixed types. Specify dtype option on import or set low_memory=False.\n interactivity=interactivity, compiler=compiler, result=result)\n"
]
],
[
[
"To train the Word2Vec model we normally need a list of sentences, usually mined from some text source. One way to emulate this with streetnames we consider each postcode a different sentence and list all the streetnames for a particular postcode.",
"_____no_output_____"
]
],
[
[
"# Create a list of lists containing the streetnames of each city\ncleaned = []\nfor m_id, values in groups:\n city = []\n for nl, fr in values[['streetname_nl', 'streetname_fr']].values:\n # try to add nl and fr names, handling the cases where they are null\n try:\n city.append(nl.lower())\n except:\n pass\n try:\n city.append(fr.lower())\n except:\n pass\n cleaned.append(city)",
"_____no_output_____"
]
],
[
[
"Now we can calculate the word vectors for each streetname. First we create our model. Notable parameters are the `min_count` stating that only when a street occurs at least twice it can be included in the word vectors and the `window` which specifies how far words can be from eachother to still be associated with eachother (10 is the maximum value). ",
"_____no_output_____"
]
],
[
[
"cores = multiprocessing.cpu_count() # Count the number of cores in a computer\npostcode_grouped = Word2Vec(min_count=2,\n window=10,\n size=300,\n sample=6e-5, \n alpha=0.03, \n min_alpha=0.0007, \n negative=20,\n workers=cores-1)\n# Build vocabulary, dropping streets that only occur once\npostcode_grouped.build_vocab(cleaned, progress_per=10000)\n# Train the word vectors\npostcode_grouped.train(cleaned, total_examples=postcode_grouped.corpus_count, epochs=30, report_delay=1)\n# Optimize model\npostcode_grouped.init_sims(replace=True)",
"_____no_output_____"
]
],
[
[
"This model is able to extract some of the associations between the occurrences of streets in different cities. Especially when looking at commonly occuring streetnames we can see some other streets in the same style.",
"_____no_output_____"
]
],
[
[
"postcode_grouped.wv.most_similar(positive=[\"dorpsstraat\"])",
"_____no_output_____"
]
],
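[
[
"# A small additional sketch (not in the original text): the trained vectors can also be queried\n# for pairwise similarity. The street names below are only examples and must occur in the\n# vocabulary, so we check membership first.\nif 'kerkstraat' in postcode_grouped.wv.vocab and 'stationsstraat' in postcode_grouped.wv.vocab:\n print(postcode_grouped.wv.similarity('kerkstraat', 'stationsstraat'))",
"_____no_output_____"
]
],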
[
[
"### Streets grouped on geolocation\nWe can group streets using geolocation as well (the geolocation of the addresses belonging to that street). We collect the streets in bins and make sure there is overlap between bins so neighboorhoods should be contained in at least one bin. Each bin is then interpreted as a 'sentence' for the creation of the word vectors.\n\nRead the addresses csv file, this assumes you have the file saved in the following folder",
"_____no_output_____"
]
],
[
[
"addresses = pd.read_csv('../data/belgium_addresses.csv')",
"/usr/lib/python3.7/site-packages/IPython/core/interactiveshell.py:3057: DtypeWarning: Columns (5,8,9,10,12,13,15,16,17) have mixed types. Specify dtype option on import or set low_memory=False.\n interactivity=interactivity, compiler=compiler, result=result)\n"
],
[
"min_x = addresses['EPSG:31370_x'].min()\nmax_x = addresses['EPSG:31370_x'].max()\nmin_y = addresses['EPSG:31370_y'].min()\nmax_y = addresses['EPSG:31370_y'].max()",
"_____no_output_____"
],
[
"binsize = 1000\ncoll = {}\n# Get the ids of the necessary columns\nx_id = addresses.columns.get_loc('EPSG:31370_x')\ny_id = addresses.columns.get_loc('EPSG:31370_y')\nnl_id = addresses.columns.get_loc('streetname_nl')\nfr_id = addresses.columns.get_loc('streetname_fr')\n\nfor row in addresses.values:\n # Get the bin offsets\n x = (row[0] // binsize) * binsize\n y = (row[1] // binsize) * binsize\n bins = [\n (x, y),\n (x + binsize/2, y),\n (x, y + binsize/2),\n (x + binsize/2, y + binsize/2),\n ]\n for pos in bins:\n if pos not in coll:\n coll[pos] = set()\n try:\n coll[pos].add(row[nl_id].lower())\n except:\n pass\n try:\n coll[pos].add(row[fr_id].lower())\n except:\n pass\n \nblocks = [list(el) for el in coll.values() if el]",
"_____no_output_____"
],
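[
"# Sketch (added for illustration): a quick summary of the overlapping bins built above, showing\n# how many bins there are and how many street names an average bin contains.\nprint(len(blocks), 'bins,', round(np.mean([len(b) for b in blocks]), 1), 'street names per bin on average')",
"_____no_output_____"
],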
[
"geo_grouped = Word2Vec(min_count=10,\n window=10,\n size=300,\n sample=6e-5, \n alpha=0.03, \n min_alpha=0.0007, \n negative=20,\n workers=cores-1)\n\n# Build vocabulary\ngeo_grouped.build_vocab(blocks, progress_per=10000)\n# Train the word vectors\ngeo_grouped.train(blocks, total_examples=geo_grouped.corpus_count, epochs=30, report_delay=1)\n# Optimize the model\ngeo_grouped.init_sims(replace=True)",
"_____no_output_____"
],
[
"geo_grouped.most_similar(positive=[\"dorpsstraat\"])",
"/usr/lib/python3.7/site-packages/ipykernel_launcher.py:1: DeprecationWarning: Call to deprecated `most_similar` (Method will be removed in 4.0.0, use self.wv.most_similar() instead).\n \"\"\"Entry point for launching an IPython kernel.\n"
]
],
[
[
"## Visualizing the word vectors\nThe models generate word vectors with 300 dimensions, this kind of data is not easily visualized. To solve this we can apply dimensionality reduction, using the [UMAP](https://umap-learn.readthedocs.io/en/latest/) algorithm, to reduce the amount of dimensions to two which can then be visualized in a scatterplot",
"_____no_output_____"
]
],
[
[
"reducer = UMAP()",
"_____no_output_____"
]
],
[
[
"### Postcode groups",
"_____no_output_____"
]
],
[
[
"# Extract the vectors from the model\nvectors = []\nfor word in postcode_grouped.wv.vocab:\n vectors.append(postcode_grouped.wv[word])\n\nvectors = np.array(vectors)\n\n# Create the low dimensional embedding\nembedding = reducer.fit_transform(vectors)",
"/home/theod/.local/lib/python3.7/site-packages/umap/rp_tree.py:450: NumbaWarning: \u001b[1m\nCompilation is falling back to object mode WITH looplifting enabled because Function \"make_euclidean_tree\" failed type inference due to: \u001b[1m\u001b[1m\u001b[1mCannot unify RandomProjectionTreeNode(array(int64, 1d, C), bool, none, none, none, none) and RandomProjectionTreeNode(none, bool, array(float32, 1d, C), float64, RandomProjectionTreeNode(array(int64, 1d, C), bool, none, none, none, none), RandomProjectionTreeNode(array(int64, 1d, C), bool, none, none, none, none)) for '$14.16', defined at /home/theod/.local/lib/python3.7/site-packages/umap/rp_tree.py (457)\n\u001b[1m\nFile \"../../../.local/lib/python3.7/site-packages/umap/rp_tree.py\", line 457:\u001b[0m\n\u001b[1mdef make_euclidean_tree(data, indices, rng_state, leaf_size=30):\n <source elided>\n\n\u001b[1m left_node = make_euclidean_tree(data, left_indices, rng_state, leaf_size)\n\u001b[0m \u001b[1m^\u001b[0m\u001b[0m\n\u001b[0m\n\u001b[0m\u001b[1m[1] During: resolving callee type: recursive(type(CPUDispatcher(<function make_euclidean_tree at 0x7efd19ee5378>)))\u001b[0m\n\u001b[0m\u001b[1m[2] During: typing of call at /home/theod/.local/lib/python3.7/site-packages/umap/rp_tree.py (457)\n\u001b[0m\n\u001b[1m\nFile \"../../../.local/lib/python3.7/site-packages/umap/rp_tree.py\", line 457:\u001b[0m\n\u001b[1mdef make_euclidean_tree(data, indices, rng_state, leaf_size=30):\n <source elided>\n\n\u001b[1m left_node = make_euclidean_tree(data, left_indices, rng_state, leaf_size)\n\u001b[0m \u001b[1m^\u001b[0m\u001b[0m\n\u001b[0m\n @numba.jit()\n/home/theod/.local/lib/python3.7/site-packages/numba/compiler.py:725: NumbaWarning: \u001b[1mFunction \"make_euclidean_tree\" was compiled in object mode without forceobj=True.\n\u001b[1m\nFile \"../../../.local/lib/python3.7/site-packages/umap/rp_tree.py\", line 451:\u001b[0m\n\u001b[[email protected]()\n\u001b[1mdef make_euclidean_tree(data, indices, rng_state, leaf_size=30):\n\u001b[0m\u001b[1m^\u001b[0m\u001b[0m\n\u001b[0m\n self.func_ir.loc))\n/home/theod/.local/lib/python3.7/site-packages/numba/compiler.py:734: NumbaDeprecationWarning: \u001b[1m\nFall-back from the nopython compilation path to the object mode compilation path has been detected, this is deprecated behaviour.\n\nFor more information visit http://numba.pydata.org/numba-doc/latest/reference/deprecation.html#deprecation-of-object-mode-fall-back-behaviour-when-using-jit\n\u001b[1m\nFile \"../../../.local/lib/python3.7/site-packages/umap/rp_tree.py\", line 451:\u001b[0m\n\u001b[[email protected]()\n\u001b[1mdef make_euclidean_tree(data, indices, rng_state, leaf_size=30):\n\u001b[0m\u001b[1m^\u001b[0m\u001b[0m\n\u001b[0m\n warnings.warn(errors.NumbaDeprecationWarning(msg, self.func_ir.loc))\n/home/theod/.local/lib/python3.7/site-packages/umap/nndescent.py:92: NumbaPerformanceWarning: \u001b[1m\u001b[1m\nThe keyword argument 'parallel=True' was specified but no transformation for parallel execution was possible.\n\nTo find out why, try turning on parallel diagnostics, see http://numba.pydata.org/numba-doc/latest/user/parallel.html#diagnostics for help.\n\u001b[1m\nFile \"../../../.local/lib/python3.7/site-packages/umap/utils.py\", line 409:\u001b[0m\n\u001b[[email protected](parallel=True)\n\u001b[1mdef build_candidates(current_graph, n_vertices, n_neighbors, max_candidates, rng_state):\n\u001b[0m\u001b[1m^\u001b[0m\u001b[0m\n\u001b[0m\u001b[0m\n current_graph, n_vertices, n_neighbors, max_candidates, 
rng_state\n/home/theod/.local/lib/python3.7/site-packages/numba/compiler.py:588: NumbaPerformanceWarning: \u001b[1m\nThe keyword argument 'parallel=True' was specified but no transformation for parallel execution was possible.\n\nTo find out why, try turning on parallel diagnostics, see http://numba.pydata.org/numba-doc/latest/user/parallel.html#diagnostics for help.\n\u001b[1m\nFile \"../../../.local/lib/python3.7/site-packages/umap/nndescent.py\", line 47:\u001b[0m\n\u001b[1m @numba.njit(parallel=True)\n\u001b[1m def nn_descent(\n\u001b[0m \u001b[1m^\u001b[0m\u001b[0m\n\u001b[0m\n self.func_ir.loc))\n/home/theod/.local/lib/python3.7/site-packages/umap/umap_.py:349: NumbaWarning: \u001b[1m\nCompilation is falling back to object mode WITH looplifting enabled because Function \"fuzzy_simplicial_set\" failed type inference due to: \u001b[1mUntyped global name 'nearest_neighbors':\u001b[0m \u001b[1m\u001b[1mcannot determine Numba type of <class 'function'>\u001b[0m\n\u001b[1m\nFile \"../../../.local/lib/python3.7/site-packages/umap/umap_.py\", line 467:\u001b[0m\n\u001b[1mdef fuzzy_simplicial_set(\n <source elided>\n if knn_indices is None or knn_dists is None:\n\u001b[1m knn_indices, knn_dists, _ = nearest_neighbors(\n\u001b[0m \u001b[1m^\u001b[0m\u001b[0m\n\u001b[0m\u001b[0m\n @numba.jit()\n/home/theod/.local/lib/python3.7/site-packages/numba/compiler.py:725: NumbaWarning: \u001b[1mFunction \"fuzzy_simplicial_set\" was compiled in object mode without forceobj=True.\n\u001b[1m\nFile \"../../../.local/lib/python3.7/site-packages/umap/umap_.py\", line 350:\u001b[0m\n\u001b[[email protected]()\n\u001b[1mdef fuzzy_simplicial_set(\n\u001b[0m\u001b[1m^\u001b[0m\u001b[0m\n\u001b[0m\n self.func_ir.loc))\n/home/theod/.local/lib/python3.7/site-packages/numba/compiler.py:734: NumbaDeprecationWarning: \u001b[1m\nFall-back from the nopython compilation path to the object mode compilation path has been detected, this is deprecated behaviour.\n\nFor more information visit http://numba.pydata.org/numba-doc/latest/reference/deprecation.html#deprecation-of-object-mode-fall-back-behaviour-when-using-jit\n\u001b[1m\nFile \"../../../.local/lib/python3.7/site-packages/umap/umap_.py\", line 350:\u001b[0m\n\u001b[[email protected]()\n\u001b[1mdef fuzzy_simplicial_set(\n\u001b[0m\u001b[1m^\u001b[0m\u001b[0m\n\u001b[0m\n warnings.warn(errors.NumbaDeprecationWarning(msg, self.func_ir.loc))\n"
],
[
"plt.figure(figsize=(14, 9))\nsns.scatterplot(embedding[:, 0], embedding[:, 1])",
"_____no_output_____"
]
],
[
[
"### Geo groups",
"_____no_output_____"
]
],
[
[
"# Extract the vectors from the model\nvectors = []\nfor word in geo_grouped.wv.vocab:\n vectors.append(geo_grouped.wv[word])\n\nvectors = np.array(vectors)\n\n# Create the low dimensional embedding\nembedding = reducer.fit_transform(vectors)",
"/home/theod/.local/lib/python3.7/site-packages/numba/compiler.py:588: NumbaPerformanceWarning: \u001b[1m\nThe keyword argument 'parallel=True' was specified but no transformation for parallel execution was possible.\n\nTo find out why, try turning on parallel diagnostics, see http://numba.pydata.org/numba-doc/latest/user/parallel.html#diagnostics for help.\n\u001b[1m\nFile \"../../../.local/lib/python3.7/site-packages/umap/nndescent.py\", line 47:\u001b[0m\n\u001b[1m @numba.njit(parallel=True)\n\u001b[1m def nn_descent(\n\u001b[0m \u001b[1m^\u001b[0m\u001b[0m\n\u001b[0m\n self.func_ir.loc))\n"
],
[
"plt.figure(figsize=(14, 9))\nsns.scatterplot(embedding[:, 0], embedding[:, 1])",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
]
|
ec6885672e4ebc07b5b6f371b48dcd1655aecfbe | 5,202 | ipynb | Jupyter Notebook | ch1/p92.ipynb | ScienceEater/glassCoding | 5849f9d0e1c5c5a86411e71f714a46a24d9df38e | [
"MIT"
]
| null | null | null | ch1/p92.ipynb | ScienceEater/glassCoding | 5849f9d0e1c5c5a86411e71f714a46a24d9df38e | [
"MIT"
]
| null | null | null | ch1/p92.ipynb | ScienceEater/glassCoding | 5849f9d0e1c5c5a86411e71f714a46a24d9df38e | [
"MIT"
]
| null | null | null | 17.876289 | 52 | 0.401961 | [
[
[
"# (map(int, input().split())))\nN, M, K = map(int, input().split())\nlis = list(map(int, input().split()))\n",
"5 8 3\n2 4 5 4 6\n"
],
[
"lisor = sorted(lis,reverse=True)",
"_____no_output_____"
],
[
"lisor",
"_____no_output_____"
],
[
"res = 0\ncounter = 0\nfir = lisor[0]\nsec = lisor[1]\n\nfor i in range(M):\n if counter == K:\n res += sec\n counter = 0\n else:\n res += fir\n counter += 1\n \nprint(res)",
"46\n"
],
[
"# answer 1\n\n# N, M, K\nn, m, k = map(int, input().split())\n# N nums\ndata = list(map(int, input().split()))\n\ndata.sort()\nfirst = data[n-1]\nsecond = data[n-2]\n\nresult = 0\n\nwhile True:\n for i in range(k):\n if m == 0:\n break\n result += first\n m -= 1\n if m == 0:\n break\n result += second\n m -= 1\n \nprint(result)",
"5 8 3\n2 4 5 4 6\n46\n"
],
[
"# revisited\n\nN, M, K = map(int, input().split())\ndata = list(map(int, input().split()))\n\ndata2 = sorted(data, reverse=True)\nfir = data2[0]\nsec = data2[1]\n\nres = 0\n\npattern = fir*K + sec\n\nres = (M / (K+1))*pattern + (M % (k+1))*fir\n\nprint(int(res))",
"5 8 3\n2 4 5 4 6\n46\n"
],
[
"# answer 2\n\n# N, M, K\nn, m, k = map(int, input().split())\n# N inputs\ndata = list(map(int, input().split()))\n\ndata.sort()\nfirst = data[n-1]\nsecond = data[n-2]\n\ncount = int(m/(k+1))*k + m%(k+1)\n\nresult = 0\nresult += (count) * first + (m-count)*second",
"5 8 3\n2 4 5 4 6\n"
],
[
"result",
"_____no_output_____"
],
[
"# revisited 2\n\nN, M, K = map(int, input().split())\nlis = list(map(int, input().split()))\nlis.sort()\nfir = lis[N-1]\nsec = lis[N-2]\n\npattern = (fir*K + sec)\n\na, b = divmod(M,K+1)\n\nresult = a*pattern + b*fir\n",
"5 8 3\n2 4 5 4 6\n"
],
[
"result",
"_____no_output_____"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
ec688c8a264ee193c3547e2ce9942113be41b124 | 49,843 | ipynb | Jupyter Notebook | pyTorch-Week5-DeepNNInitializationHe.ipynb | evertonaleixo/coursera-ibm-capstone | eb0f1879cd719aa0100a2ee5a3ba1c12f05fc841 | [
"Apache-2.0"
]
| null | null | null | pyTorch-Week5-DeepNNInitializationHe.ipynb | evertonaleixo/coursera-ibm-capstone | eb0f1879cd719aa0100a2ee5a3ba1c12f05fc841 | [
"Apache-2.0"
]
| null | null | null | pyTorch-Week5-DeepNNInitializationHe.ipynb | evertonaleixo/coursera-ibm-capstone | eb0f1879cd719aa0100a2ee5a3ba1c12f05fc841 | [
"Apache-2.0"
]
| null | null | null | 138.83844 | 18,766 | 0.796722 | [
[
[
"<a href=\"http://cocl.us/pytorch_link_top\">\n <img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DL0110EN/notebook_images%20/Pytochtop.png\" width=\"750\" alt=\"IBM Product \" />\n</a> \n",
"_____no_output_____"
],
[
"<img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DL0110EN/notebook_images%20/cc-logo-square.png\" width=\"200\" alt=\"cognitiveclass.ai logo\" />\n",
"_____no_output_____"
],
[
"<h1>Test Uniform, Default and He Initialization on MNIST Dataset with Relu Activation</h1>\n",
"_____no_output_____"
],
[
"<h3>Objective for this Notebook<h3> \n<h5> 1. Learn how to Define Several Neural Network, Criterion function, Optimizer.</h5>\n<h5> 2. Test Uniform, Default and He Initialization </h5> \n",
"_____no_output_____"
],
[
"<h2>Table of Contents</h2>\n<p>In this lab, you will test the Uniform Initialization, Default Initialization and He Initialization on the MNIST dataset with Relu Activation</p>\n\n<ul>\n <li><a href=\"#Model\">Neural Network Module and Training Function</a></li>\n <li><a href=\"#Makeup_Data\">Make Some Data</a></li>\n <li><a href=\"#Cost\">Define Several Neural Network, Criterion function, Optimizer</a></li>\n <li><a href=\"#Train\">Test Uniform, Default and He Initialization</a></li>\n <li><a href=\"#Result\">Analyze Results</a></li>\n</ul>\n<p>Estimated Time Needed: <strong>25 min</strong></p>\n\n<hr>\n",
"_____no_output_____"
],
[
"<h2>Preparation</h2>\n",
"_____no_output_____"
],
[
"We'll need the following libraries: \n",
"_____no_output_____"
]
],
[
[
"# Import the libraries we need to use in this lab\n\n# Using the following line code to install the torchvision library\n# !conda install -y torchvision\n\nimport torch \nimport torch.nn as nn\nimport torchvision.transforms as transforms\nimport torchvision.datasets as dsets\nimport torch.nn.functional as F\nimport matplotlib.pylab as plt\nimport numpy as np\n\ntorch.manual_seed(0)",
"_____no_output_____"
]
],
[
[
"<!--Empty Space for separating topics-->\n",
"_____no_output_____"
],
[
"<h2 id=\"Model\">Neural Network Module and Training Function</h2> \n",
"_____no_output_____"
],
[
"Define the neural network module or class with He Initialization\n",
"_____no_output_____"
]
],
[
[
"# Define the class for neural network model with He Initialization\n\nclass Net_He(nn.Module):\n \n # Constructor\n def __init__(self, Layers):\n super(Net_He, self).__init__()\n self.hidden = nn.ModuleList()\n\n for input_size, output_size in zip(Layers, Layers[1:]):\n linear = nn.Linear(input_size, output_size)\n torch.nn.init.kaiming_uniform_(linear.weight, nonlinearity='relu')\n self.hidden.append(linear)\n\n # Prediction\n def forward(self, x):\n L = len(self.hidden)\n for (l, linear_transform) in zip(range(L), self.hidden):\n if l < L - 1:\n x = F.relu(linear_transform(x))\n else:\n x = linear_transform(x)\n return x",
"_____no_output_____"
]
],
[
[
"Define the class or neural network with Uniform Initialization\n",
"_____no_output_____"
]
],
[
[
"# Define the class for neural network model with Uniform Initialization\n\nclass Net_Uniform(nn.Module):\n \n # Constructor\n def __init__(self, Layers):\n super(Net_Uniform, self).__init__()\n self.hidden = nn.ModuleList()\n\n for input_size, output_size in zip(Layers, Layers[1:]):\n linear = nn.Linear(input_size,output_size)\n linear.weight.data.uniform_(0, 1)\n self.hidden.append(linear)\n \n # Prediction\n def forward(self, x):\n L = len(self.hidden)\n for (l, linear_transform) in zip(range(L), self.hidden):\n if l < L - 1:\n x = F.relu(linear_transform(x))\n else:\n x = linear_transform(x)\n \n return x",
"_____no_output_____"
]
],
[
[
"Class or Neural Network with PyTorch Default Initialization\n",
"_____no_output_____"
]
],
[
[
"# Define the class for neural network model with PyTorch Default Initialization\n\nclass Net(nn.Module):\n \n # Constructor\n def __init__(self, Layers):\n super(Net, self).__init__()\n self.hidden = nn.ModuleList()\n\n for input_size, output_size in zip(Layers, Layers[1:]):\n linear = nn.Linear(input_size, output_size)\n self.hidden.append(linear)\n \n def forward(self, x):\n L=len(self.hidden)\n for (l, linear_transform) in zip(range(L), self.hidden):\n if l < L - 1:\n x = F.relu(linear_transform(x))\n else:\n x = linear_transform(x)\n \n return x",
"_____no_output_____"
]
],
[
[
"Define a function to train the model, in this case the function returns a Python dictionary to store the training loss and accuracy on the validation data \n",
"_____no_output_____"
]
],
[
[
"# Define function to train model\n\ndef train(model, criterion, train_loader, validation_loader, optimizer, epochs = 100):\n i = 0\n loss_accuracy = {'training_loss': [], 'validation_accuracy': []} \n \n #n_epochs\n for epoch in range(epochs):\n for i, (x, y) in enumerate(train_loader):\n optimizer.zero_grad()\n z = model(x.view(-1, 28 * 28))\n loss = criterion(z, y)\n loss.backward()\n optimizer.step()\n loss_accuracy['training_loss'].append(loss.data.item())\n \n correct = 0\n for x, y in validation_loader:\n yhat = model(x.view(-1, 28 * 28))\n _, label = torch.max(yhat, 1)\n correct += (label == y).sum().item()\n accuracy = 100 * (correct / len(validation_dataset))\n loss_accuracy['validation_accuracy'].append(accuracy)\n \n return loss_accuracy",
"_____no_output_____"
]
],
[
[
"<!--Empty Space for separating topics-->\n",
"_____no_output_____"
],
[
"<h2 id=\"Makeup_Data\">Make some Data</h2> \n",
"_____no_output_____"
],
[
"Load the training dataset by setting the parameters <code>train </code> to <code>True</code> and convert it to a tensor by placing a transform object int the argument <code>transform</code>\n",
"_____no_output_____"
]
],
[
[
"# Create the training dataset\n\ntrain_dataset = dsets.MNIST(root='./data', train=True, download=True, transform=transforms.ToTensor())",
"_____no_output_____"
]
],
[
[
"Load the testing dataset by setting the parameters train <code>False</code> and convert it to a tensor by placing a transform object int the argument <code>transform</code>\n",
"_____no_output_____"
]
],
[
[
"# Create the validation dataset\n\nvalidation_dataset = dsets.MNIST(root='./data', train=False, download=True, transform=transforms.ToTensor())",
"_____no_output_____"
]
],
[
[
"Create the training-data loader and the validation-data loader object \n",
"_____no_output_____"
]
],
[
[
"# Create the data loader for training and validation\n\ntrain_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=2000, shuffle=True)\nvalidation_loader = torch.utils.data.DataLoader(dataset=validation_dataset, batch_size=5000, shuffle=False)",
"_____no_output_____"
]
],
[
[
"<!--Empty Space for separating topics-->\n",
"_____no_output_____"
],
[
"<h2 id=\"Cost\">Define Neural Network, Criterion function, Optimizer and Train the Model</h2> \n",
"_____no_output_____"
],
[
"Create the criterion function \n",
"_____no_output_____"
]
],
[
[
"# Create the criterion function\n\ncriterion = nn.CrossEntropyLoss()",
"_____no_output_____"
]
],
[
[
"Create a list that contains layer size \n",
"_____no_output_____"
]
],
[
[
"# Create the parameters\n\ninput_dim = 28 * 28\noutput_dim = 10\nlayers = [input_dim, 100, 200, 100, output_dim]",
"_____no_output_____"
]
],
[
[
"<!--Empty Space for separating topics-->\n",
"_____no_output_____"
],
[
"<h2 id=\"Train\">Test PyTorch Default Initialization, Xavier Initialization and Uniform Initialization</h2> \n",
"_____no_output_____"
],
[
"Train the network using PyTorch Default Initialization\n",
"_____no_output_____"
]
],
[
[
"# Train the model with the default initialization\n\nmodel = Net(layers)\nlearning_rate = 0.01\noptimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)\ntraining_results = train(model, criterion, train_loader,validation_loader, optimizer, epochs=30)",
"_____no_output_____"
]
],
[
[
"Train the network using He Initialization function\n",
"_____no_output_____"
]
],
[
[
"# Train the model with the He initialization\n\nmodel_He = Net_He(layers)\noptimizer = torch.optim.SGD(model_He.parameters(), lr=learning_rate)\ntraining_results_He = train(model_He, criterion, train_loader, validation_loader, optimizer, epochs=30)",
"_____no_output_____"
]
],
[
[
"Train the network using Uniform Initialization function\n",
"_____no_output_____"
]
],
[
[
"# Train the model with the Uniform initialization\n\nmodel_Uniform = Net_Uniform(layers)\noptimizer = torch.optim.SGD(model_Uniform.parameters(), lr=learning_rate)\ntraining_results_Uniform = train(model_Uniform, criterion, train_loader, validation_loader, optimizer, epochs=30)",
"_____no_output_____"
]
],
[
[
"<!--Empty Space for separating topics-->\n",
"_____no_output_____"
],
[
"<h2 id=\"Result\">Analyze Results</h2> \n",
"_____no_output_____"
],
[
"Compare the training loss for each activation \n",
"_____no_output_____"
]
],
[
[
"# Plot the loss\n\nplt.plot(training_results_He['training_loss'], label='He')\nplt.plot(training_results['training_loss'], label='Default')\nplt.plot(training_results_Uniform['training_loss'], label='Uniform')\nplt.ylabel('loss')\nplt.xlabel('iteration ') \nplt.title('training loss iterations')\nplt.legend()",
"_____no_output_____"
]
],
[
[
"Compare the validation loss for each model \n",
"_____no_output_____"
]
],
[
[
"# Plot the accuracy\n\nplt.plot(training_results_He['validation_accuracy'], label='He')\nplt.plot(training_results['validation_accuracy'], label='Default')\nplt.plot(training_results_Uniform['validation_accuracy'], label='Uniform') \nplt.ylabel('validation accuracy')\nplt.xlabel('epochs ') \nplt.legend()\nplt.show()",
"_____no_output_____"
]
],
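[
[
"# A small sketch (added for illustration): print the final validation accuracy reached by each\n# initialization scheme, to complement the plots above.\nprint('He: {:.2f}%'.format(training_results_He['validation_accuracy'][-1]))\nprint('Default: {:.2f}%'.format(training_results['validation_accuracy'][-1]))\nprint('Uniform: {:.2f}%'.format(training_results_Uniform['validation_accuracy'][-1]))",
"_____no_output_____"
]
],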
[
[
"<!--Empty Space for separating topics-->\n",
"_____no_output_____"
],
[
"<a href=\"http://cocl.us/pytorch_link_bottom\">\n <img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DL0110EN/notebook_images%20/notebook_bottom%20.png\" width=\"750\" alt=\"PyTorch Bottom\" />\n</a>\n",
"_____no_output_____"
],
[
"<h2>About the Authors:</h2> \n\n<a href=\"https://www.linkedin.com/in/joseph-s-50398b136/\">Joseph Santarcangelo</a> has a PhD in Electrical Engineering, his research focused on using machine learning, signal processing, and computer vision to determine how videos impact human cognition. Joseph has been working for IBM since he completed his PhD. \n",
"_____no_output_____"
],
[
"Other contributors: <a href=\"https://www.linkedin.com/in/michelleccarey/\">Michelle Carey</a>, <a href=\"www.linkedin.com/in/jiahui-mavis-zhou-a4537814a\">Mavis Zhou</a>\n",
"_____no_output_____"
],
[
"## Change Log\n\n| Date (YYYY-MM-DD) | Version | Changed By | Change Description |\n| ----------------- | ------- | ---------- | ----------------------------------------------------------- |\n| 2020-09-23 | 2.0 | Srishti | Migrated Lab to Markdown and added to course repo in GitLab |\n\n<hr>\n\n## <h3 align=\"center\"> © IBM Corporation 2020. All rights reserved. <h3/>\n",
"_____no_output_____"
],
[
"<hr>\n",
"_____no_output_____"
],
[
"Copyright © 2018 <a href=\"cognitiveclass.ai?utm_source=bducopyrightlink&utm_medium=dswb&utm_campaign=bdu\">cognitiveclass.ai</a>. This notebook and its source code are released under the terms of the <a href=\"https://bigdatauniversity.com/mit-license/\">MIT License</a>.\n",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
]
|
ec6894c8b652b0f92ee5c2b831b8b6392c6ad350 | 44,692 | ipynb | Jupyter Notebook | chapter06_optimization/rmsprop-gluon.ipynb | aswinjeff/git-clone-https-github.com-aswinjeff-deeplearning1 | 7daeec279b05ddc8e9db35af7396f1c4f6874a72 | [
"Apache-2.0"
]
| 2,796 | 2017-07-12T06:23:19.000Z | 2022-02-19T16:38:09.000Z | chapter06_optimization/rmsprop-gluon.ipynb | aswinjeff/git-clone-https-github.com-aswinjeff-deeplearning1 | 7daeec279b05ddc8e9db35af7396f1c4f6874a72 | [
"Apache-2.0"
]
| 337 | 2017-07-12T17:07:41.000Z | 2020-10-15T20:19:17.000Z | chapter06_optimization/rmsprop-gluon.ipynb | aswinjeff/git-clone-https-github.com-aswinjeff-deeplearning1 | 7daeec279b05ddc8e9db35af7396f1c4f6874a72 | [
"Apache-2.0"
]
| 867 | 2017-07-13T03:59:31.000Z | 2022-03-18T15:01:55.000Z | 266.02381 | 39,918 | 0.90712 | [
[
[
"# RMSprop with `Gluon`\n\n",
"_____no_output_____"
]
],
[
[
"import mxnet as mx\nfrom mxnet import autograd\nfrom mxnet import gluon\nfrom mxnet import ndarray as nd\nimport numpy as np\nimport random\n\nmx.random.seed(1)\nrandom.seed(1)\n\n# Generate data.\nnum_inputs = 2\nnum_examples = 1000\ntrue_w = [2, -3.4]\ntrue_b = 4.2\nX = nd.random_normal(scale=1, shape=(num_examples, num_inputs))\ny = true_w[0] * X[:, 0] + true_w[1] * X[:, 1] + true_b\ny += .01 * nd.random_normal(scale=1, shape=y.shape)\ndataset = gluon.data.ArrayDataset(X, y)\n\nnet = gluon.nn.Sequential()\nnet.add(gluon.nn.Dense(1))\nsquare_loss = gluon.loss.L2Loss()",
"_____no_output_____"
],
[
"%matplotlib inline\nimport matplotlib as mpl\nmpl.rcParams['figure.dpi']= 120\nimport matplotlib.pyplot as plt\n\ndef train(batch_size, lr, gamma, epochs, period):\n assert period >= batch_size and period % batch_size == 0\n net.collect_params().initialize(mx.init.Normal(sigma=1), force_reinit=True)\n # RMSProp.\n trainer = gluon.Trainer(net.collect_params(), 'rmsprop',\n {'learning_rate': lr, 'gamma1': gamma})\n data_iter = gluon.data.DataLoader(dataset, batch_size, shuffle=True)\n total_loss = [np.mean(square_loss(net(X), y).asnumpy())]\n \n for epoch in range(1, epochs + 1):\n for batch_i, (data, label) in enumerate(data_iter):\n with autograd.record():\n output = net(data)\n loss = square_loss(output, label)\n loss.backward()\n trainer.step(batch_size)\n\n if batch_i * batch_size % period == 0:\n total_loss.append(np.mean(square_loss(net(X), y).asnumpy()))\n print(\"Batch size %d, Learning rate %f, Epoch %d, loss %.4e\" % \n (batch_size, trainer.learning_rate, epoch, total_loss[-1]))\n\n print('w:', np.reshape(net[0].weight.data().asnumpy(), (1, -1)), \n 'b:', net[0].bias.data().asnumpy()[0], '\\n')\n x_axis = np.linspace(0, epochs, len(total_loss), endpoint=True)\n plt.semilogy(x_axis, total_loss)\n plt.xlabel('epoch')\n plt.ylabel('loss')\n plt.show()",
"_____no_output_____"
],
[
"train(batch_size=10, lr=0.03, gamma=0.9, epochs=3, period=10)",
"Batch size 10, Learning rate 0.030000, Epoch 1, loss 7.5963e-01\nBatch size 10, Learning rate 0.030000, Epoch 2, loss 1.4048e-04\nBatch size 10, Learning rate 0.030000, Epoch 3, loss 1.1444e-04\nw: [[ 2.00390077 -3.39570308]] b: 4.20971 \n\n"
]
],
[
[
"## Next\n[AdaDalta from scratch](../chapter06_optimization/adadelta-scratch.ipynb)",
"_____no_output_____"
],
[
"For whinges or inquiries, [open an issue on GitHub.](https://github.com/zackchase/mxnet-the-straight-dope)",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown"
]
| [
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
]
]
|
ec68ac12dea172ab21e9d97054d2b6bf15d559ec | 724 | ipynb | Jupyter Notebook | labs/Untitled.ipynb | MaybeS/DramaDatabase | 56f530535c57835ff4630dd5602c6d5488ff63f7 | [
"MIT"
]
| 1 | 2021-11-20T08:48:10.000Z | 2021-11-20T08:48:10.000Z | labs/Untitled.ipynb | MaybeS/DramaDatabase | 56f530535c57835ff4630dd5602c6d5488ff63f7 | [
"MIT"
]
| null | null | null | labs/Untitled.ipynb | MaybeS/DramaDatabase | 56f530535c57835ff4630dd5602c6d5488ff63f7 | [
"MIT"
]
| 3 | 2021-05-05T18:01:55.000Z | 2021-11-07T09:24:57.000Z | 16.837209 | 34 | 0.516575 | [
[
[
"import pandas as pd\nimport numpy as np",
"_____no_output_____"
]
]
]
| [
"code"
]
| [
[
"code"
]
]
|
ec68acda7bae9010835ad40fba5ec0c3e88b8e13 | 35,860 | ipynb | Jupyter Notebook | code/Results1D/dependence on batch size/eigenvalues distribution/eigenvalues by DGM(grid size = 100, 200, 400, 800, 1600).ipynb | wukekever/landscapes | addc5dd1e085b7ed3e6a4a76648aa31f7f6dcc03 | [
"MIT"
]
| 9 | 2020-11-13T10:53:05.000Z | 2021-12-17T23:07:56.000Z | code/Results1D/dependence on batch size/eigenvalues distribution/eigenvalues by DGM(grid size = 100, 200, 400, 800, 1600).ipynb | wukekever/landscapes | addc5dd1e085b7ed3e6a4a76648aa31f7f6dcc03 | [
"MIT"
]
| null | null | null | code/Results1D/dependence on batch size/eigenvalues distribution/eigenvalues by DGM(grid size = 100, 200, 400, 800, 1600).ipynb | wukekever/landscapes | addc5dd1e085b7ed3e6a4a76648aa31f7f6dcc03 | [
"MIT"
]
| null | null | null | 47.559682 | 159 | 0.632013 | [
[
[
"import numpy as np\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"eigen_vals_of_DGM_100 = np.load(\"eigen_vals_of_DGM_100.npy\")\neigen_vals_of_DGM_200 = np.load(\"eigen_vals_of_DGM_200.npy\")\neigen_vals_of_DGM_400 = np.load(\"eigen_vals_of_DGM_400.npy\")\neigen_vals_of_DGM_800 = np.load(\"eigen_vals_of_DGM_800.npy\")\neigen_vals_of_DGM_1600 = np.load(\"eigen_vals_of_DGM_1600.npy\")",
"_____no_output_____"
],
[
"import matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np",
"_____no_output_____"
],
[
"help(plt.hist)",
"_____no_output_____"
],
[
"np.sort(data)",
"_____no_output_____"
],
[
"Finish_eigen_vals_of_DGM_100 = eigen_vals_of_DGM_100[:,200]\ndata = Finish_eigen_vals_of_DGM_100\nprint(len(data))\nprint(data)\n# plt.hist(data, alpha = 0.5, color = '#ffffff')\nplt.hist(data, alpha = 0.5, color = 'b', bins = [0, 0.0001, 0.01, 0.1, 1])\nax = sns.rugplot(data,\n color = 'r',\n height = 0.2)\nplt.title('Distribution of eigenvalues (batch size = 100)', size = 12)\nax.set_xlabel('Eigenvalue', size = 12)\nax.set_ylabel('Counter', size = 12)\nplt.savefig(\"Eigenvalues_Distribution_of_DGM_Batchsize_100.eps\", dpi = 120)",
"_____no_output_____"
],
[
"Finish_eigen_vals_of_DGM_100 = eigen_vals_of_DGM_100[:,200]\ndata = Finish_eigen_vals_of_DGM_100\nprint(len(data))\n# plt.hist(data, alpha = 0.5, color = '#ffffff')\nplt.hist(data, alpha = 0.5, color = 'b')\nax = sns.rugplot(data,\n color = 'r',\n height = 0.2)\nplt.title('Distribution of eigenvalues (batch size = 100)', size = 12)\nax.set_xlabel('Eigenvalue', size = 12)\nax.set_ylabel('Counter', size = 12)\nplt.savefig(\"Eigenvalues_Distribution_of_DGM_Batchsize_100.eps\", dpi = 120)",
"_____no_output_____"
],
[
"Finish_eigen_vals_of_DGM_200 = eigen_vals_of_DGM_200[:,200]\ndata = Finish_eigen_vals_of_DGM_200\nprint(len(data))\n# plt.hist(data, alpha = 0.5, color = '#ffffff')\nplt.hist(data, alpha = 0.5, color = 'b')\nax = sns.rugplot(data,\n color = 'r',\n height = 0.2)\nplt.title('Distribution of eigenvalues (batch size = 200)', size = 12)\nax.set_xlabel('Eigenvalue', size = 12)\nax.set_ylabel('Counter', size = 12)\nplt.savefig(\"Eigenvalues_Distribution_of_DGM_Batchsize_200.eps\", dpi = 120)",
"_____no_output_____"
],
[
"Finish_eigen_vals_of_DGM_400 = eigen_vals_of_DGM_400[:,200]\ndata = Finish_eigen_vals_of_DGM_400\nprint(len(data))\nprint(data)\n# plt.hist(data, alpha = 0.5, color = '#ffffff')\nplt.hist(data, alpha = 0.5, color = 'b')\nax = sns.rugplot(data,\n color = 'r',\n height = 0.2)\nplt.title('Distribution of eigenvalues (batch size = 400)', size = 12)\nax.set_xlabel('Eigenvalue', size = 12)\nax.set_ylabel('Counter', size = 12)\nplt.savefig(\"Eigenvalues_Distribution_of_DGM_Batchsize_400.eps\", dpi = 120)",
"_____no_output_____"
],
[
"Finish_eigen_vals_of_DGM_1600 = eigen_vals_of_DGM_1600[:,200]\ndata = Finish_eigen_vals_of_DGM_1600\nprint(len(data))\nprint(data)\n# plt.hist(data, alpha = 0.5, color = '#ffffff')\nplt.hist(data, alpha = 0.5, color = 'b')\nax = sns.rugplot(data,\n color = 'r',\n height = 0.2)\nplt.title('Distribution of eigenvalues (batch size = 1600)', size = 12)\nax.set_xlabel('Eigenvalue', size = 12)\nax.set_ylabel('Counter', size = 12)\nplt.savefig(\"Eigenvalues_Distribution_of_DGM_Batchsize_1600.eps\", dpi = 120)",
"_____no_output_____"
],
[
"top_1_eigen_vals_of_DGM_100 = eigen_vals_of_DGM_100[0,0:200]\ntop_2_eigen_vals_of_DGM_100 = eigen_vals_of_DGM_100[1,0:200]\ntop_3_eigen_vals_of_DGM_100 = eigen_vals_of_DGM_100[2,0:200]\ntop_4_eigen_vals_of_DGM_100 = eigen_vals_of_DGM_100[3,0:200]\ntop_5_eigen_vals_of_DGM_100 = eigen_vals_of_DGM_100[4,0:200]",
"_____no_output_____"
],
[
"# top 4\nprint(top_1_eigen_vals_of_DGM_100[-1])\nprint(top_2_eigen_vals_of_DGM_100[-1])\nprint(top_3_eigen_vals_of_DGM_100[-1])\nprint(top_4_eigen_vals_of_DGM_100[-1])\nprint(top_5_eigen_vals_of_DGM_100[-1])\nprint(np.log10(top_1_eigen_vals_of_DGM_100[-1]*top_2_eigen_vals_of_DGM_100[-1]*top_3_eigen_vals_of_DGM_100[-1]*top_4_eigen_vals_of_DGM_100[-1]))",
"_____no_output_____"
],
[
"bottom_1_eigen_vals_of_DGM_100 = eigen_vals_of_DGM_100[-1,0:200]\nbottom_2_eigen_vals_of_DGM_100 = eigen_vals_of_DGM_100[-2,0:200]\nbottom_3_eigen_vals_of_DGM_100 = eigen_vals_of_DGM_100[-3,0:200]\nbottom_4_eigen_vals_of_DGM_100 = eigen_vals_of_DGM_100[-4,0:200]\nbottom_5_eigen_vals_of_DGM_100 = eigen_vals_of_DGM_100[-5,0:200]",
"_____no_output_____"
],
[
"Epoch = [i for i in range(len(top_1_eigen_vals_of_DGM_100))]\nTop_1_eigen_vals_of_DGM_100 = list(top_1_eigen_vals_of_DGM_100)\nTop_2_eigen_vals_of_DGM_100 = list(top_2_eigen_vals_of_DGM_100)\nTop_3_eigen_vals_of_DGM_100 = list(top_3_eigen_vals_of_DGM_100)\nTop_4_eigen_vals_of_DGM_100 = list(top_4_eigen_vals_of_DGM_100)\nTop_5_eigen_vals_of_DGM_100 = list(top_5_eigen_vals_of_DGM_100)",
"_____no_output_____"
],
[
"Bottom_1_eigen_vals_of_DGM_100 = list(bottom_1_eigen_vals_of_DGM_100)\nBottom_2_eigen_vals_of_DGM_100 = list(bottom_2_eigen_vals_of_DGM_100)\nBottom_3_eigen_vals_of_DGM_100 = list(bottom_3_eigen_vals_of_DGM_100)\nBottom_4_eigen_vals_of_DGM_100 = list(bottom_4_eigen_vals_of_DGM_100)\nBottom_5_eigen_vals_of_DGM_100 = list(bottom_5_eigen_vals_of_DGM_100)",
"_____no_output_____"
],
[
"plt.figure(figsize=(12,10))\nplt.plot(Epoch, Top_1_eigen_vals_of_DGM_100, color = \"r\", linestyle= \"-\", linewidth = 1.5, label = \"Top 1 eigenvalue\")\nplt.plot(Epoch, Top_2_eigen_vals_of_DGM_100, color = \"k\", linestyle= \"-\", linewidth = 1.5, label = \"Top 2 eigenvalue\")\nplt.plot(Epoch, Top_3_eigen_vals_of_DGM_100, color = \"b\", linestyle= \"-\", linewidth = 1.5, label = \"Top 3 eigenvalue\")\nplt.plot(Epoch, Top_4_eigen_vals_of_DGM_100, color = \"g\", linestyle= \"-\", linewidth = 1.5, label = \"Top 4 eigenvalue\")\nplt.plot(Epoch, Top_5_eigen_vals_of_DGM_100, color = \"y\", linestyle= \"-\", linewidth = 1.5, label = \"Top 5 eigenvalue\")\nplt.plot(Epoch, Bottom_1_eigen_vals_of_DGM_100, color = \"r\", linestyle= \"-.\", marker = \"+\", linewidth = 0.5, label = \"Bottom 1 eigenvalue\")\nplt.plot(Epoch, Bottom_2_eigen_vals_of_DGM_100, color = \"k\", linestyle= \"-.\", marker = \"+\", linewidth = 0.5, label = \"Bottom 2 eigenvalue\")\nplt.plot(Epoch, Bottom_3_eigen_vals_of_DGM_100, color = \"b\", linestyle= \"-.\", marker = \"+\", linewidth = 0.5, label = \"Bottom 3 eigenvalue\")\nplt.plot(Epoch, Bottom_4_eigen_vals_of_DGM_100, color = \"g\", linestyle= \"-.\", marker = \"+\", linewidth = 0.5, label = \"Bottom 4 eigenvalue\")\nplt.plot(Epoch, Bottom_5_eigen_vals_of_DGM_100, color = \"y\", linestyle= \"-.\", marker = \"+\", linewidth = 0.5, label = \"Bottom 5 eigenvalue\")\nplt.xlabel(\"epoch ( * 50)\")\nplt.ylabel(\"Top and bottom five eigenvalues of DGM (batch size = 100)\")\nplt.legend(loc = 'upper left', bbox_to_anchor = (0.7, 0.95))\nplt.savefig(\"Eigenvalues_of_DGM_Batchsize_100.eps\", dpi = 120)\nplt.show() ",
"_____no_output_____"
],
[
"top_1_eigen_vals_of_DGM_200 = eigen_vals_of_DGM_200[0,0:200]\ntop_2_eigen_vals_of_DGM_200 = eigen_vals_of_DGM_200[1,0:200]\ntop_3_eigen_vals_of_DGM_200 = eigen_vals_of_DGM_200[2,0:200]\ntop_4_eigen_vals_of_DGM_200 = eigen_vals_of_DGM_200[3,0:200]\ntop_5_eigen_vals_of_DGM_200 = eigen_vals_of_DGM_200[4,0:200]",
"_____no_output_____"
],
[
"print(top_1_eigen_vals_of_DGM_200[-1])\nprint(top_2_eigen_vals_of_DGM_200[-1])\nprint(top_3_eigen_vals_of_DGM_200[-1])\nprint(top_4_eigen_vals_of_DGM_200[-1])\nprint(top_5_eigen_vals_of_DGM_200[-1])\nprint(np.log10(top_1_eigen_vals_of_DGM_200[-1]*top_2_eigen_vals_of_DGM_200[-1]*top_3_eigen_vals_of_DGM_200[-1]*top_4_eigen_vals_of_DGM_200[-1]))",
"_____no_output_____"
],
[
"bottom_1_eigen_vals_of_DGM_200 = eigen_vals_of_DGM_200[-1,0:200]\nbottom_2_eigen_vals_of_DGM_200 = eigen_vals_of_DGM_200[-2,0:200]\nbottom_3_eigen_vals_of_DGM_200 = eigen_vals_of_DGM_200[-3,0:200]\nbottom_4_eigen_vals_of_DGM_200 = eigen_vals_of_DGM_200[-4,0:200]\nbottom_5_eigen_vals_of_DGM_200 = eigen_vals_of_DGM_200[-5,0:200]",
"_____no_output_____"
],
[
"Epoch = [i for i in range(len(top_1_eigen_vals_of_DGM_200))]\nTop_1_eigen_vals_of_DGM_200 = list(top_1_eigen_vals_of_DGM_200)\nTop_2_eigen_vals_of_DGM_200 = list(top_2_eigen_vals_of_DGM_200)\nTop_3_eigen_vals_of_DGM_200 = list(top_3_eigen_vals_of_DGM_200)\nTop_4_eigen_vals_of_DGM_200 = list(top_4_eigen_vals_of_DGM_200)\nTop_5_eigen_vals_of_DGM_200 = list(top_5_eigen_vals_of_DGM_200)",
"_____no_output_____"
],
[
"Bottom_1_eigen_vals_of_DGM_200 = list(bottom_1_eigen_vals_of_DGM_200)\nBottom_2_eigen_vals_of_DGM_200 = list(bottom_2_eigen_vals_of_DGM_200)\nBottom_3_eigen_vals_of_DGM_200 = list(bottom_3_eigen_vals_of_DGM_200)\nBottom_4_eigen_vals_of_DGM_200 = list(bottom_4_eigen_vals_of_DGM_200)\nBottom_5_eigen_vals_of_DGM_200 = list(bottom_5_eigen_vals_of_DGM_200)",
"_____no_output_____"
],
[
"plt.figure(figsize=(12,10))\nplt.plot(Epoch, Top_1_eigen_vals_of_DGM_200, color = \"r\", linestyle= \"-\", linewidth = 1.5, label = \"Top 1 eigenvalue\")\nplt.plot(Epoch, Top_2_eigen_vals_of_DGM_200, color = \"k\", linestyle= \"-\", linewidth = 1.5, label = \"Top 2 eigenvalue\")\nplt.plot(Epoch, Top_3_eigen_vals_of_DGM_200, color = \"b\", linestyle= \"-\", linewidth = 1.5, label = \"Top 3 eigenvalue\")\nplt.plot(Epoch, Top_4_eigen_vals_of_DGM_200, color = \"g\", linestyle= \"-\", linewidth = 1.5, label = \"Top 4 eigenvalue\")\nplt.plot(Epoch, Top_5_eigen_vals_of_DGM_200, color = \"y\", linestyle= \"-\", linewidth = 1.5, label = \"Top 5 eigenvalue\")\nplt.plot(Epoch, Bottom_1_eigen_vals_of_DGM_200, color = \"r\", linestyle= \"-.\", marker = \"+\", linewidth = 0.5, label = \"Bottom 1 eigenvalue\")\nplt.plot(Epoch, Bottom_2_eigen_vals_of_DGM_200, color = \"k\", linestyle= \"-.\", marker = \"+\", linewidth = 0.5, label = \"Bottom 2 eigenvalue\")\nplt.plot(Epoch, Bottom_3_eigen_vals_of_DGM_200, color = \"b\", linestyle= \"-.\", marker = \"+\", linewidth = 0.5, label = \"Bottom 3 eigenvalue\")\nplt.plot(Epoch, Bottom_4_eigen_vals_of_DGM_200, color = \"g\", linestyle= \"-.\", marker = \"+\", linewidth = 0.5, label = \"Bottom 4 eigenvalue\")\nplt.plot(Epoch, Bottom_5_eigen_vals_of_DGM_200, color = \"y\", linestyle= \"-.\", marker = \"+\", linewidth = 0.5, label = \"Bottom 5 eigenvalue\")\nplt.xlabel(\"epoch ( * 50)\")\nplt.ylabel(\"Top and bottom five eigenvalues of DGM (batch size = 200)\")\nplt.legend(loc = 'upper left', bbox_to_anchor = (0.7, 0.85))\nplt.savefig(\"Eigenvalues_of_DGM_Batchsize_200.eps\", dpi = 120)\nplt.show()",
"_____no_output_____"
],
[
"# plt.figure(figsize=(12,10))\n# plt.plot(Epoch, Top_1_eigen_vals_of_DGM_200, color = \"r\", linestyle= \"-.\", marker = \".\", linewidth = 0.5, label = \"Top_1_eigen_vals\")\n# plt.plot(Epoch, Top_2_eigen_vals_of_DGM_200, color = \"k\", linestyle= \"-.\", marker = \".\", linewidth = 0.5, label = \"Top_2_eigen_vals\")\n# plt.plot(Epoch, Top_3_eigen_vals_of_DGM_200, color = \"b\", linestyle= \"-.\", marker = \".\", linewidth = 0.5, label = \"Top_3_eigen_vals\")\n# plt.plot(Epoch, Top_4_eigen_vals_of_DGM_200, color = \"g\", linestyle= \"-.\", marker = \".\", linewidth = 0.5, label = \"Top_4_eigen_vals\")\n# plt.plot(Epoch, Top_5_eigen_vals_of_DGM_200, color = \"y\", linestyle= \"-.\", marker = \".\", linewidth = 0.5, label = \"Top_5_eigen_vals\")\n# plt.plot(Epoch, Bottom_1_eigen_vals_of_DGM_200, color = \"r\", linestyle= \"-.\", marker = \"+\", linewidth = 0.5, label = \"Bottom_1_eigen_vals\")\n# plt.plot(Epoch, Bottom_2_eigen_vals_of_DGM_200, color = \"k\", linestyle= \"-.\", marker = \"+\", linewidth = 0.5, label = \"Bottom_2_eigen_vals\")\n# plt.plot(Epoch, Bottom_3_eigen_vals_of_DGM_200, color = \"b\", linestyle= \"-.\", marker = \"+\", linewidth = 0.5, label = \"Bottom_3_eigen_vals\")\n# plt.plot(Epoch, Bottom_4_eigen_vals_of_DGM_200, color = \"g\", linestyle= \"-.\", marker = \"+\", linewidth = 0.5, label = \"Bottom_4_eigen_vals\")\n# plt.plot(Epoch, Bottom_5_eigen_vals_of_DGM_200, color = \"y\", linestyle= \"-.\", marker = \"+\", linewidth = 0.5, label = \"Bottom_5_eigen_vals\")\n# plt.xlabel(\"epoch ( * 50)\")\n# plt.ylabel(\"top and bottom five eigen values of DGM (grid size = 200)\")\n# plt.legend(loc = 'upper left', bbox_to_anchor = (0.7, 0.95))\n# # plt.savefig(\"eigen_vals_of_DGM_200.eps\", dpi = 120)\n# plt.show() ",
"_____no_output_____"
],
[
"top_1_eigen_vals_of_DGM_400 = eigen_vals_of_DGM_400[0,0:200]\ntop_2_eigen_vals_of_DGM_400 = eigen_vals_of_DGM_400[1,0:200]\ntop_3_eigen_vals_of_DGM_400 = eigen_vals_of_DGM_400[2,0:200]\ntop_4_eigen_vals_of_DGM_400 = eigen_vals_of_DGM_400[3,0:200]\ntop_5_eigen_vals_of_DGM_400 = eigen_vals_of_DGM_400[4,0:200]",
"_____no_output_____"
],
[
"print(top_1_eigen_vals_of_DGM_400[-1])\nprint(top_2_eigen_vals_of_DGM_400[-1])\nprint(top_3_eigen_vals_of_DGM_400[-1])\nprint(top_4_eigen_vals_of_DGM_400[-1])\nprint(top_5_eigen_vals_of_DGM_400[-1])\nprint(np.log10(top_1_eigen_vals_of_DGM_400[-1]*top_2_eigen_vals_of_DGM_400[-1]*top_3_eigen_vals_of_DGM_400[-1]*top_4_eigen_vals_of_DGM_400[-1]))",
"_____no_output_____"
],
[
"bottom_1_eigen_vals_of_DGM_400 = eigen_vals_of_DGM_400[-1,0:200]\nbottom_2_eigen_vals_of_DGM_400 = eigen_vals_of_DGM_400[-2,0:200]\nbottom_3_eigen_vals_of_DGM_400 = eigen_vals_of_DGM_400[-3,0:200]\nbottom_4_eigen_vals_of_DGM_400 = eigen_vals_of_DGM_400[-4,0:200]\nbottom_5_eigen_vals_of_DGM_400 = eigen_vals_of_DGM_400[-5,0:200]",
"_____no_output_____"
],
[
"Epoch = [i for i in range(len(top_1_eigen_vals_of_DGM_400))]\nTop_1_eigen_vals_of_DGM_400 = list(top_1_eigen_vals_of_DGM_400)\nTop_2_eigen_vals_of_DGM_400 = list(top_2_eigen_vals_of_DGM_400)\nTop_3_eigen_vals_of_DGM_400 = list(top_3_eigen_vals_of_DGM_400)\nTop_4_eigen_vals_of_DGM_400 = list(top_4_eigen_vals_of_DGM_400)\nTop_5_eigen_vals_of_DGM_400 = list(top_5_eigen_vals_of_DGM_400)",
"_____no_output_____"
],
[
"Bottom_1_eigen_vals_of_DGM_400 = list(bottom_1_eigen_vals_of_DGM_400)\nBottom_2_eigen_vals_of_DGM_400 = list(bottom_2_eigen_vals_of_DGM_400)\nBottom_3_eigen_vals_of_DGM_400 = list(bottom_3_eigen_vals_of_DGM_400)\nBottom_4_eigen_vals_of_DGM_400 = list(bottom_4_eigen_vals_of_DGM_400)\nBottom_5_eigen_vals_of_DGM_400 = list(bottom_5_eigen_vals_of_DGM_400)",
"_____no_output_____"
],
[
"plt.figure(figsize=(12,10))\nplt.plot(Epoch, Top_1_eigen_vals_of_DGM_400, color = \"r\", linestyle= \"-\", linewidth = 1.5, label = \"Top 1 eigenvalue\")\nplt.plot(Epoch, Top_2_eigen_vals_of_DGM_400, color = \"k\", linestyle= \"-\", linewidth = 1.5, label = \"Top 2 eigenvalue\")\nplt.plot(Epoch, Top_3_eigen_vals_of_DGM_400, color = \"b\", linestyle= \"-\", linewidth = 1.5, label = \"Top 3 eigenvalue\")\nplt.plot(Epoch, Top_4_eigen_vals_of_DGM_400, color = \"g\", linestyle= \"-\", linewidth = 1.5, label = \"Top 4 eigenvalue\")\nplt.plot(Epoch, Top_5_eigen_vals_of_DGM_400, color = \"y\", linestyle= \"-\", linewidth = 1.5, label = \"Top 5 eigenvalue\")\nplt.plot(Epoch, Bottom_1_eigen_vals_of_DGM_400, color = \"r\", linestyle= \"-.\", marker = \"+\", linewidth = 0.5, label = \"Bottom 1 eigenvalue\")\nplt.plot(Epoch, Bottom_2_eigen_vals_of_DGM_400, color = \"k\", linestyle= \"-.\", marker = \"+\", linewidth = 0.5, label = \"Bottom 2 eigenvalue\")\nplt.plot(Epoch, Bottom_3_eigen_vals_of_DGM_400, color = \"b\", linestyle= \"-.\", marker = \"+\", linewidth = 0.5, label = \"Bottom 3 eigenvalue\")\nplt.plot(Epoch, Bottom_4_eigen_vals_of_DGM_400, color = \"g\", linestyle= \"-.\", marker = \"+\", linewidth = 0.5, label = \"Bottom 4 eigenvalue\")\nplt.plot(Epoch, Bottom_5_eigen_vals_of_DGM_400, color = \"y\", linestyle= \"-.\", marker = \"+\", linewidth = 0.5, label = \"Bottom 5 eigenvalue\")\nplt.xlabel(\"epoch ( * 50)\")\nplt.ylabel(\"Top and bottom five eigenvalues of DGM (batch size = 400)\")\nplt.legend(loc = 'upper left', bbox_to_anchor = (0.7, 0.95))\nplt.savefig(\"Eigenvalues_of_DGM_Batchsize_400.eps\", dpi = 120)\nplt.show()",
"_____no_output_____"
],
[
"# plt.figure(figsize=(12,10))\n# plt.plot(Epoch, Top_1_eigen_vals_of_DGM_400, color = \"r\", linestyle= \"-.\", marker = \".\", linewidth = 0.5, label = \"Top_1_eigen_vals\")\n# plt.plot(Epoch, Top_2_eigen_vals_of_DGM_400, color = \"k\", linestyle= \"-.\", marker = \".\", linewidth = 0.5, label = \"Top_2_eigen_vals\")\n# plt.plot(Epoch, Top_3_eigen_vals_of_DGM_400, color = \"b\", linestyle= \"-.\", marker = \".\", linewidth = 0.5, label = \"Top_3_eigen_vals\")\n# plt.plot(Epoch, Top_4_eigen_vals_of_DGM_400, color = \"g\", linestyle= \"-.\", marker = \".\", linewidth = 0.5, label = \"Top_4_eigen_vals\")\n# plt.plot(Epoch, Top_5_eigen_vals_of_DGM_400, color = \"y\", linestyle= \"-.\", marker = \".\", linewidth = 0.5, label = \"Top_5_eigen_vals\")\n# plt.plot(Epoch, Bottom_1_eigen_vals_of_DGM_400, color = \"r\", linestyle= \"-.\", marker = \"+\", linewidth = 0.5, label = \"Bottom_1_eigen_vals\")\n# plt.plot(Epoch, Bottom_2_eigen_vals_of_DGM_400, color = \"k\", linestyle= \"-.\", marker = \"+\", linewidth = 0.5, label = \"Bottom_2_eigen_vals\")\n# plt.plot(Epoch, Bottom_3_eigen_vals_of_DGM_400, color = \"b\", linestyle= \"-.\", marker = \"+\", linewidth = 0.5, label = \"Bottom_3_eigen_vals\")\n# plt.plot(Epoch, Bottom_4_eigen_vals_of_DGM_400, color = \"g\", linestyle= \"-.\", marker = \"+\", linewidth = 0.5, label = \"Bottom_4_eigen_vals\")\n# plt.plot(Epoch, Bottom_5_eigen_vals_of_DGM_400, color = \"y\", linestyle= \"-.\", marker = \"+\", linewidth = 0.5, label = \"Bottom_5_eigen_vals\")\n# plt.xlabel(\"epoch ( * 50)\")\n# plt.ylabel(\"top and bottom five eigen values of DGM (grid size = 400)\")\n# plt.legend(loc = 'upper left', bbox_to_anchor = (0.7, 0.95))\n# # plt.savefig(\"eigen_vals_of_DGM_400.eps\", dpi = 120)\n# plt.show() ",
"_____no_output_____"
],
[
"top_1_eigen_vals_of_DGM_800 = eigen_vals_of_DGM_800[0,0:200]\ntop_2_eigen_vals_of_DGM_800 = eigen_vals_of_DGM_800[1,0:200]\ntop_3_eigen_vals_of_DGM_800 = eigen_vals_of_DGM_800[2,0:200]\ntop_4_eigen_vals_of_DGM_800 = eigen_vals_of_DGM_800[3,0:200]\ntop_5_eigen_vals_of_DGM_800 = eigen_vals_of_DGM_800[4,0:200]",
"_____no_output_____"
],
[
"print(top_1_eigen_vals_of_DGM_800[-1])\nprint(top_2_eigen_vals_of_DGM_800[-1])\nprint(top_3_eigen_vals_of_DGM_800[-1])\nprint(top_4_eigen_vals_of_DGM_800[-1])\nprint(top_5_eigen_vals_of_DGM_800[-1])\nprint(np.log10(top_1_eigen_vals_of_DGM_800[-1]*top_2_eigen_vals_of_DGM_800[-1]*top_3_eigen_vals_of_DGM_800[-1]*top_4_eigen_vals_of_DGM_800[-1]))",
"_____no_output_____"
],
[
"bottom_1_eigen_vals_of_DGM_800 = eigen_vals_of_DGM_800[-1,0:200]\nbottom_2_eigen_vals_of_DGM_800 = eigen_vals_of_DGM_800[-2,0:200]\nbottom_3_eigen_vals_of_DGM_800 = eigen_vals_of_DGM_800[-3,0:200]\nbottom_4_eigen_vals_of_DGM_800 = eigen_vals_of_DGM_800[-4,0:200]\nbottom_5_eigen_vals_of_DGM_800 = eigen_vals_of_DGM_800[-5,0:200]",
"_____no_output_____"
],
[
"Epoch = [i for i in range(len(top_1_eigen_vals_of_DGM_800))]\nTop_1_eigen_vals_of_DGM_800 = list(top_1_eigen_vals_of_DGM_800)\nTop_2_eigen_vals_of_DGM_800 = list(top_2_eigen_vals_of_DGM_800)\nTop_3_eigen_vals_of_DGM_800 = list(top_3_eigen_vals_of_DGM_800)\nTop_4_eigen_vals_of_DGM_800 = list(top_4_eigen_vals_of_DGM_800)\nTop_5_eigen_vals_of_DGM_800 = list(top_5_eigen_vals_of_DGM_800)",
"_____no_output_____"
],
[
"Bottom_1_eigen_vals_of_DGM_800 = list(bottom_1_eigen_vals_of_DGM_800)\nBottom_2_eigen_vals_of_DGM_800 = list(bottom_2_eigen_vals_of_DGM_800)\nBottom_3_eigen_vals_of_DGM_800 = list(bottom_3_eigen_vals_of_DGM_800)\nBottom_4_eigen_vals_of_DGM_800 = list(bottom_4_eigen_vals_of_DGM_800)\nBottom_5_eigen_vals_of_DGM_800 = list(bottom_5_eigen_vals_of_DGM_800)",
"_____no_output_____"
],
[
"plt.figure(figsize=(12,10))\nplt.plot(Epoch, Top_1_eigen_vals_of_DGM_800, color = \"r\", linestyle= \"-\", linewidth = 1.5, label = \"Top 1 eigenvalue\")\nplt.plot(Epoch, Top_2_eigen_vals_of_DGM_800, color = \"k\", linestyle= \"-\", linewidth = 1.5, label = \"Top 2 eigenvalue\")\nplt.plot(Epoch, Top_3_eigen_vals_of_DGM_800, color = \"b\", linestyle= \"-\", linewidth = 1.5, label = \"Top 3 eigenvalue\")\nplt.plot(Epoch, Top_4_eigen_vals_of_DGM_800, color = \"g\", linestyle= \"-\", linewidth = 1.5, label = \"Top 4 eigenvalue\")\nplt.plot(Epoch, Top_5_eigen_vals_of_DGM_800, color = \"y\", linestyle= \"-\", linewidth = 1.5, label = \"Top 5 eigenvalue\")\nplt.plot(Epoch, Bottom_1_eigen_vals_of_DGM_800, color = \"r\", linestyle= \"-.\", marker = \"+\", linewidth = 0.5, label = \"Bottom 1 eigenvalue\")\nplt.plot(Epoch, Bottom_2_eigen_vals_of_DGM_800, color = \"k\", linestyle= \"-.\", marker = \"+\", linewidth = 0.5, label = \"Bottom 2 eigenvalue\")\nplt.plot(Epoch, Bottom_3_eigen_vals_of_DGM_800, color = \"b\", linestyle= \"-.\", marker = \"+\", linewidth = 0.5, label = \"Bottom 3 eigenvalue\")\nplt.plot(Epoch, Bottom_4_eigen_vals_of_DGM_800, color = \"g\", linestyle= \"-.\", marker = \"+\", linewidth = 0.5, label = \"Bottom 4 eigenvalue\")\nplt.plot(Epoch, Bottom_5_eigen_vals_of_DGM_800, color = \"y\", linestyle= \"-.\", marker = \"+\", linewidth = 0.5, label = \"Bottom 5 eigenvalue\")\nplt.xlabel(\"epoch ( * 50)\")\nplt.ylabel(\"Top and bottom five eigenvalues of DGM (batch size = 800)\")\nplt.legend(loc = 'upper left', bbox_to_anchor = (0.7, 0.9))\nplt.savefig(\"Eigenvalues_of_DGM_Batchsize_800.eps\", dpi = 120)\nplt.show()",
"_____no_output_____"
],
[
"# plt.figure(figsize=(12,10))\n# plt.plot(Epoch, Top_1_eigen_vals_of_DGM_800, color = \"r\", linestyle= \"-.\", marker = \".\", linewidth = 0.5, label = \"Top_1_eigen_vals\")\n# plt.plot(Epoch, Top_2_eigen_vals_of_DGM_800, color = \"k\", linestyle= \"-.\", marker = \".\", linewidth = 0.5, label = \"Top_2_eigen_vals\")\n# plt.plot(Epoch, Top_3_eigen_vals_of_DGM_800, color = \"b\", linestyle= \"-.\", marker = \".\", linewidth = 0.5, label = \"Top_3_eigen_vals\")\n# plt.plot(Epoch, Top_4_eigen_vals_of_DGM_800, color = \"g\", linestyle= \"-.\", marker = \".\", linewidth = 0.5, label = \"Top_4_eigen_vals\")\n# plt.plot(Epoch, Top_5_eigen_vals_of_DGM_800, color = \"y\", linestyle= \"-.\", marker = \".\", linewidth = 0.5, label = \"Top_5_eigen_vals\")\n# plt.plot(Epoch, Bottom_1_eigen_vals_of_DGM_800, color = \"r\", linestyle= \"-.\", marker = \"+\", linewidth = 0.5, label = \"Bottom_1_eigen_vals\")\n# plt.plot(Epoch, Bottom_2_eigen_vals_of_DGM_800, color = \"k\", linestyle= \"-.\", marker = \"+\", linewidth = 0.5, label = \"Bottom_2_eigen_vals\")\n# plt.plot(Epoch, Bottom_3_eigen_vals_of_DGM_800, color = \"b\", linestyle= \"-.\", marker = \"+\", linewidth = 0.5, label = \"Bottom_3_eigen_vals\")\n# plt.plot(Epoch, Bottom_4_eigen_vals_of_DGM_800, color = \"g\", linestyle= \"-.\", marker = \"+\", linewidth = 0.5, label = \"Bottom_4_eigen_vals\")\n# plt.plot(Epoch, Bottom_5_eigen_vals_of_DGM_800, color = \"y\", linestyle= \"-.\", marker = \"+\", linewidth = 0.5, label = \"Bottom_5_eigen_vals\")\n# plt.xlabel(\"epoch ( * 50)\")\n# plt.ylabel(\"top and bottom five eigen values of DGM (grid size = 800)\")\n# plt.legend(loc = 'upper left', bbox_to_anchor = (0.7, 0.95))\n# # plt.savefig(\"eigen_vals_of_DGM_800.eps\", dpi = 120)\n# plt.show() ",
"_____no_output_____"
],
[
"top_1_eigen_vals_of_DGM_1600 = eigen_vals_of_DGM_1600[0,0:200]\ntop_2_eigen_vals_of_DGM_1600 = eigen_vals_of_DGM_1600[1,0:200]\ntop_3_eigen_vals_of_DGM_1600 = eigen_vals_of_DGM_1600[2,0:200]\ntop_4_eigen_vals_of_DGM_1600 = eigen_vals_of_DGM_1600[3,0:200]\ntop_5_eigen_vals_of_DGM_1600 = eigen_vals_of_DGM_1600[4,0:200]",
"_____no_output_____"
],
[
"print(top_1_eigen_vals_of_DGM_1600[-1])\nprint(top_2_eigen_vals_of_DGM_1600[-1])\nprint(top_3_eigen_vals_of_DGM_1600[-1])\nprint(top_4_eigen_vals_of_DGM_1600[-1])\nprint(top_5_eigen_vals_of_DGM_1600[-1])\nprint(np.log10(top_1_eigen_vals_of_DGM_1600[-1]*top_2_eigen_vals_of_DGM_1600[-1]*top_3_eigen_vals_of_DGM_1600[-1]*top_4_eigen_vals_of_DGM_1600[-1]))",
"_____no_output_____"
],
[
"bottom_1_eigen_vals_of_DGM_1600 = eigen_vals_of_DGM_1600[-1,0:200]\nbottom_2_eigen_vals_of_DGM_1600 = eigen_vals_of_DGM_1600[-2,0:200]\nbottom_3_eigen_vals_of_DGM_1600 = eigen_vals_of_DGM_1600[-3,0:200]\nbottom_4_eigen_vals_of_DGM_1600 = eigen_vals_of_DGM_1600[-4,0:200]\nbottom_5_eigen_vals_of_DGM_1600 = eigen_vals_of_DGM_1600[-5,0:200]",
"_____no_output_____"
],
[
"Epoch = [i for i in range(len(top_1_eigen_vals_of_DGM_1600))]\nTop_1_eigen_vals_of_DGM_1600 = list(top_1_eigen_vals_of_DGM_1600)\nTop_2_eigen_vals_of_DGM_1600 = list(top_2_eigen_vals_of_DGM_1600)\nTop_3_eigen_vals_of_DGM_1600 = list(top_3_eigen_vals_of_DGM_1600)\nTop_4_eigen_vals_of_DGM_1600 = list(top_4_eigen_vals_of_DGM_1600)\nTop_5_eigen_vals_of_DGM_1600 = list(top_5_eigen_vals_of_DGM_1600)",
"_____no_output_____"
],
[
"Bottom_1_eigen_vals_of_DGM_1600 = list(bottom_1_eigen_vals_of_DGM_1600)\nBottom_2_eigen_vals_of_DGM_1600 = list(bottom_2_eigen_vals_of_DGM_1600)\nBottom_3_eigen_vals_of_DGM_1600 = list(bottom_3_eigen_vals_of_DGM_1600)\nBottom_4_eigen_vals_of_DGM_1600 = list(bottom_4_eigen_vals_of_DGM_1600)\nBottom_5_eigen_vals_of_DGM_1600 = list(bottom_5_eigen_vals_of_DGM_1600)",
"_____no_output_____"
],
[
"plt.figure(figsize=(12,10))\nplt.plot(Epoch, Top_1_eigen_vals_of_DGM_1600, color = \"r\", linestyle= \"-\", linewidth = 1.5, label = \"Top 1 eigenvalue\")\nplt.plot(Epoch, Top_2_eigen_vals_of_DGM_1600, color = \"k\", linestyle= \"-\", linewidth = 1.5, label = \"Top 2 eigenvalue\")\nplt.plot(Epoch, Top_3_eigen_vals_of_DGM_1600, color = \"b\", linestyle= \"-\", linewidth = 1.5, label = \"Top 3 eigenvalue\")\nplt.plot(Epoch, Top_4_eigen_vals_of_DGM_1600, color = \"g\", linestyle= \"-\", linewidth = 1.5, label = \"Top 4 eigenvalue\")\nplt.plot(Epoch, Top_5_eigen_vals_of_DGM_1600, color = \"y\", linestyle= \"-\", linewidth = 1.5, label = \"Top 5 eigenvalue\")\nplt.plot(Epoch, Bottom_1_eigen_vals_of_DGM_1600, color = \"r\", linestyle= \"-.\", marker = \"+\", linewidth = 0.5, label = \"Bottom 1 eigenvalue\")\nplt.plot(Epoch, Bottom_2_eigen_vals_of_DGM_1600, color = \"k\", linestyle= \"-.\", marker = \"+\", linewidth = 0.5, label = \"Bottom 2 eigenvalue\")\nplt.plot(Epoch, Bottom_3_eigen_vals_of_DGM_1600, color = \"b\", linestyle= \"-.\", marker = \"+\", linewidth = 0.5, label = \"Bottom 3 eigenvalue\")\nplt.plot(Epoch, Bottom_4_eigen_vals_of_DGM_1600, color = \"g\", linestyle= \"-.\", marker = \"+\", linewidth = 0.5, label = \"Bottom 4 eigenvalue\")\nplt.plot(Epoch, Bottom_5_eigen_vals_of_DGM_1600, color = \"y\", linestyle= \"-.\", marker = \"+\", linewidth = 0.5, label = \"Bottom 5 eigenvalue\")\nplt.xlabel(\"epoch ( * 50)\")\nplt.ylabel(\"Top and bottom five eigenvalues of DGM (batch size = 1600)\")\nplt.legend(loc = 'upper left', bbox_to_anchor = (0.7, 0.9))\nplt.savefig(\"Eigenvalues_of_DGM_Batchsize_1600.eps\", dpi = 120)\nplt.show()",
"_____no_output_____"
],
[
"# plt.figure(figsize=(12,10))\n# plt.plot(Epoch, Top_1_eigen_vals_of_DGM_1600, color = \"r\", linestyle= \"-.\", marker = \".\", linewidth = 0.5, label = \"Top_1_eigen_vals\")\n# plt.plot(Epoch, Top_2_eigen_vals_of_DGM_1600, color = \"k\", linestyle= \"-.\", marker = \".\", linewidth = 0.5, label = \"Top_2_eigen_vals\")\n# plt.plot(Epoch, Top_3_eigen_vals_of_DGM_1600, color = \"b\", linestyle= \"-.\", marker = \".\", linewidth = 0.5, label = \"Top_3_eigen_vals\")\n# plt.plot(Epoch, Top_4_eigen_vals_of_DGM_1600, color = \"g\", linestyle= \"-.\", marker = \".\", linewidth = 0.5, label = \"Top_4_eigen_vals\")\n# plt.plot(Epoch, Top_5_eigen_vals_of_DGM_1600, color = \"y\", linestyle= \"-.\", marker = \".\", linewidth = 0.5, label = \"Top_5_eigen_vals\")\n# plt.plot(Epoch, Bottom_1_eigen_vals_of_DGM_1600, color = \"r\", linestyle= \"-.\", marker = \"+\", linewidth = 0.5, label = \"Bottom_1_eigen_vals\")\n# plt.plot(Epoch, Bottom_2_eigen_vals_of_DGM_1600, color = \"k\", linestyle= \"-.\", marker = \"+\", linewidth = 0.5, label = \"Bottom_2_eigen_vals\")\n# plt.plot(Epoch, Bottom_3_eigen_vals_of_DGM_1600, color = \"b\", linestyle= \"-.\", marker = \"+\", linewidth = 0.5, label = \"Bottom_3_eigen_vals\")\n# plt.plot(Epoch, Bottom_4_eigen_vals_of_DGM_1600, color = \"g\", linestyle= \"-.\", marker = \"+\", linewidth = 0.5, label = \"Bottom_4_eigen_vals\")\n# plt.plot(Epoch, Bottom_5_eigen_vals_of_DGM_1600, color = \"y\", linestyle= \"-.\", marker = \"+\", linewidth = 0.5, label = \"Bottom_5_eigen_vals\")\n# plt.xlabel(\"epoch ( * 50)\")\n# plt.ylabel(\"top and bottom five eigen values of DGM (grid size = 1600)\")\n# plt.legend(loc = 'upper left', bbox_to_anchor = (0.7, 0.95))\n# # plt.savefig(\"eigen_vals_of_DGM_1600.eps\", dpi = 120)\n# plt.show() ",
"_____no_output_____"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
ec68b0e7f06c65a50def2d9e9df9ea2290b0ea0e | 75,620 | ipynb | Jupyter Notebook | tutorials/pipelines/unstack_lstm_timeseries_classifier.ipynb | sarahmish/GreenGuard | 2c3588f4b6afa70c81ecda21c8ea3ede1b53ac3f | [
"MIT"
]
| 14 | 2020-10-08T19:10:49.000Z | 2021-10-04T18:40:56.000Z | tutorials/pipelines/unstack_lstm_timeseries_classifier.ipynb | sarahmish/GreenGuard | 2c3588f4b6afa70c81ecda21c8ea3ede1b53ac3f | [
"MIT"
]
| 16 | 2020-06-04T15:16:45.000Z | 2021-09-15T17:26:56.000Z | tutorials/pipelines/unstack_lstm_timeseries_classifier.ipynb | sarahmish/GreenGuard | 2c3588f4b6afa70c81ecda21c8ea3ede1b53ac3f | [
"MIT"
]
| 6 | 2020-07-17T14:09:05.000Z | 2021-09-14T21:06:47.000Z | 32.096774 | 94 | 0.410077 | [
[
[
"# unstack_lstm_timeseries_classifier",
"_____no_output_____"
]
],
[
[
"from greenguard.demo import load_demo\n\ntarget_times, readings = load_demo()",
"_____no_output_____"
],
[
"pipeline_name = 'classes.unstack_lstm_timeseries_classifier'",
"_____no_output_____"
],
[
"from greenguard.pipeline import GreenGuardPipeline\n\npipeline = GreenGuardPipeline(pipeline_name)",
"_____no_output_____"
],
[
"pipeline.template['primitives']",
"_____no_output_____"
]
],
[
[
"# Step by Step execution",
"_____no_output_____"
],
[
"## Input Data",
"_____no_output_____"
]
],
[
[
"readings.head()",
"_____no_output_____"
],
[
"target_times.head()",
"_____no_output_____"
]
],
[
[
"## Data Preparation (part of GreenGuard Pipeline)\n\n* Input: target_times, readings, turbines\n* Output: X, y, readings, turbines\n* Effect: target_times has been split into X and y",
"_____no_output_____"
],
[
"## pandas.DataFrame.resample\n\n* Input: readings\n* Output: readings (resampled)\n* Effect: readings have been resampled using the indicated resample rule, and turbine_id, signal_id and timestamp have been set as a multi-index",
"_____no_output_____"
]
],
[
[
"context = pipeline.fit(target_times, readings, output_=0)",
"_____no_output_____"
],
[
"context.keys()",
"_____no_output_____"
],
[
"context['readings'].head()",
"_____no_output_____"
]
],
[
[
"## pandas.DataFrame.unstack\n\n* Input: readings (resampled)\n* Output: readings (unstacked)\n* Effect: readings have been unstacked",
"_____no_output_____"
]
],
[
[
"step = 1\ncontext = pipeline.fit(**context, output_=step, start_=step)",
"_____no_output_____"
],
[
"context.keys()",
"_____no_output_____"
],
[
"context['readings'].head()",
"_____no_output_____"
]
],
[
[
"## pandas.DataFrame.pop\n\n* Input: readings (unstacked)\n* Output: readings (without turbine_id), turbine_id\n* Effect: turbine_id has been popped from readings",
"_____no_output_____"
]
],
[
[
"step = 2\ncontext = pipeline.fit(**context, output_=step, start_=step)",
"_____no_output_____"
],
[
"context.keys()",
"_____no_output_____"
],
[
"context['turbine_id'].head()",
"_____no_output_____"
],
[
"context['readings'].head()",
"_____no_output_____"
]
],
[
[
"## pandas.DataFrame.pop\n\n* Input: readings (without turbine_id)\n* Output: readings (without timestamp), timestamp\n* Effect: timestamp has been popped from readings",
"_____no_output_____"
]
],
[
[
"step = 3\ncontext = pipeline.fit(**context, output_=step, start_=step)",
"_____no_output_____"
],
[
"context.keys()",
"_____no_output_____"
],
[
"context['timestamp'].head()",
"_____no_output_____"
],
[
"context['readings'].head()",
"_____no_output_____"
]
],
[
[
"## sklearn.impute.SimpleImputer\n\n* Input: readings (unstacked, no turbine_id, no timestamp)\n* Output: readings (imputed, numpy array)\n* Effect: readings have been imputed and converted to numpy array",
"_____no_output_____"
]
],
[
[
"step = 4\ncontext = pipeline.fit(**context, output_=step, start_=step)",
"_____no_output_____"
],
[
"context.keys()",
"_____no_output_____"
],
[
"context['readings'][0:5]",
"_____no_output_____"
]
],
[
[
"## sklearn.preprocessing.MinMaxScaler\n\n* Input: readings (imputed, array)\n* Output: readings (scaled, array)\n* Effect: readings have been scaled to the [-1, 1] range",
"_____no_output_____"
]
],
[
[
"step = 5\ncontext = pipeline.fit(**context, output_=step, start_=step)",
"_____no_output_____"
],
[
"context.keys()",
"_____no_output_____"
],
[
"context['readings'][0:5]",
"_____no_output_____"
]
],
[
[
"## pandas.DataFrame\n\n* Input: readings (scaled, array)\n* Output: readings (dataframe)\n* Effect: readings have been converted into a dataframe",
"_____no_output_____"
]
],
[
[
"step = 6\ncontext = pipeline.fit(**context, output_=step, start_=step)",
"_____no_output_____"
],
[
"context.keys()",
"_____no_output_____"
],
[
"context['readings'].head()",
"_____no_output_____"
]
],
[
[
"## pandas.DataFrame.set\n\n* Input: readings (dataframe)\n* Output: readings (dataframe with turbine_id)\n* Effect: turbine_id has been set as a readings column",
"_____no_output_____"
]
],
[
[
"step = 7\ncontext = pipeline.fit(**context, output_=step, start_=step)",
"_____no_output_____"
],
[
"context.keys()",
"_____no_output_____"
],
[
"context['readings'].head()",
"_____no_output_____"
]
],
[
[
"## pandas.DataFrame.set\n\n* Input: readings (dataframe with turbine_id)\n* Output: readings (dataframe with turbine_id and timestamp)\n* Effect: timestamp has been set as a readings column",
"_____no_output_____"
]
],
[
[
"step = 8\ncontext = pipeline.fit(**context, output_=step, start_=step)",
"_____no_output_____"
],
[
"context.keys()",
"_____no_output_____"
],
[
"context['readings'].head()",
"_____no_output_____"
]
],
[
[
"## mlprimitives.custom.timeseries_preprocessing.cutoff_window_sequences\n\n* Input: X, readings (dataframe with turbine_id and timestamp)\n* Output: X\n* Effect: X has been converted to a 3d numpy array that contains 1 matrix of shape\n (window_size x num_signals) for each one of the target times.",
"_____no_output_____"
]
],
[
[
"pipeline._pipeline.get_hyperparameters()[\n 'mlprimitives.custom.timeseries_preprocessing.cutoff_window_sequences#1']",
"_____no_output_____"
],
[
"step = 9\ncontext = pipeline.fit(**context, output_=step, start_=step)",
"_____no_output_____"
],
[
"context.keys()",
"_____no_output_____"
],
[
"context['readings'].shape",
"_____no_output_____"
],
[
"context['y'].shape",
"_____no_output_____"
],
[
"context['X'].shape",
"_____no_output_____"
],
[
"context['X'][0][:3]",
"_____no_output_____"
]
],
[
[
"## keras.Sequential.LSTMTimeSeriesClassifier\n\n* Input: X, y\n* Output: (none)\n* Effect: the LSTM classifier has been fitted.",
"_____no_output_____"
]
],
[
[
"step = 10\ncontext = pipeline.fit(**context, output_=step, start_=step)",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
]
|
ec68b1a2183815659014322ec3acf3e3a6713aed | 132,684 | ipynb | Jupyter Notebook | pymaceuticals_starter.ipynb | kborig89/matplotlib-challenge | 707af3e8b8bc95868b4498290a4e1655639400f5 | [
"ADSL"
]
| null | null | null | pymaceuticals_starter.ipynb | kborig89/matplotlib-challenge | 707af3e8b8bc95868b4498290a4e1655639400f5 | [
"ADSL"
]
| null | null | null | pymaceuticals_starter.ipynb | kborig89/matplotlib-challenge | 707af3e8b8bc95868b4498290a4e1655639400f5 | [
"ADSL"
]
| null | null | null | 94.504274 | 17,216 | 0.781353 | [
[
[
"## Observations and Insights ",
"_____no_output_____"
]
],
[
[
"# Dependencies and Setup\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport scipy.stats as stats\nimport numpy as np\n\n\n# Study data files\nmouse_metadata_path = \"data/Mouse_metadata.csv\"\nstudy_results_path = \"data/Study_results.csv\"\n\n# Read the mouse data and the study results\nmouse_metadata = pd.read_csv(mouse_metadata_path)\nstudy_results = pd.read_csv(study_results_path)\n\n\n# Combine the data into a single dataset\nmouse_study_df=pd.merge(mouse_metadata,study_results, on=\"Mouse ID\")\n\n# Display the data table for preview\nmouse_study_df.head()\n",
"_____no_output_____"
],
[
"# Checking the number of mice.\nmice_unique_df=mouse_study_df.nunique()[\"Mouse ID\"]\nmice_unique_df\n",
"_____no_output_____"
],
[
"# Getting the duplicate mice by ID number that shows up for Mouse ID and Timepoint. \nduplicate_mouse=mouse_study_df.loc[mouse_study_df.duplicated(subset=[\"Mouse ID\",\"Timepoint\"])]\n\nduplicate_mouse\n\n",
"_____no_output_____"
],
[
"# Optional: Get all the data for the duplicate mouse ID. \nduplicate_mouse\n",
"_____no_output_____"
],
[
"# Create a clean DataFrame by dropping the duplicate mouse by its ID.\nclean_mouse=mouse_study_df.set_index(\"Mouse ID\")\nclean_mouse_df=clean_mouse.drop(index = \"g989\")\ncleaner_mouse_df=clean_mouse_df.reset_index()\ncleaner_mouse_df\n",
"_____no_output_____"
],
[
"# Checking the number of mice in the clean DataFrame.\ncleaner_mouse_unique_df=cleaner_mouse_df.nunique()[\"Mouse ID\"]\ncleaner_mouse_unique_df\n\n",
"_____no_output_____"
]
],
[
[
"## Summary Statistics",
"_____no_output_____"
]
],
[
[
"drug_mouse=mouse_study_df[\"Drug Regimen\"].value_counts()\ndrug_mouse",
"_____no_output_____"
],
[
"# Generate a summary statistics table of mean, median, variance, standard deviation,\n# and SEM of the tumor volume for each regimen\n# This method is the most straightforward, creating multiple series and putting them all together at the end.\n\ntumor_mean=mouse_study_df.groupby(\"Drug Regimen\").mean()[\"Tumor Volume (mm3)\"]\ntumor_median=mouse_study_df.groupby(\"Drug Regimen\").median()[\"Tumor Volume (mm3)\"]\ntumor_var=mouse_study_df.groupby(\"Drug Regimen\").var()[\"Tumor Volume (mm3)\"]\ntumor_sd=mouse_study_df.groupby(\"Drug Regimen\").std()[\"Tumor Volume (mm3)\"]\ntumor_sem=mouse_study_df.groupby(\"Drug Regimen\").sem()[\"Tumor Volume (mm3)\"]\n\nsummary_regimen_df=pd.DataFrame({\"Mean\":tumor_mean, \"Median\":tumor_median, \"Variance\":tumor_var,\n \"Standard Deviation\":tumor_sd, \"SEM\":tumor_sem})\nsummary_regimen_df\n\n",
"_____no_output_____"
],
[
"# Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen\n\n# This method produces everything in a single groupby function\nregimen_group=mouse_study_df.groupby([\"Drug Regimen\"])\nsummary2_regimen=regimen_group.agg({\"Tumor Volume (mm3)\":[\"mean\",\"median\",\"var\",\"std\",\"sem\"]})\nsummary2_regimen\n\n",
"_____no_output_____"
]
],
[
[
"## Bar and Pie Charts",
"_____no_output_____"
]
],
[
[
"# Generate a bar plot showing the total number of mice for each treatment \n# throughout the course of the study using pandas. \nfig_1=plt.figure()\ndrug=cleaner_mouse_df[\"Drug Regimen\"]\ndrug_mouse.plot.bar(drug_mouse, color=\"green\")\nplt.xlabel('Drug Regimen')\nplt.ylabel('Number of Mice')\nplt.show()\n",
"_____no_output_____"
],
[
"# Generate a bar plot showing the total number of mice for each treatment \n# throughout the course of the study using pyplot.\nfig_2=plt.figure()\ndrug=cleaner_mouse_df[\"Drug Regimen\"]\nplt.bar(drug_mouse.index.values,drug_mouse.values)\nplt.xticks(rotation=90)\nplt.xlabel('Drug Regimen')\nplt.ylabel('Number of Mice')\nplt.show()\n\n\n",
"_____no_output_____"
],
[
"# Generate a pie plot showing the distribution of female versus male mice using pandas\n\nsex_mouse=mouse_study_df[\"Sex\"].value_counts()\nsex_mouse\nsex_mouse.plot.pie(y=\"Sex\",autopct='%1.1f%%')\nsex_mouse",
"_____no_output_____"
],
[
"# Generate a pie plot showing the distribution of female versus male mice using pyplot\nsex_mouse=mouse_study_df[\"Sex\"].value_counts()\nsex_mouse\nplt.pie(sex_mouse.values,labels=sex_mouse.index.values,autopct='%1.1f%%')\nplt.show()\n",
"_____no_output_____"
]
],
[
[
"## Quartiles, Outliers and Boxplots",
"_____no_output_____"
]
],
[
[
"# Calculate the final tumor volume of each mouse across four of the treatment regimens: \n# Capomulin, Ramicane, Infubinol, and Ceftamin\ntreatment_list=[\"Capomulin\", \"Ramicane\", \"Infubinol\", \"Ceftamin\"]\n\n\n# Start by getting the last (greatest) timepoint for each mouse\nmax_tumor=cleaner_mouse_df.groupby([\"Mouse ID\"])[\"Timepoint\"].max()\nmax_tumor=max_tumor.reset_index()\n\n# Merge this group df with the original dataframe to get the tumor volume at the last timepoint\nmergedata=max_tumor.merge(cleaner_mouse_df, on=[\"Mouse ID\", \"Timepoint\"],how=\"left\")",
"_____no_output_____"
],
[
"# Put treatments into a list for for loop (and later for plot labels)\ntreatment_list=[\"Capomulin\", \"Ramicane\", \"Infubinol\", \"Ceftamin\"]\n\n# Create empty list to fill with tumor vol data (for plotting)\ntumor_vol_data=[]\n\n# Calculate the IQR and quantitatively determine if there are any potential outliers. \nfor drug in treatment_list:\n \n # Locate the rows which contain mice on each drug and get the tumor volumes \n tumor=mergedata.loc[mergedata[\"Drug Regimen\"]== drug,\"Tumor Volume (mm3)\"]\n \n # add subset\n tumor_vol_data.append(tumor)\n\n # Determine outliers using upper and lower bounds \n quartiles = tumor.quantile([.25,.5,.75])\n lowerq = quartiles[0.25]\n upperq = quartiles[0.75]\n iqr = upperq-lowerq\n \n lower_bound = lowerq - (1.5*iqr)\n upper_bound = upperq + (1.5*iqr)\n outliers=tumor.loc[(tumor<lower_bound)|(tumor>upper_bound)]\n print(f\"{drug}'s Potential outliers: {outliers}\")\n ",
"Capomulin's Potential outliers: Series([], Name: Tumor Volume (mm3), dtype: float64)\nRamicane's Potential outliers: Series([], Name: Tumor Volume (mm3), dtype: float64)\nInfubinol's Potential outliers: 31 36.321346\nName: Tumor Volume (mm3), dtype: float64\nCeftamin's Potential outliers: Series([], Name: Tumor Volume (mm3), dtype: float64)\n"
],
[
"# Generate a box plot of the final tumor volume of each mouse across four regimens of interest\n\nfig1,ax1=plt.subplots()\nax1.boxplot(tumor_vol_data,labels=treatment_list)\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Line and Scatter Plots",
"_____no_output_____"
]
],
[
[
"# Generate a line plot of time point versus tumor volume for a mouse treated with Capomulin\n\ncapomulin=mouse_study_df.loc[mouse_study_df[\"Drug Regimen\"] == \"Capomulin\",:]\njusta_mouse=capomulin.loc[capomulin[\"Mouse ID\"] == \"m601\",:]\nx_axis= justa_mouse[\"Timepoint\"]\ny_axis = justa_mouse[\"Tumor Volume (mm3)\"]\n\nplt.title(\"Capomulin mouse m601\")\nplt.plot(x_axis, y_axis, color=\"purple\")\nplt.xlabel(\"Timepoint\")\nplt.ylabel(\"Tumor Volume (mm3)\")\n\nplt.show()\n",
"_____no_output_____"
],
[
"# Generate a scatter plot of mouse weight versus average tumor volume for the Capomulin regimen\n\ncapomulin=mouse_study_df.loc[mouse_study_df[\"Drug Regimen\"] == \"Capomulin\",:]\ncapomulinavg=capomulin.groupby([\"Mouse ID\"]).mean()\n\n\nplt.scatter(capomulinavg[\"Weight (g)\"],capomulinavg[\"Tumor Volume (mm3)\"])\nplt.xlabel(\"Mouse Weight (g)\")\nplt.ylabel(\"Tumor Volume (mm3)\")\n\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Correlation and Regression",
"_____no_output_____"
]
],
[
[
"# Calculate the correlation coefficient and linear regression model \n# for mouse weight and average tumor volume for the Capomulin regimen\ncapomulin=mouse_study_df.loc[mouse_study_df[\"Drug Regimen\"] == \"Capomulin\",:]\ncapomulinavg=capomulin.groupby([\"Mouse ID\"]).mean()\n\n\nplt.scatter(capomulinavg[\"Weight (g)\"],capomulinavg[\"Tumor Volume (mm3)\"])\nplt.xlabel(\"Mouse Weight (g)\")\nplt.ylabel(\"Tumor Volume (mm3)\")\n\n\n(slope, intercept, rvalue, pvalue, stderr)= stats.linregress(capomulinavg[\"Weight (g)\"],capomulinavg[\"Tumor Volume (mm3)\"])\nregress_values= capomulinavg[\"Weight (g)\"] * slope + intercept\nline_eq=\"y = \" + str(round(slope,2)) + \"x +\" +str(round(intercept,2))\n\nplt.plot(capomulinavg[\"Weight (g)\"],regress_values,\"r-\")\n\n\n\n\n\nplt.show()\n\n\n",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
]
|
ec68c0abaaada3fb7733e8ab76e135009f8fc4c4 | 656,721 | ipynb | Jupyter Notebook | word2vec-embeddings/Negative_Sampling_Exercise.ipynb | sclark2006/dl-nanodegree-exercises | 45cd7ba98df03a6a4b2e0cecbbd5d9839a5a7736 | [
"MIT"
]
| 2 | 2021-02-07T19:32:37.000Z | 2021-03-16T17:18:38.000Z | word2vec-embeddings/Negative_Sampling_Exercise.ipynb | sclark2006/dl-nanodegree-exercises | 45cd7ba98df03a6a4b2e0cecbbd5d9839a5a7736 | [
"MIT"
]
| null | null | null | word2vec-embeddings/Negative_Sampling_Exercise.ipynb | sclark2006/dl-nanodegree-exercises | 45cd7ba98df03a6a4b2e0cecbbd5d9839a5a7736 | [
"MIT"
]
| null | null | null | 483.594256 | 596,368 | 0.931126 | [
[
[
"# Skip-gram Word2Vec\n\nIn this notebook, I'll lead you through using PyTorch to implement the [Word2Vec algorithm](https://en.wikipedia.org/wiki/Word2vec) using the skip-gram architecture. By implementing this, you'll learn about embedding words for use in natural language processing. This will come in handy when dealing with things like machine translation.\n\n## Readings\n\nHere are the resources I used to build this notebook. I suggest reading these either beforehand or while you're working on this material.\n\n* A really good [conceptual overview](http://mccormickml.com/2016/04/19/word2vec-tutorial-the-skip-gram-model/) of Word2Vec from Chris McCormick \n* [First Word2Vec paper](https://arxiv.org/pdf/1301.3781.pdf) from Mikolov et al.\n* [Neural Information Processing Systems paper](http://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf) with improvements for Word2Vec, also from Mikolov et al.\n\n---\n## Word embeddings\n\nWhen you're dealing with words in text, you end up with tens of thousands of word classes to analyze; one for each word in a vocabulary. Trying to one-hot encode these words is massively inefficient because most values in a one-hot vector will be set to zero. So, the matrix multiplication that happens between a one-hot input vector and the first hidden layer will result in mostly zero-valued hidden outputs.\n\nTo solve this problem and greatly increase the efficiency of our networks, we use what are called **embeddings**. Embeddings are just a fully connected layer like you've seen before. We call this layer the embedding layer and the weights are embedding weights. We skip the multiplication into the embedding layer by instead directly grabbing the hidden layer values from the weight matrix. We can do this because the multiplication of a one-hot encoded vector with a matrix returns the row of the matrix corresponding to the index of the \"on\" input unit.\n\n<img src='assets/lookup_matrix.png' width=50%>\n\nInstead of doing the matrix multiplication, we use the weight matrix as a lookup table. We encode the words as integers, for example \"heart\" is encoded as 958, \"mind\" as 18094. Then to get hidden layer values for \"heart\", you just take the 958th row of the embedding matrix. This process is called an **embedding lookup** and the number of hidden units is the **embedding dimension**.\n\nThere is nothing magical going on here. The embedding lookup table is just a weight matrix. The embedding layer is just a hidden layer. The lookup is just a shortcut for the matrix multiplication. The lookup table is trained just like any weight matrix. (A short code sketch of this lookup appears a few cells below.)\n\nEmbeddings aren't only used for words of course. You can use them for any model where you have a massive number of classes. A particular type of model called **Word2Vec** uses the embedding layer to find vector representations of words that contain semantic meaning.",
"_____no_output_____"
],
[
"---\n## Word2Vec\n\nThe Word2Vec algorithm finds much more efficient representations by finding vectors that represent the words. These vectors also contain semantic information about the words.\n\n<img src=\"assets/context_drink.png\" width=40%>\n\nWords that show up in similar **contexts**, such as \"coffee\", \"tea\", and \"water\" will have vectors near each other. Different words will be further away from one another, and relationships can be represented by distance in vector space.\n\n\nThere are two architectures for implementing Word2Vec:\n>* CBOW (Continuous Bag-Of-Words) and \n* Skip-gram\n\n<img src=\"assets/word2vec_architectures.png\" width=60%>\n\nIn this implementation, we'll be using the **skip-gram architecture** with **negative sampling** because it performs better than CBOW and trains faster with negative sampling. Here, we pass in a word and try to predict the words surrounding it in the text. In this way, we can train the network to learn representations for words that show up in similar contexts.",
"_____no_output_____"
],
[
"---\n## Loading Data\n\nNext, we'll ask you to load in data and place it in the `data` directory\n\n1. Load the [text8 dataset](https://s3.amazonaws.com/video.udacity-data.com/topher/2018/October/5bbe6499_text8/text8.zip); a file of cleaned up *Wikipedia article text* from Matt Mahoney. \n2. Place that data in the `data` folder in the home directory.\n3. Then you can extract it and delete the archive, zip file to save storage space.\n\nAfter following these steps, you should have one file in your data directory: `data/text8`.",
"_____no_output_____"
]
],
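[
[
"# Hedged illustrative sketch (not part of the original notebook): the embedding lookup\n# described in the introduction above is just row selection from the embedding weight\n# matrix, so no one-hot matrix multiplication is needed. The vocabulary size, embedding\n# size and word id here are made-up toy values.\nimport torch\nfrom torch import nn\n\ntoy_embed = nn.Embedding(num_embeddings=10, embedding_dim=4)\n\nword_id = torch.LongTensor([3])\nlookup = toy_embed(word_id) # embedding lookup for word id 3\nrow = toy_embed.weight[3].unsqueeze(0) # same row taken directly from the weight matrix\n\nprint(torch.allclose(lookup, row)) # expected: True",
"_____no_output_____"
]
],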
[
[
"# read in the extracted text file \nwith open('data/text8') as f:\n text = f.read()\n\n# print out the first 100 characters\nprint(text[:100])",
" anarchism originated as a term of abuse first used against early working class radicals including t\n"
]
],
[
[
"## Pre-processing\n\nHere I'm fixing up the text to make training easier. This comes from the `utils.py` file. The `preprocess` function does a few things:\n>* It converts any punctuation into tokens, so a period is changed to ` <PERIOD> `. In this data set, there aren't any periods, but it will help in other NLP problems. \n* It removes all words that show up five or *fewer* times in the dataset. This will greatly reduce issues due to noise in the data and improve the quality of the vector representations. \n* It returns a list of words in the text.\n\nThis may take a few seconds to run, since our text file is quite large. If you want to write your own functions for this stuff, go for it!",
"_____no_output_____"
]
],
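[
[
"# Hedged sketch of what a preprocess function could look like if you write your own,\n# as suggested above: punctuation replaced by tokens and words that appear five or\n# fewer times dropped. This is an illustrative stand-in and may differ from the\n# provided utils.preprocess.\nfrom collections import Counter\n\ndef simple_preprocess(raw_text):\n    raw_text = raw_text.lower()\n    raw_text = raw_text.replace('.', ' <PERIOD> ')\n    raw_text = raw_text.replace(',', ' <COMMA> ')\n    raw_text = raw_text.replace('?', ' <QUESTION_MARK> ')\n    tokens = raw_text.split()\n    counts = Counter(tokens)\n    return [token for token in tokens if counts[token] > 5]",
"_____no_output_____"
]
],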
[
[
"import utils\n\n# get list of words\nwords = utils.preprocess(text)\nprint(words[:30])",
"['anarchism', 'originated', 'as', 'a', 'term', 'of', 'abuse', 'first', 'used', 'against', 'early', 'working', 'class', 'radicals', 'including', 'the', 'diggers', 'of', 'the', 'english', 'revolution', 'and', 'the', 'sans', 'culottes', 'of', 'the', 'french', 'revolution', 'whilst']\n"
],
[
"# print some stats about this word data\nprint(\"Total words in text: {}\".format(len(words)))\nprint(\"Unique words: {}\".format(len(set(words)))) # `set` removes any duplicate words",
"Total words in text: 16680599\nUnique words: 63641\n"
]
],
[
[
"### Dictionaries\n\nNext, I'm creating two dictionaries to convert words to integers and back again (integers to words). This is again done with a function in the `utils.py` file. `create_lookup_tables` takes in a list of words in a text and returns two dictionaries.\n>* The integers are assigned in descending frequency order, so the most frequent word (\"the\") is given the integer 0 and the next most frequent is 1, and so on. \n\nOnce we have our dictionaries, the words are converted to integers and stored in the list `int_words`.",
"_____no_output_____"
]
],
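[
[
"# Hedged sketch of the lookup-table idea described above (an illustrative stand-in,\n# not the actual utils.create_lookup_tables): words are ranked by frequency so that\n# the most frequent word gets id 0.\nfrom collections import Counter\n\ndef simple_lookup_tables(word_list):\n    counts = Counter(word_list)\n    sorted_vocab = sorted(counts, key=counts.get, reverse=True)\n    int_to_vocab_demo = {ii: word for ii, word in enumerate(sorted_vocab)}\n    vocab_to_int_demo = {word: ii for ii, word in int_to_vocab_demo.items()}\n    return vocab_to_int_demo, int_to_vocab_demo\n\ndemo_v2i, demo_i2v = simple_lookup_tables(['the', 'cat', 'the', 'dog', 'the', 'cat'])\nprint(demo_v2i['the'], demo_v2i['cat']) # 0 1 -- most frequent words get the lowest ids",
"_____no_output_____"
]
],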
[
[
"vocab_to_int, int_to_vocab = utils.create_lookup_tables(words)\nint_words = [vocab_to_int[word] for word in words]\n\nprint(int_words[:30])",
"[5233, 3080, 11, 5, 194, 1, 3133, 45, 58, 155, 127, 741, 476, 10571, 133, 0, 27349, 1, 0, 102, 854, 2, 0, 15067, 58112, 1, 0, 150, 854, 3580]\n"
]
],
[
[
"## Subsampling\n\nWords that show up often such as \"the\", \"of\", and \"for\" don't provide much context to the nearby words. If we discard some of them, we can remove some of the noise from our data and in return get faster training and better representations. This process is called subsampling by Mikolov. For each word $w_i$ in the training set, we'll discard it with probability given by \n\n$$ P(w_i) = 1 - \\sqrt{\\frac{t}{f(w_i)}} $$\n\nwhere $t$ is a threshold parameter and $f(w_i)$ is the frequency of word $w_i$ in the total dataset.\n\n> Implement subsampling for the words in `int_words`. That is, go through `int_words` and discard each word given the probablility $P(w_i)$ shown above. Note that $P(w_i)$ is the probability that a word is discarded. Assign the subsampled data to `train_words`.",
"_____no_output_____"
]
],
[
[
"from collections import Counter\nimport random\nimport numpy as np\n\nthreshold = 1e-5\nword_counts = Counter(int_words)\n#print(list(word_counts.items())[0]) # dictionary of int_words, how many times they appear\n\ntotal_count = len(int_words)\nfreqs = {word: count/total_count for word, count in word_counts.items()}\np_drop = {word: 1 - np.sqrt(threshold/freqs[word]) for word in word_counts}\n# discard some frequent words, according to the subsampling equation\n# create a new list of words for training\ntrain_words = [word for word in int_words if random.random() < (1 - p_drop[word])]\n\nprint(train_words[:30])",
"[5233, 194, 10571, 27349, 854, 15067, 58112, 190, 10712, 1324, 2731, 6, 708, 2757, 7088, 5233, 44611, 2877, 2621, 8983, 4147, 6437, 36, 1137, 7573, 93, 11064, 7088, 89, 270]\n"
]
],
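[
[
"# Quick numeric sanity check of the subsampling formula above (illustrative\n# frequencies, not taken from the corpus): very frequent words are dropped with\n# probability close to 1, while words whose frequency is near the threshold are\n# essentially never dropped.\nimport numpy as np\n\nt = 1e-5\nfor f in (0.05, 0.001, 1e-5):\n    print(f, round(1 - np.sqrt(t / f), 4))\n# expected roughly: 0.9859, 0.9, 0.0",
"_____no_output_____"
]
],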
[
[
"## Making batches",
"_____no_output_____"
],
[
"Now that our data is in good shape, we need to get it into the proper form to pass it into our network. With the skip-gram architecture, for each word in the text, we want to define a surrounding _context_ and grab all the words in a window around that word, with size $C$. \n\nFrom [Mikolov et al.](https://arxiv.org/pdf/1301.3781.pdf): \n\n\"Since the more distant words are usually less related to the current word than those close to it, we give less weight to the distant words by sampling less from those words in our training examples... If we choose $C = 5$, for each training word we will select randomly a number $R$ in range $[ 1: C ]$, and then use $R$ words from history and $R$ words from the future of the current word as correct labels.\"\n\n> **Exercise:** Implement a function `get_target` that receives a list of words, an index, and a window size, then returns a list of words in the window around the index. Make sure to use the algorithm described above, where you choose a random number of words from the window.\n\nSay we have an input and we're interested in the idx=2 token, `741`: \n```\n[5233, 58, 741, 10571, 27349, 0, 15067, 58112, 3580, 58, 10712]\n```\n\nFor `R=2`, `get_target` should return a list of four values:\n```\n[5233, 58, 10571, 27349]\n```",
"_____no_output_____"
]
],
[
[
"def get_target(words, idx, window_size=5):\n ''' Get a list of words in a window around an index. '''\n \n R = np.random.randint(1, window_size+1)\n start = idx - R if (idx - R) > 0 else 0\n stop = idx + R\n target_words = words[start:idx] + words[idx+1:stop+1]\n \n return list(target_words)",
"_____no_output_____"
],
[
"# test your code!\n\n# run this cell multiple times to check for random window selection\nint_text = [i for i in range(10)]\nprint('Input: ', int_text)\nidx=5 # word index of interest\n\ntarget = get_target(int_text, idx=idx, window_size=5)\nprint('Target: ', target) # you should get some indices around the idx",
"Input: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\nTarget: [0, 1, 2, 3, 4, 6, 7, 8, 9]\n"
]
],
[
[
"### Generating Batches \n\nHere's a generator function that returns batches of input and target data for our model, using the `get_target` function from above. The idea is that it grabs `batch_size` words from a words list. Then for each of those batches, it gets the target words in a window.",
"_____no_output_____"
]
],
[
[
"def get_batches(words, batch_size, window_size=5):\n ''' Create a generator of word batches as a tuple (inputs, targets) '''\n \n n_batches = len(words)//batch_size\n \n # only full batches\n words = words[:n_batches*batch_size]\n \n for idx in range(0, len(words), batch_size):\n x, y = [], []\n batch = words[idx:idx+batch_size]\n for ii in range(len(batch)):\n batch_x = batch[ii]\n batch_y = get_target(batch, ii, window_size)\n y.extend(batch_y)\n x.extend([batch_x]*len(batch_y))\n yield x, y\n ",
"_____no_output_____"
],
[
"int_text = [i for i in range(20)]\nx,y = next(get_batches(int_text, batch_size=4, window_size=5))\n\nprint('x\\n', x)\nprint('y\\n', y)",
"x\n [0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 3]\ny\n [1, 2, 3, 0, 2, 3, 1, 3, 0, 1, 2]\n"
]
],
[
[
"---\n## Validation\n\nHere, I'm creating a function that will help us observe our model as it learns. We're going to choose a few common words and a few uncommon words. Then, we'll print out the closest words to them using the cosine similarity: \n\n<img src=\"assets/two_vectors.png\" width=30%>\n\n$$\n\\mathrm{similarity} = \\cos(\\theta) = \\frac{\\vec{a} \\cdot \\vec{b}}{|\\vec{a}||\\vec{b}|}\n$$\n\n\nWe can encode the validation words as vectors $\\vec{a}$ using the embedding table, then calculate the similarity with each word vector $\\vec{b}$ in the embedding table. With the similarities, we can print out the validation words and the words in our embedding table that are semantically similar to those words. It's a nice way to check that our embedding table is grouping together words with similar semantic meanings.",
"_____no_output_____"
]
],
[
[
"def cosine_similarity(embedding, valid_size=16, valid_window=100, device='cpu'):\n \"\"\" Returns the cosine similarity of validation words with words in the embedding matrix.\n Here, embedding should be a PyTorch embedding module.\n \"\"\"\n \n # Here we're calculating the cosine similarity between some random words and \n # our embedding vectors. With the similarities, we can look at what words are\n # close to our random words.\n \n # sim = (a . b) / |a||b|\n \n embed_vectors = embedding.weight\n \n # magnitude of embedding vectors, |b|\n magnitudes = embed_vectors.pow(2).sum(dim=1).sqrt().unsqueeze(0)\n \n # pick N words from our ranges (0,window) and (1000,1000+window). lower id implies more frequent \n valid_examples = np.array(random.sample(range(valid_window), valid_size//2))\n valid_examples = np.append(valid_examples,\n random.sample(range(1000,1000+valid_window), valid_size//2))\n valid_examples = torch.LongTensor(valid_examples).to(device)\n \n valid_vectors = embedding(valid_examples)\n similarities = torch.mm(valid_vectors, embed_vectors.t())/magnitudes\n \n return valid_examples, similarities",
"_____no_output_____"
]
],
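[
[
"# Hedged cross-check of the similarity formula above: the manual computation should\n# match torch.nn.functional.cosine_similarity on random vectors (the 300-dim size\n# is arbitrary).\nimport torch\nimport torch.nn.functional as F\n\na = torch.randn(1, 300)\nb = torch.randn(1, 300)\n\nmanual = (a * b).sum() / (a.norm() * b.norm())\nbuiltin = F.cosine_similarity(a, b, dim=1).squeeze()\n\nprint(torch.allclose(manual, builtin, atol=1e-6)) # expected: True (up to floating point)",
"_____no_output_____"
]
],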
[
[
"---\n# SkipGram model\n\nDefine and train the SkipGram model. \n> You'll need to define an [embedding layer](https://pytorch.org/docs/stable/nn.html#embedding) and a final, softmax output layer.\n\nAn Embedding layer takes in a number of inputs, importantly:\n* **num_embeddings** – the size of the dictionary of embeddings, or how many rows you'll want in the embedding weight matrix\n* **embedding_dim** – the size of each embedding vector; the embedding dimension\n\nBelow is an approximate diagram of the general structure of our network.\n<img src=\"assets/skip_gram_arch.png\" width=60%>\n\n>* The input words are passed in as batches of input word tokens. \n* This will go into a hidden layer of linear units (our embedding layer). \n* Then, finally into a softmax output layer. \n\nWe'll use the softmax layer to make a prediction about the context words by sampling, as usual.",
"_____no_output_____"
],
[
"---\n## Negative Sampling\n\nFor every example we give the network, we train it using the output from the softmax layer. That means for each input, we're making very small changes to millions of weights even though we only have one true example. This makes training the network very inefficient. We can approximate the loss from the softmax layer by only updating a small subset of all the weights at once. We'll update the weights for the correct example, but only a small number of incorrect, or noise, examples. This is called [\"negative sampling\"](http://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf). \n\nThere are two modifications we need to make. First, since we're not taking the softmax output over all the words, we're really only concerned with one output word at a time. Similar to how we use an embedding table to map the input word to the hidden layer, we can now use another embedding table to map the hidden layer to the output word. Now we have two embedding layers, one for input words and one for output words. Secondly, we use a modified loss function where we only care about the true example and a small subset of noise examples.\n\n$$\n- \\large \\log{\\sigma\\left(u_{w_O}\\hspace{0.001em}^\\top v_{w_I}\\right)} -\n\\sum_i^N \\mathbb{E}_{w_i \\sim P_n(w)}\\log{\\sigma\\left(-u_{w_i}\\hspace{0.001em}^\\top v_{w_I}\\right)}\n$$\n\nThis is a little complicated so I'll go through it bit by bit. $u_{w_O}\\hspace{0.001em}^\\top$ is the embedding vector for our \"output\" target word (transposed, that's the $^\\top$ symbol) and $v_{w_I}$ is the embedding vector for the \"input\" word. Then the first term \n\n$$\\large \\log{\\sigma\\left(u_{w_O}\\hspace{0.001em}^\\top v_{w_I}\\right)}$$\n\nsays we take the log-sigmoid of the inner product of the output word vector and the input word vector. Now the second term, let's first look at \n\n$$\\large \\sum_i^N \\mathbb{E}_{w_i \\sim P_n(w)}$$ \n\nThis means we're going to take a sum over words $w_i$ drawn from a noise distribution $w_i \\sim P_n(w)$. The noise distribution is basically our vocabulary of words that aren't in the context of our input word. In effect, we can randomly sample words from our vocabulary to get these words. $P_n(w)$ is an arbitrary probability distribution though, which means we get to decide how to weight the words that we're sampling. This could be a uniform distribution, where we sample all words with equal probability. Or it could be according to the frequency that each word shows up in our text corpus, the unigram distribution $U(w)$. The authors found the best distribution to be $U(w)^{3/4}$, empirically. \n\nFinally, in \n\n$$\\large \\log{\\sigma\\left(-u_{w_i}\\hspace{0.001em}^\\top v_{w_I}\\right)},$$ \n\nwe take the log-sigmoid of the negated inner product of a noise vector with the input vector. \n\n<img src=\"assets/neg_sampling_loss.png\" width=50%>\n\nTo give you an intuition for what we're doing here, remember that the sigmoid function returns a probability between 0 and 1. The first term in the loss pushes the probability that our network will predict the correct word $w_O$ towards 1. In the second term, since we are negating the sigmoid input, we're pushing the probabilities of the noise words towards 0.",
"_____no_output_____"
]
],
[
[
"import torch\nfrom torch import nn\nimport torch.optim as optim",
"_____no_output_____"
],
[
"class SkipGramNeg(nn.Module):\n def __init__(self, n_vocab, n_embed, noise_dist=None):\n super().__init__()\n \n self.n_vocab = n_vocab\n self.n_embed = n_embed\n self.noise_dist = noise_dist\n \n # define embedding layers for input and output words\n self.in_embed = nn.Embedding(n_vocab, n_embed)\n self.out_embed = nn.Embedding(n_vocab, n_embed)\n # Initialize both embedding tables with uniform distribution\n self.in_embed.weight.data.uniform_(-1,1)\n self.out_embed.weight.data.uniform_(-1,1)\n \n \n def forward_input(self, input_words):\n # return input vector embeddings\n x = self.in_embed(input_words)\n return x\n \n def forward_output(self, output_words):\n # return output vector embeddings\n x = self.out_embed(output_words)\n return x\n \n def forward_noise(self, batch_size, n_samples):\n \"\"\" Generate noise vectors with shape (batch_size, n_samples, n_embed)\"\"\"\n if self.noise_dist is None:\n # Sample words uniformly\n noise_dist = torch.ones(self.n_vocab)\n else:\n noise_dist = self.noise_dist\n \n # Sample words from our noise distribution\n noise_words = torch.multinomial(noise_dist,\n batch_size * n_samples,\n replacement=True)\n \n device = \"cuda\" if self.out_embed.weight.is_cuda else \"cpu\"\n noise_words = noise_words.to(device)\n \n ## TODO: get the noise embeddings\n # reshape the embeddings so that they have dims (batch_size, n_samples, n_embed)\n noise_out = self.out_embed(noise_words)\n noise_out = noise_out.view(batch_size, n_samples, self.n_embed)\n \n return noise_out",
"_____no_output_____"
],
[
"class NegativeSamplingLoss(nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, input_vectors, output_vectors, noise_vectors):\n \n batch_size, embed_size = input_vectors.shape\n \n # Input vectors should be a batch of column vectors\n input_vectors = input_vectors.view(batch_size, embed_size, 1)\n \n # Output vectors should be a batch of row vectors\n output_vectors = output_vectors.view(batch_size, 1, embed_size)\n \n # bmm = batch matrix multiplication\n # correct log-sigmoid loss\n out_loss = torch.bmm(output_vectors, input_vectors).sigmoid().log()\n out_loss = out_loss.squeeze()\n \n # incorrect log-sigmoid loss\n noise_loss = torch.bmm(noise_vectors.neg(), input_vectors).sigmoid().log()\n noise_loss = noise_loss.squeeze().sum(1) # sum the losses over the sample of noise vectors\n\n # negate and sum correct and noisy log-sigmoid losses\n # return average batch loss\n return -(out_loss + noise_loss).mean()",
"_____no_output_____"
]
],
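[
[
"# Hedged smoke test (not part of the original notebook): run the classes defined above\n# on a tiny made-up vocabulary to confirm the shapes line up and the negative sampling\n# loss reduces to a single scalar. All sizes here are arbitrary.\ntiny_model = SkipGramNeg(n_vocab=50, n_embed=8)\n\ntiny_inputs = torch.randint(0, 50, (4,))\ntiny_targets = torch.randint(0, 50, (4,))\n\nin_vecs = tiny_model.forward_input(tiny_inputs) # (4, 8)\nout_vecs = tiny_model.forward_output(tiny_targets) # (4, 8)\nnoise_vecs = tiny_model.forward_noise(4, 3) # (4, 3, 8)\n\ntiny_loss = NegativeSamplingLoss()(in_vecs, out_vecs, noise_vecs)\nprint(noise_vecs.shape, tiny_loss.item())",
"_____no_output_____"
]
],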
[
[
"### Training\n\nBelow is our training loop, and I recommend that you train on GPU, if available.",
"_____no_output_____"
]
],
[
[
"device = 'cuda' if torch.cuda.is_available() else 'cpu'\n\n# Get our noise distribution\n# Using word frequencies calculated earlier in the notebook\nword_freqs = np.array(sorted(freqs.values(), reverse=True))\nunigram_dist = word_freqs/word_freqs.sum()\nnoise_dist = torch.from_numpy(unigram_dist**(0.75)/np.sum(unigram_dist**(0.75)))\n\n# instantiating the model\nembedding_dim = 300\nmodel = SkipGramNeg(len(vocab_to_int), embedding_dim, noise_dist=noise_dist).to(device)\n\n# using the loss that we defined\ncriterion = NegativeSamplingLoss() \noptimizer = optim.Adam(model.parameters(), lr=0.003)\n\nprint_every = 1500\nsteps = 0\nepochs = 5\n\n# train for some number of epochs\nfor e in range(epochs):\n \n # get our input, target batches\n for input_words, target_words in get_batches(train_words, 512):\n steps += 1\n inputs, targets = torch.LongTensor(input_words), torch.LongTensor(target_words)\n inputs, targets = inputs.to(device), targets.to(device)\n \n # input, outpt, and noise vectors\n input_vectors = model.forward_input(inputs)\n output_vectors = model.forward_output(targets)\n noise_vectors = model.forward_noise(inputs.shape[0], 5)\n\n # negative sampling loss\n loss = criterion(input_vectors, output_vectors, noise_vectors)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # loss stats\n if steps % print_every == 0:\n print(\"Epoch: {}/{}\".format(e+1, epochs))\n print(\"Loss: \", loss.item()) # avg batch loss at this point in training\n valid_examples, valid_similarities = cosine_similarity(model.in_embed, device=device)\n _, closest_idxs = valid_similarities.topk(6)\n\n valid_examples, closest_idxs = valid_examples.to('cpu'), closest_idxs.to('cpu')\n for ii, valid_idx in enumerate(valid_examples):\n closest_words = [int_to_vocab[idx.item()] for idx in closest_idxs[ii]][1:]\n print(int_to_vocab[valid_idx.item()] + \" | \" + ', '.join(closest_words))\n print(\"...\\n\")",
"Epoch: 1/5\nLoss: 7.007579803466797\nthey | through, pius, sentimental, conciliation, occuring\nare | ionians, thayer, the, israeli, solution\nsee | unclear, burroughs, in, belgian, tyrell\nhad | the, harriers, atlanta, senior, upgrades\nup | bucket, blanket, caucasus, productions, acidic\nby | via, strong, juliane, of, codex\nhave | away, chip, reforms, constants, in\nseven | the, early, any, confirming, functions\nliberal | retaken, told, christy, britain, two\nuniverse | mathematical, tripoli, testify, exegesis, sale\nexistence | tithe, tuft, sarajevo, involves, salieri\ndr | black, pro, agent, proposals, extrasolar\njoseph | nicknamed, wondering, chargaff, monday, locked\ncost | well, tailpiece, iudaea, numismatic, succeeded\nreport | yeast, waits, bears, magical, hellas\nexperience | service, calculates, secondly, drought, speed\n...\n\nEpoch: 1/5\nLoss: 5.279017448425293\neight | zero, one, a, in, and\nstate | lema, or, gateway, nine, louis\nonly | korean, avenge, valleys, bangui, kicked\non | the, and, one, to, a\nmay | by, to, of, that, for\ninto | mead, zero, sar, callisto, mercer\nother | is, and, in, the, a\nan | this, the, a, to, in\nstage | tripolitania, wholeness, relevance, pleaded, viewpoint\nmainly | curricula, curries, scientific, misspelled, obliterate\napplications | clanking, probably, edema, adjective, increment\nchannel | because, singapore, studi, researcher, multiplexer\ntaking | can, contempt, superorder, hegelian, y\nmarriage | blinded, gasification, preceded, union, seaboard\nknow | credits, an, includes, ed, cpr\nparis | orfeo, receding, revenues, seven, writ\n...\n\nEpoch: 1/5\nLoss: 4.112913608551025\nthree | six, one, five, eight, zero\nthere | all, the, to, or, a\nth | of, one, in, zero, six\nmany | as, have, or, that, a\nhistory | in, of, i, many, after\ninto | that, not, of, is, a\neight | one, nine, six, four, seven\nit | in, is, the, as, for\ncentre | practices, finally, cyanide, elvish, squads\nplaced | common, dharmas, grape, mystique, antisocial\nwoman | sightings, westminster, cdma, passos, classicist\napplications | increment, taken, mits, adjective, pyrrhus\nexcept | li, longstreet, grounds, representation, elevators\nengine | history, popular, bilaterally, article, esl\nevent | fingernails, and, sometimes, with, in\ngold | allergy, while, adders, states, rivas\n...\n\nEpoch: 1/5\nLoss: 3.533597946166992\nth | in, was, one, century, who\nthree | four, zero, six, seven, one\nthat | which, the, to, is, and\nhowever | to, the, in, by, that\nonly | a, the, because, it, which\na | in, the, is, an, of\nnot | the, as, to, that, they\nno | a, they, is, the, that\nconstruction | prices, inward, amplifies, consumption, average\nplaced | common, marbled, dharmas, each, atrial\nbrother | then, three, implications, quotient, one\nprimarily | se, focussed, world, interstellar, latinate\nversions | abusing, ontogeny, ecumenical, systems, free\naccepted | the, dim, remembers, considered, gillingham\ngold | moth, acids, be, allergy, ehrlich\nreport | hellas, bassoon, motifs, transcribed, sega\n...\n\nEpoch: 1/5\nLoss: 3.533855438232422\nif | any, are, can, or, must\nsee | of, was, in, by, modern\na | the, and, on, of, by\ntime | and, of, was, to, is\nafter | had, he, eight, was, by\nwith | in, of, the, and, by\nnot | to, that, it, some, have\non | a, by, in, the, to\nevent | sometimes, point, world, limestone, chd\nproposed | eclipse, they, countries, program, mandate\napplied | salamis, bhopal, defined, they, any\nknow | don, that, you, no, something\nrise | 
century, without, organization, in, sunburst\narts | clothes, several, goldwater, sig, few\nhold | that, however, referring, not, except\nnobel | prize, b, player, d, american\n...\n\nEpoch: 1/5\nLoss: 3.308270215988159\nb | d, composer, american, writer, eight\nsystem | software, applications, systems, user, access\neight | one, zero, three, six, nine\nwhere | is, not, in, of, then\nsome | such, to, which, that, are\nno | to, that, thus, know, it\ntime | up, when, the, at, first\nfour | one, zero, two, five, seven\nrise | southern, part, eastern, behalf, century\ndiscovered | computational, science, the, particles, r\naccount | those, some, mass, to, given\npolice | military, army, held, war, federal\nfrac | defined, sqrt, equation, y, function\nmainly | small, people, only, largely, now\npre | the, and, well, see, successfully\nparis | german, de, died, french, france\n...\n\nEpoch: 2/5\nLoss: 2.7641279697418213\nmore | the, of, are, some, much\nso | not, that, must, depending, be\nto | the, from, on, but, of\nmost | these, is, other, include, with\nalso | and, see, with, in, the\ntwo | zero, seven, six, three, four\nwould | that, to, them, they, but\nby | with, the, of, and, in\nanimals | species, animal, humans, human, may\nnotes | text, classical, singers, reading, works\noperating | computer, os, interface, mac, bit\nhold | christ, would, no, that, should\nexistence | argument, views, god, doctrines, believers\ncost | low, system, allow, range, business\nevent | to, opportunity, possession, end, time\ngold | silver, with, at, for, once\n...\n\nEpoch: 2/5\nLoss: 2.707792282104492\nbetween | the, divided, part, thus, main\nthey | to, their, enough, may, ability\nused | types, with, into, to, although\nduring | was, period, were, their, early\ninto | the, with, and, to, used\nfrom | the, and, to, is, sometimes\nth | century, the, history, st, french\nhe | his, she, him, her, father\nrecorded | songs, song, known, style, whom\northodox | catholic, christianity, church, christian, churches\nmagazine | edition, series, award, tv, television\ncreation | scientific, views, faith, religious, christian\naward | awards, academy, prize, baseball, singer\nbrother | her, son, his, married, henry\ncost | cheaper, expensive, speed, low, commercial\nhttp | www, com, external, website, htm\n...\n\nEpoch: 2/5\nLoss: 2.859250545501709\nfour | five, three, zero, two, one\nmany | as, most, has, such, often\nwith | a, the, in, or, of\nfirst | was, in, he, later, his\nwas | he, after, had, son, became\nits | and, the, has, european, regional\nth | century, st, nd, rd, history\nwere | was, and, destroyed, who, battle\nwriters | literary, literature, philosophy, novelists, philosophers\nsan | francisco, california, south, los, nevada\nolder | families, age, females, median, males\nconsists | is, normally, are, or, the\nprofessional | league, players, college, competitive, sports\nquite | even, especially, survive, comes, somewhat\nparis | jean, charles, de, brother, le\npolice | military, civilians, prison, criminal, troops\n...\n\nEpoch: 2/5\nLoss: 2.9967691898345947\ncan | if, allows, be, used, requires\nover | to, the, majority, most, around\nzero | five, two, three, seven, four\neight | six, four, five, nine, seven\nsee | references, is, external, one, links\nas | the, often, and, a, of\nabout | zero, with, five, the, from\nthese | some, as, through, is, and\nengine | engines, powered, mechanical, fuel, diesel\nrise | economic, influence, cleared, policy, arose\nlived | century, believed, great, peoples, 
mediterranean\nplaced | or, are, walls, ground, too\nnotes | tone, on, poem, appears, according\nstage | also, such, form, many, musical\nquestion | questions, view, reason, true, know\npope | church, catholic, holy, bishops, rome\n...\n\nEpoch: 2/5\nLoss: 2.596045732498169\nabout | and, to, has, of, with\nth | century, nd, dynasty, st, rd\nof | and, the, by, into, to\ncalled | in, the, form, and, are\ns | by, of, was, a, in\nfrom | to, in, the, and, by\nare | or, is, have, usually, the\nthree | six, nine, four, seven, two\nprince | henry, duke, william, vii, son\ndiscovered | sources, observations, in, solar, astronomers\nrunning | run, first, team, chris, on\ninstance | means, or, particular, conversely, non\nquite | sense, such, too, but, very\nrecorded | tour, rock, concert, recording, record\narticles | article, detailed, online, regarding, documents\nshown | appear, have, or, usually, such\n...\n\nEpoch: 2/5\nLoss: 2.9898293018341064\nfirst | s, before, new, was, the\nthere | is, all, in, example, note\nis | g, a, are, there, form\nknown | of, as, the, its, from\nin | and, the, of, see, as\nwar | battle, forces, allied, fought, nazi\nwhile | still, to, such, form, both\nall | there, the, here, to, number\nquite | more, make, much, even, usually\naccepted | knowledge, believe, concerning, scholars, view\nreport | reports, cia, about, commission, news\ndr | actress, research, stanley, one, founder\njoseph | thomas, john, robert, chemist, politician\nexperience | perception, consciousness, cognitive, subjective, mind\npolice | officer, personnel, civil, assassination, injured\ndefense | defence, forces, combat, police, tactics\n...\n\nEpoch: 3/5\nLoss: 2.211784601211548\nstate | states, council, constitution, elected, federal\nthis | to, that, the, another, any\nthree | two, zero, five, four, six\nas | with, also, such, some, a\nwould | could, had, put, concluded, out\nwar | forces, soviet, invasion, fought, troops\nmore | much, the, also, many, often\nstates | united, state, nations, presidents, federal\nengine | engines, powered, motors, diesel, car\noperating | os, mac, unix, platforms, ibm\nrecorded | songs, albums, concert, recording, rock\ngold | silver, iron, precious, yellow, tin\nroad | roads, highway, town, bridge, ferry\nevent | which, unless, beating, teams, wwf\nlived | later, early, few, probably, people\ntroops | army, forces, war, invasion, siege\n...\n\nEpoch: 3/5\nLoss: 2.286799430847168\nsix | four, one, two, five, seven\nthe | of, and, in, from, a\nmore | as, in, the, many, on\nthey | their, to, are, would, but\nare | these, or, is, similar, usually\nyears | year, three, period, was, over\nthree | four, one, zero, five, seven\nwas | had, later, became, soon, defeated\naccepted | accept, believe, conclusion, should, valid\nlived | tribe, inhabited, whom, he, later\nbill | jim, anderson, mike, eric, joe\nissue | act, issues, political, legislation, argue\narticles | online, overview, article, wiki, publishing\nconsists | consist, distinct, consisting, each, structure\nshown | a, this, is, such, the\ninstance | or, either, such, example, function\n...\n\nEpoch: 3/5\nLoss: 2.5285630226135254\nthat | the, to, is, as, fact\nunited | states, kingdom, nations, countries, commonwealth\npeople | ethnic, americans, spoken, african, culture\nwould | that, when, not, to, the\nor | be, is, depending, either, may\nis | an, or, be, are, function\nbeen | has, some, these, impact, s\nwar | soviet, forces, allied, fought, allies\nstage | performances, score, rock, fame, tour\nhold | 
hand, must, christ, be, body\nbehind | facing, beating, off, street, team\napplications | data, efficient, application, functions, software\nhttp | www, org, edu, html, htm\nrecorded | records, recordings, recording, songs, record\npowers | power, vested, governed, monarch, monarchy\nhit | hits, album, solo, singles, albums\n...\n\nEpoch: 3/5\nLoss: 2.3479809761047363\nis | are, be, which, or, if\nthat | it, be, which, is, are\nfrom | of, the, and, in, by\nabout | zero, around, references, days, two\nhe | his, was, him, she, her\ncalled | are, with, the, a, each\nits | and, to, by, in, more\nnine | seven, one, four, five, zero\narticles | page, encyclopedia, pages, article, links\nprince | son, crown, emperor, princess, king\nhttp | html, www, org, htm, com\nhold | hand, a, the, should, then\nexperience | perception, focused, skills, techniques, moral\northodox | church, churches, christ, christians, orthodoxy\nproposed | possibility, planetary, theory, theoretical, assessment\nwriters | fiction, novelists, poets, literature, authors\n...\n\nEpoch: 3/5\nLoss: 2.5519044399261475\nthere | is, be, in, or, are\nsee | references, external, also, list, article\nd | b, american, writer, composer, laureate\ntheir | to, were, of, for, have\nused | use, such, using, uses, commonly\nof | the, and, in, a, to\nworld | one, in, club, nine, international\nin | and, the, of, which, were\nocean | atlantic, islands, sea, island, coast\ntroops | forces, army, guard, war, rebels\napplications | software, application, specification, interface, clients\npope | bishops, constantinople, church, papal, catholicism\nnotes | octave, tone, note, composed, minor\nbible | testament, biblical, septuagint, hebrew, genesis\nplaced | down, right, too, kind, to\nmarriage | husband, divorce, her, marriages, children\n...\n\nEpoch: 3/5\nLoss: 2.419459342956543\nthis | the, be, so, is, of\nin | the, and, of, also, as\nthat | to, they, have, not, be\nsystem | systems, interface, software, windows, operating\nit | to, be, can, or, that\ncalled | a, the, to, include, into\nbetween | in, the, by, third, with\ntime | the, than, at, with, would\nevent | teams, upset, events, attendance, championship\nassembly | elections, elected, legislature, vote, appointed\nnobel | prize, recipient, laureate, physicist, chemist\npre | early, references, innovations, supported, modern\nsmith | writer, john, jr, adam, joseph\nheavy | metal, warfare, airborne, tank, heavier\nreport | reported, reports, commission, september, investigation\nshows | show, tv, movies, lyrics, sketch\n...\n\nEpoch: 4/5\nLoss: 2.630585193634033\nbetween | in, of, to, is, the\nand | in, of, to, the, with\nhowever | as, since, that, this, all\nup | their, to, out, from, where\nhave | in, a, all, are, not\nin | and, the, of, by, a\nis | a, in, of, the, it\nbeen | these, that, to, well, such\nprince | succeeded, throne, emperor, king, iv\nadditional | voice, system, extra, sound, typical\ncentre | street, city, shopping, located, town\npolice | officer, officers, killing, army, sentenced\nhold | belief, or, will, avoid, giving\nexcept | any, only, often, there, all\nparis | france, antoine, des, fran, exhibition\nengineering | sciences, technology, institute, engineers, research\n...\n\nEpoch: 4/5\nLoss: 2.179471015930176\nwith | the, to, have, as, a\nwill | if, you, a, without, be\nmore | than, and, are, is, with\nall | not, a, that, for, the\namerican | actress, actor, joe, association, musician\nwhile | to, in, the, them, their\nduring | early, after, was, period, 
late\nwho | a, she, their, later, him\naccount | according, excess, antiquities, herodotus, harris\nproposed | predicted, formulated, theory, accepted, quantum\ndiscovered | observed, astronomer, astronomers, chemist, discovery\nsmith | joseph, james, john, william, jr\nexcept | grouped, each, all, arranged, only\nanimals | animal, species, humans, insects, plants\ncentre | street, square, city, largest, area\naward | awards, winners, best, awarded, academy\n...\n\nEpoch: 4/5\nLoss: 2.582883834838867\nwhen | that, the, as, in, or\nstates | united, union, state, nations, republic\ntwo | three, zero, one, five, seven\nthere | the, of, are, a, may\nthree | six, two, zero, four, one\nhe | his, was, him, she, himself\nthese | are, have, many, as, that\npeople | living, republic, ethnic, americans, abroad\nroad | downtown, street, roads, avenue, highway\npressure | liquid, temperature, heat, constant, energy\nrunning | runs, run, mac, platforms, windows\ndiscovered | known, discovery, astronomer, planets, discoveries\nengine | engines, combustion, fuel, diesel, turbine\napplied | applying, mathematics, electrical, fields, engineering\nfile | files, user, windows, microsoft, interface\nbbc | television, tv, broadcast, programmes, programme\n...\n\nEpoch: 4/5\nLoss: 2.408916473388672\nd | b, politician, actress, j, born\nnine | seven, one, four, six, zero\nbetween | the, it, defined, in, than\nis | or, the, this, which, not\nup | being, have, out, and, to\nthere | that, are, is, or, of\neight | seven, one, four, six, five\nbeen | the, by, and, that, has\ngrand | prix, duchy, member, jury, house\nbehind | face, defensive, staged, passes, scenes\ndefense | agency, personnel, strategic, peacekeeping, defence\ndiscovered | discoveries, probably, discovery, found, scientists\nconstruction | constructed, buildings, railway, building, brick\njoseph | smith, samuel, wrote, james, ernst\nrise | occurred, during, characterized, established, rising\nversions | version, pc, unix, microsoft, format\n...\n\nEpoch: 4/5\nLoss: 2.272409677505493\nhave | these, be, to, an, not\nthan | only, less, more, the, it\nwhen | he, a, after, to, would\nseven | six, one, five, nine, four\nfrom | to, in, and, the, of\nwhere | a, at, up, left, called\nan | by, of, the, in, a\nzero | two, nine, one, six, four\nassembly | legislative, legislature, appoints, elected, deputies\nparis | du, la, fran, henri, le\nsquare | kilometers, boundaries, km, mi, kilometres\nstage | rock, best, actors, performance, recording\nmarriage | divorce, daughter, her, marriages, marry\nocean | atlantic, islands, island, pacific, coast\nheavy | accidents, metal, armored, lighter, water\ngrand | duchy, commander, duke, knights, title\n...\n\nEpoch: 4/5\nLoss: 2.2415850162506104\nuse | used, uses, systems, need, standard\nhave | are, to, that, been, there\nworld | united, city, famous, largest, germany\nstate | states, county, michigan, missouri, municipal\ncalled | is, in, a, or, also\nzero | one, two, three, five, six\nbetween | the, east, north, western, in\na | is, the, and, to, of\nconsists | consisting, comprises, main, are, composed\nwoman | children, man, husband, female, she\nexcept | any, length, non, consists, occurs\ninstance | such, or, defined, particular, use\nrunning | run, processor, java, os, architectures\nmainly | regions, especially, mostly, region, due\nnobel | prize, laureate, recipient, physicist, chemist\nderived | word, meaning, latin, used, names\n...\n\nEpoch: 5/5\nLoss: 2.374793767929077\nwho | he, him, whom, his, wife\nonly | 
which, and, was, not, they\nhis | he, him, brother, son, himself\nhave | been, some, not, all, that\nabout | of, roughly, claims, two, zero\nfour | seven, three, two, five, one\nwith | in, the, of, and, by\nfirst | later, until, second, at, before\nsomething | think, know, what, thing, anything\nparis | france, du, la, des, sur\nrunning | run, os, ratings, ran, jump\nevent | held, occurred, events, winning, conference\ncreation | created, genesis, violated, states, cult\napplied | technical, called, used, systems, is\nolder | families, households, age, median, years\nunits | unit, measurement, infantry, equal, metric\n...\n\nEpoch: 5/5\nLoss: 2.1454501152038574\ntheir | they, to, the, many, and\nfirst | a, second, at, the, became\nbe | not, can, is, may, to\nmore | the, much, as, have, many\nworld | countries, the, became, first, since\ncalled | from, the, a, which, form\nhowever | to, have, they, not, as\nover | three, years, zero, one, time\nmean | q, meaning, variance, n, k\naward | awards, winners, best, academy, outstanding\nice | melting, winter, frozen, hockey, glaciers\nunits | unit, regiments, battalions, infantry, battalion\noperations | operation, personnel, logistics, u, task\nhit | hits, hitting, baseball, inning, pitch\nscale | large, smaller, theoretically, theoretical, precision\nissue | comics, issues, book, books, issued\n...\n\nEpoch: 5/5\nLoss: 2.4414429664611816\ncalled | a, is, or, also, of\nthey | their, have, not, those, them\nas | and, a, of, the, by\nan | a, the, this, by, is\nbeen | have, more, these, in, were\namerican | actress, singer, laureate, actor, comedian\nused | using, use, as, are, uses\nbetween | divided, over, the, and, than\nreport | reported, reports, evidence, investigation, drug\nocean | atlantic, pacific, island, lakes, coast\nhold | body, holds, any, is, hand\nexcept | often, there, can, grouped, or\nanimals | animal, mammals, species, humans, insect\nfrac | mathbf, cdot, x, sum, equations\nwriters | fiction, novelists, poets, literature, philosophers\nconsists | each, usually, consisting, form, directly\n...\n\nEpoch: 5/5\nLoss: 2.3553061485290527\nso | to, not, the, has, make\nthis | the, it, is, which, an\nb | d, mathematician, laureate, n, physicist\nsome | as, are, to, all, were\nwhile | most, they, other, to, was\non | the, s, and, from, a\nunited | states, kingdom, nations, pacific, australia\nof | the, to, by, in, and\nmainly | spoken, ethnic, concentrated, communities, western\negypt | egyptian, mesopotamia, arab, syria, sinai\nalternative | problem, theory, theories, these, and\nbible | testament, tanakh, scripture, biblical, gospel\nocean | atlantic, pacific, islands, island, km\nhttp | www, html, org, com, htm\nprofessional | amateur, sports, players, clubs, team\nproposed | proposal, planetary, model, formulated, predictions\n...\n\nEpoch: 5/5\nLoss: 2.3105480670928955\ni | me, t, don, you, my\ninto | the, and, a, to, of\nsystem | systems, operating, term, allows, connections\namerican | actor, actress, singer, b, writer\nby | the, in, and, a, on\nthey | that, them, while, not, their\nmore | be, most, in, than, the\nnine | one, two, zero, six, four\ntroops | forces, army, marched, brigade, armed\nreport | reported, reports, terrorist, commission, committee\nadditional | required, each, prayers, are, all\ngovernor | attorney, office, lieutenant, president, executive\nmainly | regions, concentrated, speaking, imported, mostly\npre | modern, legendary, greek, european, developed\naward | awards, best, academy, awarded, winners\nhttp | 
www, htm, html, com, links\n...\n\nEpoch: 5/5\nLoss: 2.3180344104766846\nand | the, a, of, in, to\nd | b, american, composer, writer, politician\nan | of, a, the, in, and\nat | in, the, was, on, time\nhad | was, his, he, s, were\nstates | united, governors, state, union, constitution\nfour | two, one, five, seven, six\nhistory | links, in, the, website, see\nshows | show, television, starred, sketch, pop\nplaced | hands, down, reverse, including, rows\nice | frozen, melting, glaciers, glacial, freezing\ngovernor | president, attorney, executive, lieutenant, minister\nmean | sum, q, function, variance, k\nfreedom | liberty, economic, freedoms, free, liberties\nprince | princess, eldest, empress, duke, throne\naccepted | universally, claim, rejected, modern, hotly\n...\n\n"
]
],
[
[
"## Visualizing the word vectors\n\nBelow we'll use T-SNE to visualize how our high-dimensional word vectors cluster together. T-SNE is used to project these vectors into two dimensions while preserving local stucture. Check out [this post from Christopher Olah](http://colah.github.io/posts/2014-10-Visualizing-MNIST/) to learn more about T-SNE and other ways to visualize high-dimensional data.",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\n%config InlineBackend.figure_format = 'retina'\n\nimport matplotlib.pyplot as plt\nfrom sklearn.manifold import TSNE",
"_____no_output_____"
],
[
"# getting embeddings from the embedding layer of our model, by name\nembeddings = model.in_embed.weight.to('cpu').data.numpy()",
"_____no_output_____"
],
[
"viz_words = 380\ntsne = TSNE()\nembed_tsne = tsne.fit_transform(embeddings[:viz_words, :])",
"_____no_output_____"
],
[
"fig, ax = plt.subplots(figsize=(16, 16))\nfor idx in range(viz_words):\n plt.scatter(*embed_tsne[idx, :], color='steelblue')\n plt.annotate(int_to_vocab[idx], (embed_tsne[idx, 0], embed_tsne[idx, 1]), alpha=0.7)",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
]
|
ec68c406e25e4a952a51f46e2561187d63b8532a | 18,218 | ipynb | Jupyter Notebook | reinforcement_learning/rl_resource_allocation_ray_customEnv/rl_bin_packing_ray_custom.ipynb | CloudaYolla/amazon-sagemaker-examples | aa22e256e56ad2f02347666d38cf3e98cd29d840 | [
"Apache-2.0"
]
| 1 | 2020-11-23T23:58:24.000Z | 2020-11-23T23:58:24.000Z | reinforcement_learning/rl_resource_allocation_ray_customEnv/rl_bin_packing_ray_custom.ipynb | Aljasere/amazon-sagemaker-examples | 16ab3e57ce11b31cbb947cfd3fc630ab7dd6e546 | [
"Apache-2.0"
]
| null | null | null | reinforcement_learning/rl_resource_allocation_ray_customEnv/rl_bin_packing_ray_custom.ipynb | Aljasere/amazon-sagemaker-examples | 16ab3e57ce11b31cbb947cfd3fc630ab7dd6e546 | [
"Apache-2.0"
]
| null | null | null | 37.408624 | 604 | 0.609342 | [
[
[
"# Solving Bin Packing Problem with Amazon SageMaker RL\n\nThis notebook shows an example of how to use reinforcement learning to solve the online stochastic bin packing problem. In the classic version of bin packing, we are given items of different sizes and need to pack them into as few bins as possible. In the online stochastic version, items arrive one at a time and item sizes are drawn from an unknown distribution. The task is to find a feasible packing that minimizes the number of bins used to pack all of the items that arrive within the time horizon.",
"_____no_output_____"
],
[
"## Problem Statement\n\nIn the stochastic bin packing problem, items arrive online, one in each time period $t$. Items can be of different sizes. Upon arrival, the item needs to be packed into one of the bins. We assume the number of bins one can open is unlimited. Assuming each bin has size of $10$, the usage is illustrated in the figure below. Each rectangle corresponds to a bin, with a utilization level between 0 (empty) to 10 (full).",
"_____no_output_____"
],
[
"<img src=\"images/rl_bin_packing.png\" width=\"300\" align=\"center\"/>",
"_____no_output_____"
],
[
"This problem can be formulated as an Markov Decision Process (MDP). Under the customized simulator, we define:\n\n1. *State*: Current item size and the number of bins at each level. Initially, all the bins are empty.\n\n2. *Action*: Pick a bin level which can fit the item. The number of actions possible is the number of existed non-empty bins with one action for each level, and action $0$ corresponds to opening a new bin.Invalid actions such as picking a level for which bins do not exist yet are masked.\n\n2. *Reward*: Negative of incremental waste as each item is put into a bin. If the item is put into an existing bin, the incremental waste will reduce by item size. If the item is put into a new bin, the waste increases by the empty space left in the new bin.",
"_____no_output_____"
],
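[
"Below is a minimal sketch (not part of the original simulator code) of how this incremental-waste reward could be computed. The bin size, the `bins` counter and all names here are illustrative assumptions only.\n\n```python\nBIN_SIZE = 10  # illustrative bin capacity\n\ndef step_reward(bins, item_size, level):\n    # bins: dict mapping fill level -> number of bins at that level (sketch only)\n    # level: 0 opens a new bin, otherwise an existing fill level that fits the item\n    if level == 0:  # open a new bin\n        waste_delta = BIN_SIZE - item_size  # empty space left in the new bin\n        bins[item_size] = bins.get(item_size, 0) + 1\n    else:  # put the item into an existing bin at this fill level\n        waste_delta = -item_size  # existing empty space is filled by the item\n        bins[level] -= 1\n        bins[level + item_size] = bins.get(level + item_size, 0) + 1\n    return -waste_delta  # reward is the negative incremental waste\n```",
"_____no_output_____"
],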
[
"## Using Amazon SageMaker RL\n\nAmazon SageMaker RL allows you to train your RL agents in cloud machines using docker containers. You do not have to worry about setting up your machines with the RL toolkits and deep learning frameworks. You can easily switch between many different machines setup for you, including powerful GPU machines that give a big speedup. You can also choose to use multiple machines in a cluster to further speedup training, often necessary for production level loads.",
"_____no_output_____"
],
[
"## Pre-requsites\n### Roles and permissions\n\nTo get started, we'll import the Python libraries we need, set up the environment with a few prerequisites for permissions and configurations.",
"_____no_output_____"
]
],
[
[
"import sagemaker\nimport boto3\nimport sys\nimport os\nimport glob\nimport re\nimport subprocess\nfrom IPython.display import HTML\nimport time\nfrom time import gmtime, strftime\nsys.path.append(\"common\")\nfrom misc import get_execution_role, wait_for_s3_object\nfrom sagemaker.rl import RLEstimator, RLToolkit, RLFramework",
"_____no_output_____"
]
],
[
[
"### Setup S3 bucket\n\nSet up the linkage and authentication to the S3 bucket that you want to use for checkpoint and the metadata. ",
"_____no_output_____"
]
],
[
[
"sage_session = sagemaker.session.Session()\ns3_bucket = sage_session.default_bucket() \ns3_output_path = 's3://{}/'.format(s3_bucket)\nprint(\"S3 bucket path: {}\".format(s3_output_path))",
"_____no_output_____"
]
],
[
[
"### Define Variables \n\nWe define variables such as the job prefix for the training jobs *and the image path for the container (only when this is BYOC).*",
"_____no_output_____"
]
],
[
[
"# create a descriptive job name \njob_name_prefix = 'rl-binpacking'",
"_____no_output_____"
]
],
[
[
"### Configure where training happens\n\nYou can train your RL training jobs using the SageMaker notebook instance or local notebook instance. In both of these scenarios, you can run the following in either local or SageMaker modes. The local mode uses the SageMaker Python SDK to run your code in a local container before deploying to SageMaker. This can speed up iterative testing and debugging while using the same familiar Python SDK interface. You just need to set `local_mode = True`.",
"_____no_output_____"
]
],
[
[
"# run in local_mode on this machine, or as a SageMaker TrainingJob?\nlocal_mode = False\n\nif local_mode:\n instance_type = 'local'\nelse:\n # If on SageMaker, pick the instance type\n instance_type = \"ml.c5.2xlarge\"",
"_____no_output_____"
]
],
[
[
"### Create an IAM role\nEither get the execution role when running from a SageMaker notebook `role = sagemaker.get_execution_role()` or, when running locally, set it to an IAM role with `AmazonSageMakerFullAccess` and `CloudWatchFullAccess permissions`.",
"_____no_output_____"
]
],
[
[
"try:\n role = sagemaker.get_execution_role()\nexcept:\n role = get_execution_role()\n\nprint(\"Using IAM role arn: {}\".format(role))",
"_____no_output_____"
]
],
[
[
"### Install docker for `local` mode\n\nIn order to work in `local` mode, you need to have docker installed. When running from you local instance, please make sure that you have docker or docker-compose (for local CPU machines) and nvidia-docker (for local GPU machines) installed. Alternatively, when running from a SageMaker notebook instance, you can simply run the following script \n\nNote, you can only run a single local notebook at one time.",
"_____no_output_____"
]
],
[
[
"# only run from SageMaker notebook instance\nif local_mode:\n !/bin/bash ./common/setup.sh",
"_____no_output_____"
]
],
[
[
"## Setup the environment\n\nThe environment is defined in a Python file called `bin_packing_env.py` in the `./src` directory. It implements the `init()`, `step()` and `reset()` functions that describe how the environment behaves. This is consistent with Open AI Gym interfaces for defining an environment.\n\n- Init() - initialize the environment in a pre-defined state\n- Step() - take an action on the environment\n- reset()- restart the environment on a new episode\n- [if applicable] render() - get a rendered image of the environment in its current state",
"_____no_output_____"
]
],
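[
[
"A minimal, illustrative skeleton of such an environment (a sketch only, with a toy state layout; not the actual contents of `bin_packing_env.py`) could look like this:\n\n```python\nimport gym\nimport numpy as np\nfrom gym import spaces\n\nclass ToyBinPackingEnv(gym.Env):\n    # Illustrative skeleton only; the real environment lives in src/bin_packing_env.py\n    def __init__(self, bin_size=10, episode_length=100):\n        self.bin_size = bin_size\n        self.episode_length = episode_length\n        # one action per existing fill level, plus action 0 for opening a new bin\n        self.action_space = spaces.Discrete(bin_size)\n        # counts of bins at each fill level, plus the size of the incoming item\n        self.observation_space = spaces.Box(low=0.0, high=np.inf, shape=(bin_size + 1,), dtype=np.float32)\n\n    def reset(self):\n        self.t = 0\n        self.bin_counts = np.zeros(self.bin_size, dtype=np.float32)\n        self.item_size = np.random.randint(1, self.bin_size)\n        return self._observation()\n\n    def step(self, action):\n        reward = 0.0  # the incremental-waste reward would be computed here\n        self.t += 1\n        done = self.t >= self.episode_length\n        self.item_size = np.random.randint(1, self.bin_size)\n        return self._observation(), reward, done, {}\n\n    def _observation(self):\n        return np.append(self.bin_counts, self.item_size).astype(np.float32)\n```",
"_____no_output_____"
]
],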
[
[
"# uncomment the following line to see the environment\n# !pygmentize src/bin_packing_env.py",
"_____no_output_____"
]
],
[
[
"## Write the training code\n\nThe training code is written in the file `train_bin_packing.py` which is also uploaded in the `/src` directory. \nFirst import the environment files and the preset files, and then define the main() function. ",
"_____no_output_____"
]
],
[
[
"!pygmentize src/train_bin_packing.py",
"_____no_output_____"
]
],
[
[
"## Train the RL model using the Python SDK Script mode\n\nIf you are using local mode, the training will run on the notebook instance. When using SageMaker for training, you can select a GPU or CPU instance. The [RLEstimator](https://sagemaker.readthedocs.io/en/stable/sagemaker.rl.html) is used for training RL jobs. \n\n1. Specify the source directory where the gym environment and training code is uploaded.\n2. Specify the entry point as the training code \n3. Specify the choice of RL toolkit and framework. This automatically resolves to the ECR path for the RL Container. \n4. Define the training parameters such as the instance count, job name, S3 path for output and job name. \n5. Specify the hyperparameters for the RL agent algorithm. The RLCOACH_PRESET or the RLRAY_PRESET can be used to specify the RL agent algorithm you want to use. \n6. Define the metrics definitions that you are interested in capturing in your logs. These can also be visualized in CloudWatch and SageMaker Notebooks.",
"_____no_output_____"
],
[
"### Define Metric\nA list of dictionaries that defines the metric(s) used to evaluate the training jobs. Each dictionary contains two keys: ‘Name’ for the name of the metric, and ‘Regex’ for the regular expression used to extract the metric from the logs.",
"_____no_output_____"
]
],
[
[
"metric_definitions = [{'Name': 'episode_reward_mean',\n 'Regex': 'episode_reward_mean: ([-+]?[0-9]*\\\\.?[0-9]+([eE][-+]?[0-9]+)?)'},\n {'Name': 'episode_reward_max',\n 'Regex': 'episode_reward_max: ([-+]?[0-9]*\\\\.?[0-9]+([eE][-+]?[0-9]+)?)'},\n {'Name': 'episode_len_mean',\n 'Regex': 'episode_len_mean: ([-+]?[0-9]*\\\\.?[0-9]+([eE][-+]?[0-9]+)?)'},\n {'Name': 'entropy',\n 'Regex': 'entropy: ([-+]?[0-9]*\\\\.?[0-9]+([eE][-+]?[0-9]+)?)'},\n {'Name': 'episode_reward_min',\n 'Regex': 'episode_reward_min: ([-+]?[0-9]*\\\\.?[0-9]+([eE][-+]?[0-9]+)?)'},\n {'Name': 'vf_loss',\n 'Regex': 'vf_loss: ([-+]?[0-9]*\\\\.?[0-9]+([eE][-+]?[0-9]+)?)'},\n {'Name': 'policy_loss',\n 'Regex': 'policy_loss: ([-+]?[0-9]*\\\\.?[0-9]+([eE][-+]?[0-9]+)?)'}, \n]",
"_____no_output_____"
]
],
[
[
"### Define Estimator\nThis Estimator executes an RLEstimator script in a managed Reinforcement Learning (RL) execution environment within a SageMaker Training Job. The managed RL environment is an Amazon-built Docker container that executes functions defined in the supplied entry_point Python script.",
"_____no_output_____"
]
],
[
[
"train_entry_point = \"train_bin_packing.py\"\ntrain_job_max_duration_in_seconds = 60 * 10\n\nestimator = RLEstimator(entry_point=train_entry_point,\n source_dir=\"src\",\n dependencies=[\"common/sagemaker_rl\"],\n toolkit=RLToolkit.RAY,\n toolkit_version='0.6.5',\n framework=RLFramework.TENSORFLOW,\n role=role,\n instance_type=instance_type,\n instance_count=1,\n output_path=s3_output_path,\n base_job_name=job_name_prefix,\n metric_definitions=metric_definitions,\n max_run=train_job_max_duration_in_seconds,\n hyperparameters={}\n )",
"_____no_output_____"
],
[
"estimator.fit(wait=local_mode)\n\njob_name=estimator._current_job_name\nprint(\"Job name: {}\".format(job_name))",
"_____no_output_____"
]
],
[
[
"## Visualization\n\nRL training can take a long time. So while it's running there are a variety of ways we can track progress of the running training job. Some intermediate output gets saved to S3 during training, so we'll set up to capture that.",
"_____no_output_____"
]
],
[
[
"s3_url = \"s3://{}/{}\".format(s3_bucket,job_name)\n\nintermediate_folder_key = \"{}/output/intermediate/\".format(job_name)\nintermediate_url = \"s3://{}/{}training/\".format(s3_bucket, intermediate_folder_key)\n\nprint(\"S3 job path: {}\".format(s3_url))\nprint(\"Intermediate folder path: {}\".format(intermediate_url))",
"_____no_output_____"
]
],
[
[
"### Plot metrics for training job\nWe can see the reward metric of the training as it's running, using algorithm metrics that are recorded in CloudWatch metrics. We can plot this to see the performance of the model over time.",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nfrom sagemaker.analytics import TrainingJobAnalytics",
"_____no_output_____"
],
[
"if not local_mode:\n df = TrainingJobAnalytics(job_name, ['episode_reward_mean']).dataframe()\n df_min = TrainingJobAnalytics(job_name, ['episode_reward_min']).dataframe()\n df_max = TrainingJobAnalytics(job_name, ['episode_reward_max']).dataframe()\n df['rl_reward_mean'] = df['value']\n df['rl_reward_min'] = df_min['value']\n df['rl_reward_max'] = df_max['value']\n num_metrics = len(df)\n \n if num_metrics == 0:\n print(\"No algorithm metrics found in CloudWatch\")\n else:\n plt = df.plot(x='timestamp', y=['rl_reward_mean'], figsize=(18,6), fontsize=18, legend=True, style='-', color=['b','r','g'])\n plt.fill_between(df.timestamp, df.rl_reward_min, df.rl_reward_max, color='b', alpha=0.2)\n plt.set_ylabel('Mean reward per episode', fontsize=20)\n plt.set_xlabel('Training time (s)', fontsize=20)\n plt.legend(loc=4, prop={'size': 20})\nelse:\n print(\"Can't plot metrics in local mode.\")",
"_____no_output_____"
]
],
[
[
"#### Monitor training progress\nYou can repeatedly run the visualization cells to get the latest metrics as the training job proceeds.",
"_____no_output_____"
],
[
"## Training Results",
"_____no_output_____"
],
[
"You can let the training job run longer by specifying `train_max_run` in `RLEstimator`. The figure below illustrates the reward function of the RL policy vs. that of Best Fit, a classic heuristic. The sub-figures correspond to three categories of the item distribution, Perfectly Packable with Bounded Waste (BW), Perfectly Packable (PP) and Linear Waste (LW) respectively. The experiments are conducted on a p3.8x instance. For more details on the environment setup and how different parameters are set, please refer to [ORL: Reinforcement Learning Benchmarks for Online Stochastic Optimization\nProblems](https://arxiv.org/pdf/1911.10641.pdf).",
"_____no_output_____"
],
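[
"For reference, here is a minimal sketch of the Best Fit heuristic used as the baseline above (a sketch only, not the code used to produce the figure; all names are illustrative):\n\n```python\ndef best_fit(item_size, bin_levels, bin_size=10):\n    # bin_levels: current fill levels of the open bins\n    # choose the open bin that leaves the least empty space after packing the item\n    best_index, best_residual = None, None\n    for i, level in enumerate(bin_levels):\n        residual = bin_size - level - item_size\n        if residual >= 0 and (best_residual is None or residual < best_residual):\n            best_index, best_residual = i, residual\n    if best_index is None:  # no open bin fits the item: open a new bin\n        bin_levels.append(item_size)\n    else:\n        bin_levels[best_index] += item_size\n    return bin_levels\n```",
"_____no_output_____"
],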
[
"",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
]
]
|
ec68d4ff7410fc7f2b91217f1546eccd34644ffc | 58,070 | ipynb | Jupyter Notebook | examples/getting-started-movielens/03a-Training-with-TF.ipynb | lgardenhire/Merlin-1 | 7e2ddb15f684a747a84083e99f673315f054c02d | [
"Apache-2.0"
]
| null | null | null | examples/getting-started-movielens/03a-Training-with-TF.ipynb | lgardenhire/Merlin-1 | 7e2ddb15f684a747a84083e99f673315f054c02d | [
"Apache-2.0"
]
| null | null | null | examples/getting-started-movielens/03a-Training-with-TF.ipynb | lgardenhire/Merlin-1 | 7e2ddb15f684a747a84083e99f673315f054c02d | [
"Apache-2.0"
]
| null | null | null | 84.897661 | 34,680 | 0.817789 | [
[
[
"# Copyright 2021 NVIDIA Corporation. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================",
"_____no_output_____"
]
],
[
[
"<img src=\"http://developer.download.nvidia.com/compute/machine-learning/frameworks/nvidia_logo.png\" style=\"width: 90px; float: right;\">\n\n# Getting Started MovieLens: Training with TensorFlow\n\n## Overview\n\nWe observed that TensorFlow training pipelines can be slow as the dataloader is a bottleneck. The native dataloader in TensorFlow randomly sample each item from the dataset, which is very slow. The window dataloader in TensorFlow is not much faster. In our experiments, we are able to speed-up existing TensorFlow pipelines by 9x using a highly optimized dataloader.<br><br>\n\nApplying deep learning models to recommendation systems faces unique challenges in comparison to other domains, such as computer vision and natural language processing. The datasets and common model architectures have unique characteristics, which require custom solutions. Recommendation system datasets have terabytes in size with billion examples but each example is represented by only a few bytes. For example, the [Criteo CTR dataset](https://ailab.criteo.com/download-criteo-1tb-click-logs-dataset/), the largest publicly available dataset, is 1.3TB with 4 billion examples. The model architectures have normally large embedding tables for the users and items, which do not fit on a single GPU. You can read more in our [blogpost](https://medium.com/nvidia-merlin/why-isnt-your-recommender-system-training-faster-on-gpu-and-what-can-you-do-about-it-6cb44a711ad4).\n\n### Learning objectives\n\nThis notebook explains, how to use the NVTabular dataloader to accelerate TensorFlow training.\n1. Use **NVTabular dataloader** with TensorFlow Keras model\n2. Leverage **multi-hot encoded input features**\n\n### MovieLens25M\n\nThe [MovieLens25M](https://grouplens.org/datasets/movielens/25m/) is a popular dataset for recommender systems and is used in academic publications. The dataset contains 25M movie ratings for 62,000 movies given by 162,000 users. Many projects use only the user/item/rating information of MovieLens, but the original dataset provides metadata for the movies, as well. For example, which genres a movie has. Although we may not improve state-of-the-art results with our neural network architecture, the purpose of this notebook is to explain how to integrate multi-hot categorical features into a neural network.",
"_____no_output_____"
],
[
"## NVTabular dataloader for TensorFlow\n\nWe’ve identified that the dataloader is one bottleneck in deep learning recommender systems when training pipelines with TensorFlow. The dataloader cannot prepare the next batch fast enough and therefore, the GPU is not fully utilized. \n\nWe developed a highly customized tabular dataloader for accelerating existing pipelines in TensorFlow. In our experiments, we see a speed-up by 9x of the same training workflow with NVTabular dataloader. NVTabular dataloader’s features are:\n- removing bottleneck of item-by-item dataloading\n- enabling larger than memory dataset by streaming from disk\n- reading data directly into GPU memory and remove CPU-GPU communication\n- preparing batch asynchronously in GPU to avoid CPU-GPU communication\n- supporting commonly used .parquet format\n- easy integration into existing TensorFlow pipelines by using similar API - works with tf.keras models\n\nMore information in our [blogpost](https://medium.com/nvidia-merlin/training-deep-learning-based-recommender-systems-9x-faster-with-tensorflow-cc5a2572ea49).",
"_____no_output_____"
],
[
"## Getting Started",
"_____no_output_____"
]
],
[
[
"# External dependencies\nimport os\nimport time\nimport gc\nimport glob\nimport os\n\nimport nvtabular as nvt",
"_____no_output_____"
]
],
[
[
"We define our base input directory, containing the data.",
"_____no_output_____"
]
],
[
[
"INPUT_DATA_DIR = os.environ.get('INPUT_DATA_DIR', os.path.expanduser(\"~/nvt-examples/movielens/data/\"))\nMODEL_BASE_DIR = os.environ.get('MODEL_BASE_DIR', os.path.join(INPUT_DATA_DIR, \"model\"))",
"_____no_output_____"
]
],
[
[
"### Defining Hyperparameters",
"_____no_output_____"
],
[
"First, we define the data schema and differentiate between single-hot and multi-hot categorical features. Note, that we do not have any numerical input features. ",
"_____no_output_____"
]
],
[
[
"BATCH_SIZE = 1024*32 # Batch Size\nCATEGORICAL_COLUMNS = ['movieId', 'userId'] # Single-hot\nCATEGORICAL_MH_COLUMNS = ['genres'] # Multi-hot\nNUMERIC_COLUMNS = []\n\n# Output from ETL-with-NVTabular\nTRAIN_PATHS = sorted(glob.glob(os.path.join(INPUT_DATA_DIR, \"train\", \"*.parquet\")))\nVALID_PATHS = sorted(glob.glob(os.path.join(INPUT_DATA_DIR, \"valid\", \"*.parquet\")))",
"_____no_output_____"
]
],
[
[
"In the previous notebook, we used NVTabular for ETL and stored the workflow to disk. We can load the NVTabular workflow to extract important metadata for our training pipeline.",
"_____no_output_____"
]
],
[
[
"workflow = nvt.Workflow.load(os.path.join(INPUT_DATA_DIR, \"workflow\"))",
"_____no_output_____"
]
],
[
[
"The embedding table shows the cardinality of each categorical variable along with its associated embedding size. Each entry is of the form `(cardinality, embedding_size)`.",
"_____no_output_____"
]
],
[
[
"EMBEDDING_TABLE_SHAPES = nvt.ops.get_embedding_sizes(workflow)\nEMBEDDING_TABLE_SHAPES",
"_____no_output_____"
]
],
[
[
"### Initializing NVTabular Dataloader for Tensorflow",
"_____no_output_____"
],
[
"We import TensorFlow and some NVTabular TF extensions, such as custom TensorFlow layers supporting multi-hot and the NVTabular TensorFlow data loader.",
"_____no_output_____"
]
],
[
[
"import os\nimport time\nimport tensorflow as tf\n\nfrom tensorflow.python.feature_column import feature_column_v2 as fc\n\n# we can control how much memory to give tensorflow with this environment variable\n# IMPORTANT: make sure you do this before you initialize TF's runtime, otherwise\n# TF will have claimed all free GPU memory\nos.environ['TF_MEMORY_ALLOCATION'] = \"0.7\" # fraction of free memory\nfrom nvtabular.loader.tensorflow import KerasSequenceLoader, KerasSequenceValidater\nfrom nvtabular.framework_utils.tensorflow import layers\nfrom tensorflow.python.feature_column import feature_column_v2 as fc",
"_____no_output_____"
]
],
[
[
"First, we take a look on our data loader and how the data is represented as tensors. The NVTabular data loader are initialized as usually and we specify both single-hot and multi-hot categorical features as cat_names. The data loader will automatically recognize the single/multi-hot columns and represent them accordingly.",
"_____no_output_____"
]
],
[
[
"train_dataset_tf = KerasSequenceLoader(\n TRAIN_PATHS, # you could also use a glob pattern\n batch_size=BATCH_SIZE,\n label_names=['rating'],\n cat_names=CATEGORICAL_COLUMNS+CATEGORICAL_MH_COLUMNS,\n cont_names=NUMERIC_COLUMNS,\n engine='parquet',\n shuffle=True,\n buffer_size=0.06, # how many batches to load at once\n parts_per_chunk=1\n)\n\nvalid_dataset_tf = KerasSequenceLoader(\n VALID_PATHS, # you could also use a glob pattern\n batch_size=BATCH_SIZE,\n label_names=['rating'],\n cat_names = CATEGORICAL_COLUMNS+CATEGORICAL_MH_COLUMNS,\n cont_names=NUMERIC_COLUMNS,\n engine='parquet',\n shuffle=False,\n buffer_size=0.06,\n parts_per_chunk=1\n)",
"_____no_output_____"
]
],
[
[
"Let's generate a batch and take a look on the input features.<br><br>\nWe can see, that the single-hot categorical features (`userId` and `movieId`) have a shape of `(32768, 1)`, which is the batchsize (as usually).<br><br>\nFor the multi-hot categorical feature `genres`, we receive two Tensors `genres__values` and `genres__nnzs`.<br><br>\n`genres__values` are the actual data, containing the genre IDs. Note that the Tensor has more values than the batch_size. The reason is, that one datapoint in the batch can contain more than one genre (multi-hot).<br>\n`genres__nnzs` are a supporting Tensor, describing how many genres are associated with each datapoint in the batch.<br><br>\nFor example,\n- if the first value in `genres__nnzs` is `5`, then the first 5 values in `genres__values` are associated with the first datapoint in the batch (movieId/userId).<br>\n- if the second value in `genres__nnzs` is `2`, then the 6th and the 7th values in `genres__values` are associated with the second datapoint in the batch (continuing after the previous value stopped).<br> \n- if the third value in `genres_nnzs` is `1`, then the 8th value in `genres__values` are associated with the third datapoint in the batch. \n- and so on",
"_____no_output_____"
]
],
[
[
"batch = next(iter(train_dataset_tf))\nbatch[0]",
"_____no_output_____"
]
],
[
[
"We can see that the sum of `genres__nnzs` is equal to the shape of `genres__values`.",
"_____no_output_____"
]
],
[
[
"tf.reduce_sum(batch[0]['genres__nnzs'])",
"_____no_output_____"
]
],
[
[
"As each datapoint can have a different number of genres, it is more efficient to represent the genres as two flat tensors: One with the actual values (`genres__values`) and one with the length for each datapoint (`genres__nnzs`).",
"_____no_output_____"
]
],
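[
[
"As an illustration only (a sketch, not required for training), the two flat tensors can be combined into a `tf.RaggedTensor` to inspect the genres per datapoint; the shapes assumed here follow the batch shown above:\n\n```python\n# illustrative sketch: rebuild a ragged view of the genres from the two flat tensors\nvalues = tf.reshape(batch[0]['genres__values'], [-1])      # all genre IDs in the batch\nrow_lengths = tf.reshape(batch[0]['genres__nnzs'], [-1])   # number of genres per datapoint\ngenres_ragged = tf.RaggedTensor.from_row_lengths(values, row_lengths)\nprint(genres_ragged[:3])  # genres of the first three datapoints\n```",
"_____no_output_____"
]
],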
[
[
"del batch\ngc.collect()",
"_____no_output_____"
]
],
[
[
"### Defining Neural Network Architecture",
"_____no_output_____"
],
[
"We will define a common neural network architecture for tabular data.\n* Single-hot categorical features are fed into an Embedding Layer\n* Each value of a multi-hot categorical features is fed into an Embedding Layer and the multiple Embedding outputs are combined via averaging\n* The output of the Embedding Layers are concatenated\n* The concatenated layers are fed through multiple feed-forward layers (Dense Layers with ReLU activations)\n* The final output is a single number with sigmoid activation function",
"_____no_output_____"
],
[
"First, we will define some dictonary/lists for our network architecture.",
"_____no_output_____"
]
],
[
[
"inputs = {} # tf.keras.Input placeholders for each feature to be used\nemb_layers = []# output of all embedding layers, which will be concatenated",
"_____no_output_____"
]
],
[
[
"We create `tf.keras.Input` tensors for all 4 input features.",
"_____no_output_____"
]
],
[
[
"for col in CATEGORICAL_COLUMNS:\n inputs[col] = tf.keras.Input(\n name=col,\n dtype=tf.int32,\n shape=(1,)\n )\n# Note that we need two input tensors for multi-hot categorical features\nfor col in CATEGORICAL_MH_COLUMNS:\n inputs[col+'__values'] = tf.keras.Input(\n name=f\"{col}__values\", \n dtype=tf.int64, \n shape=(1,)\n )\n inputs[col+'__nnzs'] = tf.keras.Input(\n name=f\"{col}__nnzs\", \n dtype=tf.int64, \n shape=(1,)\n )",
"_____no_output_____"
]
],
[
[
"Next, we initialize Embedding Layers with `tf.feature_column.embedding_column`.",
"_____no_output_____"
]
],
[
[
"for col in CATEGORICAL_COLUMNS+CATEGORICAL_MH_COLUMNS:\n emb_layers.append(\n tf.feature_column.embedding_column(\n tf.feature_column.categorical_column_with_identity(\n col, \n EMBEDDING_TABLE_SHAPES[col][0] # Input dimension (vocab size)\n ), EMBEDDING_TABLE_SHAPES[col][1] # Embedding output dimension\n )\n )\nemb_layers",
"_____no_output_____"
]
],
[
[
"NVTabular implemented a custom TensorFlow layer `layers.DenseFeatures`, which takes as an input the different `tf.Keras.Input` and pre-initialized `tf.feature_column` and automatically concatenate them into a flat tensor. In the case of multi-hot categorical features, `DenseFeatures` organizes the inputs `__values` and `__nnzs` to define a `RaggedTensor` and combine them. `DenseFeatures` can handle numeric inputs, as well, but MovieLens does not provide numerical input features.",
"_____no_output_____"
]
],
[
[
"emb_layer = layers.DenseFeatures(emb_layers)\nx_emb_output = emb_layer(inputs)\nx_emb_output",
"_____no_output_____"
]
],
[
[
"We can see that the output shape of the concatenated layer is equal to the sum of the individual Embedding output dimensions (1040 = 16+512+512).\n",
"_____no_output_____"
]
],
[
[
"EMBEDDING_TABLE_SHAPES",
"_____no_output_____"
]
],
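[
[
"As a quick, optional sanity check (a sketch only), the width of the concatenated output can be recomputed from the embedding table shapes loaded above:\n\n```python\n# the concatenation width should equal the sum of the embedding output dimensions\nconcat_width = sum(dim for _, dim in EMBEDDING_TABLE_SHAPES.values())\nprint(concat_width)  # expected to match the last dimension of x_emb_output\n```",
"_____no_output_____"
]
],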
[
[
"We add multiple Dense Layers. Finally, we initialize the `tf.keras.Model` and add the optimizer.",
"_____no_output_____"
]
],
[
[
"x = tf.keras.layers.Dense(128, activation=\"relu\")(x_emb_output)\nx = tf.keras.layers.Dense(128, activation=\"relu\")(x)\nx = tf.keras.layers.Dense(128, activation=\"relu\")(x)\nx = tf.keras.layers.Dense(1, activation=\"sigmoid\", name=\"output\")(x)\n\nmodel = tf.keras.Model(inputs=inputs, outputs=x)\nmodel.compile('sgd', 'binary_crossentropy')",
"_____no_output_____"
],
[
"# You need to install the dependencies\ntf.keras.utils.plot_model(model)",
"_____no_output_____"
]
],
[
[
"### Training the deep learning model",
"_____no_output_____"
],
[
"We can train our model with `model.fit`. We need to use a Callback to add the validation dataloader.",
"_____no_output_____"
]
],
[
[
"validation_callback = KerasSequenceValidater(valid_dataset_tf)\n\nhistory = model.fit(train_dataset_tf, callbacks=[validation_callback], epochs=1)",
"611/611 [==============================] - 20s 27ms/step - loss: 0.6722\n{'val_loss': 0.6599317}\n"
],
[
"# print validation loss\nhistory.history",
"_____no_output_____"
],
[
"MODEL_NAME_TF = os.environ.get(\"MODEL_NAME_TF\", \"movielens_tf\")\nMODEL_PATH_TEMP_TF = os.path.join(MODEL_BASE_DIR, MODEL_NAME_TF, \"1/model.savedmodel\")\n\nmodel.save(MODEL_PATH_TEMP_TF)",
"INFO:tensorflow:Assets written to: /root/nvt-examples/movielens/data/model/movielens_tf/1/model.savedmodel/assets\n"
]
]
]
| [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
]
]
|
ec68e133f51f0474cda62c4ffaa33b8edec7388b | 163,332 | ipynb | Jupyter Notebook | P3-capstone-project/Capstone-Notebook.ipynb | suryasanchez/machine-learning-engineer-nanodegree | 8bb4c7b1258dd1aad95011d9fcb25546ed9d9324 | [
"MIT"
]
| 7 | 2020-11-29T02:33:23.000Z | 2021-06-28T04:45:31.000Z | P3-capstone-project/Capstone-Notebook.ipynb | suryasanchez/machine-learning-engineer-nanodegree | 8bb4c7b1258dd1aad95011d9fcb25546ed9d9324 | [
"MIT"
]
| null | null | null | P3-capstone-project/Capstone-Notebook.ipynb | suryasanchez/machine-learning-engineer-nanodegree | 8bb4c7b1258dd1aad95011d9fcb25546ed9d9324 | [
"MIT"
]
| 3 | 2020-09-24T18:26:36.000Z | 2021-06-30T10:55:26.000Z | 49.963903 | 15,440 | 0.665601 | [
[
[
"# Send custom emails to leads with Machine Learning\n### Capstone project - Machine Learning Engineer Nanodegree\n\n# 1. Definition\n\n## 1.1 Project Overview\n\nI am the founder of a digital agency called DeepIdea Lab. The services we offer are website creation, digital marketing, and graphic design. We have a partner who is sending us leads. The lead details include a description of the project each lead is looking for. This data is automatically pushed into our CRM. The next step is to send an email to the lead using one of our templates depending on the project’s description. However, we would like to automate this part. This is the topic of this capstone project: using machine learning to send a custom email to the lead based on the project’s description.\n\n## 1.2. Problem Statement\nSending a manually composed email to every lead is time-consuming. We already have a few email templates available to be sent. However, so far, someone needs to read the project description in order to choose the right template. This is a typical natural language processing problem.\n\n\n\n## 1.3 Metrics\nBased on the dataset we have, a decent evaluation metrics would be the accuracy of the trained model on the test data. We will get new project descriptions from new leads so the accuracy on test data is a good metric.\n\n# 2. Analysis\n\n## 2.1 Gathering data\n\nHere are the first steps to gather the data and create a dataset.\n\n* CRM connection settings\n* Check the format of the data from the API and explore the content\n* Create the dataset",
"_____no_output_____"
]
],
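[
[
"Before gathering the data, here is a minimal illustration of the accuracy metric described in section 1.3 (a sketch only, with placeholder labels):\n\n```python\nfrom sklearn.metrics import accuracy_score\n\n# placeholder labels, only to illustrate the metric\ny_true = [0, 2, 1, 3, 0, 0]  # true categories\ny_pred = [0, 2, 1, 2, 0, 3]  # categories predicted by the model\nprint(accuracy_score(y_true, y_pred))  # 4 correct out of 6 -> ~0.67\n```",
"_____no_output_____"
]
],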
[
[
"# import the required modules\nimport requests, json\nimport re\nimport pandas as pd\nimport numpy as np",
"_____no_output_____"
]
],
[
[
"### CRM connection settings\n\nThis is the configuration to connect to the API.",
"_____no_output_____"
]
],
[
[
"# enter your token, domain and view id in config.json.example and rename to config.json\nwith open('config.json', 'r') as f:\n config = json.load(f)\n\napi_token = config['CONFIG']['TOKEN']\ndomain = config['CONFIG']['FRESHSALES_DOMAIN']\nview_id = config['CONFIG']['VIEW_ID']",
"_____no_output_____"
],
[
"# define a function to connect to the Freshsales CRM API\ndef api_call(api_token = api_token, domain = domain, view_id = view_id, page_id_index = 1):\n api_url_base = 'https://{}.freshsales.io/api/'.format(domain)\n api_lead_request = 'leads/view/{}?page={}'.format(view_id, page_id_index)\n api_url = api_url_base + api_lead_request\n headers = {'Content-Type': 'application/json',\n 'Authorization': 'Token token={0}'.format(api_token)}\n response = requests.get(api_url, headers=headers)\n data = response.json()\n \n return data",
"_____no_output_____"
]
],
[
[
"### Check the format of the data from the API and explore the content\n\nBelow we navigate through the result of the CRM's API to see the format we get.",
"_____no_output_____"
]
],
[
[
"# explore the data\ndata = api_call()\n\n# data = json.dumps(data, indent=2)\n\nprint (data['leads'][22]['recent_note'])",
"Budget: 6000\n\nTitle: Création d'entreprise\n\nProjet de création site web e-commerce pour commercialiser.\nJe souhaiterais une étude tarifaire, mais si possible une maquette et avoir des informations dans les compétences suivantes:\n - technique (catalogue en ligne, gestion du panier client, gestion des comptes clients, paiements en ligne…),\n- partie graphique (logo, design, éléments graphiques…),\n- aspects juridiques (mentions obligatoires, protection des données, conditions générales de vente et d’utilisation…,\n- maintenance du site et connaître la réactivité en cas de problème.\n\nTime limit: 6month\n\n\n"
],
[
"# function to loop over all the leads and extract the notes\ndef get_leads_notes():\n\n leads_id = []\n leads_note = []\n\n # for every page of leads\n for i in range(api_call()['meta']['total_pages']):\n page_index = i + 1\n data = api_call(page_id_index = page_index)\n\n # for every lead on the page\n for i in range(len(data['leads'])):\n\n lead_id = data['leads'][i]['id']\n lead_note = data['leads'][i]['recent_note']\n\n if lead_note:\n # drop urls\n pattern = r'https?:\\/\\/(www\\.)?[-a-zA-Z0-9@:%._\\+~#=] \\\n {1,256}\\.[a-zA-Z0-9()]{1,6}\\b([-a-zA-Z0-9()@:%_\\+.~#?&//=]*)'\n match = re.match(pattern, lead_note)\n if match:\n lead_note = None\n\n if lead_note:\n leads_id.append(lead_id)\n leads_note.append(lead_note)\n \n return leads_id, leads_note",
"_____no_output_____"
]
],
[
[
"### Create the dataset\n\nWe create a DataFrame with all the project descriptions from the leads.",
"_____no_output_____"
]
],
[
[
"leads_id, leads_note = get_leads_notes()\n\nleads_data = pd.DataFrame(data={'id': leads_id, 'note': leads_note})",
"_____no_output_____"
],
[
"leads_data.head()",
"_____no_output_____"
],
[
"# see a sample of the data\ndata_sample = leads_data.iloc[50,1]\nprint(data_sample)",
"Budget: 50\n\nTitle: Création site internet photographe\n\nJe viens de crée un site internet avec wordpress et le thème Enfold. Je souhaiterai crée des galeries photos avec la possibilité de les commander en ligne directement.\nPar exemple : En ouvrant un album, nous voyons toutes les photos et sur chaque photos, nous pouvons cliquer sur panier et choisir le format désiré.\n\nTime limit: as_soon_as_possible\n\n\n"
]
],
[
[
"## 2.2 Data pre-processing\n\nThe raw data is not ready to be sent to an estimator yet. We need to process the data as follows:\n\n* Cleaning of the data\n* Export to CSV for labeling\n* Import the labeled CSV\n* Reduce the number of classes\n* Detect the language of the text\n* Keep only French text\n* Tokenization\n* Bag-of-Words features\n* Save the processed training dataset locally\n\n\n### Cleaning the data",
"_____no_output_____"
]
],
[
[
"def cleaning_data(data, symbols = False):\n data = re.sub(r'([a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+)', '', data) # Remove email\n data = data.replace('\\r', '').replace('\\n', ' ').replace(' ', ' ')\n data = re.sub(r'Budget: .* Title: ', '', data) # Remove \"Budget\" \n data = re.sub(r' Time limit: .*', '', data) # Remove \"Time limit\"\n data = re.sub(r' +', ' ', data) # Remove extra space\n \n if symbols == False:\n data = re.sub(r\"[^a-zA-ZÀ-ÿ]\", \" \", data.lower()) # Remove symbols and convert to lower case\n\n return data",
"_____no_output_____"
],
[
"# explore a sample of the data after cleaning\n# but keep the symbols (easier to read the text for labeling)\n\nprint(cleaning_data(data_sample, symbols = True))",
"Création site internet photographe Je viens de crée un site internet avec wordpress et le thème Enfold. Je souhaiterai crée des galeries photos avec la possibilité de les commander en ligne directement. Par exemple : En ouvrant un album, nous voyons toutes les photos et sur chaque photos, nous pouvons cliquer sur panier et choisir le format désiré.\n"
],
[
"r, c = leads_data.shape\nprint(\"There are {} rows and {} columns before cleaning the dataset.\".format(r, c))\n\n# drop if row is missing data\nclean_leads_data = leads_data.dropna()\nr, c = clean_leads_data.shape\nprint(\"There are {} rows and {} columns after dropping missing value in the dataset.\".format(r, c))\n\ndata = clean_leads_data\ndata.head()",
"There are 86 rows and 2 columns before cleaning the dataset.\nThere are 86 rows and 2 columns after dropping missing value in the dataset.\n"
],
[
"data_for_labeling = clean_leads_data\n\nfor index, row in data_for_labeling.iterrows():\n data_for_labeling.iloc[index, 1] = cleaning_data(row['note'], symbols = True)\n \ndata_for_labeling.head()",
"_____no_output_____"
]
],
[
[
"### Export to CSV for labeling\n\nWe wil export the dataset to a CSV file so we can manually label each entry with the correct category. ",
"_____no_output_____"
]
],
[
[
"import os\n\ndata_dir = './data' # The folder we will use for storing data\nif not os.path.exists(data_dir): # Make sure that the folder exists\n os.makedirs(data_dir)\n\ndata_for_labeling.to_csv(os.path.join(data_dir, 'leads.csv'), header=False, index=False)",
"_____no_output_____"
]
],
[
[
"After exporting the CSV, the database has been manually labeled with a spreadsheet and has been converted again to a new CSV file.\n\n### Import the labeled CSV",
"_____no_output_____"
]
],
[
[
"# add the name of columns\ncolumn_names = []\nfor column in data_for_labeling.columns:\n column_names.append(column)\ncolumn_names.append('category')\n\nlabeled_leads = pd.read_csv(os.path.join(data_dir, 'labeled_leads.csv'), header=None, names=column_names)\n\nlabeled_leads.head(5)",
"_____no_output_____"
]
],
[
[
"It is important to see if the number of values in each category is more or less equal. Otherwise it will create a bias during the training.",
"_____no_output_____"
]
],
[
[
"%config InlineBackend.figure_format = 'retina'\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"# see the distribution of the data\nlabeled_leads['category'].value_counts().plot(kind='bar', rot=0);",
"_____no_output_____"
]
],
[
[
"As we can see the category 'website' is much bigger than all the others.\nIn order to create better prediction without overfitting we need to make the entries in each category more even.\n\nLet's create new categories as follows:\n* website\n* maintenance + seo\n* migration\n* custom + application\n\nNote that we should also check the language of the project description. We will do that later.\n\n### Reduce the number of classes\n\nWe reduce the number of categories to 4 (instead of 6 appearing in the labeled CSV).",
"_____no_output_____"
]
],
[
[
"category_bundle = {'website' : 0,\n 'maintenance' : 1,\n 'seo' : 1,\n 'migration' : 2,\n 'custom' : 3,\n 'application' : 3}",
"_____no_output_____"
],
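[
"# A more idiomatic pandas alternative to the loop in the next cell (shown only as a commented\n# sketch and not executed here, because the loop below already performs the same mapping):\n# labeled_leads['category'] = labeled_leads['category'].map(category_bundle)",
"_____no_output_____"
],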
[
"for index, row in labeled_leads.iterrows():\n labeled_leads.iloc[index, 2] = category_bundle[row['category']]",
"_____no_output_____"
],
[
"print(labeled_leads['category'].value_counts())\nlabeled_leads['category'].value_counts().plot(kind='bar', rot=0);",
"0 46\n2 15\n3 9\n1 8\nName: category, dtype: int64\n"
]
],
[
[
"The distribution of the data is a bit better now since we bundle the categories together.\nLet's see below an extract of our new dataset with the new categories.",
"_____no_output_____"
]
],
[
[
"labeled_leads.head(10)",
"_____no_output_____"
]
],
[
[
"### Detect the language of the text\n\nNext step of the data cleaning is to verify if the data is only in one language and if not we only keep the main language for training.\n\nTo do so we use the module langdetect.",
"_____no_output_____"
]
],
[
[
"!pip install langdetect",
"Requirement already satisfied: langdetect in /home/ec2-user/anaconda3/envs/python3/lib/python3.6/site-packages (1.0.8)\nRequirement already satisfied: six in /home/ec2-user/anaconda3/envs/python3/lib/python3.6/site-packages (from langdetect) (1.11.0)\n\u001b[33mYou are using pip version 10.0.1, however version 20.0.2 is available.\nYou should consider upgrading via the 'pip install --upgrade pip' command.\u001b[0m\n"
],
[
"from langdetect import detect\n\nlang_iso = {\"ar\" : \"arabic\",\n \"da\" : \"danish\",\n \"nl\" : \"dutch\",\n \"en\" : \"english\",\n \"fi\" : \"finnish\",\n \"fr\" : \"french\",\n \"de\" : \"german\",\n \"hu\" : \"hungarian\",\n \"it\" : \"italian\",\n \"no\" : \"norwegian\",\n \"pt\" : \"portuguese\",\n \"ro\" : \"romanian\",\n \"ru\" : \"russian\",\n \"es\" : \"spanish\",\n \"sv\" : \"swedish\"}",
"_____no_output_____"
],
[
"labeled_leads['lang'] = np.nan\nfor index in range(labeled_leads.shape[0]):\n labeled_leads.loc[index, 'lang'] = lang_iso[detect(labeled_leads.loc[index, 'note'])]\n\nlabeled_leads.head()",
"_____no_output_____"
],
[
"print(labeled_leads['lang'].value_counts())\nlabeled_leads['lang'].value_counts().plot(kind='bar', rot=0);",
"french 70\nenglish 4\nitalian 2\ngerman 2\nName: lang, dtype: int64\n"
]
],
[
[
"We see that the main language is French. To do our training we will drop the other languages.\n\n### Keep only French text",
"_____no_output_____"
]
],
[
[
"# we keep only french text\nindexNames = labeled_leads[labeled_leads['lang'] != 'french'].index\nlabeled_leads.drop(indexNames , inplace=True)\n\nprint(labeled_leads['lang'].value_counts())\n\n#labeled_leads.head()",
"french 70\nName: lang, dtype: int64\n"
],
[
"# drop columns we don't need anymore\nlabeled_leads_out = labeled_leads[['category', 'note']].copy()\nlabeled_leads_out.head()",
"_____no_output_____"
]
],
[
[
"Now that we have a clean dataset with labeled classes, we can process the text.\n\n### Tokenization\n\nIt's pretty difficult for a ML algorithm to process raw texts.\n\nSo the method we are using is creating tokens from words in the text.",
"_____no_output_____"
]
],
[
[
"import nltk\nfrom nltk.corpus import stopwords\nfrom nltk.stem.porter import *\n\ndef lead_note_to_words(note):\n nltk.download(\"stopwords\", quiet=True)\n stemmer = PorterStemmer()\n \n data = cleaning_data(note) # Clean the text\n lang = lang_iso[detect(data)] # Detect the language of the text\n words = data.split() # Split string into words\n\n words = [w for w in words if w not in stopwords.words(lang)] # Remove stopwords\n words = [PorterStemmer().stem(w) for w in words] # stem\n\n return words",
"_____no_output_____"
],
[
"# remove the name of the columns\nlabeled_leads_out.to_csv(os.path.join(data_dir, 'labeled_leads_out.csv'), header=False, index=False)\ndata = pd.read_csv(os.path.join(data_dir, 'labeled_leads_out.csv'))\ndata_values = data.values",
"_____no_output_____"
],
[
"# labels in the 1st column\ndata_y = data_values[:, 0]\n\n# data in the 2nd column\ndata_X = [lead_note_to_words(note) for note in data_values[:, 1]]\n\nprint('See below an example of the tokens\\n')\nprint(data_X[0])",
"See below an example of the tokens\n\n['migrat', 'nouveau', 'site', 'jimdo', 'place', 'ancien', 'récupérat', 'adress', 'mail', 'bonjour', 'indépend', 'immobili', 'depui', 'an', 'depui', 'quelqu', 'moi', 'site', 'fonctionn', 'plu', 'copin', 'a', 'fait', 'gabarit', 'nouveau', 'site', 'jimdo', 'a', 'dû', 'partir', 'étranger', 'sort', 'fair', 'switcher', 'tout', 'cela', 'mainten', 'essayé', 'aussi', 'comprend', 'rien', 'dn', 'domain', 'registr', 'hébergeur', 'chinoi', 'sai', 'just', 'eurodn', 'code', 'jimdo', 'etc']\n"
],
[
"for i in range(len(data_X)):\n data_X[i] = ' '.join([str(elem) for elem in data_X[i]]) ",
"_____no_output_____"
],
[
"print('See below the same example after concatenate the tokens together again:\\n')\nprint(data_X[0])",
"See below the same example after concatenate the tokens together again:\n\nmigrat nouveau site jimdo place ancien récupérat adress mail bonjour indépend immobili depui an depui quelqu moi site fonctionn plu copin a fait gabarit nouveau site jimdo a dû partir étranger sort fair switcher tout cela mainten essayé aussi comprend rien dn domain registr hébergeur chinoi sai just eurodn code jimdo etc\n"
]
],
[
[
"### Bag-of-Words features\n\nWe will now use the Bag-of-Words method. Learn more about that on [Wikipedia](https://en.wikipedia.org/wiki/Bag-of-words_model).",
"_____no_output_____"
]
],
[
[
"from sklearn.feature_extraction.text import CountVectorizer\n\ndef extract_BoW_features(words_train, words_test, vocabulary_size=40):\n \"\"\"Extract Bag-of-Words for a given set of documents, already preprocessed into words.\"\"\"\n \n # Fit a vectorizer to training documents and use it to transform them\n vectorizer = CountVectorizer(max_features=vocabulary_size)\n features_train = vectorizer.fit_transform(words_train).toarray()\n\n # Apply the same vectorizer to transform the test documents (ignore unknown words)\n features_test = vectorizer.transform(words_test).toarray()\n \n vocabulary = vectorizer.vocabulary_\n \n # Return both the extracted features as well as the vocabulary\n return features_train, features_test, vocabulary",
"_____no_output_____"
],
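[
"# A minimal illustration of the Bag-of-Words idea, separate from the pipeline in this notebook:\n# the two short strings below are made-up examples, and each one becomes a vector of word counts\n# indexed by the vocabulary that the vectorizer learns.\ndemo_vectorizer = CountVectorizer()\ndemo_counts = demo_vectorizer.fit_transform(['site web wordpress', 'migration site serveur site'])\nprint(demo_vectorizer.vocabulary_)\nprint(demo_counts.toarray())",
"_____no_output_____"
],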
[
"from sklearn.model_selection import train_test_split\n\ntrain_X, test_X, train_y, test_y = train_test_split(data_X, data_y, test_size=0.15, random_state=42)",
"_____no_output_____"
],
[
"# Extract Bag-of-Words features for both training and test datasets\ntrain_X, test_X, vocabulary = extract_BoW_features(train_X, test_X)",
"_____no_output_____"
],
[
"print(vocabulary)",
"{'création': 8, 'site': 34, 'web': 37, 'actuel': 0, 'bonjour': 5, 'créer': 9, 'chez': 6, 'plu': 29, 'import': 19, 'lign': 24, 'besoin': 4, 'être': 39, 'sou': 35, 'gestion': 16, 'tout': 36, 'migrat': 26, 'internet': 21, 'afin': 1, 'fair': 14, 'lien': 23, 'serveur': 32, 'infomaniak': 20, 'wordpress': 38, 'page': 28, 'pouvoir': 30, 'merci': 25, 'mise': 27, 'jour': 22, 'base': 3, 'doit': 10, 'fichier': 15, 'si': 33, 'hébergement': 18, 'domain': 11, 'recherch': 31, 'donné': 12, 'avoir': 2, 'contenu': 7, 'email': 13, 'http': 17}\n"
],
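[
"# Optional helper (an addition, not part of the original run): invert the vocabulary so the\n# Bag-of-Words counts can be matched back to the words they stand for.\ninv_vocabulary = {index: word for word, index in vocabulary.items()}\nprint([(inv_vocabulary[i], int(count)) for i, count in enumerate(train_X[0]) if count > 0])",
"_____no_output_____"
],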
[
"print('See below an example of the encoded bag-of-Words\\n')\n\nprint(train_X[0][0:300])",
"See below an example of the encoded bag-of-Words\n\n[2 0 0 0 1 1 2 0 2 1 0 0 0 0 0 0 1 0 0 1 0 0 0 0 2 0 0 0 0 1 0 0 0 0 2 1 1\n 2 0 1]\n"
],
[
"# Then we split the training set further into training and validation sets.\ntrain_X, val_X, train_y, val_y = train_test_split(train_X, train_y, test_size=0.2, random_state=42)",
"_____no_output_____"
],
[
"print('The training dataset has {} notes with {} labels.'.format(len(train_X), len(train_y)))\nprint('The validation dataset has {} notes with {} labels.'.format(len(val_X), len(val_y)))\nprint('The test dataset has {} notes with {} labels.'.format(len(test_X), len(test_y)))",
"The training dataset has 46 notes with 46 labels.\nThe validation dataset has 12 notes with 12 labels.\nThe test dataset has 11 notes with 11 labels.\n"
]
],
[
[
"### Save the processed training dataset locally\n\nWe export the CSV files ready for training locally.",
"_____no_output_____"
]
],
[
[
"pd.DataFrame(test_X).to_csv(os.path.join(data_dir, 'test.csv'), header=False, index=False)\n\npd.concat([pd.DataFrame(val_y), pd.DataFrame(val_X)], axis=1).to_csv(os.path.join(data_dir, 'val.csv'),\n header=False, index=False)\npd.concat([pd.DataFrame(train_y), pd.DataFrame(train_X)], axis=1).to_csv(os.path.join(data_dir, 'train.csv'),\n header=False, index=False)",
"_____no_output_____"
]
],
[
[
"## 2.3 Training and testing the model\n\nWe can now focus on training the model and check its accuracy.\n\n* Uploading the training data\n* XGBoost model\n* Testing the model",
"_____no_output_____"
],
[
"### Uploading the training data\n\nWe need to upload the training data to the SageMaker default S3 bucket so that we can provide access to it while training our model.",
"_____no_output_____"
]
],
[
[
"import sagemaker\nfrom sagemaker.amazon.amazon_estimator import get_image_uri\n\nsession = sagemaker.Session()\n\nbucket = session.default_bucket()\nprefix = 'lead_nlp_automation'\n\nrole = sagemaker.get_execution_role()",
"_____no_output_____"
],
[
"test_location = session.upload_data(os.path.join(data_dir, 'test.csv'), key_prefix=prefix)\nval_location = session.upload_data(os.path.join(data_dir, 'val.csv'), key_prefix=prefix)\ntrain_location = session.upload_data(os.path.join(data_dir, 'train.csv'), key_prefix=prefix)",
"_____no_output_____"
]
],
[
[
"### XGBoost model\n\nIt's the XGBoost model that has been chosen. XGBoost outperforms several other well-known implementations of gradient tree boosting (Pafka, 2015).\n\nLet's create the xgb estimator first.",
"_____no_output_____"
]
],
[
[
"# As stated above, we use this utility method to construct the image name for the training container.\ncontainer = get_image_uri(session.boto_region_name, 'xgboost', '0.90-1')\n\n# Now that we know which container to use, we can construct the estimator object.\nxgb = sagemaker.estimator.Estimator(container, # The image name of the training container\n role, # The IAM role to use (our current role in this case)\n train_instance_count=1, # The number of instances to use for training\n train_instance_type='ml.m4.xlarge', # The type of instance to use for training\n output_path='s3://{}/{}/output'.format(session.default_bucket(), prefix),\n # Where to save the output (the model artifacts)\n sagemaker_session=session) # The current SageMaker session",
"_____no_output_____"
]
],
[
[
"Here we set the hyperparameters for training.",
"_____no_output_____"
]
],
[
[
"xgb.set_hyperparameters(max_depth=10,\n eta=0.42,\n gamma=0,\n min_child_weight=2,\n subsample=0.87,\n objective='multi:softmax',\n early_stopping_rounds=10,\n num_round=200,\n num_class=4)",
"_____no_output_____"
],
[
"# This is a wrapper around the location of our train and validation data,\n# to make sure that SageMakerknows our data is in csv format.\ns3_input_train = sagemaker.s3_input(s3_data=train_location, content_type='csv')\ns3_input_validation = sagemaker.s3_input(s3_data=val_location, content_type='csv')",
"_____no_output_____"
],
[
"xgb.fit({'train': s3_input_train, 'validation': s3_input_validation})",
"2020-03-29 03:10:59 Starting - Starting the training job...\n2020-03-29 03:11:00 Starting - Launching requested ML instances...\n2020-03-29 03:11:58 Starting - Preparing the instances for training......\n2020-03-29 03:12:52 Downloading - Downloading input data...\n2020-03-29 03:13:09 Training - Downloading the training image..\u001b[34mINFO:sagemaker-containers:Imported framework sagemaker_xgboost_container.training\u001b[0m\n\u001b[34mINFO:sagemaker-containers:Failed to parse hyperparameter objective value multi:softmax to Json.\u001b[0m\n\u001b[34mReturning the value itself\u001b[0m\n\u001b[34mINFO:sagemaker-containers:No GPUs detected (normal if no gpus installed)\u001b[0m\n\u001b[34mINFO:sagemaker_xgboost_container.training:Running XGBoost Sagemaker in algorithm mode\u001b[0m\n\u001b[34mINFO:root:Determined delimiter of CSV input is ','\u001b[0m\n\u001b[34mINFO:root:Determined delimiter of CSV input is ','\u001b[0m\n\u001b[34mINFO:root:Determined delimiter of CSV input is ','\u001b[0m\n\u001b[34m[03:13:43] 46x40 matrix with 1840 entries loaded from /opt/ml/input/data/train?format=csv&label_column=0&delimiter=,\u001b[0m\n\u001b[34mINFO:root:Determined delimiter of CSV input is ','\u001b[0m\n\u001b[34m[03:13:43] 12x40 matrix with 480 entries loaded from /opt/ml/input/data/validation?format=csv&label_column=0&delimiter=,\u001b[0m\n\u001b[34mINFO:root:Single node training.\u001b[0m\n\u001b[34mINFO:root:Train matrix has 46 rows\u001b[0m\n\u001b[34mINFO:root:Validation matrix has 12 rows\u001b[0m\n\u001b[34m[0]#011train-merror:0.282609#011validation-merror:0.416667\u001b[0m\n\u001b[34m[1]#011train-merror:0.304348#011validation-merror:0.416667\u001b[0m\n\u001b[34m[2]#011train-merror:0.23913#011validation-merror:0.416667\u001b[0m\n\u001b[34m[3]#011train-merror:0.173913#011validation-merror:0.416667\u001b[0m\n\u001b[34m[4]#011train-merror:0.195652#011validation-merror:0.333333\u001b[0m\n\u001b[34m[5]#011train-merror:0.130435#011validation-merror:0.333333\u001b[0m\n\u001b[34m[6]#011train-merror:0.152174#011validation-merror:0.333333\u001b[0m\n\u001b[34m[7]#011train-merror:0.130435#011validation-merror:0.333333\u001b[0m\n\u001b[34m[8]#011train-merror:0.130435#011validation-merror:0.333333\u001b[0m\n\u001b[34m[9]#011train-merror:0.130435#011validation-merror:0.333333\u001b[0m\n\u001b[34m[10]#011train-merror:0.130435#011validation-merror:0.333333\u001b[0m\n\u001b[34m[11]#011train-merror:0.130435#011validation-merror:0.333333\u001b[0m\n\u001b[34m[12]#011train-merror:0.130435#011validation-merror:0.333333\u001b[0m\n\u001b[34m[13]#011train-merror:0.130435#011validation-merror:0.416667\u001b[0m\n\u001b[34m[14]#011train-merror:0.130435#011validation-merror:0.333333\u001b[0m\n\n2020-03-29 03:13:53 Uploading - Uploading generated training model\n2020-03-29 03:13:53 Completed - Training job completed\nTraining seconds: 61\nBillable seconds: 61\n"
]
],
[
[
"### Testing the model\n\nNow that we have fitted our model to the training data, using the validation data to avoid overfitting, we can test our model. To do this we will make use of SageMaker's Batch Transform functionality. To start with, we need to build a transformer object from our fit model.",
"_____no_output_____"
]
],
[
[
"xgb_transformer = xgb.transformer(instance_count = 1, instance_type = 'ml.m4.xlarge')",
"_____no_output_____"
],
[
"xgb_transformer.transform(test_location, content_type='text/csv', split_type='Line')",
"_____no_output_____"
],
[
"xgb_transformer.wait()",
".....................\n\u001b[34m[2020-03-29 03:17:32 +0000] [15] [INFO] Starting gunicorn 19.10.0\u001b[0m\n\u001b[34m[2020-03-29 03:17:32 +0000] [15] [INFO] Listening at: unix:/tmp/gunicorn.sock (15)\u001b[0m\n\u001b[34m[2020-03-29 03:17:32 +0000] [15] [INFO] Using worker: gevent\u001b[0m\n\u001b[34m[2020-03-29 03:17:32 +0000] [22] [INFO] Booting worker with pid: 22\u001b[0m\n\u001b[34m[2020-03-29 03:17:32 +0000] [23] [INFO] Booting worker with pid: 23\u001b[0m\n\u001b[34m[2020-03-29 03:17:32 +0000] [27] [INFO] Booting worker with pid: 27\u001b[0m\n\u001b[34m[2020-03-29 03:17:32 +0000] [31] [INFO] Booting worker with pid: 31\u001b[0m\n\u001b[34m[2020-03-29:03:17:37:INFO] No GPUs detected (normal if no gpus installed)\u001b[0m\n\u001b[34m169.254.255.130 - - [29/Mar/2020:03:17:37 +0000] \"GET /ping HTTP/1.1\" 200 0 \"-\" \"Go-http-client/1.1\"\u001b[0m\n\u001b[34m169.254.255.130 - - [29/Mar/2020:03:17:37 +0000] \"GET /execution-parameters HTTP/1.1\" 200 84 \"-\" \"Go-http-client/1.1\"\u001b[0m\n\u001b[34m[2020-03-29:03:17:37:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[34m169.254.255.130 - - [29/Mar/2020:03:17:37 +0000] \"POST /invocations HTTP/1.1\" 200 44 \"-\" \"Go-http-client/1.1\"\u001b[0m\n\u001b[32m2020-03-29T03:17:37.751:[sagemaker logs]: MaxConcurrentTransforms=4, MaxPayloadInMB=6, BatchStrategy=MULTI_RECORD\u001b[0m\n"
],
[
"!aws s3 cp --recursive $xgb_transformer.output_path $data_dir",
"Completed 44 Bytes/44 Bytes (819 Bytes/s) with 1 file(s) remaining\rdownload: s3://sagemaker-eu-west-1-384935747368/sagemaker-xgboost-2020-03-29-03-14-11-704/test.csv.out to data/test.csv.out\r\n"
],
[
"pred_y = pd.read_csv(os.path.join(data_dir, 'test.csv.out'), header=None)",
"_____no_output_____"
]
],
[
[
"Let's check below the accuracy of the model.",
"_____no_output_____"
]
],
[
[
"from sklearn.metrics import accuracy_score\n\nground = test_y.astype(int)\nresults = pred_y.values.flatten().astype(int)\n\naccuracy_score(ground, results)",
"_____no_output_____"
],
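[
"# The classes are imbalanced, so accuracy alone can be misleading. As a small addition to the\n# original analysis, a confusion matrix shows which categories get confused with each other\n# (rows = true labels, columns = predicted labels).\nfrom sklearn.metrics import confusion_matrix\nprint(confusion_matrix(ground, results))",
"_____no_output_____"
],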
[
"## print out the array of predicted and true labels\nprint('\\nPredicted class labels: ')\nprint(results)\nprint('\\nTrue class labels: ')\nprint(ground)",
"\nPredicted class labels: \n[1 0 3 2 2 0 0 0 3 0 0]\n\nTrue class labels: \n[0 0 3 0 1 1 0 2 0 0 0]\n"
]
],
[
[
"As we have a very small dataset it is interesting to check if the data of the validation set is well mixed.",
"_____no_output_____"
]
],
[
[
"# check if the validation dataset is mixed\nprint('\\nValidation class labels: ')\nprint(val_y.astype(int))",
"\nValidation class labels: \n[3 0 0 3 0 0 0 0 0 2 1 0]\n"
]
],
[
[
"## 2.4 Tuning the Hyperparameters\n\n* Hyperparameter Tuner\n* Testing the model\n\n### Hyperparameter Tuner\n\nLet's try to launch a hyperparameters' tuning job to see if it can improve our model's performance.",
"_____no_output_____"
]
],
[
[
"from sagemaker.tuner import IntegerParameter, ContinuousParameter, HyperparameterTuner\n\nxgb_hyperparameter_tuner = HyperparameterTuner(estimator = xgb, # The estimator object to use as the basis\n objective_metric_name = 'validation:merror', # The metric to compare\n objective_type = 'Minimize', # Minimize or maximize the metric\n max_jobs = 20, # The total number of models to train\n max_parallel_jobs = 3, # The number of models to train in parallel\n hyperparameter_ranges = {\n 'max_depth': IntegerParameter(3, 13),\n 'eta' : ContinuousParameter(0.05, 0.6),\n 'min_child_weight': IntegerParameter(2, 8),\n 'subsample': ContinuousParameter(0.5, 0.9),\n 'gamma': ContinuousParameter(0, 10),\n })",
"_____no_output_____"
],
[
"xgb_hyperparameter_tuner.fit({'train': s3_input_train, 'validation': s3_input_validation})\nxgb_hyperparameter_tuner.wait()",
"...................................................................................................................................................................................................................................................................................!\n"
],
[
"xgb_hyperparameter_tuner.best_training_job()",
"_____no_output_____"
],
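[
"# Optional (a sketch added here, assuming the SageMaker Python SDK version used in this notebook):\n# the tuner's analytics() helper returns a dataframe with one row per training job, which makes it\n# easy to inspect the whole sweep rather than only the best job.\ntuner_metrics = xgb_hyperparameter_tuner.analytics().dataframe()\ntuner_metrics.sort_values('FinalObjectiveValue').head()",
"_____no_output_____"
],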
[
"xgb_attached = sagemaker.estimator.Estimator.attach(xgb_hyperparameter_tuner.best_training_job())",
"2020-03-29 01:33:16 Starting - Preparing the instances for training\n2020-03-29 01:33:16 Downloading - Downloading input data\n2020-03-29 01:33:16 Training - Training image download completed. Training in progress.\n2020-03-29 01:33:16 Uploading - Uploading generated training model\n2020-03-29 01:33:16 Completed - Training job completed\u001b[34mINFO:sagemaker-containers:Imported framework sagemaker_xgboost_container.training\u001b[0m\n\u001b[34mINFO:sagemaker-containers:Failed to parse hyperparameter _tuning_objective_metric value validation:merror to Json.\u001b[0m\n\u001b[34mReturning the value itself\u001b[0m\n\u001b[34mINFO:sagemaker-containers:Failed to parse hyperparameter objective value multi:softmax to Json.\u001b[0m\n\u001b[34mReturning the value itself\u001b[0m\n\u001b[34mINFO:sagemaker-containers:No GPUs detected (normal if no gpus installed)\u001b[0m\n\u001b[34mINFO:sagemaker_xgboost_container.training:Running XGBoost Sagemaker in algorithm mode\u001b[0m\n\u001b[34mINFO:root:Determined delimiter of CSV input is ','\u001b[0m\n\u001b[34mINFO:root:Determined delimiter of CSV input is ','\u001b[0m\n\u001b[34mINFO:root:Determined delimiter of CSV input is ','\u001b[0m\n\u001b[34m[01:33:06] 46x40 matrix with 1840 entries loaded from /opt/ml/input/data/train?format=csv&label_column=0&delimiter=,\u001b[0m\n\u001b[34mINFO:root:Determined delimiter of CSV input is ','\u001b[0m\n\u001b[34m[01:33:06] 12x40 matrix with 480 entries loaded from /opt/ml/input/data/validation?format=csv&label_column=0&delimiter=,\u001b[0m\n\u001b[34mINFO:root:Single node training.\u001b[0m\n\u001b[34mINFO:root:Setting up HPO optimized metric to be : merror\u001b[0m\n\u001b[34mINFO:root:Train matrix has 46 rows\u001b[0m\n\u001b[34mINFO:root:Validation matrix has 12 rows\u001b[0m\n\u001b[34m[0]#011train-merror:0.478261#011validation-merror:0.333333\u001b[0m\n\u001b[34m[1]#011train-merror:0.478261#011validation-merror:0.333333\u001b[0m\n\u001b[34m[2]#011train-merror:0.478261#011validation-merror:0.333333\u001b[0m\n\u001b[34m[3]#011train-merror:0.478261#011validation-merror:0.333333\u001b[0m\n\u001b[34m[4]#011train-merror:0.478261#011validation-merror:0.333333\u001b[0m\n\u001b[34m[5]#011train-merror:0.478261#011validation-merror:0.333333\u001b[0m\n\u001b[34m[6]#011train-merror:0.478261#011validation-merror:0.333333\u001b[0m\n\u001b[34m[7]#011train-merror:0.478261#011validation-merror:0.333333\u001b[0m\n\u001b[34m[8]#011train-merror:0.478261#011validation-merror:0.333333\u001b[0m\n\u001b[34m[9]#011train-merror:0.478261#011validation-merror:0.333333\u001b[0m\n\u001b[34m[10]#011train-merror:0.478261#011validation-merror:0.333333\u001b[0m\nTraining seconds: 62\nBillable seconds: 62\n"
],
[
"xgb_transformer = xgb_attached.transformer(instance_count = 1, instance_type = 'ml.m4.xlarge')",
"_____no_output_____"
],
[
"xgb_transformer.transform(test_location, content_type='text/csv', split_type='Line')\nxgb_transformer.wait()",
"......................\u001b[34m[2020-03-29 01:39:53 +0000] [14] [INFO] Starting gunicorn 19.10.0\u001b[0m\n\u001b[34m[2020-03-29 01:39:53 +0000] [14] [INFO] Listening at: unix:/tmp/gunicorn.sock (14)\u001b[0m\n\u001b[34m[2020-03-29 01:39:53 +0000] [14] [INFO] Using worker: gevent\u001b[0m\n\u001b[34m[2020-03-29 01:39:53 +0000] [21] [INFO] Booting worker with pid: 21\u001b[0m\n\u001b[34m[2020-03-29 01:39:53 +0000] [22] [INFO] Booting worker with pid: 22\u001b[0m\n\u001b[34m[2020-03-29 01:39:53 +0000] [26] [INFO] Booting worker with pid: 26\u001b[0m\n\u001b[34m[2020-03-29 01:39:53 +0000] [27] [INFO] Booting worker with pid: 27\u001b[0m\n\u001b[34m[2020-03-29:01:40:15:INFO] No GPUs detected (normal if no gpus installed)\u001b[0m\n\u001b[34m169.254.255.130 - - [29/Mar/2020:01:40:15 +0000] \"GET /ping HTTP/1.1\" 200 0 \"-\" \"Go-http-client/1.1\"\u001b[0m\n\u001b[35m[2020-03-29:01:40:15:INFO] No GPUs detected (normal if no gpus installed)\u001b[0m\n\u001b[35m169.254.255.130 - - [29/Mar/2020:01:40:15 +0000] \"GET /ping HTTP/1.1\" 200 0 \"-\" \"Go-http-client/1.1\"\u001b[0m\n\u001b[34m[2020-03-29:01:40:15:INFO] No GPUs detected (normal if no gpus installed)\u001b[0m\n\u001b[34m169.254.255.130 - - [29/Mar/2020:01:40:15 +0000] \"GET /execution-parameters HTTP/1.1\" 200 84 \"-\" \"Go-http-client/1.1\"\u001b[0m\n\u001b[34m[2020-03-29:01:40:16:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[34m169.254.255.130 - - [29/Mar/2020:01:40:16 +0000] \"POST /invocations HTTP/1.1\" 200 44 \"-\" \"Go-http-client/1.1\"\u001b[0m\n\u001b[35m[2020-03-29:01:40:15:INFO] No GPUs detected (normal if no gpus installed)\u001b[0m\n\u001b[35m169.254.255.130 - - [29/Mar/2020:01:40:15 +0000] \"GET /execution-parameters HTTP/1.1\" 200 84 \"-\" \"Go-http-client/1.1\"\u001b[0m\n\u001b[35m[2020-03-29:01:40:16:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[35m169.254.255.130 - - [29/Mar/2020:01:40:16 +0000] \"POST /invocations HTTP/1.1\" 200 44 \"-\" \"Go-http-client/1.1\"\u001b[0m\n\u001b[32m2020-03-29T01:40:15.941:[sagemaker logs]: MaxConcurrentTransforms=4, MaxPayloadInMB=6, BatchStrategy=MULTI_RECORD\u001b[0m\n\n"
],
[
"!aws s3 cp --recursive $xgb_transformer.output_path $data_dir",
"Completed 44 Bytes/44 Bytes (799 Bytes/s) with 1 file(s) remaining\rdownload: s3://sagemaker-eu-west-1-384935747368/sagemaker-xgboost-200329-0113-018-b6165-2020-03-29-01-36-33-248/test.csv.out to data/test.csv.out\r\n"
],
[
"pred_y = pd.read_csv(os.path.join(data_dir, 'test.csv.out'), header=None)",
"_____no_output_____"
]
],
[
[
"### Testing the model\n\nLet's discover now the accuracy of the best trained model below.",
"_____no_output_____"
]
],
[
[
"from sklearn.metrics import accuracy_score\n\nground = test_y.astype(int)\nresults = pred_y.values.flatten().astype(int)\n\naccuracy_score(ground, results)",
"_____no_output_____"
],
[
"## print out the array of predicted and true labels\nprint('\\nPredicted class labels: ')\nprint(results)\nprint('\\nTrue class labels: ')\nprint(ground)",
"\nPredicted class labels: \n[0 0 0 0 0 0 0 0 0 0 0]\n\nTrue class labels: \n[0 0 3 0 1 1 0 2 0 0 0]\n"
],
[
"# check if the validation dataset is mixed\nprint('\\nValidation class labels: ')\nprint(val_y.astype(int))",
"\nValidation class labels: \n[3 0 0 3 0 0 0 0 0 2 1 0]\n"
]
],
[
[
"## 2.5 Binary classification\n\n* Two categories\n* Training of the model\n* Testing the model\n* Tuning the Hyperparameters\n\n### Two categories\n\nThe accuracy of the last model is 63%. Therefore it could be improved. The reason is probably because our dataset is very small. A way to improve the accuracy is to reduce the number of categories. Here we decide to reduce to 2 categories.",
"_____no_output_____"
]
],
[
[
"# convert class 1, 2, 3 to 1\n\nfor i in range(labeled_leads_out.shape[0]):\n if labeled_leads_out.iloc[i,0] != 0:\n labeled_leads_out.iloc[i,0] = 1",
"_____no_output_____"
],
[
"print(labeled_leads_out['category'].value_counts())\nlabeled_leads_out['category'].value_counts().plot(kind='bar', rot=0);",
"0 40\n1 30\nName: category, dtype: int64\n"
]
],
[
[
"You can see above that now the number in each category is almost equal.\n\nThe steps below are the same as before but in a binary way.",
"_____no_output_____"
]
],
[
[
"# remove the name of the columns\nlabeled_leads_out.to_csv(os.path.join(data_dir, 'labeled_leads_out.csv'), header=False, index=False)\ndata = pd.read_csv(os.path.join(data_dir, 'labeled_leads_out.csv'))\ndata_values = data.values",
"_____no_output_____"
],
[
"# labels in the 1st column\ndata_y = data_values[:, 0]\n\n# data in the 2nd column\ndata_X = [lead_note_to_words(note) for note in data_values[:, 1]]\n\nfor i in range(len(data_X)):\n data_X[i] = ' '.join([str(elem) for elem in data_X[i]]) ",
"_____no_output_____"
],
[
"train_X, test_X, train_y, test_y = train_test_split(data_X, data_y, test_size=0.2, random_state=42)",
"_____no_output_____"
],
[
"# Extract Bag of Words features for both training and test datasets\ntrain_X, test_X, vocabulary = extract_BoW_features(train_X, test_X)",
"_____no_output_____"
],
[
"# Then we split the training set further into training and validation sets.\ntrain_X, val_X, train_y, val_y = train_test_split(train_X, train_y, test_size=0.2, random_state=42)",
"_____no_output_____"
],
[
"print('The training dataset has {} notes with {} labels.'.format(len(train_X), len(train_y)))\nprint('The validation dataset has {} notes with {} labels.'.format(len(val_X), len(val_y)))\nprint('The test dataset has {} notes with {} labels.'.format(len(test_X), len(test_y)))",
"The training dataset has 44 notes with 44 labels.\nThe validation dataset has 11 notes with 11 labels.\nThe test dataset has 14 notes with 14 labels.\n"
],
[
"pd.DataFrame(test_X).to_csv(os.path.join(data_dir, 'test.csv'), header=False, index=False)\n\npd.concat([pd.DataFrame(val_y), pd.DataFrame(val_X)], axis=1).to_csv(os.path.join(data_dir, 'val.csv'),\n header=False, index=False)\npd.concat([pd.DataFrame(train_y), pd.DataFrame(train_X)], axis=1).to_csv(os.path.join(data_dir, 'train.csv'),\n header=False, index=False)",
"_____no_output_____"
]
],
[
[
"### Training of the model\n\nWe start again our training this time with the new binary labels.",
"_____no_output_____"
]
],
[
[
"test_location = session.upload_data(os.path.join(data_dir, 'test.csv'), key_prefix=prefix)\nval_location = session.upload_data(os.path.join(data_dir, 'val.csv'), key_prefix=prefix)\ntrain_location = session.upload_data(os.path.join(data_dir, 'train.csv'), key_prefix=prefix)",
"_____no_output_____"
],
[
"# As stated above, we use this utility method to construct the image name for the training container.\ncontainer = get_image_uri(session.boto_region_name, 'xgboost', '0.90-1')\n\n# Now that we know which container to use, we can construct the estimator object.\nxgb = sagemaker.estimator.Estimator(container, # The image name of the training container\n role, # The IAM role to use (our current role in this case)\n train_instance_count=1, # The number of instances to use for training\n train_instance_type='ml.m4.xlarge', # The type of instance to use for training\n output_path='s3://{}/{}/output'.format(session.default_bucket(), prefix),\n # Where to save the output (the model artifacts)\n sagemaker_session=session) # The current SageMaker session",
"_____no_output_____"
],
[
"xgb.set_hyperparameters(max_depth=10,\n eta=0.6,\n gamma=0.01,\n min_child_weight=5,\n subsample=0.5,\n objective='reg:squarederror',\n early_stopping_rounds=10,\n num_round=300)",
"_____no_output_____"
],
[
"# This is a wrapper around the location of our train and validation data, to make sure that SageMaker\n# knows our data is in csv format.\ns3_input_train = sagemaker.s3_input(s3_data=train_location, content_type='csv')\ns3_input_validation = sagemaker.s3_input(s3_data=val_location, content_type='csv')",
"_____no_output_____"
],
[
"xgb.fit({'train': s3_input_train, 'validation': s3_input_validation})",
"2020-03-29 01:40:48 Starting - Starting the training job...\n2020-03-29 01:40:49 Starting - Launching requested ML instances...\n2020-03-29 01:41:46 Starting - Preparing the instances for training......\n2020-03-29 01:42:36 Downloading - Downloading input data...\n2020-03-29 01:42:53 Training - Downloading the training image..\u001b[34mINFO:sagemaker-containers:Imported framework sagemaker_xgboost_container.training\u001b[0m\n\u001b[34mINFO:sagemaker-containers:Failed to parse hyperparameter objective value reg:squarederror to Json.\u001b[0m\n\u001b[34mReturning the value itself\u001b[0m\n\u001b[34mINFO:sagemaker-containers:No GPUs detected (normal if no gpus installed)\u001b[0m\n\u001b[34mINFO:sagemaker_xgboost_container.training:Running XGBoost Sagemaker in algorithm mode\u001b[0m\n\u001b[34mINFO:root:Determined delimiter of CSV input is ','\u001b[0m\n\u001b[34mINFO:root:Determined delimiter of CSV input is ','\u001b[0m\n\u001b[34mINFO:root:Determined delimiter of CSV input is ','\u001b[0m\n\u001b[34m[01:43:27] 44x40 matrix with 1760 entries loaded from /opt/ml/input/data/train?format=csv&label_column=0&delimiter=,\u001b[0m\n\u001b[34mINFO:root:Determined delimiter of CSV input is ','\u001b[0m\n\u001b[34m[01:43:27] 11x40 matrix with 440 entries loaded from /opt/ml/input/data/validation?format=csv&label_column=0&delimiter=,\u001b[0m\n\u001b[34mINFO:root:Single node training.\u001b[0m\n\u001b[34mINFO:root:Train matrix has 44 rows\u001b[0m\n\u001b[34mINFO:root:Validation matrix has 11 rows\u001b[0m\n\u001b[34m[0]#011train-rmse:0.497137#011validation-rmse:0.515278\u001b[0m\n\u001b[34m[1]#011train-rmse:0.466174#011validation-rmse:0.522416\u001b[0m\n\u001b[34m[2]#011train-rmse:0.471392#011validation-rmse:0.539135\u001b[0m\n\u001b[34m[3]#011train-rmse:0.444671#011validation-rmse:0.531298\u001b[0m\n\u001b[34m[4]#011train-rmse:0.439342#011validation-rmse:0.547014\u001b[0m\n\u001b[34m[5]#011train-rmse:0.419185#011validation-rmse:0.510564\u001b[0m\n\u001b[34m[6]#011train-rmse:0.41716#011validation-rmse:0.475217\u001b[0m\n\u001b[34m[7]#011train-rmse:0.413058#011validation-rmse:0.473305\u001b[0m\n\u001b[34m[8]#011train-rmse:0.378226#011validation-rmse:0.428544\u001b[0m\n\u001b[34m[9]#011train-rmse:0.379991#011validation-rmse:0.429384\u001b[0m\n\u001b[34m[10]#011train-rmse:0.366998#011validation-rmse:0.45817\u001b[0m\n\u001b[34m[11]#011train-rmse:0.368964#011validation-rmse:0.488108\u001b[0m\n\u001b[34m[12]#011train-rmse:0.367791#011validation-rmse:0.484734\u001b[0m\n\u001b[34m[13]#011train-rmse:0.370061#011validation-rmse:0.493689\u001b[0m\n\u001b[34m[14]#011train-rmse:0.362424#011validation-rmse:0.480047\u001b[0m\n\u001b[34m[15]#011train-rmse:0.353324#011validation-rmse:0.506803\u001b[0m\n\u001b[34m[16]#011train-rmse:0.343148#011validation-rmse:0.475295\u001b[0m\n\u001b[34m[17]#011train-rmse:0.348827#011validation-rmse:0.476543\u001b[0m\n\u001b[34m[18]#011train-rmse:0.34175#011validation-rmse:0.489671\u001b[0m\n\n2020-03-29 01:43:37 Uploading - Uploading generated training model\n2020-03-29 01:43:37 Completed - Training job completed\nTraining seconds: 61\nBillable seconds: 61\n"
]
],
[
[
"### Testing the model\n\nWe test the new model accuracy.",
"_____no_output_____"
]
],
[
[
"xgb_transformer = xgb.transformer(instance_count = 1, instance_type = 'ml.m4.xlarge')",
"_____no_output_____"
],
[
"xgb_transformer.transform(test_location, content_type='text/csv', split_type='Line')",
"_____no_output_____"
],
[
"xgb_transformer.wait()",
"........................\u001b[34m[2020-03-29 01:47:44 +0000] [15] [INFO] Starting gunicorn 19.10.0\u001b[0m\n\u001b[34m[2020-03-29 01:47:44 +0000] [15] [INFO] Listening at: unix:/tmp/gunicorn.sock (15)\u001b[0m\n\u001b[34m[2020-03-29 01:47:44 +0000] [15] [INFO] Using worker: gevent\u001b[0m\n\u001b[34m[2020-03-29 01:47:44 +0000] [22] [INFO] Booting worker with pid: 22\u001b[0m\n\u001b[34m[2020-03-29 01:47:44 +0000] [23] [INFO] Booting worker with pid: 23\u001b[0m\n\u001b[34m[2020-03-29 01:47:44 +0000] [24] [INFO] Booting worker with pid: 24\u001b[0m\n\u001b[34m[2020-03-29 01:47:44 +0000] [31] [INFO] Booting worker with pid: 31\u001b[0m\n\u001b[34m[2020-03-29:01:48:05:INFO] No GPUs detected (normal if no gpus installed)\u001b[0m\n\u001b[34m169.254.255.130 - - [29/Mar/2020:01:48:05 +0000] \"GET /ping HTTP/1.1\" 200 0 \"-\" \"Go-http-client/1.1\"\u001b[0m\n\u001b[34m[2020-03-29:01:48:05:INFO] No GPUs detected (normal if no gpus installed)\u001b[0m\n\u001b[34m169.254.255.130 - - [29/Mar/2020:01:48:05 +0000] \"GET /execution-parameters HTTP/1.1\" 200 84 \"-\" \"Go-http-client/1.1\"\u001b[0m\n\u001b[34m[2020-03-29:01:48:05:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[34m169.254.255.130 - - [29/Mar/2020:01:48:05 +0000] \"POST /invocations HTTP/1.1\" 200 266 \"-\" \"Go-http-client/1.1\"\u001b[0m\n\u001b[35m[2020-03-29:01:48:05:INFO] No GPUs detected (normal if no gpus installed)\u001b[0m\n\u001b[35m169.254.255.130 - - [29/Mar/2020:01:48:05 +0000] \"GET /ping HTTP/1.1\" 200 0 \"-\" \"Go-http-client/1.1\"\u001b[0m\n\u001b[35m[2020-03-29:01:48:05:INFO] No GPUs detected (normal if no gpus installed)\u001b[0m\n\u001b[35m169.254.255.130 - - [29/Mar/2020:01:48:05 +0000] \"GET /execution-parameters HTTP/1.1\" 200 84 \"-\" \"Go-http-client/1.1\"\u001b[0m\n\u001b[35m[2020-03-29:01:48:05:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[35m169.254.255.130 - - [29/Mar/2020:01:48:05 +0000] \"POST /invocations HTTP/1.1\" 200 266 \"-\" \"Go-http-client/1.1\"\u001b[0m\n\u001b[32m2020-03-29T01:48:05.620:[sagemaker logs]: MaxConcurrentTransforms=4, MaxPayloadInMB=6, BatchStrategy=MULTI_RECORD\u001b[0m\n\n"
],
[
"!aws s3 cp --recursive $xgb_transformer.output_path $data_dir",
"Completed 266 Bytes/266 Bytes (4.5 KiB/s) with 1 file(s) remaining\rdownload: s3://sagemaker-eu-west-1-384935747368/sagemaker-xgboost-2020-03-29-01-44-00-182/test.csv.out to data/test.csv.out\r\n"
],
[
"pred_y = pd.read_csv(os.path.join(data_dir, 'test.csv.out'), header=None)",
"_____no_output_____"
],
[
"from sklearn.metrics import accuracy_score\n\nground = test_y.astype(int)\nresults = pred_y.values.flatten().astype(int)\n\naccuracy_score(ground, results)",
"_____no_output_____"
],
[
"## print out the array of predicted and true labels\nprint('\\nPredicted class labels: ')\nprint(results)\nprint('\\nTrue class labels: ')\nprint(ground)",
"\nPredicted class labels: \n[0 0 0 0 0 0 0 0 1 0 0 0 0 0]\n\nTrue class labels: \n[0 0 1 0 1 1 0 1 0 0 0 1 1 1]\n"
],
[
"# check if the validation dataset is mixed\nprint('\\nValidation class labels: ')\nprint(val_y.astype(int))",
"\nValidation class labels: \n[0 1 0 1 1 0 0 1 0 0 1]\n"
]
],
[
[
"### Tuning the Hyperparameters\n\nLet's try to improve the hyperparameters of our new model.",
"_____no_output_____"
]
],
[
[
"from sagemaker.tuner import IntegerParameter, ContinuousParameter, HyperparameterTuner\n\nxgb_hyperparameter_tuner = HyperparameterTuner(estimator = xgb, # The estimator object to use as the basis\n objective_metric_name = 'validation:rmse', # The metric used to compare\n objective_type = 'Minimize', # Minimize or maximize the metric\n max_jobs = 20, # The total number of models to train\n max_parallel_jobs = 3, # The number of models to train in parallel\n hyperparameter_ranges = {\n 'max_depth': IntegerParameter(3, 13),\n 'eta' : ContinuousParameter(0.05, 0.6),\n 'min_child_weight': IntegerParameter(2, 8),\n 'subsample': ContinuousParameter(0.5, 0.9),\n 'gamma': ContinuousParameter(0, 10),\n })",
"_____no_output_____"
],
[
"xgb_hyperparameter_tuner.fit({'train': s3_input_train, 'validation': s3_input_validation})\nxgb_hyperparameter_tuner.wait()",
"..................................................................................................................................................................................................................................................................................!\n"
],
[
"xgb_hyperparameter_tuner.best_training_job()",
"_____no_output_____"
],
[
"xgb_attached = sagemaker.estimator.Estimator.attach(xgb_hyperparameter_tuner.best_training_job())",
"2020-03-29 02:10:03 Starting - Preparing the instances for training\n2020-03-29 02:10:03 Downloading - Downloading input data\n2020-03-29 02:10:03 Training - Training image download completed. Training in progress.\n2020-03-29 02:10:03 Uploading - Uploading generated training model\n2020-03-29 02:10:03 Completed - Training job completed\u001b[34mINFO:sagemaker-containers:Imported framework sagemaker_xgboost_container.training\u001b[0m\n\u001b[34mINFO:sagemaker-containers:Failed to parse hyperparameter _tuning_objective_metric value validation:rmse to Json.\u001b[0m\n\u001b[34mReturning the value itself\u001b[0m\n\u001b[34mINFO:sagemaker-containers:Failed to parse hyperparameter objective value reg:squarederror to Json.\u001b[0m\n\u001b[34mReturning the value itself\u001b[0m\n\u001b[34mINFO:sagemaker-containers:No GPUs detected (normal if no gpus installed)\u001b[0m\n\u001b[34mINFO:sagemaker_xgboost_container.training:Running XGBoost Sagemaker in algorithm mode\u001b[0m\n\u001b[34mINFO:root:Determined delimiter of CSV input is ','\u001b[0m\n\u001b[34mINFO:root:Determined delimiter of CSV input is ','\u001b[0m\n\u001b[34mINFO:root:Determined delimiter of CSV input is ','\u001b[0m\n\u001b[34m[02:09:52] 44x40 matrix with 1760 entries loaded from /opt/ml/input/data/train?format=csv&label_column=0&delimiter=,\u001b[0m\n\u001b[34mINFO:root:Determined delimiter of CSV input is ','\u001b[0m\n\u001b[34m[02:09:52] 11x40 matrix with 440 entries loaded from /opt/ml/input/data/validation?format=csv&label_column=0&delimiter=,\u001b[0m\n\u001b[34mINFO:root:Single node training.\u001b[0m\n\u001b[34mINFO:root:Setting up HPO optimized metric to be : rmse\u001b[0m\n\u001b[34mINFO:root:Train matrix has 44 rows\u001b[0m\n\u001b[34mINFO:root:Validation matrix has 11 rows\u001b[0m\n\u001b[34m[0]#011train-rmse:0.409378#011validation-rmse:0.450323\u001b[0m\n\u001b[34m[1]#011train-rmse:0.379292#011validation-rmse:0.466927\u001b[0m\n\u001b[34m[2]#011train-rmse:0.328662#011validation-rmse:0.457355\u001b[0m\n\u001b[34m[3]#011train-rmse:0.296896#011validation-rmse:0.433616\u001b[0m\n\u001b[34m[4]#011train-rmse:0.274303#011validation-rmse:0.408504\u001b[0m\n\u001b[34m[5]#011train-rmse:0.274371#011validation-rmse:0.408684\u001b[0m\n\u001b[34m[6]#011train-rmse:0.274487#011validation-rmse:0.408967\u001b[0m\n\u001b[34m[7]#011train-rmse:0.274327#011validation-rmse:0.408569\u001b[0m\n\u001b[34m[8]#011train-rmse:0.274046#011validation-rmse:0.407529\u001b[0m\n\u001b[34m[9]#011train-rmse:0.274054#011validation-rmse:0.40758\u001b[0m\n\u001b[34m[10]#011train-rmse:0.274907#011validation-rmse:0.409833\u001b[0m\n\u001b[34m[11]#011train-rmse:0.274646#011validation-rmse:0.409318\u001b[0m\n\u001b[34m[12]#011train-rmse:0.275205#011validation-rmse:0.410365\u001b[0m\n\u001b[34m[13]#011train-rmse:0.275284#011validation-rmse:0.410499\u001b[0m\n\u001b[34m[14]#011train-rmse:0.275048#011validation-rmse:0.41009\u001b[0m\n\u001b[34m[15]#011train-rmse:0.275171#011validation-rmse:0.410306\u001b[0m\n\u001b[34m[16]#011train-rmse:0.27415#011validation-rmse:0.408022\u001b[0m\n\u001b[34m[17]#011train-rmse:0.274259#011validation-rmse:0.408378\u001b[0m\n\u001b[34m[18]#011train-rmse:0.27416#011validation-rmse:0.408055\u001b[0m\nTraining seconds: 66\nBillable seconds: 66\n"
],
[
"xgb_transformer = xgb_attached.transformer(instance_count = 1, instance_type = 'ml.m4.xlarge')",
"_____no_output_____"
],
[
"xgb_transformer.transform(test_location, content_type='text/csv', split_type='Line')\nxgb_transformer.wait()",
".....................\n.\u001b[34m[2020-03-29 02:15:11 +0000] [15] [INFO] Starting gunicorn 19.10.0\u001b[0m\n\u001b[34m[2020-03-29 02:15:11 +0000] [15] [INFO] Listening at: unix:/tmp/gunicorn.sock (15)\u001b[0m\n\u001b[34m[2020-03-29 02:15:11 +0000] [15] [INFO] Using worker: gevent\u001b[0m\n\u001b[34m[2020-03-29 02:15:11 +0000] [22] [INFO] Booting worker with pid: 22\u001b[0m\n\u001b[34m[2020-03-29 02:15:11 +0000] [23] [INFO] Booting worker with pid: 23\u001b[0m\n\u001b[34m[2020-03-29 02:15:11 +0000] [27] [INFO] Booting worker with pid: 27\u001b[0m\n\u001b[35m[2020-03-29 02:15:11 +0000] [15] [INFO] Starting gunicorn 19.10.0\u001b[0m\n\u001b[35m[2020-03-29 02:15:11 +0000] [15] [INFO] Listening at: unix:/tmp/gunicorn.sock (15)\u001b[0m\n\u001b[35m[2020-03-29 02:15:11 +0000] [15] [INFO] Using worker: gevent\u001b[0m\n\u001b[35m[2020-03-29 02:15:11 +0000] [22] [INFO] Booting worker with pid: 22\u001b[0m\n\u001b[35m[2020-03-29 02:15:11 +0000] [23] [INFO] Booting worker with pid: 23\u001b[0m\n\u001b[35m[2020-03-29 02:15:11 +0000] [27] [INFO] Booting worker with pid: 27\u001b[0m\n\u001b[34m[2020-03-29 02:15:11 +0000] [31] [INFO] Booting worker with pid: 31\u001b[0m\n\u001b[35m[2020-03-29 02:15:11 +0000] [31] [INFO] Booting worker with pid: 31\u001b[0m\n\u001b[34m[2020-03-29:02:15:17:INFO] No GPUs detected (normal if no gpus installed)\u001b[0m\n\u001b[35m[2020-03-29:02:15:17:INFO] No GPUs detected (normal if no gpus installed)\u001b[0m\n\u001b[34m169.254.255.130 - - [29/Mar/2020:02:15:17 +0000] \"GET /ping HTTP/1.1\" 200 0 \"-\" \"Go-http-client/1.1\"\u001b[0m\n\u001b[34m[2020-03-29:02:15:17:INFO] No GPUs detected (normal if no gpus installed)\u001b[0m\n\u001b[34m169.254.255.130 - - [29/Mar/2020:02:15:17 +0000] \"GET /execution-parameters HTTP/1.1\" 200 84 \"-\" \"Go-http-client/1.1\"\u001b[0m\n\u001b[34m[2020-03-29:02:15:17:INFO] No GPUs detected (normal if no gpus installed)\u001b[0m\n\u001b[34m[2020-03-29:02:15:17:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[34m169.254.255.130 - - [29/Mar/2020:02:15:17 +0000] \"POST /invocations HTTP/1.1\" 200 269 \"-\" \"Go-http-client/1.1\"\u001b[0m\n\u001b[35m169.254.255.130 - - [29/Mar/2020:02:15:17 +0000] \"GET /ping HTTP/1.1\" 200 0 \"-\" \"Go-http-client/1.1\"\u001b[0m\n\u001b[35m[2020-03-29:02:15:17:INFO] No GPUs detected (normal if no gpus installed)\u001b[0m\n\u001b[35m169.254.255.130 - - [29/Mar/2020:02:15:17 +0000] \"GET /execution-parameters HTTP/1.1\" 200 84 \"-\" \"Go-http-client/1.1\"\u001b[0m\n\u001b[35m[2020-03-29:02:15:17:INFO] No GPUs detected (normal if no gpus installed)\u001b[0m\n\u001b[35m[2020-03-29:02:15:17:INFO] Determined delimiter of CSV input is ','\u001b[0m\n\u001b[35m169.254.255.130 - - [29/Mar/2020:02:15:17 +0000] \"POST /invocations HTTP/1.1\" 200 269 \"-\" \"Go-http-client/1.1\"\u001b[0m\n\u001b[32m2020-03-29T02:15:17.711:[sagemaker logs]: MaxConcurrentTransforms=4, MaxPayloadInMB=6, BatchStrategy=MULTI_RECORD\u001b[0m\n"
],
[
"!aws s3 cp --recursive $xgb_transformer.output_path $data_dir",
"Completed 269 Bytes/269 Bytes (4.9 KiB/s) with 1 file(s) remaining\rdownload: s3://sagemaker-eu-west-1-384935747368/sagemaker-xgboost-200329-0148-018-5e034-2020-03-29-02-11-47-612/test.csv.out to data/test.csv.out\r\n"
],
[
"pred_y = pd.read_csv(os.path.join(data_dir, 'test.csv.out'), header=None)",
"_____no_output_____"
]
],
[
[
"We can see below the accuracy after a hyperparameter tuning job of our new binary classification.",
"_____no_output_____"
]
],
[
[
"from sklearn.metrics import accuracy_score\n\nground = test_y.astype(int)\nresults = pred_y.values.flatten().astype(int)\n\naccuracy_score(ground, results)",
"_____no_output_____"
],
[
"## print out the array of predicted and true labels\nprint('\\nPredicted class labels: ')\nprint(results)\nprint('\\nTrue class labels: ')\nprint(ground)",
"\nPredicted class labels: \n[0 0 0 0 0 0 0 0 0 0 0 0 0 0]\n\nTrue class labels: \n[0 0 1 0 1 1 0 1 0 0 0 1 1 1]\n"
],
[
"# check if the validation dataset is mixed\nprint('\\nValidation class labels: ')\nprint(val_y.astype(int))",
"\nValidation class labels: \n[0 1 0 1 1 0 0 1 0 0 1]\n"
]
],
[
[
"## 2.6 Deploy the model\n\nWe will deploy now an API endpoint with our favourite trained model.",
"_____no_output_____"
]
],
[
[
"# Deploy the trained model\n\nxgb_predictor = xgb.deploy(initial_instance_count = 1, instance_type = 'ml.m4.xlarge')",
"WARNING:sagemaker:Using already existing model: sagemaker-xgboost-2020-03-29-03-10-59-734\n"
],
[
"xgb_predictor.endpoint",
"_____no_output_____"
],
[
"from sagemaker.predictor import csv_serializer\n\n# We need to tell the endpoint the format of the data we are sending\nxgb_predictor.content_type = 'text/csv'\nxgb_predictor.serializer = csv_serializer\n\npred_y = xgb_predictor.predict(pd.DataFrame(test_X).values).decode('utf-8')\n# predictions is currently a comma delimited string and so we would like to break it up\n# as a numpy array.\npred_y = np.fromstring(pred_y, sep=',')",
"_____no_output_____"
],
[
"from sklearn.metrics import accuracy_score\naccuracy_score(test_y.astype('int'), pred_y.astype('int'))",
"_____no_output_____"
]
],
[
[
"## 2.7 Web application\n\n* Input testing\n* AWS Lambda\n* HTML web app\n\n### Input testing\n\nWe will create now the web application that will format a new text and send to our model's API to predict the category.\n\nThe web application is a HTML file with some JS talking to the endpoint.\n\nThe code that formats the data is hosted on AWS Lambda. We need to place the vocabulary inside our lambda function as well.\n\nFirst let's try a test input string to our endpoint.",
"_____no_output_____"
]
],
[
[
"vocab = {'création': 9, 'site': 33, 'web': 36, 'actuel': 0, 'bonjour': 4,\n 'chez': 6, 'plu': 28, 'import': 18, 'lign': 23, 'besoin': 3, 'être': 39,\n 'sou': 34, 'gestion': 15, 'tout': 35, 'migrat': 25, 'internet': 20,\n 'afin': 1, 'fair': 13, 'lien': 22, 'serveur': 31, 'infomaniak': 19,\n 'wordpress': 37, 'page': 27, 'pouvoir': 29, 'merci': 24, 'mise': 26,\n 'jour': 21, 'base': 2, 'doit': 10, 'fichier': 14, 'si': 32,\n 'hébergement': 17, 'domain': 11, 'recherch': 30, 'donné': 12, 'contenu': 8,\n 'www': 38, 'ch': 5, 'com': 7, 'http': 16}",
"_____no_output_____"
],
[
"# Replace the content of test_X with any text you want to test\ntest_X = 'Hello, je me suis trompé de migration'\ntest_X = lead_note_to_words(test_X)\ntest_X = ' '.join([str(elem) for elem in test_X]) ",
"_____no_output_____"
],
[
"def bow_encoding(words, vocabulary):\n bow = [0] * len(vocabulary) # Start by setting the count for each word in the vocabulary to zero.\n for word in words.split(): # For each word in the string\n if word in vocabulary: # If the word is one that occurs in the vocabulary, increase its count.\n bow[vocabulary[word]] += 1\n return bow",
"_____no_output_____"
],
[
"bow = bow_encoding(test_X, vocab)",
"_____no_output_____"
],
[
"endpoint_name = xgb_predictor.endpoint",
"_____no_output_____"
],
[
"response = session.sagemaker_runtime_client.invoke_endpoint(\n EndpointName = endpoint_name,\n ContentType = 'text/csv',\n Body = ','.join([str(val) for val in bow]).encode('utf-8'))",
"_____no_output_____"
],
[
"print(response['Body'].read().decode('utf-8'))",
"0.8204420208930969\n"
]
],
[
[
"### AWS Lambda\n\nBelow is the lambda function. Keep in mind to import the folder 'nltk' and 'nltk_data'.\n\nYou also need to tell Lambda where is the nltk_data folder by adding an environment variable.\n\n```json\nNLTK_DATA ./nltk_data\n```\n\n",
"_____no_output_____"
],
[
"```python\n\nimport boto3\nimport re\nimport nltk\nfrom nltk.corpus import stopwords\nfrom nltk.stem.porter import *\nimport json\n\ndef cleaning_data(data, symbols = False):\n data = re.sub(r'([a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+)', '', data) # Remove email\n data = data.replace('\\r', '').replace('\\n', ' ').replace(' ', ' ')\n data = re.sub(r'Budget: .* Title: ', '', data) # Remove \"Budget\" \n data = re.sub(r' Time limit: .*', '', data) # and \"Time limit\"\n data = re.sub(r' +', ' ', data) # Remove extra space\n \n if symbols == False:\n data = re.sub(r\"[^a-zA-ZÀ-ÿ]\", \" \", data.lower()) # Remove symbols and convert to lower case\n\n return data\n\n\ndef lead_note_to_words(note):\n # nltk.download(\"stopwords\", quiet=True) # We already added the nltk_data folder manually\n stemmer = PorterStemmer()\n \n data = cleaning_data(note) # Clean the text\n words = data.split() # Split string into words\n\n words = [w for w in words if w not in stopwords.words('french')] # Remove stopwords\n words = [PorterStemmer().stem(w) for w in words] # stem\n\n return words\n \n\ndef bow_encoding(words, vocabulary):\n bow = [0] * len(vocabulary) # Start by setting the count for each word in the vocabulary to zero.\n for word in words.split(): # For each word in the string\n if word in vocabulary: # If the word is one that occurs in the vocabulary, increase its count.\n bow[vocabulary[word]] += 1\n return bow\n\n\ndef lambda_handler(event, context):\n\n vocab = {'création': 9, 'site': 33, 'web': 36, 'actuel': 0, 'bonjour': 4,\n 'chez': 6, 'plu': 28, 'import': 18, 'lign': 23, 'besoin': 3, 'être': 39,\n 'sou': 34, 'gestion': 15, 'tout': 35, 'migrat': 25, 'internet': 20,\n 'afin': 1, 'fair': 13, 'lien': 22, 'serveur': 31, 'infomaniak': 19,\n 'wordpress': 37, 'page': 27, 'pouvoir': 29, 'merci': 24, 'mise': 26,\n 'jour': 21, 'base': 2, 'doit': 10, 'fichier': 14, 'si': 32,\n 'hébergement': 17, 'domain': 11, 'recherch': 30, 'donné': 12, 'contenu': 8,\n 'www': 38, 'ch': 5, 'com': 7, 'http': 16}\n\n words = lead_note_to_words(event['body'])\n words = ' '.join([str(elem) for elem in words]) \n bow = bow_encoding(words, vocab)\n\n # The SageMaker runtime is what allows us to invoke the endpoint that we've created.\n runtime = boto3.Session().client('sagemaker-runtime')\n\n # Now we use the SageMaker runtime to invoke our endpoint, sending the review we were given\n response = runtime.invoke_endpoint(EndpointName = 'sagemaker-xgboost-2020-03-28-20-51-47-434',\n # The name of the endpoint we created\n ContentType = 'text/csv',\n Body = ','.join([str(val) for val in bow]).encode('utf-8'))\n\n # The response is an HTTP response whose body contains the result of our inference\n result = response['Body'].read().decode('utf-8')\n\n return {\n 'statusCode' : 200,\n 'headers' : { 'Content-Type' : 'text/plain', 'Access-Control-Allow-Origin' : '*' },\n 'body' : str(result)\n }\n\n```",
"_____no_output_____"
],
[
"### HTML web app\n\nTo try the app, download the file index.html and open it in a browser.\n\nIt should look like that:\n\n\n\nBelow an example of text you can try in the app:\n\n>Je cherche une personne pour terminer mon site créé avec Opencard vu que la personne qui s'en occupait n'a pas fait les choses correctement, soit, CHF 1'500.- de perdu…\nCe site est un site d'information concernant des supports publicitaires (briquets, stylos, T-shirts, affiches publicitaires, etc…) avec demandes d'offres uniquement.\nPhotos et descriptifs (couleurs, dimensions, etc…) de chaque produit.\nFormulaire de contact.\nCréer une liste de partenaires avec lien vers leur site.",
"_____no_output_____"
]
],
[
[
"# when the endpoint is not needed anymore\nxgb_predictor.delete_endpoint()",
"_____no_output_____"
]
],
[
[
"# 3. Conclusion\n\n* 3.1 Reflection\n* 3.2 Improvement\n\n## 3.1 Reflection\n\nThe task seemed quite simple in the beginning but actually became much more difficult because of the small size of the dataset.\n\nThe key to find a good result was to figure out what were the best parameters for the following variables:\n\n* Ratio training, validation and test dataset\n* Size of Bag-of-Words' vocabulary\n* Hyperparameters\n\nThe XGBoost model is working fine because we can see that the RMSE measure is decreasing towards the training set. In order to avoid overfitting we provide a validation set. However the model struggles to improve the RMSE of the validation set because of the small size of the dataset.\n\nWe decided to keep the multiclass trained model for our deployed endpoint API because compared to the binary classifier model it achieves higher accuracy.\n\n## 3.2 Improvement\n\nWithout any doubt, a way to improve our model's accuracy is to grow the dataset. In the future the CRM database will contains more and more leads so it will be possible to train again our model to get a better accuracy.\n\n## 3.3 Application\n\nThe API can now be used in the CRM to automate the sending of the emails. We can either choose to send an email automatically when the category is found or only when the model predicts 'website'. We can send the email manually when the predicted category is other than 'website' because of the low accuracy. The dataset contains many more projects about websites therefore the model is more accurate to label website projects.\n\n# 4. References\n\nPafka, S. (2015, May 19). *Benchmarking Random Forest Implementations* Retrieved from<br>\n http://datascience.la/benchmarking-random-forest-implementations/",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
]
]
|
ec68e3b316ef33f3dfb7c79ad21bdca97b4afd0c | 18,500 | ipynb | Jupyter Notebook | tutorials/chemistry/1_programmatic_approach.ipynb | Vismai-Khanderao/qiskit-tutorials | 4c68381b725da201d938b9bd213b83eee4f5dc5e | [
"Apache-2.0"
]
| null | null | null | tutorials/chemistry/1_programmatic_approach.ipynb | Vismai-Khanderao/qiskit-tutorials | 4c68381b725da201d938b9bd213b83eee4f5dc5e | [
"Apache-2.0"
]
| null | null | null | tutorials/chemistry/1_programmatic_approach.ipynb | Vismai-Khanderao/qiskit-tutorials | 4c68381b725da201d938b9bd213b83eee4f5dc5e | [
"Apache-2.0"
]
| null | null | null | 34.514925 | 646 | 0.606811 | [
[
[
"# _*Qiskit Chemistry, Programmatic Approach*_ \n\nThe latest version of this notebook is available on https://github.com/Qiskit/qiskit-tutorial.\n\n***\n## Contributors\nRichard Chen<sup>[1]</sup>, Antonio Mezzacapo<sup>[1]</sup>, Marco Pistoia<sup>[1]</sup>, Stephen Wood<sup>[1]</sup>\n### Affiliation\n- <sup>[1]</sup>IBMQ",
"_____no_output_____"
],
[
"### Introduction\nThis notebook illustrates how to use Qiskit Chemistry's programmatic APIs.\n\nIn this notebook, we decompose the computation of the ground state energy of a molecule into 4 steps:\n 1. Define a molecule and get integrals from a computational chemistry driver (PySCF in this case)\n 2. Construct a Fermionic Hamiltonian and map it onto a qubit Hamiltonian\n 3. Instantiate and initialize dynamically-loaded algorithmic components, such as the quantum algorithm VQE, the optimizer and variational form it will use, and the initial_state to initialize the variational form\n 4. Run the algorithm on a quantum backend and retrieve the results",
"_____no_output_____"
]
],
[
[
"# import common packages\nimport numpy as np\n\nfrom qiskit import Aer\n\n# lib from Qiskit Aqua\nfrom qiskit.aqua import QuantumInstance\nfrom qiskit.aqua.algorithms import VQE, NumPyMinimumEigensolver\nfrom qiskit.aqua.operators import Z2Symmetries\nfrom qiskit.aqua.components.optimizers import COBYLA\n\n# lib from Qiskit Aqua Chemistry\nfrom qiskit.chemistry import FermionicOperator\nfrom qiskit.chemistry.drivers import PySCFDriver, UnitsType\nfrom qiskit.chemistry.components.variational_forms import UCCSD\nfrom qiskit.chemistry.components.initial_states import HartreeFock",
"_____no_output_____"
]
],
[
[
"### Step 1: Define a molecule\nHere, we use LiH in the sto3g basis with the PySCF driver as an example.\nThe `molecule` object records the information from the PySCF driver.",
"_____no_output_____"
]
],
[
[
"# using driver to get fermionic Hamiltonian\n# PySCF example\ndriver = PySCFDriver(atom='Li .0 .0 .0; H .0 .0 1.6', unit=UnitsType.ANGSTROM,\n charge=0, spin=0, basis='sto3g')\nmolecule = driver.run()",
"_____no_output_____"
]
],
[
[
"### Step 2: Prepare qubit Hamiltonian\nHere, we setup the **to-be-frozen** and **to-be-removed** orbitals to reduce the problem size when we map to the qubit Hamiltonian. Furthermore, we define the **mapping type** for the qubit Hamiltonian.\nFor the particular `parity` mapping, we can further reduce the problem size.",
"_____no_output_____"
]
],
[
[
"# please be aware that the idx here with respective to original idx\nfreeze_list = [0]\nremove_list = [-3, -2] # negative number denotes the reverse order\nmap_type = 'parity'\n\nh1 = molecule.one_body_integrals\nh2 = molecule.two_body_integrals\nnuclear_repulsion_energy = molecule.nuclear_repulsion_energy\n\nnum_particles = molecule.num_alpha + molecule.num_beta\nnum_spin_orbitals = molecule.num_orbitals * 2\nprint(\"HF energy: {}\".format(molecule.hf_energy - molecule.nuclear_repulsion_energy))\nprint(\"# of electrons: {}\".format(num_particles))\nprint(\"# of spin orbitals: {}\".format(num_spin_orbitals))",
"HF energy: -8.854072040283647\n# of electrons: 4\n# of spin orbitals: 12\n"
],
[
"# prepare full idx of freeze_list and remove_list\n# convert all negative idx to positive\nremove_list = [x % molecule.num_orbitals for x in remove_list]\nfreeze_list = [x % molecule.num_orbitals for x in freeze_list]\n# update the idx in remove_list of the idx after frozen, since the idx of orbitals are changed after freezing\nremove_list = [x - len(freeze_list) for x in remove_list]\nremove_list += [x + molecule.num_orbitals - len(freeze_list) for x in remove_list]\nfreeze_list += [x + molecule.num_orbitals for x in freeze_list]\n\n# prepare fermionic hamiltonian with orbital freezing and eliminating, and then map to qubit hamiltonian\n# and if PARITY mapping is selected, reduction qubits\nenergy_shift = 0.0\nqubit_reduction = True if map_type == 'parity' else False\n\nferOp = FermionicOperator(h1=h1, h2=h2)\nif len(freeze_list) > 0:\n ferOp, energy_shift = ferOp.fermion_mode_freezing(freeze_list)\n num_spin_orbitals -= len(freeze_list)\n num_particles -= len(freeze_list)\nif len(remove_list) > 0:\n ferOp = ferOp.fermion_mode_elimination(remove_list)\n num_spin_orbitals -= len(remove_list)\n\nqubitOp = ferOp.mapping(map_type=map_type, threshold=0.00000001)\nqubitOp = Z2Symmetries.two_qubit_reduction(qubitOp, num_particles) if qubit_reduction else qubitOp\nqubitOp.chop(10**-10)\n\nprint(qubitOp.print_details())\nprint(qubitOp)",
"IIII\t(-0.20765933501970762+0j)\nIIIZ\t(-0.09376337484626396+0j)\nIIZX\t(-0.0031775814548701616+0j)\nIIIX\t(0.0031775814548701616+0j)\nIIXX\t(-0.0012513965999571266+0j)\nIIYY\t(0.0012513965999571266+0j)\nIIZZ\t(-0.2116250951510974+0j)\nIIXZ\t(0.019200533863103476+0j)\nIIXI\t(0.019200533863103476+0j)\nIIZI\t(0.3581026994577039+0j)\nIZII\t(0.09376337484626406+0j)\nZXII\t(0.003177581454870162+0j)\nIXII\t(0.003177581454870162+0j)\nXXII\t(-0.001251396599957117+0j)\nYYII\t(0.001251396599957117+0j)\nZZII\t(-0.2116250951510974+0j)\nXZII\t(-0.019200533863103483+0j)\nXIII\t(0.019200533863103483+0j)\nZIII\t(-0.3581026994577039+0j)\nIZIZ\t(-0.121827742158206+0j)\nIZZX\t(0.012144897228081718+0j)\nIZIX\t(-0.012144897228081718+0j)\nIZXX\t(0.03169874598733776+0j)\nIZYY\t(-0.03169874598733776+0j)\nIXIZ\t(0.012144897228081717+0j)\nZXIZ\t(0.012144897228081717+0j)\nIXZX\t(-0.0032659954996661924+0j)\nZXZX\t(-0.0032659954996661924+0j)\nIXIX\t(0.0032659954996661924+0j)\nZXIX\t(0.0032659954996661924+0j)\nIXXX\t(-0.008650156860619578+0j)\nZXXX\t(-0.008650156860619578+0j)\nIXYY\t(0.008650156860619578+0j)\nZXYY\t(0.008650156860619578+0j)\nYYIZ\t(0.031698745987337754+0j)\nXXIZ\t(-0.031698745987337754+0j)\nYYZX\t(-0.008650156860619578+0j)\nXXZX\t(0.008650156860619578+0j)\nYYIX\t(0.008650156860619578+0j)\nXXIX\t(-0.008650156860619578+0j)\nYYXX\t(-0.030981613344624754+0j)\nXXXX\t(0.030981613344624754+0j)\nYYYY\t(0.030981613344624754+0j)\nXXYY\t(-0.030981613344624754+0j)\nZZIZ\t(0.05590251078516701+0j)\nZZZX\t(0.0018710427514219098+0j)\nZZIX\t(-0.0018710427514219098+0j)\nZZXX\t(0.00310400411606565+0j)\nZZYY\t(-0.00310400411606565+0j)\nXIIZ\t(0.012841723180766517+0j)\nXZIZ\t(-0.012841723180766517+0j)\nXIZX\t(-0.0023521521732532856+0j)\nXZZX\t(0.0023521521732532856+0j)\nXIIX\t(0.0023521521732532856+0j)\nXZIX\t(-0.0023521521732532856+0j)\nXIXX\t(-0.007975908750571819+0j)\nXZXX\t(0.007975908750571819+0j)\nXIYY\t(0.007975908750571819+0j)\nXZYY\t(-0.007975908750571819+0j)\nZIIZ\t(0.11346110712684766+0j)\nZIZX\t(-0.01083836382875494+0j)\nZIIX\t(0.01083836382875494+0j)\nZIXX\t(-0.03355135311123255+0j)\nZIYY\t(0.03355135311123255+0j)\nIZZZ\t(-0.05590251078516701+0j)\nIZXZ\t(-0.012841723180766517+0j)\nIZXI\t(-0.012841723180766517+0j)\nIXZZ\t(-0.0018710427514219096+0j)\nZXZZ\t(-0.0018710427514219096+0j)\nIXXZ\t(0.0023521521732532856+0j)\nZXXZ\t(0.0023521521732532856+0j)\nIXXI\t(0.0023521521732532856+0j)\nZXXI\t(0.0023521521732532856+0j)\nYYZZ\t(-0.00310400411606565+0j)\nXXZZ\t(0.00310400411606565+0j)\nYYXZ\t(0.007975908750571819+0j)\nXXXZ\t(-0.007975908750571819+0j)\nYYXI\t(0.007975908750571819+0j)\nXXXI\t(-0.007975908750571819+0j)\nZZZZ\t(0.08447056807294229+0j)\nZZXZ\t(-0.008994911953942242+0j)\nZZXI\t(-0.008994911953942242+0j)\nXIZZ\t(-0.008994911953942242+0j)\nXZZZ\t(0.008994911953942242+0j)\nXIXZ\t(0.0066120470661577375+0j)\nXZXZ\t(-0.0066120470661577375+0j)\nXIXI\t(0.0066120470661577375+0j)\nXZXI\t(-0.0066120470661577375+0j)\nZIZZ\t(0.06035891281078855+0j)\nZIXZ\t(0.011019231644721898+0j)\nZIXI\t(0.011019231644721898+0j)\nIZZI\t(0.11346110712684766+0j)\nIXZI\t(-0.01083836382875494+0j)\nZXZI\t(-0.01083836382875494+0j)\nYYZI\t(-0.03355135311123255+0j)\nXXZI\t(0.03355135311123255+0j)\nZZZI\t(-0.06035891281078855+0j)\nXIZI\t(-0.0110192316447219+0j)\nXZZI\t(0.0110192316447219+0j)\nZIZI\t(-0.11344680300366612+0j)\n\nRepresentation: paulis, qubits: 4, size: 100\n"
]
],
[
[
"We use the classical eigen decomposition to get the smallest eigenvalue as a reference.",
"_____no_output_____"
]
],
[
[
"# Using exact eigensolver to get the smallest eigenvalue\nexact_eigensolver = NumPyMinimumEigensolver(qubitOp)\nret = exact_eigensolver.run()\nprint('The computed energy is: {:.12f}'.format(ret.eigenvalue.real))\nprint('The total ground state energy is: {:.12f}'.format(ret.eigenvalue.real + energy_shift + nuclear_repulsion_energy))",
"The computed energy is: -1.077059745735\nThe total ground state energy is: -7.881072044031\n"
]
],
[
[
"### Step 3: Initiate and configure dynamically-loaded instances\nTo run VQE with the UCCSD variational form, we require\n- VQE algorithm\n- Classical Optimizer\n- UCCSD variational form\n- Prepare the initial state in the HartreeFock state",
"_____no_output_____"
],
[
"### [Optional] Setup token to run the experiment on a real device\nIf you would like to run the experiment on a real device, you need to setup your account first.\n\nNote: If you did not store your token yet, use `IBMQ.save_account('MY_API_TOKEN')` to store it first.",
"_____no_output_____"
]
],
[
[
"# from qiskit import IBMQ\n# provider = IBMQ.load_account()",
"_____no_output_____"
],
[
"backend = Aer.get_backend('statevector_simulator')",
"_____no_output_____"
],
[
"# setup COBYLA optimizer\nmax_eval = 200\ncobyla = COBYLA(maxiter=max_eval)\n\n# setup HartreeFock state\nHF_state = HartreeFock(num_spin_orbitals, num_particles, map_type, \n qubit_reduction)\n\n# setup UCCSD variational form\nvar_form = UCCSD(num_orbitals=num_spin_orbitals, num_particles=num_particles, \n active_occupied=[0], active_unoccupied=[0, 1],\n initial_state=HF_state, qubit_mapping=map_type, \n two_qubit_reduction=qubit_reduction, num_time_slices=1)\n\n# setup VQE\nvqe = VQE(qubitOp, var_form, cobyla)\nquantum_instance = QuantumInstance(backend=backend)",
"_____no_output_____"
]
],
[
[
"### Step 4: Run algorithm and retrieve the results",
"_____no_output_____"
]
],
[
[
"results = vqe.run(quantum_instance)\nprint('The computed ground state energy is: {:.12f}'.format(results.eigenvalue.real))\nprint('The total ground state energy is: {:.12f}'.format(results.eigenvalue.real + energy_shift + nuclear_repulsion_energy))\nprint(\"Parameters: {}\".format(results.optimal_point))",
"The computed ground state energy is: -1.057852463157\nThe total ground state energy is: -7.861864761453\nParameters: [-3.84008067e-07 -3.52828133e-05 -3.84008067e-07 -9.35672387e-05]\n"
],
[
"import qiskit.tools.jupyter\n%qiskit_version_table\n%qiskit_copyright",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
]
|
ec68e3d9ca12aa904f98a21334017ddb2e415838 | 10,227 | ipynb | Jupyter Notebook | data_management/megadb/importer_notebooks/megadb_add_islandconservation_rodents_2020.ipynb | FFI-Vietnam/CameraTraps-FFIVietnamAdaptation | 308107436332aa07a73bf75b124d11947fde557c | [
"MIT"
]
| 402 | 2019-05-08T17:28:25.000Z | 2022-03-27T19:30:07.000Z | data_management/megadb/importer_notebooks/megadb_add_islandconservation_rodents_2020.ipynb | FFI-Vietnam/CameraTraps-FFIVietnamAdaptation | 308107436332aa07a73bf75b124d11947fde557c | [
"MIT"
]
| 72 | 2019-05-07T18:33:32.000Z | 2022-03-10T07:48:39.000Z | data_management/megadb/importer_notebooks/megadb_add_islandconservation_rodents_2020.ipynb | FFI-Vietnam/CameraTraps-FFIVietnamAdaptation | 308107436332aa07a73bf75b124d11947fde557c | [
"MIT"
]
| 162 | 2019-05-18T15:45:27.000Z | 2022-03-25T20:17:45.000Z | 23.564516 | 248 | 0.528014 | [
[
[
"from IPython.core.interactiveshell import InteractiveShell\nInteractiveShell.ast_node_interactivity = 'all' # default is ‘last_expr'\n\n%load_ext autoreload\n%autoreload 2",
"_____no_output_____"
],
[
"import sys\nsys.path.append('/home/mink/notebooks/CameraTraps') # append this repo to PYTHONPATH\nsys.path.append('/home/mink/lib/ai4eutils')",
"_____no_output_____"
],
[
"import json\nimport os\nfrom collections import Counter, defaultdict\nfrom random import sample\nfrom shutil import copyfile\nfrom multiprocessing.pool import ThreadPool\n\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\n\nimport path_utils # ai4eutils\n\nfrom data_management.megadb.schema import sequences_schema_check\nfrom data_management.megadb.converters.cct_to_megadb import process_sequences",
"_____no_output_____"
]
],
[
[
"# islandconservation_rodents_2020",
"_____no_output_____"
]
],
[
[
"dataset_name = 'islandconservation_rodents_2020'\n\ncontainer_root = '/mink_disk_0/camtraps/island-conservation-private/' \npath_prefix = 'rodents-2021.01.05/contrapest/Rodent Photos/'\n\npath_to_output = f'/home/mink/camtraps/data/megadb_jsons/{dataset_name}.json' \npath_to_output_temp = f'/home/mink/camtraps/data/megadb_jsons/{dataset_name}_temp.json' ",
"_____no_output_____"
]
],
[
[
"## Step 0 - Add an entry to the `datasets` table\n\nDone",
"_____no_output_____"
],
[
"## Step 1 - Prepare the `sequence` objects to insert into the database\n\nExtracted camera (~location) from file name. There is a spreadsheet in the folder called ContraPestAug2020TimelapseTemplate.xlsx with timestamp and MD/reviewer labels.",
"_____no_output_____"
]
],
[
[
"folder = os.path.join(container_root, path_prefix)\n\npaths = path_utils.recursive_file_list(folder)\nlen(paths)\npaths = sorted([p.split(folder)[1] for p in paths if path_utils.is_image_file(p)])\nlen(paths)",
"_____no_output_____"
],
[
"paths[0]",
"_____no_output_____"
],
[
"locations = set()\nsequences = []\n\nfor p in paths:\n location = p.split('.')[0]\n locations.add(location)\n sequences.append({\n 'dataset': dataset_name,\n 'seq_id': f'dummy_{len(sequences)}',\n 'images': [\n {\n 'file': p,\n 'frame_num': 1 # only one image, but easier for ingesting the annotations\n }\n ],\n 'location': location,\n 'class': ['rodent']\n })",
"_____no_output_____"
],
[
"len(locations)",
"_____no_output_____"
],
[
"len(sequences)\nsequences[700]",
"_____no_output_____"
]
],
[
[
"## Step 2 - Pass the schema check\n\nOnce your metadata are in the MegaDB format for `sequence` items, we check that they conform to the format's schema.\n\nIf the format conforms, the following messages will be printed:\n\n```\nVerified that the sequence items meet requirements not captured by the schema.\nVerified that the sequence items conform to the schema.\n```\n\nFor large datasets, the second step will take some time (~ a minute). \n\nOtherwise there will be an error message describing what's wrong. Please fix the issues until all checks are passed. You might need to write some snippets of code to loop through the `sequence` items to understand which entries have problems.",
"_____no_output_____"
]
],
[
[
"%%time\n\nsequences_schema_check.sequences_schema_check(sequences)",
"Verified that the sequence items meet requirements not captured by the schema.\nVerified that the sequence items conform to the schema.\nCPU times: user 115 ms, sys: 0 ns, total: 115 ms\nWall time: 114 ms\n"
],
[
"with open(path_to_output_temp, 'w', encoding='utf-8') as f:\n json.dump(sequences, f, indent=1, ensure_ascii=False)",
"_____no_output_____"
]
],
[
[
"### Step 2b - copy images to flat folder",
"_____no_output_____"
]
],
[
[
"def copy_file(src_path, dst_path):\n return copyfile(src_path, dst_path)",
"_____no_output_____"
],
[
"%%time\n\npath_pairs = []\nfor seq in tqdm(sequences):\n seq_id = seq['seq_id']\n for im in seq['images']:\n src_path = os.path.join(container_root, path_prefix, im['file'])\n assert os.path.exists(src_path), src_path\n frame = 1\n dst_path = os.path.join('/mink_disk_0/camtraps/imerit12b', \n f'{dataset_name}.seq{seq_id}.frame{frame}.jpg')\n path_pairs.append((src_path, dst_path))",
"100%|██████████| 845/845 [00:00<00:00, 1115.30it/s]"
],
[
"len(path_pairs)\npath_pairs[-100]",
"_____no_output_____"
],
[
"%%time\n\nwith ThreadPool(8) as pool:\n dst_paths = pool.starmap(copy_file, path_pairs)",
"CPU times: user 725 ms, sys: 2.02 s, total: 2.74 s\nWall time: 3.41 s\n"
],
[
"len(dst_paths)",
"_____no_output_____"
]
]
]
| [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
]
|
ec68e547ca62427e6546f79a309b644fffa27291 | 3,404 | ipynb | Jupyter Notebook | notebooks/si_02_matched_nodes_statistics.ipynb | QuantLaw/Complex-Societies-and-Growth | 3d55cdfa60960298e05cbd5a3ad341f48d7f52e4 | [
"CC-BY-4.0"
]
| 3 | 2020-10-18T07:14:29.000Z | 2022-03-20T14:06:19.000Z | notebooks/si_02_matched_nodes_statistics.ipynb | QuantLaw/Complex-Societies-and-Growth | 3d55cdfa60960298e05cbd5a3ad341f48d7f52e4 | [
"CC-BY-4.0"
]
| null | null | null | notebooks/si_02_matched_nodes_statistics.ipynb | QuantLaw/Complex-Societies-and-Growth | 3d55cdfa60960298e05cbd5a3ad341f48d7f52e4 | [
"CC-BY-4.0"
]
| 1 | 2021-07-21T15:25:11.000Z | 2021-07-21T15:25:11.000Z | 25.787879 | 102 | 0.500588 | [
[
[
"## Matched Nodes Statistics\n\nIn this notebook, we compute the percentage of nodes that we managed to map between snapshots\nusing on our multi-pass node alignment heuristic, as reported in the text of the paper.",
"_____no_output_____"
],
[
"### Preparations",
"_____no_output_____"
]
],
[
[
"import networkx as nx\nimport os\nimport json\nimport pandas as pd\nfrom quantlaw.utils.networkx import get_leaves",
"_____no_output_____"
],
[
"data = []\nfor dataset in ['us', 'de']:\n folder = f'../../legal-networks-data/{dataset}/4_crossreference_graph/subseqitems/'\n leaves_dict = {}\n for file in sorted(os.listdir(folder)):\n if file.endswith('.gpickle.gz'):\n G = nx.read_gpickle(folder+file)\n leaves = get_leaves(G)\n leaves_dict[file.split('.')[0]] = len(leaves)\n print(file, 'done')\n \n folder = f'../../legal-networks-data/{dataset}/5_snapshot_mapping_edgelist/subseqitems/'\n for file in sorted(os.listdir(folder)):\n if file.endswith('.json'):\n with open(folder+file) as f:\n mappings = json.load(f)\n file_base = os.path.splitext(file)[0]\n snapshot_1, snapshot_2 = file_base.split('_')\n data.append({\n 'dataset': dataset,\n 'year1': snapshot_1,\n 'year2': snapshot_2,\n 'count1': leaves_dict[snapshot_1],\n 'count2': leaves_dict[snapshot_2],\n 'mapped': len(mappings)\n })",
"_____no_output_____"
],
[
"df = pd.DataFrame(data).sort_values(['dataset', 'year1'])\ndf['Mappend Ratio'] = [t.mapped / min(t.count1, t.count2) for t in df.itertuples()]\ndf.to_csv('../graphics/matched_stats.csv')",
"_____no_output_____"
],
[
"df = pd.read_csv('../graphics/matched_stats.csv')\ndf.groupby('dataset')['Mappend Ratio'].describe()",
"_____no_output_____"
]
],
[
[
"### End\n\n",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown"
]
| [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
]
]
|
ec68e660c1a35b6dd4f7c06ddc72b35f02752ac6 | 1,575 | ipynb | Jupyter Notebook | projects/02-speed_dating/01-Speed_dating_part_1.ipynb | pattypooh/pez-jedha | 9a23b5446158c0f95b05f81ff2ff6629e56b1800 | [
"MIT"
]
| null | null | null | projects/02-speed_dating/01-Speed_dating_part_1.ipynb | pattypooh/pez-jedha | 9a23b5446158c0f95b05f81ff2ff6629e56b1800 | [
"MIT"
]
| null | null | null | projects/02-speed_dating/01-Speed_dating_part_1.ipynb | pattypooh/pez-jedha | 9a23b5446158c0f95b05f81ff2ff6629e56b1800 | [
"MIT"
]
| null | null | null | 33.510638 | 334 | 0.645079 | [
[
[
"# Speed Dating - Part I\n\n## Challenge description\n\nWe will start a new data visualization and exploration project. Your goal will be to try to understand *love*! It's a very complicated subject so we've simplified it. Your goal is going to be to understand what happens during a speed dating and especially to understand what will influence the obtaining of a **second date**.\n\nThis is a Kaggle competition on which you can find more details here :\n\n[Speed Dating Dataset](https://www.kaggle.com/annavictoria/speed-dating-experiment#Speed%20Dating%20Data%20Key.doc)\n\nTake some time to read the description of the challenge and try to understand each of the variables in the dataset. Help yourself with this from the document : *Speed Dating - Variable Description.md*\n\n### Rendering\n\nTo be successful in this project, you will need to do a descriptive analysis of the main factors that influence getting a second appointment.",
"_____no_output_____"
]
]
]
| [
"markdown"
]
| [
[
"markdown"
]
]
|
ec68f8d8d4e7828cae1fdc4758ab9ead3bb9987e | 1,448 | ipynb | Jupyter Notebook | elasticsearch.ipynb | w121211/flair | 64d74ef6afc348f6a04c66408cce4e21b8103d25 | [
"MIT"
]
| null | null | null | elasticsearch.ipynb | w121211/flair | 64d74ef6afc348f6a04c66408cce4e21b8103d25 | [
"MIT"
]
| null | null | null | elasticsearch.ipynb | w121211/flair | 64d74ef6afc348f6a04c66408cce4e21b8103d25 | [
"MIT"
]
| null | null | null | 20.985507 | 117 | 0.504144 | [
[
[
"# elasticsearch",
"_____no_output_____"
]
],
[
[
"# !pip install elasticsearch elasticsearch_dsl",
"_____no_output_____"
],
[
"import elasticsearch\nfrom elasticsearch_dsl import connections, Document, Date, Keyword, Q, Search, Text, Range, Integer, Boolean\n\n\nclass Entity(Document):\n symbol = Keyword(index=False) # either ticker, tag, null\n \n class Index:\n name = \"temp\"\n settings = {\n 'number_of_shards': 1,\n 'number_of_replicas': 0\n }\n\ndef init():\n connections.create_connection(hosts=['es:9200'])\n Entity.init()\n\ninit()",
"_____no_output_____"
]
]
]
| [
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code",
"code"
]
]
|
ec68fc26c87e085fd0e902125018a9829496d66f | 11,774 | ipynb | Jupyter Notebook | python_basics/Inheriance.ipynb | jai-singhal/data_science | e0d9a1e3bfd3a6d2feb88fa549e86c9628bd8b2a | [
"MIT"
]
| null | null | null | python_basics/Inheriance.ipynb | jai-singhal/data_science | e0d9a1e3bfd3a6d2feb88fa549e86c9628bd8b2a | [
"MIT"
]
| null | null | null | python_basics/Inheriance.ipynb | jai-singhal/data_science | e0d9a1e3bfd3a6d2feb88fa549e86c9628bd8b2a | [
"MIT"
]
| null | null | null | 24.427386 | 1,197 | 0.486835 | [
[
[
"class Base:\n def __init__(self):\n print(\"Base class\")\n def f(self):\n print(\"Base::f() called\")\n \nclass Derived(Base):\n def __init__(self):\n super().__init__()\n print(\"Derived class\")\n def f(self):\n print(\"Derived::f() called\")\n \n",
"_____no_output_____"
],
[
"b = Base()\nb.f()",
"Base class\nBase::f() called\n"
],
[
"d = Derived()\nd.f()",
"Base class\nDerived class\nDerived::f() called\n"
],
[
"#Example of creating list\n\nclass MyList:\n def __init__(self, *args, **kwargs):\n self._items = list(args)\n \n def add(self, item):\n self._items.append(item)\n \n def __getitem__(self, index):\n if index > len(self._items):\n raise IndexError(\"LIST INDEX OUT OF RANGE\")\n return self._items[index]\n\n def sort(self):\n return self._items.sort()\n \n def __len__(self):\n return len(self._items)\n \n def __repr__(self):\n return \"MyList: {}\".format(self._items)\n \n \nclass SortedList(MyList): #always make a list which is sorted\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.sort()\n \n def add(self, value):\n super().add(value)\n self.sort()\n \n def __repr__(self):\n return \"SortedList: {}\".format(self._items)\n",
"_____no_output_____"
],
[
"x = MyList(6, 4, 0, 10, 3, 4)\ny = SortedList(6, 4, 0, 10, 3, 4)\n\nx.add(1)\ny.add(1)",
"_____no_output_____"
],
[
"print(x._items)\nprint(y._items)\nprint(x[10])",
"[6, 4, 0, 10, 3, 4, 1]\n[0, 1, 3, 4, 4, 6, 10]\n"
]
],
[
[
"## isInstance()\ndetermines if an object is of sepecied type\nReturn True or False",
"_____no_output_____"
]
],
[
[
"isinstance(3, int)",
"_____no_output_____"
],
[
"isinstance(\"Jai\", str)",
"_____no_output_____"
],
[
"isinstance(x or y, MyList)",
"_____no_output_____"
],
[
"isinstance(4, (float, MyList, int))",
"_____no_output_____"
]
],
[
[
"## Multiple Inheritance",
"_____no_output_____"
]
],
[
[
"class Base1:\n def __init__(self):\n print(\"Base1::__init__() called\")\n \n def f(self):\n pass\n \nclass Base2:\n def __init__(self):\n print(\"Base2::__init__() called\")\n \n def g(self):\n pass\n \n def f(self):\n pass\n \nclass Derived(Base1, Base2):\n pass\n# def __init__(self):\n# print(\"Derived::__init__() called\")\n def h(self):\n pass\n \n def f(self):\n pass",
"_____no_output_____"
],
[
"d = Derived() # if derived init is not present, it calls the init of base1(first base class inherited)",
"Base1::__init__() called\n"
],
[
"Derived.mro() #Mro == Method resolution order:- It ",
"_____no_output_____"
],
[
"class MyClass:\n x = 7 # so called public class varible\n _bar = 6 # so called protected class variable\n __foo = 5 #so called private class variable\n\n def __init__(self):\n pass\n def f(self):\n pass\n\ndir(MyClass)\nobj = MyClass()\nobj._MyClass__foo # can access the private variable",
"_____no_output_____"
],
[
"class MyClass:\n def __call__(cls, *args, **kwargs):\n print(cls, args, kwargs)\n def __init__(self):\n print(\"__init__ called\")\n def f(self):\n pass\n \nobj1 = MyClass()",
"__init__ called\n"
],
[
"class Base:\n def __init__(self, *args, **kwargs):\n print(\"Base __init__\")\n self.name = \"Jai\"\n \n def f(self):\n print(\"Base:f\")\n \nclass Derived(Base):\n def __init__(self, *args, **kwargs):\n super(self.__class__, self).__init__(*args, **kwargs)\n #or\n super(Derived, self).__init__(*args, **kwargs)\n \n print(\"Derived __init__\")\n self.age = 21\n \n def g(self):\n print(\"Derived: g\")\n print(self.name, self.age)\n \nd = Derived()\nd.g()",
"Base __init__\nBase __init__\nDerived __init__\nDerived: g\nJai 21\n"
]
]
]
| [
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
ec68feb52a2b7fff109590d57aeeb1c7a5ae45e8 | 23,845 | ipynb | Jupyter Notebook | verifying-results-for-data-augmentation-approach.ipynb | bp-high/BpHigh_at_Dravidian_Lang_Tech_ACL-2022 | 7252f824e7b59d66096bbd1b29a0e6e07664ee0f | [
"MIT"
]
| null | null | null | verifying-results-for-data-augmentation-approach.ipynb | bp-high/BpHigh_at_Dravidian_Lang_Tech_ACL-2022 | 7252f824e7b59d66096bbd1b29a0e6e07664ee0f | [
"MIT"
]
| null | null | null | verifying-results-for-data-augmentation-approach.ipynb | bp-high/BpHigh_at_Dravidian_Lang_Tech_ACL-2022 | 7252f824e7b59d66096bbd1b29a0e6e07664ee0f | [
"MIT"
]
| null | null | null | 33.34965 | 133 | 0.456909 | [
[
[
"from tqdm.auto import tqdm\nimport os\n\nimport numpy as np\nimport pandas as pd \n\nimport matplotlib.pyplot as plt\nimport seaborn as sns \n\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.model_selection import train_test_split , StratifiedKFold\n\n\nimport tensorflow as tf \nimport tensorflow.keras.backend as K\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.models import Model, load_model, save_model\nfrom tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau\nfrom tensorflow.keras.layers import Input,Dense, LSTM, RNN, Bidirectional, GlobalAveragePooling2D , Dropout, Conv1D, Flatten\nfrom tensorflow.keras.utils import to_categorical\n\nfrom transformers import TFAutoModel , AutoTokenizer\n\nfrom sklearn.metrics import classification_report",
"_____no_output_____"
],
[
"col_names = ['labels','text']\ndf_tamil_mine = pd.read_csv('../input/augmented-test-predictions/BpHigh_tamil.tsv',sep ='\\t')\ndf_tamil_en_mine = pd.read_csv('../input/augmented-test-predictions/BpHigh_tamil-english.tsv',sep ='\\t')\ndf_tamil_given = pd.read_csv('../input/cross-verifying-results/ta-misogyny-test.csv',names=col_names,sep ='\\t')\ndf_tamil_en_given = pd.read_csv('../input/cross-verifying-results/ta-en-misogyny-test.csv',names=col_names,sep ='\\t')",
"_____no_output_____"
],
[
"def transform_df_labels(df):\n df = df.replace({'Counter-speech':0,\n 'Homophobia':1, \n 'Hope-Speech':2, \n 'Misandry':3, \n 'Misogyny':4, \n 'None-of-the-above':5, \n 'Transphobic':6,\n 'Xenophobia':7})\n return df",
"_____no_output_____"
],
[
"df_tamil_mine",
"_____no_output_____"
],
[
"df_tamil_given.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 700 entries, 0 to 699\nData columns (total 2 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 labels 700 non-null object\n 1 text 699 non-null object\ndtypes: object(2)\nmemory usage: 11.1+ KB\n"
],
[
"null_list = df_tamil_given[df_tamil_given['text'].isnull()].index.tolist()",
"_____no_output_____"
],
[
"df_tamil_given.dropna(how='any',axis=0,inplace=True)",
"_____no_output_____"
],
[
"df_tamil_given",
"_____no_output_____"
],
[
"df_tamil_en_given.dropna(how='any',axis=0,inplace=True)",
"_____no_output_____"
],
[
"df_tamil_mine_transformed = transform_df_labels(df_tamil_mine['label'])\ndf_tamil_given_transformed = transform_df_labels(df_tamil_given['labels'])\ndf_tamil_en_mine_transformed = transform_df_labels(df_tamil_en_mine['label'])\ndf_tamil_en_given_transformed = transform_df_labels(df_tamil_en_given['labels'])\n",
"_____no_output_____"
],
[
"target_names = ['Counter-speech',\n 'Homophobia', \n 'Hope-Speech', \n 'Misandry', \n 'Misogyny', \n 'None-of-the-above', \n 'Transphobic',\n 'Xenophobia']\nprint(classification_report(df_tamil_given_transformed ,df_tamil_mine_transformed,target_names=target_names))",
" precision recall f1-score support\n\n Counter-speech 0.33 0.17 0.23 47\n Homophobia 0.00 0.00 0.00 8\n Hope-Speech 0.00 0.00 0.00 26\n Misandry 0.09 0.05 0.06 127\n Misogyny 0.29 0.19 0.23 48\nNone-of-the-above 0.64 0.86 0.73 416\n Transphobic 0.00 0.00 0.00 2\n Xenophobia 0.00 0.00 0.00 25\n\n accuracy 0.55 699\n macro avg 0.17 0.16 0.16 699\n weighted avg 0.44 0.55 0.48 699\n\n"
],
[
"print(classification_report(df_tamil_en_given_transformed ,df_tamil_en_mine_transformed,target_names=target_names))",
" precision recall f1-score support\n\n Counter-speech 0.27 0.14 0.18 88\n Homophobia 0.00 0.00 0.00 56\n Hope-Speech 0.05 0.03 0.04 70\n Misandry 0.19 0.04 0.07 292\n Misogyny 0.02 0.02 0.02 57\nNone-of-the-above 0.66 0.94 0.78 1141\n Transphobic 0.00 0.00 0.00 58\n Xenophobia 0.00 0.00 0.00 95\n\n accuracy 0.59 1857\n macro avg 0.15 0.15 0.13 1857\n weighted avg 0.45 0.59 0.50 1857\n\n"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
ec6915fec72ca2a35cbb3ecc03974ff100af002d | 147,010 | ipynb | Jupyter Notebook | src/ML Algorithms1/LinearRegression/1.linear_regression.ipynb | sudeep0901/python | 7a50af12e72d21ca4cad7f2afa4c6f929552043f | [
"MIT"
]
| null | null | null | src/ML Algorithms1/LinearRegression/1.linear_regression.ipynb | sudeep0901/python | 7a50af12e72d21ca4cad7f2afa4c6f929552043f | [
"MIT"
]
| 3 | 2019-12-26T05:13:55.000Z | 2020-03-07T06:59:56.000Z | src/ML Algorithms1/LinearRegression/1.linear_regression.ipynb | sudeep0901/python | 7a50af12e72d21ca4cad7f2afa4c6f929552043f | [
"MIT"
]
| null | null | null | 166.678005 | 43,152 | 0.877797 | [
[
[
"import pandas as pd\nelectricity = pd.read_excel('/home/sudeep/sources/github/pythongithub/src/ML Algorithms1/LinearRegression/CCPP/Folds5x2_pp.xlsx')\nprint(electricity.info())\nelectricity.head(3)",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 9568 entries, 0 to 9567\nData columns (total 5 columns):\nAT 9568 non-null float64\nV 9568 non-null float64\nAP 9568 non-null float64\nRH 9568 non-null float64\nPE 9568 non-null float64\ndtypes: float64(5)\nmemory usage: 373.8 KB\nNone\n"
],
[
"# \tfull name\n# AT\tAmbiental Temperature\n# V\tExhaust Vacuum\n# AP\tAmbiental Pressure\n# RH\tRelative Humidity\n# PE\tElectrical Energy Output\n# Let’s first decide what training set sizes we want to use for generating the learning curves. \n# The minimum value is 1. The maximum is given by the number of instances in the training set. \n# Our training set has 9568 instances, so the maximum value is 9568. \n# However, we haven’t yet put aside a validation set. \n# We’ll do that using an 80:20 ratio, ending up with a training set of 7654 instances (80%), and a validation set of 1914 instances (20%). \n# Given that our training set will have 7654 instances, the maximum value we can use to generate our learning curves is 7654. \n# For our case, here, we use these six sizes:",
"_____no_output_____"
],
[
"train_sizes = [1, 100, 500, 2000, 5000, 7654]\ntrain_sizes",
"_____no_output_____"
],
[
"# An important thing to be aware of is that for each specified size a new model is trained. If you’re using cross-validation, which we’ll do in this post, k models will be trained for each training size (where k is given by the number of folds used for cross-validation). To save code running time, it’s good practice to limit yourself to 5-10 training sizes.\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import learning_curve\nfeatures = ['AT', 'V', 'AP', 'RH']\ntarget = 'PE'",
"_____no_output_____"
],
[
"train_sizes, train_scores, validation_scores = learning_curve(\nestimator = LinearRegression(),\nX= electricity[features],\ny = electricity[target], train_sizes = train_sizes, cv = 5, scoring='neg_mean_squared_error')",
"_____no_output_____"
],
[
"train_sizes, train_scores, validation_scores",
"_____no_output_____"
],
[
"print('Training scores:\\n\\n', train_scores)\n# print('\\n', '-' * 70) # separator to make the output easy to read\nprint('\\nValidation scores:\\n\\n', validation_scores)",
"Training scores:\n\n [[ -0. -0. -0. -0. -0. ]\n [-19.71230701 -18.31492642 -18.31492642 -18.31492642 -18.31492642]\n [-18.14420459 -19.63885072 -19.63885072 -19.63885072 -19.63885072]\n [-21.53603444 -20.18568787 -19.98317419 -19.98317419 -19.98317419]\n [-20.47708899 -19.93364211 -20.56091569 -20.4150839 -20.4150839 ]\n [-20.98565335 -20.63006094 -21.04384703 -20.63526811 -20.52955609]]\n\nValidation scores:\n\n [[-619.30514723 -379.81090366 -374.4107861 -370.03037109 -373.30597982]\n [ -21.80224219 -23.01103419 -20.81350389 -22.88459236 -23.44955492]\n [ -19.96005238 -21.2771561 -19.75136596 -21.4325615 -21.89067652]\n [ -19.92863783 -21.35440062 -19.62974239 -21.38631648 -21.811031 ]\n [ -19.88806264 -21.3183303 -19.68228562 -21.35019525 -21.75949097]\n [ -19.9046791 -21.33448781 -19.67831137 -21.31935146 -21.73778949]]\n"
],
[
"validation_scores.shape, len(validation_scores)",
"_____no_output_____"
],
[
"# To plot the learning curves, we need only a single error score per training set size, not 5. For this reason, in the next code cell we take the mean value of each row and also flip the signs of the error scores (as discussed above).\ntrain_scores_mean = -train_scores.mean(axis=1)\nvalidation_scores_mean = -validation_scores.mean(axis = 1)\n",
"_____no_output_____"
],
[
"print('Mean training scores\\n\\n', pd.Series(train_scores_mean, index = train_sizes))\nprint('\\n', '-' * 20) # separator\nprint('\\nMean validation scores\\n\\n',pd.Series(validation_scores_mean, index = train_sizes))",
"Mean training scores\n\n 1 -0.000000\n100 18.594403\n500 19.339921\n2000 20.334249\n5000 20.360363\n7654 20.764877\ndtype: float64\n\n --------------------\n\nMean validation scores\n\n 1 423.372638\n100 22.392186\n500 20.862362\n2000 20.822026\n5000 20.799673\n7654 20.794924\ndtype: float64\n"
],
[
"%matplotlib inline\nimport matplotlib.pyplot as plt\n\nplt.style.use('seaborn')\nplt.plot(train_sizes, train_scores_mean, label = 'Training error')\nplt.plot(train_sizes, validation_scores_mean, label = 'Validation error')\nplt.ylabel('MSE', fontsize = 14)\nplt.xlabel('Training set size', fontsize = 14)\nplt.title('Learning curves for a linear regression model', fontsize = 18, y = 1.03)\nplt.legend()\nplt.ylim(0,40)",
"_____no_output_____"
],
[
"# There’s a lot of information we can extract from this plot. Let’s proceed granularly. When the training set size is 1, we can see that the MSE for the training set is 0. This is normal behavior, since the model has no problem fitting perfectly a single data point. So when tested upon the same data point, the prediction is perfect. But when tested on the validation set (which has 1914 instances), the MSE rockets up to roughly 423.4. This relatively high value is the reason we restrict the y-axis range between 0 and 40. This enables us to read most MSE values with precision. Such a high value is expected, since it’s extremely unlikely that a model trained on a single data point can generalize accurately to 1914 new instances it hasn’t seen in training. When the training set size increases to 100, the training MSE increases sharply, while the validation MSE decreases likewise.\n\n# The linear regression model doesn’t predict all 100 training points perfectly, so the training MSE is greater than 0. However, the model performs much better now on the validation set because it’s estimated with more data. From 500 training data points onward, the validation MSE stays roughly the same. This tells us something extremely important: adding more training data points won’t lead to significantly better models. So instead of wasting time (and possibly money) with collecting more data, we need to try something else, like switching to an algorithm that can build more complex models. \n# Technically, that value of 20 has MW (megawatts squared) as units (the units get squared as well when we compute the MSE). But the values in our target column are in MW (according to the documentation). Taking the square root of 20 MW results in approximately 4.5 MW. Each target value represents net hourly electrical energy output. So for each hour our model is off by 4.5 MW on average. According to this Quora answer, 4.5 MW is equivalent to the heat power produced by 4500 handheld hair dryers. And this would add up if we tried to predict the total energy output for one day or a longer period. We can conclude that the an MSE of 20 MW is quite large. So our model has a bias problem.\n\n# But is it a low bias problem or a high bias problem? To find the answer, we need to look at the training error. If the training error is very low, it means that the training data is fitted very well by the estimated model. If the model fits the training data very well, it means it has low bias with respect to that set of data. If the training error is high, it means that the training data is not fitted well enough by the estimated model. If the model fails to fit the training data well, it means it has high bias with respect to that set of data. \n# A narrow gap indicates low variance. Generally, the more narrow the gap, the lower the variance. The opposite is also true: the wider the gap, the greater the variance. Let’s now explain why this is the case. As we’ve discussed earlier, if the variance is high, then the model fits training data too well. When training data is fitted too well, the model will have trouble generalizing on data that hasn’t seen in training. When such a model is tested on its training set, and then on a validation set, the training error will be low and the validation error will generally be high. 
As we change training set sizes, this pattern continues, and the differences between training and validation errors will determine that gap between the two learning curves.\n\n\n\n# The relationship between the training and validation error, and the gap can be summarized this way: So the bigger the difference between the two errors, the bigger the gap. The bigger the gap, the bigger the variance. In our case, the gap is very narrow, so we can safely conclude that the variance is low. High training MSE scores are also a quick way to detect low variance. If the variance of a learning algorithm is low, then the algorithm will come up with simplistic and similar models as we change the training sets. Because the models are overly simplified, they cannot even fit the training data well (they underfit the data). So we should expect high training MSEs. Hence, high training MSEs can be used as indicators of low variance. \n# In our case, the training MSE plateaus at around 20, and we’ve already concluded that’s a high value. So besides the narrow gap, we now have another confirmation that we have a low variance problem. So far, we can conclude that:\n\n# Our learning algorithm suffers from high bias and low variance, underfitting the training data.\n# Adding more instances (rows) to the training data is hugely unlikely to lead to better models under the current learning algorithm.\n# One solution at this point is to change to a more complex learning algorithm. This should decrease the bias and increase the variance. A mistake would be to try to increase the number of training instances. Generally, these other two fixes also work when dealing with a high bias and low variance problem:\n\n# Training the current learning algorithm on more features (to avoid collecting new data, you can generate easily polynomial features). This should lower the bias by increasing the model’s complexity.\n# Decreasing the regularization of the current learning algorithm, if that’s the case. In a nutshell, regularization prevents the algorithm from fitting the training data too well. If we decrease regularization, the model will fit training data better, and, as a consequence, the variance will increase and the bias will decrease.",
"_____no_output_____"
],
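[
"# A possible next step for the 'more features' fix mentioned above. This is only a sketch of the idea\n# (the pipeline below is my own illustration, not part of the original analysis): polynomial features\n# raise the model's complexity and should lower the bias.\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.pipeline import make_pipeline\npoly_model = make_pipeline(PolynomialFeatures(degree=2, include_bias=False), LinearRegression())\n_ = poly_model.fit(electricity[features], electricity[target]) # same columns as before, now with squared and interaction terms",
"_____no_output_____"
],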
[
"### Bundling our previous work into a function ###\ndef learning_curves(estimator, data, features, target, train_sizes, cv):\n train_sizes, train_scores, validation_scores = learning_curve(\n estimator, data[features], data[target], train_sizes = train_sizes,\n cv = cv, scoring = 'neg_mean_squared_error')\n train_scores_mean = -train_scores.mean(axis = 1)\n validation_scores_mean = -validation_scores.mean(axis = 1)\n plt.plot(train_sizes, train_scores_mean, label = 'Training error')\n plt.plot(train_sizes, validation_scores_mean, label = 'Validation error')\n plt.ylabel('MSE', fontsize = 14)\n plt.xlabel('Training set size', fontsize = 14)\n title = 'Learning curves for a ' + str(estimator).split('(')[0] + ' model'\n plt.title(title, fontsize = 18, y = 1.03)\n plt.legend()\n plt.ylim(0,40)\n ### Plotting the two learning curves ###\nfrom sklearn.ensemble import RandomForestRegressor\nplt.figure(figsize = (16,5))\nfor model, i in [(RandomForestRegressor(), 1), (LinearRegression(),2)]:\n plt.subplot(1,2,i)\n learning_curves(model, electricity, features, target, train_sizes, 5)",
"_____no_output_____"
],
[
"from sklearn import preprocessing\nimport numpy as np",
"_____no_output_____"
],
[
"electricity.head()",
"_____no_output_____"
],
[
"electricity_scaled = preprocessing.scale(electricity)",
"_____no_output_____"
],
[
"electricity_scaled",
"_____no_output_____"
],
[
"electricity_scaled.mean(axis=0)",
"_____no_output_____"
],
[
"electricity_scaled.std(axis=0)",
"_____no_output_____"
],
[
"electricity.hist()",
"_____no_output_____"
],
[
"type(electricity)",
"_____no_output_____"
],
[
"type(electricity_scaled)",
"_____no_output_____"
],
[
"electricity_scaled_df= pd.DataFrame(electricity_scaled)",
"_____no_output_____"
],
[
"electricity_scaled_df.hist()",
"_____no_output_____"
],
[
"mu, sigma = 0, 0.1 # mean and standard deviation",
"_____no_output_____"
],
[
"s = np.random.normal(mu, sigma, 100)\ns",
"_____no_output_____"
],
[
"abs(mu - np.mean(s)) < 0.01\n",
"_____no_output_____"
],
[
"abs(sigma - np.std(s, ddof=1)) < 0.01",
"_____no_output_____"
],
[
"import matplotlib.pyplot as plt\ncount, bins, ignored = plt.hist(s, 30, density=True)\nplt.plot(bins, 1/(sigma * np.sqrt(2 * np.pi)) *\n np.exp( - (bins - mu)**2 / (2 * sigma**2) ),\n linewidth=2, color='r')",
"_____no_output_____"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
ec691d11553f8d453fa520782a54176306ba37d5 | 114,103 | ipynb | Jupyter Notebook | numpy-random.ipynb | TommyVaughan/numpy-random | 4d6f779e2a450bd92fb30db6eea71366d6d6ebea | [
"Apache-2.0"
]
| null | null | null | numpy-random.ipynb | TommyVaughan/numpy-random | 4d6f779e2a450bd92fb30db6eea71366d6d6ebea | [
"Apache-2.0"
]
| null | null | null | numpy-random.ipynb | TommyVaughan/numpy-random | 4d6f779e2a450bd92fb30db6eea71366d6d6ebea | [
"Apache-2.0"
]
| null | null | null | 99.566318 | 7,864 | 0.844123 | [
[
[
"# The numpy-random package",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"In this assignment I will be looking at the numpy.random package. I hope to:\n\n1) Explain the overall purpose of the package.\n\n2) Explain the use of the \"Simple random data\" and \"Permutations\" functions.\n\n3) Explain the use and purpose of at least five \"Distributions\" functions.\n\n4) Explain the use of seeds in generating pseudorandom numbers.",
"_____no_output_____"
],
[
"## Why generate random numbers?\n\nRandomness is used as a tool or feature in preparing data and in learning algorithims that map input data to output data in order to make predictions. The online casino industry is a good example of how this is used on a daily basis. ",
"_____no_output_____"
],
[
"## What is the NumPy Random Package?",
"_____no_output_____"
],
[
"NumPy is the fundamental package for scientific computing in Python. The name is an acronym for \"Numeric Python\" It is a library that provides a multidimensional array object, various derived objects and an assortment of routines for fast operations on arrays, including mathematical, logical, shape manipulation, sorting, selecting, I/O, discrete Fournier transforms, basic linear algebra, basic statistical operations, random simulation and much more.\n\nThe Numpy.Random package is a package within the overall Numpy package.It's unsually used in conjunction with other Python packages. This package allows for generating random data that can be analysed. This package contains numerous functions that fall under four seperate headings, random sampling, permutations, distributions and random generator. I will be looking into these further in this assignment.",
"_____no_output_____"
]
],
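[
[
"# The assignment goals above mention the use of seeds. A minimal sketch (my own illustration, using the\n# legacy np.random.seed interface rather than the newer Generator API) of how a seed makes the\n# pseudorandom stream repeatable:\nimport numpy as np\nnp.random.seed(42)\nfirst = np.random.rand(3)\nnp.random.seed(42)\nsecond = np.random.rand(3)\nprint(np.array_equal(first, second)) # True: the same seed reproduces the same 'random' numbers",
"_____no_output_____"
]
],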
[
[
"# Need to import the numpy package and matplotlib to investihgate the functions.\nimport numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\nimport seaborn as sns\nsns.set(color_codes=True) # settings for seaborn plotting style\nsns.set(rc={'figure.figsize':(4.5,3)}) # settings for seaborn plot sizes",
"_____no_output_____"
]
],
[
[
"## Simple Random Sampling",
"_____no_output_____"
],
[
"The simple random data function in the numpy.random package provides a number of different functions that allow for pseudorandom random data to be generated. \n\nA look at the main Rand function:",
"_____no_output_____"
],
[
"#### Rand Function\nThis function generates random numbers between 0 and 1 but not including 1.",
"_____no_output_____"
]
],
[
[
"np.random.rand() ",
"_____no_output_____"
]
],
[
[
"It's also possible to generate more than one random number at a time by adding any additional number of rows and columns.",
"_____no_output_____"
]
],
[
[
"np.random.rand(5,10) ",
"_____no_output_____"
]
],
[
[
"It's also possible to go further and add new arrays.",
"_____no_output_____"
]
],
[
[
"np.random.rand(3, 4, 5)",
"_____no_output_____"
]
],
[
[
"Plotting a histogram to have a closer look at these randomly generated numbers where numpy will take all of the random values generated and plot them accordingly.",
"_____no_output_____"
]
],
[
[
"x = np.random.rand (10000)\nplt.hist(x)",
"_____no_output_____"
]
],
[
[
"On the histogram it's possible to see that out of the 10000 values generated, the breakdown betweem 0.0 and 1 has been broken into 10's (0.0, 0.1, 0.2, 0.3 etc) and there random numbers generated within these subsets is \"roughly\" the same. The values are all near 1000 so it shows that there was a relatively even spread of random numbers selected between 0.0 and 1.0. ",
"_____no_output_____"
],
[
"## Permutations",
"_____no_output_____"
],
[
"In mathematics and statistics, a permutation is an arrangement of the members of a non-empty set. This is different to a combination which focuses on the selection of objects without regard to the order in which they are selected. In numpy.random there are two permutation functions that allow for a distinct set of numbers to be generated in permutations. These are the Permutation Function and the Shuffle Function.",
"_____no_output_____"
],
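[
"A quick worked count makes the distinction concrete: the set {1, 2, 3} can be arranged in $3! = 6$ different orders (6 permutations), but it contains only $\\binom{3}{2} = 3$ two-element combinations, because selections such as {1, 2} and {2, 1} count as the same combination.",
"_____no_output_____"
],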
[
"#### Permutation Function\nThis function allows a sequence of numbers to be generated in no specific order",
"_____no_output_____"
]
],
[
[
"np.random.permutation(10)",
"_____no_output_____"
]
],
[
[
"It's also possible to break the sequence of numbers into seperate sets.",
"_____no_output_____"
]
],
[
[
"arr = np.arange(12).reshape((4, 3))\nnp.random.permutation(arr)",
"_____no_output_____"
]
],
[
[
"It's also possible to provide a specific set of numbers which can be returned in a permutated range.",
"_____no_output_____"
]
],
[
[
"np.random.permutation([1, 5, 8, 11, 15 ])",
"_____no_output_____"
]
],
[
[
"#### Shuffle Function\nThis function when used like this is very similar to the permutations function.",
"_____no_output_____"
]
],
[
[
"arr = np.arange(10)\nnp.random.shuffle(arr)\narr",
"_____no_output_____"
]
],
[
[
"With multi-dimensional arrays,these are only shuffled along the first axis so it is the array rows that are shuffled but not the numbers in each set.",
"_____no_output_____"
]
],
[
[
"arr = np.arange(9).reshape((3, 3))\nnp.random.shuffle(arr)\narr",
"_____no_output_____"
]
],
[
[
"## Distributions",
"_____no_output_____"
],
[
"The distribution of a statistical data set is a listing or function showing all the possible values of the data and how often they occur. When a distribution of categorial data is organized, it's possible to see the number or percentage in each group. In numpy there are multiple functions that allow data samples to be drawn from different types of distributions. I will take a closer look at five of these different Distribution Functions. ",
"_____no_output_____"
],
[
"#### 1. Binomial Distribution\nIn statistics, the binomial distribution is a frequency distribution of the possible number of successful outcomes in a given number of trials in each of which there is the same probability of success. The criteria for a binomial distribution are:\n\n1) The experiment consists of \"n\" repeated trials.\n\n2) Each trial can only result in two possible outcomes, a \"success\" or a \"failure\".\n\n3) The probability of success, denoted by \"p\", is the same on every trial.\n\n4) The trials are interdependent where the outcome of one trial does not affect the outcome of the other trials.\n\nThe best example of a binomial distribution is a simple coin flip experiment.\n\nA coin is flipped 10(n) times.\nEach flip can result only in two possible outcomes, heads or tails. Lets say heads = success and tails = failure\nThe probability of success (heads) is constant - 0.5(p) on every trial.\nEach flip has no impact on the previous flip.\n\nLet's have a look using the np.random.bionomial function:",
"_____no_output_____"
]
],
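[
[
"The formula behind this experiment is a standard result from probability theory: the probability of exactly $k$ successes in $n$ independent trials, each with success probability $p$, is\n\n$$P(X = k) = \\binom{n}{k} p^{k} (1-p)^{n-k}$$\n\nwith mean $np$ and variance $np(1-p)$. For the coin example ($n = 10$, $p = 0.5$) the mean is 5 heads, which is why the histograms below peak around 5.",
"_____no_output_____"
]
],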
[
[
"n, p = 11, .5 # number of trials, probability of each trial\ns = np.random.binomial(n, p, 100) # s is the number of times the coin lands showing heads (success)\nplt.hist(s)\nplt.show()\n# result of flipping a coin 10 times (and if this trial of 10 flips was conducted 100 times)",
"_____no_output_____"
]
],
[
[
"#### Looking at binomial distribution on an additional graph available online see references",
"_____no_output_____"
]
],
[
[
"from scipy.stats import binom # import binom module from scipy.stats to generate random variables \nbinom.rvs(n=10,p=0.5) # as with the coin example lets look at the number of success from 10 trials with a 0.5 probability of success\ndata_binom = binom.rvs(n=10,p=0.5,size=1000) # use binom.rvs to repeat the trials with size arguement so 1000 times the trails are repeated\nax = sns.distplot(data_binom,\n kde=False,\n color='skyblue',\n hist_kws={\"linewidth\": 15,'alpha':1})\nax.set(xlabel='Binomial', ylabel='Frequency')",
"C:\\Users\\I304302\\AppData\\Local\\Continuum\\anaconda3\\lib\\site-packages\\matplotlib\\axes\\_axes.py:6462: UserWarning: The 'normed' kwarg is deprecated, and has been replaced by the 'density' kwarg.\n warnings.warn(\"The 'normed' kwarg is deprecated, and has been \"\n"
]
],
[
[
"We can see the graph produces similar results to the example, if a coin is flipped 10 times there is more of a chance that the probability of success \"landing on heads\" will be closer to the central probability chance of 5 in 10 then on the outer findges of a 9 in 10 or 1 in 10 chance of success as indicated in the graph. In actual fact it is clear to see that the outcome of 5 heads and 5 tails in 10 flips occured in the higest number of trials about 260 times. We can also see a similar structure to the first graph I drafted.",
"_____no_output_____"
],
[
"#### 2. Uniform Distribution\n\nIn statistics, uniform distribution also known as rectangle distribution, is a type of probability in which all outcomes are equally likely, where each variable has the same probability that it will be the outcome. There are two types of uniform distribution, discrete and continuous. \n\nThe possible result of rolling a die is an example of discrete uniform distribution. It's posssible to roll a 1,2,3,4,5,6 so a roll of the die generates a discrete distribution with a probability (p)= 1/6 for each outcome. A uniform distribution with only two possible outcomes is a special case of the binomial distribution which was discussed above.\n\nA variable X is said to be uniformly distributed if the density function is f(x) = 1/(b-a) where a and b are the parameters.\n\nLets have a look using the np.random.uniform function:",
"_____no_output_____"
]
],
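[
[
"# The die example above is discrete, while np.random.uniform draws continuous values. As a quick\n# illustration (my own sketch), np.random.randint simulates the die directly; note that its upper\n# bound is exclusive, so faces 1..6 need randint(1, 7).\nimport numpy as np\nrolls = np.random.randint(1, 7, 10000)\nfaces, counts = np.unique(rolls, return_counts=True)\nprint(dict(zip(faces, counts / len(rolls)))) # each face's relative frequency should sit near 1/6",
"_____no_output_____"
]
],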
[
[
"s = np.random.uniform(1,7,1000)\nnp.all(s >= 1)\nnp.all(s < 7)\ncount, bins, ignored = plt.hist(s, 15, density=True)\nplt.plot(bins, np.ones_like(bins))\nplt.show()",
"_____no_output_____"
]
],
[
[
"#### Looking at uniform distribution on an additional graph available online see references",
"_____no_output_____"
]
],
[
[
"from scipy.stats import uniform # import uniform distribution module\nn = 10000 # random numbers from uniform distribution\na = 0 # Generate numbers from 0 to 10\nb = 10\ndata_uniform = uniform.rvs(size=n, loc = a, scale=b)\nax = sns.distplot(data_uniform,\n bins=100,\n kde=False,\n color='skyblue',\n hist_kws={\"linewidth\": 15,'alpha':1})\nax.set(xlabel='Uniform ', ylabel='Frequency')",
"C:\\Users\\I304302\\AppData\\Local\\Continuum\\anaconda3\\lib\\site-packages\\matplotlib\\axes\\_axes.py:6462: UserWarning: The 'normed' kwarg is deprecated, and has been replaced by the 'density' kwarg.\n warnings.warn(\"The 'normed' kwarg is deprecated, and has been \"\n"
]
],
[
[
"The np.random.uniform function is very similar to the np.random.rand function the main difference being that the random.uniform function will return values of any interger.\n\nLooking at the above graph I drafted it looks almost rectangle as the probability of rolling all six sides of the die is equal so no one side has a greater probability hence there is no one section with a much larger bar on the graph.The second graph also indicates this type of uniform relationship but spread out over a larrger data set.",
"_____no_output_____"
],
[
"#### 3. Normal Distribution\n\nNormal distribution, sometimes referred to as the Gaussian distribution or bell curve, is a distribution that occurs naturally in many situations. An example of this would be if a class of students completed an exam, the bulk of students will score a C, while a smaller numbers of students will score a B or a D and an even smaller percentage again will score an A or an F. This creates a distribution that represents a bell where half of the data will fall to the left of the mean and half of the data will fall to the right.\n\nThe properties of a normal distribution aee:\n\n1) The mean, mode and median are all equal.\n\n2) The curve is symmetric at the centre, i.e. around the mean.\n\n3) Exactly half of the values are to the left of the centre and half of the values are to the right.\n\n4) The total area under the curve is one.\n\nLets have a look at the np.random.normal function:",
"_____no_output_____"
]
],
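[
[
"A quick numerical sketch of the first and third properties (using np.random.normal with the same mean 0 and standard deviation 1 as in the cell below):\n\n```python\nimport numpy as np\n\ns = np.random.normal(0, 1, 100000)\nprint(s.mean(), np.median(s)) # both close to 0, so the mean and median roughly agree\nprint((s < s.mean()).mean()) # roughly 0.5 of the values fall below the mean\n```",
"_____no_output_____"
]
],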
[
[
"mu, sigma = 0, 1 # mean and standard deviation\ns = np.random.normal(mu, sigma, 1000)\nplt.hist(s)\nplt.show",
"_____no_output_____"
]
],
[
[
"#### Looking at normal distribution on an additional graph available online see references",
"_____no_output_____"
]
],
[
[
"from scipy.stats import norm # import norm module from scipy.stats to generate random variables\ndata_normal = norm.rvs(size=10000,loc=0,scale=1) # generate random numbers from N(0,1). loc is the mean and scale is the standard deviation\nax = sns.distplot(data_normal,\n bins=100,\n kde=False,\n color='skyblue',\n hist_kws={\"linewidth\": 15,'alpha':1})\nax.set(xlabel='Normal', ylabel='Frequency')",
"C:\\Users\\I304302\\AppData\\Local\\Continuum\\anaconda3\\lib\\site-packages\\matplotlib\\axes\\_axes.py:6462: UserWarning: The 'normed' kwarg is deprecated, and has been replaced by the 'density' kwarg.\n warnings.warn(\"The 'normed' kwarg is deprecated, and has been \"\n"
]
],
[
[
"Looking at the first graph I drafted based on the example in the numpy documentation, if we tie it back to the example of a class of students getting exam results. If we assume the 0 = a score of C, 2 = a score of B and -2 = a score of D, we can see that the majority of students would fall into the middle of the graph, getting not too high of a score and not too low of a score, and then a much lower number of students falling in the outer fringes of the curve either scoring very low (on the left) or very high (on the right). The second graph also shows how this type of distribution results in a bell shaped curve when graphed.",
"_____no_output_____"
],
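[
"As a small numerical check of the exam analogy above (a sketch using the same standard normal parameters), only a few per cent of simulated 'students' land outside the -2 to 2 band:\n\n```python\nimport numpy as np\n\ns = np.random.normal(0, 1, 100000)\nprint(((s > -2) & (s < 2)).mean()) # about 0.95, so only around 5% score below a D or above a B\n```",
"_____no_output_____"
],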
[
"#### 4. Gamma Distribution\n\nIn statistics gamma distribution is a two-parametre family of continuous probability distributions. Gamma distributions occur frequently in models used in engineering for checking time to failure of equipment, in meterology to measure rain fall and in business areas such as insurance claims and loan defaults for which the variables are always positive and the results are skewed. \n\nA really good example is that of a call centre. We may know fairly precisely the average voulme of calls that is received but we cannot effectively calculate the probability that any one call will arrive at a specific time. The gamma distribution can be useful when dealing with rates, such as call arrival times, or wait times in a queue.\n\nLets have a look using the np.random.gamma function:",
"_____no_output_____"
]
],
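[
[
"The comment in the next cell (mean=4, std=2*sqrt(2)) follows from the gamma formulas mean = shape * scale and std = sqrt(shape) * scale; a small sketch to verify this against a sample:\n\n```python\nimport numpy as np\n\nshape, scale = 2.0, 2.0\nprint(shape * scale) # theoretical mean = 4\nprint(np.sqrt(shape) * scale) # theoretical std = 2*sqrt(2), about 2.83\n\ns = np.random.gamma(shape, scale, 100000)\nprint(s.mean(), s.std()) # the sample estimates land close to the values above\n```",
"_____no_output_____"
]
],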
[
[
"shape, scale = 2., 2. # mean=4, std=2*sqrt(2) \ns = np.random.gamma(shape, scale, 1000)\nplt.hist(s)\nplt.show",
"_____no_output_____"
],
[
"from scipy.stats import gamma # import the gamma distribution module\ndata_gamma = gamma.rvs(a=5, size=10000)\nax = sns.distplot(data_gamma,\n kde=False,\n bins=100,\n color='skyblue',\n hist_kws={\"linewidth\": 15,'alpha':1})\nax.set(xlabel='Gamma', ylabel='Frequency')",
"C:\\Users\\I304302\\AppData\\Local\\Continuum\\anaconda3\\lib\\site-packages\\matplotlib\\axes\\_axes.py:6462: UserWarning: The 'normed' kwarg is deprecated, and has been replaced by the 'density' kwarg.\n warnings.warn(\"The 'normed' kwarg is deprecated, and has been \"\n"
]
],
[
[
"Looking at the above graphs, we see that the gamma function is typically right skewed which means that the peak of the probabiltiy distribution function will generally be found on the left-hand side of the plot. The graph is skwed like this because a natural limit prevents outcomes on one side, for example call waiting times cannot be less than zero. ",
"_____no_output_____"
],
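[
"That right skew can also be measured rather than just eyeballed; a brief sketch using scipy.stats.skew with the same shape and scale parameters as above:\n\n```python\nimport numpy as np\nfrom scipy.stats import skew\n\ns = np.random.gamma(2.0, 2.0, 100000)\nprint(s.min()) # never below zero, the natural lower limit mentioned above\nprint(skew(s)) # clearly positive (around 1.4), i.e. the long tail is on the right\n```",
"_____no_output_____"
],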
[
"#### 5. Poisson Distribution\n\nIn statistics the poissson distribution is a discrete probability distribution of a given number of events occurring in a fixed interval of time or space if these events occur with a known constant rate and independently of the time since the last event. \n\nThe conditions for poisson distribution are:\n\n1) An event can occur any number of times during a time period.\n\n2) The rate of occurance is constant.\n\n3) The probability of an event occuring is proportional to the length of the time period, for example it should be twice as likely for an event to occur in a two hour time period than it is for an event to occur in a one hour period.\n\nA call centre is again a good exapmple of this distribution. It can be used to model the number ofcalls an office would receive during a given hour, let's say lunch hour, if they know they average 10 calls per hour during this time period. Although the average is 10 calls, the centre could get any number of calls during this time. The events are independent as one call is not expected to trigger another call. The occurence rate may be assumed to be constant. Therefore it is reasonable that it is as likely to receive a call during the first half hour as it is in the last.\n\nLets have a look using the np.random.poisson function:",
"_____no_output_____"
]
],
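[
[
"A worked version of the call centre example, using the Poisson probability formula P(X = k) = lambda^k * e^(-lambda) / k! via scipy.stats (a sketch assuming an average of 10 calls in the lunch hour):\n\n```python\nfrom scipy.stats import poisson\n\nlam = 10 # average of 10 calls during the lunch hour\nprint(poisson.pmf(10, lam)) # P(exactly 10 calls) is about 0.125\nprint(poisson.pmf(0, lam)) # P(no calls at all) is tiny, about 4.5e-05\nprint(1 - poisson.cdf(15, lam)) # P(more than 15 calls) is about 0.05\n```",
"_____no_output_____"
]
],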
[
[
"plt.subplot(1, 2, 1)\ns = np.random.poisson(5, 10000)\ncount, bins, ignored = plt.hist(s, 14, density=True)\nplt.hist(s)\n\n\nplt.subplot(1, 2, 2)\ns = np.random.poisson(20, 10000) # changed the mu to 20\ncount, bins, ignored = plt.hist(s, 14, density=True)\nplt.hist(s)\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"The basic shape of a poission distribution graph is subject to change. A graph with a low mean density is highly skewed, see above left, but if I increase this mean the distribution will spread out and become more symmetric as in above right.\n\nThis can be seen even more clearly on the graphs below.",
"_____no_output_____"
],
[
"#### Looking at normal distribution on an additional graph available online see references",
"_____no_output_____"
]
],
[
[
"from scipy.stats import poisson # import the gamma distribution module\ndata_poisson = poisson.rvs(mu=3, size=10000)\nax = sns.distplot(data_poisson,\n kde=False,\n color='skyblue',\n hist_kws={\"linewidth\": 15,'alpha':1})\nax.set(xlabel='Poisson', ylabel='Frequency')",
"C:\\Users\\I304302\\AppData\\Local\\Continuum\\anaconda3\\lib\\site-packages\\matplotlib\\axes\\_axes.py:6462: UserWarning: The 'normed' kwarg is deprecated, and has been replaced by the 'density' kwarg.\n warnings.warn(\"The 'normed' kwarg is deprecated, and has been \"\n"
],
[
"from scipy.stats import poisson # import the gamma distribution module\ndata_poisson = poisson.rvs(mu=10, size=10000) # changed the mu to 10\nax = sns.distplot(data_poisson,\n kde=False,\n color='skyblue',\n hist_kws={\"linewidth\": 15,'alpha':1})\nax.set(xlabel='Poisson', ylabel='Frequency')",
"C:\\Users\\I304302\\AppData\\Local\\Continuum\\anaconda3\\lib\\site-packages\\matplotlib\\axes\\_axes.py:6462: UserWarning: The 'normed' kwarg is deprecated, and has been replaced by the 'density' kwarg.\n warnings.warn(\"The 'normed' kwarg is deprecated, and has been \"\n"
]
],
[
[
"## The use of Seeds in Generating Pseudorandom Numbers.\n\nMost random numbers that are generated in numpy often look random but they are actually determined by some algorithims. So these are actually pseudo random numbers rather than an out and out random number. During this random number generation, the seed is actually the starting point in the sequence. The idea behind using seeds is for debugging programs.\n\nIf the same seed is used when generating random numbers, the result will be the same random numbers.\n\nLets have a look at this in numpy:",
"_____no_output_____"
]
],
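[
[
"A minimal sketch of that idea: re-seeding the generator puts it back at the same starting point, so the same sequence comes out again.\n\n```python\nimport numpy as np\n\nnp.random.seed(20)\nfirst = np.random.rand(5)\nnp.random.seed(20) # reset to the same starting point\nsecond = np.random.rand(5)\nprint(np.array_equal(first, second)) # True: the same seed always gives the same 'random' numbers\n```",
"_____no_output_____"
]
],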
[
[
"np.random.rand(5) # Generate 5 random numbers",
"_____no_output_____"
],
[
"np.random.rand(5) # Generate 5 more random numbers. We see they are all different to the first 5 numbers generated.",
"_____no_output_____"
],
[
"np.random.seed(20) # Define a seed for the rand function",
"_____no_output_____"
],
[
"np.random.rand(5) # After setting a seed we can see the 5 numbers generated are the same as the initial 5 generated.",
"_____no_output_____"
]
],
[
[
"In this assignment I covered why random data is used. I looked at the numpy package and how it's numpy.rand function allows for various ways ways to generate random data through random data sampling, permutations and distributions. I also looked at how a seed works when generating random data. ",
"_____no_output_____"
],
[
"## References\n\nhttps://docs.scipy.org/doc/numpy-1.14.5/user/whatisnumpy.html - Information on Numpy\n\nhttps://www.python-course.eu/numpy.php - Additional information on Numpy\n\nhttps://www.packtpub.com/mapt/book/big_data_and_business_intelligence/9781785285110/2/ch02lvl1sec16/numpy-random-numbers - Information on Numpy and random numbers\n\nhttps://www.dummies.com/education/math/statistics/what-the-distribution-tells-you-about-a-statistical-data-set/ - Information on distribution\n\nhttps://stattrek.com/statistics/dictionary.aspx?definition=permutation – Information on permutations\n\nhttps://stattrek.com/probability-distributions/binomial.aspx - Information on binomial distributions\n\nhttps://www.investopedia.com/terms/u/uniform-distribution.asp - Information on uniform distribution\n\nhttps://www.statisticshowto.datasciencecentral.com/probability-and-statistics/normal-distributions/ - Information on normal distribution\n\nhttps://study.com/academy/lesson/gamma-distribution-definition-equations-examples.html - Information on gamma distribution\n\nhttps://brilliant.org/wiki/poisson-distribution/ - Information on poisson distribution\n\nhttps://www.quora.com/What-is-seed-in-random-number-generation - Information on seeds\n\nhttp://cmdlinetips.com/2018/03/probability-distributions-in-python/ - Seaborn graphs\n",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
]
]
|
ec6920366f76842dac83f5167ac3bf4d311d12e6 | 561,034 | ipynb | Jupyter Notebook | python-for-data-visualization-projects/plotly-cufflinks/plotly_cufflinks_project.ipynb | niccololampa/data-science-projects | f1d7c208ad59c5e54614af17210cad997bb6fee5 | [
"MIT"
]
| null | null | null | python-for-data-visualization-projects/plotly-cufflinks/plotly_cufflinks_project.ipynb | niccololampa/data-science-projects | f1d7c208ad59c5e54614af17210cad997bb6fee5 | [
"MIT"
]
| null | null | null | python-for-data-visualization-projects/plotly-cufflinks/plotly_cufflinks_project.ipynb | niccololampa/data-science-projects | f1d7c208ad59c5e54614af17210cad997bb6fee5 | [
"MIT"
]
| null | null | null | 67.448185 | 78,388 | 0.67582 | [
[
[
"# Plotly and Cufflinks Project",
"_____no_output_____"
],
[
"## Imports and Set-up",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\n%matplotlib inline",
"_____no_output_____"
],
[
"from plotly import __version__\nfrom plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot",
"_____no_output_____"
],
[
"import cufflinks as cf",
"_____no_output_____"
],
[
"from plotly.offline import download_plotlyjs,init_notebook_mode,plot,iplot",
"_____no_output_____"
],
[
"# For Notebooks\ninit_notebook_mode(connected=True)",
"_____no_output_____"
],
[
"# For offline use\ncf.go_offline()",
"IOPub data rate exceeded.\nThe notebook server will temporarily stop sending output\nto the client in order to avoid crashing it.\nTo change this limit, set the config variable\n`--NotebookApp.iopub_data_rate_limit`.\n"
]
],
[
[
"### Fake Data",
"_____no_output_____"
]
],
[
[
"df = pd.DataFrame(np.random.randn(100,4),columns='A B C D'.split())",
"_____no_output_____"
],
[
"df.head()",
"_____no_output_____"
],
[
"df2 = pd.DataFrame({'Category':['A','B','C'],'Values':[32,43,50]})",
"_____no_output_____"
],
[
"df2.head()",
"_____no_output_____"
]
],
[
[
"Cufflinks and iplot()\n\n* scatter\n* bar\n* box\n* spread\n* ratio\n* heatmap\n* surface\n* histogram\n* bubble",
"_____no_output_____"
]
],
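[
[
"Of the kinds listed above, 'heatmap' is not demonstrated in the cells that follow; a minimal sketch (assuming the df of random numbers defined earlier and offline mode already enabled) could be a correlation heatmap:\n\n```python\n# correlation heatmap with cufflinks; 'rdylbu' matches the colorscale used for the surface plot later\ndf.corr().iplot(kind='heatmap', colorscale='rdylbu')\n```",
"_____no_output_____"
]
],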
[
[
"df.plot()",
"_____no_output_____"
],
[
"df.iplot()",
"_____no_output_____"
]
],
[
[
"## Scatter",
"_____no_output_____"
]
],
[
[
"df.iplot(kind='scatter',x='A',y='B',size=10)",
"_____no_output_____"
],
[
"df.iplot(kind='scatter',x='A',y='B',mode='markers',size=10)",
"_____no_output_____"
]
],
[
[
"## Bar Plots",
"_____no_output_____"
]
],
[
[
"df2.iplot(kind='bar',x='Category',y='Values')",
"_____no_output_____"
],
[
"df.count().iplot(kind='bar')",
"_____no_output_____"
]
],
[
[
"## Boxplots",
"_____no_output_____"
]
],
[
[
"df.iplot(kind='box')",
"_____no_output_____"
]
],
[
[
"## 3d Surface",
"_____no_output_____"
]
],
[
[
"df3 = pd.DataFrame({'x':[1,2,3,4,5],'y':[10,20,30,20,10],'z':[5,4,3,2,1]})\ndf3.iplot(kind='surface',colorscale='rdylbu')",
"_____no_output_____"
]
],
[
[
"## Spread",
"_____no_output_____"
]
],
[
[
"df[['A','B']].iplot(kind='spread')",
"_____no_output_____"
]
],
[
[
"## histogram",
"_____no_output_____"
]
],
[
[
"df['A'].iplot(kind='hist',bins=25)",
"_____no_output_____"
],
[
"df.iplot(kind='hist',bins=25)",
"_____no_output_____"
],
[
"df.iplot(kind='bubble',x='A',y='B',size='C')",
"_____no_output_____"
]
],
[
[
"## scatter_matrix()\n\nSimilar to sns.pairplot()",
"_____no_output_____"
]
],
[
[
"df.scatter_matrix()",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
]
|
ec692361599cbbbe2c04989f9b301f82c37ea2b7 | 138,439 | ipynb | Jupyter Notebook | Research/GoogleSmartPhone/code/GSDC4_MovingWindowSequence.ipynb | leeh8911/BeSuperRepo | 688891f9b8e6336e144f635b0df0337fdbde40ea | [
"MIT"
]
| null | null | null | Research/GoogleSmartPhone/code/GSDC4_MovingWindowSequence.ipynb | leeh8911/BeSuperRepo | 688891f9b8e6336e144f635b0df0337fdbde40ea | [
"MIT"
]
| null | null | null | Research/GoogleSmartPhone/code/GSDC4_MovingWindowSequence.ipynb | leeh8911/BeSuperRepo | 688891f9b8e6336e144f635b0df0337fdbde40ea | [
"MIT"
]
| null | null | null | 32.859957 | 1,648 | 0.479388 | [
[
[
"# Load Libraries",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\nfrom glob import glob\nimport os\nimport matplotlib.pyplot as plt\nfrom tqdm.notebook import tqdm\nfrom pathlib import Path\nimport plotly.express as px\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.utils.data import TensorDataset, DataLoader\nimport torchsummary\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\n\nimport warnings\nwarnings.filterwarnings(action='ignore')",
"_____no_output_____"
],
[
"device = torch.device('cuda:0' if torch.cuda.is_available() else \"cpu\")\nprint(device)",
"cuda:0\n"
]
],
[
[
"# Set Hyper Parameters",
"_____no_output_____"
]
],
[
[
"hyper_parameters = {\n \"SEED\":20180724,\n \"notebookName\":\"MovingWindowSequence\",\n \"nepochs\":10000,\n \"batch_size\":256,\n \"learning_rate\":0.001,\n \"window_size\":128,\n \"max_patience_count\":1000\n}\n\nnepochs = hyper_parameters['nepochs']",
"_____no_output_____"
],
[
"torch.manual_seed(hyper_parameters[\"SEED\"])",
"_____no_output_____"
],
[
"f\"./models/{hyper_parameters['notebookName']}\"",
"_____no_output_____"
],
[
"\nPATH = Path(f\"./models/{hyper_parameters['notebookName']}\")\nif os.path.isdir(PATH):\n dir_list = os.listdir(PATH)\n num_files = 0\n while True:\n if os.path.isfile(str(PATH / f\"{num_files}\")):\n print(num_files)\n num_files += 1\n else:\n break\nelse:\n os.mkdir(PATH)\n num_files = 0\nnum_files",
"_____no_output_____"
]
],
[
[
"# Set Path",
"_____no_output_____"
]
],
[
[
"data_dir = Path(\"../input/google-smartphone-decimeter-challenge\")",
"_____no_output_____"
]
],
[
[
"# Help Functions",
"_____no_output_____"
],
[
"# Load Data",
"_____no_output_____"
]
],
[
[
"df_train_default = pd.read_pickle(str(data_dir / \"gsdc_extract_train.pkl.gzip\"))",
"_____no_output_____"
],
[
"df_test = pd.read_pickle(str(data_dir / \"gsdc_extract_test.pkl.gzip\"))",
"_____no_output_____"
],
[
"for col in df_train_default.columns:\n print(col)",
"collectionName\nphoneName\nmillisSinceGpsEpoch\nlatDeg\nlngDeg\nheightAboveWgs84EllipsoidM\nphone\ntimeSinceFirstFixSeconds\nhDop\nvDop\nspeedMps\ncourseDegree\nt_latDeg\nt_lngDeg\nt_heightAboveWgs84EllipsoidM\nconstellationType\nsvid\nsignalType\nreceivedSvTimeInGpsNanos\nxSatPosM\nySatPosM\nzSatPosM\nxSatVelMps\nySatVelMps\nzSatVelMps\nsatClkBiasM\nsatClkDriftMps\nrawPrM\nrawPrUncM\nisrbM\nionoDelayM\ntropoDelayM\nutcTimeMillis\nelapsedRealtimeNanos\nyawDeg\nrollDeg\npitchDeg\nutcTimeMillis_Status\nSignalCount\nSignalIndex\nConstellationType\nSvid\nCarrierFrequencyHz\nCn0DbHz\nAzimuthDegrees\nElevationDegrees\nUsedInFix\nHasAlmanacData\nHasEphemerisData\nBasebandCn0DbHz\nutcTimeMillis_UncalMag\nelapsedRealtimeNanos_UncalMag\nUncalMagXMicroT\nUncalMagYMicroT\nUncalMagZMicroT\nBiasXMicroT\nBiasYMicroT\nBiasZMicroT\nutcTimeMillis_UncalAccel\nelapsedRealtimeNanos_UncalAccel\nUncalAccelXMps2\nUncalAccelYMps2\nUncalAccelZMps2\nBiasXMps2\nBiasYMps2\nBiasZMps2\nutcTimeMillis_UncalGyro\nelapsedRealtimeNanos_UncalGyro\nUncalGyroXRadPerSec\nUncalGyroYRadPerSec\nUncalGyroZRadPerSec\nDriftXRadPerSec\nDriftYRadPerSec\nDriftZRadPerSec\nutcTimeMillis_Raw\nTimeNanos\nLeapSecond\nFullBiasNanos\nBiasNanos\nBiasUncertaintyNanos\nDriftNanosPerSecond\nDriftUncertaintyNanosPerSecond\nHardwareClockDiscontinuityCount\nSvid_Raw\nTimeOffsetNanos\nState\nReceivedSvTimeNanos\nReceivedSvTimeUncertaintyNanos\nCn0DbHz_Raw\nPseudorangeRateMetersPerSecond\nPseudorangeRateUncertaintyMetersPerSecond\nAccumulatedDeltaRangeState\nAccumulatedDeltaRangeMeters\nAccumulatedDeltaRangeUncertaintyMeters\nCarrierFrequencyHz_Raw\nMultipathIndicator\nConstellationType_Raw\nAgcDb\nBasebandCn0DbHz_Raw\nFullInterSignalBiasNanos\nFullInterSignalBiasUncertaintyNanos\nSatelliteInterSignalBiasNanos\nSatelliteInterSignalBiasUncertaintyNanos\nCodeType\nChipsetElapsedRealtimeNanos\ndlatDeg\ndlngDeg\ndheight\ndlatDeg_Scaled\ndlngDeg_Scaled\ndheight_Scaled\nxSatPosM_Scaled\nySatPosM_Scaled\nzSatPosM_Scaled\nxSatVelMps_Scaled\nySatVelMps_Scaled\nzSatVelMps_Scaled\nUncalGyroXRadPerSec_Scaled\nUncalGyroYRadPerSec_Scaled\nUncalGyroZRadPerSec_Scaled\nDriftXRadPerSec_Scaled\nDriftYRadPerSec_Scaled\nDriftZRadPerSec_Scaled\nUncalAccelXMps2_Scaled\nUncalAccelYMps2_Scaled\nUncalAccelZMps2_Scaled\nBiasXMps2_Scaled\nBiasYMps2_Scaled\nBiasZMps2_Scaled\nUncalMagXMicroT_Scaled\nUncalMagYMicroT_Scaled\nUncalMagZMicroT_Scaled\nBiasXMicroT_Scaled\nBiasYMicroT_Scaled\nBiasZMicroT_Scaled\nyawDeg_Scaled\nrollDeg_Scaled\npitchDeg_Scaled\nGPS_L1\nGPS_L5\nGAL_E1\nGAL_E5A\nGLO_G1\nBDS_B1I\nBDS_B1C\nBDS_B2A\nQZS_J1\nQZS_J5\n"
]
],
[
[
"# Dataloader",
"_____no_output_____"
]
],
[
[
"df_train_default['phone'].value_counts()",
"_____no_output_____"
]
],
[
[
"# Split functions",
"_____no_output_____"
]
],
[
[
"def CustomTrainValidSplit(df:pd.DataFrame, valid_size):\n phones = df['phone'].unique()\n \n valid_num = int(len(phones) * valid_size)\n train_num = len(phones) - valid_num\n \n indexes = np.array(range(len(phones)))\n indexes = np.random.choice(indexes, len(indexes), replace = False)\n \n df_train = []\n for phone in phones[indexes[:train_num]]:\n df_train.append(df[df['phone'] == phone])\n df_train = pd.concat(df_train).reset_index().drop(columns = 'index')\n \n df_valid = []\n for phone in phones[indexes[train_num:-1]]:\n df_valid.append(df[df['phone'] == phone])\n df_valid = pd.concat(df_valid).reset_index().drop(columns = 'index')\n \n return df_train, df_valid\n \ndf_train, df_valid = CustomTrainValidSplit(df_train_default.sort_values(by = ['phone', 'millisSinceGpsEpoch'], ignore_index = True), valid_size = 0.1)\ndf_test = df_test.sort_values(by = ['phone', 'millisSinceGpsEpoch'], ignore_index = True)\nprint(df_train.shape, df_valid.shape)\n ",
"(118904, 148) (11245, 148)\n"
],
[
"df_train",
"_____no_output_____"
]
],
[
[
"# Create Data Loader",
"_____no_output_____"
],
[
"## Moving Window Function",
"_____no_output_____"
]
],
[
[
"def MovingWindow(current_index, max_index, window_size, ratio = 0.5):\n left_window = int(window_size * ratio)\n right_window = window_size - left_window\n \n left_index = np.arange(current_index - left_window, current_index)\n right_index = np.arange(current_index, current_index + right_window)\n \n index = np.concatenate([left_index, right_index])\n index[index < 0] = 0\n index[index >= max_index] = max_index-1\n \n return index",
"_____no_output_____"
]
],
[
[
"## Custom Dataset",
"_____no_output_____"
]
],
[
[
"class CustomDataset(torch.utils.data.Dataset):\n def __init__(self, df:pd.DataFrame, \n features = ['latDeg', 'lngDeg', 'heightAboveWgs84EllipsoidM'], \n labels = ['t_latDeg', 't_lngDeg', 't_heightAboveWgs84EllipsoidM'],\n window_size = 100,\n train = False,\n augment = False,\n crop_ratio = 0.0,\n device = 'cpu'):\n self.df = df\n self.features = features\n self.labels = labels\n self.len = df.shape[0]\n self.window_size = window_size\n self.train = train\n self.augment = augment\n self.crop_ratio = crop_ratio\n self.device = device\n self.phones = df['phone'].unique()\n \n self.df_dict = dict()\n for phone in tqdm(self.phones):\n self.df_dict[phone] = df[df['phone'] == phone]\n \n \n def __len__(self):\n return self.len\n \n def __getitem__(self, idx):\n origin_idx = idx\n current_sample = self.df.iloc[idx]\n phone = current_sample['phone']\n millisSinceGpsEpoch = current_sample['millisSinceGpsEpoch']\n df = self.df_dict[phone]\n \n start_index = df.index[0]\n \n self.data = df[features].astype(float).values\n if self.train == True:\n self.true = df[labels].astype(float).values\n else:\n self.true = []\n \n window_index = MovingWindow(idx - start_index, df.shape[0], self.window_size)\n data = self.data[window_index, :]\n \n \n indx = [phone, millisSinceGpsEpoch]\n \n # data shape : num_of_features X window_size\n # true shape : num_of_labels X 1\n data = torch.Tensor(data)\n if self.augment:\n crop_size = int(self.crop_ratio * self.window_size * np.random.rand(1))\n crop_start_index = np.random.randint(0, self.window_size - crop_size)\n data[crop_start_index:crop_size, :] = 0\n \n if self.train is False:\n true = []\n else:\n true = self.true[idx - start_index, :]\n true = torch.Tensor(true.astype(float))\n return data, true, indx\n \n",
"_____no_output_____"
]
],
[
[
"## Feature Select",
"_____no_output_____"
]
],
[
[
"features = [\n 'latDeg', \n 'lngDeg', \n 'heightAboveWgs84EllipsoidM',\n 'dlatDeg_Scaled',\n 'dlngDeg_Scaled',\n 'dheight_Scaled',\n 'xSatPosM_Scaled',\n 'ySatPosM_Scaled',\n 'zSatPosM_Scaled',\n 'xSatVelMps_Scaled',\n 'ySatVelMps_Scaled',\n 'zSatVelMps_Scaled',\n 'UncalGyroXRadPerSec_Scaled',\n 'UncalGyroYRadPerSec_Scaled',\n 'UncalGyroZRadPerSec_Scaled',\n 'DriftXRadPerSec_Scaled',\n 'DriftYRadPerSec_Scaled',\n 'DriftZRadPerSec_Scaled',\n 'UncalAccelXMps2_Scaled',\n 'UncalAccelYMps2_Scaled',\n 'UncalAccelZMps2_Scaled',\n 'BiasXMps2_Scaled',\n 'BiasYMps2_Scaled',\n 'BiasZMps2_Scaled',\n 'UncalMagXMicroT_Scaled',\n 'UncalMagYMicroT_Scaled',\n 'UncalMagZMicroT_Scaled',\n 'BiasXMicroT_Scaled',\n 'BiasYMicroT_Scaled',\n 'BiasZMicroT_Scaled',\n 'yawDeg_Scaled',\n 'rollDeg_Scaled',\n 'pitchDeg_Scaled',\n 'GPS_L1', \n 'GPS_L5', \n 'GAL_E1', \n 'GAL_E5A', \n 'GLO_G1', \n 'BDS_B1I', \n 'BDS_B1C', \n 'BDS_B2A', \n 'QZS_J1', \n 'QZS_J5'\n]\nstatus_features = [\n]\n\nlabels = [\n 't_latDeg', \n 't_lngDeg', \n 't_heightAboveWgs84EllipsoidM',\n# 'courseDegree',\n# 'hDop',\n# 'vDop',\n# 'speedMps'\n ]\n\nprint(df_train[features].shape)\nprint(df_train[features].describe())\n",
"(118904, 43)\n latDeg lngDeg heightAboveWgs84EllipsoidM \\\ncount 118904.000000 118904.000000 118904.000000 \nmean 37.435567 -122.154980 21.783362 \nstd 0.082466 0.143705 73.308272 \nmin 37.322844 -122.472214 -6157.470000 \n25% 37.373910 -122.273127 -27.310000 \n50% 37.424290 -122.116144 1.770000 \n75% 37.469032 -122.069911 56.780000 \nmax 37.690836 -121.881855 13701.980000 \n\n dlatDeg_Scaled dlngDeg_Scaled dheight_Scaled xSatPosM_Scaled \\\ncount 118904.000000 118904.000000 118904.000000 118904.000000 \nmean 0.003680 -0.001599 -0.012571 0.012272 \nstd 1.013329 1.008292 1.002822 0.724084 \nmin -3.367212 -2.837517 -84.852009 -1.703958 \n25% -0.141651 -0.257462 -0.365538 -0.313048 \n50% 0.068484 0.088219 -0.242765 0.000000 \n75% 0.197894 0.206375 0.221136 0.000000 \nmax 2.689582 2.871625 188.239406 2.588478 \n\n ySatPosM_Scaled zSatPosM_Scaled xSatVelMps_Scaled ... \\\ncount 118904.000000 118904.000000 118904.000000 ... \nmean -0.003605 0.010974 0.003385 ... \nstd 0.737901 0.718791 0.731761 ... \nmin -1.604379 -2.959637 -2.805447 ... \n25% -0.234712 0.000000 -0.143455 ... \n50% 0.000000 0.000000 0.000000 ... \n75% 0.000000 0.430128 0.005197 ... \nmax 3.452574 2.112431 2.136355 ... \n\n GPS_L1 GPS_L5 GAL_E1 GAL_E5A \\\ncount 118904.000000 118904.000000 118904.000000 118904.000000 \nmean 0.166512 0.053867 0.102907 0.066440 \nstd 0.372542 0.225756 0.303838 0.249051 \nmin 0.000000 0.000000 0.000000 0.000000 \n25% 0.000000 0.000000 0.000000 0.000000 \n50% 0.000000 0.000000 0.000000 0.000000 \n75% 0.000000 0.000000 0.000000 0.000000 \nmax 1.000000 1.000000 1.000000 1.000000 \n\n GLO_G1 BDS_B1I BDS_B1C BDS_B2A QZS_J1 \\\ncount 118904.000000 118904.000000 118904.0 118904.0 118904.000000 \nmean 0.104891 0.032261 0.0 0.0 0.001682 \nstd 0.306415 0.176694 0.0 0.0 0.040978 \nmin 0.000000 0.000000 0.0 0.0 0.000000 \n25% 0.000000 0.000000 0.0 0.0 0.000000 \n50% 0.000000 0.000000 0.0 0.0 0.000000 \n75% 0.000000 0.000000 0.0 0.0 0.000000 \nmax 1.000000 1.000000 0.0 0.0 1.000000 \n\n QZS_J5 \ncount 118904.000000 \nmean 0.001564 \nstd 0.039520 \nmin 0.000000 \n25% 0.000000 \n50% 0.000000 \n75% 0.000000 \nmax 1.000000 \n\n[8 rows x 43 columns]\n"
]
],
[
[
"## Get Datasets",
"_____no_output_____"
]
],
[
[
"train_data = CustomDataset(df_train, \n features = features, labels = labels, \n window_size = hyper_parameters['window_size'], \n train = True, \n augment = True,\n crop_ratio = 0.1,\n device = device)\nvalid_data = CustomDataset(df_valid, \n features = features, labels = labels, \n window_size = hyper_parameters['window_size'], \n train = True, \n augment = False,\n crop_ratio = 0.0,\n device = device)\ntest_data = CustomDataset(df_test, \n features = features, labels = labels, \n window_size = hyper_parameters['window_size'], \n train = False, \n augment = False,\n crop_ratio = 0.0,\n device = device)",
"_____no_output_____"
]
],
[
[
"## Get Data Loader",
"_____no_output_____"
]
],
[
[
"train_loader = DataLoader(train_data, batch_size = hyper_parameters['batch_size'], shuffle = True)\nvalid_loader = DataLoader(valid_data, batch_size = hyper_parameters['batch_size'], shuffle = False)\ntest_loader = DataLoader(test_data, batch_size = hyper_parameters['batch_size'], shuffle = False)",
"_____no_output_____"
]
],
[
[
"# Build Model\n## Define Loss and Score",
"_____no_output_____"
]
],
[
[
"def torch_haversine(lat1, lon1, lat2, lon2):\n lat1=lat1 % 360\n lon1=lon1 % 360\n lat2=lat2 % 360\n lon2=lon2 % 360\n\n lat1, lat2, lon1, lon2 = map(torch.deg2rad, [lat1, lat2, lon1, lon2])\n\n dlat = (lat2 - lat1)\n dlon = (lon2 - lon1)\n\n a = torch.sin(dlat / 2.0)**2 + torch.cos(lat1) * torch.cos(lat2) * (torch.sin(dlon / 2.0)**2)\n c = 2 * torch.arcsin(a ** 0.5)\n\n dist = 6_367_000 * c\n\n return dist\n\ndef CustomLoss(predict:torch.Tensor, target:torch.Tensor):\n loss = SmoothL1Loss(predict, target)\n gpsLoss = GpsLoss(predict, target)\n if (loss < 1):\n loss = gpsLoss\n return loss\n\ndef SmoothL1Loss(predict:torch.Tensor, target:torch.Tensor):\n \n return nn.SmoothL1Loss()(predict, target)\n\ndef GpsLoss(predict:torch.Tensor, target:torch.Tensor):\n dist = torch_haversine(predict[:,0], predict[:,1], target[:,0], target[:,1])\n\n return dist.mean()\n\ndef GpsScore(predict:torch.Tensor, target:torch.Tensor):\n dist = torch_haversine(predict[:,0], predict[:,1], target[:,0], target[:,1])\n\n return (torch.quantile(dist, 0.5) + torch.quantile(dist, 0.95))/2",
"_____no_output_____"
]
],
[
[
"## Build Custom Model",
"_____no_output_____"
]
],
[
[
"\nclass ConvBlock(nn.Module):\n def __init__(self, input_features, features = 128):\n super().__init__()\n \n self.fuse = nn.Conv1d(input_features + features, features, kernel_size = 1)\n self.conv = nn.Conv1d(input_features, features, kernel_size=3, padding = 1)\n self.batch = nn.BatchNorm1d(features)\n self.pool = nn.AvgPool1d(kernel_size = 2)\n \n def forward(self, x):\n skip = x\n \n x = self.conv(x)\n x = F.relu(x)\n x = self.batch(x)\n x = torch.cat([skip, x], axis = 1)\n x = self.fuse(x)\n x = self.pool(x)\n return x\n \nclass BaseModel(nn.Module):\n def __init__(self, input_size = (100, 3), output_size = 3):\n super().__init__()\n self.input_size = input_size\n self.output_size = output_size\n \n self.conv1 = ConvBlock(input_size[1], 256)\n self.conv2 = ConvBlock(256, 512)\n \n self.batch_norm = nn.BatchNorm1d(512)\n self.layer_norm = nn.LayerNorm([32, 512])\n \n self.fc1 = nn.Linear(32*512, 1024)\n self.layer_norm1 = nn.LayerNorm(1024)\n self.fc2 = nn.Linear(1024, 512)\n self.layer_norm2 = nn.LayerNorm(512)\n self.fc3 = nn.Linear(512, output_size)\n \n self.drop06 = nn.Dropout(0.6)\n self.drop03 = nn.Dropout(0.3)\n self.drop01 = nn.Dropout(0.1)\n \n nn.init.kaiming_normal_(self.fc1.weight)\n nn.init.kaiming_normal_(self.fc2.weight)\n nn.init.kaiming_normal_(self.fc3.weight)\n \n \n def forward(self, x):\n input_size = self.input_size \n output_size = self.output_size\n \n x[:,:,:2] = torch.deg2rad(x[:,:,:2])\n \n \n x = x.transpose(2,1)\n x = self.conv1(x)\n x = self.conv2(x)\n x = x.transpose(2,1)\n \n x = x.reshape(-1, 32*512)\n \n x = self.drop06(x)\n x = self.fc1(x)\n x = F.relu(x)\n x = self.layer_norm1(x)\n \n x = self.drop03(x)\n x = self.fc2(x)\n x = F.relu(x)\n x = self.layer_norm2(x)\n \n x = self.drop01(x)\n x = self.fc3(x)\n \n x[:,:2] = torch.rad2deg(x[:,:2])\n \n return x",
"_____no_output_____"
]
],
[
[
"## Compile Model",
"_____no_output_____"
]
],
[
[
"model = BaseModel((hyper_parameters['window_size'], len(features)), len(labels))\nmodel.to(device)\n# model.load_state_dict(torch.load(\"./models/Baseline3/model-4.pth\"))\n\n# loss_func = nn.SmoothL1Loss()\nloss_func = CustomLoss\noptimizer = optim.Adam(model.parameters(), lr = hyper_parameters['learning_rate'])\nscheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer=optimizer,\n mode = 'min',\n factor = 0.1,\n patience = 5,\n verbose = True)",
"_____no_output_____"
]
],
[
[
"# Fit and Validate\n## Train",
"_____no_output_____"
]
],
[
[
"def train(epoch, progress_log):\n model.train() # 신경망을 학습 모드로 전환\n\n # 데이터로더에서 미니배치를 하나씩 꺼내 학습을 수행\n predict = []\n ground = []\n \n for data, targets, _ in progress_log:\n \n data = data.to(device)\n targets = targets.to(device)\n \n optimizer.zero_grad() # 경사를 0으로 초기화\n outputs = model(data) # 데이터를 입력하고 출력을 계산\n loss = loss_func(outputs, targets) # 출력과 훈련 데이터 정답 간의 오차를 계산\n \n loss.backward() # 오차를 역전파 계산\n optimizer.step() # 역전파 계산한 값으로 가중치를 수정\n \n predict.append(outputs)\n ground.append(targets)\n \n # 정확도 출력\n predict = torch.cat(predict,axis = 0)\n ground = torch.cat(ground,axis = 0)\n \n loss = loss_func(predict, ground)\n gpsloss = GpsLoss(predict, ground)\n gpsscore = GpsScore(predict, ground)\n return loss, gpsloss, gpsscore",
"_____no_output_____"
]
],
[
[
"## Valid",
"_____no_output_____"
]
],
[
[
"def valid(progress_log):\n model.eval() # 신경망을 추론 모드로 전환\n\n # 데이터로더에서 미니배치를 하나씩 꺼내 추론을 수행\n predict = []\n ground = []\n \n with torch.no_grad(): # 추론 과정에는 미분이 필요없음\n for data, targets, _ in progress_log:\n \n data = data.to(device)\n targets = targets.to(device)\n \n outputs = model(data) # 데이터를 입력하고 출력을 계산\n loss = loss_func(outputs, targets) # 출력과 훈련 데이터 정답 간의 오차를 계산\n \n predict.append(outputs)\n ground.append(targets)\n \n # 정확도 출력\n predict = torch.cat(predict,axis = 0)\n ground = torch.cat(ground,axis = 0)\n \n loss = loss_func(predict, ground)\n gpsloss = GpsLoss(predict, ground)\n gpsscore = GpsScore(predict, ground)\n return loss, gpsloss, gpsscore",
"_____no_output_____"
]
],
[
[
"## Test",
"_____no_output_____"
]
],
[
[
"def test(dataloader):\n model.eval() # 신경망을 추론 모드로 전환\n \n output_list = []\n with torch.no_grad(): # 추론 과정에는 미분이 필요없음\n for data, _, index in tqdm(dataloader):\n data = data.to(device)\n outputs = model(data) # 데이터를 입력하고 출력을 계산\n df_temp = pd.DataFrame()\n df_temp['phone'] = index[0]\n df_temp['millisSinceGpsEpoch'] = index[1]\n df_temp[['latDeg', 'lngDeg', 'heightAboveWgs84EllipsoidM']] = outputs[:,:3].to('cpu').numpy()\n output_list.append(df_temp)\n \n predicts = pd.concat(output_list)\n return predicts\n ",
"_____no_output_____"
]
],
[
[
"## Fit",
"_____no_output_____"
]
],
[
[
"train_loss_list = []\ntrain_gloss_list = []\ntrain_score_list = []\nvalid_loss_list = []\nvalid_gloss_list = []\nvalid_score_list = []\n\npatience_count = 0\nmin_valid_score = np.inf\ncheckpoint_name = \"\"\n\nif not os.path.isdir(f\"./models/{hyper_parameters['notebookName']}/model-{num_files}_checkpoint/\"):\n os.mkdir(f\"./models/{hyper_parameters['notebookName']}/model-{num_files}_checkpoint/\")\n \nprog_epoch = tqdm(range(0, hyper_parameters['nepochs']), position = 0, desc = 'EPOCH')\nfor epoch in prog_epoch:\n print( \"-------------------------------------------------------\")\n print(f\"|EPOCH: {epoch+1}/{nepochs}\")\n prog_train = tqdm(train_loader, desc = 'TRAIN', leave = False)\n prog_valid = tqdm(valid_loader, desc = 'VALID', leave = False)\n\n train_loss, train_gpsloss, train_gpsscore = train(epoch, prog_train)\n valid_loss, valid_gpsloss, valid_gpsscore = valid(prog_valid)\n \n scheduler.step(valid_gpsscore)\n if valid_gpsscore < min_valid_score:\n print(f\"|{epoch+1}-th model is checked!, *model-{epoch}-{valid_gpsscore}.pth*\")\n min_valid_score= valid_gpsscore\n checkpoint_name = f\"./models/{hyper_parameters['notebookName']}/model-{num_files}_checkpoint/model-{epoch}-{valid_gpsscore}.pth\"\n torch.save(model.state_dict(), checkpoint_name)\n else:\n patience_count+=1\n if(patience_count > hyper_parameters['max_patience_count']):\n break\n \n train_loss_list.append(train_loss)\n train_gloss_list.append(train_gpsloss)\n train_score_list.append(train_gpsscore)\n valid_loss_list.append(valid_loss)\n valid_gloss_list.append(valid_gpsloss)\n valid_score_list.append(valid_gpsscore)\n \n print(f\"|TRAIN: loss={train_loss:.6f}, gloss={train_gpsloss:.6f}, score={train_gpsscore:.6f}|\")\n print(f\"|VALID: loss={valid_loss:.6f}, gloss={valid_gpsloss:.6f}, score={valid_gpsscore:.6f}|\")\n\n\nhistory = dict()\nhistory['train_loss'] = train_loss_list\nhistory['train_gpsloss'] = train_gloss_list\nhistory['train_score'] = train_score_list\nhistory['valid_loss'] = valid_loss_list\nhistory['valid_gpsloss'] = valid_gloss_list\nhistory['valid_score'] = valid_score_list",
"_____no_output_____"
]
],
[
[
"## Visualization",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize = (16,6))\nplt.subplot(3,1,1)\nplt.plot(history['train_loss'], label = 'train')\nplt.plot(history['valid_loss'], label = 'valid')\nplt.ylabel('loss')\n\nplt.subplot(3,1,2)\nplt.plot(history['train_gpsloss'], label = 'train')\nplt.plot(history['valid_gpsloss'], label = 'valid')\nplt.ylabel('gpsloss')\n\nplt.subplot(3,1,3)\nplt.plot(history['train_score'], label = 'train')\nplt.plot(history['valid_score'], label = 'valid')\nplt.ylabel('score')",
"_____no_output_____"
],
[
"import numpy as np\nA = np.random.rand(2, 10)\nB = np.random.rand(2, 1)\n\nprint(A, B, A - B)",
"[[0.74335537 0.40151104 0.37760753 0.0348814 0.31775234 0.74628005\n 0.32268267 0.58125136 0.46762097 0.71231719]\n [0.94903722 0.8873724 0.86205945 0.45757112 0.00785885 0.3839011\n 0.42533975 0.59044915 0.93183954 0.19551289]] [[0.90712221]\n [0.78405319]] [[-0.16376684 -0.50561116 -0.52951468 -0.8722408 -0.58936987 -0.16084215\n -0.58443953 -0.32587084 -0.43950123 -0.19480502]\n [ 0.16498402 0.10331921 0.07800626 -0.32648208 -0.77619434 -0.40015209\n -0.35871345 -0.19360405 0.14778635 -0.5885403 ]]\n"
]
],
[
[
"# Output",
"_____no_output_____"
]
],
[
[
"# Load submission sample\nsubmission = pd.read_csv(str(data_dir / \"sample_submission.csv\"))\nprint(submission.shape)\nsubmission.head()",
"_____no_output_____"
],
[
"model.load_state_dict(torch.load(checkpoint_name))\ntorch.save(model.state_dict(), f\"./models/{hyper_parameters['notebookName']}/model-{num_files}_checkpoint/model-{epoch}-{min_valid_score}.pth\")",
"_____no_output_____"
],
[
"predict = test(test_loader)\nprint(predict.shape)\npredict.head()",
"_____no_output_____"
],
[
"submission = submission[['phone', 'millisSinceGpsEpoch']].merge(predict[['phone', 'millisSinceGpsEpoch', 'latDeg', 'lngDeg']]\n , on = ['phone', 'millisSinceGpsEpoch'])\nprint(submission.shape)\nsubmission.head()",
"_____no_output_____"
],
[
"submission.to_csv(f\"./models/{hyper_parameters['notebookName']}/result-{num_files}.csv\", index = False)\npd.DataFrame([]).to_csv(PATH / f\"{num_files}\")",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
]
|
ec6954b4c314cb55da5e72dbf217937278efbab8 | 576 | ipynb | Jupyter Notebook | CNN/Untitled.ipynb | Saiful1721648/Detected-of-COVID-19-and-Pneumonia-Using-Deep-Convolutional-Neural-Network | 35f09c1c6b78f0807c540b803d9e5404e7c62bd8 | [
"Apache-2.0"
]
| null | null | null | CNN/Untitled.ipynb | Saiful1721648/Detected-of-COVID-19-and-Pneumonia-Using-Deep-Convolutional-Neural-Network | 35f09c1c6b78f0807c540b803d9e5404e7c62bd8 | [
"Apache-2.0"
]
| null | null | null | CNN/Untitled.ipynb | Saiful1721648/Detected-of-COVID-19-and-Pneumonia-Using-Deep-Convolutional-Neural-Network | 35f09c1c6b78f0807c540b803d9e5404e7c62bd8 | [
"Apache-2.0"
]
| null | null | null | 16.941176 | 34 | 0.524306 | []
| []
| []
|
ec6957be0fcfeb62d1e8b5213bed0597a484190c | 47,296 | ipynb | Jupyter Notebook | labs/lab2_imdb_morpho/practicaPeliculasSolution.ipynb | albarji/curso-analisis-textos | 27ec4c653007267f41556df5fde5ac507c9e7993 | [
"MIT"
]
| 14 | 2019-08-18T18:57:47.000Z | 2021-10-01T10:43:57.000Z | labs/lab2_imdb_morpho/practicaPeliculasSolution.ipynb | albarji/curso-analisis-textos | 27ec4c653007267f41556df5fde5ac507c9e7993 | [
"MIT"
]
| null | null | null | labs/lab2_imdb_morpho/practicaPeliculasSolution.ipynb | albarji/curso-analisis-textos | 27ec4c653007267f41556df5fde5ac507c9e7993 | [
"MIT"
]
| 2 | 2020-05-09T16:43:32.000Z | 2021-01-04T19:56:40.000Z | 36.97889 | 570 | 0.511079 | [
[
[
"# Ejercicio: análisis morfológico y aplicación a opiniones sobre películas",
"_____no_output_____"
],
[
"<img src=\"img/drama.png\" style=\"width:400px;height:400px;\">",
"_____no_output_____"
],
[
"En este ejercicio vamos a utilizar críticas escritas en [IMDB](http://www.imdb.com/) para tratar de extraer automáticamente la opinión expresada, positiva o negativa, de un texto. Para ello utilizamos algunas técnicas técnicas de análisis morfológico del texto.\n\nEl objetivo del ejercicio es construir un sistema que dado el texto en inglés de una crítica sea capaz de estimar si esa crítica expresa una opinión positiva o negativa. Empezaremos construyendo un clasificador de opinión sencillo, para ir introduciendo características cada vez más complicadas e ir mejorando nuestros resultados.",
"_____no_output_____"
],
[
"## Instrucciones",
"_____no_output_____"
],
[
"A lo largo de este cuaderno encontrarás celdas vacías que tendrás que rellenar con tu propio código. Sigue las instrucciones del cuaderno y presta especial atención a los siguientes iconos:\n\n<table>\n<tr><td width=\"80\"><img src=\"img/question.png\" style=\"width:auto;height:auto\"></td><td style=\"text-align:left\">Deberás responder a la pregunta indicada con el código o contestación que escribas en la celda inferior.</td></tr>\n <tr><td width=\"80\"><img src=\"img/exclamation.png\" style=\"width:auto;height:auto\"></td><td style=\"text-align:left\">Esto es una pista u observación que te puede ayudar a resolver la práctica.</td></tr>\n <tr><td width=\"80\"><img src=\"img/pro.png\" style=\"width:auto;height:auto\"></td><td style=\"text-align:left\">Este es un ejercicio avanzado y voluntario que puedes realizar si quieres profundar más sobre el tema. Te animamos a intentarlo para aprender más ¡Ánimo!</td></tr>\n</table>\n\nPara evitar problemas de compatibilidad y de paquetes no instalados, se recomienda ejecutar este notebook bajo uno de los [entornos recomendados de Text Mining](https://github.com/albarji/teaching-environments/tree/master/textmining).\n\nAdicionalmente si necesitas consultar la ayuda de cualquier función python puedes colocar el cursor de escritura sobre el nombre de la misma y pulsar Mayúsculas+Shift para que aparezca un recuadro con sus detalles. Ten en cuenta que esto únicamente funciona en las celdas de código.\n\n¡Adelante!",
"_____no_output_____"
],
[
"## Preliminares",
"_____no_output_____"
],
[
"En primer lugar vamos a fijar la semilla aleatoria para que los resultados sean reproducibles entre diferentes ejecuciones del notebook.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nnp.random.seed(12345)",
"_____no_output_____"
]
],
[
[
"## Carga y preparación de datos",
"_____no_output_____"
],
[
"Los datos que usaremos en esta práctica son un conjunto preparado de los datos empleados en el artículo\n\n Andrew L. Maas, Raymond E. Daly, Peter T. Pham, Dan Huang, Andrew Y. Ng, and Christopher Potts. (2011). Learning Word Vectors for Sentiment Analysis. The 49th Annual Meeting of the Association for Computational Linguistics (ACL 2011).\n \ny consisten en críticas de películas escritas en la web en inglés IMDB. Se han tomado aquellas críticas con una puntuación mayor a 7 como **opiniones positivas**, mientras que aquellas con puntuación menor a 4 se han tomado como **opiniones negativas**.\n\nLos datos están todos contenidos en el fichero *data/data.csv*, en formato CSV separado por tabuladores. El fichero contiene únicamente dos columnas, la primera de ellas indicando el tipo de opinión (1 = opinión positiva, 0 = opinión negativa) y la segunda de ellas el texto de la crítica.",
"_____no_output_____"
],
[
"<table>\n <tr>\n <tr><td width=\"80\"><img src=\"img/question.png\" style=\"width:auto;height:auto\"></td><td style=\"text-align:left\">\n Carga en un Dataframe de Pandas los datos del fichero <i>data/data.csv</i>. Analiza los primeros registros del Dataframe. ¿Parecen coherentes los valores de opinión con el texto?\n </td>\n </tr> \n</table>",
"_____no_output_____"
]
],
[
[
"####### INSERT YOUR CODE HERE\nimport pandas as pd\ndata = pd.read_csv(\"./data/data.csv\", sep='\\t')\ndata.head()",
"_____no_output_____"
]
],
[
[
"<table>\n <tr>\n <tr><td width=\"80\"><img src=\"img/pro.png\" style=\"width:auto;height:auto\"></td><td style=\"text-align:left\">\n El fichero cargado arriba es una versión reducida del conjunto completo de datos. Si quieres optar por usar todos los datos para esta práctica puedes cargar el fichero <i>data/datafull.csv.gz</i>. Ten en cuenta que los tiempos de cálculo serán mucho mayores, aunque a cambio podrás conseguir mejores resultados de clasificación.\n </td>\n </tr> \n</table>",
"_____no_output_____"
]
],
[
[
"####### INSERT YOUR CODE HERE\n#data = pd.read_csv(\"./data/datafull.csv\", sep='\\t')\n#data.head()",
"_____no_output_____"
]
],
[
[
"Ahora vamos a preparar dos listas de índices, que nos indiquen qué parte de los datos vamos a usar para entrenamiento y qué parte para test.",
"_____no_output_____"
],
[
"<table>\n <tr>\n <tr><td width=\"80\"><img src=\"img/question.png\" style=\"width:auto;height:auto\"></td><td style=\"text-align:left\">\n Genera dos listas, una conteniendo los índices de la primera mitad de las filas del DataFrame de datos (índices de train), y otra conteniendo los índices de la otra mitad (índices de test).\n </td>\n </tr> \n</table>",
"_____no_output_____"
]
],
[
[
"####### INSERT YOUR CODE HERE\nimport math\nnrows = data.shape[0]\nsplitpoint = math.floor(nrows * 0.50)\ntrainidx = list(range(splitpoint))\ntestidx = list(range(splitpoint, len(data)))",
"_____no_output_____"
]
],
[
[
"## Modelo inicial",
"_____no_output_____"
],
[
"Para poder valorar si las técnicas avanzadas que vamos a emplear aportan algo de utilidad a este problema, vamos a empezar con una solución muy sencilla basada en bag of words, estimando la precisión de clasificación que podemos obtener con ella y usando este valor como referencia.",
"_____no_output_____"
],
[
"<table>\n <tr>\n <tr><td width=\"80\"><img src=\"img/question.png\" style=\"width:auto;height:auto\"></td><td style=\"text-align:left\">\n Utilizando lo que has aprendido en la práctica anterior, construye un sistema de clasificación basado en unigramas de palabras que aprenda de los datos de entrenamiento, y calcula el error de estimación en test del mismo. Como modelo de clasificación utiliza una SVM lineal, con sus parámetros por defecto. No realices ningún proceso de búsqueda para optimizar los parámetros del modelo (tipo GridSearchCV).\n </td>\n </tr> \n</table>",
"_____no_output_____"
]
],
[
[
"####### INSERT YOUR CODE HERE\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.svm import LinearSVC\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.model_selection import GridSearchCV\n\npipeline = Pipeline([\n ('vectorizer', CountVectorizer()),\n ('classifier', LinearSVC())\n ]\n)\n\nparams = {\n 'classifier__C': [0.001, 0.01, 0.1, 1, 10, 100, 1000],\n 'vectorizer__analyzer' : ['word'],\n 'vectorizer__ngram_range' : [(1, 1), (1,2), (1,3)]\n}\n\nmodel = GridSearchCV(pipeline, params, n_jobs = 7)\n\nmodel.fit(data[\"text\"][trainidx].values, data[\"sentiment\"][trainidx])\nmodel.score(data[\"text\"][testidx].values, data[\"sentiment\"][testidx])\n\n# 0.80 con datos pequeños\n# 0.88544 con datos grandes",
"/home/alvaro/miniconda3/envs/textmining-labs/lib/python3.6/site-packages/sklearn/model_selection/_split.py:2053: FutureWarning: You should specify a value for 'cv' instead of relying on the default value. The default value will change from 3 to 5 in version 0.22.\n warnings.warn(CV_WARNING, FutureWarning)\n"
]
],
[
[
"<table>\n <tr>\n <tr><td width=\"80\"><img src=\"img/exclamation.png\" style=\"width:auto;height:auto\"></td><td style=\"text-align:left\">\n El nivel de precisión que has obtenido, ¿crees que sería adecuado para una aplicación real? ¿Piensas que puede mejorarse?\n </td>\n </tr> \n</table>",
"_____no_output_____"
],
[
"## Análisis morfosintático con spaCy",
"_____no_output_____"
],
[
"En entornos donde existe mucho texto expresado de forma natural lo habitual es que una palabra aparezca con diversas conjugaciones y formas, sin que el significado final del texto cambie demasiado (salvo matices que discutiremos más adelante). En estos casos un paso de preprocesamiento habitual es convertir las palabras a lemas, o eliminar categorías morfológicas que aportan poca información. Para esto es imprescindible realizar un **análisis morfosintáctico** del texto, lo cual podemos hacer fácilmente para diversos idiomas utilizando la librería **spaCy**.",
"_____no_output_____"
]
],
[
[
"import spacy",
"_____no_output_____"
]
],
[
[
"spaCy utiliza modelos morfosintácticos específicos para cada idioma. Por defecto la librería no incluye ningún modelo, pero podemos instalarlo de manera sencilla con comandos a python. La siguiente línea ejecuta un comando de sistema para instalar el modelo de spaCy para el idioma inglés.",
"_____no_output_____"
]
],
[
[
"!python -m spacy download en",
"Collecting en_core_web_sm==2.0.0 from https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-2.0.0/en_core_web_sm-2.0.0.tar.gz#egg=en_core_web_sm==2.0.0\n\u001b[?25l Downloading https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-2.0.0/en_core_web_sm-2.0.0.tar.gz (37.4MB)\n\u001b[K 100% |████████████████████████████████| 37.4MB 68.9MB/s ta 0:00:01 46% |███████████████ | 17.5MB 28.1MB/s eta 0:00:01\n\u001b[?25hInstalling collected packages: en-core-web-sm\n Running setup.py install for en-core-web-sm ... \u001b[?25ldone\n\u001b[?25hSuccessfully installed en-core-web-sm-2.0.0\n\n\u001b[93m Linking successful\u001b[0m\n /home/alvaro/miniconda3/envs/textmining-labs/lib/python3.6/site-packages/en_core_web_sm\n -->\n /home/alvaro/miniconda3/envs/textmining-labs/lib/python3.6/site-packages/spacy/data/en\n\n You can now load the model via spacy.load('en')\n\n"
]
],
[
[
"<table>\n <tr>\n <tr><td width=\"80\"><img src=\"img/exclamation.png\" style=\"width:auto;height:auto\"></td><td style=\"text-align:left\">\n Si el comando anterior produce un error relacionado con la falta de permisos, deberás ejecutarlo desde una terminal de Anaconda lanzada con permisos de administrador.\n </td>\n </tr> \n</table>",
"_____no_output_____"
],
[
"Una vez obtenido el modelo, podemos cargarlo en memoria con",
"_____no_output_____"
]
],
[
[
"nlp = spacy.load('en')",
"_____no_output_____"
]
],
[
[
"y analizar una frase de ejemplo de la siguiente manera",
"_____no_output_____"
]
],
[
[
"frase = \"The black cat sat peacefully on the mat.\"\ndoc = nlp(frase)",
"_____no_output_____"
]
],
[
[
"**doc** es ahora una versión de la frase que contiene toda la información morfológica y sintáctica extraída por el analizador. Podemos iterar sobre cada uno de los tokens de la frase de la siguiente forma",
"_____no_output_____"
]
],
[
[
"for token in doc:\n print(\"Token:\", token)",
"Token: The\nToken: black\nToken: cat\nToken: sat\nToken: peacefully\nToken: on\nToken: the\nToken: mat\nToken: .\n"
]
],
[
[
"Igualmente podemos acceder a cada uno de los tokens por su posición en la frase",
"_____no_output_____"
]
],
[
[
"print(doc[2])",
"cat\n"
]
],
[
[
"Pero lo más interesante son los diferentes campos con información extra que contiene cada token. Campos como\n* *texto*: texto original\n* *lemma_*: lema\n* *pos_*: Part of Speech (categoría morfológica) simple\n* *tag_*: categoría morfológica detallada\n* *shape_*: patrón de mayúsculas/minúsculas\n* *is_alpha*: si el token se componente de caracteres alfabéticos\n* *is_stop*: si el token ha sido detectado como una stopword\n* *head*: token padre en el árbol de dependencia\n* *dep_*: relación sintáctica con el token padre\n* etc...",
"_____no_output_____"
],
[
"Por ejemplo, vamos a imprimir toda esta información para el primer token de la frase",
"_____no_output_____"
]
],
[
[
"token = doc[0]\nprint(\"Texto:\", token.text)\nprint(\"Lema:\", token.lemma_)\nprint(\"POS:\", token.pos_)\nprint(\"Tag:\", token.tag_)\nprint(\"Forma:\", token.shape_)\nprint(\"Es alpha:\", token.is_alpha)\nprint(\"Es stopword:\", token.is_stop)\nprint(\"Token padre:\", token.head)\nprint(\"Relación sintáctica:\", token.dep_)",
"Texto: The\nLema: the\nPOS: DET\nTag: DT\nForma: Xxx\nEs alpha: True\nEs stopword: False\nToken padre: cat\nRelación sintáctica: det\n"
]
],
[
[
"Podemos hacer esto con toda la frase y mostrarlo como una tabla (DataFrame) para mayor claridad",
"_____no_output_____"
]
],
[
[
"pd.DataFrame(\n columns=[\"token\", \"lema\", \"POS\", \"tag\", \"shap\", \"isalpha\", \"isstop\", \"padre\", \"dep\"],\n data=[[token.text, token.lemma_, token.pos_, token.tag_,\n token.shape_, token.is_alpha, token.is_stop, token.head, token.dep_]\n for token in doc]\n)",
"_____no_output_____"
]
],
[
[
"También podemos visualizar el árbol sintático de dependencias usando la utilidad **displaCy**",
"_____no_output_____"
]
],
[
[
"from spacy import displacy\n\ndisplacy.render(doc, style='dep', jupyter=True)",
"_____no_output_____"
]
],
[
[
"Como hemos visto, el análisis morfosintáctico de spaCy nos proporciona mucha información, pero también puede ser costoso de realizar cuando tenemos gran cantidad de textos. Si no necesitamos de todos los componentes del análisis, podemos acelerar el tiempo de cálculo desactivando algunos elementos del proceso. Por ejemplo, cargando de nuevo el modelo de la siguiente forma",
"_____no_output_____"
]
],
[
[
"nlpfast = spacy.load('en', disable=['ner', 'parser'])",
"_____no_output_____"
]
],
[
[
"Con esto tenemos un analizador morfosintáctico que no realiza detección de entidades (`ner`) ni análisis del árbol sintáctico de dependencias (`parser`), pero que a cambio ejecuta a mayor velocidad, como podemos comprobar en las siguientes dos celdas.",
"_____no_output_____"
]
],
[
[
"%%time\nfor _ in range(10):\n nlp(\"The black cat sat peacefully on the mat.\")",
"CPU times: user 588 ms, sys: 0 ns, total: 588 ms\nWall time: 155 ms\n"
],
[
"%%time\nfor _ in range(10):\n nlpfast(\"The black cat sat peacefully on the mat.\")",
"CPU times: user 140 ms, sys: 0 ns, total: 140 ms\nWall time: 34.6 ms\n"
]
],
[
[
"Visto el funcionamiento de spaCy, vamos a pasar a ejecutar el análisis morfosintáctico para cada texto de nuestros datos.",
"_____no_output_____"
],
[
"<table>\n <tr>\n <tr><td width=\"80\"><img src=\"img/question.png\" style=\"width:auto;height:auto\"></td><td style=\"text-align:left\">\n Crea una nueva columna en el DataFrame de datos que contenga una versión analizada con spaCy del texto correspondiente. Es suficiente con que apliques el objeto <b>nlp</b> a cada texto y guardes el resultado.\n </td>\n </tr> \n</table>",
"_____no_output_____"
]
],
[
[
"####### INSERT YOUR CODE HERE\ndef addanalyzed(df):\n analyzed = [nlp(text) for text in df[\"text\"]]\n df[\"analyzed\"] = pd.Series(analyzed, index = df.index)\n \naddanalyzed(data)\ndata.head()",
"_____no_output_____"
]
],
[
[
"## Filtrado por morfología",
"_____no_output_____"
],
[
"Vamos a sacar partido a la información morfológica que nos proporciona spaCy para mejorar el modelo predictivo. Para ello realizaremos dos operaciones:\n\n* Filtrar los textos para quedarnos solo con aquellas palabras de las categorías morfológicas con más carga de emoción.\n* Filtrar los textos para no incluir palabras stopwords.\n* Sustituir cada token por su lema, para así reducir el tamaño del vocabulario y simplificar el problema.",
"_____no_output_____"
],
[
"<table>\n <tr>\n <tr><td width=\"80\"><img src=\"img/question.png\" style=\"width:auto;height:auto\"></td><td style=\"text-align:left\">\n Crea una nueva columna en el DataFrame de datos que contenga una versión modificado del texto con únicamente los lemas de aquellos tokens cuyas etiquetas POS sean de clase <b>nombre</b>, <b>verbo</b>, <b>adjetivo</b> o <b>adverbio</b>.\n </td>\n </tr> \n</table>",
"_____no_output_____"
]
],
[
[
"####### INSERT YOUR CODE HERE\ndef addposfilter(df):\n posfilter = [\" \".join([token.lemma_ for token in text \n if token.pos_ in {\"NOUN\", \"VERB\", \"ADJ\", \"ADV\"} and not token.is_stop]) \n for text in df[\"analyzed\"]]\n df[\"posfilter\"] = pd.Series(posfilter, index = df.index)\n \naddposfilter(data)\ndata.head()",
"_____no_output_____"
]
],
[
[
"<table>\n <tr>\n <tr><td width=\"80\"><img src=\"img/question.png\" style=\"width:auto;height:auto\"></td><td style=\"text-align:left\">\n Repite los pasos que realizaste en el caso del modelo inicial (al inicio de esta práctica) para construir un nuevo modelo, esta vez basado en los textos que has preparados en lugar de los textos originales. Mide el nivel de score sobre el conjunto de test, ¿has conseguido alguna mejora en precisión? ¿Y en tiempos de entrenamiento?\n </td>\n </tr> \n</table>",
"_____no_output_____"
]
],
[
[
"####### INSERT YOUR CODE HERE\npipeline = Pipeline([\n ('vectorizer', CountVectorizer()),\n ('classifier', LinearSVC())\n ]\n)\n\nparams = {\n 'classifier__C': [0.001, 0.01, 0.1, 1, 10, 100, 1000],\n 'vectorizer__analyzer' : ['word'],\n 'vectorizer__ngram_range' : [(1, 1), (1,2), (1,3)]\n}\n\nmodel = GridSearchCV(pipeline, params, n_jobs = 7)\nmodel.fit(data[\"posfilter\"][trainidx].values, data[\"sentiment\"][trainidx])\nmodel.score(data[\"posfilter\"][testidx].values, data[\"sentiment\"][testidx])\n\n# Best small: 0.8088 (lemmas, filter stopwords)\n# Small: 0.8056 (lemmas, filter stopwords, filter pos NOUN VERB ADJ ADV)",
"/home/alvaro/miniconda3/envs/textmining-labs/lib/python3.6/site-packages/sklearn/model_selection/_split.py:2053: FutureWarning: You should specify a value for 'cv' instead of relying on the default value. The default value will change from 3 to 5 in version 0.22.\n warnings.warn(CV_WARNING, FutureWarning)\n"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
]
|
ec695aa98cb2929d45b409effab2257aaf779b5e | 120,748 | ipynb | Jupyter Notebook | PracticeProjects/Classification.ipynb | vivekanandjoshi/ds | d4e293f7a4dfc32edb24877a382c80d329791f39 | [
"Apache-2.0"
]
| null | null | null | PracticeProjects/Classification.ipynb | vivekanandjoshi/ds | d4e293f7a4dfc32edb24877a382c80d329791f39 | [
"Apache-2.0"
]
| null | null | null | PracticeProjects/Classification.ipynb | vivekanandjoshi/ds | d4e293f7a4dfc32edb24877a382c80d329791f39 | [
"Apache-2.0"
]
| null | null | null | 75.279302 | 20,540 | 0.824701 | [
[
[
"# Classification",
"_____no_output_____"
]
],
[
[
"import numpy as np\nnp.random.seed(42)\n\nimport warnings\nwarnings.filterwarnings('ignore')",
"_____no_output_____"
]
],
[
[
"## 1.) Load data and Build Model",
"_____no_output_____"
],
[
"Import MNIST dataset from sklearn. The dataset contains 70k small images of digits handwritten by high school students and employees of US Census Bureau. Often known as Hello word Program of Machine Learning.",
"_____no_output_____"
]
],
[
[
"from sklearn.datasets import fetch_mldata\nmnist = fetch_mldata('MNIST original')\nmnist",
"_____no_output_____"
],
[
"#Let's look at the dataset\n\nX,y = mnist[\"data\"],mnist[\"target\"]\nprint(X.shape)\nprint(y.shape)",
"(70000, 784)\n(70000,)\n"
],
[
"#Let's check one of the entry\n\n#the images are 28 X 28 pixels and each feature simply represents one pixel's intensity, so there are 784 features. \n\n#to check one entry we are grabbing an instance's feature vector, \n#reshape it to 28 X 28 array and display it using Matplotlib imshow() fucntion.\n\n%matplotlib inline\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nsome_digit = X[25000]\nsome_digit_image = some_digit.reshape(28,28)\n\nplt.imshow(some_digit_image, cmap = matplotlib.cm.binary, interpolation = \"nearest\")\n\nplt.axis(\"off\")\nplt.show()",
"_____no_output_____"
],
[
"# Now check target variable and see if it looks like 4.\n\ny[25000]",
"_____no_output_____"
],
[
"#Create a test set and set it aside before inspecting data closely. \n#MNIST dataset is already split into training and testing set ( the first 60 k rows are training and last 10k rows are test set)\n\n#Also we will shuffle the training set so that all cross validation folds will be similar \n#( it wo't be good if our training set miss some digits)\n\n\nimport numpy as np\n\nX_train, X_test, y_train, y_test = X[:60000], X[60000:], y[:60000], y[60000:]\n\nshuffle_index = np.random.permutation(60000)\nX_train, y_train = X_train[shuffle_index], y_train[shuffle_index]",
"_____no_output_____"
],
[
"#See some entries\nX_train\n",
"_____no_output_____"
]
],
[
[
"For simplification, we will only try to identify one digit . So our classifier will be binary classifier which is capable of distinguishing b/w just two classes \"4\" and \"not 4\"",
"_____no_output_____"
]
],
[
[
"y_train_4 = (y_train == 4)\ny_test_4 = (y_test == 4)",
"_____no_output_____"
],
[
"#Now let's pick a classifier and train it. Let's use SGD classifier (Stochastic Gradient Descent)\n\nfrom sklearn.linear_model import SGDClassifier\n\nsgd_clf = SGDClassifier(random_state=42)\nsgd_clf.fit(X_train, y_train_4)\n\n#SGD classifier relies on randomness during training, to produce reproducible results, we have to set random_state parameter",
"_____no_output_____"
],
[
"#Now let's predict the images of number 4\n\nsgd_clf.predict([some_digit])",
"_____no_output_____"
]
],
[
[
"Looks like our model predicted number 4 correctly. It's time to evaluate the model's performance",
"_____no_output_____"
],
[
"## 2.) Performance Measures",
"_____no_output_____"
],
[
"### 2.1) Measuring Accuracy Using Cross-Validation",
"_____no_output_____"
]
],
[
[
"#We can implement cross validation using scikit learn StratifiedKFold\n\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.base import clone\n\nskfolds = StratifiedKFold(n_splits=3, random_state=42)\nfor train_index, test_index in skfolds.split(X_train, y_train_4):\n clone_clf = clone(sgd_clf)\n X_train_folds = X_train[train_index]\n y_train_folds = (y_train_4[train_index])\n X_test_fold = X_train[test_index]\n y_test_fold = (y_train_4[test_index])\n\n clone_clf.fit(X_train_folds, y_train_folds)\n y_pred = clone_clf.predict(X_test_fold)\n n_correct = sum(y_pred == y_test_fold)\n print(n_correct/len(y_pred))",
"0.9641017949102545\n0.98015\n0.9733986699334967\n"
]
],
[
[
"## 2.2) Measuring accuracy Using cross_val_score() function",
"_____no_output_____"
]
],
[
[
"#using cross_val_score() function to evaluate SGD classifier model using K-fold cross validation\n\nfrom sklearn.model_selection import cross_val_score\ncross_val_score(sgd_clf, X_train, y_train_4, cv =3, scoring = 'accuracy')",
"_____no_output_____"
]
],
[
[
"We can see our accuracy is above 95% on all cross-valdation folds. Amazing, no? But, let's check a very dumb classifier which \nclassifies every single image as \"not-4\" class.",
"_____no_output_____"
]
],
[
[
"from sklearn.base import BaseEstimator\n\n#there are two functions in the class Never4Classifier, fit() and predict()\n\nclass Never4Classifier(BaseEstimator):\n def fit(self, X, y=None):\n pass #do nothing\n def predict(self, X):\n return np.zeros((len(X), 1), dtype=bool) #return an array of 'len(X) * 1' with all elements as zero in bool (false)\n \n \nnever_4_clf = Never4Classifier() #Create an object of class Never4Classifier\ncross_val_score(never_4_clf, X_train, y_train_4, cv=3, scoring='accuracy')",
"_____no_output_____"
]
],
[
[
"This means even a dumb classifier which predicts everything as \"not-4\" also gives us an accuracy of more than 90%. Why?\nThis is simply because only 10% of the data images are 4, so if you guess all the data as 'not-4', you are right more than 90% of the time.\n\nThis means, accuracy is not the prefereed measure when you have skewed datasets. so, what's next?",
"_____no_output_____"
],
[
"## Confusion Matrix",
"_____no_output_____"
],
[
"Confusion Matrix is a simple idea of counting the number of times instances of the class A are classified as class B. For example, in a multi class classification for this dataset, to know the number of times classifier confused images of 5s with images of 3, you would look 5th row and 3rd column of the confusion matrix.",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"To compute confusion matrix, we need to have set of predictions first and then compared to actual targets. But, for now, we will do it on validation set, because we should do predictions on test set only once.\n\nfor this, we can use cross_val_predict() function, which does cross validation and instead of returning accuracy scores, it returns the predictions made on each test fold.",
"_____no_output_____"
]
],
[
[
"from sklearn.model_selection import cross_val_predict\n\ny_train_pred = cross_val_predict(sgd_clf, X_train, y_train_4, cv =3)\n\n\n#Now, we can use confusion matrix() func. We just simply need to pass target classes (y_train_4) and pred classes (y_train_pred)\n\nfrom sklearn.metrics import confusion_matrix\n\nconfusion_matrix(y_train_4, y_train_pred)",
"_____no_output_____"
]
],
[
[
"The above matrix tells us:\n\n1st row is Negatives -\n\n*53278* are not 4s and are correctly classified as 'not-4' - TRUE NEGATIVES (TN)\n\n*880* are not 4s but are classified as 4 - FALSE POSITIVES (FP)\n\n2nd Row is Positives - \n\n*767* are 4s but wrongly classified as 'not-4' - FALSE NEGATIVES (FN)\n\n*5075* are 4s and correctly calssified as 4 - TRUE POSITIVES (TP)\n\n\nA Perfect classifier would be which have only TRUE NEGATIVES (TN) and TRUE POSITIVES (TP) . So there would be zeroes for FALSE POSITIVES (FP) and FALSE NEGATIVES (FN)",
"_____no_output_____"
]
],
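[
[
"As a quick side note, here is a minimal sketch of how the four counts can be unpacked directly from the binary confusion matrix with `ravel()`:\n\n```python\nfrom sklearn.metrics import confusion_matrix\n\n# rows = actual classes, columns = predicted classes\ntn, fp, fn, tp = confusion_matrix(y_train_4, y_train_pred).ravel()\nprint(tn, fp, fn, tp) # e.g. 53278 880 767 5075 with the run above\n```",
"_____no_output_____"
]
],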
[
[
"#If all predictions are same as target\n\ny_train_perfect_predictions = y_train_4\nconfusion_matrix(y_train_4, y_train_perfect_predictions)",
"_____no_output_____"
]
],
[
[
"## Precision, Recall, F1 Score",
"_____no_output_____"
],
[
"Precision is accuracy of positive predictions.\n\n**precision** = $TP/(TP+FP)$, which means how many are true positives out of total positives.\n\n\nRecall is ratio of positive instances that are correctly detected by the classifier\n\n**recall**, **sensitivity** or **true positive rate** = $TP/(TP+FN)$, how many are true positives out of correctly classified.\n\n\n**F1 Score** = harmonic mean of precision and recall. this can be used to compare two classifiers, the higher the harmonic mean, the better the classifier is.\n\nF1 = $ TP/(TP + ((FN + FP)/2)) $",
"_____no_output_____"
]
],
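[
[
"As a quick sanity check, here is a minimal sketch that plugs the counts from the confusion matrix above directly into these formulas; the scikit-learn helpers in the next cell should give the same values:\n\n```python\ntp, fp, fn = 5075, 880, 767 # counts taken from the confusion matrix above\n\nprecision = tp / (tp + fp) # ~0.852\nrecall = tp / (tp + fn) # ~0.869\nf1 = tp / (tp + (fn + fp) / 2) # equivalent to 2*P*R/(P+R), ~0.860\nprint(precision, recall, f1)\n```",
"_____no_output_____"
]
],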
[
[
"#Precision and recall\n\nfrom sklearn.metrics import precision_score, recall_score\n\nprecision_score(y_train_4, y_train_pred) # 5075/(5075+880) ",
"_____no_output_____"
],
[
"recall_score(y_train_4, y_train_pred) # 5075(5075+767)",
"_____no_output_____"
],
[
"from sklearn.metrics import f1_score\nf1_score(y_train_4, y_train_pred)",
"_____no_output_____"
]
],
[
[
"## Precision Recall Tradeoff",
"_____no_output_____"
],
[
"Increasing Precision reduces recall and vice versa.\n\nScikit learn gives access to decision scores that it uses to make predictions. Instead of calling the classifier predict() method, we can call it decision_function, which returns a score for each instance and then make prediction on the based of the threshold you want.",
"_____no_output_____"
]
],
[
[
"y_scores = sgd_clf.decision_function([some_digit])\n\ny_scores",
"_____no_output_____"
],
[
"#When threshold is zero, the result is same as predict method\n\nthreshold = 0\ny_some_digit_pred = (y_scores > threshold)\ny_some_digit_pred",
"_____no_output_____"
],
[
"#Now let's chnage some threshold above than decision score. We can see that prediction is false now.\n\nthreshold = 200000\n\ny_some_digit_pred = (y_scores > threshold)\ny_some_digit_pred",
"_____no_output_____"
]
],
[
[
"So, how to decide which threshold to use?\n\nFor this, we first need to get the scores of all instances (example) using cross_val_predict() function, but instead of predictions we will get decision scores.",
"_____no_output_____"
]
],
[
[
"y_scores = cross_val_predict(sgd_clf, X_train, y_train_4, cv = 3, method=\"decision_function\")",
"_____no_output_____"
]
],
[
[
"Once we have these scores, we can compute precision and recall for all possible threshold using the precision_recall_curve() function",
"_____no_output_____"
]
],
[
[
"from sklearn.metrics import precision_recall_curve\n\nprecisions, recalls, thresholds = precision_recall_curve(y_train_4, y_scores)",
"_____no_output_____"
]
],
[
[
"And Finally, we can plot precision and recall as function of threshold value using Matplotlib",
"_____no_output_____"
]
],
[
[
"def plot_precision_recall_vs_threshold(precisions, recalls, thresholds):\n plt.plot(thresholds,precisions[:-1],\"b-\", label = \"Precision\")\n plt.plot(thresholds,recalls[:-1], \"g-\", label = \"Recall\")\n plt.xlabel(\"Thresholds\")\n plt.legend(loc=\"upper left\")\n plt.ylim([0,1])\n \n \n\nplt.figure(figsize=(8, 4))\nplot_precision_recall_vs_threshold(precisions,recalls,thresholds)\nplt.show()",
"_____no_output_____"
]
],
[
[
"Another way is to plot precision directly against recall and then observe this graph. You will probably want to select a precision/recall tradeoff just before the sharp drop. But then it depends on the project.",
"_____no_output_____"
]
],
[
[
"def plot_precision_vs_recall(precisions, recalls):\n plt.plot(recalls,precisions,\"b--\")\n plt.xlabel(\"Recall\")\n plt.ylabel(\"Precision\")\n \n\nplot_precision_vs_recall(precisions,recalls)\nplt.show()",
"_____no_output_____"
]
],
[
[
"Now let's suppose you want a precision of 90%, so from the graph above the above graph tells us that you can get precision of 90% at the threshold above 40000. so, to make predictions ,instead of calling predict(), we can simply run below code. ",
"_____no_output_____"
]
],
[
[
"y_train_pred_90 = (y_scores > 40000)",
"_____no_output_____"
],
[
"#Let's check precision and recall with the new threshold \n\nprint(precision_score(y_train_4, y_train_pred_90))\nprint(recall_score(y_train_4, y_train_pred_90))",
"0.8946691176470588\n0.8331051009928107\n"
]
],
[
[
"# If someone says let's reach 99% precision, you should ask, \"at which recall?\"",
"_____no_output_____"
],
[
"## The ROC curve",
"_____no_output_____"
],
[
"Receiver operating characterstic (ROC) curve is very similar to precision/recall curver, but instead of plotting precision vs recall, the ROC curve plots the true positive rate (recall) against the false positive rate. \n\nFPR is the ratio of negative instances that are incorrectly classified as positive. It is equal to 1 minus true negative rate, which is ratio of negative instances that are correctly classified as negative.\n\nTNR is also know as specificity\n\nHence ROC curves plot recall(sensitivity) vs 1-specificity\n\n",
"_____no_output_____"
]
],
[
[
"#import the roc_curve and calculate TPR, FPR for various thresholds.\n\nfrom sklearn.metrics import roc_curve\n\nfpr, tpr, thresholds = roc_curve(y_train_4, y_scores)",
"_____no_output_____"
],
[
"#plot FPR vs TPR \n\ndef plot_roc_curve(fpr,tpr,label=None):\n plt.plot(fpr,tpr, linewidth=2, label=label)\n plt.plot([0,1],[0,1],'k--')\n plt.axis([0,1,0,1])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n \n \nplot_roc_curve(fpr,tpr)\nplt.show()",
"_____no_output_____"
]
],
[
[
"We can use **Area under the curve (AUC)** to compare classifiers, A perfect classifier will have ROC AUC equal to 1, whereas a purely random classifier will have ROC AUC equal to 0.5",
"_____no_output_____"
]
],
[
[
"from sklearn.metrics import roc_auc_score\n\nroc_auc_score(y_train_4, y_scores)",
"_____no_output_____"
]
],
[
[
"### Which curve to use ROC or PR?",
"_____no_output_____"
],
[
"As a rule of thumb, we should prefer PR curve when poitive class is rare or when we care more about false positives than false negatives, and the ROC curve otherwise.",
"_____no_output_____"
],
[
"## Performance measure for different classifiers ",
"_____no_output_____"
],
[
"Let's measure **RandomForestClassifier** and **SGDClassifier** and compare their **ROC Curve** and **ROC AUC curve** ",
"_____no_output_____"
]
],
[
[
"#import RandomForestClassifier\n\nfrom sklearn.ensemble import RandomForestClassifier\n\nforest_clf = RandomForestClassifier(random_state=42)\ny_probas_forest = cross_val_predict(forest_clf, X_train, y_train_4, cv=3, method ='predict_proba')",
"c:\\users\\vivekanand.joshi\\appdata\\local\\programs\\python\\python36\\lib\\site-packages\\sklearn\\ensemble\\weight_boosting.py:29: DeprecationWarning: numpy.core.umath_tests is an internal NumPy module and should not be imported. It will be removed in a future NumPy release.\n from numpy.core.umath_tests import inner1d\n"
],
[
"#change probablities into scores. We will use poitive class probablities as their scores.\n\ny_scores_forest = y_probas_forest[:,1] #score = proba of positive class\n\n\nfpr_forest, tpr_forest,thresholds_forest = roc_curve(y_train_4, y_scores_forest)",
"_____no_output_____"
],
[
"#Plot ROC curve\n\nplt.plot(fpr,tpr,\"b:\", label = \"SGD\")\nplot_roc_curve(fpr_forest,tpr_forest, \"Random Forest\")\nplt.legend(loc=\"lower right\")\nplt.show()",
"_____no_output_____"
],
[
"#Check ROC AUC score for RandomForestClassifier\n\nroc_auc_score(y_train_4, y_scores_forest)",
"_____no_output_____"
],
[
"#print precision and recall score for RandomForestClassifier\n\ny_train_pred_rf = cross_val_predict(forest_clf, X_train, y_train_4, cv=3)\n\nprint(precision_score(y_train_4, y_train_pred_rf))\nprint(recall_score(y_train_4, y_train_pred_rf))",
"0.9896003262642741\n0.8307086614173228\n"
]
],
[
[
"# Multiclass Classification",
"_____no_output_____"
],
[
" For distinguishing more than 2 classes.\n \n **One-versus-all (OvA)** and **One-versus-one(OvO)** strategy.\n \n **OvA** - Make N binary classifier. Then get the decision score for each classifier and select the one with the highest.\n \n **OvO** - If there are N classes, $N*(N-1)/2$ classifiers and see which class wins the most duels.",
"_____no_output_____"
]
],
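[
[
"As a small worked example, with the 10 MNIST digit classes the OvO strategy needs $N*(N-1)/2 = 10*9/2 = 45$ binary classifiers:\n\n```python\nn_classes = 10\nprint(n_classes * (n_classes - 1) // 2) # 45, matching len(ovo_clf.estimators_) below\n```",
"_____no_output_____"
]
],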
[
[
"#scikit learn automatically run OvA (except for SVM classifiers for which it uses OvO)\n\nsgd_clf.fit(X_train, y_train) #for all classes and not only for not-4 class\nsgd_clf.predict([some_digit])",
"_____no_output_____"
],
[
"#under the hood scikit learn ran 10 binary classifiers and selected the output with largest decision_function() method.\n#We can check scores for each binary classifier and see if this actually happens. 4 indeed has highest score.\n\nsome_digit_scores = sgd_clf.decision_function([some_digit])\nsome_digit_scores",
"_____no_output_____"
],
[
"np.argmax(some_digit_scores)",
"_____no_output_____"
],
[
"#to force OneVsOneClassifier in sckit learn we can create an instance of it.\n\nfrom sklearn.multiclass import OneVsOneClassifier\novo_clf = OneVsOneClassifier(SGDClassifier(random_state=42))\novo_clf.fit(X_train,y_train)\novo_clf.predict([some_digit])",
"_____no_output_____"
],
[
"#check how many classifiers were created\n\nlen(ovo_clf.estimators_)",
"_____no_output_____"
],
[
"#Let's train a multiclass RandomForestClassifier\n\nforest_clf.fit(X_train, y_train)\nforest_clf.predict([some_digit])",
"_____no_output_____"
],
[
"#Since RandomForestClassifier can directly classify instances into multiple classes, scikit-learn doesn't have to run OvO or OvA\n#We can check probablities for each class. We can see 4th has highest probability.\n\nforest_clf.predict_proba([some_digit])",
"_____no_output_____"
],
[
"#Scaling to get better results. \n\nfrom sklearn.preprocessing import StandardScaler\n\nscaler = StandardScaler()\nX_train_scaled = scaler.fit_transform(X_train.astype(np.float64))\ncross_val_score(sgd_clf, X_train_scaled, y_train,cv=3, scoring=\"accuracy\")",
"_____no_output_____"
]
],
[
[
"# Error Analysis ",
"_____no_output_____"
]
],
[
[
"#We need to make predictions using the cross_val_predict() function and then call a confusion_matrix() function\n\ny_train_pred = cross_val_predict(sgd_clf, X_train_scaled, y_train, cv=3)\nconf_mx = confusion_matrix(y_train, y_train_pred)\nconf_mx",
"_____no_output_____"
],
[
"#We can create an image representation as well for the above matrix.\n\nplt.matshow(conf_mx,cmap=plt.cm.gray)\nplt.show()",
"_____no_output_____"
]
],
[
[
"The confusion matrix looks fairly good, since most of the images are on the main diagonal which means they were correctly classified. The 5 is slightly darker than other digits, why? there could be two reasons, first, that there are few images of 5, second, classifier doesn't perform well on 5s as on other digits.\n\nLet's focus on errors now.\n\n1.) We divide each value in the confusion matrix by the number of images in each class.\n\n2.) Now fill diagonals with zeros to keep only the errors.",
"_____no_output_____"
]
],
[
[
"row_sums = conf_mx.sum(axis=1, keepdims=True)\nnorm_conf_mx = conf_mx/row_sums\n\n\nnp.fill_diagonal(norm_conf_mx,0)\nplt.matshow(norm_conf_mx, cmap=plt.cm.gray)\n\n#ROWS = Actual Classes\n#COLUMNS = Predicted Classes",
"_____no_output_____"
]
],
[
[
"The above figure helps us to know where we need to focus to make our predictions better. for example, 3 and 5 classifier doesn't perform well\n\n8 and 9 classifiers are not predicting good.",
"_____no_output_____"
],
[
"## Multilabel Classification",
"_____no_output_____"
],
[
"In some cases, you would want your classifier predict multiple classes for one instance. for example, in a face recognition classifier, one image can have three faces. For example if classifier is recognize to identify three faces, Alice, Bob, Charles, then when it is shown a picture which has Alice and Charles in it it should predict **1, 0, 1**. \n\nSuch a classification is called multilabeled classification. ",
"_____no_output_____"
]
],
[
[
"#let's create two labels, large digit (7,8,9) and odd number\n\nfrom sklearn.neighbors import KNeighborsClassifier\n\ny_train_large = (y_train >=7)\ny_train_odd = (y_train%2 ==1)\ny_multilabel = np.c_[y_train_large,y_train_odd]\n\n\nknn_clf = KNeighborsClassifier()\nknn_clf.fit(X_train,y_multilabel)",
"_____no_output_____"
],
[
"knn_clf.predict([some_digit])",
"_____no_output_____"
]
],
[
[
"This is right as 4 is neither large, nor odd",
"_____no_output_____"
]
],
[
[
"y_train_knn_pred = cross_val_predict(knn_clf, X_train, y_train, cv =3)\nf1_score(y_train,y_train_knn_pred, average = \"macro\")",
"_____no_output_____"
]
],
[
[
"# Exercises",
"_____no_output_____"
],
[
"## Exercise 1 - Build a MNIST classifier with over 97% accuracy.",
"_____no_output_____"
]
],
[
[
"from sklearn.neighbors import KNeighborsClassifier\n\nknn_clf_ex = KNeighborsClassifier(n_jobs=-1, weights='distance', n_neighbors=4)\nknn_clf_ex.fit(X_train,y_train)",
"_____no_output_____"
],
[
"y_knn_pred = knn_clf_ex.predict(X_test)",
"_____no_output_____"
],
[
"from sklearn.metrics import accuracy_score\naccuracy_score(y_test, y_knn_pred)",
"_____no_output_____"
]
],
[
[
"The question is how to get the above hyper-parameters? For this we need to import GridSearchCV\n\nWe need to import GridSearchCV and set different parameters as a list. The classifier will run the model for all possible combinations. \n\nThen we can find best parameters as per highest metric through grid_search.best_params_ and grid.search.best_score_",
"_____no_output_____"
]
],
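[
[
"A minimal sketch of how those attributes can be inspected once the grid search in the next cell has finished fitting:\n\n```python\n# run after grid_search.fit(...) completes\nprint(grid_search.best_params_) # e.g. {'n_neighbors': 4, 'weights': 'distance'}\nprint(grid_search.best_score_) # best mean cross-validated accuracy\n```",
"_____no_output_____"
]
],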
[
[
"#Set a parameter grids with different options and run the classifier with different permutations.\n\nfrom sklearn.model_selection import GridSearchCV\n\nparam_grid =[{'weights':[\"uniform\",\"distance\"], 'n_neighbors':[3,4,5]}]\n\nknn_clf = KNeighborsClassifier()\ngrid_search = GridSearchCV(knn_clf, param_grid,cv=5,verbose =3, n_jobs =-1)\ngrid_search.fit(X_train, y_train)",
"Fitting 5 folds for each of 6 candidates, totalling 30 fits\n"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
]
|
ec6967b191b2859446cb0d23ceca51c40bddbce2 | 9,416 | ipynb | Jupyter Notebook | Spring2021_DeCal_Material/Homework/Week7/.ipynb_checkpoints/Week 7 HW 6-checkpoint.ipynb | emilyma53/Python_DeCal | 1b98351ecd16f93a5357c9e00af18dde82c813b1 | [
"MIT"
]
| 2 | 2020-10-24T04:46:05.000Z | 2020-10-24T04:48:50.000Z | Spring2021_DeCal_Material/Homework/Week7/.ipynb_checkpoints/Week 7 HW 6-checkpoint.ipynb | emilyma53/Python_DeCal | 1b98351ecd16f93a5357c9e00af18dde82c813b1 | [
"MIT"
]
| null | null | null | Spring2021_DeCal_Material/Homework/Week7/.ipynb_checkpoints/Week 7 HW 6-checkpoint.ipynb | emilyma53/Python_DeCal | 1b98351ecd16f93a5357c9e00af18dde82c813b1 | [
"MIT"
]
| 1 | 2021-09-30T23:10:25.000Z | 2021-09-30T23:10:25.000Z | 30.771242 | 388 | 0.601529 | [
[
[
"# Homework 6",
"_____no_output_____"
],
[
"This homework is all about useful external libraries that are most common to use in astronomy research. The two most important libraries apart from scipy, numpy, and matplotlib are **astropy** and **pandas**. We explore the basics of these super versatile libraries. ",
"_____no_output_____"
],
[
"# Astropy (50 Points)",
"_____no_output_____"
],
[
"## CRAZY UNIT CONVERSION!!! (20 Points)",
"_____no_output_____"
],
[
"As you take more astronomy classes, you will face more and more unit conversion problems - they are annoying. That's why astropy.units is very helpful. Let's do some practices here.\n\nThe documentations for astropy.units and astropy.constants will very helpful to you.\n\nastropy.units documentation: https://docs.astropy.org/en/stable/units/\n\nastropy.constants documentation: https://docs.astropy.org/en/stable/constants/\n\nNOTE: In this problem, you MUST use astropy.constants when doing calculations involving fundamental constants. Also, you cannot look up values such as solar mass, earth mass, etc. Use the two packages solely.",
"_____no_output_____"
],
[
"### Problem 1) Speed of light (5 Points)\n\nWhat is the speed of light ($c$) in $pc/yr$?",
"_____no_output_____"
]
],
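[
[
"Before attempting the problems, here is a minimal sketch of the general pattern (the velocity value is made up purely for illustration): attach units to a value, then call `.to()` with the target units; fundamental constants come from `astropy.constants` and convert the same way.\n\n```python\nimport astropy.units as u\nfrom astropy import constants as const\n\nv = 220 * u.km / u.s # a made-up velocity, just to show the pattern\nprint(v.to(u.pc / u.Myr)) # convert to parsecs per megayear\nprint(const.G) # gravitational constant, already a Quantity with units\n```",
"_____no_output_____"
]
],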
[
[
"### Write your code here",
"_____no_output_____"
]
],
[
[
"### Problem 2) Newton's 2nd Law (5 Points)\n\nRecall that NII states \n$$F =ma\\,\\,.$$\nSay a force of $97650134N$ is exerted on an object having a mass of $0.0071$ earth mass. What is the acceleration of the object in $AU/days^2$?",
"_____no_output_____"
]
],
[
[
"### Write your code here",
"_____no_output_____"
]
],
[
[
"### Problem 3) Newton's Universal Law of Gravitation (10 Points)\n\nRecall that the gravitational acceleration due to an object with mass $m$ at a distance $r$ is given by \n$$a_g = \\frac{Gm}{r^2}\\,\\,.$$\nWhat is the gravitational acceleration due to a planet of $3.1415926$ Jupiter-mass at a distance of $1.523AU$? Give your answer in $pc/yr^2$.",
"_____no_output_____"
]
],
[
[
"### Write your code here",
"_____no_output_____"
]
],
[
[
"## Visualising Coordinate Transformation (30 Points)",
"_____no_output_____"
],
[
"We introduced coordinate transformation using astropy, but maybe that was too astract to you, so let's use this problem as a way for you to visualise this process. Each part will be worth **5 Points**\n\nThere are several things you need to do:\n1. Open up the FITS file named 'clusters.fits' (this part of the code is written for you already)\n\n\n2. Read it as a table using astropy.table (you will have to import the packages you need and write your own code from hereafter)\n\n\n3. Plot the positions of all the objects in the table, COLOUR-CODED by their types (there is a column named 'CLASS'), with RA on the x-axis and DEC on the y-axis. You should see a curved trend with a huge dip in the middle.\n\n\n4. Carry out a coordinate transformation from the ICRS coordinates to the galactic coordinates - there is a column named \"DISTANCE\" which you will need. \n\n\n5. Now plot the position of all the objects in the galactic coordinates, with $\\ell$ on the x-axis and $b$ on the y-axis; again, colour-code everything by their \"CLASS\". If you did everything correctly, you should see that the curve in the previous plot resembles a horizontal band. \n\n\n6. Answer this question: What is that curved band in the first plot and the horizontal band in the second plot? Does it make sense that the band got straightened up? Why?\n\n\nNote: When you make your plots, please include the axis labels with units and the legend.",
"_____no_output_____"
]
],
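[
[
"A minimal single-object sketch of the ICRS-to-galactic transformation in step 4 (the coordinates below are made up purely for illustration); the exercise applies the same idea to the whole table at once:\n\n```python\nimport astropy.units as u\nfrom astropy.coordinates import SkyCoord\n\nc = SkyCoord(ra=10.68 * u.deg, dec=41.27 * u.deg,\n distance=770.0 * u.kpc, frame='icrs')\nprint(c.galactic.l, c.galactic.b) # galactic longitude and latitude\n```",
"_____no_output_____"
]
],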
[
[
"from astropy.io import fits\n#You will have to import other packages to complete this problem \n\n###IMPORT YOUR OTHER PACKAGES HERE\n",
"_____no_output_____"
],
[
"fits_file = fits.open('clusters.fits')\n\n#To read the fits file as a table, simply run the line: Table.read(fits_file)\n#Although you will have to write up your code to get that Table function \n\n### YOUR CODE HERE",
"_____no_output_____"
]
],
[
[
"(DOUBLE CLICK HERE TO ANSWER QUESTION 6):\n\nYOUR ANSWER: ",
"_____no_output_____"
],
[
"# Pandas (40 Points)\n\nOne of the most efficient and easy to use libraries for importing data files. We will explore the basics here.\n\nLet's import some data that represents the position of a ball being thrown off the roof of Campbell Hall. Using some basic kinematics we can derive the following equation.\n\n$$y(t) = -\\frac{1}{2} g t^2 + v_{0,y} t + y_0$$\n\nFor this problem we need to import our position measurements from our fellow colleagues in our research group.\n\n<img src='diagram.jpeg' width=\"600\" height=\"400\"> ",
"_____no_output_____"
],
[
"## Problem 5 (5 Points)\n\nYour job for this problem is to simply read in the file named **\"projectile.csv\"** using the pandas library (DONT USE `numpy`). Print out your DataFrame so we can see what the data looks like as a table.",
"_____no_output_____"
]
],
[
[
"###YOUR CODE HERE###",
"_____no_output_____"
]
],
[
[
"## Problem 6 (5 Points)\n\nNow load your DataFrame columns into numpy arrays and make a plot of Position vs. Time.",
"_____no_output_____"
]
],
[
[
"###YOUR CODE HERE###",
"_____no_output_____"
]
],
[
[
"## Problem 7 (10 Points)\n\nIn the last problem set we learned how to curve fit a quadratic equation. The above equation is also a quadratic equation with respect to time. Use what we learned last week to fit a curve to the noisy data from our fellow researchers. Explicitly print out what the initial velocity $v_{0,y}$ and initial height $y_0$ are based on your curve fit along with their respective errors. ",
"_____no_output_____"
]
],
[
[
"###YOUR CODE HERE###",
"_____no_output_____"
]
],
[
[
"## Problem 8 (10 Points)\n\nAlright now we have a model function that can fit the function as a function of time. create two lists/arrays of values using this function. One list's values should be time where we use `t = np.linspace(0,5,100)` to create the values and the other list should be your model's output after taking in all those times. (A list of the values you would normally plot)\n\nOnce you have created your two lists of values, construct a pandas DataFrame using these lists. Your data frame should have two columns with 100 values each. ",
"_____no_output_____"
]
],
[
[
"###Your Code Here###",
"_____no_output_____"
]
],
[
[
"## Problem 9 (10 Points)\n\nLast part of the problem set! This is basically one line of code. Export your new DataFrame to a csv file called **\"trajectory.csv\"**, this will be useful for your colleagues!",
"_____no_output_____"
]
],
[
[
"###Your Code Here###",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
]
|
ec696970f31b0a64f3170a348043b4b2632028b3 | 19,016 | ipynb | Jupyter Notebook | examples/extract_text_with_apache_tika.ipynb | campagnucci/querido-diario-toolbox | 3ec99564ae92f1b5456f351f34e7745b4385c79e | [
"MIT"
]
| 20 | 2020-10-30T19:52:12.000Z | 2021-11-12T12:51:58.000Z | examples/extract_text_with_apache_tika.ipynb | campagnucci/querido-diario-toolbox | 3ec99564ae92f1b5456f351f34e7745b4385c79e | [
"MIT"
]
| 26 | 2020-10-30T19:58:44.000Z | 2022-03-31T01:41:55.000Z | examples/extract_text_with_apache_tika.ipynb | campagnucci/querido-diario-toolbox | 3ec99564ae92f1b5456f351f34e7745b4385c79e | [
"MIT"
]
| 9 | 2020-10-30T20:15:33.000Z | 2022-02-18T16:44:15.000Z | 78.904564 | 11,703 | 0.643721 | [
[
[
"Arquivos auxiliares são encontrados no repositório da biblioteca e os `.jar` são baixados ao executar `make setup`.",
"_____no_output_____"
]
],
[
[
"pip install querido-diario-toolbox",
"Defaulting to user installation because normal site-packages is not writeable\nRequirement already satisfied: querido-diario-toolbox in /home/gcc/.local/lib/python3.9/site-packages (0.1.0)\nRequirement already satisfied: python-magic in /home/gcc/.local/lib/python3.9/site-packages (from querido-diario-toolbox) (0.4.24)\nNote: you may need to restart the kernel to use updated packages.\n"
],
[
"# import statements\nfrom pathlib import Path\nimport os, re\n\nfrom querido_diario_toolbox.etl.text_extractor import create_text_extractor\nfrom querido_diario_toolbox import Gazette\n",
"_____no_output_____"
],
[
"# define path variables\nROOT = Path(os.path.abspath('..'))\nDATA = ROOT / 'examples'\nTEST = ROOT / 'tests/data'\nprint(f\"ROOT: {ROOT}\")\nprint(f\"DATA: {DATA}\")\nprint(f\"TEST: {TEST}\")",
"ROOT: /home/jvanz/repositories/querido-diario-toolbox\nDATA: /home/jvanz/repositories/querido-diario-toolbox/examples\nTEST: /home/jvanz/repositories/querido-diario-toolbox/tests/data\n"
],
[
"# use actual gazette file\nactual_gazette = os.path.join(DATA, '38004a4b724a24c1e3c746596acf04efb0c95a58.pdf')\nprint(actual_gazette)\n\n# use test files to show extraction from image\nfake_png = TEST / \"fake_gazette.png\"",
"/home/jvanz/repositories/querido-diario-toolbox/examples/38004a4b724a24c1e3c746596acf04efb0c95a58.pdf\n"
],
[
"# load toolbox classes and methods\n# Create the text extractor\nconfig = {\"apache_tika_jar\": f'{ROOT}/tests/bin/tika-app-1.24.1.jar'}\napache_tika_text_extractor = create_text_extractor(config)\n\n# process a single, easy to work with gazette\ngazette = Gazette(\n filepath=actual_gazette\n)\n",
"_____no_output_____"
],
[
"# extract content\napache_tika_text_extractor.extract_text(gazette)",
"_____no_output_____"
],
[
"# load content into memory\napache_tika_text_extractor.load_content(gazette)\ngazette.content",
"_____no_output_____"
],
[
"# extract metadata\napache_tika_text_extractor.extract_metadata(gazette)\napache_tika_text_extractor.load_metadata(gazette)\ngazette.metadata",
"_____no_output_____"
]
]
]
| [
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
ec696c2320646c760ea210cdfead92fe9f1f8a71 | 352 | ipynb | Jupyter Notebook | Machine Learning/visualization/.ipynb_checkpoints/Data visulaization-checkpoint.ipynb | HackerLion123/Machine-Learning | 71224ea97ba4aaded13a700e07b498469299964b | [
"MIT"
]
| 1 | 2018-07-21T15:41:40.000Z | 2018-07-21T15:41:40.000Z | Machine Learning/visualization/.ipynb_checkpoints/Data visulaization-checkpoint.ipynb | HackerLion123/Machine-Learning | 71224ea97ba4aaded13a700e07b498469299964b | [
"MIT"
]
| null | null | null | Machine Learning/visualization/.ipynb_checkpoints/Data visulaization-checkpoint.ipynb | HackerLion123/Machine-Learning | 71224ea97ba4aaded13a700e07b498469299964b | [
"MIT"
]
| null | null | null | 14.666667 | 30 | 0.517045 | [
[
[
"import pandas as pd\nimport matplotlib as plt",
"_____no_output_____"
]
]
]
| [
"code"
]
| [
[
"code"
]
]
|
ec696da1570a17fbe4858f7685b56082f3fd1c50 | 2,122 | ipynb | Jupyter Notebook | data/Sequence-comparer.ipynb | ericmjl/protein-systematic-characterization | 3ac44d672380490d8e602aa024e40009fdf306b0 | [
"MIT"
]
| null | null | null | data/Sequence-comparer.ipynb | ericmjl/protein-systematic-characterization | 3ac44d672380490d8e602aa024e40009fdf306b0 | [
"MIT"
]
| 44 | 2016-08-31T14:58:13.000Z | 2017-04-07T19:01:56.000Z | data/Sequence-comparer.ipynb | ericmjl/protein-systematic-characterization | 3ac44d672380490d8e602aa024e40009fdf306b0 | [
"MIT"
]
| 1 | 2016-08-31T14:33:35.000Z | 2016-08-31T14:33:35.000Z | 29.887324 | 590 | 0.568332 | [
[
[
"from Bio import SeqIO, AlignIO, Seq\n",
"_____no_output_____"
],
[
"vic_seq = SeqIO.read(open(\"victoria-pb2.fasta\"), \"fasta\")\nvic_seq\n",
"_____no_output_____"
]
]
]
| [
"code"
]
| [
[
"code",
"code"
]
]
|
ec6974d50e5f41f1e1fce7d577a8dc87c902b9a8 | 23,493 | ipynb | Jupyter Notebook | Big-Data-Clusters/CU2/Public/content/monitor-k8s/tsg063-get-storage-classes.ipynb | gantz-at-incomm/tigertoolbox | 9ea80d39a3c5e0c77553fc851c5ee787fbf9291d | [
"MIT"
]
| 541 | 2019-05-07T11:41:25.000Z | 2022-03-29T17:33:19.000Z | Big-Data-Clusters/CU2/Public/content/monitor-k8s/tsg063-get-storage-classes.ipynb | gantz-at-incomm/tigertoolbox | 9ea80d39a3c5e0c77553fc851c5ee787fbf9291d | [
"MIT"
]
| 89 | 2019-05-09T14:23:52.000Z | 2022-01-13T20:21:04.000Z | Big-Data-Clusters/CU2/Public/content/monitor-k8s/tsg063-get-storage-classes.ipynb | gantz-at-incomm/tigertoolbox | 9ea80d39a3c5e0c77553fc851c5ee787fbf9291d | [
"MIT"
]
| 338 | 2019-05-08T05:45:16.000Z | 2022-03-28T15:35:03.000Z | 58.879699 | 520 | 0.409952 | [
[
[
"TSG063 - Get storage classes (Kubernetes)\n=========================================\n\nDescription\n-----------\n\nGet the Kubernetes storage classes available in the cluster\n\nSteps\n-----\n\n### Common functions\n\nDefine helper functions used in this notebook.",
"_____no_output_____"
]
],
[
[
"# Define `run` function for transient fault handling, suggestions on error, and scrolling updates on Windows\nimport sys\nimport os\nimport re\nimport json\nimport platform\nimport shlex\nimport shutil\nimport datetime\n\nfrom subprocess import Popen, PIPE\nfrom IPython.display import Markdown\n\nretry_hints = {} # Output in stderr known to be transient, therefore automatically retry\nerror_hints = {} # Output in stderr where a known SOP/TSG exists which will be HINTed for further help\ninstall_hint = {} # The SOP to help install the executable if it cannot be found\n\nfirst_run = True\nrules = None\ndebug_logging = False\n\ndef run(cmd, return_output=False, no_output=False, retry_count=0):\n \"\"\"Run shell command, stream stdout, print stderr and optionally return output\n\n NOTES:\n\n 1. Commands that need this kind of ' quoting on Windows e.g.:\n\n kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='data-pool')].metadata.name}\n\n Need to actually pass in as '\"':\n\n kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='\"'data-pool'\"')].metadata.name}\n\n The ' quote approach, although correct when pasting into Windows cmd, will hang at the line:\n \n `iter(p.stdout.readline, b'')`\n\n The shlex.split call does the right thing for each platform, just use the '\"' pattern for a '\n \"\"\"\n MAX_RETRIES = 5\n output = \"\"\n retry = False\n\n global first_run\n global rules\n\n if first_run:\n first_run = False\n rules = load_rules()\n\n # When running `azdata sql query` on Windows, replace any \\n in \"\"\" strings, with \" \", otherwise we see:\n #\n # ('HY090', '[HY090] [Microsoft][ODBC Driver Manager] Invalid string or buffer length (0) (SQLExecDirectW)')\n #\n if platform.system() == \"Windows\" and cmd.startswith(\"azdata sql query\"):\n cmd = cmd.replace(\"\\n\", \" \")\n\n # shlex.split is required on bash and for Windows paths with spaces\n #\n cmd_actual = shlex.split(cmd)\n\n # Store this (i.e. kubectl, python etc.) to support binary context aware error_hints and retries\n #\n user_provided_exe_name = cmd_actual[0].lower()\n\n # When running python, use the python in the ADS sandbox ({sys.executable})\n #\n if cmd.startswith(\"python \"):\n cmd_actual[0] = cmd_actual[0].replace(\"python\", sys.executable)\n\n # On Mac, when ADS is not launched from terminal, LC_ALL may not be set, which causes pip installs to fail\n # with:\n #\n # UnicodeDecodeError: 'ascii' codec can't decode byte 0xc5 in position 4969: ordinal not in range(128)\n #\n # Setting it to a default value of \"en_US.UTF-8\" enables pip install to complete\n #\n if platform.system() == \"Darwin\" and \"LC_ALL\" not in os.environ:\n os.environ[\"LC_ALL\"] = \"en_US.UTF-8\"\n\n # When running `kubectl`, if AZDATA_OPENSHIFT is set, use `oc`\n #\n if cmd.startswith(\"kubectl \") and \"AZDATA_OPENSHIFT\" in os.environ:\n cmd_actual[0] = cmd_actual[0].replace(\"kubectl\", \"oc\")\n\n # To aid supportabilty, determine which binary file will actually be executed on the machine\n #\n which_binary = None\n\n # Special case for CURL on Windows. The version of CURL in Windows System32 does not work to\n # get JWT tokens, it returns \"(56) Failure when receiving data from the peer\". If another instance\n # of CURL exists on the machine use that one. 
(Unfortunately the curl.exe in System32 is almost\n # always the first curl.exe in the path, and it can't be uninstalled from System32, so here we\n # look for the 2nd installation of CURL in the path)\n if platform.system() == \"Windows\" and cmd.startswith(\"curl \"):\n path = os.getenv('PATH')\n for p in path.split(os.path.pathsep):\n p = os.path.join(p, \"curl.exe\")\n if os.path.exists(p) and os.access(p, os.X_OK):\n if p.lower().find(\"system32\") == -1:\n cmd_actual[0] = p\n which_binary = p\n break\n\n # Find the path based location (shutil.which) of the executable that will be run (and display it to aid supportability), this\n # seems to be required for .msi installs of azdata.cmd/az.cmd. (otherwise Popen returns FileNotFound) \n #\n # NOTE: Bash needs cmd to be the list of the space separated values hence shlex.split.\n #\n if which_binary == None:\n which_binary = shutil.which(cmd_actual[0])\n\n if which_binary == None:\n if user_provided_exe_name in install_hint and install_hint[user_provided_exe_name] is not None:\n display(Markdown(f'HINT: Use [{install_hint[user_provided_exe_name][0]}]({install_hint[user_provided_exe_name][1]}) to resolve this issue.'))\n\n raise FileNotFoundError(f\"Executable '{cmd_actual[0]}' not found in path (where/which)\")\n else: \n cmd_actual[0] = which_binary\n\n start_time = datetime.datetime.now().replace(microsecond=0)\n\n print(f\"START: {cmd} @ {start_time} ({datetime.datetime.utcnow().replace(microsecond=0)} UTC)\")\n print(f\" using: {which_binary} ({platform.system()} {platform.release()} on {platform.machine()})\")\n print(f\" cwd: {os.getcwd()}\")\n\n # Command-line tools such as CURL and AZDATA HDFS commands output\n # scrolling progress bars, which causes Jupyter to hang forever, to\n # workaround this, use no_output=True\n #\n\n # Work around a infinite hang when a notebook generates a non-zero return code, break out, and do not wait\n #\n wait = True \n\n try:\n if no_output:\n p = Popen(cmd_actual)\n else:\n p = Popen(cmd_actual, stdout=PIPE, stderr=PIPE, bufsize=1)\n with p.stdout:\n for line in iter(p.stdout.readline, b''):\n line = line.decode()\n if return_output:\n output = output + line\n else:\n if cmd.startswith(\"azdata notebook run\"): # Hyperlink the .ipynb file\n regex = re.compile(' \"(.*)\"\\: \"(.*)\"') \n match = regex.match(line)\n if match:\n if match.group(1).find(\"HTML\") != -1:\n display(Markdown(f' - \"{match.group(1)}\": \"{match.group(2)}\"'))\n else:\n display(Markdown(f' - \"{match.group(1)}\": \"[{match.group(2)}]({match.group(2)})\"'))\n\n wait = False\n break # otherwise infinite hang, have not worked out why yet.\n else:\n print(line, end='')\n if rules is not None:\n apply_expert_rules(line)\n\n if wait:\n p.wait()\n except FileNotFoundError as e:\n if install_hint is not None:\n display(Markdown(f'HINT: Use {install_hint} to resolve this issue.'))\n\n raise FileNotFoundError(f\"Executable '{cmd_actual[0]}' not found in path (where/which)\") from e\n\n exit_code_workaround = 0 # WORKAROUND: azdata hangs on exception from notebook on p.wait()\n\n if not no_output:\n for line in iter(p.stderr.readline, b''):\n try:\n line_decoded = line.decode()\n except UnicodeDecodeError:\n # NOTE: Sometimes we get characters back that cannot be decoded(), e.g.\n #\n # \\xa0\n #\n # For example see this in the response from `az group create`:\n #\n # ERROR: Get Token request returned http error: 400 and server \n # response: {\"error\":\"invalid_grant\",# \"error_description\":\"AADSTS700082: \n # The refresh token has 
expired due to inactivity.\\xa0The token was \n # issued on 2018-10-25T23:35:11.9832872Z\n #\n # which generates the exception:\n #\n # UnicodeDecodeError: 'utf-8' codec can't decode byte 0xa0 in position 179: invalid start byte\n #\n print(\"WARNING: Unable to decode stderr line, printing raw bytes:\")\n print(line)\n line_decoded = \"\"\n pass\n else:\n\n # azdata emits a single empty line to stderr when doing an hdfs cp, don't\n # print this empty \"ERR:\" as it confuses.\n #\n if line_decoded == \"\":\n continue\n \n print(f\"STDERR: {line_decoded}\", end='')\n\n if line_decoded.startswith(\"An exception has occurred\") or line_decoded.startswith(\"ERROR: An error occurred while executing the following cell\"):\n exit_code_workaround = 1\n\n # inject HINTs to next TSG/SOP based on output in stderr\n #\n if user_provided_exe_name in error_hints:\n for error_hint in error_hints[user_provided_exe_name]:\n if line_decoded.find(error_hint[0]) != -1:\n display(Markdown(f'HINT: Use [{error_hint[1]}]({error_hint[2]}) to resolve this issue.'))\n\n # apply expert rules (to run follow-on notebooks), based on output\n #\n if rules is not None:\n apply_expert_rules(line_decoded)\n\n # Verify if a transient error, if so automatically retry (recursive)\n #\n if user_provided_exe_name in retry_hints:\n for retry_hint in retry_hints[user_provided_exe_name]:\n if line_decoded.find(retry_hint) != -1:\n if retry_count < MAX_RETRIES:\n print(f\"RETRY: {retry_count} (due to: {retry_hint})\")\n retry_count = retry_count + 1\n output = run(cmd, return_output=return_output, retry_count=retry_count)\n\n if return_output:\n return output\n else:\n return\n\n elapsed = datetime.datetime.now().replace(microsecond=0) - start_time\n\n # WORKAROUND: We avoid infinite hang above in the `azdata notebook run` failure case, by inferring success (from stdout output), so\n # don't wait here, if success known above\n #\n if wait: \n if p.returncode != 0:\n raise SystemExit(f'Shell command:\\n\\n\\t{cmd} ({elapsed}s elapsed)\\n\\nreturned non-zero exit code: {str(p.returncode)}.\\n')\n else:\n if exit_code_workaround !=0 :\n raise SystemExit(f'Shell command:\\n\\n\\t{cmd} ({elapsed}s elapsed)\\n\\nreturned non-zero exit code: {str(exit_code_workaround)}.\\n')\n\n print(f'\\nSUCCESS: {elapsed}s elapsed.\\n')\n\n if return_output:\n return output\n\ndef load_json(filename):\n \"\"\"Load a json file from disk and return the contents\"\"\"\n\n with open(filename, encoding=\"utf8\") as json_file:\n return json.load(json_file)\n\ndef load_rules():\n \"\"\"Load any 'expert rules' from the metadata of this notebook (.ipynb) that should be applied to the stderr of the running executable\"\"\"\n\n try:\n\n # Load this notebook as json to get access to the expert rules in the notebook metadata.\n #\n j = load_json(\"tsg063-get-storage-classes.ipynb\")\n\n except:\n pass # If the user has renamed the book, we can't load ourself. NOTE: Is there a way in Jupyter, to know your own filename?\n\n else:\n if \"metadata\" in j and \\\n \"azdata\" in j[\"metadata\"] and \\\n \"expert\" in j[\"metadata\"][\"azdata\"] and \\\n \"rules\" in j[\"metadata\"][\"azdata\"][\"expert\"]:\n\n rules = j[\"metadata\"][\"azdata\"][\"expert\"][\"rules\"]\n\n rules.sort() # Sort rules, so they run in priority order (the [0] element). 
Lowest value first.\n\n # print (f\"EXPERT: There are {len(rules)} rules to evaluate.\")\n\n return rules\n\ndef apply_expert_rules(line):\n \"\"\"Determine if the stderr line passed in, matches the regular expressions for any of the 'expert rules', if so\n inject a 'HINT' to the follow-on SOP/TSG to run\"\"\"\n\n global rules\n\n for rule in rules:\n\n # rules that have 9 elements are the injected (output) rules (the ones we want). Rules\n # with only 8 elements are the source (input) rules, which are not expanded (i.e. TSG029,\n # not ../repair/tsg029-nb-name.ipynb)\n if len(rule) == 9:\n notebook = rule[1]\n cell_type = rule[2]\n output_type = rule[3] # i.e. stream or error\n output_type_name = rule[4] # i.e. ename or name \n output_type_value = rule[5] # i.e. SystemExit or stdout\n details_name = rule[6] # i.e. evalue or text \n expression = rule[7].replace(\"\\\\*\", \"*\") # Something escaped *, and put a \\ in front of it!\n\n if debug_logging:\n print(f\"EXPERT: If rule '{expression}' satisfied', run '{notebook}'.\")\n\n if re.match(expression, line, re.DOTALL):\n\n if debug_logging:\n print(\"EXPERT: MATCH: name = value: '{0}' = '{1}' matched expression '{2}', therefore HINT '{4}'\".format(output_type_name, output_type_value, expression, notebook))\n\n match_found = True\n\n display(Markdown(f'HINT: Use [{notebook}]({notebook}) to resolve this issue.'))\n\n\n\nprint('Common functions defined successfully.')\n\n# Hints for binary (transient fault) retry, (known) error and install guide\n#\nretry_hints = {'kubectl': ['A connection attempt failed because the connected party did not properly respond after a period of time, or established connection failed because connected host has failed to respond']}\nerror_hints = {'kubectl': [['no such host', 'TSG010 - Get configuration contexts', '../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb'], ['no such host', 'TSG011 - Restart sparkhistory server', '../repair/tsg011-restart-sparkhistory-server.ipynb'], ['No connection could be made because the target machine actively refused it', 'TSG056 - Kubectl fails with No connection could be made because the target machine actively refused it', '../repair/tsg056-kubectl-no-connection-could-be-made.ipynb']]}\ninstall_hint = {'kubectl': ['SOP036 - Install kubectl command line interface', '../install/sop036-install-kubectl.ipynb']}",
"_____no_output_____"
]
],
[
[
"### Show the Kubernetes storage classes",
"_____no_output_____"
]
],
[
[
"run('kubectl get sc')",
"_____no_output_____"
],
[
"print('Notebook execution complete.')",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
]
|
ec697bb18822213ff8ec05e4bc655b28f4f7ed7d | 1,498 | ipynb | Jupyter Notebook | notebooks/lecture/01-jupyter/.ipynb_checkpoints/03-autoreload-checkpoint.ipynb | juls-dotcom/python_workshop | ba4b8061910490f4e1c1236ce2c0ad12ef02a671 | [
"MIT"
]
| null | null | null | notebooks/lecture/01-jupyter/.ipynb_checkpoints/03-autoreload-checkpoint.ipynb | juls-dotcom/python_workshop | ba4b8061910490f4e1c1236ce2c0ad12ef02a671 | [
"MIT"
]
| null | null | null | notebooks/lecture/01-jupyter/.ipynb_checkpoints/03-autoreload-checkpoint.ipynb | juls-dotcom/python_workshop | ba4b8061910490f4e1c1236ce2c0ad12ef02a671 | [
"MIT"
]
| null | null | null | 21.098592 | 232 | 0.568758 | [
[
[
"Read the following [blog](https://blog.godatadriven.com/write-less-terrible-notebook-code) and clone the code for an example project from [here](https://github.com/hgrif/example-project) in the same folder as this notebook.\n\nInstall the package in your environment as described in the blog and run the cells below.",
"_____no_output_____"
],
[
"# Autoreload example",
"_____no_output_____"
]
],
[
[
"%load_ext autoreload\n%autoreload 2 ",
"_____no_output_____"
],
[
"from exampleproject import data",
"_____no_output_____"
],
[
"# Change the file exampleproject/data.py and rerun this cell\ndata.hello_world()",
"_____no_output_____"
]
]
]
| [
"markdown",
"code"
]
| [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
]
]
|
ec69847f0b58cc58afa1c0ec720cdd31d2248b57 | 12,537 | ipynb | Jupyter Notebook | intro-to-pytorch/Part 8 - Transfer Learning (Solution).ipynb | johnsonjoseph37/deep-learning-v2-pytorch | 566dd73c3e289ef16bc8a30f814284b8e243f731 | [
"MIT"
]
| null | null | null | intro-to-pytorch/Part 8 - Transfer Learning (Solution).ipynb | johnsonjoseph37/deep-learning-v2-pytorch | 566dd73c3e289ef16bc8a30f814284b8e243f731 | [
"MIT"
]
| null | null | null | intro-to-pytorch/Part 8 - Transfer Learning (Solution).ipynb | johnsonjoseph37/deep-learning-v2-pytorch | 566dd73c3e289ef16bc8a30f814284b8e243f731 | [
"MIT"
]
| null | null | null | 42.212121 | 662 | 0.563691 | [
[
[
"# Transfer Learning\n\nIn this notebook, you'll learn how to use pre-trained networks to solved challenging problems in computer vision. Specifically, you'll use networks trained on [ImageNet](http://www.image-net.org/) [available from torchvision](http://pytorch.org/docs/0.3.0/torchvision/models.html). \n\nImageNet is a massive dataset with over 1 million labeled images in 1000 categories. It's used to train deep neural networks using an architecture called convolutional layers. I'm not going to get into the details of convolutional networks here, but if you want to learn more about them, please [watch this](https://www.youtube.com/watch?v=2-Ol7ZB0MmU).\n\nOnce trained, these models work astonishingly well as feature detectors for images they weren't trained on. Using a pre-trained network on images not in the training set is called transfer learning. Here we'll use transfer learning to train a network that can classify our cat and dog photos with near perfect accuracy.\n\nWith `torchvision.models` you can download these pre-trained networks and use them in your applications. We'll include `models` in our imports now.",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\n%config InlineBackend.figure_format = 'retina'\n\nimport matplotlib.pyplot as plt\n\nimport torch\nfrom torch import nn\nfrom torch import optim\nimport torch.nn.functional as F\nfrom torchvision import datasets, transforms, models",
"_____no_output_____"
]
],
[
[
"Most of the pretrained models require the input to be 224x224 images. Also, we'll need to match the normalization used when the models were trained. Each color channel was normalized separately, the means are `[0.485, 0.456, 0.406]` and the standard deviations are `[0.229, 0.224, 0.225]`.",
"_____no_output_____"
]
],
[
[
"data_dir = 'Cat_Dog_data'\n\n# TODO: Define transforms for the training data and testing data\ntrain_transforms = transforms.Compose([transforms.RandomRotation(30),\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])])\n\ntest_transforms = transforms.Compose([transforms.Resize(255),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])])\n\n# Pass transforms in here, then run the next cell to see how the transforms look\ntrain_data = datasets.ImageFolder(data_dir + '/train', transform=train_transforms)\ntest_data = datasets.ImageFolder(data_dir + '/test', transform=test_transforms)\n\ntrainloader = torch.utils.data.DataLoader(train_data, batch_size=64, shuffle=True)\ntestloader = torch.utils.data.DataLoader(test_data, batch_size=64)",
"_____no_output_____"
]
],
[
[
"We can load in a model such as [DenseNet](http://pytorch.org/docs/0.3.0/torchvision/models.html#id5). Let's print out the model architecture so we can see what's going on.",
"_____no_output_____"
]
],
[
[
"model = models.densenet121(pretrained=True)\nmodel",
"_____no_output_____"
]
],
[
[
"This model is built out of two main parts, the features and the classifier. The features part is a stack of convolutional layers and overall works as a feature detector that can be fed into a classifier. The classifier part is a single fully-connected layer `(classifier): Linear(in_features=1024, out_features=1000)`. This layer was trained on the ImageNet dataset, so it won't work for our specific problem. That means we need to replace the classifier, but the features will work perfectly on their own. In general, I think about pre-trained networks as amazingly good feature detectors that can be used as the input for simple feed-forward classifiers.",
"_____no_output_____"
]
],
[
[
"# Freeze parameters so we don't backprop through them\nfor param in model.parameters():\n param.requires_grad = False\n\nfrom collections import OrderedDict\nclassifier = nn.Sequential(OrderedDict([\n ('fc1', nn.Linear(1024, 500)),\n ('relu', nn.ReLU()),\n ('fc2', nn.Linear(500, 2)),\n ('output', nn.LogSoftmax(dim=1))\n ]))\n \nmodel.classifier = classifier",
"_____no_output_____"
]
],
[
[
"With our model built, we need to train the classifier. However, now we're using a **really deep** neural network. If you try to train this on a CPU like normal, it will take a long, long time. Instead, we're going to use the GPU to do the calculations. The linear algebra computations are done in parallel on the GPU leading to 100x increased training speeds. It's also possible to train on multiple GPUs, further decreasing training time.\n\nPyTorch, along with pretty much every other deep learning framework, uses [CUDA](https://developer.nvidia.com/cuda-zone) to efficiently compute the forward and backwards passes on the GPU. In PyTorch, you move your model parameters and other tensors to the GPU memory using `model.to('cuda')`. You can move them back from the GPU with `model.to('cpu')` which you'll commonly do when you need to operate on the network output outside of PyTorch. As a demonstration of the increased speed, I'll compare how long it takes to perform a forward and backward pass with and without a GPU.",
"_____no_output_____"
]
],
[
[
"import time",
"_____no_output_____"
],
[
"for device in ['cpu', 'cuda']:\n\n criterion = nn.NLLLoss()\n # Only train the classifier parameters, feature parameters are frozen\n optimizer = optim.Adam(model.classifier.parameters(), lr=0.001)\n\n model.to(device)\n\n for ii, (inputs, labels) in enumerate(trainloader):\n\n # Move input and label tensors to the GPU\n inputs, labels = inputs.to(device), labels.to(device)\n\n start = time.time()\n\n outputs = model.forward(inputs)\n loss = criterion(outputs, labels)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n if ii==3:\n break\n \n print(f\"Device = {device}; Time per batch: {(time.time() - start)/3:.3f} seconds\")",
"_____no_output_____"
]
],
[
[
"You can write device agnostic code which will automatically use CUDA if it's enabled like so:\n```python\n# at beginning of the script\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n...\n\n# then whenever you get a new Tensor or Module\n# this won't copy if they are already on the desired device\ninput = data.to(device)\nmodel = MyModule(...).to(device)\n```\n\nFrom here, I'll let you finish training the model. The process is the same as before except now your model is much more powerful. You should get better than 95% accuracy easily.\n\n>**Exercise:** Train a pretrained models to classify the cat and dog images. Continue with the DenseNet model, or try ResNet, it's also a good model to try out first. Make sure you are only training the classifier and the parameters for the features part are frozen.",
"_____no_output_____"
]
],
[
[
"# Use GPU if it's available\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nmodel = models.densenet121(pretrained=True)\n\n# Freeze parameters so we don't backprop through them\nfor param in model.parameters():\n param.requires_grad = False\n \nmodel.classifier = nn.Sequential(nn.Linear(1024, 256),\n nn.ReLU(),\n nn.Dropout(0.2),\n nn.Linear(256, 2),\n nn.LogSoftmax(dim=1))\n\ncriterion = nn.NLLLoss()\n\n# Only train the classifier parameters, feature parameters are frozen\noptimizer = optim.Adam(model.classifier.parameters(), lr=0.003)\n\nmodel.to(device);",
"_____no_output_____"
],
[
"epochs = 1\nsteps = 0\nrunning_loss = 0\nprint_every = 5\nfor epoch in range(epochs):\n for inputs, labels in trainloader:\n steps += 1\n # Move input and label tensors to the default device\n inputs, labels = inputs.to(device), labels.to(device)\n \n logps = model.forward(inputs)\n loss = criterion(logps, labels)\n \n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n running_loss += loss.item()\n \n if steps % print_every == 0:\n test_loss = 0\n accuracy = 0\n model.eval()\n with torch.no_grad():\n for inputs, labels in testloader:\n inputs, labels = inputs.to(device), labels.to(device)\n logps = model.forward(inputs)\n batch_loss = criterion(logps, labels)\n \n test_loss += batch_loss.item()\n \n # Calculate accuracy\n ps = torch.exp(logps)\n top_p, top_class = ps.topk(1, dim=1)\n equals = top_class == labels.view(*top_class.shape)\n accuracy += torch.mean(equals.type(torch.FloatTensor)).item()\n \n print(f\"Epoch {epoch+1}/{epochs}.. \"\n f\"Train loss: {running_loss/print_every:.3f}.. \"\n f\"Test loss: {test_loss/len(testloader):.3f}.. \"\n f\"Test accuracy: {accuracy/len(testloader):.3f}\")\n running_loss = 0\n model.train()",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
]
|
ec699d8d193fc50246dc4398d5950404f7c54569 | 613,336 | ipynb | Jupyter Notebook | notebooks/mpl.ipynb | MrDaiki/distributed-learning-contributivity | f57032ee3c64637752cd3eeeb684e0dcf9506f3e | [
"Apache-2.0"
]
| null | null | null | notebooks/mpl.ipynb | MrDaiki/distributed-learning-contributivity | f57032ee3c64637752cd3eeeb684e0dcf9506f3e | [
"Apache-2.0"
]
| null | null | null | notebooks/mpl.ipynb | MrDaiki/distributed-learning-contributivity | f57032ee3c64637752cd3eeeb684e0dcf9506f3e | [
"Apache-2.0"
]
| null | null | null | 905.961595 | 73,928 | 0.942516 | [
[
[
"import pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom pathlib import Path\n\nroot_folder = Path().absolute().parent / \"experiments\" \n\n# Get latest generated folder\nsubfolder_list = [f for f in root_folder.iterdir()]\nsubfolder_list_creation_time = [f.stat().st_ctime for f in subfolder_list]\nlatest_subfolder_idx = subfolder_list_creation_time.index(max(subfolder_list_creation_time))\nexperiment_path = subfolder_list[latest_subfolder_idx]\n\n# Read CSV results\ndf = pd.read_csv(experiment_path / \"results.csv\")\n",
"_____no_output_____"
],
[
"# Config file\nwith open(experiment_path / \"config.yml\") as f:\n print(f.read())",
"experiment_name: seqseq_grad_update\nn_repeats: 10\nscenario_params_list:\n - partners_count: \n - 10\n amounts_per_partner: \n - [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]\n samples_split_option: \n - [[1, 'specific'], [1, 'specific'], [1, 'specific'], [1, 'specific'], [1, 'specific'], [1, 'specific'], [1, 'specific'], [1, 'specific'], [1, 'specific'], [1, 'specific']]\n samples_split_option: \n - 'stratified'\n multi_partner_learning_approach:\n - 'fedavg'\n - 'seq-pure'\n - 'seq-with-final-agg'\n - 'seqavg'\n aggregation_weighting: \n - 'uniform' \n gradient_updates_per_pass_count: \n - 4\n - 8\n - 16\n epoch_count: \n - 50\n minibatch_count: \n - 1\n - 2\n - 5\n - 10\n"
],
[
"experiment_path\ndf.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 298 entries, 0 to 297\nData columns (total 22 columns):\naggregation_weighting 298 non-null object\namounts_per_partner 298 non-null object\ndataset_name 298 non-null object\nepoch_count 298 non-null float64\nfinal_relative_nb_samples 298 non-null object\ngradient_updates_per_pass_count 298 non-null float64\nis_early_stopping 298 non-null float64\nlearning_computation_time_sec 298 non-null float64\nminibatch_count 298 non-null float64\nmpl_nb_epochs_done 298 non-null float64\nmpl_test_score 298 non-null float64\nmulti_partner_learning_approach 298 non-null object\nnb_samples_used 298 non-null float64\npartners_count 298 non-null float64\nsamples_split_option 298 non-null object\nscenario_name 298 non-null object\nshort_scenario_name 298 non-null object\nsingle_partner_test_mode 298 non-null object\ntest_data_samples_count 298 non-null float64\ntrain_data_samples_count 298 non-null float64\nrandom_state 298 non-null int64\nscenario_id 298 non-null int64\ndtypes: float64(11), int64(2), object(9)\nmemory usage: 51.3+ KB\n"
],
[
"scenario_ids = df.scenario_id.unique()\nrandom_states = df.random_state.unique()\nsamples_split_options = df.samples_split_option.unique()\namounts_per_partner_list = df.amounts_per_partner.unique()\ngradient_updates_per_pass_count_list = df.gradient_updates_per_pass_count.unique()\n\nprint('Random state', random_states)\nprint('Scenario id', scenario_ids)\nprint('amounts_per_partner', amounts_per_partner_list)\nprint()\n\ndf.info()\ndf.head(5)",
"Random state [0 1 2 3 4 5 6]\nScenario id [ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23\n 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47]\namounts_per_partner ['[0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]']\n\n<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 298 entries, 0 to 297\nData columns (total 22 columns):\naggregation_weighting 298 non-null object\namounts_per_partner 298 non-null object\ndataset_name 298 non-null object\nepoch_count 298 non-null float64\nfinal_relative_nb_samples 298 non-null object\ngradient_updates_per_pass_count 298 non-null float64\nis_early_stopping 298 non-null float64\nlearning_computation_time_sec 298 non-null float64\nminibatch_count 298 non-null float64\nmpl_nb_epochs_done 298 non-null float64\nmpl_test_score 298 non-null float64\nmulti_partner_learning_approach 298 non-null object\nnb_samples_used 298 non-null float64\npartners_count 298 non-null float64\nsamples_split_option 298 non-null object\nscenario_name 298 non-null object\nshort_scenario_name 298 non-null object\nsingle_partner_test_mode 298 non-null object\ntest_data_samples_count 298 non-null float64\ntrain_data_samples_count 298 non-null float64\nrandom_state 298 non-null int64\nscenario_id 298 non-null int64\ndtypes: float64(11), int64(2), object(9)\nmemory usage: 51.3+ KB\n"
],
[
"df[\"computation_time_minutes\"] = df.learning_computation_time_sec / 60\ndf.short_scenario_name = df.samples_split_option + df.amounts_per_partner + ' mb ' + df.minibatch_count.astype(str)\ndf.short_scenario_name.unique()\n#df.contributivity_method = df.contributivity_method.str.replace('values', '')",
"_____no_output_____"
],
[
"# Helper function for plot\ndef get_x_tick_amount(df, scenario_id, partners_ids):\n \n x_ticks = []\n for partner_id in partners_ids:\n current_df = df[(df.scenario_id == scenario_id) & (df.partner_id == partner_id)]\n amount = current_df.amount_per_partner.unique()\n\n assert(len(amount) == 1)\n amount = amount[0]\n \n x_ticks.append('partner_' + str(partner_id) + ' ' + str(amount))\n return x_ticks",
"_____no_output_____"
],
[
"# Plot loop\nfor amounts_per_partner in amounts_per_partner_list:\n for gradient_updates_per_pass_count in gradient_updates_per_pass_count_list:\n \n current_df = df[(df.amounts_per_partner == amounts_per_partner) &\n (df.gradient_updates_per_pass_count == gradient_updates_per_pass_count)]\n\n sns.set(style=\"ticks\")\n #current_df.info()\n print(\"################################################\")\n title = \"gradient_updates_per_pass_count: \" + str(gradient_updates_per_pass_count)\n print(title)\n\n plt.figure(figsize=(10, 6), dpi=120)\n plt.title(title)\n ax = sns.swarmplot(x=\"multi_partner_learning_approach\", y=\"mpl_test_score\", hue=\"minibatch_count\",\n data=current_df)\n plt.ylim([0, 1])\n\n plt.figure(figsize=(10, 6), dpi=120)\n plt.title(title)\n ax = sns.boxplot(x=\"multi_partner_learning_approach\", y=\"mpl_nb_epochs_done\", hue=\"minibatch_count\",\n data=current_df)\n plt.ylim([0, 50])\n\n\n ax = plt.figure(figsize=(10, 6), dpi=120)\n plt.title(title)\n sns.boxplot(x=\"multi_partner_learning_approach\", y=\"computation_time_minutes\", hue=\"minibatch_count\",\n data=current_df)\n plt.ylim(bottom=0)\n\n\n plt.show()\n ",
"################################################\ngradient_updates_per_pass_count: 4.0\n"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
ec69a2818fe1c16ff08d576148e10225843fdbd2 | 534,740 | ipynb | Jupyter Notebook | code/pendulum2.ipynb | SSModelGit/ModSimPy | 4d1e3d8c3b878ea876e25e6a74509535f685f338 | [
"MIT"
]
| null | null | null | code/pendulum2.ipynb | SSModelGit/ModSimPy | 4d1e3d8c3b878ea876e25e6a74509535f685f338 | [
"MIT"
]
| null | null | null | code/pendulum2.ipynb | SSModelGit/ModSimPy | 4d1e3d8c3b878ea876e25e6a74509535f685f338 | [
"MIT"
]
| null | null | null | 117.55111 | 94,082 | 0.784245 | [
[
[
"# Modeling and Simulation in Python\n\nChapter 10 Example: Rigid Pendulum\n\nCopyright 2017 Allen Downey\n\nLicense: [Creative Commons Attribution 4.0 International](https://creativecommons.org/licenses/by/4.0)\n",
"_____no_output_____"
]
],
[
[
"# If you want the figures to appear in the notebook, \n# and you want to interact with them, use\n# %matplotlib notebook\n\n# If you want the figures to appear in the notebook, \n# and you don't want to interact with them, use\n# %matplotlib inline\n\n# If you want the figures to appear in separate windows, use\n# %matplotlib qt5\n\n# tempo switch from one to another, you have to select Kernel->Restart\n\n%matplotlib notebook\n\nfrom modsim import *",
"_____no_output_____"
]
],
[
[
"### Pendulum",
"_____no_output_____"
],
[
"This notebook solves the Spider-Man problem from spiderman.ipynb, demonstrating a different development process for physical simulations.\n\nIn `pendulum_sympy`, we derive the equations of motion for a rigid pendulum without drag, yielding:\n\n$ \\ddot{x} = \\frac{x}{x^{2} + y^{2}} \\left(g y - vx^{2} - vy^{2}\\right) $\n\n$ \\ddot{y} = - \\frac{1}{x^{2} + y^{2}} \\left(g x^{2} + y \\left(vx^{2} + vy^{2}\\right)\\right) $\n\nWe'll use the same conditions we saw in `spiderman.ipynb`",
"_____no_output_____"
]
],
[
[
"condition = Condition(g = 9.8,\n m = 75,\n area = 1,\n rho = 1.2,\n v_term = 60,\n duration = 30,\n length0 = 100,\n angle = (270 - 45),\n k = 20)",
"_____no_output_____"
]
],
[
[
"Now here's a version of `make_system` that takes a `Condition` object as a parameter.\n\n`make_system` uses the given value of `v_term` to compute the drag coefficient `C_d`.",
"_____no_output_____"
]
],
[
[
"def make_system(condition):\n \"\"\"Makes a System object for the given conditions.\n \n condition: Condition with height, g, m, diameter, \n rho, v_term, and duration\n \n returns: System with init, g, m, rho, C_d, area, and ts\n \"\"\"\n unpack(condition)\n \n theta = np.deg2rad(angle)\n x, y = pol2cart(theta, length0)\n P = Vector(x, y)\n V = Vector(0, 0)\n \n init = State(x=P.x, y=P.y, vx=V.x, vy=V.y)\n C_d = 2 * m * g / (rho * area * v_term**2)\n ts = linspace(0, duration, 501)\n \n \n return System(init=init, g=g, m=m, rho=rho,\n C_d=C_d, area=area, length0=length0,\n k=k, ts=ts)",
"_____no_output_____"
]
],
[
[
"Let's make a `System`",
"_____no_output_____"
]
],
[
[
"system = make_system(condition)\nsystem",
"_____no_output_____"
],
[
"system.init",
"_____no_output_____"
]
],
[
[
"To write the slope function, we can get the expressions for `ax` and `ay` directly from SymPy and plug them in.",
"_____no_output_____"
]
],
[
[
"def slope_func(state, t, system):\n \"\"\"Computes derivatives of the state variables.\n \n state: State (x, y, x velocity, y velocity)\n t: time\n system: System object with length0, m, k\n \n returns: sequence (vx, vy, ax, ay)\n \"\"\"\n x, y, vx, vy = state\n unpack(system)\n\n ax = x*(g*y - vx**2 - vy**2)/(x**2 + y**2)\n ay = -(g*x**2 + y*(vx**2 + vy**2))/(x**2 + y**2)\n\n return vx, vy, ax, ay",
"_____no_output_____"
]
],
[
[
"As always, let's test the slope function with the initial conditions.",
"_____no_output_____"
]
],
[
[
"slope_func(system.init, 0, system)",
"_____no_output_____"
]
],
[
[
"And then run the simulation.",
"_____no_output_____"
]
],
[
[
"%time run_odeint(system, slope_func)",
"CPU times: user 132 ms, sys: 0 ns, total: 132 ms\nWall time: 130 ms\n"
]
],
[
[
"### Visualizing the results\n\nWe can extract the x and y components as `Series` objects.",
"_____no_output_____"
]
],
[
[
"xs = system.results.x\nys = system.results.y",
"_____no_output_____"
]
],
[
[
"The simplest way to visualize the results is to plot x and y as functions of time.",
"_____no_output_____"
]
],
[
[
"newfig()\nplot(xs, label='x')\nplot(ys, label='y')\n\ndecorate(xlabel='Time (s)',\n ylabel='Position (m)')",
"_____no_output_____"
]
],
[
[
"We can plot the velocities the same way.",
"_____no_output_____"
]
],
[
[
"vxs = system.results.vx\nvys = system.results.vy",
"_____no_output_____"
],
[
"newfig()\nplot(vxs, label='vx')\nplot(vys, label='vy')\n\ndecorate(xlabel='Time (s)',\n ylabel='Velocity (m/s)')",
"_____no_output_____"
]
],
[
[
"Another way to visualize the results is to plot y versus x. The result is the trajectory through the plane of motion.",
"_____no_output_____"
]
],
[
[
"newfig()\nplot(xs, ys, label='trajectory')\n\ndecorate(xlabel='x position (m)',\n ylabel='y position (m)')",
"_____no_output_____"
]
],
[
[
"We can also animate the trajectory. If there's an error in the simulation, we can sometimes spot it by looking at animations.",
"_____no_output_____"
]
],
[
[
"newfig()\ndecorate(xlabel='x position (m)',\n ylabel='y position (m)',\n xlim=[-100, 100],\n ylim=[-200, -50],\n legend=False)\n\nfor x, y in zip(xs, ys):\n plot(x, y, 'bo', update=True)\n sleep(0.01)",
"_____no_output_____"
]
],
[
[
"Here's a function that encapsulates that code and runs the animation in (approximately) real time.",
"_____no_output_____"
]
],
[
[
"def animate2d(xs, ys, speedup=1):\n \"\"\"Animate the results of a projectile simulation.\n \n xs: x position as a function of time\n ys: y position as a function of time\n \n speedup: how much to divide `dt` by\n \"\"\"\n # get the time intervals between elements\n ts = xs.index\n dts = np.diff(ts)\n dts = np.append(dts, 0)\n\n # decorate the plot\n newfig()\n decorate(xlabel='x position (m)',\n ylabel='y position (m)',\n xlim=[xs.min(), xs.max()],\n ylim=[ys.min(), ys.max()],\n legend=False)\n\n # loop through the values\n for x, y, dt in zip(xs, ys, dts):\n plot(x, y, 'bo', update=True)\n sleep(dt / speedup)",
"_____no_output_____"
],
[
"animate2d(system.results.x, system.results.y)",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
]
|
ec69a84f4654da01846f3c8f68ae7c1271bd5074 | 3,842 | ipynb | Jupyter Notebook | codewar/codewar.6 kyu.iq-test.ipynb | oleng/solved | ef2b338c16f2bfc5416fce80b174b439cbb2565e | [
"MIT"
]
| null | null | null | codewar/codewar.6 kyu.iq-test.ipynb | oleng/solved | ef2b338c16f2bfc5416fce80b174b439cbb2565e | [
"MIT"
]
| null | null | null | codewar/codewar.6 kyu.iq-test.ipynb | oleng/solved | ef2b338c16f2bfc5416fce80b174b439cbb2565e | [
"MIT"
]
| null | null | null | 43.168539 | 406 | 0.430765 | [
[
[
"### IQ Test (6 kyu) \nhttps://www.codewars.com/kata/552c028c030765286c00007d \n2020-02-10T07:20:11.285Z \nCategory: reference, Tags: ['Fundamentals', 'Logic']",
"_____no_output_____"
],
[
"Bob is preparing to pass IQ test. The most frequent task in this test is `to find out which one of the given numbers differs from the others`. Bob observed that one number usually differs from the others in **evenness**. Help Bob — to check his answers, he needs a program that among the given numbers finds one that is different in evenness, and return a position of this number.\n\n`!` Keep in mind that your task is to help Bob solve a `real IQ test`, which means indexes of the elements start from `1 (not 0)`\n\n##Examples :\n\n```csharp\nIQ.Test(\"2 4 7 8 10\") => 3 // Third number is odd, while the rest of the numbers are even\n\nIQ.Test(\"1 2 1 1\") => 2 // Second number is even, while the rest of the numbers are odd\n```\n```javascript\niqTest(\"2 4 7 8 10\") => 3 // Third number is odd, while the rest of the numbers are even\n\niqTest(\"1 2 1 1\") => 2 // Second number is even, while the rest of the numbers are odd\n```\n```typescript\niqTest(\"2 4 7 8 10\") => 3 // Third number is odd, while the rest of the numbers are even\n\niqTest(\"1 2 1 1\") => 2 // Second number is even, while the rest of the numbers are odd\n```\n```ruby\niq_test(\"2 4 7 8 10\") => 3 // Third number is odd, while the rest of the numbers are even\n\niq_test(\"1 2 1 1\") => 2 // Second number is even, while the rest of the numbers are odd\n```\n \n```python\niq_test(\"2 4 7 8 10\") => 3 // Third number is odd, while the rest of the numbers are even\n\niq_test(\"1 2 1 1\") => 2 // Second number is even, while the rest of the numbers are odd\n```\n \n",
"_____no_output_____"
]
],
[
[
"# Solution\n\ndef iq_test(numbers):\n even = [int(n) % 2 for n in numbers.split()]\n return even.index(0) + 1 \\\n if even.count(0) < even.count(1) \\\n else even.index(1) + 1",
"_____no_output_____"
]
]
]
| [
"markdown",
"code"
]
| [
[
"markdown",
"markdown"
],
[
"code"
]
]
|
ec69b35de7cbc3c39091e4cfe2885bbd1e089765 | 138,318 | ipynb | Jupyter Notebook | analysis/simulation/legacy/simple/bootstrap_simulation.ipynb | yelabucsf/scrna-parameter-estimation | 218ef38b87f8d777d5abcb04913212cbcb21ecb1 | [
"MIT"
]
| 2 | 2021-03-17T20:31:54.000Z | 2022-03-17T19:24:37.000Z | analysis/simulation/legacy/simple/bootstrap_simulation.ipynb | yelabucsf/scrna-parameter-estimation | 218ef38b87f8d777d5abcb04913212cbcb21ecb1 | [
"MIT"
]
| 1 | 2021-08-23T20:55:07.000Z | 2021-08-23T20:55:07.000Z | analysis/simulation/legacy/simple/bootstrap_simulation.ipynb | yelabucsf/scrna-parameter-estimation | 218ef38b87f8d777d5abcb04913212cbcb21ecb1 | [
"MIT"
]
| 1 | 2020-04-06T05:43:31.000Z | 2020-04-06T05:43:31.000Z | 164.078292 | 21,536 | 0.905428 | [
[
[
"import numpy as np\nimport scipy.stats as stats\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pandas as pd",
"_____no_output_____"
],
[
"%matplotlib inline",
"_____no_output_____"
]
],
[
[
"### Heteroskedastic",
"_____no_output_____"
]
],
[
[
"x.shape",
"_____no_output_____"
],
[
"beta1 = 0.3\nbeta2 = 2\nx = np.arange(1, 10, 1)\nmeans = np.exp(beta1*x)\nthetas = np.exp(beta2*x)\ny = stats.nbinom.rvs(*convert_params(means, thetas), size=(10000, x.shape[0]))\nx = np.tile(x, (1, 10000)).reshape(-1)\ny = y.reshape(-1)",
"_____no_output_____"
],
[
"data = pd.DataFrame()\ndata['x'] = x\ndata['y'] = y",
"_____no_output_____"
],
[
"def convert_params(mu, theta):\n \"\"\"\n Convert mean/dispersion parameterization of a negative binomial to the ones scipy supports\n\n See https://en.wikipedia.org/wiki/Negative_binomial_distribution#Alternative_formulations\n \"\"\"\n r = theta\n var = mu + 1 / r * mu ** 2\n p = (var - mu) / var\n return r, 1 - p\n",
"_____no_output_____"
],
[
"sns.violinplot(\npd.Series(x),\ny.reshape(-1))",
"_____no_output_____"
],
[
"((y - np.exp(beta1*x))*x).mean()",
"_____no_output_____"
],
[
"a = np.arange(0.1, 10, 0.01)\nb = a + 5*a**2",
"_____no_output_____"
],
[
"plt.scatter(np.log(a), np.log(b))",
"_____no_output_____"
],
[
"plt.scatter(a, b)",
"_____no_output_____"
]
],
[
[
"### Binomial beta case",
"_____no_output_____"
]
],
[
[
"p = 0.1",
"_____no_output_____"
],
[
"distances = []\nNs = np.arange(5, 5000, 1)\n\nfor N in Ns:\n \n binom_x = np.arange(0, N, 1)\n binom_y = stats.binom.pmf(np.arange(0, N, 1), N, p)\n binom_x = binom_x / N\n binom_y = binom_y * N\n \n beta_x = binom_x\n beta_y = stats.beta.pdf(beta_x, N*p, N*(1-p))\n \n \n \n distances.append(((binom_y - beta_y)**2).mean())",
"_____no_output_____"
],
[
"plt.plot(Ns, distances)",
"_____no_output_____"
],
[
"distances[-1:]",
"_____no_output_____"
],
[
"N = 1000\np = 0.5",
"_____no_output_____"
],
[
"binom_x = np.arange(0, N, 1)\nbinom_y = stats.binom.pmf(np.arange(0, N, 1), N, p)\nbinom_x = binom_x / N\nbinom_y = binom_y * N",
"_____no_output_____"
],
[
"p = 0.1\nnoise_level = 0.2049755522580501\np_sq = (noise_level+1)*p**2",
"_____no_output_____"
],
[
"m = p\nv = p_sq - p**2",
"_____no_output_____"
],
[
"v",
"_____no_output_____"
],
[
"m*(1-m)",
"_____no_output_____"
],
[
"alpha = m*(m*(1-m)/v - 1)\nbeta = (1-m)*(m*(1-m)/v - 1)",
"_____no_output_____"
],
[
"qs = stats.beta.rvs(alpha, beta, size=10000)",
"_____no_output_____"
],
[
"qs.var()",
"_____no_output_____"
],
[
"plt.hist(qs, bins=100);\nplt.xlim(0, 1)",
"_____no_output_____"
],
[
"plt.hist(qs, bins=100);\nplt.xlim(0, 1)",
"_____no_output_____"
],
[
"plt.plot(binom_x, binom_y)\nprint(N)\n\n\nplt.plot(np.arange(0, 1, 0.01), stats.beta.pdf(np.arange(0, 1, 0.01), N*p, N*(1-p)))",
"1000\n"
],
[
"plt.plot(binom_x, binom_y)\nprint(N)\nplt.plot(np.arange(0, 1, 0.01), stats.beta.pdf(np.arange(0, 1, 0.01), N*p, N*(1-p)))",
"25\n"
],
[
"plt.plot(binom_x, binom_y)\nprint(N)\nplt.plot(np.arange(0, 1, 0.01), stats.beta.pdf(np.arange(0, 1, 0.01), N*p, N*(1-p)))",
"10\n"
],
[
"plt.plot(np.arange(0, 1, 0.01), stats.beta.pdf(np.arange(0, 1, 0.01), N*p, N*(1-p)))",
"_____no_output_____"
]
],
[
[
"### Computing ASL",
"_____no_output_____"
]
],
[
[
"a = stats.norm.rvs(loc=-3.5, size=5000)\nb = stats.norm.rvs(loc=3.5, size=5000)",
"_____no_output_____"
],
[
"sns.distplot(a)\nsns.distplot(b)",
"_____no_output_____"
],
[
"import itertools",
"_____no_output_____"
],
[
"sig = 0",
"_____no_output_____"
],
[
"%%time\n(np.array([(x-y) for x,y in itertools.product(a,b)]) > 0).sum()",
"CPU times: user 5.67 s, sys: 949 ms, total: 6.62 s\nWall time: 6.61 s\n"
],
[
"%%time\nsig = 0\nfor x, y in itertools.product(a,b):\n \n if x > y:\n sig += 1\nprint(sig)",
"4\nCPU times: user 3.66 s, sys: 13.2 ms, total: 3.68 s\nWall time: 3.69 s\n"
],
[
"np.array([(x-y) for x,y in itertools.product(a,b)]) > 0)",
"_____no_output_____"
]
]
]
| [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
ec69b68e792a99ca7a72021f30b7b4568975e68e | 1,792 | ipynb | Jupyter Notebook | Learn Julia/07. Incrementation and Addition Assigment.ipynb | charlesjansen/Learn-Julia-Programming-Language | bcc657efd59bda27a57fdbedb693ea33760b14eb | [
"MIT"
]
| 1 | 2021-01-03T01:42:09.000Z | 2021-01-03T01:42:09.000Z | Learn Julia/07. Incrementation and Addition Assigment.ipynb | charlesjansen/Learn-Julia-Programming-Language | bcc657efd59bda27a57fdbedb693ea33760b14eb | [
"MIT"
]
| null | null | null | Learn Julia/07. Incrementation and Addition Assigment.ipynb | charlesjansen/Learn-Julia-Programming-Language | bcc657efd59bda27a57fdbedb693ea33760b14eb | [
"MIT"
]
| 2 | 2020-11-30T03:43:35.000Z | 2022-01-06T17:31:49.000Z | 16.144144 | 61 | 0.453683 | [
[
[
"empty"
]
]
]
| [
"empty"
]
| [
[
"empty"
]
]
|
ec69d0ed9212c347954452e97598472a7d8f3d41 | 8,565 | ipynb | Jupyter Notebook | semana_1/dia_4/.ipynb_checkpoints/Ejercicios Flujos de Control-checkpoint.ipynb | franciscocanon-thebridge/DS-part-time-sep2020 | ecb72efdbf91a3a94ea566a08f56b9c97bd929d4 | [
"MIT"
]
| 1 | 2020-10-16T16:13:02.000Z | 2020-10-16T16:13:02.000Z | semana_1/dia_4/.ipynb_checkpoints/Ejercicios Flujos de Control-checkpoint.ipynb | ClaraLL7/DS-part-time-sep2020 | 1cbae90b78f994d14926822a551df9c13c89548f | [
"MIT"
]
| null | null | null | semana_1/dia_4/.ipynb_checkpoints/Ejercicios Flujos de Control-checkpoint.ipynb | ClaraLL7/DS-part-time-sep2020 | 1cbae90b78f994d14926822a551df9c13c89548f | [
"MIT"
]
| 3 | 2020-10-15T18:53:54.000Z | 2020-10-16T17:25:28.000Z | 28.741611 | 580 | 0.564857 | [
[
[
"",
"_____no_output_____"
],
[
"# Ejercicios Flujos de control",
"_____no_output_____"
],
[
"## Ejercicio 1\nDeclara una variable numérica que será una hora cualquiera del día. Implementa mediante sentencias `if/elif/else` la siguiente casuística:\n\n* Si es entre las 0 y las 7 print \"Durmiendo\"\n* Si es entre las 9 y las 17, print \"Trabajando\"\n* Si es entre las 19 y las 21, print \"Clase\"\n* Si es entre las 22 y las 24, print \"Descanso\"\n* En cualquier otro caso, print \"Transporte o error\"",
"_____no_output_____"
]
],
[
[
"hora = 15\n\nif hora >= 0 and hora <= 7:\n print(\"Durmiendo\")\nelif hora >= 9 and hora <= 17:\n print(\"Trabajando\")\nelif hora >= 19 and hora <= 21:\n print(\"Clase\")\nelif hora >= 22 and hora <= 24:\n print(\"Descanso\")\nelse:\n print(\"Transporte o error\")",
"Trabajando\n"
]
],
[
[
"## Ejercicio 2\nEn este ejercicio vamos a implementar un calculador de precios de casas muy sencillo. Tenemos las siguientes variables:\n\n> superficie\n>\n> distrito\n\nImplementa mediante sentencias `if/elif/else` la siguiente casuística:\n\n1. Si el distrito es \"Moncloa\" o \"Centro\", y además la superficie es superior a 100 metros cuadrados, el precio de la casa es de 1000\n2. Si el distrito es \"Salamanca\", y además la superficie de la casa es al menos de 150 metros, el precio de la casa es de 1500\n3. Si el distrito no es \"Retiro\" y la superficie está entre 60 y 80 metros, el precio es de 600\n4. En cualquier otro caso, el precio será de 0",
"_____no_output_____"
]
],
[
[
"distrito = \"Salamanca\"\nsuperficie = 150\n\nprecio = 0\n\nif (distrito == \"Moncloa\" or distrito == \"Centro\") and (superficie > 100):\n precio = 1000\nelif distrito == 'Salamanca' and superficie >= 150:\n variable2 = 10\nelif distrito != \"Retiro\" and (superficie >= 60 and superficie <= 80):\n precio = 600\nelse:\n precio = 0\n \nprint(\"El precio del piso es: \", precio)",
"_____no_output_____"
]
],
[
[
"## Ejercicio 3\nEn este ejercicio vamos a realizar un programa muy parecido. En este caso queremos que se cumplan las siguientes condiciones:\n\n1. Primero se compruebe si el distrito es \"Retiro\". Si es asi, que imprima \"Distrito Retiro\", y si no, \"Otro distrito\"\n2. Una vez se haya comprobado el distrito, si la superficie es mayor de 100 metros cuadrados, que imprima un precio de 1000, y si no, de 500.\n\nHay que usar `ifs` anidados",
"_____no_output_____"
],
[
"## Ejercicio 4\nEscribe un programa que tenga dos variables: un numero, y una lista numérica. El programa debe recorrer la lista e imprimir por pantalla cada elemento de la lista multiplicado por el número",
"_____no_output_____"
],
[
"## Ejercicio 5\nImprime por pantalla los números -10 al -1. En ese orden. Consulta [la documentación](https://www.w3schools.com/python/ref_func_range.asp) de `range`",
"_____no_output_____"
],
[
"## Ejercicio 6\nDada la siguiente lista:\n\nlist1 = [12, 15, 32, 42, 55, 75, 122, 132, 150, 180, 200]\n\nImplementa un programa que los recorra e imprima por pantalla todos los divisibles por 5. Si nos encontramos con alguno que sea mayor que 150, detener el bucle.",
"_____no_output_____"
]
],
[
[
"\n",
"_____no_output_____"
]
],
[
[
"## Ejercicio 7\nEscribe un programa de Python que imprima por pantalla todos los números divisibles por 5 y divisibles por 7, dentro del rango de valores (150, 350)",
"_____no_output_____"
]
],
[
[
" ",
"_____no_output_____"
]
],
[
[
"## Ejercicio 8\nImplementa un programa que imprima por pantalla el siguiente patrón\n\n5 4 3 2 1 \n\n4 3 2 1 \n\n3 2 1 \n\n2 1 \n\n1\n\nNOTA: NO hay lineas en blanco entre una línea y otra.",
"_____no_output_____"
],
[
"## Ejercicio 9\nEn este ejercicio vamos a crear un pequeño juego. Se trata de intentar adivinar un numero del 1 al 5. Tenemos dos intentos para acertar. Pasos a seguir:\n\n1. Ya viene implementado cómo obtener un número aleatorio del 1 al 5\n2. Tendrás que declarar en una variable el numero de vidas, y mediante un bucle while, comprobar que todavia quedan vidas.\n3. Dentro del bucle, obtener el valor del usuario y comprobar si es ese el numero. Si no, actualizar las vidas",
"_____no_output_____"
]
],
[
[
"from random import randint\n\n# Genera un numero aleatorio del 1 al 5\nrandom_number = randint(1, 5)\n",
"_____no_output_____"
]
],
[
[
"## Ejercicios extra",
"_____no_output_____"
],
[
"Más ejercicios en [este link](https://www.w3resource.com/python-exercises/python-conditional-statements-and-loop-exercises.php)\n\nY [aquí](https://erlerobotics.gitbooks.io/erle-robotics-learning-python-gitbook-free/loops/exercises_loops.html)",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
]
|
ec69d37f898776b9617543336a68a93535bc47bd | 54,122 | ipynb | Jupyter Notebook | notebooks/1.5-am-LSA-training.ipynb | goelshivani321/Implementing-Bisecting-K-means-Algorithm- | 5858b5d1ae67e1dc6be43694200833f9a093a3bf | [
"MIT"
]
| 2 | 2019-01-23T20:07:37.000Z | 2020-12-17T15:47:23.000Z | notebooks/1.5-am-LSA-training.ipynb | goelshivani321/Implementing-Bisecting-K-means-Algorithm- | 5858b5d1ae67e1dc6be43694200833f9a093a3bf | [
"MIT"
]
| null | null | null | notebooks/1.5-am-LSA-training.ipynb | goelshivani321/Implementing-Bisecting-K-means-Algorithm- | 5858b5d1ae67e1dc6be43694200833f9a093a3bf | [
"MIT"
]
| 3 | 2020-03-22T23:00:55.000Z | 2021-12-31T01:19:58.000Z | 27.543003 | 101 | 0.388992 | [
[
[
"# hack to import local classes\nimport sys\nsys.path.append('..')\n\n%load_ext autoreload\n%autoreload 2\n\nfrom src.models import train_model\nfrom src.data import read_transform\nfrom sklearn.metrics import calinski_harabaz_score\n\nimport numpy as np\nimport pandas as pd\nfrom scipy.sparse import csr_matrix, hstack, save_npz, load_npz\n\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n%matplotlib inline\nsns.set(style=\"whitegrid\")",
"The autoreload extension is already loaded. To reload it, use:\n %reload_ext autoreload\n"
],
[
"#Read CSR matrix from the input file\ncsrMatrix = read_transform.csr_read('../data/raw/train.dat')\n\n#Scale the CSR matrix by idf (Inverse Document Frequency)\ncsrIDF = read_transform.csr_idf(csrMatrix, copy=True)\n\n#Normalize the rows of a CSR matrix by their L-2 norm.\ncsrL2Normalized = read_transform.csr_l2normalize(csrIDF, copy=True)\n\n#Obtain a dense ndarray representation of the CSR matrix.\ndenseMatrix = csrL2Normalized.toarray()",
"_____no_output_____"
],
[
"csrL2Normalized.shape",
"_____no_output_____"
],
[
"pd.DataFrame(denseMatrix).head()",
"_____no_output_____"
],
[
"labels = train_model.bisecting_kmeans(denseMatrix, 2, 10)",
"_____no_output_____"
],
[
"pd.DataFrame(labels)[0].value_counts()",
"_____no_output_____"
],
[
"calinski_harabaz_score(denseMatrix, labels)",
"_____no_output_____"
]
],
[
[
"# LSA - Latent Semantic Analysis",
"_____no_output_____"
]
],
[
[
"from sklearn.decomposition import TruncatedSVD",
"_____no_output_____"
],
[
"svd = TruncatedSVD(n_components=5000, n_iter=10, random_state=10, algorithm='arpack')",
"_____no_output_____"
],
[
"csrL2Normalized_svd = svd.fit_transform(csrL2Normalized)",
"_____no_output_____"
],
[
"svd.explained_variance_ratio_.sum() * 100",
"_____no_output_____"
],
[
"pd.DataFrame(csrL2Normalized_svd).head()",
"_____no_output_____"
],
[
"csrL2Normalized_svd = csr_matrix(csrL2Normalized_svd)",
"_____no_output_____"
],
[
"csrL2Normalized_svd",
"_____no_output_____"
],
[
"csrL2Normalized_svd = read_transform.csr_l2normalize(csrL2Normalized_svd, copy=True)",
"_____no_output_____"
],
[
"pd.DataFrame(csrL2Normalized_svd.toarray()).head()",
"_____no_output_____"
],
[
"save_npz('../data/interim/csr_svd_normalized_5000-arpack.npz', csrL2Normalized_svd)",
"_____no_output_____"
]
],
[
[
"# Bisect on SVD",
"_____no_output_____"
]
],
[
[
"denseMatrix = csrL2Normalized_svd.toarray()",
"_____no_output_____"
],
[
"labels = train_model.bisecting_kmeans(denseMatrix, 2, 10)",
"_____no_output_____"
],
[
"pd.DataFrame(labels)[0].value_counts()",
"_____no_output_____"
],
[
"calinski_harabaz_score(denseMatrix, labels)",
"_____no_output_____"
]
],
[
[
"# Submission",
"_____no_output_____"
]
],
[
[
"denseMatrix = csrL2Normalized_svd.toarray()",
"_____no_output_____"
],
[
"labels = train_model.bisecting_kmeans(denseMatrix, 7, 10)",
"_____no_output_____"
],
[
"pd.DataFrame(labels)[0].value_counts()",
"_____no_output_____"
],
[
"calinski_harabaz_score(csrL2Normalized.toarray(), labels)",
"_____no_output_____"
],
[
"read_transform.write_predictions(labels, '../models/predictions/1.5-am-lsa-arpack.dat')",
"_____no_output_____"
],
[
"denseMatrix = csrL2Normalized.toarray()",
"_____no_output_____"
],
[
"labels = train_model.bisecting_kmeans(denseMatrix, 7, 10)",
"_____no_output_____"
],
[
"pd.DataFrame(labels)[0].value_counts()",
"_____no_output_____"
],
[
"calinski_harabaz_score(csrL2Normalized.toarray(), labels)",
"_____no_output_____"
]
],
[
[
"# Non Negative Matrix Factorization",
"_____no_output_____"
]
],
[
[
"from sklearn.decomposition import NMF",
"_____no_output_____"
],
[
"nmf = NMF(n_components=550, random_state=10, alpha=.1, l1_ratio=.5, verbose=True)",
"_____no_output_____"
],
[
"csrL2Normalized_nmf = nmf.fit_transform(csrL2Normalized)",
"violation: 1.0\nviolation: 0.007381502004166063\nviolation: 0.006559960891150529\nviolation: 0.003928112329562171\nviolation: 0.0025834742644676506\nviolation: 0.0018327749861842082\nviolation: 0.0013509157618990767\nviolation: 0.0009910607203590502\nviolation: 0.0007753259203580676\nviolation: 0.0006189964680479151\nviolation: 0.0004975888464482741\nviolation: 0.000409449091168145\nviolation: 0.00034712471504103856\nviolation: 0.00030310845303377766\nviolation: 0.00026775325872107735\nviolation: 0.0002418054809956908\nviolation: 0.0002250083152105965\nviolation: 0.000214720340370855\nviolation: 0.00020683478220220988\nviolation: 0.00019641318267255475\nviolation: 0.0001820108464820063\nviolation: 0.0001697237926184946\nviolation: 0.0001603272899390658\nviolation: 0.00015421006783794964\nviolation: 0.00014886539511119634\nviolation: 0.00014370965060167047\nviolation: 0.0001383410885760771\nviolation: 0.00013342096964458818\nviolation: 0.00012913063888743645\nviolation: 0.0001255220079701261\nviolation: 0.00012239948461763452\nviolation: 0.00012005610002216494\nviolation: 0.00011798412795233577\nviolation: 0.00011536616062857128\nviolation: 0.00011105816385450739\nviolation: 0.00010477508742653625\nviolation: 9.731859044599501e-05\nConverged at iteration 37\n"
],
[
"pd.DataFrame(csrL2Normalized_nmf).head()",
"_____no_output_____"
],
[
"csrL2Normalized_nmf = csr_matrix(csrL2Normalized_nmf)",
"_____no_output_____"
],
[
"csrL2Normalized_nmf",
"_____no_output_____"
],
[
"csrL2Normalized_nmf = read_transform.csr_l2normalize(csrL2Normalized_nmf, copy=True)",
"_____no_output_____"
],
[
"pd.DataFrame(csrL2Normalized_nmf.toarray()).head()",
"_____no_output_____"
],
[
"save_npz('../data/interim/csr_nmf_normalized_550.npz', csrL2Normalized_nmf)",
"_____no_output_____"
]
],
[
[
"# Bisect on NMF 500",
"_____no_output_____"
]
],
[
[
"csrL2Normalized_nmf = load_npz('../data/interim/csr_nmf_normalized_500.npz')",
"_____no_output_____"
],
[
"csrL2Normalized_nmf",
"_____no_output_____"
],
[
"denseMatrix = csrL2Normalized_nmf.toarray()",
"_____no_output_____"
],
[
"labels = train_model.bisecting_kmeans(denseMatrix, 7, 10)",
"_____no_output_____"
],
[
"pd.DataFrame(labels)[0].value_counts()",
"_____no_output_____"
],
[
"calinski_harabaz_score(csrL2Normalized.toarray(), labels)",
"_____no_output_____"
],
[
"read_transform.write_predictions(labels, '../models/predictions/1.5-am-lsa-nmf-500-euclid.dat')",
"_____no_output_____"
]
],
[
[
"# Bisect on NMF 550",
"_____no_output_____"
]
],
[
[
"csrL2Normalized_nmf = load_npz('../data/interim/csr_nmf_normalized_550.npz')",
"_____no_output_____"
],
[
"csrL2Normalized_nmf",
"_____no_output_____"
],
[
"denseMatrix = csrL2Normalized_nmf.toarray()",
"_____no_output_____"
],
[
"labels = train_model.bisecting_kmeans(denseMatrix, 7, 10)",
"_____no_output_____"
],
[
"pd.DataFrame(labels)[0].value_counts()",
"_____no_output_____"
],
[
"calinski_harabaz_score(csrL2Normalized.toarray(), labels)",
"_____no_output_____"
],
[
"read_transform.write_predictions(labels, '../models/predictions/1.5-am-lsa-nmf-550.dat')",
"_____no_output_____"
]
],
[
[
"# Bisect on NMF 1000",
"_____no_output_____"
]
],
[
[
"csrL2Normalized_nmf = load_npz('../data/interim/csr_nmf_normalized_1000.npz')",
"_____no_output_____"
],
[
"csrL2Normalized_nmf",
"_____no_output_____"
],
[
"denseMatrix = csrL2Normalized_nmf.toarray()",
"_____no_output_____"
],
[
"labels = train_model.bisecting_kmeans(denseMatrix, 7, 10)",
"_____no_output_____"
],
[
"pd.DataFrame(labels)[0].value_counts()",
"_____no_output_____"
],
[
"calinski_harabaz_score(csrL2Normalized.toarray(), labels)",
"_____no_output_____"
],
[
"read_transform.write_predictions(labels, '../models/predictions/1.5-am-lsa-nmf-1000.dat')",
"_____no_output_____"
]
]
]
| [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
ec69edef52b81cfe7fcd1cd6f723656c028831f6 | 113,345 | ipynb | Jupyter Notebook | module1-define-ml-problems/ANDRONIK_MKRTYCHEV_LS_DS_231_assignment.ipynb | andronikmk/DS-Unit-2-Applied-Modeling | 8b497238fdc71d4fc10ce7a6d6fd98e1e17485d2 | [
"MIT"
]
| null | null | null | module1-define-ml-problems/ANDRONIK_MKRTYCHEV_LS_DS_231_assignment.ipynb | andronikmk/DS-Unit-2-Applied-Modeling | 8b497238fdc71d4fc10ce7a6d6fd98e1e17485d2 | [
"MIT"
]
| null | null | null | module1-define-ml-problems/ANDRONIK_MKRTYCHEV_LS_DS_231_assignment.ipynb | andronikmk/DS-Unit-2-Applied-Modeling | 8b497238fdc71d4fc10ce7a6d6fd98e1e17485d2 | [
"MIT"
]
| null | null | null | 52.66961 | 17,362 | 0.456659 | [
[
[
"<a href=\"https://colab.research.google.com/github/andronikmk/DS-Unit-2-Applied-Modeling/blob/master/module1-define-ml-problems/ANDRONIK_MKRTYCHEV_LS_DS_231_assignment.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"Lambda School Data Science\n\n*Unit 2, Sprint 3, Module 1*\n\n---\n\n\n# Define ML problems\n\nYou will use your portfolio project dataset for all assignments this sprint.\n\n## Assignment\n\nComplete these tasks for your project, and document your decisions.\n\n- [x] Choose your target. Which column in your tabular dataset will you predict?\n- [x] Is your problem regression or classification?\n- [x] How is your target distributed?\n - Classification: How many classes? Are the classes imbalanced?\n - Regression: Is the target right-skewed? If so, you may want to log transform the target.\n- [] Choose your evaluation metric(s).\n - Classification: Is your majority class frequency >= 50% and < 70% ? If so, you can just use accuracy if you want. Outside that range, accuracy could be misleading. What evaluation metric will you choose, in addition to or instead of accuracy?\n - Regression: Will you use mean absolute error, root mean squared error, R^2, or other regression metrics?\n- [ ] Choose which observations you will use to train, validate, and test your model.\n - Are some observations outliers? Will you exclude them?\n - Will you do a random split or a time-based split?\n- [ ] Begin to clean and explore your data.\n- [ ] Begin to choose which features, if any, to exclude. Would some features \"leak\" future information?\n\nIf you haven't found a dataset yet, do that today. [Review requirements for your portfolio project](https://lambdaschool.github.io/ds/unit2) and choose your dataset.\n\nSome students worry, ***what if my model isn't “good”?*** Then, [produce a detailed tribute to your wrongness. That is science!](https://twitter.com/nathanwpyle/status/1176860147223867393)",
"_____no_output_____"
]
],
[
[
"# import libraries\nimport pandas as pd\nimport numpy as np\nimport plotly.express as px",
"_____no_output_____"
],
[
"# import df\ndf = pd.read_csv('/content/data_uk_v3.csv')\ndf.head()",
"_____no_output_____"
],
[
"# choose target\ndf.describe()",
"_____no_output_____"
],
[
"# describe target\ndf['Real_consumption_earnings_Growth_GB'].describe()",
"_____no_output_____"
],
[
"# number of missing values\ndf.isna().sum()",
"_____no_output_____"
],
[
"# number of missing values in Real_consumption_earnings_Growth_GB\ndf['Real_consumption_earnings_Growth_GB'].isnull().sum()",
"_____no_output_____"
],
[
"# how is target distributed\ndf['Real_consumption_earnings_Growth_GB'].describe()",
"_____no_output_____"
],
[
"# number of unique values\ny = df['Real_consumption_earnings_Growth_GB']\ny.nunique()",
"_____no_output_____"
],
[
"# shape\ndf.shape",
"_____no_output_____"
],
[
"# sort missing values\ndf.isnull().sum().value_counts()",
"_____no_output_____"
],
[
"# Regression: Is the target right-skewed? No, not right or left skewed\npx.histogram(df, x='Real_consumption_earnings_Growth_GB',title='Histogram for Wage Growth')",
"_____no_output_____"
],
[
"# time-based split\ndf['Year'].describe()",
"_____no_output_____"
],
[
"df['Year'].describe()",
"_____no_output_____"
],
[
"# split train, val and test\ntrain = df[df['Year'] <= 1600]\nval = df[df['Year'] <= 1650]\ntest = df[df['Year'] >= 1650]",
"_____no_output_____"
],
[
"# shape train, val and test\ntrain.shape, val.shape, test.shape",
"_____no_output_____"
]
],
[
[
"### First model --> shallow decision tree",
"_____no_output_____"
]
],
[
[
"# cat_encode install via pip\n!pip install category_encoders",
"Requirement already satisfied: category_encoders in /usr/local/lib/python3.6/dist-packages (2.1.0)\nRequirement already satisfied: pandas>=0.21.1 in /usr/local/lib/python3.6/dist-packages (from category_encoders) (0.25.3)\nRequirement already satisfied: numpy>=1.11.3 in /usr/local/lib/python3.6/dist-packages (from category_encoders) (1.17.5)\nRequirement already satisfied: scikit-learn>=0.20.0 in /usr/local/lib/python3.6/dist-packages (from category_encoders) (0.22.1)\nRequirement already satisfied: patsy>=0.4.1 in /usr/local/lib/python3.6/dist-packages (from category_encoders) (0.5.1)\nRequirement already satisfied: scipy>=0.19.0 in /usr/local/lib/python3.6/dist-packages (from category_encoders) (1.4.1)\nRequirement already satisfied: statsmodels>=0.6.1 in /usr/local/lib/python3.6/dist-packages (from category_encoders) (0.10.2)\nRequirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.6/dist-packages (from pandas>=0.21.1->category_encoders) (2018.9)\nRequirement already satisfied: python-dateutil>=2.6.1 in /usr/local/lib/python3.6/dist-packages (from pandas>=0.21.1->category_encoders) (2.6.1)\nRequirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.6/dist-packages (from scikit-learn>=0.20.0->category_encoders) (0.14.1)\nRequirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from patsy>=0.4.1->category_encoders) (1.12.0)\n"
],
[
"# import libraries\nimport category_encoders as ce\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.tree import DecisionTreeRegressor",
"_____no_output_____"
],
[
"# feature selection\ntarget = 'Real_consumption_earnings_Growth_GB'\nfeatures = train.columns.drop([target, 'Year'])\nX_train = train[features]\ny_train = train[target]\nX_val = val[features]\ny_val = val[target]",
"_____no_output_____"
],
[
"# pipeline\npipeline = make_pipeline(\n SimpleImputer(strategy='median'),\n DecisionTreeRegressor(random_state=42)\n)",
"_____no_output_____"
],
[
"# shape\nX_train.shape, y_train.shape",
"_____no_output_____"
],
[
"# fit model\npipeline.fit(X_train, y_train)",
"_____no_output_____"
],
[
"# validation accuracy, looks like no leakage\nprint('Validation Accuracy', pipeline.score(X_val, y_val))",
"Validation Accuracy 0.9997952206058953\n"
],
[
"# Bonus data exploration with sns\n# NO right skew!\nimport seaborn as sns\nsns.distplot(y);",
"_____no_output_____"
],
[
"# Example code for log-transform. doesnt apply because of (-) values\n# y_log = np.log1p(y)\n# sns.distplot(y_log)\n# y_untransformed = np.expm1(y_log)\n# sns.distplot(y_untransformed)\n# plt.title('Back to the original units');",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
ec69fc4c94fbc7fb86df47102fee1e94308e8f3c | 18,753 | ipynb | Jupyter Notebook | evaluation/Baseline_kth_football2.ipynb | jutanke/mvpose | 33eb107490e17d51301d6b22e4f5b93ab6a11cf3 | [
"MIT"
]
| 4 | 2020-03-06T15:05:09.000Z | 2020-04-14T17:57:33.000Z | evaluation/Baseline_kth_football2.ipynb | jutanke/mvpose | 33eb107490e17d51301d6b22e4f5b93ab6a11cf3 | [
"MIT"
]
| null | null | null | evaluation/Baseline_kth_football2.ipynb | jutanke/mvpose | 33eb107490e17d51301d6b22e4f5b93ab6a11cf3 | [
"MIT"
]
| null | null | null | 28.413636 | 243 | 0.475924 | [
[
[
"# import os\n# os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"\"\n%matplotlib inline\n\nimport json\nSettings = json.load(open('../settings.txt'))\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom os.path import join\nfrom cselect import color as cs\nimport sys\nsys.path.insert(0,'../')\nsys.path.insert(0,'../samples')\nsys.path.insert(0,'../debugging')\nfrom mvpose.data import shelf\nfrom time import time\n\ndata_root = Settings['data_root']\ntmp = Settings['tmp']\n\nimport mvpose.data.kth_football2 as kth\nfrom mvpose import pose\nfrom mvpose.settings import get_settings\nfrom paf_loader import Loader\nfrom mvpose.evaluation import pcp\nfrom mvpose.plot.limbs import draw_mscoco_human, draw_mscoco_human2d\nfrom mvpose.baseline.baseline import estimate\nfrom openpose import OpenPose\nfrom mvpose.data.openpose import OpenPoseKeypoints, MultiOpenPoseKeypoints\n\nseq1_zipname = 'player2sequence1.zip'\nseq1_dir = 'Sequence 1'\n\npeak_threshold = 0.08\npe = OpenPose(tmp=tmp, peak_threshold=peak_threshold)\n\n# ====================================================\n# Extractor Functions\n# ====================================================\ndef extract_best(pos3d, Humans, alpha):\n \"\"\"\n Extracts the best fitting human for the calculation\n \"\"\"\n larms = 0\n uarms = 0\n ulegs = 0\n llegs = 0\n all_parts = 0\n \n for h in Humans:\n r = pcp.evaluate(pos3d, Humans[0], alpha)\n if all_parts < r.all_parts:\n larms = r.lower_arms\n uarms = r.upper_arms\n ulegs = r.upper_legs\n llegs = r.lower_legs\n \n return {\n \"larms\": larms,\n \"uarms\": uarms,\n \"ulegs\": ulegs,\n \"llegs\": llegs\n }",
"Using TensorFlow backend.\n"
],
[
"from mvpose.baseline.tracking import tracking\nfrom time import time\n\nCalib = []\nposes_per_frame = []\nPos3d = []\n\n_start = time()\n#for frame in range(0, 214):\nend_frame = 214\nfor frame in range(0, end_frame):\n Im, calib, pos2d, pos3d = kth.get(\n data_root, seq1_zipname, seq1_dir, frame, player=2)\n Calib.append(calib)\n Pos3d.append(pos3d)\n \n txt_add = str(peak_threshold)\n if 0.099 < peak_threshold < 0.101:\n txt_add = ''\n \n name = 'cvpr_kth_' + seq1_zipname + txt_add\n predictions = pe.predict(Im, name, frame)\n poses_per_frame.append(predictions)\n_end = time()\nprint('elapsed', _end - _start)",
"elapsed 22.340583324432373\n"
],
[
"len(poses_per_frame)",
"_____no_output_____"
],
[
"_start = time()\ntracks = tracking(Calib, poses_per_frame,\n epi_threshold=110,\n scale_to_mm=1000,\n max_distance_between_tracks=200,\n distance_threshold=200,\n correct_limb_size=False,\n merge_distance=200)\n_end = time()\nprint('tracking: elapsed', _end - _start)\n\ntrack = tracks[0]\n\nfrom mvpose.baseline.tracking import Track\n# track = Track.smoothing(track, sigma=2.3, interpolation_range=5)\n# track = Track.smoothing(track, sigma=6, interpolation_range=5)\n\ntrack = Track.smoothing(track, sigma=2, interpolation_range=5)\n\n# for jid in range(18):\n# A = track.poses[0]\n# B = track_new.poses[0]\n# if A[jid] is not None and B[jid] is not None:\n# print('jid', jid)\n# print(\"\\t\", np.round(A[jid], 4))\n# print('\\t', np.round(B[jid], 4))\n \nalpha = 0.5\n\nlarms = []\nuarms = []\nllegs = []\nulegs = []\n\n_start = time()\nfor frame in range(0, end_frame):\n \n pos3d = Pos3d[frame]\n pose = track.get_by_frame(frame)\n Humans = []\n if pose is not None:\n Humans.append(pose)\n \n Humans = kth.transform3d_from_mscoco(Humans)\n result = extract_best(pos3d, Humans, alpha)\n \n larms.append(result['larms'])\n uarms.append(result['uarms'])\n llegs.append(result['llegs'])\n ulegs.append(result['ulegs'])\n\n_end = time()\nprint('elapsed', _end - _start)",
"_____no_output_____"
],
[
"# 0.03 --> 0.9024532710280373\n# 0.05 --> 0.9024532710280373\n# 0.08 --> 0.9036214953271028",
"_____no_output_____"
],
[
"print(\"upper arms:\\t\", np.mean(uarms))\nprint(\"lower arms:\\t\", np.mean(larms))\nprint(\"upper legs:\\t\", np.mean(ulegs))\nprint(\"lower legs:\\t\", np.mean(llegs))\nprint('avg:\\t\\t', np.mean([\n np.mean(uarms), np.mean(larms), np.mean(ulegs), np.mean(llegs)\n]))",
"_____no_output_____"
],
[
"# Smoothing, hm = 0.1\n# upper arms:\t 0.9906542056074766\n# lower arms:\t 0.9649532710280374\n# upper legs:\t 0.9649532710280374\n# lower legs:\t 0.8995327102803738\n# avg:\t\t 0.9550233644859814\n\n# upper arms:\t 0.9906542056074766\n# lower arms:\t 0.9672897196261683\n# upper legs:\t 0.9649532710280374\n# lower legs:\t 0.8995327102803738\n# avg:\t\t 0.955607476635514",
"_____no_output_____"
],
[
"plt.plot(range(len(larms)), larms)\n\n# print(np.argmin(llegs))\nfor frame, value in enumerate(larms):\n if value == 0:\n print(frame)",
"_____no_output_____"
],
[
"from mvpose.baseline.hypothesis import get_believe\nFRAME = 5\n# FRAME = 0\n\nIm, calib, pos2d, pos3d = kth.get(data_root, seq1_zipname, seq1_dir, FRAME, player=2)\n\npos3d = Pos3d[FRAME]\n\npredictions = poses_per_frame[FRAME]\nHumans = predictions\n\nfig = plt.figure(figsize=(16,6))\n\ncolors = ['blue', 'red', 'green']\n\nfig = plt.figure(figsize=(16, 8))\nfor idx, (im, pred) in enumerate(zip(Im, predictions)):\n ax = fig.add_subplot(1, 3, idx+1); ax.axis('off')\n ax.imshow(im, alpha=0.6)\n \n for human in pred:\n draw_mscoco_human2d(ax, human[:, 0:2], color='red', lcolor='blue', linewidth=3)\n\nfor prs in predictions:\n for p in prs:\n print(get_believe(np.squeeze(p)))\n \nplt.show()",
"_____no_output_____"
],
[
"fig = plt.figure(figsize=(16,12))\n\nprint(\"FRAME\", FRAME)\nH = track.get_by_frame(FRAME)\nif len(H) == 18:\n H = [H]\n \nHumans = kth.transform3d_from_mscoco(H)\n\nprint(extract_best(pos3d, Humans, alpha=0.5))\n\ncolors = ['blue', 'red', 'green', 'teal']\n\nfor cid, cam in enumerate(calib):\n ax = fig.add_subplot(1, 3, 1+cid)\n ax.axis('off')\n im = Im[cid]\n h,w,_ = im.shape\n ax.set_xlim([0, w])\n ax.set_ylim([h, 0])\n ax.imshow(im, alpha=0.6)\n \n for pid, hyp in enumerate(H):\n draw_mscoco_human(ax, hyp, cam, alpha=0.5,\n color='blue', lcolor='red', linewidth=3)\n \nplt.tight_layout()\nplt.show()",
"_____no_output_____"
],
[
"assert False",
"_____no_output_____"
],
[
" # R_ANKLE 0\n # R_KNEE 1\n # R_HIP 2\n # L_HIP 3\n # L_KNEE 4\n # L_ANKLE 5\n # R_WRIST 6\n # R_ELBOW 7\n # R_SHOULDER 8\n # L_SHOULDER 9\n # L_ELBOW 10\n # L_WRIST 11\n # BOTTOM_HEAD 12\n # TOP_HEAD 13\n \n\nfrom mpl_toolkits.mplot3d import Axes3D\n\njid = 4 # 7 = hand left, 4 = hand right\n\nHL = []\n\nfor t in range(0, 214):\n pose = track.get_by_frame(t)\n if pose is None or pose[jid] is None:\n HL.append(None)\n else:\n HL.append(pose[jid])\n \nio = [0 if e is None else 1 for e in HL]\n\nxy = np.array([(e[0], e[1], e[2]) for e in HL if e is not None])\n\numin = np.min(xy)\numax = np.max(xy)\n\nfig = plt.figure(figsize=(12, 12))\nax = fig.add_subplot(111, projection='3d')\nax.set_xlim([umin, umax])\nax.set_ylim([umin, umax])\nax.set_zlim([umin, umax])\n\n\n# xy = xy[100:130]\n\nax.plot(xy[:, 0], xy[:, 1], xy[:, 2])\n\nplt.show()",
"_____no_output_____"
],
[
"p_gt = []\nt_gt = []\np = []\nt = []\n\njid = 10 # right hand\ngt_jid = 0 # right foot\n\nfor frame in range(0, 214):\n \n gt = Pos3d[frame]\n p_gt.append(gt[gt_jid])\n t_gt.append(frame)\n \n pose = track.get_by_frame(frame)\n if pose is None or pose[jid] is None:\n continue\n p.append(pose[jid])\n t.append(frame)\n \np_gt = np.array(p_gt)\np = np.array(p)\n\nfig = plt.figure(figsize=(10, 4))\nax = fig.add_subplot(111)\n\n\nprint(p.shape)\n\ndim = 2\n\nax.plot(t_gt, p_gt[:, dim])\nax.plot(t, p[:, dim])\n\nplt.show()",
"_____no_output_____"
],
[
"# fill the gaps with interpolations\nfrom scipy.ndimage.filters import gaussian_filter1d\nfrom statsmodels.tsa.arima_model import ARIMA\nfrom statsmodels.tsa.holtwinters import ExponentialSmoothing, SimpleExpSmoothing\nimport numpy.linalg as la\n\nt = []\np = []\nfor frame in range(0, 214):\n pose = track.get_by_frame(frame)\n if pose is None or pose[jid] is None:\n pts = []\n start_frame = max(0, frame - 5)\n end_frame = min(214, frame + 5)\n for _frame in range(start_frame, end_frame):\n _pose = track.get_by_frame(_frame)\n if _pose is None or _pose[jid] is None:\n continue\n pts.append(_pose[jid])\n assert len(pts) > 0\n pt = np.mean(pts, axis=0)\n else:\n pt = pose[jid]\n \n p.append(pt)\n t.append(frame)\np = np.array(p)\n\n# gaussian blur\nsigma = 1.8\n#data = np.expand_dims(p[:, dim])\ndata = p[:, dim]\n\np_gaussian = gaussian_filter1d(data, sigma, mode='reflect')\n\n\n# ES\nes = SimpleExpSmoothing(data).fit(smoothing_level=0.9)\np_es = es.fittedvalues\n\n\nfig = plt.figure(figsize=(10, 4))\nax = fig.add_subplot(111)\n\nax.plot(t_gt, p_gt[:, dim], label='gt')\nax.plot(t, p[:, dim], label='original signal')\nax.plot(t, p_gaussian, label='gaussian')\n# ax.plot(t, p_es, label='exp smoothing')\n\ndef l2(gt, data):\n return la.norm(gt - data)\n\nprint('original\\t', l2(p_gt[:, dim], p[:, dim]))\nprint('gaussian\\t', l2(p_gt[:, dim], p_gaussian))\n# print('expsmooth\\t', l2(p_gt[:, dim], p_es))\n\nplt.legend()\nplt.show()",
"_____no_output_____"
],
[
"def smoothing(track, sigma, interpolation_range=4):\n first_frame = track.frames[0]\n last_frame = track.last_seen()\n\n XYZ = []\n for frame in range(first_frame, last_frame):\n pose = track.get_by_frame(frame)\n if pose is None or pose[jid] is None:\n pts = []\n start_frame = max(first_frame, frame - 5)\n end_frame = min(last_frame, frame + 5)\n for _frame in range(start_frame, end_frame):\n _pose = track.get_by_frame(_frame)\n if _pose is None or _pose[jid] is None:\n continue\n pts.append(_pose[jid])\n assert len(pts) > 0\n pt = np.mean(pts, axis=0)\n else:\n pt = pose[jid]\n XYZ.append(pt)\n \n XYZ = np.array(XYZ) * 1000\n XYZ_sm = np.empty_like(XYZ)\n for dim in range(0, 3):\n D = XYZ[:, dim]\n D = gaussian_filter1d(D, sigma, mode='reflect')\n XYZ_sm[:, dim] = D\n return XYZ_sm, XYZ\n \nS = []\nA = []\nM = []\nO = []\n\nfor sigma in np.linspace(1, 2, 10):\n Pred, Orig = smoothing(track, sigma)\n Gt = p_gt[0:213, ] * 1000\n \n l2 = la.norm(Pred - Gt, axis=1)\n l2_o = la.norm(Orig - Gt, axis=1)\n# print('sigma ' + str(sigma) + '\\t')\n# print('\\tmean ', np.mean(l2))\n# print('\\tmax ', np.max(l2))\n# print('\\torig:', np.mean(l2_o))\n \n S.append(sigma)\n A.append(np.mean(l2))\n M.append(np.max(l2))\n O.append(np.mean(l2_o))\n\nfig = plt.figure(figsize=(12, 5))\nax = fig.add_subplot(111)\n\n\nax.plot(S, A, label='mean')\nax.plot(S, M, label='max')\nax.plot(S, O, label='orignal')\n\n\nplt.legend()\nplt.show()",
"_____no_output_____"
],
[
"X = Orig[:, 0]\nX_ = Pred[:, 0]\nX__ = Gt[:, 0]\n\nprint(\"g\", Gt.shape)\n\nt = range(len(X))\n\nfig = plt.figure(figsize=(10, 4))\nax = fig.add_subplot(111)\n\nax.plot(t, X, label='original')\nax.plot(t, X_, label='prediction')\nax.plot(t, X__, label='gt')\n\nplt.legend()\nplt.show()",
"_____no_output_____"
],
[
"def check_dim(Orig, Gt, dim, sigma):\n D = Orig[:, dim]\n G = Gt[:, dim]\n F = gaussian_filter1d(D, sigma, mode='reflect')\n return np.mean((F - G)**2)\n \n \nSigma = np.linspace(0.5, 5, 20)\nX = []\nY = []\nZ = []\n\nfor s in Sigma:\n X.append(check_dim(Orig, Gt, 0, s))\n Y.append(check_dim(Orig, Gt, 1, s))\n Z.append(check_dim(Orig, Gt, 2, s))\n\nfig = plt.figure(figsize=(12, 4))\nax = fig.add_subplot(111)\n\nax.plot(Sigma, X, label='X')\nax.plot(Sigma, Y, label='Y')\nax.plot(Sigma, Z, label='Z')\n\nplt.legend()\nplt.show()",
"_____no_output_____"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
ec6a2079c6aea4b3cc441c10034c1b872ee9d9d8 | 5,131 | ipynb | Jupyter Notebook | CUYLER_FIELHAUER_Sprint1_03_Variables_and_Numerical_Types_Assignment.ipynb | cuylerf/2021_22-Ratza-Intro-CS-Sem-2 | 345e1a10e0b3e4785303e2ab40529f4709744816 | [
"BSD-3-Clause"
]
| null | null | null | CUYLER_FIELHAUER_Sprint1_03_Variables_and_Numerical_Types_Assignment.ipynb | cuylerf/2021_22-Ratza-Intro-CS-Sem-2 | 345e1a10e0b3e4785303e2ab40529f4709744816 | [
"BSD-3-Clause"
]
| null | null | null | CUYLER_FIELHAUER_Sprint1_03_Variables_and_Numerical_Types_Assignment.ipynb | cuylerf/2021_22-Ratza-Intro-CS-Sem-2 | 345e1a10e0b3e4785303e2ab40529f4709744816 | [
"BSD-3-Clause"
]
| null | null | null | 24.089202 | 301 | 0.453518 | [
[
[
"<a href=\"https://colab.research.google.com/github/cuylerf/2021_22-Ratza-Intro-CS-Sem-2/blob/main/CUYLER_FIELHAUER_Sprint1_03_Variables_and_Numerical_Types_Assignment.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"# Key Concepts",
"_____no_output_____"
],
[
"### What does it mean for a variable to be mutable or immutable?\na mutable variable can be changed after its created but imutable cant\n\n\n### What is the name of the enhancement proposal that outlines Python code style?\npep-8\n\n\n### What is the difference between static and dynamic variables?\nstatic is non changing variables but dynamic can change\n\n\n### What are reserved keywords?\nthey are keywords that cant be used in names like variables\n\n\n### What are the rules for variable naming in Python?\nyou can name it anything that isnt a key word and you have to put underscores as spaces\n\n\n### What are the two main numerical types in Python?\nstrings and integers\n\n\n### What are the two components of a complex number?\nthe number and the letter\n\n\n",
"_____no_output_____"
],
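[
"For reference, here are small generic Python examples of the concepts above: a list is mutable, a tuple is immutable, `int` and `float` are the two main numerical types, and a complex number exposes its real and imaginary parts.",
"_____no_output_____"
],
[
"# Small illustrative examples for the concepts above\nnums = [1, 2, 3]      # lists are mutable\nnums[0] = 99          # this works\nprint(nums)\n\npoint = (1, 2)        # tuples are immutable\ntry:\n    point[0] = 99     # this raises a TypeError\nexcept TypeError as err:\n    print(\"immutable:\", err)\n\nprint(type(3), type(3.0))   # the two main numerical types: int and float\n\nz = 6 + 2j                  # a complex number\nprint(z.real, z.imag)       # real part and imaginary part",
"_____no_output_____"
],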
[
"# Code",
"_____no_output_____"
],
[
"Assign the variables `var_1`, `var_2`, and `var_3` to the value `\"potato\"`",
"_____no_output_____"
]
],
[
[
"var_1=potato\nvar_2=var_1\nvar_3=var_2",
"_____no_output_____"
]
],
[
[
"Convert an integer to a float and back to an integer",
"_____no_output_____"
]
],
[
[
"ten=10 \n\nprint(float(ten))\nint(ten)\nprint(ten)",
"10.0\n10\n"
],
[
"",
"_____no_output_____"
]
],
[
[
"Evaluate `42 = 6j + 6`",
"_____no_output_____"
]
],
[
[
"j=(42-6)/6\nprint(j)",
"6.0\n"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
]
|
ec6a31ff61aaf39d7070790cb18970a1bc8a7445 | 2,477 | ipynb | Jupyter Notebook | notebooks/book1/11/ridgePathProstate.ipynb | patel-zeel/pyprobml | 027ef3c13a2a63d958e05fdedb68fd7b8f0e0261 | [
"MIT"
]
| null | null | null | notebooks/book1/11/ridgePathProstate.ipynb | patel-zeel/pyprobml | 027ef3c13a2a63d958e05fdedb68fd7b8f0e0261 | [
"MIT"
]
| 1 | 2022-03-27T04:59:50.000Z | 2022-03-27T04:59:50.000Z | notebooks/book1/11/ridgePathProstate.ipynb | patel-zeel/pyprobml | 027ef3c13a2a63d958e05fdedb68fd7b8f0e0261 | [
"MIT"
]
| 2 | 2022-03-26T11:52:36.000Z | 2022-03-27T05:17:48.000Z | 31.35443 | 114 | 0.578926 | [
[
[
"empty"
]
]
]
| [
"empty"
]
| [
[
"empty"
]
]
|
ec6a3a269034badbc2638b461aa8d82e935d7db1 | 259,651 | ipynb | Jupyter Notebook | notebooks/DCEGM-Upper-Envelope.ipynb | sbenthall/DemARK | ef1c010091d28c7dea2e5d4fa0f746e67c6b23f4 | [
"Apache-2.0"
]
| null | null | null | notebooks/DCEGM-Upper-Envelope.ipynb | sbenthall/DemARK | ef1c010091d28c7dea2e5d4fa0f746e67c6b23f4 | [
"Apache-2.0"
]
| null | null | null | notebooks/DCEGM-Upper-Envelope.ipynb | sbenthall/DemARK | ef1c010091d28c7dea2e5d4fa0f746e67c6b23f4 | [
"Apache-2.0"
]
| null | null | null | 259.132735 | 21,916 | 0.917177 | [
[
[
"# DCEGM Upper Envelope\n## [\"The endogenous grid method for discrete-continuous dynamic choice models with (or without) taste shocks\"](https://onlinelibrary.wiley.com/doi/abs/10.3982/QE643)\n\n<p style=\"text-align: center;\"><small><small><small>For the following badges: GitHub does not allow click-through redirects; right-click to get the link, then paste into navigation bar</small></small></small></p>\n\n[](https://mybinder.org/v2/gh/econ-ark/DemARK/master?filepath=notebooks%2FDCEGM-Upper-Envelope.ipynb)\n\n[](https://colab.research.google.com/github/econ-ark/DemARK/blob/master/notebooks/DCEGM-Upper-Envelope.ipynb)\n\n\n\nThis notebook provides a simple introduction to the upper envelope calculation in the \"DCEGM\" algorithm <cite data-cite=\"6202365/4F64GG8F\"></cite>. It takes the EGM method proposed in <cite data-cite=\"6202365/HQ6H9JEI\"></cite>, and extends it to the mixed choice (discrete and continuous) case. It handles various constraints. It works on a 1-dimensional problems.\n\nThe main challenge in the types of models considered in DCEGM is, that the first order conditions to the Bellman equations are no longer sufficient to find an optimum. Though, they are still necessary in a broad class of models. This means that our EGM step will give us (resource, consumption) pairs that do fulfill the FOCs, but that are sub-optimal (there's another consumption choices for the same initial resources that gives a higher value).\n\nTake a consumption model formulated as:\n$$\n\\max_{\\{c_t\\}^T_{t=1}} \\sum^T_{t=1}\\beta^t\\cdot u(c_t)\n$$\ngiven some initial condition on $x$ and some laws of motion for the states, though explicit references to states are omitted. Then, if we're in a class of models described in EGM\n, we can show that\n$$\nc_t = {u_{c}}^{-1}[E_t(u_c(c_{t+1}))]\n$$\nuniquely determines an optimal consumption today given the expected marginal utility of consuming tomorrow. However, if there is a another choice in the choice set, and that choice is discrete, we get\n$$\n\\max_{\\{c_t, d_t\\}^T_{t=1}} \\sum^T_{t=1}\\beta^t\\cdot u(c_t, d_t)\n$$\nagain given initial conditions and the laws of motion. Then, we can show that\n$$\nc_t = {u_{c}}^{-1}[E_t(u_c(c_{t+1}))]\n$$\nwill produce solutions that are necessary but not sufficient. Note, that there is no explicit mentioning of the discrete choices in the expectation, but they obviously vary over the realized states in general. For the optimal consumption, it doesn't matter what the choice is exactly, only what expected marginal utility is tomorrow. The algorithm presented in [1] is designed to take advantage of models with this structure.\n\nTo visualize the problem, consider the following pictures that show the output of an EGM step from the model in the REMARK [linkhere].",
"_____no_output_____"
]
],
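[
[
"To make the inversion step concrete, here is a minimal, self-contained sketch of the EGM idea for CRRA utility in a deterministic setting. The parameter values and the stand-in next-period consumption rule below are made up for illustration and are not part of the model solved later in this notebook: marginal utility of a guessed next-period consumption is discounted and inverted to recover current consumption, and adding the savings grid back gives the endogenous market-resource gridpoints.",
"_____no_output_____"
]
],
[
[
"# Minimal EGM-inversion sketch (illustrative values only)\nimport numpy as np\n\ncrra = 2.0                           # assumed relative risk aversion\nbeta = 0.96                          # assumed discount factor\nuP = lambda c: c**(-crra)            # marginal utility u'(c)\nuPinv = lambda x: x**(-1.0/crra)     # inverse marginal utility\n\naGrid = np.linspace(0.1, 5.0, 10)    # exogenous end-of-period savings grid\ncNext = 0.5*(aGrid + 1.0)            # stand-in for optimal consumption tomorrow\ncNow = uPinv(beta*uP(cNext))         # c_t = u_c^{-1}[ beta * u_c(c_{t+1}) ]\nmNow = aGrid + cNow                  # endogenous market-resource gridpoints\nprint(np.round(np.column_stack([mNow, cNow]), 3))",
"_____no_output_____"
]
],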
[
[
"# imports\nimport numpy as np\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"# here for now, should be\n# from HARK import discontools or whatever name is chosen\nfrom HARK.interpolation import LinearInterp\nfrom HARK.dcegm import calcSegments, calcMultilineEnvelope",
"_____no_output_____"
],
[
"m_common = np.linspace(0,1.0,100)\nm_egm = np.array([0.0, 0.04, 0.25, 0.15, 0.1, 0.3, 0.6,0.5, 0.35, 0.6, 0.75,0.85])\nc_egm = np.array([0.0, 0.03, 0.1, 0.07, 0.05, 0.36, 0.4, 0.6, 0.8, 0.9,0.9,0.9])\nvt_egm = np.array( [0.0, 0.05, 0.1,0.04, 0.02,0.2, 0.7, 0.5, 0.2, 0.9, 1.0, 1.2])",
"_____no_output_____"
],
[
"plt.plot(m_egm, vt_egm)\nplt.xlabel(\"resources\")\nplt.ylabel(\"transformed values\")",
"_____no_output_____"
],
[
"plt.plot(m_egm, c_egm)\nplt.xlabel(\"resources\")\nplt.ylabel(\"consumption\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"The point of DCEGM is to realize, that the segments on the `(m, vt)` curve that are decreasing, cannot be optimal. This leaves us with a set of increasing line segments, as seen below (`dcegmSegments` is the function in HARK that calculates the breaks where the curve goes from increasing to decreasing).",
"_____no_output_____"
]
],
[
[
"rise, fall = calcSegments(m_egm, vt_egm)",
"_____no_output_____"
]
],
[
[
"In `rise` we have all the starting indices for the segments that are \"good\", that is `(m, vt)` draws an increasing curve.",
"_____no_output_____"
]
],
[
[
"rise",
"_____no_output_____"
]
],
[
[
"We see that `rise` has its first index at `0`, then again at `4`, and lastly at `8`. Let's look at `fall`.",
"_____no_output_____"
]
],
[
[
"fall",
"_____no_output_____"
]
],
[
[
"We see that the last segment is increasing (as the last element of `rise` is larger than the last element of `fall`), and we see that `len(fall)` is one larger than number of problematic segments in the plot. The index of the last point in `m_egm`/`c_egm`/`vt_egm` is added for convenience when we do the upper envelope step (and is also convenient below for drawing the segments!).\n\nWe can use `fall` and `rise` to draw only the relevant segments that we will use to construct an upper envelope.",
"_____no_output_____"
]
],
[
[
"for j in range(len(fall)):\n idx = range(rise[j],fall[j]+1)\n plt.plot(m_egm[idx], vt_egm[idx])\nplt.xlabel(\"resources\")\nplt.ylabel(\"transformed values\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"Let us now use the `calcMultilineEnvelope` function to do the full DCEGM step: find segments and calculate upper envelope in one sweep.",
"_____no_output_____"
]
],
[
[
"m_upper, c_upper, v_upper = calcMultilineEnvelope(m_egm, c_egm, vt_egm, m_common)",
"_____no_output_____"
],
[
"for j in range(len(fall)):\n idx = range(rise[j],fall[j]+1)\n plt.plot(m_egm[idx], vt_egm[idx])\nplt.plot(m_upper, v_upper, 'k')\nplt.xlabel(\"resources\")\nplt.ylabel(\"transformed values\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"And there we have it! These functions are the building blocks for univariate discrete choice modeling in HARK, so hopefully this little demo helped better understand what goes on under the hood, or it was a help if you're extending some existing class with a discrete choice.",
"_____no_output_____"
],
[
"# An example: writing a will\n\nWe now present a basic example to illustrate the use of the previous tools in solving dynamic optimization problems with discrete and continuous decisions.\n\nThe model represents an agent that lives for three periods and decides how much of his resources to consume in each of them. On the second period, he must additionally decide whether to hire a lawyer to write a will. Having a will has the upside of allowing the agent to leave a bequest in his third and last period of life, which gives him utility, but has the downside that the lawyer will charge a fraction of his period 3 resources.\n\nOn each period, the agent receives a deterministic amount of resources $w$. The problem, therefore, is fully deterministic.\n\nI now present the model formally, solving it backwards.\n\nBut first, some setup and calibration:",
"_____no_output_____"
]
],
[
[
"# Import tools for linear interpolation and finding optimal\n# discrete choices.\nfrom HARK.interpolation import calcLogSumChoiceProbs\n\n# Import CRRA utility (and related) functions from HARK \nfrom HARK.utilities import CRRAutility, CRRAutilityP, CRRAutilityP_inv\n\n# Solution method parameters\naGrid = np.linspace(0,8,400) # Savings grid for EGM.\n\n# Model parameters\n\n# Parameters that need to be fixed\n# Relative risk aversion. This is fixed at 2 in order to mantain\n# the analytical solution that we use, from Carroll (2000)\nCRRA = 2 \n\n# Parameters that can be changed.\nw = 1 # Deterministic wage per period.\nwillCstFac = 0.35 # Fraction of resources charged by lawyer for writing a will.\nDiscFac = 0.98 # Time-discount factor.\n\n# Define utility (and related) functions\nu = lambda x: CRRAutility(x,CRRA)\nuP = lambda x: CRRAutilityP(x, CRRA)\nuPinv = lambda x: CRRAutilityP_inv(x, CRRA)\n\n# Create a grid for market resources\nmGrid = (aGrid-aGrid[0])*1.5\nmGridPlots = np.linspace(w,10*w,100)\nmGridPlotsC = np.insert(mGridPlots,0,0)\n\n# Transformations for value funtion interpolation\nvTransf = lambda x: np.exp(x)\nvUntransf = lambda x: np.log(x)",
"_____no_output_____"
]
],
[
[
"# The third (last) period of life\n\nIn the last period of life, the agent's problem is determined by his total amount of resources $m_3$ and a state variable $W$ that indicates whether he wrote a will ($W=1$) or not ($W=0$).\n\n### The agent without a will\n\nAn agent who does not have a will simply consumes all of his available resources. Therefore, his value and consumption functions will be:\n\n\\begin{equation}\nV_3(m_3,W=0) = u(m_3)\n\\end{equation}\n\n\\begin{equation}\nc_3(m_3, W=0) = m_3\n\\end{equation}\n\nWhere $u(\\cdot)$ gives the utility from consumption. We assume a CRRA specification $u(c) = \\frac{c^{1-\\rho}}{1-\\rho}$.\n\n### The agent with a will\n\nAn agent who wrote a will decides how to allocate his available resources $m_3$ between his consumption and a bequest. We assume an additive specification for the utility of a given consumption-bequest combination that follows a particular case in [Carroll (2000)](http://www.econ2.jhu.edu/people/ccarroll/Why.pdf). The component of utility from leaving a bequest $x$ is assumed to be $\\ln (x+1)$. Therefore, the agent's value function is\n\n\\begin{equation}\nV_3(m_3, W=1) = \\max_{0\\leq c_3 \\leq m_3} u(c_3) + \\ln(m_3 - c_3 + 1)\n\\end{equation}\n\nFor ease of exposition we consider the case $\\rho = 2$, where [Carroll (2000)](http://www.econ2.jhu.edu/people/ccarroll/Why.pdf) shows that the optimal consumption level is given by\n\n\\begin{equation}\nc_3(m_3, W=1) = \\min \\left[m_3, \\frac{-1 + \\sqrt{1 + 4(m_3+1)}}{2} \\right].\n\\end{equation}\n\nThe consumption function shows that $m_3=1$ is the level of resources at which an important change of behavior occurs: agents leave bequests only for $m_3 > 1$. Since an important change of behavior happens at this point, we call it a 'kink-point' and add it to our grids.",
"_____no_output_____"
]
],
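[
[
"As a quick sanity check (the grid of resource values below is arbitrary, for illustration only), we can verify numerically that for resources above the kink the closed-form rule satisfies the first-order condition u'(c_3) = 1/(m_3 - c_3 + 1) implied by the bequest term, where u'(c) = c^{-2} for relative risk aversion 2.",
"_____no_output_____"
]
],
[
[
"# Numerical check of the closed-form rule c_3(m_3, W=1) above (illustrative grid)\nimport numpy as np\n\nm = np.linspace(1.5, 10.0, 5)               # resources above the kink point m = 1\nc = (-1 + np.sqrt(1 + 4*(m + 1)))/2         # analytical consumption with a will\nprint(np.max(np.abs(c**(-2) - 1/(m - c + 1))))   # should be numerically ~0",
"_____no_output_____"
]
],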
[
[
"# Agent without a will\nmGrid3_no = mGrid\ncGrid3_no = mGrid\nvGrid3_no = u(cGrid3_no)\n\n# Create functions\nc3_no = LinearInterp(mGrid3_no, cGrid3_no) # (0,0) is already here.\nvT3_no = LinearInterp(mGrid3_no, vTransf(vGrid3_no), lower_extrap = True)\nv3_no = lambda x: vUntransf(vT3_no(x))\n\n# Agent with a will\n\n# Define an auxiliary function with the analytical consumption expression\nc3will = lambda m: np.minimum(m, -0.5 + 0.5*np.sqrt(1+4*(m+1)))\n\n# Find the kink point\nmKink = 1.0\nindBelw = mGrid < mKink\nindAbve = mGrid > mKink\n\nmGrid3_wi = np.concatenate([mGrid[indBelw],\n np.array([mKink]),\n mGrid[indAbve]])\n\ncGrid3_wi = c3will(mGrid3_wi)\n\ncAbve = c3will(mGrid[indAbve])\nbeqAbve = mGrid[indAbve] - c3will(mGrid[indAbve])\nvGrid3_wi = np.concatenate([u(mGrid[indBelw]),\n u(np.array([mKink])),\n u(cAbve) + np.log(1+beqAbve)])\n\n# Create functions\nc3_wi = LinearInterp(mGrid3_wi, cGrid3_wi) # (0,0) is already here\nvT3_wi = LinearInterp(mGrid3_wi, vTransf(vGrid3_wi), lower_extrap = True)\nv3_wi = lambda x: vUntransf(vT3_wi(x))\n\nplt.figure()\n\nplt.plot(mGridPlots, v3_wi(mGridPlots), label = 'Will')\nplt.plot(mGridPlots, v3_no(mGridPlots), label = 'No Will')\nplt.title('Period 3: Value functions')\nplt.xlabel('Market resources')\nplt.legend()\nplt.show()\n\nplt.plot(mGridPlotsC, c3_wi(mGridPlotsC), label = 'Will')\nplt.plot(mGridPlotsC, c3_no(mGridPlotsC), label = 'No Will')\nplt.title('Period 3: Consumption Functions')\nplt.xlabel('Market resources')\nplt.legend()\nplt.show()",
"c:\\github\\hark\\HARK\\utilities.py:141: RuntimeWarning: divide by zero encountered in reciprocal\n return( c**(1.0 - gam) / (1.0 - gam) )\n"
]
],
[
[
"# The second period\n\nOn the second period, the agent takes his resources as given (the only state variable) and makes two decisions:\n- Whether to write a will or not.\n- What fraction of his resources to consume.\n\nThese decisions can be seen as happening sequentially: the agent first decides whether to write a will or not, and then consumes optimally in accordance with his previous decision. Since we solve the model backwards in time, we first explore the consumption decision, conditional on the choice of writing a will or not.\n\n## An agent who decides not to write a will\n\nAfter deciding not to write a will, an agent solves the optimization problem expressed in the following conditional value function\n\n\\begin{equation}\n\\begin{split}\n\\nu (m_2|w=0) &= \\max_{0\\leq c \\leq m_2} u(c) + \\beta V_3(m_3,W=0)\\\\\ns.t.&\\\\\nm_3 &= m_2 - c + w\n\\end{split} \n\\end{equation}\n\nWe can approximate a solution to this problem through the method of endogenous gridpoints. This yields approximations to $\\nu(\\cdot|w=0)$ and $c_2(\\cdot|w=0)$",
"_____no_output_____"
]
],
[
[
"# Second period, not writing a will\n\n# Compute market resources at 3 with and without a will\nmGrid3_cond_nowi = aGrid + w\n# Compute marginal value of assets in period 3 for each ammount of savings in 2\nvPGrid3_no = uP(c3_no(mGrid3_cond_nowi))\n# Get consumption through EGM inversion of the euler equation\ncGrid2_cond_no = uPinv(DiscFac*vPGrid3_no)\n\n# Get beginning-of-period market resources\nmGrid2_cond_no = aGrid + cGrid2_cond_no\n\n# Compute value function\nvGrid2_cond_no = u(cGrid2_cond_no) + DiscFac*v3_no(mGrid3_cond_nowi)\n\n# Create interpolating value and consumption functions\nvT2_cond_no = LinearInterp(mGrid2_cond_no, vTransf(vGrid2_cond_no), lower_extrap = True)\nv2_cond_no = lambda x: vUntransf(vT2_cond_no(x))\nc2_cond_no = LinearInterp(np.insert(mGrid2_cond_no,0,0), np.insert(cGrid2_cond_no,0,0))",
"_____no_output_____"
]
],
[
[
"## An agent who decides to write a will\n\nAn agent who decides to write a will also solves for his consumption dinamically. We assume that the lawyer that helps the agent write his will takes some fraction $\\tau$ of his total resources in period 3. Therefore, the evolution of resources is given by $m_3 = (1-\\tau)(m_2 - c_2 + w)$. The conditional value function of the agent is therefore:\n\n\\begin{equation}\n\\begin{split}\n\\nu (m_2|w=1) &= \\max_{0\\leq c \\leq m_2} u(c) + \\beta V_3(m_3,W=1)\\\\\ns.t.&\\\\\nm_3 &= (1-\\tau)(m_2 - c + w)\n\\end{split} \n\\end{equation}\n\nWe also approximate a solution to this problem using the EGM. This yields approximations to $\\nu(\\cdot|w=1)$ and $c_2(\\cdot|w=1)$.",
"_____no_output_____"
]
],
[
[
"# Second period, writing a will\n\n# Compute market resources at 3 with and without a will\nmGrid3_cond_will = (1-willCstFac)*(aGrid + w)\n# Compute marginal value of assets in period 3 for each ammount of savings in 2\nvPGrid3_wi = uP(c3_wi(mGrid3_cond_will))\n# Get consumption through EGM inversion of the euler equation\ncGrid2_cond_wi = uPinv(DiscFac*(1-willCstFac)*vPGrid3_wi)\n# Get beginning-of-period market resources\nmGrid2_cond_wi = aGrid + cGrid2_cond_wi\n\n# Compute value function\nvGrid2_cond_wi = u(cGrid2_cond_wi) + DiscFac*v3_wi(mGrid3_cond_will)\n\n# Create interpolating value and consumption functions\nvT2_cond_wi = LinearInterp(mGrid2_cond_wi, vTransf(vGrid2_cond_wi), lower_extrap = True)\nv2_cond_wi = lambda x: vUntransf(vT2_cond_wi(x))\nc2_cond_wi = LinearInterp(np.insert(mGrid2_cond_wi,0,0), np.insert(cGrid2_cond_wi,0,0))",
"_____no_output_____"
]
],
[
[
"## The decision whether to write a will or not\n\nWith the conditional value functions at hand, we can now express and solve the decision of whether to write a will or not, and obtain the unconditional value and consumption functions.\n\n\\begin{equation}\nV_2(m_2) = \\max \\{ \\nu (m_2|w=0), \\nu (m_2|w=1) \\}\n\\end{equation}\n\n\\begin{equation}\nw^*(m_2) = \\arg \\max_{w \\in \\{0,1\\}} \\{ \\nu (m_2|w=w) \\}\n\\end{equation}\n\n\\begin{equation}\nc_2(m_2) = c_2(m_2|w=w^*(m_2))\n\\end{equation}\n\nWe now construct these objects.",
"_____no_output_____"
]
],
[
[
"# We use HARK's 'calcLogSumchoiceProbs' to compute the optimal\n# will decision over our grid of market resources.\n# The function also returns the unconditional value function\n# Use transformed values since -given sigma=0- magnitudes are unimportant. This\n# avoids NaNs at m \\approx 0.\nvTGrid2, willChoice2 = calcLogSumChoiceProbs(np.stack((vT2_cond_wi(mGrid),\n vT2_cond_no(mGrid))),\n sigma = 0)\nvGrid2 = vUntransf(vTGrid2)\n\n# Plot the optimal decision rule\nplt.plot(mGrid, willChoice2[0])\nplt.title('$w^*(m)$')\nplt.ylabel('Write will (1) or not (0)')\nplt.xlabel('Market resources: m')\nplt.show()\n\n# With the decision rule we can get the unconditional consumption function\ncGrid2 = (willChoice2*np.stack((c2_cond_wi(mGrid),c2_cond_no(mGrid)))).sum(axis=0)\n\nvT2 = LinearInterp(mGrid, vTransf(vGrid2), lower_extrap = True)\nv2 = lambda x: vUntransf(vT2(x))\nc2 = LinearInterp(mGrid, cGrid2)\n\n# Plot the conditional and unconditional value functions\nplt.plot(mGridPlots, v2_cond_wi(mGridPlots), label = 'Cond. Will')\nplt.plot(mGridPlots, v2_cond_no(mGridPlots), label = 'Cond. No will')\nplt.plot(mGridPlots, v2(mGridPlots), 'k--',label = 'Uncond.')\nplt.title('Period 2: Value Functions')\nplt.xlabel('Market resources')\nplt.legend()\nplt.show()\n\n# Plot the conditional and unconditiional consumption\n# functions\nplt.plot(mGridPlotsC, c2_cond_wi(mGridPlotsC), label = 'Cond. Will')\nplt.plot(mGridPlotsC, c2_cond_no(mGridPlotsC), label = 'Cond. No will')\nplt.plot(mGridPlotsC, c2(mGridPlotsC), 'k--',label = 'Uncond.')\nplt.title('Period 2: Consumption Functions')\nplt.xlabel('Market resources')\nplt.legend()\nplt.show()",
"_____no_output_____"
]
],
[
[
"# The first period\n\nIn the first period, the agent simply observes his market resources and decides what fraction of them to consume. His problem is represented by the following value function\n\n\\begin{equation}\n\\begin{split}\nV (m_1) &= \\max_{0\\leq c \\leq m_1} u(c) + \\beta V_2(m_2)\\\\\ns.t.&\\\\\nm_2 &= m_1 - c + w.\n\\end{split} \n\\end{equation}\n\nAlthough this looks like a simple problem, there are complications introduced by the kink in $V_2(\\cdot)$, which is clearly visible in the plot from the previous block. Particularly, note that $V_2'(\\cdot)$ and $c_2(\\cdot)$ are not monotonic: there are now multiple points $m$ for which the slope of $V_2(m)$ is equal. Thus, the Euler equation becomes a necessary but not sufficient condition for optimality and the traditional EGM inversion step can generate non-monotonic endogenous $m$ gridpoints.\n\nWe now illustrate this phenomenon.",
"_____no_output_____"
]
],
[
[
"# EGM step\n\n# Period 2 resources implied by the exogenous savings grid\nmGrid2 = aGrid + w\n# Envelope condition\nvPGrid2 = uP(c2(mGrid2))\n# Inversion of the euler equation\ncGrid1 = uPinv(DiscFac*vPGrid2)\n# Endogenous gridpoints\nmGrid1 = aGrid + cGrid1\nvGrid1 = u(cGrid1) + DiscFac*v2(mGrid2)\n\nplt.plot(mGrid1)\nplt.title('Endogenous gridpoints')\nplt.xlabel('Position: i')\nplt.ylabel('Endogenous grid point: $m_i$')\nplt.show()\n\n\nplt.plot(mGrid1,vGrid1)\nplt.title('Value function at grid points')\nplt.xlabel('Market resources: m')\nplt.ylabel('Value function')\nplt.show()",
"_____no_output_____"
]
],
[
[
"The previous cell applies the endogenous gridpoints method to the first period problem. The plots illustrate that the sequence of resulting endogenous gridpoints $\\{m_i\\}_{i=1}^N$ is not monotonic. This results in intervals of market resources over which we have multiple candidate values for the value function. This is the point where we must apply the upper envelope function illustrated above.\n\nWe finally use the resulting consumption and value grid points to create the first period value and consumption functions. ",
"_____no_output_____"
]
],
[
[
"# Calculate envelope\nvTGrid1 = vTransf(vGrid1) # The function operates with *transformed* value grids\n\nrise, fall = calcSegments(mGrid1, vTGrid1)\nmGrid1_up, cGrid1_up, vTGrid1_up = calcMultilineEnvelope(mGrid1, cGrid1,\n vTGrid1, mGrid)\n\n# Create functions\nc1_up = LinearInterp(mGrid1_up, cGrid1_up)\nv1T_up = LinearInterp(mGrid1_up, vTGrid1_up)\nv1_up = lambda x: vUntransf(v1T_up(x))\n\n# Show that there is a non-monothonicity and that the upper envelope fixes it\nplt.plot(mGrid1,vGrid1, label = 'EGM Points')\nplt.plot(mGridPlots, v1_up(mGridPlots), 'k--', label = 'Upper Envelope')\nplt.title('Period 1: Value function')\nplt.legend()\nplt.show()\n\nplt.plot(mGrid1,cGrid1, label = 'EGM Points')\nplt.plot(mGridPlotsC,c1_up(mGridPlotsC),'k--', label = 'Upper Envelope')\nplt.title('Period 1: Consumption function')\nplt.legend()\nplt.show()",
"_____no_output_____"
]
],
[
[
"# References\n[1] Iskhakov, F. , Jørgensen, T. H., Rust, J. and Schjerning, B. (2017), The endogenous grid method for discrete‐continuous dynamic choice models with (or without) taste shocks. Quantitative Economics, 8: 317-365. doi:10.3982/QE643\n\n[2] Carroll, C. D. (2006). The method of endogenous gridpoints for solving dynamic stochastic optimization problems. Economics letters, 91(3), 312-320.\n\n",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
]
|
ec6a410d66e47b49b4d7f9a41571b700e8bccd80 | 474,938 | ipynb | Jupyter Notebook | 1. Load and Visualize Data.ipynb | KozyLigh/P1_Facial_Keypoints | f2f2a1a7788b47ea67bc90d625779cde14855991 | [
"MIT"
]
| null | null | null | 1. Load and Visualize Data.ipynb | KozyLigh/P1_Facial_Keypoints | f2f2a1a7788b47ea67bc90d625779cde14855991 | [
"MIT"
]
| null | null | null | 1. Load and Visualize Data.ipynb | KozyLigh/P1_Facial_Keypoints | f2f2a1a7788b47ea67bc90d625779cde14855991 | [
"MIT"
]
| null | null | null | 717.429003 | 179,512 | 0.947966 | [
[
[
"# Facial Keypoint Detection\n \nThis project will be all about defining and training a convolutional neural network to perform facial keypoint detection, and using computer vision techniques to transform images of faces. The first step in any challenge like this will be to load and visualize the data you'll be working with. \n\nLet's take a look at some examples of images and corresponding facial keypoints.\n\n<img src='images/key_pts_example.png' width=50% height=50%/>\n\nFacial keypoints (also called facial landmarks) are the small magenta dots shown on each of the faces in the image above. In each training and test image, there is a single face and **68 keypoints, with coordinates (x, y), for that face**. These keypoints mark important areas of the face: the eyes, corners of the mouth, the nose, etc. These keypoints are relevant for a variety of tasks, such as face filters, emotion recognition, pose recognition, and so on. Here they are, numbered, and you can see that specific ranges of points match different portions of the face.\n\n<img src='images/landmarks_numbered.jpg' width=30% height=30%/>\n\n---",
"_____no_output_____"
],
[
"## Load and Visualize Data\n\nThe first step in working with any dataset is to become familiar with your data; you'll need to load in the images of faces and their keypoints and visualize them! This set of image data has been extracted from the [YouTube Faces Dataset](https://www.cs.tau.ac.il/~wolf/ytfaces/), which includes videos of people in YouTube videos. These videos have been fed through some processing steps and turned into sets of image frames containing one face and the associated keypoints.\n\n#### Training and Testing Data\n\nThis facial keypoints dataset consists of 5770 color images. All of these images are separated into either a training or a test set of data.\n\n* 3462 of these images are training images, for you to use as you create a model to predict keypoints.\n* 2308 are test images, which will be used to test the accuracy of your model.\n\nThe information about the images and keypoints in this dataset are summarized in CSV files, which we can read in using `pandas`. Let's read the training CSV and get the annotations in an (N, 2) array where N is the number of keypoints and 2 is the dimension of the keypoint coordinates (x, y).\n\n---",
"_____no_output_____"
]
],
[
[
"# import the required libraries\nimport glob\nimport os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n\nimport cv2",
"_____no_output_____"
],
[
"key_pts_frame = pd.read_csv('data/training_frames_keypoints.csv')\n\nn = 0\nimage_name = key_pts_frame.iloc[n, 0]\nkey_pts = key_pts_frame.iloc[n, 1:].as_matrix()\nkey_pts = key_pts.astype('float').reshape(-1, 2)\n\nprint('Image name: ', image_name)\nprint('Landmarks shape: ', key_pts.shape)\nprint('First 4 key pts: {}'.format(key_pts[:4]))",
"Image name: Luis_Fonsi_21.jpg\nLandmarks shape: (68, 2)\nFirst 4 key pts: [[ 45. 98.]\n [ 47. 106.]\n [ 49. 110.]\n [ 53. 119.]]\n"
],
[
"# print out some stats about the data\nprint('Number of images: ', key_pts_frame.shape[0])",
"Number of images: 3462\n"
]
],
[
[
"## Look at some images\n\nBelow, is a function `show_keypoints` that takes in an image and keypoints and displays them. As you look at this data, **note that these images are not all of the same size**, and neither are the faces! To eventually train a neural network on these images, we'll need to standardize their shape.",
"_____no_output_____"
]
],
[
[
"def show_keypoints(image, key_pts):\n \"\"\"Show image with keypoints\"\"\"\n plt.imshow(image)\n plt.scatter(key_pts[:, 0], key_pts[:, 1], s=20, marker='.', c='m')\n",
"_____no_output_____"
],
[
"# Display a few different types of images by changing the index n\n\n# select an image by index in our data frame\nn = 0\nimage_name = key_pts_frame.iloc[n, 0]\nkey_pts = key_pts_frame.iloc[n, 1:].as_matrix()\nkey_pts = key_pts.astype('float').reshape(-1, 2)\n\nplt.figure(figsize=(5, 5))\nshow_keypoints(mpimg.imread(os.path.join('data/training/', image_name)), key_pts)\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Dataset class and Transformations\n\nTo prepare our data for training, we'll be using PyTorch's Dataset class. Much of this this code is a modified version of what can be found in the [PyTorch data loading tutorial](http://pytorch.org/tutorials/beginner/data_loading_tutorial.html).\n\n#### Dataset class\n\n``torch.utils.data.Dataset`` is an abstract class representing a\ndataset. This class will allow us to load batches of image/keypoint data, and uniformly apply transformations to our data, such as rescaling and normalizing images for training a neural network.\n\n\nYour custom dataset should inherit ``Dataset`` and override the following\nmethods:\n\n- ``__len__`` so that ``len(dataset)`` returns the size of the dataset.\n- ``__getitem__`` to support the indexing such that ``dataset[i]`` can\n be used to get the i-th sample of image/keypoint data.\n\nLet's create a dataset class for our face keypoints dataset. We will\nread the CSV file in ``__init__`` but leave the reading of images to\n``__getitem__``. This is memory efficient because all the images are not\nstored in the memory at once but read as required.\n\nA sample of our dataset will be a dictionary\n``{'image': image, 'keypoints': key_pts}``. Our dataset will take an\noptional argument ``transform`` so that any required processing can be\napplied on the sample. We will see the usefulness of ``transform`` in the\nnext section.\n",
"_____no_output_____"
]
],
[
[
"from torch.utils.data import Dataset, DataLoader\n\nclass FacialKeypointsDataset(Dataset):\n \"\"\"Face Landmarks dataset.\"\"\"\n\n def __init__(self, csv_file, root_dir, transform=None):\n \"\"\"\n Args:\n csv_file (string): Path to the csv file with annotations.\n root_dir (string): Directory with all the images.\n transform (callable, optional): Optional transform to be applied\n on a sample.\n \"\"\"\n self.key_pts_frame = pd.read_csv(csv_file)\n self.root_dir = root_dir\n self.transform = transform\n\n def __len__(self):\n return len(self.key_pts_frame)\n\n def __getitem__(self, idx):\n image_name = os.path.join(self.root_dir,\n self.key_pts_frame.iloc[idx, 0])\n \n image = mpimg.imread(image_name)\n \n # if image has an alpha color channel, get rid of it\n if(image.shape[2] == 4):\n image = image[:,:,0:3]\n \n key_pts = self.key_pts_frame.iloc[idx, 1:].as_matrix()\n key_pts = key_pts.astype('float').reshape(-1, 2)\n sample = {'image': image, 'keypoints': key_pts}\n\n if self.transform:\n sample = self.transform(sample)\n\n return sample",
"_____no_output_____"
]
],
[
[
"Now that we've defined this class, let's instantiate the dataset and display some images.",
"_____no_output_____"
]
],
[
[
"# Construct the dataset\nface_dataset = FacialKeypointsDataset(csv_file='data/training_frames_keypoints.csv',\n root_dir='data/training/')\n\n# print some stats about the dataset\nprint('Length of dataset: ', len(face_dataset))",
"Length of dataset: 3462\n"
],
[
"# Display a few of the images from the dataset\nnum_to_display = 3\n\nfor i in range(num_to_display):\n \n # define the size of images\n fig = plt.figure(figsize=(20,10))\n \n # randomly select a sample\n rand_i = np.random.randint(0, len(face_dataset))\n sample = face_dataset[rand_i]\n\n # print the shape of the image and keypoints\n print(i, sample['image'].shape, sample['keypoints'].shape)\n\n ax = plt.subplot(1, num_to_display, i + 1)\n ax.set_title('Sample #{}'.format(i))\n \n # Using the same display function, defined earlier\n show_keypoints(sample['image'], sample['keypoints'])\n",
"0 (131, 131, 3) (68, 2)\n1 (167, 148, 3) (68, 2)\n2 (360, 334, 3) (68, 2)\n"
]
],
[
[
"## Transforms\n\nNow, the images above are not of the same size, and neural networks often expect images that are standardized; a fixed size, with a normalized range for color ranges and coordinates, and (for PyTorch) converted from numpy lists and arrays to Tensors.\n\nTherefore, we will need to write some pre-processing code.\nLet's create four transforms:\n\n- ``Normalize``: to convert a color image to grayscale values with a range of [0,1] and normalize the keypoints to be in a range of about [-1, 1]\n- ``Rescale``: to rescale an image to a desired size.\n- ``RandomCrop``: to crop an image randomly.\n- ``ToTensor``: to convert numpy images to torch images.\n\n\nWe will write them as callable classes instead of simple functions so\nthat parameters of the transform need not be passed everytime it's\ncalled. For this, we just need to implement ``__call__`` method and \n(if we require parameters to be passed in), the ``__init__`` method. \nWe can then use a transform like this:\n\n tx = Transform(params)\n transformed_sample = tx(sample)\n\nObserve below how these transforms are generally applied to both the image and its keypoints.\n\n",
"_____no_output_____"
]
],
[
[
"import torch\nfrom torchvision import transforms, utils\n# tranforms\n\nclass Normalize(object):\n \"\"\"Convert a color image to grayscale and normalize the color range to [0,1].\"\"\" \n\n def __call__(self, sample):\n image, key_pts = sample['image'], sample['keypoints']\n \n image_copy = np.copy(image)\n key_pts_copy = np.copy(key_pts)\n\n # convert image to grayscale\n image_copy = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n \n # scale color range from [0, 255] to [0, 1]\n image_copy= image_copy/255.0\n \n # scale keypoints to be centered around 0 with a range of [-1, 1]\n # mean = 100, sqrt = 50, so, pts should be (pts - 100)/50\n key_pts_copy = (key_pts_copy - 100)/50.0\n\n\n return {'image': image_copy, 'keypoints': key_pts_copy}\n\n\nclass Rescale(object):\n \"\"\"Rescale the image in a sample to a given size.\n\n Args:\n output_size (tuple or int): Desired output size. If tuple, output is\n matched to output_size. If int, smaller of image edges is matched\n to output_size keeping aspect ratio the same.\n \"\"\"\n\n def __init__(self, output_size):\n assert isinstance(output_size, (int, tuple))\n self.output_size = output_size\n\n def __call__(self, sample):\n image, key_pts = sample['image'], sample['keypoints']\n\n h, w = image.shape[:2]\n if isinstance(self.output_size, int):\n if h > w:\n new_h, new_w = self.output_size * h / w, self.output_size\n else:\n new_h, new_w = self.output_size, self.output_size * w / h\n else:\n new_h, new_w = self.output_size\n\n new_h, new_w = int(new_h), int(new_w)\n\n img = cv2.resize(image, (new_w, new_h))\n \n # scale the pts, too\n key_pts = key_pts * [new_w / w, new_h / h]\n\n return {'image': img, 'keypoints': key_pts}\n\n\nclass RandomCrop(object):\n \"\"\"Crop randomly the image in a sample.\n\n Args:\n output_size (tuple or int): Desired output size. If int, square crop\n is made.\n \"\"\"\n\n def __init__(self, output_size):\n assert isinstance(output_size, (int, tuple))\n if isinstance(output_size, int):\n self.output_size = (output_size, output_size)\n else:\n assert len(output_size) == 2\n self.output_size = output_size\n\n def __call__(self, sample):\n image, key_pts = sample['image'], sample['keypoints']\n\n h, w = image.shape[:2]\n new_h, new_w = self.output_size\n\n top = np.random.randint(0, h - new_h)\n left = np.random.randint(0, w - new_w)\n\n image = image[top: top + new_h,\n left: left + new_w]\n\n key_pts = key_pts - [left, top]\n\n return {'image': image, 'keypoints': key_pts}\n\n\nclass ToTensor(object):\n \"\"\"Convert ndarrays in sample to Tensors.\"\"\"\n\n def __call__(self, sample):\n image, key_pts = sample['image'], sample['keypoints']\n \n # if image has no grayscale color channel, add one\n if(len(image.shape) == 2):\n # add that third color dim\n image = image.reshape(image.shape[0], image.shape[1], 1)\n \n # swap color axis because\n # numpy image: H x W x C\n # torch image: C X H X W\n image = image.transpose((2, 0, 1))\n \n return {'image': torch.from_numpy(image),\n 'keypoints': torch.from_numpy(key_pts)}",
"_____no_output_____"
]
],
[
[
"## Test out the transforms\n\nLet's test these transforms out to make sure they behave as expected. As you look at each transform, note that, in this case, **order does matter**. For example, you cannot crop a image using a value smaller than the original image (and the orginal images vary in size!), but, if you first rescale the original image, you can then crop it to any size smaller than the rescaled size.",
"_____no_output_____"
]
],
[
[
"# test out some of these transforms\nrescale = Rescale(100)\ncrop = RandomCrop(50)\ncomposed = transforms.Compose([Rescale(250),\n RandomCrop(224)])\n\n# apply the transforms to a sample image\ntest_num = 500\nsample = face_dataset[test_num]\n\nfig = plt.figure()\nfor i, tx in enumerate([rescale, crop, composed]):\n transformed_sample = tx(sample)\n\n ax = plt.subplot(1, 3, i + 1)\n plt.tight_layout()\n ax.set_title(type(tx).__name__)\n show_keypoints(transformed_sample['image'], transformed_sample['keypoints'])\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Create the transformed dataset\n\nApply the transforms in order to get grayscale images of the same shape. Verify that your transform works by printing out the shape of the resulting data (printing out a few examples should show you a consistent tensor size).",
"_____no_output_____"
]
],
[
[
"# define the data tranform\n# order matters! i.e. rescaling should come before a smaller crop\ndata_transform = transforms.Compose([Rescale(250),\n RandomCrop(224),\n Normalize(),\n ToTensor()])\n\n# create the transformed dataset\ntransformed_dataset = FacialKeypointsDataset(csv_file='data/training_frames_keypoints.csv',\n root_dir='data/training/',\n transform=data_transform)\n",
"_____no_output_____"
],
[
"# print some stats about the transformed data\nprint('Number of images: ', len(transformed_dataset))\n\n# make sure the sample tensors are the expected size\nfor i in range(5):\n sample = transformed_dataset[i]\n print(i, sample['image'].size(), sample['keypoints'].size())\n",
"Number of images: 3462\n0 torch.Size([1, 224, 224]) torch.Size([68, 2])\n1 torch.Size([1, 224, 224]) torch.Size([68, 2])\n2 torch.Size([1, 224, 224]) torch.Size([68, 2])\n3 torch.Size([1, 224, 224]) torch.Size([68, 2])\n4 torch.Size([1, 224, 224]) torch.Size([68, 2])\n"
]
],
[
[
"## Data Iteration and Batching\n\nRight now, we are iterating over this data using a ``for`` loop, but we are missing out on a lot of PyTorch's dataset capabilities, specifically the abilities to:\n\n- Batch the data\n- Shuffle the data\n- Load the data in parallel using ``multiprocessing`` workers.\n\n``torch.utils.data.DataLoader`` is an iterator which provides all these\nfeatures, and we'll see this in use in the *next* notebook, Notebook 2, when we load data in batches to train a neural network!\n\n---\n\n",
"_____no_output_____"
],
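[
"As a rough sketch of what that will look like (the batch size and `num_workers` value here are arbitrary illustrative choices), a `DataLoader` can wrap the `transformed_dataset` defined above and yield shuffled batches of image/keypoint tensors:",
"_____no_output_____"
],
[
"# Hedged sketch: wrap the transformed dataset in a DataLoader (arbitrary batch size / workers)\nfrom torch.utils.data import DataLoader\n\nbatch_loader = DataLoader(transformed_dataset, batch_size=10, shuffle=True, num_workers=0)\n\n# grab one batch and check the stacked tensor shapes\nfor batch in batch_loader:\n    print(batch['image'].size(), batch['keypoints'].size())\n    break",
"_____no_output_____"
],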
[
"## Ready to Train!\n\nNow that you've seen how to load and transform our data, you're ready to build a neural network to train on this data.\n\nIn the next notebook, you'll be tasked with creating a CNN for facial keypoint detection.",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
]
]
|
ec6a44d9844557f792006eea92b372d5a7cdfe09 | 37,489 | ipynb | Jupyter Notebook | GetData.ipynb | clin-projects/LoL-predictor | 71434e66714e950bbbc317b5234dcb9094817982 | [
"MIT"
]
| null | null | null | GetData.ipynb | clin-projects/LoL-predictor | 71434e66714e950bbbc317b5234dcb9094817982 | [
"MIT"
]
| null | null | null | GetData.ipynb | clin-projects/LoL-predictor | 71434e66714e950bbbc317b5234dcb9094817982 | [
"MIT"
]
| null | null | null | 40.224249 | 1,681 | 0.566806 | [
[
[
"api_key = 'RGAPI-35c1b7c3-e9a6-4c8d-aa75-d4f1d44102bf'",
"_____no_output_____"
],
[
"# use riotwatcher (https://github.com/pseudonym117/Riot-Watcher)\n\nfrom riotwatcher import RiotWatcher\nimport numpy as np\n\nwatcher = RiotWatcher(api_key)\n\nmy_region = 'na1'",
"_____no_output_____"
],
[
"# https://www.leagueofgraphs.com/rankings/summoners/na\n# Doublelift, TF blade, thtr, Pobelter, CG Solo, Tony Top,\n\nsummoner_id = watcher.summoner.by_name(my_region, 'Jurassiq')",
"_____no_output_____"
],
[
"print(summoner_id)",
"{'id': 19587365, 'accountId': 32281250, 'name': 'Tony Top', 'profileIconId': 3551, 'revisionDate': 1532984182000, 'summonerLevel': 127}\n"
],
[
"summoner_id['accountId']",
"_____no_output_____"
],
[
"matches = watcher.match.matchlist_by_account(my_region, account_id = summoner_id['accountId'])",
"_____no_output_____"
],
[
"# pull all matches from api\n\nfor i, cur_match in enumerate(matches['matches']):\n cur_match_id = cur_match['gameId']\n match = watcher.match.by_id(my_region, match_id = cur_match_id)\n fout = './dat/match_' + str(cur_match_id) + '.json'\n print(i, cur_match_id, fout)\n with open(fout, 'w') as fp:\n json.dump(match, fp)",
"0 2836050929 ./dat/match_2836050929.json\n1 2836015259 ./dat/match_2836015259.json\n2 2835988988 ./dat/match_2835988988.json\n3 2835984590 ./dat/match_2835984590.json\n4 2835990464 ./dat/match_2835990464.json\n5 2835966589 ./dat/match_2835966589.json\n6 2835954180 ./dat/match_2835954180.json\n7 2835951719 ./dat/match_2835951719.json\n8 2835874509 ./dat/match_2835874509.json\n9 2834840602 ./dat/match_2834840602.json\n10 2834786374 ./dat/match_2834786374.json\n11 2834791460 ./dat/match_2834791460.json\n12 2834766666 ./dat/match_2834766666.json\n13 2834751618 ./dat/match_2834751618.json\n14 2834733504 ./dat/match_2834733504.json\n15 2834707521 ./dat/match_2834707521.json\n16 2834658772 ./dat/match_2834658772.json\n17 2834651947 ./dat/match_2834651947.json\n18 2834646686 ./dat/match_2834646686.json\n19 2834464586 ./dat/match_2834464586.json\n20 2834437645 ./dat/match_2834437645.json\n21 2834399361 ./dat/match_2834399361.json\n22 2834400019 ./dat/match_2834400019.json\n23 2834353492 ./dat/match_2834353492.json\n24 2834322962 ./dat/match_2834322962.json\n25 2834295603 ./dat/match_2834295603.json\n26 2834290276 ./dat/match_2834290276.json\n27 2834282500 ./dat/match_2834282500.json\n28 2834245457 ./dat/match_2834245457.json\n29 2834224945 ./dat/match_2834224945.json\n30 2834196917 ./dat/match_2834196917.json\n31 2834173721 ./dat/match_2834173721.json\n32 2834148958 ./dat/match_2834148958.json\n33 2834164425 ./dat/match_2834164425.json\n34 2834159988 ./dat/match_2834159988.json\n35 2833981424 ./dat/match_2833981424.json\n36 2833943467 ./dat/match_2833943467.json\n37 2833925849 ./dat/match_2833925849.json\n38 2833887208 ./dat/match_2833887208.json\n39 2833857451 ./dat/match_2833857451.json\n40 2833828025 ./dat/match_2833828025.json\n41 2833782604 ./dat/match_2833782604.json\n42 2833742196 ./dat/match_2833742196.json\n43 2833697744 ./dat/match_2833697744.json\n44 2833700105 ./dat/match_2833700105.json\n45 2833684441 ./dat/match_2833684441.json\n46 2832966419 ./dat/match_2832966419.json\n47 2832950413 ./dat/match_2832950413.json\n48 2832904914 ./dat/match_2832904914.json\n49 2832892944 ./dat/match_2832892944.json\n50 2832541519 ./dat/match_2832541519.json\n51 2832532906 ./dat/match_2832532906.json\n52 2832505191 ./dat/match_2832505191.json\n53 2832490423 ./dat/match_2832490423.json\n54 2832472803 ./dat/match_2832472803.json\n55 2832437206 ./dat/match_2832437206.json\n56 2832424102 ./dat/match_2832424102.json\n57 2832407250 ./dat/match_2832407250.json\n58 2832392057 ./dat/match_2832392057.json\n59 2832386781 ./dat/match_2832386781.json\n60 2832363390 ./dat/match_2832363390.json\n61 2831946080 ./dat/match_2831946080.json\n62 2831938917 ./dat/match_2831938917.json\n63 2831921258 ./dat/match_2831921258.json\n64 2831876341 ./dat/match_2831876341.json\n65 2831881181 ./dat/match_2831881181.json\n66 2831408154 ./dat/match_2831408154.json\n67 2831389913 ./dat/match_2831389913.json\n68 2831349539 ./dat/match_2831349539.json\n69 2831328291 ./dat/match_2831328291.json\n70 2830711106 ./dat/match_2830711106.json\n71 2830585435 ./dat/match_2830585435.json\n72 2830557179 ./dat/match_2830557179.json\n73 2830563199 ./dat/match_2830563199.json\n74 2830548195 ./dat/match_2830548195.json\n75 2830543862 ./dat/match_2830543862.json\n76 2830529867 ./dat/match_2830529867.json\n77 2830507595 ./dat/match_2830507595.json\n78 2830504177 ./dat/match_2830504177.json\n79 2830501501 ./dat/match_2830501501.json\n80 2830415123 ./dat/match_2830415123.json\n81 2830385089 ./dat/match_2830385089.json\n82 2830356322 
./dat/match_2830356322.json\n83 2830326796 ./dat/match_2830326796.json\n84 2830287042 ./dat/match_2830287042.json\n85 2830237305 ./dat/match_2830237305.json\n86 2830038952 ./dat/match_2830038952.json\n87 2829948581 ./dat/match_2829948581.json\n88 2829943102 ./dat/match_2829943102.json\n89 2829896334 ./dat/match_2829896334.json\n90 2829901003 ./dat/match_2829901003.json\n91 2829862449 ./dat/match_2829862449.json\n92 2829825357 ./dat/match_2829825357.json\n93 2829819946 ./dat/match_2829819946.json\n94 2829804082 ./dat/match_2829804082.json\n95 2829347254 ./dat/match_2829347254.json\n96 2828859878 ./dat/match_2828859878.json\n"
],
[
"matches['matches'][0]['gameId']",
"_____no_output_____"
],
[
"print(matches['matches'][0]['gameId'])",
"2834728655\n"
],
[
"cur_match_id = matches['matches'][1]['gameId']",
"_____no_output_____"
],
[
"timeline = watcher.match.timeline_by_match(my_region, match_id = cur_match_id)",
"_____no_output_____"
],
[
"timeline.keys()",
"_____no_output_____"
],
[
"timeline['frames'][0].keys()",
"_____no_output_____"
],
[
"len(timeline['frames'][0]['participantFrames'])",
"_____no_output_____"
],
[
"len(timeline['frames'][-3]['events'])",
"_____no_output_____"
],
[
"timeline['frames']",
"_____no_output_____"
],
[
"timeline['frames'][-1]['participantFrames']",
"_____no_output_____"
],
[
"match = watcher.match.by_id(my_region, match_id = cur_match_id)\nfout = './dat/match_' + str(cur_match_id) + '.json'\nwith open(fout, 'w') as fp:\n json.dump(match, fp)",
"_____no_output_____"
],
[
"import json\n\n# https://stackoverflow.com/questions/7100125/storing-python-dictionaries\nfout = './dat/match_' + str(cur_match_id) + '.json'\nwith open(fout, 'w') as fp:\n json.dump(match, fp)",
"_____no_output_____"
],
[
"with open(fout, 'r') as fp:\n data = json.load(fp)",
"_____no_output_____"
],
[
"match.keys()",
"_____no_output_____"
],
[
"solo_vars = ['creepsPerMinDeltas', 'goldPerMinDeltas']\nteam_outcome = ['win']\ntime_deltas = ['0-10', '10-20', '20-30', '30-end']\n\ndat_labels = []\nprint('labels:')\nfor v in solo_vars:\n for t in time_deltas:\n cur_label = v + '_' + t\n dat_labels.append(cur_label)\n print(\"('\"+ cur_label + \"', int),\")",
"labels:\n('creepsPerMinDeltas_0-10', int),\n('creepsPerMinDeltas_10-20', int),\n('creepsPerMinDeltas_20-30', int),\n('creepsPerMinDeltas_30-end', int),\n('goldPerMinDeltas_0-10', int),\n('goldPerMinDeltas_10-20', int),\n('goldPerMinDeltas_20-30', int),\n('goldPerMinDeltas_30-end', int),\n"
],
[
"def get_winner(match):\n if match['teams'][0]['win'] == 'Win':\n return 0\n elif match['teams'][1]['win'] == 'Win':\n return 1\n else:\n return -1\n \ndef get_team_data(match_id, winner):\n team_dat = []\n \n team_vars = ['firstBlood', 'firstTower', 'firstInhibitor', 'firstBaron', 'firstDragon', 'firstRiftHerald']\n \n for v in team_vars:\n cur_dat = match['teams'][winner][v]*1\n team_dat.append(cur_dat)\n return np.array(team_dat)\n\ndef get_role_lane(match_id, participant_id):\n role = match['participants'][participant_id]['timeline']['role']\n lane = match['participants'][participant_id]['timeline']['lane']\n return [role, lane]\n\ndef get_participant_data(match_id, winner, debug=False):\n\n participant_data = []\n\n m = len(match['participants'][0]['timeline'][solo_vars[0]])\n \n participant_ids = [(x+winner*5) % 10 for x in range(10)]\n \n for participant_id in participant_ids:\n [role, lane] = get_role_lane(match_id, participant_id)\n cur_participant_data = []\n for v in solo_vars:\n for i, t in enumerate(time_deltas):\n if i + 1 <= m:\n cur_participant_data.append(np.round(match['participants'][participant_id]['timeline'][v][t],2))\n else:\n cur_participant_data.append(-1)\n \n cur_participant_data = np.array(cur_participant_data,\n dtype = object)\n participant_data.append([role, lane, cur_participant_data])\n \n return np.array(participant_data)\n\ndef process_match(match_id):\n winner = get_winner(match_id)\n team_dat = get_team_data(match_id, winner)\n duration = match_id['gameDuration'] / 60\n duration_id = int(duration/10) # 0 if last 10-20, 1 if 20-30, 2 if 30-40, 3 if 40+, etc.\n participant_data = get_participant_data(match_id, winner)\n return winner, team_dat, duration, duration_id, participant_data",
"_____no_output_____"
],
[
"match_data = process_match(match)",
"_____no_output_____"
],
[
"match_data[4]",
"_____no_output_____"
],
[
"match.keys()",
"_____no_output_____"
],
[
"match['gameType']",
"_____no_output_____"
],
[
"duration = match['gameDuration'] / 60\nduration_id = int(duration/10)-1 # 0 if last 10-20, 1 if 20-30, 2 if 30-40, 3 if 40+, etc.\nprint(duration_id)",
"1\n"
],
[
"match['participants'][0]",
"_____no_output_____"
],
[
"winner = 1\n[print((x+winner*5) % 10) for x in range(10)]",
"5\n6\n7\n8\n9\n0\n1\n2\n3\n4\n"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
ec6a450437f661df23b31d35847e044bc2a8525a | 696,865 | ipynb | Jupyter Notebook | Transition Matrix/Transition Matrix --- Ridge.ipynb | snagcliffs/data_driven_sarc | beeaecbb9e9b4c3912525b598b80b2e3e100b44d | [
"MIT"
]
| 1 | 2020-03-06T21:29:46.000Z | 2020-03-06T21:29:46.000Z | Transition Matrix/Transition Matrix --- Ridge.ipynb | snagcliffs/data_driven_sarc | beeaecbb9e9b4c3912525b598b80b2e3e100b44d | [
"MIT"
]
| null | null | null | Transition Matrix/Transition Matrix --- Ridge.ipynb | snagcliffs/data_driven_sarc | beeaecbb9e9b4c3912525b598b80b2e3e100b44d | [
"MIT"
]
| null | null | null | 777.751116 | 197,820 | 0.946345 | [
[
[
"%pylab inline\n\nimport numpy as np\nimport seaborn\nfrom MuscleHelperFunctions import *\n\nfrom sklearn.linear_model import RidgeCV, LassoCV\nfrom sklearn.ensemble import GradientBoostingRegressor\nfrom sklearn.preprocessing import PolynomialFeatures\n\nnp.random.seed(1)",
"Populating the interactive namespace from numpy and matplotlib\n"
],
[
"# Load in data from MC simulations\ndatapath = '../MC_data'\nsarc_list = LoadData(datapath, verbose = True)",
"1000\n2000\n3000\n"
],
[
"# Split into training and testing sets\ntraining_frac = 0.9\nnum_params = len(sarc_list.keys())\n\ntraining_runs = np.random.choice(num_params, int(num_params*training_frac), replace = False)\ntesting_runs = [j for j in range(num_params) if j not in training_runs]\n\nTraining = [list(sarc_list.keys())[j] for j in training_runs]\nTesting = [list(sarc_list.keys())[j] for j in testing_runs]",
"_____no_output_____"
]
],
[
[
"# Fit model for transition probabilities",
"_____no_output_____"
]
],
[
[
"N = 720\nP12 = []; P11 = []; P22 = []; P21 = []; P23 = []; P33 = []; P31 = []; P32 = []\nthreshold = 5\n\nfor key in Training:\n \n runs = sarc_list[key]\n n = len(runs[0])\n \n state_fracs_ave = N*np.vstack([np.mean(np.vstack([np.array(run['xb_fraction_free']) for run in runs]),axis=0),\n np.mean(np.vstack([np.array(run['xb_fraction_loose']) for run in runs]),axis=0),\n np.mean(np.vstack([np.array(run['xb_fraction_tight']) for run in runs]),axis=0)])\n \n state_fracs_min = N*np.vstack([np.min(np.vstack([np.array(run['xb_fraction_free']) for run in runs]),axis=0),\n np.min(np.vstack([np.array(run['xb_fraction_loose']) for run in runs]),axis=0),\n np.min(np.vstack([np.array(run['xb_fraction_tight']) for run in runs]),axis=0)])\n \n P12_ave = np.mean(np.vstack([run['xb_trans_12']/(N*run['xb_fraction_free']) for run in runs]), axis = 0)\n P21_ave = np.mean(np.vstack([run['xb_trans_21']/(N*run['xb_fraction_loose']) for run in runs]), axis = 0)\n P23_ave = np.mean(np.vstack([run['xb_trans_23']/(N*run['xb_fraction_loose']) for run in runs]), axis = 0)\n P31_ave = np.mean(np.vstack([run['xb_trans_31']/(N*run['xb_fraction_tight']) for run in runs]), axis = 0)\n P32_ave = np.mean(np.vstack([run['xb_trans_32']/(N*run['xb_fraction_tight']) for run in runs]), axis = 0)\n \n axial_force = np.mean(np.vstack([run['axial_force'] for run in runs]), axis = 0)\n radial_tension = np.mean(np.vstack([run['radial_tension'] for run in runs]), axis = 0)\n \n # Loop over timesteps\n for j in range(1,n):\n \n # External forcing\n perm = runs[0]['actin_permissiveness'][j]\n zt = runs[0]['z_line'][j] - runs[0]['z_line'][j-1]\n z = runs[0]['z_line'][j] - np.mean(runs[0]['z_line'])\n \n # Only look at ones where we can get transition probabilities\n if state_fracs_ave[0,j] >= threshold and state_fracs_min[0,j] > 0:\n P12.append([perm, zt, z, radial_tension[j], P12_ave[j]])\n\n if state_fracs_ave[1,j] >= threshold and state_fracs_min[1,j] > 0:\n P21.append([perm, zt, z, radial_tension[j], P21_ave[j]])\n P23.append([perm, zt, z, radial_tension[j], P23_ave[j]])\n\n if state_fracs_ave[2,j] >= threshold and state_fracs_min[2,j] > 0:\n P31.append([perm, zt, z, radial_tension[j], P31_ave[j]])\n P32.append([perm, zt, z, radial_tension[j], P32_ave[j]])",
"_____no_output_____"
],
[
"P = {}\nP[(1,2)] = P12\nP[(2,1)] = P21\nP[(2,3)] = P23\nP[(3,2)] = P32\nP[(3,1)] = P31\nXi = {}\n\ntrans_prob_deg = 3\n\nfor key, Pr in P.items():\n \n n = len(Pr)\n \n poly = PolynomialFeatures(degree=trans_prob_deg)\n X = poly.fit_transform(np.vstack([np.array(j[:-1]) for j in Pr]))\n PP = np.vstack([j[-1] for j in Pr])\n Xi[key] = LassoCV(cv=10, n_jobs=-1, max_iter=25000, normalize = True).fit(X,PP.flatten())\n\n print(key)",
"(1, 2)\n(2, 1)\n(2, 3)\n(3, 2)\n(3, 1)\n"
]
],
[
[
"# Fit model for axial force and radial tension",
"_____no_output_____"
]
],
[
[
"dt = 0.05\ndata =[]\n\nfor key in Training:\n \n runs = sarc_list[key]\n \n # Get forcing info from previous step\n axial_force = np.mean(np.vstack([run['axial_force'] for run in runs]), axis = 0)\n radial_tension = np.mean(np.vstack([run['radial_tension'] for run in runs]), axis = 0)\n \n # Get avaliable info from current step\n z = np.array(runs[0]['z_line'][1:])\n ls = np.array(runs[0]['lattice_spacing'][1:])\n actin = np.array(runs[0]['actin_permissiveness'][1:])\n zt = np.array(runs[0]['z_line'][1:])-np.array(runs[0]['z_line'][:-1])\n fraction_free = np.mean(np.vstack([run['xb_fraction_free'][1:] for run in runs]), axis = 0)\n fraction_loose = np.mean(np.vstack([run['xb_fraction_loose'][1:] for run in runs]), axis = 0)\n fraction_tight = np.mean(np.vstack([run['xb_fraction_tight'][1:] for run in runs]), axis = 0)\n \n # Combine everything into matrix\n X = np.hstack([col.reshape(4399,1) for col in [z-np.mean(z),(z-np.mean(z)).clip(0), zt, ls, \\\n fraction_loose, fraction_tight]])\n force = np.hstack([axial_force[1:].reshape(4399,1), \\\n radial_tension[1:].reshape(4399,1)])\n \n data.append([X, force])\n \nX = np.vstack([dataset[0] for dataset in data])\ny = np.vstack([dataset[1] for dataset in data])",
"_____no_output_____"
],
[
"force_deg = 2\nX2 = PolynomialFeatures(degree=force_deg).fit_transform(X)\n\naxial_predictor = RidgeCV(cv=5).fit(X2,y[:,0])\nradial_predictor = RidgeCV(cv=5).fit(X2,y[:,1])\n\n# axial_predictor = GradientBoostingRegressor(n_estimators=100,max_depth=3).fit(X2,y[:,0])\n# radial_predictor = GradientBoostingRegressor(n_estimators=100,max_depth=3).fit(X2,y[:,1])",
"/home/samuel/anaconda3/lib/python3.6/site-packages/sklearn/linear_model/ridge.py:147: LinAlgWarning: Ill-conditioned matrix (rcond=9.36327e-18): result may not be accurate.\n overwrite_a=True).T\n/home/samuel/anaconda3/lib/python3.6/site-packages/sklearn/linear_model/ridge.py:147: LinAlgWarning: Ill-conditioned matrix (rcond=9.36327e-18): result may not be accurate.\n overwrite_a=True).T\n/home/samuel/anaconda3/lib/python3.6/site-packages/sklearn/linear_model/ridge.py:147: LinAlgWarning: Ill-conditioned matrix (rcond=9.36327e-18): result may not be accurate.\n overwrite_a=True).T\n/home/samuel/anaconda3/lib/python3.6/site-packages/sklearn/linear_model/ridge.py:147: LinAlgWarning: Ill-conditioned matrix (rcond=9.36327e-18): result may not be accurate.\n overwrite_a=True).T\n/home/samuel/anaconda3/lib/python3.6/site-packages/sklearn/linear_model/ridge.py:147: LinAlgWarning: Ill-conditioned matrix (rcond=9.36327e-17): result may not be accurate.\n overwrite_a=True).T\n/home/samuel/anaconda3/lib/python3.6/site-packages/sklearn/linear_model/ridge.py:147: LinAlgWarning: Ill-conditioned matrix (rcond=9.36327e-17): result may not be accurate.\n overwrite_a=True).T\n/home/samuel/anaconda3/lib/python3.6/site-packages/sklearn/linear_model/ridge.py:147: LinAlgWarning: Ill-conditioned matrix (rcond=9.36327e-17): result may not be accurate.\n overwrite_a=True).T\n/home/samuel/anaconda3/lib/python3.6/site-packages/sklearn/linear_model/ridge.py:147: LinAlgWarning: Ill-conditioned matrix (rcond=9.36327e-17): result may not be accurate.\n overwrite_a=True).T\n/home/samuel/anaconda3/lib/python3.6/site-packages/sklearn/linear_model/ridge.py:147: LinAlgWarning: Ill-conditioned matrix (rcond=9.36327e-18): result may not be accurate.\n overwrite_a=True).T\n/home/samuel/anaconda3/lib/python3.6/site-packages/sklearn/linear_model/ridge.py:147: LinAlgWarning: Ill-conditioned matrix (rcond=9.36327e-18): result may not be accurate.\n overwrite_a=True).T\n/home/samuel/anaconda3/lib/python3.6/site-packages/sklearn/linear_model/ridge.py:147: LinAlgWarning: Ill-conditioned matrix (rcond=9.36327e-18): result may not be accurate.\n overwrite_a=True).T\n/home/samuel/anaconda3/lib/python3.6/site-packages/sklearn/linear_model/ridge.py:147: LinAlgWarning: Ill-conditioned matrix (rcond=9.36327e-18): result may not be accurate.\n overwrite_a=True).T\n/home/samuel/anaconda3/lib/python3.6/site-packages/sklearn/linear_model/ridge.py:147: LinAlgWarning: Ill-conditioned matrix (rcond=9.36327e-17): result may not be accurate.\n overwrite_a=True).T\n/home/samuel/anaconda3/lib/python3.6/site-packages/sklearn/linear_model/ridge.py:147: LinAlgWarning: Ill-conditioned matrix (rcond=9.36327e-17): result may not be accurate.\n overwrite_a=True).T\n/home/samuel/anaconda3/lib/python3.6/site-packages/sklearn/linear_model/ridge.py:147: LinAlgWarning: Ill-conditioned matrix (rcond=9.36327e-17): result may not be accurate.\n overwrite_a=True).T\n/home/samuel/anaconda3/lib/python3.6/site-packages/sklearn/linear_model/ridge.py:147: LinAlgWarning: Ill-conditioned matrix (rcond=9.36327e-17): result may not be accurate.\n overwrite_a=True).T\n"
]
],
[
[
"# Test on novel input",
"_____no_output_____"
]
],
[
[
"###########################################\nregime = Testing[0]\n###########################################\n\nruns = sarc_list[regime]\nz = np.array(sarc_list[regime][0]['z_line'])[1:]\nls = np.array(runs[0]['lattice_spacing'][1:])\nlst = np.array(runs[0]['lattice_spacing'][1:]) - np.array(runs[0]['lattice_spacing'][:-1])\nzt = np.array(sarc_list[regime][0]['z_line'])[1:]-np.array(sarc_list[regime][0]['z_line'])[:-1]\naxial_force = np.mean(np.vstack([run['axial_force'] for run in runs]), axis = 0)\naxial_force_dev = np.std(np.vstack([run['axial_force'] for run in runs]), axis = 0)\nradial_tension = np.mean(np.vstack([run['radial_tension'] for run in runs]), axis = 0)\nradial_tension_dev = np.std(np.vstack([run['radial_tension'] for run in runs]), axis = 0)\np = np.array(sarc_list[regime][0]['actin_permissiveness'])[1:]\nN = len(z)\n\n###########################################\n#\n# Get true values relating to transition probabilities and fractions in each state\n#\n###########################################\n\nNum_bridges = 720\nP12_true = np.nanmean(np.vstack([run['xb_trans_12']/(Num_bridges*run['xb_fraction_free']) for run in runs]), axis = 0)\nP21_true = np.nanmean(np.vstack([run['xb_trans_21']/(Num_bridges*run['xb_fraction_loose']) for run in runs]), axis = 0)\nP23_true = np.nanmean(np.vstack([run['xb_trans_23']/(Num_bridges*run['xb_fraction_loose']) for run in runs]), axis = 0)\nP31_true = np.nanmean(np.vstack([run['xb_trans_31']/(Num_bridges*run['xb_fraction_tight']) for run in runs]), axis = 0)\nP32_true = np.nanmean(np.vstack([run['xb_trans_32']/(Num_bridges*run['xb_fraction_tight']) for run in runs]), axis = 0)\n\nP11_true = 1 - P12_true\nP22_true = 1 - P21_true - P23_true\nP33_true = 1 - P32_true - P31_true\nw = 15\nP12_smooth = MovingAve(P12_true, w)\nP21_smooth = MovingAve(P21_true, w)\nP23_smooth = MovingAve(P23_true, w)\nP32_smooth = MovingAve(P32_true, w)\nP31_smooth = MovingAve(P31_true, w)\nP11_smooth = MovingAve(P11_true, w)\nP22_smooth = MovingAve(P22_true, w)\nP33_smooth = MovingAve(P33_true, w)\n\nP11_model = np.zeros(N-1); P12_model = np.zeros(N-1); P21_model = np.zeros(N-1); P22_model = np.zeros(N-1);\nP23_model = np.zeros(N-1); P31_model = np.zeros(N-1); P32_model = np.zeros(N-1); P33_model = np.zeros(N-1);\n\nstate_fracs = np.vstack([np.mean(np.vstack([run[item][1:] for run in runs]), axis = 0) for item \\\n in ['xb_fraction_free','xb_fraction_loose','xb_fraction_tight']])\nstate_fracs_dev = np.vstack([np.std(np.vstack([run[item][1:] for run in runs]), axis = 0) for item \\\n in ['xb_fraction_free','xb_fraction_loose','xb_fraction_tight']])\n\n###########################################\n#\n# Loop through time to get model prediction\n#\n###########################################\n\n# Initialize state fractions and forcing\nS = np.zeros((3,N))\nS[:,0] = state_fracs[:,0]\n\nF = np.zeros((2,N))\nF[0,0] = axial_force[0]\nF[1,0] = radial_tension[0]\n\nfor j in range(N-1):\n \n zm = z[j]-np.mean(z) # mean subtracted\n zc = zm.clip(0) # positive component\n lsm = ls[j]-np.mean(ls)\n \n X_trans = PolynomialFeatures(degree=trans_prob_deg).fit_transform((np.array([p[j], zt[j], zm,\\\n F[1,j]])).reshape(1, -1))\n \n P12_model[j] = Xi[(1,2)].predict(X_trans).clip(0,1)\n P21_model[j] = Xi[(2,1)].predict(X_trans).clip(0,1)\n P23_model[j] = Xi[(2,3)].predict(X_trans).clip(0,1)\n P31_model[j] = Xi[(3,1)].predict(X_trans).clip(0,1)\n P32_model[j] = Xi[(3,2)].predict(X_trans).clip(0,1)\n \n P11_model[j] = 1 - P12_model[j]\n P22_model[j] = 1 - P21_model[j] - P23_model[j]\n 
P33_model[j] = 1 - P31_model[j] - P32_model[j]\n \n TP = np.vstack([np.hstack([P11_model[j], P21_model[j], P31_model[j]]),\n np.hstack([P12_model[j], P22_model[j], P32_model[j]]),\n np.hstack([0, P23_model[j], P33_model[j]])])\n \n # Update fraction in each state\n S[:,j+1] = TP.dot(S[:,j])\n \n # Compute new forcing\n X_force = PolynomialFeatures(degree=force_deg).fit_transform(np.array([zm, zc, zt[j+1], ls[j+1], \\\n S[1,j+1], S[2,j+1]]).reshape(1, -1))\n\n F[0,j+1] = axial_predictor.predict(X_force)\n F[1,j+1] = radial_predictor.predict(X_force)",
"/home/samuel/anaconda3/lib/python3.6/site-packages/ipykernel_launcher.py:27: RuntimeWarning: Mean of empty slice\n/home/samuel/anaconda3/lib/python3.6/site-packages/ipykernel_launcher.py:28: RuntimeWarning: Mean of empty slice\n"
],
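    [
        "# Sketch (assumption): summarize how closely the data-driven state fractions track the Monte Carlo\n# averages for this test regime, to complement the trajectory plots below.\nstate_err = np.abs(S - state_fracs)\nprint('mean absolute error per state (free, loose, tight):', np.round(state_err.mean(axis=1), 4))",
        "_____no_output_____"
    ],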
[
"figure(figsize = (15,10))\n\nlabel_font_size = 16\n\nsubplot(3,1,1)\nplot(P11_true, 'b', label = 'Full Model')\nplot(P11_model, 'r', label = 'Data Driven')\nxticks([])\nylabel(r'$P_{11}$', fontsize = label_font_size)\nlegend(fontsize = label_font_size)\n\nsubplot(3,1,2)\nplot(P21_true, 'b')\nplot(P21_model, 'r')\nxticks([])\nylabel(r'$P_{22}$', fontsize = label_font_size)\n\nsubplot(3,1,3)\nplot(P32_true, 'b')\nplot(P32_model, 'r')\nylabel(r'$P_{33}$', fontsize = label_font_size)\nxlabel(r'timestep', fontsize = label_font_size)",
"_____no_output_____"
],
[
"figure(figsize = (15,8))\n\nlabel_font_size = 18\n\ntrue_trans_probs = [P11_true, P12_true, P21_true, P22_true, P23_true, P31_true, P32_true, P33_true]\nmodel_trans_probs = [P11_model, P12_model, P21_model, P22_model, P23_model, P31_model, P32_model, P33_model]\ntrans_prob_labels = [r'$\\mathbf{A}_{11}$', r'$\\mathbf{A}_{12}$', r'$\\mathbf{A}_{21}$', r'$\\mathbf{A}_{22}$', \\\n r'$\\mathbf{A}_{23}$', r'$\\mathbf{A}_{31}$', r'$\\mathbf{A}_{32}$', r'$\\mathbf{A}_{33}$']\n\nfor j in range(8):\n subplot(4,2,j+1)\n plot(true_trans_probs[j], 'b', label = 'Full Model')\n plot(model_trans_probs[j], 'r', label = 'Data Driven')\n yticks(fontsize = label_font_size)\n ylabel(trans_prob_labels[j], fontsize = label_font_size)\n xlim([0,4400])\n \n if j > 5: \n xticks(fontsize = label_font_size)\n xlabel('Timestep', fontsize = label_font_size)\n else:\n xticks(fontsize = 0)\n\ntight_layout()",
"_____no_output_____"
],
[
"figure(figsize = (15,6))\n\nlabel_font_size = 18\n\nsubplot(3,1,1)\nplot(state_fracs[0,:], 'b', label = 'Full Model')\nplot(S[0,:], 'r', label = 'Data Driven')\nfill_between(np.arange(4399),state_fracs[0,:]+state_fracs_dev[0,:], state_fracs[0,:]-state_fracs_dev[0,:])\nxticks(fontsize = 0)\nyticks(fontsize = label_font_size)\nylabel(r'$y_1$', fontsize = label_font_size)\nxlim([0,4400])\n\nsubplot(3,1,2)\nplot(state_fracs[1,:], 'b')\nplot(S[1,:], 'r')\nfill_between(np.arange(4399),state_fracs[1,:]+state_fracs_dev[1,:], state_fracs[1,:]-state_fracs_dev[1,:])\nxticks(fontsize = 0)\nyticks(fontsize = label_font_size)\nylabel(r'$y_2$', fontsize = label_font_size)\nxlim([0,4400])\n\nsubplot(3,1,3)\nplot(state_fracs[2,:], 'b', label = 'Full Simulation')\nplot(S[2,:], 'r', label = 'Data Driven')\nfill_between(np.arange(4399),state_fracs[2,:]+state_fracs_dev[2,:], state_fracs[2,:]-state_fracs_dev[2,:])\nylabel(r'$y_3$', fontsize = label_font_size)\nxlabel('Timestep', fontsize = label_font_size)\nxlim([0,4400])\nyticks(fontsize = label_font_size)\nxticks(fontsize = label_font_size)\n\ntight_layout()",
"_____no_output_____"
],
[
"figure(figsize = (15,6))\n\nlabel_font_size = 16\n\nsubplot(2,1,1)\nplot(axial_force, 'b', label = 'Full Model')\nplot(F[0,:], 'r', label = 'Data Driven')\nfill_between(np.arange(4400),axial_force+axial_force_dev, axial_force-axial_force_dev)\nxticks(fontsize = 0)\nylabel('Axial Force (pN)', fontsize = label_font_size)\nxlim([0,4400])\nyticks(fontsize = label_font_size)\n\nsubplot(2,1,2)\nplot(radial_tension, 'b')\nplot(F[1,:], 'r')\nfill_between(np.arange(4400),radial_tension+radial_tension_dev, radial_tension-radial_tension_dev)\nylabel('Radial Tension (pN)', fontsize = label_font_size)\nxlabel('Timestep', fontsize = label_font_size)\nxlim([0,4400])\nxticks(fontsize = label_font_size)\nyticks(fontsize = label_font_size)",
"_____no_output_____"
]
],
[
[
"# Compute workloops for all regimes",
"_____no_output_____"
]
],
[
[
"# Get workloops for all regimes\nmodel = {}\ncount = 0\n\nfor regime in sarc_list.keys():\n \n print('\\r', count, end = '')\n \n runs = sarc_list[regime]\n z = np.array(sarc_list[regime][0]['z_line'])[1:]\n ls = np.array(runs[0]['lattice_spacing'][1:])\n lst = np.array(runs[0]['lattice_spacing'][1:]) - np.array(runs[0]['lattice_spacing'][:-1])\n zt = np.array(sarc_list[regime][0]['z_line'])[1:]-np.array(sarc_list[regime][0]['z_line'])[:-1]\n axial_force = np.mean(np.vstack([run['axial_force'] for run in runs]), axis = 0)\n radial_tension = np.mean(np.vstack([run['radial_tension'] for run in runs]), axis = 0)\n p = np.array(sarc_list[regime][0]['actin_permissiveness'])[1:]\n N = len(z)\n\n ###########################################\n #\n # Get true values relating to transition probabilities and fractions in each state\n #\n ###########################################\n\n Num_bridges = 720\n P12_true = np.mean(np.vstack([run['xb_trans_12']/(Num_bridges*run['xb_fraction_free']) for run in runs]), axis = 0)\n P21_true = np.mean(np.vstack([run['xb_trans_21']/(Num_bridges*run['xb_fraction_loose']) for run in runs]), axis = 0)\n P23_true = np.mean(np.vstack([run['xb_trans_23']/(Num_bridges*run['xb_fraction_loose']) for run in runs]), axis = 0)\n P31_true = np.mean(np.vstack([run['xb_trans_31']/(Num_bridges*run['xb_fraction_tight']) for run in runs]), axis = 0)\n P32_true = np.mean(np.vstack([run['xb_trans_32']/(Num_bridges*run['xb_fraction_tight']) for run in runs]), axis = 0)\n P11_true = 1 - P12_true\n P22_true = 1 - P21_true - P23_true\n P33_true = 1 - P32_true - P31_true\n w = 15\n P12_smooth = MovingAve(P12_true, w)\n P21_smooth = MovingAve(P21_true, w)\n P23_smooth = MovingAve(P23_true, w)\n P32_smooth = MovingAve(P32_true, w)\n P31_smooth = MovingAve(P31_true, w)\n P11_smooth = MovingAve(P11_true, w)\n P22_smooth = MovingAve(P22_true, w)\n P33_smooth = MovingAve(P33_true, w)\n\n P11_model = np.zeros(N-1); P12_model = np.zeros(N-1); P21_model = np.zeros(N-1); P22_model = np.zeros(N-1);\n P23_model = np.zeros(N-1); P31_model = np.zeros(N-1); P32_model = np.zeros(N-1); P33_model = np.zeros(N-1);\n\n state_fracs = np.vstack([np.mean(np.vstack([run[item][1:] for run in runs]), axis = 0) for item \\\n in ['xb_fraction_free','xb_fraction_loose','xb_fraction_tight']])\n\n ###########################################\n #\n # Loop through time to get model prediction\n #\n ###########################################\n\n # Initialize state fractions and forcing\n S = np.zeros((3,N))\n S[:,0] = state_fracs[:,0]\n\n F = np.zeros((2,N+1))\n F[0,0] = axial_force[0]\n F[1,0] = radial_tension[0]\n\n for j in range(N-1):\n\n zm = z[j]-np.mean(z) # mean subtracted\n zc = zm.clip(0) # clipped\n lsm = ls[j]-np.mean(ls)\n X_trans = PolynomialFeatures(degree=trans_prob_deg).fit_transform((np.array([p[j], zt[j], zm, \\\n F[1,j]])).reshape(1, -1))\n \n P12_model[j] = Xi[(1,2)].predict(X_trans).clip(0,1)\n P21_model[j] = Xi[(2,1)].predict(X_trans).clip(0,1)\n P23_model[j] = Xi[(2,3)].predict(X_trans).clip(0,1)\n P31_model[j] = Xi[(3,1)].predict(X_trans).clip(0,1)\n P32_model[j] = Xi[(3,2)].predict(X_trans).clip(0,1)\n\n P11_model[j] = 1 - P12_model[j]\n P22_model[j] = 1 - P21_model[j] - P23_model[j]\n P33_model[j] = 1 - P31_model[j] - P32_model[j]\n\n TP = np.vstack([np.hstack([P11_model[j], P21_model[j], P31_model[j]]),\n np.hstack([P12_model[j], P22_model[j], P32_model[j]]),\n np.hstack([0, P23_model[j], P33_model[j]])])\n\n # Update fraction in each state\n S[:,j+1] = TP.dot(S[:,j])\n\n # Compute new 
forcing\n X_force = PolynomialFeatures(degree=force_deg).fit_transform(np.array([zm, zc, zt[j+1], ls[j+1], \\\n S[1,j+1], S[2,j+1]]).reshape(1, -1))\n \n F[0,j+1] = axial_predictor.predict(X_force)\n F[1,j+1] = radial_predictor.predict(X_force)\n\n model[regime] = F\n \n count = count + 1",
" 149"
],
[
"work = {}\ndt = 0.05\n\ncount = 0\n\nfor j in range(len(list(sarc_list.keys()))):\n \n print('\\r', count, end = '')\n\n regime = list(sarc_list.keys())[j]\n period = int(regime[0]/dt)\n \n z = np.array(sarc_list[regime][0]['z_line'])\n axial = np.mean(np.vstack(run['axial_force'] for run in sarc_list[regime]), axis = 0)\n axial_model = model[regime][0,:]\n\n # Get work per loop and record\n beta = 3\n start = 114 # so that we have exactly three periods\n end = start + beta*period\n \n work_true = sum([-(z[i]-z[i-1])*axial[i] for i in range(start, end)]) / beta\n work_model = sum([-(z[i]-z[i-1])*axial_model[i] for i in range(start, end)]) / beta\n work_std_dev = np.std([sum([(z[i]-z[i-1])*run['axial_force'][i] for i in range(start, end)]) \\\n for run in sarc_list[regime]]) / beta \n work[regime] = [work_true, work_model, work_std_dev]\n \n count = count +1",
"\r 0"
],
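    [
        "# Sketch (assumption): quantify agreement between the Monte Carlo and data-driven work per loop\n# across all regimes, to complement the workloop and scatter plots below.\nwork_true_all = np.array([work[key][0] for key in work])\nwork_model_all = np.array([work[key][1] for key in work])\nrel_err = np.abs(work_model_all - work_true_all) / np.abs(work_true_all)\nprint('median relative error in work per loop:', np.round(np.median(rel_err), 3))",
        "_____no_output_____"
    ],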
[
"figure(figsize = (15,9))\ndt = 0.05\n\nlabel_fontsize = 16\ncount = 0\n\nfor regime in Testing:\n \n period = int(regime[0]/dt)\n \n z = np.array(sarc_list[regime][0]['z_line'])\n axial = np.mean(np.vstack(run['axial_force'] for run in sarc_list[regime]), axis = 0)\n axial_model = model[regime][0,:]\n \n subplot(3,5,count+1)\n\n scatter(z,axial, color = 'b', s = 10)\n scatter(z,axial_model, color = 'r', s = 10)\n\n if count == 10:\n xlabel(r'Length (nm)', fontsize = label_fontsize)\n ylabel('Force (pN)', fontsize = label_fontsize)\n \n count = count +1\n \n xticks(fontsize = label_fontsize)\n yticks(fontsize = label_fontsize)\n \ntight_layout()",
"_____no_output_____"
],
[
"figure(figsize = (10,10))\n\nlabel_fontsize = 18\n\nww = np.linspace(-80000,20000,2)\nplot(ww,ww, color = 'k', linewidth = 1)\nxlim([-6,2])\nylim([-6,2])\n\nerrorbar([work[key][0]/1000 for key in Training], [work[key][1]/1000 for key in Training], \\\n xerr=[work[key][2]/1000 for key in Training], color = 'b', fmt='o', ecolor='b', capthick=1, \\\n label = 'Training Data')\n\nerrorbar([work[key][0]/1000 for key in Testing], [work[key][1]/1000 for key in Testing], \\\n xerr=[work[key][2]/1000 for key in Testing], color = 'r', fmt='o', ecolor='r', capthick=2, \\\n label = 'Testing Data', zorder = 1000)\n\nlabel_fontsize = 18\nxlabel(r'Work from Monte Carlo Model ($J\\cdot10^{-18}$)', fontsize = label_fontsize)\nylabel(r'Work from Data Driven Model ($J\\cdot10^{-18}$)', fontsize = label_fontsize)\n\nxticks(fontsize = 18)\nyticks(fontsize = 18)",
"_____no_output_____"
]
]
]
| [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
]
|
ec6a49a790b3f57b46bb0a278a120545bf7d46f4 | 146,183 | ipynb | Jupyter Notebook | html_table.ipynb | davism02/Web-Design-Challenge | c2134999b28c836f957c5de895af506de4f7e953 | [
"ADSL"
]
| null | null | null | html_table.ipynb | davism02/Web-Design-Challenge | c2134999b28c836f957c5de895af506de4f7e953 | [
"ADSL"
]
| null | null | null | html_table.ipynb | davism02/Web-Design-Challenge | c2134999b28c836f957c5de895af506de4f7e953 | [
"ADSL"
]
| null | null | null | 488.906355 | 137,394 | 0.387993 | [
[
[
"# Dependency\nimport pandas as pd",
"_____no_output_____"
],
[
"df = pd.read_csv(\"Resources/cities.csv\")\ndf",
"_____no_output_____"
],
[
"html = df.to_html()\nhtml",
"_____no_output_____"
],
[
"text_file = open(\"data.html\", \"w\")\ntext_file.write(html)\ntext_file.close()",
"_____no_output_____"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code",
"code"
]
]
|
ec6a4d9b26685e8c6363d8efafdbdfdab5b93d21 | 174,172 | ipynb | Jupyter Notebook | _notebooks/2021-07-31-Answering-Business-Questions-Online-Music-Store-SQL.ipynb | MiguelAHG/mahg-data-science | d40462b8f973335e06f4100f62e4dd690e137c27 | [
"Apache-2.0"
]
| null | null | null | _notebooks/2021-07-31-Answering-Business-Questions-Online-Music-Store-SQL.ipynb | MiguelAHG/mahg-data-science | d40462b8f973335e06f4100f62e4dd690e137c27 | [
"Apache-2.0"
]
| null | null | null | _notebooks/2021-07-31-Answering-Business-Questions-Online-Music-Store-SQL.ipynb | MiguelAHG/mahg-data-science | d40462b8f973335e06f4100f62e4dd690e137c27 | [
"Apache-2.0"
]
| null | null | null | 47.600984 | 15,059 | 0.543371 | [
[
[
"# \"Answering Business Questions for an Online Music Store using SQL\"\n\n> \"I use intermediate SQL techniques like views, joins, aggregations, and set operations in order to solve 4 scenarios about a hypothetical online music store. Results are communicated with Matplotlib and Altair visualizations.\"\n\n- author: Migs Germar\n- toc: true\n- branch: master\n- badges: true\n- comments: true\n- categories: [sql, sqlite, python, pandas, numpy, matplotlib, seaborn, altair]\n- hide: false\n- search_exclude: false\n- image: images/2021-07-31-music-shop.jfif",
"_____no_output_____"
],
[
"<center><img src = \"https://miguelahg.github.io/mahg-data-science/images/2021-07-31-music-shop.jfif\" alt = \"A wall of square vinyl record cases.\"></center>\n\n<center><a href = \"https://unsplash.com/photos/fEVaiLwWvlU\">Unsplash | Clay Banks</a></center>",
"_____no_output_____"
],
[
"# Overview\n\nThe [Chinook database](https://github.com/lerocha/chinook-database) by Luis Rocha and Brice Lambson is a sample database about a hypothetical digital media store called Chinook. This store sells individual music tracks online, similar to iTunes. The database contains tables covering various aspects of the company, such as the employees, customers, invoices, tracks, albums, and artists.\n\nThe schema below, which was designed by Dataquest, lists the columns under each table. Columns connected by lines contain **matching information**.",
"_____no_output_____"
],
[
"<center><img src = \"https://miguelahg.github.io/mahg-data-science/images/chinook-schema-dataquest.png\" alt = \"Dataquest's schema of the Chinook database.\"></center>\n\n<center><a href = \"https://app.dataquest.io/c/46/m/191/guided-project%3A-answering-business-questions-using-sql/1/introduction-and-schema-diagram\">Dataquest Guided Project: Answering Business Questions Using SQL</a></center>",
"_____no_output_____"
],
[
"The matching columns allow us to perform joins on these tables. Thus, we are able to answer more complicated questions about the data.\n\nIn our hypothetical scenario, the Chinook company has requested us to answer the following business questions:\n\n- What are the best-selling music genres with regards to USA customers? Based on this, which new albums should be purchased for the Chinook store?\n- Which of Chinook's sales support agents has the highest total sales from their assigned customers? Can the exemplary performance of these employees be explained by any information in the database?\n- What are the statistics on the customers and sales for each country where Chinook offers its service?\n- How many purchases are full albums, and how many are selected sets of tracks? Based on this, what strategy should Chinook adopt when buying new tracks from record companies?\n\nSQL will be used to answer all of these questions. Matplotlib and Altair will also be used to produce helpful visualizations.",
"_____no_output_____"
],
[
"> Note: I wrote this notebook by following a guided project on the [Dataquest](https://www.dataquest.io/) platform, specifically the [Guided Project: Answering Business Questions Using SQL](https://app.dataquest.io/c/46/m/191/guided-project%3A-answering-business-questions-using-sql/1/introduction-and-schema-diagram). The general project flow and research questions came from Dataquest. However, the text and code here are written by me unless stated otherwise.",
"_____no_output_____"
],
[
"# Preparations",
"_____no_output_____"
],
[
"Install the necessary packages.",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport altair as alt",
"_____no_output_____"
]
],
[
[
"Connect to the database using SQLite.",
"_____no_output_____"
]
],
[
[
"%load_ext sql\n%sql sqlite:///private/2021-07-31-Intermediate-SQL-Files/chinook.db",
"_____no_output_____"
]
],
[
[
"# Analysis\n\n## Tables and Views\n\nFirst, we'll inspect the tables and views available in the `chinook.db` database.\n\n- Tables contain columns of data. Each column has a different name and data type.\n- Views do not contain data. Instead, these are pre-written SQL queries which show a transformation of existing data. Thus, it can be called a \"virtual table.\" ([Sławińska 2020](https://learnsql.com/blog/sql-view/))",
"_____no_output_____"
]
],
[
[
"%%sql\nSELECT\n name,\n type\nFROM sqlite_master\nWHERE type IN (\"table\", \"view\")\n;",
" * sqlite:///private/2021-07-31-Intermediate-SQL-Files/chinook.db\nDone.\n"
]
],
[
[
"Originally, there were only 11 tables and 0 views in the database. The views listed above are ones which I made throughout this project. I will show how I created these views in later sections.\n\nFor now, let's inspect the `customer` table, as we will be using it to answer most of the company's business questions.",
"_____no_output_____"
]
],
[
[
"%%sql\nSELECT *\nFROM customer\nLIMIT 5\n;",
" * sqlite:///private/2021-07-31-Intermediate-SQL-Files/chinook.db\nDone.\n"
]
],
[
[
"Each row in this table contains data on a different customer of Chinook. Each customer has a unique customer ID and an assigned support representative from Chinook. The support rep's employee ID is stored in the `support_rep_id` column. The other columns contain information on the customer's name, occupation, location, and contact details.\n\n> Note: All names and personal details in the Chinook database are fictitious and randomly generated. Public use of this database is not a breach of data privacy.",
"_____no_output_____"
],
[
"## Best-Selling Music Genres in the USA\n\nIn our first scenario, Chinook has signed a deal with a new record company, so its tracks can now be put up for sale on the Chinook store. The record company has 4 albums so far; below are the artist names and their genres.\n\n- Regal (Hip-Hop)\n- Red Tone (Punk)\n- Meteor and the Girls (Pop)\n- Slim Jim Bites (Blues)\n\nHowever, Chinook would like to spread its releases over time, so it will only add 3 albums to the store. Thus, we have to determine the best-selling genres on the store. Furthermore, since the record company would like to target a USA audience, we can narrow our analysis to Chinook's USA customers.\n\nFirst, we create a view called `usa_track_purchases`. This will show the genre, track name, unit price, and quantity bought for each of the invoice lines of USA customers.",
"_____no_output_____"
]
],
[
[
"%%sql\nDROP VIEW IF EXISTS usa_track_purchases;\n\nCREATE VIEW usa_track_purchases AS\n SELECT\n il.invoice_line_id AS invoice_line_id,\n g.name AS genre,\n t.name AS track_name,\n il.unit_price AS unit_price,\n il.quantity AS quantity\n FROM customer AS c\n INNER JOIN\n invoice AS iv\n ON iv.customer_id = c.customer_id\n INNER JOIN\n invoice_line AS il\n ON il.invoice_id = iv.invoice_id\n INNER JOIN\n track AS t\n ON t.track_id = il.track_id\n INNER JOIN\n genre AS g\n ON g.genre_id = t.genre_id\n WHERE c.country = \"USA\"\n;\n\nSELECT *\nFROM usa_track_purchases\nLIMIT 7\n;",
" * sqlite:///private/2021-07-31-Intermediate-SQL-Files/chinook.db\nDone.\nDone.\nDone.\n"
]
],
[
[
"By querying the view above, we will create another view, `usa_genre_sales`. This will contain the following specific information about each genre:\n\n- Number of tracks sold\n- Percentage of tracks sold\n- Total sales in US dollars",
"_____no_output_____"
]
],
[
[
"%%sql \nDROP VIEW IF EXISTS usa_genre_sales;\n\nCREATE VIEW usa_genre_sales AS\n SELECT\n genre,\n \n SUM(quantity) AS number_sold,\n \n --Get the quantity per genre and divide it by the total quantity of USA purchases.\n ROUND(\n CAST(SUM(quantity) AS Float)\n / CAST(\n (SELECT COUNT(*) FROM usa_track_purchases)\n AS Float\n )\n * 100.0,\n 2\n ) AS percentage_sold,\n \n ROUND(\n SUM(unit_price * CAST(quantity AS Float)),\n 2\n ) AS total_sales\n \n FROM usa_track_purchases\n GROUP BY genre\n ORDER BY number_sold DESC, total_sales DESC\n;",
" * sqlite:///private/2021-07-31-Intermediate-SQL-Files/chinook.db\nDone.\nDone.\n"
],
[
"%%sql result <<\nSELECT *\nFROM usa_genre_sales\n;",
" * sqlite:///private/2021-07-31-Intermediate-SQL-Files/chinook.db\nDone.\nReturning data to local variable result\n"
],
[
"usa_genre_df = result.DataFrame()\nusa_genre_df",
"_____no_output_____"
]
],
[
[
"We can make a bar graph from this result in order to communicate findings better.",
"_____no_output_____"
]
],
[
[
"#collapse-hide\n(\n alt.Chart(usa_genre_df)\n .mark_bar()\n .encode(\n x = alt.X(\"genre:N\", title = \"Music Genre\", sort = \"-y\"),\n y = alt.Y(\"percentage_sold:Q\", title = \"Percentage of All Purchases in the USA\"),\n color = alt.Color(\"total_sales:Q\", title = \"Total Sales (USD)\"),\n tooltip = usa_genre_df.columns.tolist(),\n )\n .properties(\n title = \"Popularity of Music Genres with Chinook's USA Customers\",\n height = 300,\n width = 600,\n )\n .configure_axis(\n labelAngle = 30,\n )\n .interactive()\n)",
"_____no_output_____"
]
],
[
[
"One can hover over each bar in the chart above for a tooltip with more specific information.\n\nResults show that Rock is the best-selling music genre as it makes up 53% of total purchases in the USA. Rock is followed by the Alternative & Punk and Metal genres, which each make up over 10% of purchases.\n\nOn a side note, the `total_sales` column's values are very close to that of the `number_sold` column. This can be explained by the fact that track prices range from USD 0.99 to USD 1.99.",
"_____no_output_____"
]
],
[
[
"%%sql\nSELECT\n MIN(unit_price) AS min_price,\n MAX(unit_price) AS max_price\nFROM track\n;",
" * sqlite:///private/2021-07-31-Intermediate-SQL-Files/chinook.db\nDone.\n"
]
],
[
[
"Since there is little variation among track prices, the genre with the most units sold is usually also the genre with the highest sales.\n\nGoing back to the scenario at hand, we need to compare the statistics on the Hip-Hop, Punk, Pop, and Blues genres. We will run a query for this below.",
"_____no_output_____"
]
],
[
[
"%%sql\nSELECT *\nFROM usa_genre_sales\nWHERE genre IN (\"Hip Hop/Rap\", \"Alternative & Punk\", \"Pop\", \"Blues\")\n;",
" * sqlite:///private/2021-07-31-Intermediate-SQL-Files/chinook.db\nDone.\n"
]
],
[
[
"The result above shows that Alternative & Punk, Blues, and Pop are the three best-selling genres out of the four. Notably, Alternative & Punk makes up 12.37% of all purchases in the USA.\n\nTherefore, we would recommend the Chinook administration to add the albums of **Red Tone (Punk), Slim Jim Bites (Blues), and Meteor and the Girls (Pop)** to the digital store. The album of Regal (Hip-Hop) has lower priority and can be added at a later date.",
"_____no_output_____"
],
[
"## Sales Support Agent Performance\n\nNext, Chinook is requesting us to evaluate the performance of its sales support agents. Each customer is assigned to an agent after their first purchase. Since there are only 3 agents, each agent provides support to many customers. Details about the agents are shown in the query below.",
"_____no_output_____"
]
],
[
[
"%%sql\nSELECT *\nFROM employee\nWHERE title = \"Sales Support Agent\"\n;",
" * sqlite:///private/2021-07-31-Intermediate-SQL-Files/chinook.db\nDone.\n"
]
],
[
[
"Since we have data on each customer's purchases, we can calculate the total purchases associated with each support agent. We can then use this to compare the performance of the agents.\n\nFirst, we create a view called `agent_customer` by joining the `employee` table with the `customer` table based on the support representative ID. We will include some extra details about each agent, such as their birthdate and hire date. We won't include their location since we know that they are all in Calgary, AB, Canada. As for the customers, we will include their customer ID, name, and total purchases as the sum of their invoices.",
"_____no_output_____"
]
],
[
[
"%%sql\nDROP VIEW IF EXISTS agent_customer;\n\nCREATE VIEW agent_customer AS\n SELECT\n e.employee_id AS agent_id,\n e.first_name || \" \" || e.last_name AS agent_name,\n e.birthdate AS agent_bd,\n e.hire_date AS agent_hire_date,\n c.customer_id AS customer_id,\n c.first_name || \" \" || c.last_name AS customer_name,\n SUM(iv.total) AS customer_total_purchases\n FROM employee AS e\n LEFT JOIN\n customer AS c\n ON c.support_rep_id = e.employee_id\n LEFT JOIN\n invoice AS iv\n ON iv.customer_id = c.customer_id\n WHERE e.title = \"Sales Support Agent\"\n GROUP BY c.customer_id\n ORDER BY agent_id, customer_id\n;",
" * sqlite:///private/2021-07-31-Intermediate-SQL-Files/chinook.db\nDone.\nDone.\n"
],
[
"%%sql result <<\nSELECT *\nFROM agent_customer\n;",
" * sqlite:///private/2021-07-31-Intermediate-SQL-Files/chinook.db\nDone.\nReturning data to local variable result\n"
],
[
"agent_customer_df = result.DataFrame()\nagent_customer_df.head()",
"_____no_output_____"
]
],
[
[
"Next, we will query this view in order to determine the following information about each agent:\n\n- number of customers\n- total sales from the agent's customers\n- percentage of all agents' sales\n- average sales per customer",
"_____no_output_____"
]
],
[
[
"%%sql\nDROP VIEW IF EXISTS agent_stats;\n\nCREATE VIEW agent_stats AS\n SELECT\n agent_id,\n agent_name,\n agent_bd,\n agent_hire_date,\n COUNT(customer_id) AS number_customers,\n ROUND(\n SUM(customer_total_purchases),\n 2\n ) AS sales_number,\n ROUND(\n SUM(customer_total_purchases) \n / (SELECT SUM(customer_total_purchases) FROM agent_customer)\n * 100,\n 2\n )AS sales_percentage,\n ROUND(\n AVG(customer_total_purchases),\n 2\n ) AS average_per_customer\n FROM agent_customer\n GROUP BY agent_id\n;\n\nSELECT *\nFROM agent_stats\n;",
" * sqlite:///private/2021-07-31-Intermediate-SQL-Files/chinook.db\nDone.\nDone.\nDone.\n"
]
],
[
[
"At first glance, Jane Peacock seems like the best-performing agent since Chinook got the highest total sales from her customers (USD 1731.51). This idea is cast into doubt when it is mentioned that she had the highest number of customers (21).\n\nAlso, the differences among the average sales per customer is quite small. Each of Jane's customers spends 3 more dollars on Chinook than each of Margaret's. Each of Margaret's customers spends 2 more dollars on Chinook than each of Steve Johnson's.\n\nThe average sales per customer may also be influenced by outliers, as shown by the boxplot below.",
"_____no_output_____"
]
],
[
[
"#collapse-hide\n(\n alt.Chart(agent_customer_df)\n .mark_boxplot()\n .encode(\n y = alt.Y(\"customer_total_purchases:Q\", title = \"Customer Total Purchases\"),\n x = alt.X(\"agent_name:N\", title = \"Agent Name\"),\n )\n .properties(\n title = \"Distribution of Customer Purchases by Sales Support Agent\",\n height = 300,\n width = 500,\n )\n .interactive()\n)",
"_____no_output_____"
]
],
[
[
"If we hover over each of the boxes above, we can get more information such as minimum, 1st quartile, median, etc. The median is less influenced by outliers, so we can look at that. The median values are 79.20 (Jane), 77.72 (Margaret), and 75.74 (Steve). These values are still very close to each other.\n\nTherefore, we cannot conclusively state that any agent performs better than the others.",
"_____no_output_____"
],
[
"## Sales Data by Country\n\nNext, Chinook is requesting us to analyze sales in each country where it offers its service. Specifically, they would like to know the:\n\n- total number of customers\n- total value of sales\n- average sales per customer\n- average order value\n - Every order is a batch purchase of multiple tracks.\n \nFurthermore, since there are some countries in the database with only one customer, we shall group these customers together under a category called \"Other\", which must appear at the very bottom of our final result.\n\nFirst, we will create a view, `country_invoices`, which shows all invoices and the country of the customer.",
"_____no_output_____"
]
],
[
[
"%%sql\nDROP VIEW IF EXISTS country_invoices;\n\nCREATE VIEW country_invoices AS\n SELECT\n c.country AS country,\n c.customer_id AS customer_id,\n c.first_name || \" \" || c.last_name AS customer_name,\n iv.invoice_id AS invoice_id,\n iv.total AS order_value\n FROM customer AS c\n LEFT JOIN\n invoice AS iv\n ON iv.customer_id = c.customer_id\n;\n\nSELECT *\nFROM country_invoices\nLIMIT 5\n;",
" * sqlite:///private/2021-07-31-Intermediate-SQL-Files/chinook.db\nDone.\nDone.\nDone.\n"
]
],
[
[
"Then, we will create a view, `country_customers`, which shows the total purchases per customer.",
"_____no_output_____"
]
],
[
[
"%%sql\nDROP VIEW IF EXISTS country_customers;\n\nCREATE VIEW country_customers AS\n SELECT\n country,\n customer_id,\n customer_name,\n SUM(order_value) AS customer_total_purchase\n FROM country_invoices\n GROUP BY customer_id\n;\n\nSELECT *\nFROM country_customers\nLIMIT 5\n;",
" * sqlite:///private/2021-07-31-Intermediate-SQL-Files/chinook.db\nDone.\nDone.\nDone.\n"
]
],
[
[
"We will then create a view called `country_labels`. It will show the number of customers per country. Countries with only 1 customer will be given a label of \"Other\".",
"_____no_output_____"
]
],
[
[
"%%sql\nDROP VIEW IF EXISTS country_labels;\n\nCREATE VIEW country_labels AS\n SELECT\n country,\n COUNT(customer_id) AS number_customers,\n CASE\n WHEN COUNT(customer_id) > 1 THEN country\n ELSE \"Other\"\n END AS country_label,\n CASE\n WHEN COUNT(customer_id) > 1 THEN 0\n ELSE 1\n END AS is_other\n FROM country_customers\n GROUP BY country\n;\n\nSELECT *\nFROM country_labels\nLIMIT 10\n;",
" * sqlite:///private/2021-07-31-Intermediate-SQL-Files/chinook.db\nDone.\nDone.\nDone.\n"
]
],
[
[
"We will also create a view called `country_avg_order` which simply shows the average order value per country. This will be done by querying the `country_invoices` view.",
"_____no_output_____"
]
],
[
[
"%%sql\nDROP VIEW IF EXISTS country_avg_order;\n\nCREATE VIEW country_avg_order AS\n SELECT\n cl.country_label,\n AVG(civ.order_value) AS avg_order_value\n FROM country_invoices AS civ\n INNER JOIN\n country_labels AS cl\n ON cl.country = civ.country\n GROUP BY cl.country_label\n;\n\nSELECT *\nFROM country_avg_order\n;",
" * sqlite:///private/2021-07-31-Intermediate-SQL-Files/chinook.db\nDone.\nDone.\nDone.\n"
]
],
[
[
"Finally, we will create a view called `country_stats` that shows all of the 4 statistics that were requested by the Chinook management. The \"Other\" entry will be forced to the bottom of the query result using the `is_other` column we created in the `country_labels` view.",
"_____no_output_____"
]
],
[
[
"%%sql\nDROP VIEW IF EXISTS country_stats;\n\nCREATE VIEW country_stats AS\n SELECT\n l.country_label,\n COUNT(c.customer_id) AS number_customers,\n ROUND(SUM(c.customer_total_purchase), 2) AS total_sales,\n ROUND(AVG(c.customer_total_purchase), 2) AS avg_sales_customer,\n ROUND(a.avg_order_value, 2) AS avg_order_value\n FROM country_customers AS c\n INNER JOIN\n country_labels AS l\n ON l.country = c.country\n INNER JOIN\n country_avg_order AS a\n ON a.country_label = l.country_label\n GROUP BY l.country_label\n ORDER BY\n l.is_other ASC,\n total_sales DESC\n;",
" * sqlite:///private/2021-07-31-Intermediate-SQL-Files/chinook.db\nDone.\nDone.\n"
],
[
"%%sql result <<\nSELECT *\nFROM country_stats\n;",
" * sqlite:///private/2021-07-31-Intermediate-SQL-Files/chinook.db\nDone.\nReturning data to local variable result\n"
],
[
"country_stats_df = result.DataFrame()\ncountry_stats_df",
"_____no_output_____"
]
],
[
[
"We can make a scatter plot with the data above in order to make results more clear.",
"_____no_output_____"
]
],
[
[
"#collapse-hide\n# Exclude the \"Other\" entry.\nmain_df = country_stats_df.loc[country_stats_df[\"country_label\"] != \"Other\"]\n\n# Base layer.\nbase = (\n alt.Chart(main_df)\n .mark_point(size = 300) # Set default size of points to 300 pixels.\n .encode(\n x = alt.X(\"number_customers:Q\", title = \"Number of Customers\"),\n y = alt.Y(\"total_sales:Q\", title = \"Total Sales (USD)\"),\n )\n)\n\n# Scatter plot layer.\npoints = base.encode(\n color = alt.Color(\"avg_sales_customer:Q\", title = \"Average Sales per Customer (USD)\"),\n tooltip = country_stats_df.columns.tolist(),\n)\n\n# Text layer.\ntext = (\n base\n .mark_text( # Move text to the top left of each point.\n align = \"right\",\n dy = -5,\n dx = -20,\n )\n .encode(\n text = \"country_label:N\"\n )\n)\n\n# Combine layers.\nchart = (\n (points + text)\n .properties(\n title = \"Chinook Sales by Country\",\n height = 300,\n width = 700,\n )\n .interactive()\n)\n\n# Display chart.\nchart",
"_____no_output_____"
]
],
[
[
"Looking at the top right of the chart, we can see that the USA has the highest number of Chinook customers (13), as well as the highest total sales (USD 1040.49). All of the other countries have only 2 to 8 customers.\n\nOn the other hand, the Czech Republic has the highest average sales per customer at USD 136.62; this is indicated by its dark color in the chart. This country also had the highest average order value at USD 9.11. This means that though the country has few customers, these people are avid buyers of music.\n\nChinook may benefit from marketing its service more aggressively in countries other than the USA where it has gained a good foothold, such as **Canada, Brazil, and France**. Chinook may also target the **Czech Republic** since the customers there seem to buy a lot of music on a per-person basis.",
"_____no_output_____"
],
[
"## Comparing Purchase Types: Full Album vs Selected Sets\n\nIn our last scenario, we have been requested to compare the popularities of Chinook's two purchase types:\n\n- Full album\n - The customer buys a full album.\n - Albums are pre-defined in Chinook's library. The customer may not add other tracks on top of an album.\n- Selected set of tracks\n - The customer manually selects any number of individual tracks.\n\nIn both cases, each track is bought at its unit price; there are no discounts.\n\nCurrently, Chinook's purchasing strategy is to **buy full albums** from record companies. However, Chinook doesn't know whether full album purchases are popular among its customers. If selected sets are more popular, then Chinook may switch to a new strategy in which it will only **buy the most popular individual tracks** from record companies.\n\nOur analysis will help Chinook make the final decision. For each purchase type, we will show the number of invoices and percentage of all invoices with that type. The type with the higher number will be the more popular one.\n\nFirst, we create a view, `invoice_tracks`, that shows all tracks under each invoice. The album associated with each track is also shown.",
"_____no_output_____"
]
],
[
[
"%%sql\nDROP VIEW IF EXISTS invoice_tracks_bought;\n\nCREATE VIEW invoice_tracks_bought AS\n SELECT\n iv.invoice_id AS invoice_id,\n il.track_id AS track_id,\n t.name AS track_name,\n t.album_id AS album_id,\n a.title AS album_name\n FROM invoice AS iv\n INNER JOIN\n invoice_line AS il\n ON il.invoice_id = iv.invoice_id\n INNER JOIN\n track AS t\n ON t.track_id = il.track_id\n INNER JOIN\n album AS a\n ON a.album_id = t.album_id\n ORDER BY invoice_id, track_id, album_id\n;\n\nSELECT *\nFROM invoice_tracks_bought\nLIMIT 7\n;",
" * sqlite:///private/2021-07-31-Intermediate-SQL-Files/chinook.db\nDone.\nDone.\nDone.\n"
]
],
[
[
"Then, we create a view called `invoice_album`, which shows only the album associated with the first track of each invoice. This way, there is only **one row per invoice**. We will also include the total purchase amount of each invoice so that we can use it later.",
"_____no_output_____"
]
],
[
[
"%%sql\nDROP VIEW IF EXISTS invoice_album;\n\nCREATE VIEW invoice_album AS \n SELECT\n itb.invoice_id AS invoice_id,\n MIN(itb.album_id) AS album_id,\n itb.album_name AS album_name,\n iv.total AS total_purchase\n FROM invoice_tracks_bought AS itb\n INNER JOIN\n invoice AS iv\n ON iv.invoice_id = itb.invoice_id\n GROUP BY itb.invoice_id\n ORDER BY itb.invoice_id\n;\n\nSELECT *\nFROM invoice_album\nLIMIT 5\n;",
" * sqlite:///private/2021-07-31-Intermediate-SQL-Files/chinook.db\nDone.\nDone.\nDone.\n"
]
],
[
[
"Then, we join `invoice_album` with the `track` table in order to **list all of the tracks** under the album associated with each invoice. The result will be a view called `invoice_full_album_tracks`.",
"_____no_output_____"
]
],
[
[
"%%sql\nDROP VIEW IF EXISTS invoice_full_album_tracks;\n\nCREATE VIEW invoice_full_album_tracks AS\n SELECT\n ia.*,\n t.track_id AS track_id,\n t.name AS track_name\n FROM invoice_album AS ia\n INNER JOIN\n track AS t\n ON t.album_id = ia.album_id\n ORDER BY invoice_id, album_id, track_id\n;\n\nSELECT *\nFROM invoice_full_album_tracks\nLIMIT 5\n;",
" * sqlite:///private/2021-07-31-Intermediate-SQL-Files/chinook.db\nDone.\nDone.\nDone.\n"
]
],
[
[
"The `invoice_full_album_tracks` view looks very similar to the `invoice_tracks_bought` view at first glance. However, there is a difference:\n\n- `invoice_tracks_bought`\n - It contains the tracks **bought** in each invoice.\n - Each set of tracks may or may not be a full album.\n- `invoice_full_album_tracks`\n - It contains all of the tracks under one album associated with an invoice.\n - Some of these tracks **may not have been bought** by the customer.\n \nNext, we will create a new view called `invoice_purchase_type` which indicates whether each invoice is a \"Full Album\" or \"Selected Set\" purchase.\n\nIn order to determine this, we will have a `CASE` statement which can be explained as follows:\n\n- `WHEN` clause: If the set of tracks bought and the full album are **exactly the same**, mark the purchase type as \"Full Album\".\n- `ELSE` clause: Otherwise, mark the purchase type as \"Selected Set\".\n\nInside the `WHEN` clause, we have a rather complicated-looking set of operations. Let's look at one part:\n\n (\n SELECT itb.track_id\n FROM invoice_tracks_bought AS itb\n WHERE itb.invoice_id = ia.invoice_id\n\n EXCEPT\n\n SELECT ifa.track_id\n FROM invoice_full_album_tracks AS ifa\n WHERE ifa.invoice_id = ia.invoice_id\n ) IS NULL\n \nIn order to make the explanation more simple, we can call the subqueries above \"set 1\" and \"set 2\".\n\n (\n {set 1}\n\n EXCEPT\n\n {set 2}\n ) IS NULL\n \n- Set 1 represents all tracks bought in one invoice.\n- Set 2 represents the full set of tracks in an album associated with the invoice.\n- Via `EXCEPT` and `IS NULL`, we check whether Set 1 is a subset of Set 2.\n - If it is, the result is True.\n - Otherwise, False.\n\nWe then repeat the same process but *in reverse*, to check if Set 2 is a subset of Set 1. We thus end up with *two boolean values*, and we use the `AND` operator on these.\n\n (\n {set 1}\n\n EXCEPT\n\n {set 2}\n ) IS NULL\n\n AND\n\n (\n {set 2}\n\n EXCEPT\n\n {set 1}\n ) IS NULL\n\nThe purpose of `AND` is to determine the following.\n\n- If both conditions are True:\n - The two sets of tracks match exactly.\n - The invoice is a \"Full Album\" purchase.\n- If any condition is False:\n - The two sets of tracks do not match exactly.\n - The invoice is a \"Selected Set\" purchase.\n \nThe full query is shown below.",
"_____no_output_____"
]
],
[
[
"%%sql\nDROP VIEW IF EXISTS invoice_purchase_type;\n\nCREATE VIEW invoice_purchase_type AS\n SELECT\n ia.invoice_id,\n CASE\n WHEN (\n\n (\n SELECT itb.track_id\n FROM invoice_tracks_bought AS itb\n WHERE itb.invoice_id = ia.invoice_id\n\n EXCEPT\n\n SELECT ifa.track_id\n FROM invoice_full_album_tracks AS ifa\n WHERE ifa.invoice_id = ia.invoice_id\n ) IS NULL\n\n AND\n\n (\n SELECT ifa.track_id\n FROM invoice_full_album_tracks AS ifa\n WHERE ifa.invoice_id = ia.invoice_id\n\n EXCEPT\n\n SELECT itb.track_id\n FROM invoice_tracks_bought AS itb\n WHERE itb.invoice_id = ia.invoice_id\n ) IS NULL\n\n ) THEN \"Full Album\"\n ELSE \"Selected Set\"\n END AS purchase_type,\n ia.total_purchase AS total_purchase\n FROM invoice_album AS ia\n;\n\nSELECT *\nFROM invoice_purchase_type\nLIMIT 10\n;",
" * sqlite:///private/2021-07-31-Intermediate-SQL-Files/chinook.db\nDone.\nDone.\nDone.\n"
]
],
[
[
"With the view above, we can finally answer the question being asked. A view called `purchase_type_proportion` will be created which shows the following information per purchase type:\n\n- number of invoices\n- percentage of invoices\n- average sales per invoice\n- sales in USD\n- percentage of total sales",
"_____no_output_____"
]
],
[
[
"%%sql\nDROP VIEW IF EXISTS purchase_type_proportion;\n\nCREATE VIEW purchase_type_proportion AS\n SELECT\n purchase_type,\n \n COUNT(purchase_type) AS type_count,\n \n ROUND(\n CAST(COUNT(purchase_type) AS Float)\n / CAST(\n (SELECT COUNT(purchase_type)\n FROM invoice_purchase_type)\n AS Float\n )\n * 100,\n 2\n ) AS type_percentage,\n \n ROUND(\n AVG(total_purchase),\n 2\n ) AS avg_sales_per_invoice,\n \n ROUND(\n SUM(total_purchase),\n 2\n ) AS sales_number,\n \n ROUND(\n SUM(total_purchase)\n / (SELECT SUM(total_purchase)\n FROM invoice_purchase_type)\n * 100,\n 2\n ) AS sales_percentage\n \n FROM invoice_purchase_type\n GROUP BY purchase_type\n ORDER BY type_count DESC\n;",
" * sqlite:///private/2021-07-31-Intermediate-SQL-Files/chinook.db\nDone.\nDone.\n"
],
[
"%%sql result <<\nSELECT *\nFROM purchase_type_proportion\n;",
" * sqlite:///private/2021-07-31-Intermediate-SQL-Files/chinook.db\nDone.\nReturning data to local variable result\n"
],
[
"purchase_type_df = result.DataFrame()\npurchase_type_df",
"_____no_output_____"
]
],
[
[
"The `type_percentage` column shows that the majority (81%) of all Chinook invoices are Selected Sets. Full Album purchases only make up 19% of all invoices.\n\nThis is shown in the pie chart below.",
"_____no_output_____"
]
],
[
[
"#collapse-hide\ncolors = sns.color_palette('pastel')[:2]\n\nplt.pie(\n x = purchase_type_df[\"type_percentage\"],\n labels = purchase_type_df[\"purchase_type\"],\n colors = colors,\n autopct = \"%0.2f%%\",\n)\n\nplt.title(\"Proportion of Chinook Store Invoices by Purchase Type\")\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"However, the average sales per Full Album purchase is *almost twice* that of a Selected Set purchase. This is shown in the bar chart below.",
"_____no_output_____"
]
],
[
[
"#collapse-hide\n# Base layer with bar chart\nbase = (\n alt.Chart(purchase_type_df)\n .mark_bar()\n .encode(\n x = alt.X(\"purchase_type:N\", title = \"Purchase Type\"),\n y = alt.Y(\"avg_sales_per_invoice:Q\", title = \"Average Sales per Invoice (USD)\"),\n tooltip = purchase_type_df.columns.tolist(),\n )\n)\n\n# Text layer\ntext = (\n base\n .mark_text(\n align = \"center\",\n dy = 10,\n color = \"white\",\n )\n .encode(\n text = \"avg_sales_per_invoice:Q\",\n )\n)\n\n# Combine layers into one chart\nchart = (\n (base + text)\n .properties(\n title = \"Average Sales per Invoice by Purchase Type\",\n height = 300,\n width = 200,\n )\n .configure_axis(labelAngle = 30)\n .interactive()\n)\n\n# Display chart.\nchart",
"_____no_output_____"
]
],
[
[
"Full Albums cost more than Selected Sets because the former tend to have more tracks. Thus, even if Full Albums only represent 19% of all invoices, these also represent *31% of all dollar sales*. This is shown in the pie chart below.",
"_____no_output_____"
]
],
[
[
"#collapse-hide\nplt.pie(\n x = purchase_type_df[\"sales_percentage\"],\n labels = purchase_type_df[\"purchase_type\"],\n colors = colors,\n autopct = \"%0.2f%%\",\n)\n\nplt.title(\"Proportion of Chinook Store Sales by Purchase Type\")\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"Overall, though, Selected Sets still represent the majority of Chinook's invoices (81%) and sales (69%). Therefore, we recommend that Chinook shift to a new purchasing strategy in which it **only buys the most popular tracks** from record companies. Chinook should not buy full albums since it is less likely for customers to purchase these.",
"_____no_output_____"
],
[
"# Conclusion\n\nIn this project, we used intermediate SQL techniques to answer business questions for a hypothetical digital media store called Chinook. We solved a total of 4 scenarios by creating views and gradually working towards a final query. We also ended each scenario with a chart that communicates our findings more engagingly.\n\nBelow is a summary of all of the questions and our findings.\n\n<br/>\n\n*What are the best-selling music genres with regards to USA customers? Based on this, which new albums should be purchased for the Chinook store?*\n\nRock is the best-selling music genre as it makes up 53% of total purchases in the USA. Rock is followed by the Alternative & Punk genre and Metal genre, which each make up over 10% of purchases.\n\nAmong the 4 new albums which may be added to the digital store, we recommend the following 3 artists' albums: Red Tone (Punk), Slim Jim Bites (Blues), and Meteor and the Girls (Pop).\n\n<br/>\n\n*Which of Chinook's sales support agents has the highest total sales from their assigned customers? Can the exemplary performance of these employees be explained by any information in the database?*\n\nSales support agent Jane Peacock has the highest total sales (37% of all sales) from her customers. She also has the highest average sales per customer (USD 82).\n\nHowever, her statistics are only slightly higher than that of her colleagues. Therefore, we cannot conclusively say that she is the best-performing agent.\n\n<br/>\n\n*What are the statistics on the customers and sales for each country where Chinook offers its service?*\n\nThe USA has the highest number of Chinook customers (13), as well as the highest total sales (USD 1040.49). All of the other countries have only 2 to 8 customers.\n\nWe recommend marketing Chinook more aggressively in Canada, Brazil, and France since these countries have the highest number of customers after the USA. We also recommend expanding into the Czech Republic since it has a high average value of sales per customer.\n\n<br/>\n\n*How many purchases are full albums, and how many are manually selected sets of tracks? Based on this, what strategy should Chinook adopt when buying new tracks from record companies?*\n\nSelected Sets account for 81% of all invoices and 69% of total sales. Therefore, these are more popular than Full Albums.\n\nThus, we recommend that Chinook buy only the most popular tracks from record companies. This would be more cost-effective than buying full albums.\n\n---\n\nThat concludes this project. Thanks for reading!",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
]
|
ec6a6bcf3ef75c99fe63250b6da750bf41d29c8a | 914,769 | ipynb | Jupyter Notebook | GP-Class-Design.ipynb | DennisReddyhoff/ContinualGP | 166c1c88b7c6183aed9ef89f6fc3fccdbf338b01 | [
"MIT"
]
| null | null | null | GP-Class-Design.ipynb | DennisReddyhoff/ContinualGP | 166c1c88b7c6183aed9ef89f6fc3fccdbf338b01 | [
"MIT"
]
| null | null | null | GP-Class-Design.ipynb | DennisReddyhoff/ContinualGP | 166c1c88b7c6183aed9ef89f6fc3fccdbf338b01 | [
"MIT"
]
| null | null | null | 1,210.011905 | 321,144 | 0.955606 | [
[
[
"# Gaussian Process Class Design",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nimport numpy as np\nimport time\nimport gpflow\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nfrom gpflow.utilities import set_trainable\nfrom IPython.display import clear_output\nfrom tensorflow_probability import distributions as tfd\nimport json\n# for reproducibility of this notebook:\ntf.random.set_seed(42)\nnp.random.seed(42)",
"_____no_output_____"
]
],
[
[
"## Step 1: Prototype model\nWe implement a simple 1D model and define a periodic latent function which takes inputs X and returns outputs Y. Our training data is a noisy sample of the true function. Here the data is returned by a function but in reality this would be an endpoint. See notes at end for further thoughts on datasets.",
"_____no_output_____"
]
],
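The note above says that in reality the data would come from an endpoint rather than a local function. A minimal sketch of what that swap might look like is below; the URL and the JSON payload shape (keys x and y) are assumptions, not an existing service.

```python
import numpy as np
import requests  # assumes the requests package is available in the environment


def get_data_from_endpoint(url="https://example.com/api/training-data"):
    """Fetch (X, Y) training pairs from a hypothetical JSON endpoint.

    The endpoint is expected to return lists under the keys x and y; the real
    service would define its own schema, pagination and authentication.
    """
    payload = requests.get(url, timeout=10).json()
    X = np.asarray(payload["x"], dtype=float)[:, None]
    Y = np.asarray(payload["y"], dtype=float)[:, None]
    return X, Y
```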
[
[
"def true_f(X_input):\n return (3 * np.cos(5 * np.pi * X_input + 1*np.pi)) ",
"_____no_output_____"
],
[
"def get_data():\n N_train = 5000\n N_test = 1250\n max_X = 2\n Xtrain = np.sort(max_X*np.random.rand(N_train))[:, None] \n Ytrain = np.random.normal(true_f(Xtrain),scale=1.3)\n \n Xtest = np.sort(np.linspace(0, max_X, N_test))[:, None] \n Ytest = true_f(Xtest)\n \n return (Xtrain, Ytrain), (Xtest, Ytest)",
"_____no_output_____"
]
],
[
[
"We can plot our data and the latent function",
"_____no_output_____"
]
],
[
[
"train_data, test_data = get_data()\n\nplt.figure(figsize=(15,7))\nplt.scatter(train_data[0], train_data[1], alpha=0.5,marker=\"x\")\nXt = np.linspace(0, 2, 200)[:, None]\nYt = true_f(Xt)\n_ = plt.plot(Xt, Yt, c=\"k\")\nplt.title(\"True f(X)\")",
"_____no_output_____"
]
],
[
[
"We define a Sparse Variational Gaussian Process, which requires a user-input kernel, likelihood and an array of inducing points. We use 15 inducing points here, but when pushing to production we would need to test our results with different values. We use a periodic kernel and guassian likelihood and space our inducing points evenly over the domain initially.",
"_____no_output_____"
]
],
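As noted above, the choice of 15 inducing points should be stress-tested before production. A rough comparison sketch is below; it reuses the kernel and likelihood from the get_model() helper defined in the next cell and the Trainer class from Step 2, and the candidate counts are arbitrary.

```python
# Rough sketch (not run here): compare held-out RMSE for a few inducing point counts.
for num_inducing in [5, 15, 30]:
    Z = np.linspace(0, 2, num_inducing)[:, None]
    candidate = gpflow.models.SVGP(
        kernel=gpflow.kernels.Periodic(base_kernel=gpflow.kernels.RBF()) + gpflow.kernels.Bias(),
        inducing_variable=Z,
        likelihood=gpflow.likelihoods.Gaussian(),
    )
    Trainer(candidate, train_data).train_model(500, with_output=False)
    pred_mean, _ = candidate.predict_y(test_data[0])
    rmse = tf.keras.metrics.RootMeanSquaredError()(test_data[1], pred_mean).numpy()
    print(num_inducing, "inducing points -> test RMSE", rmse)
```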
[
[
"def get_model():\n Z = np.linspace(0,2,15)[:,None]\n \n kernel = (gpflow.kernels.Periodic(base_kernel=gpflow.kernels.RBF()) *\n gpflow.kernels.Periodic(base_kernel=gpflow.kernels.RBF())\n )\n model = gpflow.models.SVGP(kernel=gpflow.kernels.Periodic(base_kernel=gpflow.kernels.RBF())+gpflow.kernels.Bias(), \n inducing_variable=Z, \n likelihood = gpflow.likelihoods.Gaussian())\n return model",
"_____no_output_____"
]
],
[
[
"## Step 2: Define Trainer class\n\nI've defined a seperate Trainer class which takes the model and dataset and iterates over a training step to optimize the model. I've tried to use as few gpflow abstractions as possible (e.g. defining own KL and elbo functions rather than using inbuilt) so that the code is more customisable in the long run. In my tests, using optimizer.minmize is quicker than the gradient tape method. I also fix the inducing points to start but optimize them every 200 steps, this provides a good trade off of prediction accuracy and training speed.",
"_____no_output_____"
]
],
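For reference, the gradient-tape alternative mentioned above could be written roughly as below. It is a sketch only: it assumes the same elbo(), model and model_optimizer attributes as the Trainer class defined in the next cell.

```python
# Sketch of the (slower) gradient-tape alternative to optimizer.minimize.
@tf.function
def optimization_step_with_tape(self):
    with tf.GradientTape() as tape:
        loss = self.elbo()  # elbo() already returns the negative ELBO, so minimize it directly
    grads = tape.gradient(loss, self.model.trainable_variables)
    self.model_optimizer.apply_gradients(zip(grads, self.model.trainable_variables))
    return -1 * loss  # report the (positive) ELBO, matching optimization_step
```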
[
[
"class Trainer():\n '''Trainer class for GPFlow SVGP model'''\n \n def __init__(self, \n model, \n data, \n optimizer=tf.optimizers.Adam()):\n '''\n Initialise trainer class\n \n arguments:\n model - GPFlow SVGP model\n data - tuple, (X,Y) where X are observed input and Y are observed outputs\n optimizer - optional, tf.optimizer object, default is adam\n '''\n \n self.model = model\n self.data = data\n self.model_optimizer = optimizer \n \n def get_KL(self):\n '''\n Compare KL divergence between prior and posterior distributions\n \n return:\n KL - Tensor of KL divergence \n '''\n \n #get cholesky decomposition of covariance matrix conditioned on inducing points\n Z = self.model.inducing_variable.Z\n K = self.model.kernel(Z)\n K = tf.linalg.cholesky(K+0.005*tf.eye(K.shape[0], dtype='float64'))\n #calculate KL divergence\n pu = tfd.MultivariateNormalTriL(scale_tril=K)\n qu = tfd.MultivariateNormalTriL(self.model.q_mu[:,0],self.model.q_sqrt) \n KL = tfd.kl_divergence(qu,pu)\n\n return KL\n \n def elbo(self):\n '''\n Get Evidence Based Lower Bound (ELBO) for training data\n \n return:\n elbo - Tensor of ELBO for model given (X,Y)\n '''\n \n X, Y = self.data\n kl = self.get_KL()\n \n #predict Ypred for given X\n f_mean, f_var = self.model.predict_f(X)\n #get variational expectation for Ypred given Y\n var_exp = self.model.likelihood.variational_expectations(f_mean, f_var, Y) \n var_exp = tf.reduce_sum(var_exp) \n #compute elbo, return negative ELBO to minimize\n elbo = var_exp - kl\n elbo = -1*elbo\n\n return elbo\n \n @tf.function\n def optimization_step(self):\n '''\n Calculate gradients and optimize model, uses tf.function to pass function as a graph\n \n return:\n loss - Tensor of ELBO for model given (X,Y)\n '''\n \n #disables training of inducing points\n \n #optimize hyperparameters given model loss for this training step\n self.model_optimizer.minimize(self.elbo, self.model.trainable_variables)\n loss = -1*self.elbo()\n \n return loss\n \n def train_model(self, iterations, with_output=True):\n '''\n Train model and log model losses\n \n arguments:\n iterations - number of training iterations, the optimization step is called once per iteration\n with_output - optional, if True will print information at each logging step\n \n returns:\n model_loss - returns np array of size \n '''\n \n #initialise array to log model losses\n self.model_loss = np.array([])\n #fix inducing points\n set_trainable(self.model.inducing_variable, False)\n #loop optimization step and log losses, if with_output is true, record time and pass to logging func \n if with_output:\n tic = time.perf_counter() \n for step in range(iterations):\n #train inducing locations every 200 steps\n if step % 200 == 0:\n set_trainable(self.model.inducing_variable, True)\n loss = self.optimization_step()\n set_trainable(self.model.inducing_variable, False)\n else:\n loss = self.optimization_step()\n #log losses every 100 iterations, pass time and step information if with_output is True\n if step % 100 == 0:\n if with_output:\n toc = time.perf_counter()\n self._log(step, loss, tic, toc, iterations)\n else:\n self._log(step, loss)\n #log final loss values\n if with_output:\n self._log((step+1), loss, tic, toc, iterations)\n else:\n self._log((step+1), loss)\n #reshape array into expected output\n model_loss = self.model_loss.reshape(-1,2)\n \n return model_loss\n \n def _log(self, step, loss, *args):\n '''\n Log model losses and print outputs if passed additional arguments\n \n arguments:\n step - the current training iteration\n loss 
- model loss at current iteration\n *tic - optional, time at training start\n *toc - optional, time at current iteration\n *iterations - optional, maximum iterations for given loop\n ''' \n \n #initialise logging parameters if passed\n if args:\n tic, toc, iterations = args\n #for intial timestep, log loss and optionally print output \n if step == 0: \n self.model_loss = np.append(self.model_loss, [step, loss.numpy().item()])\n if args:\n print(\"Starting training\")\n #for each call, log the current model loss and optionally print output\n else:\n self.model_loss = np.append(self.model_loss, [step, loss.numpy().item()])\n if args:\n elapsed = toc-tic\n periter = elapsed/step \n clear_output(wait=True) \n print(\"Iteration {}/{}, elapsed time: {:.2f} seconds, time per iteration: {:.3f} seconds\".format(step, \n iterations, \n elapsed, \n periter)) ",
"_____no_output_____"
]
],
[
[
"Now we can build the SVGP model, pass it to the trainer with the data and train, you can use more iterations if you wish, this is just for speed. On my machine, this is quicker on the CPU than on the GPU",
"_____no_output_____"
]
],
[
[
"model=get_model()\ntrainer = Trainer(model,train_data)\nwith tf.device('/CPU:0'):\n loss = trainer.train_model(2500)",
"Iteration 2500/2500, elapsed time: 17.30 seconds, time per iteration: 0.007 seconds\n"
]
],
[
[
"We can plot our predicted Xtest results vs the latent function, notice that the inducing points have moved.",
"_____no_output_____"
]
],
[
[
"Xtest, Ytrue = test_data\nXtrain, Ytrain = train_data\nYpred, Yvar = model.predict_y(Xtest)\nZ = model.inducing_variable.Z.numpy()\n\nplt.figure(figsize=(15,7))\nplt.plot(Xtest, Ytrue, c='k')\nplt.plot(Xtest, Ypred,c=\"#ffa01f\")\nplt.plot(Xtest,(Ypred - 2 * Yvar ** 0.5),c=\"#ffa01f\", linestyle='--')\nplt.plot(Xtest,(Ypred + 2 * Yvar ** 0.5), c=\"#ffa01f\", linestyle='--')\nplt.scatter(Xtrain,Ytrain, alpha=0.3,marker=\"x\")\nplt.scatter(Z, np.zeros_like(Z), c='k')",
"_____no_output_____"
],
[
"rmse = tf.keras.metrics.RootMeanSquaredError()\nrmse(Ytrue,Ypred).numpy()",
"_____no_output_____"
]
],
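To quantify the statement about the inducing points moving, a quick check like the one below can be run after training; it compares the optimized locations with the evenly spaced grid used at initialisation.

```python
# Compare optimized inducing point locations with the initial evenly spaced grid.
Z_init = np.linspace(0, 2, 15)[:, None]
Z_opt = np.sort(model.inducing_variable.Z.numpy(), axis=0)
shift = np.abs(Z_opt - Z_init)
print("mean shift:", shift.mean(), "max shift:", shift.max())
```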
[
[
"Not bad, would be better if run for more iterations or if inducing points are optimized more often",
"_____no_output_____"
],
[
"## Step 3: Define Model Class\n\nHere we inherit from the SGVP class and our trainer to produce a simple class which can be run in a container or further broken out in to microservices. Again, get_train_data() would pull down from an API end point in reality. \n\nWe initialise our model as before in the init function. To train our model we load the dataset, pass it to the trainer and return the logs which would be saved to disk for comparison with past models, I also expect there would be a validation step here when we're pushing to production.\n\nOnce trained, our train function saves the model parameters as a json file. We can then load these parameters in our predict function.",
"_____no_output_____"
]
],
[
[
"class ExampleGP(gpflow.models.SVGP, Trainer):\n '''\n ExampleGP class, inherits from SVGP and Trainer, contains logic to build dataset, train model and return predictions\n '''\n \n def __init__(self):\n '''\n Initialise GP class, builds untrained gpflow model\n '''\n #define initial inducing point locations and model kernel\n Z = np.linspace(0,2,15)[:,None]\n kernel = gpflow.kernels.Periodic(base_kernel=gpflow.kernels.RBF())+gpflow.kernels.Bias()\n #initialise class as GP model\n super().__init__(\n kernel=kernel,\n inducing_variable=Z, \n likelihood = gpflow.likelihoods.Gaussian()\n )\n \n def get_train_data(self):\n '''\n Returns training and test data\n \n returns:\n data - tuples (Xtrain, Ytrain), (Xtest, Ytest)\n '''\n data = get_data()\n return data\n \n def train(self, iterations):\n '''\n Initialises a Trainer for model and data, trains model and saved parameters to disk\n \n arguments:\n iterations - int, number of training steps\n \n returns:\n logs - nd.array, array of iteration and loss values at logging step\n '''\n #get training data\n train_data, _ = self.get_train_data()\n #initialise trainer\n trainer = Trainer(model=self, data=train_data)\n logs = trainer.train_model(iterations)\n #save trained parameters\n self.save_params()\n return logs\n \n def predict(self, X):\n '''\n Returns predictions and variance for given X\n \n arguments:\n X - nd.array, test locations for predictions\n \n returns:\n Ypred - nd.array, predictions for X\n Yvar - nd.array, variance of predictions\n '''\n #load and assign trained parameters\n trained_params = self.load_params()\n gpflow.utilities.multiple_assign(self, trained_params)\n #get predictions and variance\n Ypred, Yvar = self.predict_y(X)\n return Ypred.numpy(), Yvar.numpy()\n \n def save_params(self):\n '''\n Saves model parameters as json file\n '''\n #get parameter dictionary\n trained_params = gpflow.utilities.parameter_dict(self)\n #convert entries to serializable format\n for key in trained_params.keys():\n trained_params[key] = trained_params[key].numpy().tolist()\n #save as json file\n with open('trained_params.json', 'w') as fp:\n json.dump(trained_params, fp)\n \n def load_params(self):\n '''\n Loads model parameters from json file\n \n returns:\n trained_params - dict, dictionary of model parameter values\n '''\n with open('trained_params.json', 'r') as fp:\n trained_params = json.load(fp)\n return trained_params",
"_____no_output_____"
]
],
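A minimal sketch of the validation gate mentioned above is given below. The RMSE threshold and the use of the held-out test set are assumptions for illustration, not part of the class.

```python
def validate(model, test_data, rmse_threshold=1.0):
    """Promotion gate sketch: accept the newly trained parameters only if
    the held-out RMSE is below a chosen (here arbitrary) threshold."""
    X_test, Y_true = test_data
    Y_pred, _ = model.predict(X_test)
    rmse_value = tf.keras.metrics.RootMeanSquaredError()(Y_true, Y_pred).numpy()
    return rmse_value < rmse_threshold, rmse_value
```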
[
[
"## Step 4: Define Endpoints:\n\nEndpoints are pretty simple for the model and could easily be wrapped with Flask and used with docker. \n\ntrain() creates a new model and calls the train method from our ExampleGP class, this function builds the dataset, trains the model and saves the trained parameters for use by the predict function. In production we could call this with a cron job.",
"_____no_output_____"
]
],
[
[
"def train():\n model = ExampleGP()\n logs = model.train(2500)\n return logs\n # SAVE LOGS TO DISK FOR VALIDATION ETC. IN PRODUCTION\n \ndef predict(request):\n model = ExampleGP()\n response = model.predict(request)\n # RETURNING Z FOR PLOTS\n return response, model.inducing_variable.Z.numpy()",
"_____no_output_____"
]
],
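Since the endpoints above are plain functions, a minimal Flask wrapper might look like the sketch below; the route names, port and the JSON request format are assumptions.

```python
# Minimal Flask wrapper around the train()/predict() functions above (a sketch).
import numpy as np
from flask import Flask, jsonify, request

app = Flask(__name__)

@app.route("/train", methods=["POST"])
def train_route():
    logs = train()  # retrains the model and saves parameters to disk
    return jsonify({"final_loss": float(logs[-1, 1])})

@app.route("/predict", methods=["POST"])
def predict_route():
    X = np.asarray(request.get_json()["x"], dtype=float)[:, None]
    (y_pred, y_var), _ = predict(X)
    return jsonify({"mean": y_pred.ravel().tolist(), "var": y_var.ravel().tolist()})

# app.run(host="0.0.0.0", port=8080)  # typically started by the container entrypoint instead
```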
[
[
"We can call these endpoints to train and predict as before",
"_____no_output_____"
]
],
[
[
"with tf.device('/CPU:0'):\n loss = train()\nXtest, Ytrue = test_data\n(Ypred, Yvar), Z = predict(Xtest)",
"Iteration 2500/2500, elapsed time: 9.42 seconds, time per iteration: 0.004 seconds\n"
]
],
[
[
"8 seconds faster than the previous example! The power of OOP? or have I made a mistake? let me know if you spot one!",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(15,7))\nplt.plot(Xtest, Ytrue, c='k')\nplt.plot(Xtest, Ypred,c=\"#ffa01f\")\nplt.plot(Xtest,(Ypred - 2 * Yvar ** 0.5),c=\"#ffa01f\", linestyle='--')\nplt.plot(Xtest,(Ypred + 2 * Yvar ** 0.5), c=\"#ffa01f\", linestyle='--')\nplt.scatter(Xtrain,Ytrain, alpha=0.3,marker=\"x\")\nplt.scatter(Z, np.zeros_like(Z), c='k')",
"_____no_output_____"
],
[
"rmse(Ytrue, Ypred).numpy()",
"_____no_output_____"
]
],
[
[
"Slightly better RMSE, though this would probably even out over multiple runs.",
"_____no_output_____"
]
],
[
[
"plt.plot(loss[:,0],loss[:,1])",
"_____no_output_____"
]
],
[
[
"Converges quickly!",
"_____no_output_____"
],
[
"## Thoughts on Data\n\n- My gut feeling says that each model should have a related database which returns the train/val/test sets when queried\n- Use BigQuery to do the heavy lifting and manipulation and maybe MongoDB or similar to serve the model, will this be cheaper?\n- I think it's better to not have modellers setting up their own infrastructure, better to instead have a standard way of requesting creation of endpoints and specifying requirements\n\nFor example, imagine we have a GP that requires latititude, longitude, time and PM2.5 values for our sensors, averaged hourly. The dataspec would be something like\n\n#### Columns:\n\nlatitude - float, latitude of sensor at observation time\n\nlongitude - float, longitude of sensor at overvation time\n\ntime - integer, time of observation, normalised so that initial observation is t=0\n\nPM2.5 - float, measured pollution value at given lat,long,time\n\nIn this case, can we use BigQuery to get the sensor values and average them hourly before updating the model dataset?\n\nWe could use a cron job to update:\n\n- Get latest data from BigQuery\n- Transform and load to ModelTable\n- Call model train function\n- Train function gets data from ModelTable via API and passes to Trainer\n- Call model validate function\n- Save parameters for prediction if validation is passed",
"_____no_output_____"
]
]
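As a concrete illustration of the BigQuery step in the cron flow above, the sketch below pulls hourly-averaged sensor readings with the google-cloud-bigquery client; the project, dataset, table and column names are hypothetical placeholders.

```python
# Sketch of the hourly aggregation step; table and column names are placeholders.
from google.cloud import bigquery

client = bigquery.Client()
query = """
    SELECT
      latitude,
      longitude,
      TIMESTAMP_TRUNC(observed_at, HOUR) AS hour,
      AVG(pm25) AS pm25
    FROM `my-project.sensors.raw_readings`
    GROUP BY latitude, longitude, hour
"""
hourly_df = client.query(query).to_dataframe()  # would then be loaded into ModelTable
```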
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
]
|
ec6a752140a905ab5371f6d7aa25869883fa21f4 | 63,382 | ipynb | Jupyter Notebook | elizabeth/Trade_Recommender/baseline_model.ipynb | standroidbeta/cryptolytic-ds | f5d2cc5e3e340bbcb01cfbd5d52b15ad4d2d62a0 | [
"MIT"
]
| null | null | null | elizabeth/Trade_Recommender/baseline_model.ipynb | standroidbeta/cryptolytic-ds | f5d2cc5e3e340bbcb01cfbd5d52b15ad4d2d62a0 | [
"MIT"
]
| null | null | null | elizabeth/Trade_Recommender/baseline_model.ipynb | standroidbeta/cryptolytic-ds | f5d2cc5e3e340bbcb01cfbd5d52b15ad4d2d62a0 | [
"MIT"
]
| null | null | null | 63,382 | 63,382 | 0.503645 | [
[
[
"## Import data",
"_____no_output_____"
]
],
[
[
"import pandas as pd",
"_____no_output_____"
],
[
"# import data\npath = 'coinbase_btc_usd.csv'\ndf = pd.read_csv(path)\n\n# drop unnecessary columns and change the order of time\ndf.drop(df.columns[0], axis=1, inplace=True)\ndf = df[::-1]\ndf.head()",
"_____no_output_____"
]
],
[
[
"## Add Technical Analysis features",
"_____no_output_____"
]
],
[
[
"pip install ta",
"Requirement already satisfied: ta in /usr/local/lib/python3.6/dist-packages (0.4.5)\nRequirement already satisfied: scikit-learn in /usr/local/lib/python3.6/dist-packages (from ta) (0.21.3)\nRequirement already satisfied: pandas in /usr/local/lib/python3.6/dist-packages (from ta) (0.24.2)\nRequirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from ta) (1.16.5)\nRequirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.6/dist-packages (from scikit-learn->ta) (0.14.0)\nRequirement already satisfied: scipy>=0.17.0 in /usr/local/lib/python3.6/dist-packages (from scikit-learn->ta) (1.3.1)\nRequirement already satisfied: pytz>=2011k in /usr/local/lib/python3.6/dist-packages (from pandas->ta) (2018.9)\nRequirement already satisfied: python-dateutil>=2.5.0 in /usr/local/lib/python3.6/dist-packages (from pandas->ta) (2.5.3)\nRequirement already satisfied: six>=1.5 in /usr/local/lib/python3.6/dist-packages (from python-dateutil>=2.5.0->pandas->ta) (1.12.0)\n"
],
[
"from ta import add_all_ta_features",
"_____no_output_____"
],
[
"# # 31 indicators\n\n# Volume\n# Accumulation/Distribution Index (ADI)\n# On-Balance Volume (OBV)\n# Chaikin Money Flow (CMF) *\n# Force Index (FI)\n# Ease of Movement (EoM, EMV)\n# Volume-price Trend (VPT)\n# Negative Volume Index (NVI)\n\n# Volatility\n# Average True Range (ATR)\n# Bollinger Bands (BB)\n# Keltner Channel (KC)\n# Donchian Channel (DC)\n\n# Trend\n# Moving Average Convergence Divergence (MACD)\n# Average Directional Movement Index (ADX)\n# Vortex Indicator (VI)\n# Trix (TRIX)\n# Mass Index (MI)\n# Commodity Channel Index (CCI)\n# Detrended Price Oscillator (DPO)\n# KST Oscillator (KST)\n# Ichimoku Kinkō Hyō (Ichimoku)\n\n# Momentum\n# Money Flow Index (MFI)\n# Relative Strength Index (RSI)\n# True strength index (TSI)\n# Ultimate Oscillator (UO)\n# Stochastic Oscillator (SR)\n# Williams %R (WR)\n# Awesome Oscillator (AO)\n# Kaufman's Adaptive Moving Average (KAMA)\n\n# Others\n# Daily Return (DR)\n# Daily Log Return (DLR)\n# Cumulative Return (CR)",
"_____no_output_____"
],
[
"# TA library from github https://github.com/bukosabino/ta\ndf = add_all_ta_features(df, \"open\", \"high\", \"low\", \"close\", \"volume\")",
"/usr/local/lib/python3.6/dist-packages/ta/trend.py:170: RuntimeWarning: divide by zero encountered in double_scalars\n dip[i] = 100 * (dip_mio[i]/trs[i])\n/usr/local/lib/python3.6/dist-packages/ta/trend.py:170: RuntimeWarning: invalid value encountered in double_scalars\n dip[i] = 100 * (dip_mio[i]/trs[i])\n/usr/local/lib/python3.6/dist-packages/ta/trend.py:174: RuntimeWarning: divide by zero encountered in double_scalars\n din[i] = 100 * (din_mio[i]/trs[i])\n/usr/local/lib/python3.6/dist-packages/ta/trend.py:174: RuntimeWarning: invalid value encountered in double_scalars\n din[i] = 100 * (din_mio[i]/trs[i])\n/usr/local/lib/python3.6/dist-packages/ta/trend.py:176: RuntimeWarning: invalid value encountered in subtract\n dx = 100 * np.abs((dip - din) / (dip + din))\n"
],
[
"# drop null values\nnull_columns = ['volume_obv', 'trend_adx', 'trend_adx_pos', 'trend_adx_neg', 'trend_trix']\n\ndf.drop(columns=null_columns, inplace=True)\n\ndf2 = df[40:]\n\ndf2['close_diff'] = df2['close'] - df2['close'].shift(1)",
"/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:7: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n import sys\n"
],
[
"df2.shape\n# shape should be (440043, 60)",
"_____no_output_____"
],
[
"df2.head()",
"_____no_output_____"
],
[
"# function to create a target\ndef price_increase(x):\n if x > 0:\n return True\n else:\n return False",
"_____no_output_____"
],
[
"# fix the index numbers\ndf2 = df2.reset_index()\ndf2.drop(columns='index', inplace=True)",
"_____no_output_____"
],
[
"# create target column\ntarget = df2['close_diff'].apply(price_increase)\n\n# remove the first item off target and remove the last item off the dataframe\ntarget3 = target[1:].values\ndf3 = df2[:440042]\n\n# create target column\ndf3['target'] = target3",
"_____no_output_____"
],
[
"df3.head(10)",
"_____no_output_____"
],
[
"# export to csv\ndf3[1:].to_csv('BTC_USD_CBPro_TA.csv', index=False)",
"_____no_output_____"
]
],
[
[
"## Baseline Model",
"_____no_output_____"
]
],
[
[
"from sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score",
"_____no_output_____"
],
[
"# load data\ntraintest = pd.read_csv('BTC_USD_CBPro_TA.csv')\n\ntrain = traintest[traintest['time'] < 1538265600] # cutoff sept 30 2018\ntest = traintest[traintest['time'] > 1548892800] # cutoff jan 31 2019\ntrain.shape, test.shape",
"_____no_output_____"
],
[
"# pick features and target\nfeatures = ['open', 'high', 'low', 'close', 'volume']\ntarget = 'target'\n\n# X, y vectors\nX_train = train[features]\nX_test = test[features]\ny_train = train[target]\ny_test = test[target]\n\n# select model\nmodel = RandomForestClassifier()\n\n# fit model\nmodel.fit(X_train, y_train)",
"/usr/local/lib/python3.6/dist-packages/sklearn/ensemble/forest.py:245: FutureWarning: The default value of n_estimators will change from 10 in version 0.20 to 100 in 0.22.\n \"10 in version 0.20 to 100 in 0.22.\", FutureWarning)\n"
],
[
"# make predictions \ny_pred = model.predict(X_test)\n\n# accuracy\nscore = accuracy_score(y_test, y_pred)\nprint(score)",
"0.5090261486525147\n"
]
],
[
[
"## Model Tuning",
"_____no_output_____"
]
],
[
[
"# pick features and target\nfeature_columns = traintest.drop(columns=['target', 'time']).columns.tolist()\nfeatures = feature_columns\ntarget = 'target'\n\n# X, y vectors\nX_train = train[features]\nX_test = test[features]\ny_train = train[target]\ny_test = test[target]\n\n# select model\nmodel = RandomForestClassifier(max_depth=20, n_estimators=100, n_jobs=-1)\n\n# fit model\nmodel.fit(X_train, y_train)",
"_____no_output_____"
],
[
"# make predictions \ny_preds = model.predict(X_test)\n\nscore = accuracy_score(y_test, y_preds)\nprint(score)",
"0.5325713795273789\n"
]
],
[
[
"## Download Pickle",
"_____no_output_____"
]
],
[
[
"import pickle",
"_____no_output_____"
],
[
"pickle.dump(model, open('BTC_USD_CBPro_rf_0.53.pkl', 'wb'))",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
]
|
ec6a9aa9dfd73ecc21281f6c15acb9f02c5636d6 | 116,648 | ipynb | Jupyter Notebook | basic_supervised_net/resnetv2_driver/resnet_v2.ipynb | ARCC-RACE/ai_training_notebooks | 7414ddecc1b8700b5c3533cebb237e88996298cd | [
"MIT"
]
| 1 | 2020-03-04T07:46:01.000Z | 2020-03-04T07:46:01.000Z | basic_supervised_net/resnetv2_driver/resnet_v2.ipynb | ARCC-RACE/ai_training_notebooks | 7414ddecc1b8700b5c3533cebb237e88996298cd | [
"MIT"
]
| null | null | null | basic_supervised_net/resnetv2_driver/resnet_v2.ipynb | ARCC-RACE/ai_training_notebooks | 7414ddecc1b8700b5c3533cebb237e88996298cd | [
"MIT"
]
| null | null | null | 155.738318 | 11,772 | 0.770069 | [
[
[
"# Jetsoncar Resnet V2\n\nTensorflow 2.0, all in notebook, optimized with RT. V2 includes depth image as well color",
"_____no_output_____"
]
],
[
[
"import tensorflow as tf\nprint(tf.__version__)\ntf.config.experimental.list_physical_devices('GPU') # If device does not show and using conda env with tensorflow-gpu then try restarting computer",
"_____no_output_____"
],
[
"# verify the image data directory\nimport os\ndata_directory = \"/media/michael/BigMemory/datasets/jetsoncar/training_data/data/dataset\"\nos.listdir(data_directory)[:10]\n\nimport matplotlib.pyplot as plt\nimg = plt.imread(os.path.join(data_directory + \"/color_images\", os.listdir(data_directory + \"/color_images\")[0]))\nprint(img.shape)\nplt.imshow(img)",
"_____no_output_____"
]
],
[
[
"## Create the datagenerator and augmentation framework",
"_____no_output_____"
]
],
[
[
"# Include the custom utils.py and perform tests\nimport importlib\nutils = importlib.import_module('utils')\nimport numpy as np\n\nprint(utils.INPUT_SHAPE)\n\nimg = utils.load_image(os.path.join(data_directory, 'color_images'),os.listdir(data_directory + \"/color_images\")[0])\nprint(img.shape)\n\nfig = plt.figure(figsize=(20,20))\nfig.add_subplot(1, 3, 1)\nplt.imshow(img)\n\nimg, _ = utils.preprocess_data(last_color_image=img)\nprint(img.shape)\n\nfig.add_subplot(1, 3, 2)\nplt.imshow(np.squeeze(img))\n\nplt.show()",
"_____no_output_____"
],
[
"# Load the steering angles and image paths from labels.csv\nimport csv, random\nimport seaborn as sns\n\n# these will be 2D arrays where each row represents a dataset\nx = [] # images\ny = [] # steering\nz = [] # speed\nwith open(os.path.join(data_directory, \"tags.csv\")) as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n # print(row['Time_stamp'] + \".jpg\", row['Steering_angle'])\n if not float(row['raw_speed']) == 0:\n x.append(row['time_stamp'] + \".jpg\",) # get image path\n y.append(float(row['raw_steering']),) # get steering value\n z.append(float(row['raw_speed']))\n\nprint(\"Number of data samples is \" + str(len(y)))\n\ndata = list(zip(x,y))\nrandom.shuffle(data)\nx,y = zip(*data)\n\n# plot of steering angle distribution without correction\nsns.distplot(y)",
"Number of data samples is 17214\n"
],
[
"# plot of speed distribution\nsns.distplot(z)",
"_____no_output_____"
],
[
"# Split the training data\nvalidation_split = 0.2\ntrain_x = x[0:int(len(x)*(1.0-validation_split))]\ntrain_y = y[0:int(len(y)*(1.0-validation_split))]\nprint(\"Training data shape: \" + str(len(train_x)))\ntest_x = x[int(len(x)*(1.0-validation_split)):]\ntest_y = y[int(len(y)*(1.0-validation_split)):]\nprint(\"Validation data shape: \" + str(len(test_x)) + \"\\n\")",
"Training data shape: 13771\nValidation data shape: 3443\n\n"
],
[
"# Define and test batch generator\ndef batch_generator(data_dir, image_paths, steering_angles, batch_size, is_training):\n \"\"\"\n Generate training image give image paths and associated steering angles\n \"\"\"\n images = np.empty([batch_size, utils.IMAGE_HEIGHT, utils.IMAGE_WIDTH, utils.IMAGE_CHANNELS])\n steers = np.empty(batch_size)\n while True:\n i = 0\n for index in np.random.permutation(len(image_paths)):\n img = image_paths[index]\n steering_angle = steering_angles[index]\n # argumentation\n if is_training and np.random.rand() < 0.8:\n image, steering_angle = utils.augument(data_dir, os.path.join(\"color_images\",img), steering_angle)\n else:\n image, _ = utils.preprocess_data(utils.load_image(data_dir, os.path.join(\"color_images\",img)))\n # add the image and steering angle to the batch\n images[i] = image\n steers[i] = steering_angle\n i += 1\n if i == batch_size:\n break\n yield images, steers\n \ntrain_generator = batch_generator(data_directory, train_x, train_y, 32, True)\nvalidation_generator = batch_generator(data_directory, test_x, test_y, 32, False)\n\ntrain_image = next(train_generator) # returns tuple with steering and throttle\nprint(train_image[0].shape)\nprint(train_image[1][0])\nplt.imshow(train_image[0][0])",
"_____no_output_____"
]
],
[
[
"## Define the model and start training",
"_____no_output_____"
]
],
[
[
"# This is a pretrained resnet50 with custom head for detection\nresnet = tf.keras.applications.ResNet50V2(include_top=False, input_tensor=tf.keras.layers.Input(shape=(240, 640, 3))) # takes in image of 224x224\nresnet_head = tf.keras.models.Model(inputs=resnet.input, outputs=resnet.layers[-46].output) # get output to (14x14x256)\n# don't retrain weights from imagenet\n# for layer in resnet_head.layers:\n# layer.trainable = False \n \n# resnet_head.summary()\n\n# custom tail\nx = tf.keras.layers.Conv2D(128, (7,7), kernel_initializer='lecun_uniform', padding='same')(resnet_head.output)\nx = tf.keras.layers.BatchNormalization()(x)\nout = tf.keras.layers.Activation(tf.nn.relu)(x)\n\nmodel = tf.keras.models.Model(inputs=resnet_head.input, outputs=out)\nmodel.compile(optimizer='adam', loss='categorical_crossentropy', metrics=[tf.keras.metrics.Accuracy()])\nmodel.summary()",
"_____no_output_____"
],
[
"import datetime\nlog_dir=\"logs/fit/\" + datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\ntensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir)\nprint(\"To view tensorboard please run `tensorboard --logdir logs/fit` in the code directory from the terminal with deeplearning env active\")\n\ncheckpoint = tf.keras.callbacks.ModelCheckpoint('rosey_v2.{epoch:03d}-{val_loss:.2f}.h5', # filepath = working directory/\n monitor='val_loss',\n verbose=0,\n save_best_only=True,\n mode='auto')\n\nmodel.fit_generator(train_generator,\n steps_per_epoch=100, \n epochs=20,\n validation_data=validation_generator,\n validation_steps=1,\n callbacks=[tensorboard_callback, checkpoint])",
"To view tensorboard please run `tensorboard --logdir logs/fit` in the code directory from the terminal with deeplearning env active\nEpoch 1/20\n100/100 [==============================] - 131s 1s/step - loss: 0.0886 - val_loss: 0.0705\nEpoch 2/20\n100/100 [==============================] - 123s 1s/step - loss: 0.0577 - val_loss: 0.0177\nEpoch 3/20\n100/100 [==============================] - 123s 1s/step - loss: 0.0573 - val_loss: 0.0715\nEpoch 4/20\n100/100 [==============================] - 124s 1s/step - loss: 0.0510 - val_loss: 0.0220\nEpoch 5/20\n100/100 [==============================] - 123s 1s/step - loss: 0.0529 - val_loss: 0.0250\nEpoch 6/20\n100/100 [==============================] - 123s 1s/step - loss: 0.0498 - val_loss: 0.0582\nEpoch 7/20\n100/100 [==============================] - 122s 1s/step - loss: 0.0506 - val_loss: 0.0379\nEpoch 8/20\n100/100 [==============================] - 122s 1s/step - loss: 0.0470 - val_loss: 0.0084\nEpoch 9/20\n100/100 [==============================] - 122s 1s/step - loss: 0.0465 - val_loss: 0.0166\nEpoch 10/20\n100/100 [==============================] - 122s 1s/step - loss: 0.0502 - val_loss: 0.0313\nEpoch 11/20\n100/100 [==============================] - 122s 1s/step - loss: 0.0502 - val_loss: 0.0176\nEpoch 12/20\n100/100 [==============================] - 123s 1s/step - loss: 0.0472 - val_loss: 0.0196\nEpoch 13/20\n100/100 [==============================] - 122s 1s/step - loss: 0.0469 - val_loss: 0.0473\nEpoch 14/20\n100/100 [==============================] - 122s 1s/step - loss: 0.0492 - val_loss: 0.0109\nEpoch 15/20\n100/100 [==============================] - 122s 1s/step - loss: 0.0450 - val_loss: 0.0221\nEpoch 16/20\n100/100 [==============================] - 121s 1s/step - loss: 0.0453 - val_loss: 0.0668\nEpoch 17/20\n100/100 [==============================] - 122s 1s/step - loss: 0.0507 - val_loss: 0.0279\nEpoch 18/20\n100/100 [==============================] - 122s 1s/step - loss: 0.0428 - val_loss: 0.0132\nEpoch 19/20\n100/100 [==============================] - 122s 1s/step - loss: 0.0511 - val_loss: 0.0162\nEpoch 20/20\n100/100 [==============================] - 121s 1s/step - loss: 0.0451 - val_loss: 0.0237\n"
],
[
"# Test the model\nimage, steering = next(train_generator)\nprint(steering)\nprint(model.predict(image))\nprint(\"\")\n\nimage, steering = next(validation_generator)\nprint(steering)\nprint(model.predict(image))",
"[ 0.38866069 -0.01653751 0.15497394 0.18995237 -0.24068273 -0.29661691\n -0.2366807 -0.23459999 -0.09765687 -0.35514895 -0.28255172 0.15013804\n -0.15041455 -0.14102635 -0.04422271 -0.2006 -0.2032 -0.10150293\n -0.18395218 0.34925292 -0.22099999 0.250549 0.28476982 0.40067244\n 0.0918623 0.20941769 0.01414458 0.29502599 -0.27545909 0.27571231\n -0.06456293 -0.38859999]\n[[-0.01409152]\n [ 0.08915222]\n [-0.0566576 ]\n [-0.03755104]\n [ 0.08308045]\n [-0.15913627]\n [-0.16558295]\n [-0.12865719]\n [ 0.04993952]\n [-0.01573419]\n [-0.02034076]\n [-0.15650505]\n [ 0.06376354]\n [ 0.01313779]\n [-0.12256726]\n [-0.1740151 ]\n [-0.19206235]\n [ 0.08967156]\n [-0.09032293]\n [ 0.2788198 ]\n [-0.13434035]\n [ 0.00317884]\n [ 0.15187286]\n [ 0.15058331]\n [ 0.01717911]\n [ 0.00433371]\n [-0.14564866]\n [ 0.00086454]\n [-0.08476945]\n [-0.05350491]\n [-0.0531204 ]\n [-0.1822269 ]]\n\n[-0.26859999 -0.20259999 -0.31899998 -0.25319999 -0.1032 -0.17560001\n -0.21020001 -0.146 -0.2956 -0.1936 -0.15899999 -0.2\n -0.1512 -0.1336 -0.161 -0.16160001 -0.29519999 -0.2418\n -0.2006 -0.1636 -0.16160001 -0.3206 -0.1772 -0.31059998\n -0.1036 -0.15899999 -0.19180001 -0.16620001 -0.07160001 -0.2052\n -0.15359999 -1. ]\n[[-0.11714696]\n [-0.20285761]\n [-0.15389183]\n [-0.19883284]\n [-0.16768342]\n [-0.1410008 ]\n [-0.18112859]\n [-0.1797688 ]\n [-0.17058238]\n [-0.15402645]\n [-0.14007717]\n [-0.14992392]\n [-0.18626127]\n [-0.15838376]\n [-0.16476256]\n [-0.12435078]\n [-0.10820477]\n [-0.16049647]\n [-0.18037426]\n [-0.12607521]\n [-0.14107633]\n [-0.22561407]\n [-0.18220991]\n [-0.17157105]\n [-0.18844417]\n [-0.16825321]\n [-0.17849213]\n [-0.19069886]\n [-0.14187187]\n [-0.14629892]\n [-0.07397493]\n [-0.12378275]]\n"
]
],
[
[
"## Save the model as tensor RT and export to Jetson format",
"_____no_output_____"
]
],
[
[
"# Load the model that you would like converted to RT\nmodel_path = 'model.h5'\nexport_path = \"/home/michael/Desktop/model\"\n\nimport shutil\nif not os.path.isdir(export_path):\n os.mkdir(export_path)\nelse:\n response = input(\"Do you want to delete existing export_path directory? y/n\")\n if response == 'y':\n shutil.rmtree(export_path)\n os.mkdir(export_path)\n\nloaded_model = tf.keras.models.load_model(model_path)\n\nshutil.copy(\"./utils.py\", os.path.join(export_path, \"utils.py\"))\nshutil.copy(\"./notes.txt\", os.path.join(export_path, \"notes.txt\"))\nshutil.copy(\"./config.yaml\", os.path.join(export_path, \"config.yaml\"))\n# Save as tf saved_model (faster than h5)\ntf.saved_model.save(loaded_model, export_path)",
"Do you want to delete existing export_path directory? y/n y\n"
],
[
"from tensorflow.python.compiler.tensorrt import trt_convert as trt\n\nconversion_params = trt.DEFAULT_TRT_CONVERSION_PARAMS\nconversion_params = conversion_params._replace(max_workspace_size_bytes=(1 << 32))\nconversion_params = conversion_params._replace(precision_mode=\"INT8\")\nconversion_params = conversion_params._replace(maximum_cached_engines=100)\nconversion_params = conversion_params._replace(use_calibration=True)\n\ndef my_calibration_input_fn():\n for i in range(20):\n image, _ = utils.preprocess_data(utils.load_image(data_directory, os.path.join(\"color_images\",x[i])))\n yield image.astype(np.float32),\n\nconverter = trt.TrtGraphConverterV2(input_saved_model_dir=export_path,conversion_params=conversion_params)\n\ngen = my_calibration_input_fn()\n\nconverter.convert(calibration_input_fn=my_calibration_input_fn)\nconverter.build(my_calibration_input_fn)\n\nif not os.path.isdir(os.path.join(export_path, \"rt\")):\n os.mkdir(os.path.join(export_path, \"rt\"))\n \nconverter.save(os.path.join(export_path, \"rt\"))",
"INFO:tensorflow:Linked TensorRT version: (0, 0, 0)\nINFO:tensorflow:Loaded TensorRT version: (0, 0, 0)\nINFO:tensorflow:Running against TensorRT version 0.0.0\nINFO:tensorflow:Assets written to: /home/michael/Desktop/model/rt/assets\n"
],
[
"# Test normal saved model\nsaved_model = tf.saved_model.load(export_path) # normal saved model\n\nimage, _ = next(validation_generator)\n\nimport time\noutput = saved_model(image.astype(np.float32)) # load once to get more accurate representation of speed\nstart = time.time()\noutput = saved_model(image.astype(np.float32))\nstop = time.time()\nprint(\"inference time: \" + str(stop - start))\nprint(\"Output: %.20f\"%output[8,0])",
"inference time: 0.06450390815734863\nOutput: -0.21330091357231140137\n"
],
[
"# Test TRT optimized saved model\nsaved_model = tf.saved_model.load(os.path.join(export_path, \"rt\")) # normal saved model\n\n\nimage, _ = next(validation_generator)\n\nimport time\noutput = saved_model(image.astype(np.float32)) # load once to get more accurate representation of speed\nstart = time.time()\noutput = saved_model(image.astype(np.float32))\nstop = time.time()\nprint(\"inference time: \" + str(stop - start))\nprint(\"Output: %.20f\"%output[8,0])",
"inference time: 0.0611567497253418\nOutput: -0.13906964659690856934\n"
],
[
"# Run many samples through and save distribution \nvalidation_generator = batch_generator(data_directory, test_x, test_y, 32, False)\ntest = []\nfor i in range(50):\n img, _ = next(validation_generator)\n test.append(saved_model(img.astype(np.float32))[0][0])\n print(str(i), end=\"\\r\")\nsns.distplot(test)",
"49\r"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
]
|
ec6aab2162d810e6eb77e45f18d06666d280f339 | 3,983 | ipynb | Jupyter Notebook | code/DuckModel.ipynb | AustinVes/ModSimPy | 8be86eda816b0720e10856b7028221cac7af5296 | [
"MIT"
]
| null | null | null | code/DuckModel.ipynb | AustinVes/ModSimPy | 8be86eda816b0720e10856b7028221cac7af5296 | [
"MIT"
]
| null | null | null | code/DuckModel.ipynb | AustinVes/ModSimPy | 8be86eda816b0720e10856b7028221cac7af5296 | [
"MIT"
]
| null | null | null | 22.376404 | 78 | 0.450665 | [
[
[
"# Configure Jupyter so figures appear in the notebook\n%matplotlib inline\n\n# Configure Jupyter to display the assigned value after an assignment\n%config InteractiveShell.ast_node_interactivity='last_expr_or_assign'\n\n# import functions from the modsim.py module\nfrom modsim import *",
"_____no_output_____"
],
[
"system = System(\n density_duck = 0.3,\n density_water = 1,\n r = 5)",
"_____no_output_____"
],
[
"def error_func(d, system):\n unpack(system)\n \n volume_duck = (4/3) * pi * r**3\n mass_duck = volume_duck * density_duck\n volume_water = (pi/3) * (3 * r * d**2 - d**3)\n mass_water = volume_water * density_water\n \n return mass_duck - mass_water",
"_____no_output_____"
],
[
"for d in range(int(2*system.r)):\n print(error_func(d,system))",
"157.07963267948966\n142.4188669627373\n102.62536001726659\n43.982297150257125\n-27.227136331111524\n-104.71975511965977\n-182.21237390820798\n-253.42180738957663\n-312.0648702565861\n-351.8583772020568\n"
],
[
"fsolve(error_func, 3, system)",
"_____no_output_____"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code",
"code",
"code"
]
]
|
ec6acd16f4af23fc19cfcb8a9f9538cece51a875 | 56,837 | ipynb | Jupyter Notebook | Depression_Classifier.ipynb | RahulG1309/Epoch_MLDA_Submission | 91bd11c3eb97cffe683626d0eff521e65612e7d9 | [
"MIT"
]
| 1 | 2021-10-17T08:28:00.000Z | 2021-10-17T08:28:00.000Z | Depression_Classifier.ipynb | RahulG1309/Epoch_MLDA_Submission | 91bd11c3eb97cffe683626d0eff521e65612e7d9 | [
"MIT"
]
| null | null | null | Depression_Classifier.ipynb | RahulG1309/Epoch_MLDA_Submission | 91bd11c3eb97cffe683626d0eff521e65612e7d9 | [
"MIT"
]
| 2 | 2022-01-24T17:15:54.000Z | 2022-01-24T17:16:50.000Z | 36.787702 | 248 | 0.487359 | [
[
[
"### Importing required Libraries",
"_____no_output_____"
]
],
[
[
"!pip install sktime",
"Requirement already satisfied: sktime in /usr/local/lib/python3.7/dist-packages (0.8.0)\nRequirement already satisfied: pandas>=1.1.0 in /usr/local/lib/python3.7/dist-packages (from sktime) (1.1.5)\nRequirement already satisfied: scikit-learn>=0.24.0 in /usr/local/lib/python3.7/dist-packages (from sktime) (1.0)\nRequirement already satisfied: wheel in /usr/local/lib/python3.7/dist-packages (from sktime) (0.37.0)\nRequirement already satisfied: statsmodels>=0.12.1 in /usr/local/lib/python3.7/dist-packages (from sktime) (0.13.0)\nRequirement already satisfied: numba>=0.53 in /usr/local/lib/python3.7/dist-packages (from sktime) (0.54.1)\nRequirement already satisfied: numpy>=1.19.3 in /usr/local/lib/python3.7/dist-packages (from sktime) (1.19.5)\nRequirement already satisfied: llvmlite<0.38,>=0.37.0rc1 in /usr/local/lib/python3.7/dist-packages (from numba>=0.53->sktime) (0.37.0)\nRequirement already satisfied: setuptools in /usr/local/lib/python3.7/dist-packages (from numba>=0.53->sktime) (57.4.0)\nRequirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.7/dist-packages (from pandas>=1.1.0->sktime) (2018.9)\nRequirement already satisfied: python-dateutil>=2.7.3 in /usr/local/lib/python3.7/dist-packages (from pandas>=1.1.0->sktime) (2.8.2)\nRequirement already satisfied: six>=1.5 in /usr/local/lib/python3.7/dist-packages (from python-dateutil>=2.7.3->pandas>=1.1.0->sktime) (1.15.0)\nRequirement already satisfied: scipy>=1.1.0 in /usr/local/lib/python3.7/dist-packages (from scikit-learn>=0.24.0->sktime) (1.4.1)\nRequirement already satisfied: threadpoolctl>=2.0.0 in /usr/local/lib/python3.7/dist-packages (from scikit-learn>=0.24.0->sktime) (3.0.0)\nRequirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.7/dist-packages (from scikit-learn>=0.24.0->sktime) (1.0.1)\nRequirement already satisfied: patsy>=0.5.2 in /usr/local/lib/python3.7/dist-packages (from statsmodels>=0.12.1->sktime) (0.5.2)\n"
],
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.linear_model import RidgeClassifierCV\nfrom sklearn.pipeline import make_pipeline\nfrom sktime.transformations.panel.rocket import Rocket",
"_____no_output_____"
]
],
[
[
"### Data Cleaning \nAfter setting the timestamps to index, we combined the 55 datasets to facilitate the process of feeding it into the model.",
"_____no_output_____"
]
],
[
[
"condition = {a: pd.read_csv(\"condition_{x}.csv\".format(x = str(a))) for a in range(1, 24)}\ncontrol = {b+23: pd.read_csv(\"control_{x}.csv\".format(x = str(b))) for b in range(1, 33)}",
"_____no_output_____"
],
[
"for i in range(1, 24):\n condition[i].drop(columns = 'date', inplace = True)\n condition[i][\"timestamp\"] = pd.to_datetime(condition[i][\"timestamp\"])\n condition[i] = condition[i].set_index('timestamp')\n \nfor j in range(24, 56):\n control[j].drop(columns = 'date', inplace = True)\n control[j][\"timestamp\"] = pd.to_datetime(control[j][\"timestamp\"])\n control[j] = control[j].set_index('timestamp') ",
"_____no_output_____"
],
[
"for i in range(1, 24):\n index = 0\n for j in condition[i]['activity']:\n if(j==0):\n index += 1\n else:\n break \n print(index)\n condition[i] = condition[i][index:]\n#Removing the zeros at the start ",
"1\n0\n0\n0\n25\n101\n0\n0\n0\n0\n1\n3\n5\n24\n27\n1\n22\n38\n0\n0\n1\n0\n0\n"
],
[
"for i in range(24, 56):\n index = 0\n for j in control[i]['activity']:\n if(j==0):\n index += 1\n else:\n break \n print(index)\n control[i] = control[i][index:]\n#Removing the zeros at the start ",
"0\n0\n22\n0\n1\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n40\n40\n0\n67\n16\n0\n0\n0\n0\n0\n0\n0\n8\n"
],
[
"condition.update(control)",
"_____no_output_____"
]
],
[
[
"Slicing the arrays so that they can be combined smoothly.",
"_____no_output_____"
]
],
[
[
"array=[np.array(condition[x]['activity'][:19299]) for x in range(1,56)]\nlengths=[len(x) for x in array]\nmax(lengths)",
"_____no_output_____"
],
[
"z=pd.DataFrame()\nz['x']=array\nprint(z.shape)\nz.head()",
"(55, 1)\n"
],
[
"X=z\ny=[]\nfrom sklearn.model_selection import train_test_split\nfor i in range(23):\n y.append(1)\nfor i in range(32):\n y.append(0)\nX_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.1)",
"_____no_output_____"
]
],
[
[
"### Training the Model\nFinally, the moment we've all been waiting for, we use the ROCKET model on the timeseries to extract the features. The new datasets are fed into a Rigid Classifier to classify them as Depressed (1) or not (0).",
"_____no_output_____"
]
],
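Since make_pipeline is already imported at the top of the notebook, the transform-then-classify steps below can also be written as a single sklearn-style pipeline. The sketch is equivalent in spirit to the two-step code that follows and uses the same hyperparameters.

```python
# Sketch: chain the Rocket transform and ridge classifier into one estimator.
rocket_pipeline = make_pipeline(
    Rocket(num_kernels=100, random_state=111),
    RidgeClassifierCV(alphas=np.logspace(-3, 3, 10), normalize=True),
)
rocket_pipeline.fit(X_train, y_train)
print(rocket_pipeline.score(X_test, y_test))
```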
[
[
"rocket = Rocket(num_kernels=100, random_state=111) \nrocket.fit(X_train)\nX_train_transform = rocket.transform(X_train)\nX_train_transform.shape",
"_____no_output_____"
],
[
"classifier = RidgeClassifierCV(alphas=np.logspace(-3, 3, 10), normalize=True)\nclassifier.fit(X_train_transform, y_train)",
"/usr/local/lib/python3.7/dist-packages/sklearn/linear_model/_base.py:145: FutureWarning: 'normalize' was deprecated in version 1.0 and will be removed in 1.2.\nIf you wish to scale the data, use Pipeline with a StandardScaler in a preprocessing stage. To reproduce the previous behavior:\n\nfrom sklearn.pipeline import make_pipeline\n\nmodel = make_pipeline(StandardScaler(with_mean=False), _RidgeGCV())\n\nIf you wish to pass a sample_weight parameter, you need to pass it as a fit parameter to each step of the pipeline as follows:\n\nkwargs = {s[0] + '__sample_weight': sample_weight for s in model.steps}\nmodel.fit(X, y, **kwargs)\n\n\n FutureWarning,\n"
],
[
"X_test_transform = rocket.transform(X_test)\nclassifier.score(X_test_transform,y_test)",
"_____no_output_____"
],
[
"y_test\ny_pred=classifier.predict(X_test_transform)",
"_____no_output_____"
]
],
[
[
"### Testing the model validity with K-fold cross-validation",
"_____no_output_____"
]
],
[
[
"from sklearn.model_selection import KFold\nkfold = KFold(n_splits=11,shuffle=True)",
"_____no_output_____"
],
[
"y_2=pd.DataFrame(y)\naccuracies=[]\nfor train_index, test_index in kfold.split(X):\n # splitting\n print(\"TRAIN:\", train_index, \"TEST:\", test_index)\n X_train, X_test = X.loc[train_index], X.loc[test_index]\n y_train, y_test = y_2.loc[train_index], y_2.loc[test_index]\n\n # rocket transformation\n rocket = Rocket(num_kernels=100, random_state=111) \n rocket.fit(X_train)\n X_train_transform = rocket.transform(X_train)\n print(X_train_transform.shape)\n\n # classifier\n classifier = RidgeClassifierCV(alphas=np.logspace(-3, 3, 10), normalize=True)\n classifier.fit(X_train_transform, y_train[0])\n X_test_transform = rocket.transform(X_test)\n ac=classifier.score(X_test_transform,y_test[0])\n print(ac)\n accuracies.append(ac)\n",
"TRAIN: [ 0 1 2 3 4 5 6 7 8 9 12 13 14 15 16 17 18 19 20 22 23 24 25 26\n 27 28 29 30 31 32 33 34 35 36 37 38 39 41 42 43 44 46 47 48 49 50 51 52\n 53 54] TEST: [10 11 21 40 45]\n(50, 200)\n"
],
[
"df = pd.DataFrame(accuracies)\ndf.value_counts()",
"_____no_output_____"
],
[
"df = pd.DataFrame(accuracies)\ndf.mean()",
"_____no_output_____"
]
],
[
[
"### Hyperparameter Tuning\n\nWe have hypertuned the model using GridSearchCV with 5 fold cross-validation.",
"_____no_output_____"
]
],
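The GridSearchCV alternative mentioned above could look roughly like the sketch below: the pipeline idea from earlier is searched over a few candidate num_kernels values with 5-fold cross-validation. The grid values are illustrative.

```python
from sklearn.model_selection import GridSearchCV

# Sketch of a cross-validated search over the number of ROCKET kernels.
search = GridSearchCV(
    estimator=make_pipeline(Rocket(random_state=111),
                            RidgeClassifierCV(alphas=np.logspace(-3, 3, 10), normalize=True)),
    param_grid={"rocket__num_kernels": [100, 300, 500, 1000]},
    cv=5,
    n_jobs=-1,
)
search.fit(X, y)
print(search.best_params_, search.best_score_)
```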
[
[
"nums = np.arange(100,1001,100)\nscores = []\nfor i in nums:\n # Create and train a new model instance.\n rocket = Rocket(num_kernels= i, random_state=111) \n rocket.fit(X_train)\n\n X_train_transform = rocket.transform(X_train)\n # X_train_transform.shape\n\n classifier = RidgeClassifierCV(alphas=np.logspace(-3, 3, 10), normalize=True)\n classifier.fit(X_train_transform, y_train)\n\n X_test_transform = rocket.transform(X_test)\n score = classifier.score(X_test_transform,y_test)\n scores.append(score)\n print(score)\n",
"/usr/local/lib/python3.7/dist-packages/sklearn/linear_model/_ridge.py:2334: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples, ), for example using ravel().\n y = column_or_1d(y, warn=True)\n/usr/local/lib/python3.7/dist-packages/sklearn/linear_model/_base.py:145: FutureWarning: 'normalize' was deprecated in version 1.0 and will be removed in 1.2.\nIf you wish to scale the data, use Pipeline with a StandardScaler in a preprocessing stage. To reproduce the previous behavior:\n\nfrom sklearn.pipeline import make_pipeline\n\nmodel = make_pipeline(StandardScaler(with_mean=False), _RidgeGCV())\n\nIf you wish to pass a sample_weight parameter, you need to pass it as a fit parameter to each step of the pipeline as follows:\n\nkwargs = {s[0] + '__sample_weight': sample_weight for s in model.steps}\nmodel.fit(X, y, **kwargs)\n\n\n FutureWarning,\n"
],
[
"scores",
"_____no_output_____"
]
],
[
[
"As we can see the optimal parameter is num_kernels = 100, with the an accuracy of 0.8.",
"_____no_output_____"
],
[
"### Saving the Model for Deployment\n",
"_____no_output_____"
]
],
[
[
"# Create and train a new model instance.\nrocket = Rocket(num_kernels=100, random_state=111) \nrocket.fit(X_train)\n\nX_train_transform = rocket.transform(X_train)\nX_train_transform.shape\n\nclassifier = RidgeClassifierCV(alphas=np.logspace(-3, 3, 10), normalize=True)\nclassifier.fit(X_train_transform, y_train)\n\nX_test_transform = rocket.transform(X_test)\nprint(classifier.score(X_test_transform,y_test))",
"/usr/local/lib/python3.7/dist-packages/sklearn/linear_model/_ridge.py:2334: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples, ), for example using ravel().\n y = column_or_1d(y, warn=True)\n/usr/local/lib/python3.7/dist-packages/sklearn/linear_model/_base.py:145: FutureWarning: 'normalize' was deprecated in version 1.0 and will be removed in 1.2.\nIf you wish to scale the data, use Pipeline with a StandardScaler in a preprocessing stage. To reproduce the previous behavior:\n\nfrom sklearn.pipeline import make_pipeline\n\nmodel = make_pipeline(StandardScaler(with_mean=False), _RidgeGCV())\n\nIf you wish to pass a sample_weight parameter, you need to pass it as a fit parameter to each step of the pipeline as follows:\n\nkwargs = {s[0] + '__sample_weight': sample_weight for s in model.steps}\nmodel.fit(X, y, **kwargs)\n\n\n FutureWarning,\n"
],
[
"from pickle import dump\n# save the model\ndump(rocket, open('rocket_new.pkl', 'wb'))\ndump(classifier, open('classifier_new.pkl', 'wb'))",
"_____no_output_____"
]
],
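For completeness, a minimal sketch of loading the two saved artifacts back at inference time is shown below; the file names match the dump calls above, and X_test stands in for whatever new actigraphy series would arrive in production.

```python
from pickle import load

# Reload the saved transform and classifier and score a batch of activity series.
with open('rocket_new.pkl', 'rb') as f:
    rocket_loaded = load(f)
with open('classifier_new.pkl', 'rb') as f:
    classifier_loaded = load(f)

X_new_transform = rocket_loaded.transform(X_test)          # X_test stands in for new data
predictions = classifier_loaded.predict(X_new_transform)   # 1 = depressed, 0 = control
print(predictions)
```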
[
[
"### Extra Models",
"_____no_output_____"
]
],
[
[
"from sktime.classification.dictionary_based import ContractableBOSS",
"_____no_output_____"
],
[
"Shape = ContractableBOSS()\nShape.fit(X_train,y_train[0])\n\nShape.score(X_test,y_test)",
"_____no_output_____"
],
[
"from sktime.classification.interval_based import RandomIntervalSpectralForest",
"_____no_output_____"
],
[
"nums = np.arange(100,501,50)\nscores = []\nfor i in nums:\n # Create and train a new model instance.\n Rise = RandomIntervalSpectralForest(n_estimators = i)\n Rise.fit(X_train,y_train[0]) \n\n score = Rise.score(X_test,y_test)\n scores.append(score)\n print(score)",
"0.6\n0.6\n0.6\n0.6\n0.6\n0.6\n0.6\n0.6\n0.6\n"
],
[
"",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
]
|
ec6ad30b74357bf914c9c9748a6237e7d380e8ac | 20,708 | ipynb | Jupyter Notebook | scripts/Testing-emo-bert-final.ipynb | glhuilli/srt-analysis | 4ae633d7c7867ca6875b08c9db4bc2896ac506b3 | [
"MIT"
]
| 12 | 2019-04-15T13:35:45.000Z | 2022-02-15T13:15:20.000Z | scripts/Testing-emo-bert-final.ipynb | glhuilli/srt-analysis | 4ae633d7c7867ca6875b08c9db4bc2896ac506b3 | [
"MIT"
]
| 11 | 2020-01-23T19:12:00.000Z | 2022-02-17T10:39:16.000Z | scripts/Testing-emo-bert-final.ipynb | glhuilli/srt-analysis | 4ae633d7c7867ca6875b08c9db4bc2896ac506b3 | [
"MIT"
]
| 10 | 2020-09-07T03:08:54.000Z | 2021-04-08T03:45:30.000Z | 100.038647 | 9,032 | 0.732809 | [
[
[
"# EmoBert Evaluation \n\n\nThis notebook shows a quick evaluation of the EmoBert model trained in `Build-emotions-BERT-model-final.ipynb`.",
"_____no_output_____"
]
],
[
[
"import pickle\nfrom limbic.emotion.models.bert_limbic_model.bert_limbic_model import BertLimbicModel\n",
"_____no_output_____"
],
[
"bert_path = '../data/distill_bert'\nmodel_path = '../data/models/emo_bert_model_2021-01-10.bin'\n \nbert_model = BertLimbicModel(model_path, bert_path)",
"Some weights of the model checkpoint at ../data/distill_bert were not used when initializing BertModel: ['distilbert.embeddings.word_embeddings.weight', 'distilbert.embeddings.position_embeddings.weight', 'distilbert.embeddings.LayerNorm.weight', 'distilbert.embeddings.LayerNorm.bias', 'distilbert.transformer.layer.0.attention.q_lin.weight', 'distilbert.transformer.layer.0.attention.q_lin.bias', 'distilbert.transformer.layer.0.attention.k_lin.weight', 'distilbert.transformer.layer.0.attention.k_lin.bias', 'distilbert.transformer.layer.0.attention.v_lin.weight', 'distilbert.transformer.layer.0.attention.v_lin.bias', 'distilbert.transformer.layer.0.attention.out_lin.weight', 'distilbert.transformer.layer.0.attention.out_lin.bias', 'distilbert.transformer.layer.0.sa_layer_norm.weight', 'distilbert.transformer.layer.0.sa_layer_norm.bias', 'distilbert.transformer.layer.0.ffn.lin1.weight', 'distilbert.transformer.layer.0.ffn.lin1.bias', 'distilbert.transformer.layer.0.ffn.lin2.weight', 'distilbert.transformer.layer.0.ffn.lin2.bias', 'distilbert.transformer.layer.0.output_layer_norm.weight', 'distilbert.transformer.layer.0.output_layer_norm.bias', 'distilbert.transformer.layer.1.attention.q_lin.weight', 'distilbert.transformer.layer.1.attention.q_lin.bias', 'distilbert.transformer.layer.1.attention.k_lin.weight', 'distilbert.transformer.layer.1.attention.k_lin.bias', 'distilbert.transformer.layer.1.attention.v_lin.weight', 'distilbert.transformer.layer.1.attention.v_lin.bias', 'distilbert.transformer.layer.1.attention.out_lin.weight', 'distilbert.transformer.layer.1.attention.out_lin.bias', 'distilbert.transformer.layer.1.sa_layer_norm.weight', 'distilbert.transformer.layer.1.sa_layer_norm.bias', 'distilbert.transformer.layer.1.ffn.lin1.weight', 'distilbert.transformer.layer.1.ffn.lin1.bias', 'distilbert.transformer.layer.1.ffn.lin2.weight', 'distilbert.transformer.layer.1.ffn.lin2.bias', 'distilbert.transformer.layer.1.output_layer_norm.weight', 'distilbert.transformer.layer.1.output_layer_norm.bias', 'distilbert.transformer.layer.2.attention.q_lin.weight', 'distilbert.transformer.layer.2.attention.q_lin.bias', 'distilbert.transformer.layer.2.attention.k_lin.weight', 'distilbert.transformer.layer.2.attention.k_lin.bias', 'distilbert.transformer.layer.2.attention.v_lin.weight', 'distilbert.transformer.layer.2.attention.v_lin.bias', 'distilbert.transformer.layer.2.attention.out_lin.weight', 'distilbert.transformer.layer.2.attention.out_lin.bias', 'distilbert.transformer.layer.2.sa_layer_norm.weight', 'distilbert.transformer.layer.2.sa_layer_norm.bias', 'distilbert.transformer.layer.2.ffn.lin1.weight', 'distilbert.transformer.layer.2.ffn.lin1.bias', 'distilbert.transformer.layer.2.ffn.lin2.weight', 'distilbert.transformer.layer.2.ffn.lin2.bias', 'distilbert.transformer.layer.2.output_layer_norm.weight', 'distilbert.transformer.layer.2.output_layer_norm.bias', 'distilbert.transformer.layer.3.attention.q_lin.weight', 'distilbert.transformer.layer.3.attention.q_lin.bias', 'distilbert.transformer.layer.3.attention.k_lin.weight', 'distilbert.transformer.layer.3.attention.k_lin.bias', 'distilbert.transformer.layer.3.attention.v_lin.weight', 'distilbert.transformer.layer.3.attention.v_lin.bias', 'distilbert.transformer.layer.3.attention.out_lin.weight', 'distilbert.transformer.layer.3.attention.out_lin.bias', 'distilbert.transformer.layer.3.sa_layer_norm.weight', 'distilbert.transformer.layer.3.sa_layer_norm.bias', 'distilbert.transformer.layer.3.ffn.lin1.weight', 
'distilbert.transformer.layer.3.ffn.lin1.bias', 'distilbert.transformer.layer.3.ffn.lin2.weight', 'distilbert.transformer.layer.3.ffn.lin2.bias', 'distilbert.transformer.layer.3.output_layer_norm.weight', 'distilbert.transformer.layer.3.output_layer_norm.bias', 'distilbert.transformer.layer.4.attention.q_lin.weight', 'distilbert.transformer.layer.4.attention.q_lin.bias', 'distilbert.transformer.layer.4.attention.k_lin.weight', 'distilbert.transformer.layer.4.attention.k_lin.bias', 'distilbert.transformer.layer.4.attention.v_lin.weight', 'distilbert.transformer.layer.4.attention.v_lin.bias', 'distilbert.transformer.layer.4.attention.out_lin.weight', 'distilbert.transformer.layer.4.attention.out_lin.bias', 'distilbert.transformer.layer.4.sa_layer_norm.weight', 'distilbert.transformer.layer.4.sa_layer_norm.bias', 'distilbert.transformer.layer.4.ffn.lin1.weight', 'distilbert.transformer.layer.4.ffn.lin1.bias', 'distilbert.transformer.layer.4.ffn.lin2.weight', 'distilbert.transformer.layer.4.ffn.lin2.bias', 'distilbert.transformer.layer.4.output_layer_norm.weight', 'distilbert.transformer.layer.4.output_layer_norm.bias', 'distilbert.transformer.layer.5.attention.q_lin.weight', 'distilbert.transformer.layer.5.attention.q_lin.bias', 'distilbert.transformer.layer.5.attention.k_lin.weight', 'distilbert.transformer.layer.5.attention.k_lin.bias', 'distilbert.transformer.layer.5.attention.v_lin.weight', 'distilbert.transformer.layer.5.attention.v_lin.bias', 'distilbert.transformer.layer.5.attention.out_lin.weight', 'distilbert.transformer.layer.5.attention.out_lin.bias', 'distilbert.transformer.layer.5.sa_layer_norm.weight', 'distilbert.transformer.layer.5.sa_layer_norm.bias', 'distilbert.transformer.layer.5.ffn.lin1.weight', 'distilbert.transformer.layer.5.ffn.lin1.bias', 'distilbert.transformer.layer.5.ffn.lin2.weight', 'distilbert.transformer.layer.5.ffn.lin2.bias', 'distilbert.transformer.layer.5.output_layer_norm.weight', 'distilbert.transformer.layer.5.output_layer_norm.bias', 'vocab_transform.weight', 'vocab_transform.bias', 'vocab_layer_norm.weight', 'vocab_layer_norm.bias', 'vocab_projector.weight', 'vocab_projector.bias']\n- This IS expected if you are initializing BertModel from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPretraining model).\n- This IS NOT expected if you are initializing BertModel from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\nSome weights of BertModel were not initialized from the model checkpoint at ../data/distill_bert and are newly initialized: ['embeddings.word_embeddings.weight', 'embeddings.position_embeddings.weight', 'embeddings.token_type_embeddings.weight', 'embeddings.LayerNorm.weight', 'embeddings.LayerNorm.bias', 'encoder.layer.0.attention.self.query.weight', 'encoder.layer.0.attention.self.query.bias', 'encoder.layer.0.attention.self.key.weight', 'encoder.layer.0.attention.self.key.bias', 'encoder.layer.0.attention.self.value.weight', 'encoder.layer.0.attention.self.value.bias', 'encoder.layer.0.attention.output.dense.weight', 'encoder.layer.0.attention.output.dense.bias', 'encoder.layer.0.attention.output.LayerNorm.weight', 'encoder.layer.0.attention.output.LayerNorm.bias', 'encoder.layer.0.intermediate.dense.weight', 'encoder.layer.0.intermediate.dense.bias', 'encoder.layer.0.output.dense.weight', 'encoder.layer.0.output.dense.bias', 'encoder.layer.0.output.LayerNorm.weight', 'encoder.layer.0.output.LayerNorm.bias', 'encoder.layer.1.attention.self.query.weight', 'encoder.layer.1.attention.self.query.bias', 'encoder.layer.1.attention.self.key.weight', 'encoder.layer.1.attention.self.key.bias', 'encoder.layer.1.attention.self.value.weight', 'encoder.layer.1.attention.self.value.bias', 'encoder.layer.1.attention.output.dense.weight', 'encoder.layer.1.attention.output.dense.bias', 'encoder.layer.1.attention.output.LayerNorm.weight', 'encoder.layer.1.attention.output.LayerNorm.bias', 'encoder.layer.1.intermediate.dense.weight', 'encoder.layer.1.intermediate.dense.bias', 'encoder.layer.1.output.dense.weight', 'encoder.layer.1.output.dense.bias', 'encoder.layer.1.output.LayerNorm.weight', 'encoder.layer.1.output.LayerNorm.bias', 'encoder.layer.2.attention.self.query.weight', 'encoder.layer.2.attention.self.query.bias', 'encoder.layer.2.attention.self.key.weight', 'encoder.layer.2.attention.self.key.bias', 'encoder.layer.2.attention.self.value.weight', 'encoder.layer.2.attention.self.value.bias', 'encoder.layer.2.attention.output.dense.weight', 'encoder.layer.2.attention.output.dense.bias', 'encoder.layer.2.attention.output.LayerNorm.weight', 'encoder.layer.2.attention.output.LayerNorm.bias', 'encoder.layer.2.intermediate.dense.weight', 'encoder.layer.2.intermediate.dense.bias', 'encoder.layer.2.output.dense.weight', 'encoder.layer.2.output.dense.bias', 'encoder.layer.2.output.LayerNorm.weight', 'encoder.layer.2.output.LayerNorm.bias', 'encoder.layer.3.attention.self.query.weight', 'encoder.layer.3.attention.self.query.bias', 'encoder.layer.3.attention.self.key.weight', 'encoder.layer.3.attention.self.key.bias', 'encoder.layer.3.attention.self.value.weight', 'encoder.layer.3.attention.self.value.bias', 'encoder.layer.3.attention.output.dense.weight', 'encoder.layer.3.attention.output.dense.bias', 'encoder.layer.3.attention.output.LayerNorm.weight', 'encoder.layer.3.attention.output.LayerNorm.bias', 'encoder.layer.3.intermediate.dense.weight', 'encoder.layer.3.intermediate.dense.bias', 'encoder.layer.3.output.dense.weight', 'encoder.layer.3.output.dense.bias', 'encoder.layer.3.output.LayerNorm.weight', 'encoder.layer.3.output.LayerNorm.bias', 'encoder.layer.4.attention.self.query.weight', 
'encoder.layer.4.attention.self.query.bias', 'encoder.layer.4.attention.self.key.weight', 'encoder.layer.4.attention.self.key.bias', 'encoder.layer.4.attention.self.value.weight', 'encoder.layer.4.attention.self.value.bias', 'encoder.layer.4.attention.output.dense.weight', 'encoder.layer.4.attention.output.dense.bias', 'encoder.layer.4.attention.output.LayerNorm.weight', 'encoder.layer.4.attention.output.LayerNorm.bias', 'encoder.layer.4.intermediate.dense.weight', 'encoder.layer.4.intermediate.dense.bias', 'encoder.layer.4.output.dense.weight', 'encoder.layer.4.output.dense.bias', 'encoder.layer.4.output.LayerNorm.weight', 'encoder.layer.4.output.LayerNorm.bias', 'encoder.layer.5.attention.self.query.weight', 'encoder.layer.5.attention.self.query.bias', 'encoder.layer.5.attention.self.key.weight', 'encoder.layer.5.attention.self.key.bias', 'encoder.layer.5.attention.self.value.weight', 'encoder.layer.5.attention.self.value.bias', 'encoder.layer.5.attention.output.dense.weight', 'encoder.layer.5.attention.output.dense.bias', 'encoder.layer.5.attention.output.LayerNorm.weight', 'encoder.layer.5.attention.output.LayerNorm.bias', 'encoder.layer.5.intermediate.dense.weight', 'encoder.layer.5.intermediate.dense.bias', 'encoder.layer.5.output.dense.weight', 'encoder.layer.5.output.dense.bias', 'encoder.layer.5.output.LayerNorm.weight', 'encoder.layer.5.output.LayerNorm.bias', 'encoder.layer.6.attention.self.query.weight', 'encoder.layer.6.attention.self.query.bias', 'encoder.layer.6.attention.self.key.weight', 'encoder.layer.6.attention.self.key.bias', 'encoder.layer.6.attention.self.value.weight', 'encoder.layer.6.attention.self.value.bias', 'encoder.layer.6.attention.output.dense.weight', 'encoder.layer.6.attention.output.dense.bias', 'encoder.layer.6.attention.output.LayerNorm.weight', 'encoder.layer.6.attention.output.LayerNorm.bias', 'encoder.layer.6.intermediate.dense.weight', 'encoder.layer.6.intermediate.dense.bias', 'encoder.layer.6.output.dense.weight', 'encoder.layer.6.output.dense.bias', 'encoder.layer.6.output.LayerNorm.weight', 'encoder.layer.6.output.LayerNorm.bias', 'encoder.layer.7.attention.self.query.weight', 'encoder.layer.7.attention.self.query.bias', 'encoder.layer.7.attention.self.key.weight', 'encoder.layer.7.attention.self.key.bias', 'encoder.layer.7.attention.self.value.weight', 'encoder.layer.7.attention.self.value.bias', 'encoder.layer.7.attention.output.dense.weight', 'encoder.layer.7.attention.output.dense.bias', 'encoder.layer.7.attention.output.LayerNorm.weight', 'encoder.layer.7.attention.output.LayerNorm.bias', 'encoder.layer.7.intermediate.dense.weight', 'encoder.layer.7.intermediate.dense.bias', 'encoder.layer.7.output.dense.weight', 'encoder.layer.7.output.dense.bias', 'encoder.layer.7.output.LayerNorm.weight', 'encoder.layer.7.output.LayerNorm.bias', 'encoder.layer.8.attention.self.query.weight', 'encoder.layer.8.attention.self.query.bias', 'encoder.layer.8.attention.self.key.weight', 'encoder.layer.8.attention.self.key.bias', 'encoder.layer.8.attention.self.value.weight', 'encoder.layer.8.attention.self.value.bias', 'encoder.layer.8.attention.output.dense.weight', 'encoder.layer.8.attention.output.dense.bias', 'encoder.layer.8.attention.output.LayerNorm.weight', 'encoder.layer.8.attention.output.LayerNorm.bias', 'encoder.layer.8.intermediate.dense.weight', 'encoder.layer.8.intermediate.dense.bias', 'encoder.layer.8.output.dense.weight', 'encoder.layer.8.output.dense.bias', 'encoder.layer.8.output.LayerNorm.weight', 'encoder.layer.8.output.LayerNorm.bias', 
'encoder.layer.9.attention.self.query.weight', 'encoder.layer.9.attention.self.query.bias', 'encoder.layer.9.attention.self.key.weight', 'encoder.layer.9.attention.self.key.bias', 'encoder.layer.9.attention.self.value.weight', 'encoder.layer.9.attention.self.value.bias', 'encoder.layer.9.attention.output.dense.weight', 'encoder.layer.9.attention.output.dense.bias', 'encoder.layer.9.attention.output.LayerNorm.weight', 'encoder.layer.9.attention.output.LayerNorm.bias', 'encoder.layer.9.intermediate.dense.weight', 'encoder.layer.9.intermediate.dense.bias', 'encoder.layer.9.output.dense.weight', 'encoder.layer.9.output.dense.bias', 'encoder.layer.9.output.LayerNorm.weight', 'encoder.layer.9.output.LayerNorm.bias', 'encoder.layer.10.attention.self.query.weight', 'encoder.layer.10.attention.self.query.bias', 'encoder.layer.10.attention.self.key.weight', 'encoder.layer.10.attention.self.key.bias', 'encoder.layer.10.attention.self.value.weight', 'encoder.layer.10.attention.self.value.bias', 'encoder.layer.10.attention.output.dense.weight', 'encoder.layer.10.attention.output.dense.bias', 'encoder.layer.10.attention.output.LayerNorm.weight', 'encoder.layer.10.attention.output.LayerNorm.bias', 'encoder.layer.10.intermediate.dense.weight', 'encoder.layer.10.intermediate.dense.bias', 'encoder.layer.10.output.dense.weight', 'encoder.layer.10.output.dense.bias', 'encoder.layer.10.output.LayerNorm.weight', 'encoder.layer.10.output.LayerNorm.bias', 'encoder.layer.11.attention.self.query.weight', 'encoder.layer.11.attention.self.query.bias', 'encoder.layer.11.attention.self.key.weight', 'encoder.layer.11.attention.self.key.bias', 'encoder.layer.11.attention.self.value.weight', 'encoder.layer.11.attention.self.value.bias', 'encoder.layer.11.attention.output.dense.weight', 'encoder.layer.11.attention.output.dense.bias', 'encoder.layer.11.attention.output.LayerNorm.weight', 'encoder.layer.11.attention.output.LayerNorm.bias', 'encoder.layer.11.intermediate.dense.weight', 'encoder.layer.11.intermediate.dense.bias', 'encoder.layer.11.output.dense.weight', 'encoder.layer.11.output.dense.bias', 'encoder.layer.11.output.LayerNorm.weight', 'encoder.layer.11.output.LayerNorm.bias', 'pooler.dense.weight', 'pooler.dense.bias']\nYou should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n"
],
[
"bert_model.get_sentence_emotions('I have a lot of joy')",
"_____no_output_____"
],
[
"import pandas as pd\nimport tensorflow as tf\n\nfrom limbic.limbic_constants import AFFECT_INTENSITY_EMOTIONS as EMOTIONS\nfrom limbic.emotion.models.tf_limbic_model import utils\n\nSENTENCE_EMOTIONS_TEST_FILE = '../data/sentence_emotions_test.pickle' \nCONTINUES_TO_BINARY_THRESHOLD = 0.5\n\n\ndef load_data_file(file_path):\n data = pd.read_pickle(file_path)\n data_sentences = data['text']\n y_data = data[EMOTIONS].values\n y_data_labeled = utils.continuous_labels_to_binary(y_data, CONTINUES_TO_BINARY_THRESHOLD) \n \n return data, y_data, y_data_labeled, data_sentences\n\ntest, y_test, y_test_labeled, test_sentences = load_data_file(SENTENCE_EMOTIONS_TEST_FILE)\n\nprint(f'test shape: {test.shape}')",
"test shape: (19085, 5)\n"
],
[
"from tqdm.notebook import tqdm \n\n\ny_pred_bert = []\nfor sentence in tqdm(test_sentences):\n prediction = bert_model.predict(sentence)\n y_pred_bert.append(prediction)",
"_____no_output_____"
],
[
"import numpy as np\nfrom sklearn.metrics import classification_report\n\ny_pred_bert_labeled = utils.continuous_labels_to_binary(np.array([list(x) for x in y_pred_bert]), 0.2)\nprint(classification_report(y_test_labeled, y_pred_bert_labeled, target_names=EMOTIONS))\n",
" precision recall f1-score support\n\n sadness 0.14 0.54 0.22 2430\n joy 0.24 0.83 0.38 3603\n fear 0.16 0.65 0.26 2684\n anger 0.09 0.42 0.15 1783\n\n micro avg 0.17 0.65 0.26 10500\n macro avg 0.16 0.61 0.25 10500\nweighted avg 0.17 0.65 0.27 10500\n samples avg 0.32 0.26 0.27 10500\n\n"
]
]
]
| [
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
ec6adb3dad16f9a1192f260e53ea2084cccfd1b1 | 8,537 | ipynb | Jupyter Notebook | notebooks/var-benchmark/main.ipynb | resslerruntime/notebooks | df367280de00208cef893334c1a359e33ad537f7 | [
"Apache-2.0"
]
| 1 | 2021-08-22T08:03:17.000Z | 2021-08-22T08:03:17.000Z | notebooks/var-benchmark/main.ipynb | GauriVaidya/notebooks | d97f87bf851397788c2d5953595de458ba832f75 | [
"Apache-2.0"
]
| null | null | null | notebooks/var-benchmark/main.ipynb | GauriVaidya/notebooks | d97f87bf851397788c2d5953595de458ba832f75 | [
"Apache-2.0"
]
| null | null | null | 23.582873 | 419 | 0.481551 | [
[
[
"# atoti Value-at-Risk benchmark\n\nThis notebook is a benchmark template used to expose atoti performances over ClickHouse by reproducing a benchmark detailed in a [blog post by Altinity on calculating Value-at-Risk (VaR)](https://altinity.com/blog/clickhouse-vs-redshift-performance-for-fintech-risk-management). Check out the [full ](https://www.activeviam.com/blog/atoti-for-risk-management-aggregation-comparison-with-the-clickhouse-benchmark/)",
"_____no_output_____"
],
[
" \n<div style=\"text-align:center\"><a href=\"https://www.atoti.io/?utm_source=gallery&utm_content=var-benchmark\" target=\"_blank\" rel=\"noopener noreferrer\"><img src=\"https://data.atoti.io/notebooks/banners/discover.png\" alt=\"atoti\" /></a></div>",
"_____no_output_____"
]
],
[
[
"import time\n\nimport atoti",
"_____no_output_____"
],
[
"session = atoti.create_session(\n \"RiskBenchmark\", config={\"java_options\": [\"-XX:MaxDirectMemorySize=12G\"]}\n)",
"_____no_output_____"
],
[
"# Load risk dataset\ndataset = session.read_parquet(\n \"dataset.parquet\", keys=[\"index\"], partitioning=\"hash32(index)\"\n)",
"_____no_output_____"
],
[
"cube = session.create_cube(dataset)",
"_____no_output_____"
],
[
"cube.schema",
"_____no_output_____"
],
[
"# Disable the aggregate cache\ncube.aggregates_cache.capacity = -1",
"_____no_output_____"
],
[
"# Define vector aggregation and value at risk\nh, m, l = cube.hierarchies, cube.measures, cube.levels\n\nm[\"PnlVector\"] = atoti.agg.sum(dataset[\"arrFloat\"])\nm[\"ValueAtRisk\"] = atoti.array.quantile(m[\"PnlVector\"], q=0.95)",
"_____no_output_____"
],
[
"# Create a hierarchy to access vector elements\ncube.create_parameter_hierarchy_from_members(\"Scenarios\", list(range(0, 1000)))\nm[\"PnL at index\"] = m[\"PnlVector\"][l[\"Scenarios\"]]",
"_____no_output_____"
]
],
[
[
"Creating hierarchies for numerical columns required for querying.",
"_____no_output_____"
]
],
[
[
"h[\"int10\"] = [dataset[\"int10\"]]\nh[\"int11\"] = [dataset[\"int11\"]]\nh[\"int12\"] = [dataset[\"int12\"]]\nh[\"int13\"] = [dataset[\"int13\"]]",
"_____no_output_____"
],
[
"session.link()",
"_____no_output_____"
],
[
"# Number of facts in the cube\ncube.query(m[\"contributors.COUNT\"])",
"_____no_output_____"
]
],
[
[
"### Query Benchmark",
"_____no_output_____"
]
],
[
[
"def Q1():\n return cube.query(m[\"ValueAtRisk\"], levels=[l[\"str0\"]], mode=\"raw\")\n\n\ndef Q2():\n return cube.query(\n m[\"ValueAtRisk\"],\n levels=[\n l[\"str0\"],\n l[\"str1\"],\n l[\"int10\"],\n l[\"int11\"],\n l[\"dttime10\"],\n l[\"dttime11\"],\n ],\n mode=\"raw\",\n )\n\n\ndef Q3():\n return cube.query(\n m[\"ValueAtRisk\"],\n levels=[\n l[\"str0\"],\n l[\"str1\"],\n l[\"str2\"],\n l[\"str3\"],\n l[\"int10\"],\n l[\"int11\"],\n l[\"int12\"],\n l[\"int13\"],\n l[\"dttime10\"],\n l[\"dttime11\"],\n l[\"dttime12\"],\n l[\"dttime13\"],\n ],\n mode=\"raw\",\n )\n\n\ndef Q4():\n return cube.query(\n m[\"PnL at index\"],\n levels=[l[\"str0\"], l[\"Scenarios\"]],\n condition=(l[\"str1\"] == \"KzORBHFRuFFOQm\"),\n mode=\"raw\",\n )\n\n\n# Iterations\niterations = 10\n\n\ndef benchmark(name, task):\n # Warmup\n for w in range(10):\n task()\n\n start = time.time()\n for i in range(iterations):\n task()\n elapsed = time.time() - start\n\n print(f\"{name} average query time {round(1000*elapsed/iterations)} ms\")",
"_____no_output_____"
],
[
"benchmark(\"Q1\", Q1)",
"_____no_output_____"
],
[
"benchmark(\"Q2\", Q2)",
"_____no_output_____"
],
[
"benchmark(\"Q3\", Q3)",
"_____no_output_____"
],
[
"benchmark(\"Q4\", Q4)",
"_____no_output_____"
]
],
[
[
"### Launch queries manually",
"_____no_output_____"
]
],
[
[
"%%time\nQ1()",
"_____no_output_____"
],
[
"%%time\nQ2()",
"_____no_output_____"
],
[
"%%time\nQ3()",
"_____no_output_____"
],
[
"%%time\nQ4()",
"_____no_output_____"
]
],
[
[
" \n<div style=\"text-align:center\"><a href=\"https://www.atoti.io/?utm_source=gallery&utm_content=var-benchmark\" target=\"_blank\" rel=\"noopener noreferrer\"><img src=\"https://data.atoti.io/notebooks/banners/discover-try.png\" alt=\"atoti\" /></a></div>",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
]
]
|
ec6af3b93bfb590dfcef48906730b33f75d65681 | 9,630 | ipynb | Jupyter Notebook | gs_quant/examples/01_pricing_and_risk/00_rates/010001_swap_trade_construction.ipynb | jeamick/gs-quant | a61dd2866dafc8453949391e900f9bf1ce2ad52e | [
"Apache-2.0"
]
| null | null | null | gs_quant/examples/01_pricing_and_risk/00_rates/010001_swap_trade_construction.ipynb | jeamick/gs-quant | a61dd2866dafc8453949391e900f9bf1ce2ad52e | [
"Apache-2.0"
]
| null | null | null | gs_quant/examples/01_pricing_and_risk/00_rates/010001_swap_trade_construction.ipynb | jeamick/gs-quant | a61dd2866dafc8453949391e900f9bf1ce2ad52e | [
"Apache-2.0"
]
| null | null | null | 31.165049 | 130 | 0.634268 | [
[
[
"from gs_quant.session import Environment, GsSession\nfrom gs_quant.common import PayReceive, Currency, DayCountFraction, BusinessDayConvention\nfrom gs_quant.target.common import SwapClearingHouse\nfrom gs_quant.instrument import IRSwap\nfrom gs_quant.markets.portfolio import Portfolio\nfrom datetime import date",
"_____no_output_____"
],
[
"# external users should substitute their client id and secret; please skip this step if you are using internal jupyterhub\nGsSession.use(Environment.PROD, client_id=None, client_secret=None, scopes=('run_analytics',))",
"_____no_output_____"
],
[
"# get list of properties of an interest rate swap\nIRSwap.properties()",
"_____no_output_____"
],
[
"swaps = Portfolio()\n\n# you don't need to specify any parameters to get a valid trade. All properties have defaults\nswaps.append(IRSwap())",
"_____no_output_____"
],
[
"# pay_or_receive can be a string of 'pay' or 'receive' or the PayReceive enum \n# and relates to paying or receiving the fixed leg. defaults to a receiver swap\nswaps.append(IRSwap(pay_or_receive=PayReceive.Pay))\nswaps.append(IRSwap(pay_or_receive='Pay'))",
"_____no_output_____"
],
[
"# termination_date is the end date of the swap. It may be a tenor relative to effective_date or a datetime.date.\n# defaults to 10y\nswaps.append(IRSwap(termination_date=date(2025, 11, 12)))\nswaps.append(IRSwap(termination_date='1y'))",
"_____no_output_____"
],
[
"# notional currency may be a string or the Currency enum. defaults to USD\nswaps.append(IRSwap(notional_currency=Currency.USD))\nswaps.append(IRSwap(notional_currency='EUR'))",
"_____no_output_____"
],
[
"# the effective date is the start date of the swap and may be a tenor relative \n# to the active PricingContext.pricing_date or a datetime.date, default is pricing date\nswaps.append(IRSwap(effective_date='1y'))\nswaps.append(IRSwap(effective_date=date(2019, 11, 12)))",
"_____no_output_____"
],
[
"# fixed_rate is the interest rate on the fixed leg of the swap. Defaults to Par Rate (ATM). \n# Can be expressed as 'ATM', 'ATM+25' for 25bp above par, a-100 for 100bp below par, 0.01 for 1%\nswaps.append(IRSwap(fixed_rate='ATM'))\nswaps.append(IRSwap(fixed_rate='ATM+50'))\nswaps.append(IRSwap(fixed_rate='a-100'))\nswaps.append(IRSwap(fixed_rate=0.01))",
"_____no_output_____"
],
[
"# floating_rate_for_the_initial_calculation_period sets the first fixing on the trade. \n# It should be a float in absolute terms so 0.0075 is 75bp. Defaults to the value derived from fwd curve\nswaps.append(IRSwap(floating_rate_for_the_initial_calculation_period=0.0075))",
"_____no_output_____"
],
[
"# floating rate option is the index that is being observed, defaults to LIBOR style index for each ccy, \n# 'OIS' will give the default overnight index for the notional ccy\nswaps.append(IRSwap(notional_currency='USD', floating_rate_option='USD-ISDA-SWAP RATE'))\nswaps.append(IRSwap(notional_currency='USD', floating_rate_option='USD-LIBOR-BBA'))\nswaps.append(IRSwap(notional_currency='EUR', floating_rate_option='EUR-EONIA-OIS-COMPOUND'))\nswaps.append(IRSwap(notional_currency='GBP', floating_rate_option='OIS'))",
"_____no_output_____"
],
[
"# floating_rate_designated_maturity is the index term. defaults to the frequency of the floating leg\nswaps.append(IRSwap(notional_currency='GBP', floating_rate_designated_maturity='3m'))",
"_____no_output_____"
],
[
"# floating_rate_spread is a float spread over the index. eg. pay euribor + 1%. defaults to 0\nswaps.append(IRSwap(pay_or_receive='receive', notional_currency='EUR', floating_rate_spread=0.01))",
"_____no_output_____"
],
[
"# floating_rate_frequency is the accrual frequency of the floating leg defined as a tenor. \n# It will drive the floating_rate_designated_maturity if that has not been independently set. \n# Defaults to ccy/tenor market standard defaults\nswaps.append(IRSwap(floating_rate_frequency='6m'))",
"_____no_output_____"
],
[
"# floating_rate_day_count_fraction can be the enum used here or a string. defaults to ccy market standard defaults\nswaps.append(IRSwap(floating_rate_day_count_fraction=DayCountFraction.ACT_OVER_365_ISDA))\nswaps.append(IRSwap(floating_rate_day_count_fraction=DayCountFraction._30E_OVER_360))\nswaps.append(IRSwap(floating_rate_day_count_fraction='30/360'))\nswaps.append(IRSwap(floating_rate_day_count_fraction='ACT/360'))",
"_____no_output_____"
],
[
"# floating_rate_business_day_convention can be the enum used here a the equivalent string\nswaps.append(IRSwap(floating_rate_business_day_convention=BusinessDayConvention.Following))\nswaps.append(IRSwap(floating_rate_business_day_convention='Modified Following'))",
"_____no_output_____"
],
[
"# fee is an amount paid. A positive fee will have a negative impact on the PV. Defaults to 0\nswaps.append(IRSwap(fee=50000))",
"_____no_output_____"
],
[
"# you can specify fee currency and fee date. trades where the fee is paid in a different currency to the\n# notional currency are supported. Default fee currency is notional currency\n# fee date can be a datetime.date or a tenor default fee date is current PricingContext.pricing_date\nswaps.append(IRSwap(notional_currency=Currency.GBP, fee=50000, fee_currency=Currency.GBP, fee_payment_date='1y'))\nswaps.append(IRSwap(notional_currency=Currency.GBP, fee=1e5, fee_currency=Currency.USD, fee_payment_date=date(2020, 1, 30)))",
"_____no_output_____"
],
[
"# valid clearinghouses are held in the SwapClearingHouse enum\nswaps.append(IRSwap(clearing_house=SwapClearingHouse.LCH))\nswaps.append(IRSwap(clearing_house=SwapClearingHouse.EUREX))\nswaps.append(IRSwap(clearing_house='CME'))",
"_____no_output_____"
],
[
"# you can specify a name for a trade. This has no economic effect but is useful when extracting results\n# from a portfolio object\nswaps.append(IRSwap(PayReceive.Receive, '5y', 'gbp', name='GBP5y'))",
"_____no_output_____"
],
[
"swaps.price()\n",
"_____no_output_____"
],
[
"# you can express a swap as a dictionary\nswap = IRSwap(termination_date='10y', notional_currency='EUR', fixed_rate='ATM+50')\nswap_dict = swap.as_dict()\nswap_dict",
"_____no_output_____"
],
[
"# and you can construct a swap from a dictionary\nnew_swap = IRSwap.from_dict(swap_dict)",
"_____no_output_____"
],
[
"swap = IRSwap(effective_date='1y')\nswap.resolve()\nswap.as_dict()",
"_____no_output_____"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
ec6b108f2f88243b0f0384b0d176aba5729e5a7d | 255,759 | ipynb | Jupyter Notebook | Assignment6_JOCSON.ipynb | domsjcsn/Linear-ALgebra---Python | 9f43c5cc6f0484cbe9a0d57c82685222a0242313 | [
"Apache-2.0"
]
| null | null | null | Assignment6_JOCSON.ipynb | domsjcsn/Linear-ALgebra---Python | 9f43c5cc6f0484cbe9a0d57c82685222a0242313 | [
"Apache-2.0"
]
| null | null | null | Assignment6_JOCSON.ipynb | domsjcsn/Linear-ALgebra---Python | 9f43c5cc6f0484cbe9a0d57c82685222a0242313 | [
"Apache-2.0"
]
| null | null | null | 112.520458 | 33,619 | 0.838739 | [
[
[
"<a href=\"https://colab.research.google.com/github/domsjcsn/Linear-ALgebra---Python/blob/main/Assignment6_JOCSON.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"# ***Linear Algebra for ECE***",
"_____no_output_____"
],
[
"### Laboratory 6: Matrix Operations\nNow that you have a fundamental knowledge about representing and operating with vectors as well as the fundamentals of matrices, we'll try to the same operations with matrices and even more",
"_____no_output_____"
],
[
"## **Objectives**\n\nAt the end of this activity you will be able to:\n1. Be familiar with the fundamental matrix operations.\n2. Apply the operations to solve intermediate equations.\n3. Apply matrix algebra in engineering solutions.",
"_____no_output_____"
],
[
"## Discussion",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline",
"_____no_output_____"
]
],
[
[
"## Transposition\nOne of the fundamental operations in matrix algebra is Transposition. The transpose of a matrix is done by flipping the values of its elements over its diagonals. With this, the rows and columns from the original matrix will be switched. So for a matrix $A$ its transpose is denoted as $A^T$. So for example:",
"_____no_output_____"
],
[
"$$A = \\begin{bmatrix} 1 & 2 & 5\\\\5 & -1 &0 \\\\ 0 & -3 & 3\\end{bmatrix} $$",
"_____no_output_____"
],
[
"$$ A^T = \\begin{bmatrix} 1 & 5 & 0\\\\2 & -1 &-3 \\\\ 5 & 0 & 3\\end{bmatrix}$$",
"_____no_output_____"
],
[
"This can now be achieved programmatically by using np.transpose() or by using the T method.",
"_____no_output_____"
]
],
[
[
"A = np.array([\n [1 ,2, 5],\n [5, -1, 0],\n [0, -3, 3]\n])\nA",
"_____no_output_____"
],
[
"AT1 = np.transpose(A)\nAT1",
"_____no_output_____"
],
[
"AT2 = A.T\nAT2",
"_____no_output_____"
],
[
"np.array_equiv(AT1, AT2)",
"_____no_output_____"
],
[
"B = np.array([\n [1,2,3,4],\n [1,0,2,1],\n])\nB.shape",
"_____no_output_____"
],
[
"np.transpose(B).shape",
"_____no_output_____"
],
[
"B.T.shape",
"_____no_output_____"
]
],
[
[
"Try to create your own matrix (you can try non-squares) to test transposition.",
"_____no_output_____"
]
],
[
[
"doms = np.array([\n [3,2,1],\n [7,6,5]\n\n])\ndoms.shape",
"_____no_output_____"
],
[
"np.transpose(doms).shape",
"_____no_output_____"
],
[
"doms.T.shape",
"_____no_output_____"
],
[
"domsT = doms.T\ndoms.T",
"_____no_output_____"
]
],
[
[
"## Dot Product / Inner Product",
"_____no_output_____"
],
[
"If you recall the dot product from laboratory activity before, we will try to implement the same operation with matrices. In matrix dot product we are going to get the sum of products of the vectors by row-column pairs. So if we have two matrices $X$ and $Y$:\n\n$$X = \\begin{bmatrix}x_{(0,0)}&x_{(0,1)}\\\\ x_{(1,0)}&x_{(1,1)}\\end{bmatrix}, Y = \\begin{bmatrix}y_{(0,0)}&y_{(0,1)}\\\\ y_{(1,0)}&y_{(1,1)}\\end{bmatrix}$$\n\nThe dot product will then be computed as:\n$$X \\cdot Y= \\begin{bmatrix} x_{(0,0)}*y_{(0,0)} + x_{(0,1)}*y_{(1,0)} & x_{(0,0)}*y_{(0,1)} + x_{(0,1)}*y_{(1,1)} \\\\ x_{(1,0)}*y_{(0,0)} + x_{(1,1)}*y_{(1,0)} & x_{(1,0)}*y_{(0,1)} + x_{(1,1)}*y_{(1,1)}\n\\end{bmatrix}$$\n\nSo if we assign values to $X$ and $Y$:\n$$X = \\begin{bmatrix}1&2\\\\ 0&1\\end{bmatrix}, Y = \\begin{bmatrix}-1&0\\\\ 2&2\\end{bmatrix}$$",
"_____no_output_____"
],
[
"$$X \\cdot Y= \\begin{bmatrix} 1*-1 + 2*2 & 1*0 + 2*2 \\\\ 0*-1 + 1*2 & 0*0 + 1*2 \\end{bmatrix} = \\begin{bmatrix} 3 & 4 \\\\2 & 2 \\end{bmatrix}$$\nThis could be achieved programmatically using `np.dot()`, `np.matmul()` or the `@` operator.",
"_____no_output_____"
]
],
[
[
"X = np.array([\n [1,2],\n [0,1]\n])\nY = np.array([\n [-1,0],\n [2,2]\n])",
"_____no_output_____"
],
[
"np.dot(X,Y)",
"_____no_output_____"
],
[
"X.dot(Y)",
"_____no_output_____"
],
[
"X @ Y",
"_____no_output_____"
],
[
"np.matmul(X,Y)",
"_____no_output_____"
]
],
[
[
"### Sample",
"_____no_output_____"
]
],
[
[
"doms1 = np.array([\n [3,2,1],\n [6,5,4],\n [9,8,7] \n])\n\ndoms2 = np.array([\n [7,8,9],\n [4,5,6],\n [1,2,3] \n])\nnp.dot(doms1,doms2)",
"_____no_output_____"
],
[
"doms1.dot(doms2)",
"_____no_output_____"
],
[
"doms1 @ doms2",
"_____no_output_____"
],
[
"np.matmul(doms1,doms2)",
"_____no_output_____"
]
],
[
[
"In matrix dot products there are additional rules compared with vector dot products. Since vector dot products were just in one dimension there are less restrictions. Since now we are dealing with Rank 2 vectors we need to consider some rules:\n\n### Rule 1: The inner dimensions of the two matrices in question must be the same. \n\nSo given a matrix $A$ with a shape of $(a,b)$ where $a$ and $b$ are any integers. If we want to do a dot product between $A$ and another matrix $B$, then matrix $B$ should have a shape of $(b,c)$ where $b$ and $c$ are any integers. So for given the following matrices:\n\n$$A = \\begin{bmatrix}2&4\\\\5&-2\\\\0&1\\end{bmatrix}, B = \\begin{bmatrix}1&1\\\\3&3\\\\-1&-2\\end{bmatrix}, C = \\begin{bmatrix}0&1&1\\\\1&1&2\\end{bmatrix}$$\n\nSo in this case $A$ has a shape of $(3,2)$, $B$ has a shape of $(3,2)$ and $C$ has a shape of $(2,3)$. So the only matrix pairs that is eligible to perform dot product is matrices $A \\cdot C$, or $B \\cdot C$. ",
"_____no_output_____"
]
],
[
[
"A = np.array([\n [2,4],\n [5,-2],\n [0,1]\n])\nB = np.array([\n [1,1],\n [3,3],\n [-1,-2]\n])\nC = np.array([\n [0,1,1],\n [1,1,2]\n])\nprint(A.shape)\nprint(B.shape)\nprint(A.shape)",
"(3, 2)\n(3, 2)\n(3, 2)\n"
],
[
"A @ C",
"_____no_output_____"
],
[
"B @ C",
"_____no_output_____"
]
],
[
[
"If you would notice the shape of the dot product changed and its shape is not the same as any of the matrices we used. The shape of a dot product is actually derived from the shapes of the matrices used. So recall matrix $A$ with a shape of $(a,b)$ and matrix $B$ with a shape of $(b,c)$, $A \\cdot B$ should have a shape $(a,c)$.",
"_____no_output_____"
]
],
[
[
"A @ B.T",
"_____no_output_____"
],
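[
"# Added sketch (not from the original notebook): confirm the shape rule (a,b) dot (b,c) -> (a,c)\n# using the matrices A (3,2) and C (2,3) defined above.\nprint((A @ C).shape)\nprint((C @ A).shape)",
"_____no_output_____"
],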
[
"X = np.array([\n [1,2,3,0]\n])\nY = np.array([\n [1,0,4,-1]\n])\nprint(X.shape)\nprint(Y.shape)",
"(1, 4)\n(1, 4)\n"
],
[
"Y.T @ X",
"_____no_output_____"
],
[
"X @ Y.T",
"_____no_output_____"
]
],
[
[
"And youcan see that when you try to multiply A and B, it returns `ValueError` pertaining to matrix shape mismatch.",
"_____no_output_____"
],
[
"### Rule 2: Dot Product has special properties\n\nDot products are prevalent in matrix algebra, this implies that it has several unique properties and it should be considered when formulation solutions:\n 1. $A \\cdot B \\neq B \\cdot A$\n 2. $A \\cdot (B \\cdot C) = (A \\cdot B) \\cdot C$\n 3. $A\\cdot(B+C) = A\\cdot B + A\\cdot C$\n 4. $(B+C)\\cdot A = B\\cdot A + C\\cdot A$\n 5. $A\\cdot I = A$\n 6. $A\\cdot \\emptyset = \\emptyset$ ",
"_____no_output_____"
]
],
[
[
"A = np.array([\n [4,3,1],\n [5,3,5],\n [5,0,4]\n])\nB = np.array([\n [1,2,3],\n [4,3,1],\n [9,1,1]\n])\nC = np.array([\n [2,3,6],\n [5,7,8],\n [1,2,1]\n])",
"_____no_output_____"
],
[
"np.array_equal(A@B, B@A)",
"_____no_output_____"
],
[
"E = A @ (B @ C)\nE",
"_____no_output_____"
],
[
"F = (A @ B) @ C\nF",
"_____no_output_____"
],
[
"np.array_equiv(E, F)",
"_____no_output_____"
],
[
"AB = np.dot(A,B)\nAB",
"_____no_output_____"
],
[
"BA = np.dot(B,A)\nBA",
"_____no_output_____"
],
[
"np.array_equiv(AB,BA)",
"_____no_output_____"
],
[
"BC= np.dot(B,C)\nBC",
"_____no_output_____"
],
[
"ABC= np.dot(A,BC)\nABC",
"_____no_output_____"
],
[
"ABC1 = np.dot(AB,C)\nABC1",
"_____no_output_____"
],
[
"np.identity(3)",
"_____no_output_____"
],
[
"A.dot(np.eye(3))",
"_____no_output_____"
]
],
[
[
"## Determinant",
"_____no_output_____"
],
[
"A determinant is a scalar value derived from a square matrix. The determinant is a fundamental and important value used in matrix algebra. Although it will not be evident in this laboratory on how it can be used practically, but it will be reatly used in future lessons.\n\nThe determinant of some matrix $A$ is denoted as $det(A)$ or $|A|$. So let's say $A$ is represented as:\n$$A = \\begin{bmatrix}a_{(0,0)}&a_{(0,1)}\\\\a_{(1,0)}&a_{(1,1)}\\end{bmatrix}$$\nWe can compute for the determinant as:\n$$|A| = a_{(0,0)}*a_{(1,1)} - a_{(1,0)}*a_{(0,1)}$$\nSo if we have $A$ as:\n$$A = \\begin{bmatrix}1&4\\\\0&3\\end{bmatrix}, |A| = 3$$\n\nBut you might wonder how about square matrices beyond the shape $(2,2)$? We can approach this problem by using several methods such as co-factor expansion and the minors method. This can be taught in the lecture of the laboratory but we can achieve the strenuous computation of high-dimensional matrices programmatically using Python. We can achieve this by using `np.linalg.det()`.",
"_____no_output_____"
]
],
[
[
"A = np.array([\n [1,4],\n [0,3]\n])\nnp.linalg.det(A)",
"_____no_output_____"
],
[
"B = np.array([\n [1, 5, 2],\n [3, -1 ,-1],\n [0, -2, 1]\n])\nnp.linalg.det(B)",
"_____no_output_____"
],
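[
"# Added cross-check (not part of the original notebook): cofactor expansion of det(B) along the\n# first row, compared against np.linalg.det. Uses the 3x3 matrix B from the previous cell.\ndef det2(a, b, c, d):  # determinant of the 2x2 matrix [[a, b], [c, d]]\n    return a * d - b * c\n\nmanual = 1 * det2(-1, -1, -2, 1) - 5 * det2(3, -1, 0, 1) + 2 * det2(3, -1, 0, -2)\nprint(manual, np.linalg.det(B))",
"_____no_output_____"
],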
[
"## Now other mathematics classes would require you to solve this by hand, \n## and that is great for practicing your memorization and coordination skills \n## but in this class we aim for simplicity and speed so we'll use programming\n## but it's completely fine if you want to try to solve this one by hand.\nB = np.array([\n [1,3,5,6],\n [0,3,1,3],\n [3,1,8,2],\n [5,2,6,8]\n])\nnp.linalg.det(B)",
"_____no_output_____"
]
],
[
[
"## ***Inverse***\nThe inverse of a matrix is another fundamental operation in matrix algebra. Determining the inverse of a matrix let us determine if its solvability and its characteristic as a system of linear equation — we'll expand on this in the nect module. Another use of the inverse matrix is solving the problem of divisibility between matrices. Although element-wise division exists but dividing the entire concept of matrices does not exists. Inverse matrices provides a related operation that could have the same concept of \"dividing\" matrices.\n\nNow to determine the inverse of a matrix we need to perform several steps. So let's say we have a matrix $M$:\n$$M = \\begin{bmatrix}1&7\\\\-3&5\\end{bmatrix}$$\nFirst, we need to get the determinant of $M$.\n$$|M| = (1)(5)-(-3)(7) = 26$$\nNext, we need to reform the matrix into the inverse form:\n$$M^{-1} = \\frac{1}{|M|} \\begin{bmatrix} m_{(1,1)} & -m_{(0,1)} \\\\ -m_{(1,0)} & m_{(0,0)}\\end{bmatrix}$$\nSo that will be:\n$$M^{-1} = \\frac{1}{26} \\begin{bmatrix} 5 & -7 \\\\ 3 & 1\\end{bmatrix} = \\begin{bmatrix} \\frac{5}{26} & \\frac{-7}{26} \\\\ \\frac{3}{26} & \\frac{1}{26}\\end{bmatrix}$$\nFor higher-dimension matrices you might need to use co-factors, minors, adjugates, and other reduction techinques. To solve this programmatially we can use `np.linalg.inv()`.",
"_____no_output_____"
]
],
[
[
"M = np.array([\n [1,7],\n [-3, 5]\n])\n\nnp.array(M @ np.linalg.inv(M), dtype=int)",
"_____no_output_____"
],
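[
"# Added check (not from the original notebook): np.linalg.inv(M) should match the hand-derived\n# inverse (1/26) * [[5, -7], [3, 1]] shown in the markdown above.\nM_inv_manual = np.array([[5, -7], [3, 1]]) / 26\nprint(np.allclose(np.linalg.inv(M), M_inv_manual))",
"_____no_output_____"
],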
[
"P = np.array([\n [6, 9, 0],\n [4, 2, -1],\n [3, 6, 7]\n])\nQ = np.linalg.inv(P)\nQ",
"_____no_output_____"
],
[
"P @ Q",
"_____no_output_____"
],
[
"## And now let's test your skills in solving a matrix with high dimensions:\nN = np.array([\n [18,5,23,1,0,33,5],\n [0,45,0,11,2,4,2],\n [5,9,20,0,0,0,3],\n [1,6,4,4,8,43,1],\n [8,6,8,7,1,6,1],\n [-5,15,2,0,0,6,-30],\n [-2,-5,1,2,1,20,12],\n])\nN_inv = np.linalg.inv(N)\nnp.array(N @ N_inv,dtype=int)",
"_____no_output_____"
]
],
[
[
"To validate the wether if the matric that you have solved is really the inverse, we follow this dot product property for a matrix $M$:\n$$M\\cdot M^{-1} = I$$",
"_____no_output_____"
]
],
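[
[
"# Added sketch (not part of the original notebook): verify the property M . M^-1 = I\n# for the 2x2 matrix M used earlier in this notebook.\nM_check = np.array([\n    [1, 7],\n    [-3, 5]\n])\nprint(np.allclose(M_check @ np.linalg.inv(M_check), np.eye(2)))",
"_____no_output_____"
]
],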
[
[
"squad = np.array([\n [1.0, 1.0, 0.5],\n [0.7, 0.7, 0.9],\n [0.3, 0.3, 1.0]\n])\nweights = np.array([\n [0.2, 0.2, 0.6]\n])\np_grade = squad @ weights.T\np_grade\n",
"_____no_output_____"
]
],
[
[
"# ***ACTIVITY***",
"_____no_output_____"
],
[
"## **Task 1**\nProve and implement the remaining 6 matrix multiplication properties. You may create your own matrices in which their shapes should not be lower than (3,3) . In your methodology, create individual flowcharts for each property and discuss the property you would then present your proofs or validity of your implementation in the results section by comparing your result to present functions from NumPy.",
"_____no_output_____"
]
],
[
[
"A = np.array([\n [6,9,6],\n [0,4,2],\n [8,1,1] \n])\n\nB = np.array([\n [5,2,4],\n [7,1,3],\n [5,4,4] \n])\n\nC = np.array([\n [7,6,8],\n [1,3,4],\n [5,5,3] \n\n])",
"_____no_output_____"
],
[
"#1 \nA1 = A@B\nA1",
"_____no_output_____"
],
[
"B1 = B@A\nB1",
"_____no_output_____"
],
[
"np.array_equal(A1,B1)",
"_____no_output_____"
]
],
[
[
"",
"_____no_output_____"
]
],
[
[
"#2\nA2 = A @ (B @ C)\nA2",
"_____no_output_____"
],
[
"B2 = (A @ B) @ C\nB2",
"_____no_output_____"
],
[
"np.array_equal(A2,B2)",
"_____no_output_____"
]
],
[
[
"",
"_____no_output_____"
]
],
[
[
"#3\nA3 = A @ (B + C)\nA3",
"_____no_output_____"
],
[
"B3 = A @ B + A @ C\nB3",
"_____no_output_____"
],
[
"np.array_equal(A3,B3)",
"_____no_output_____"
]
],
[
[
"",
"_____no_output_____"
]
],
[
[
"#4\nA4 = (B + C) @ A\nA4",
"_____no_output_____"
],
[
"B4 = B @ A + C @ A\nB4",
"_____no_output_____"
],
[
"np.array_equal(A4,B4)",
"_____no_output_____"
]
],
[
[
"",
"_____no_output_____"
]
],
[
[
"#5\nA5 = A@B5\nA5",
"_____no_output_____"
],
[
"B5 = np.eye(3)\nB5",
"_____no_output_____"
],
[
"np.array_equal(A5,B5)",
"_____no_output_____"
]
],
[
[
"",
"_____no_output_____"
]
],
[
[
"#6\nA6 = np.zeros(A.shape)\nA6",
"_____no_output_____"
],
[
"B6 = A.dot(np.zeros(A.shape))\nB6",
"_____no_output_____"
],
[
"np.array_equal(A6,B6)",
"_____no_output_____"
]
],
[
[
"",
"_____no_output_____"
],
[
"## ***CONCLUSION***\nFor your conclusion synthesize the concept and application of the laboratory. Briefly discuss what you have learned and achieved in this activity.",
"_____no_output_____"
],
[
"***The students found out that this laboratory experiment and our last experiment are alike. Both experiment talks about matrixes and operations. But the difference is that this laboratory experiment emphasized Dot Product, Determinant, Transposition, and Inverse. Through this laboratory experiment, the students are able to prove the six given equations given regarding the topics that our professor discussed to us. Finally, this activity made the students' understanding of matrices and their operations broader.***",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
]
]
|
ec6b132dfadafbde6edeb3b3e171f3e84f6d1caa | 21,947 | ipynb | Jupyter Notebook | 2-networkx-basics-student.ipynb | mariaclaudia/Network-Analysis-Made-Simple | 5d6951a9e9f3177b43339e749b216ecd7d22be85 | [
"MIT"
]
| 1 | 2019-05-03T12:53:53.000Z | 2019-05-03T12:53:53.000Z | 2-networkx-basics-student.ipynb | skielosky/Network-Analysis-Made-Simple | 18486724b9e28c2a54a625cbe8de5009bdd96a75 | [
"MIT"
]
| null | null | null | 2-networkx-basics-student.ipynb | skielosky/Network-Analysis-Made-Simple | 18486724b9e28c2a54a625cbe8de5009bdd96a75 | [
"MIT"
]
| 1 | 2020-12-28T11:04:15.000Z | 2020-12-28T11:04:15.000Z | 25.94208 | 486 | 0.570739 | [
[
[
"import networkx as nx\nfrom datetime import datetime\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport warnings\nfrom custom import load_data as cf\n\nwarnings.filterwarnings('ignore')\n\n%load_ext autoreload\n%autoreload 2\n%matplotlib inline\n%config InlineBackend.figure_format = 'retina'",
"_____no_output_____"
]
],
[
[
"# Nodes and Edges: How do we represent relationships between individuals using NetworkX?\n\nAs mentioned earlier, networks, also known as graphs, are comprised of individual entities and their representatives. The technical term for these are nodes and edges, and when we draw them we typically use circles (nodes) and lines (edges). \n\nIn this notebook, we will work with a social network of seventh graders, in which nodes are individual students, and edges represent their relationships. Edges between individuals show how often the seventh graders indicated other seventh graders as their favourite.\n\nData credit: http://konect.uni-koblenz.de/networks/moreno_seventh",
"_____no_output_____"
],
[
"## Data Representation\n\nIn the `networkx` implementation, graph objects store their data in dictionaries. \n\nNodes are part of the attribute `Graph.node`, which is a dictionary where the key is the node ID and the values are a dictionary of attributes. \n\nEdges are part of the attribute `Graph.edge`, which is a nested dictionary. Data are accessed as such: `G.edge[node1][node2]['attr_name']`.\n\nBecause of the dictionary implementation of the graph, any hashable object can be a node. This means strings and tuples, but not lists and sets.",
"_____no_output_____"
],
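[
"As a small illustrative sketch (not part of the original notebook), and mirroring the accessors used later in this notebook, attribute access looks like this (the node IDs and attribute names here are placeholders):\n\n    G.node[1]['gender']       # attribute dictionary lookup on a node\n    G.edges[1, 2]['count']    # attribute lookup on the edge between nodes 1 and 2",
"_____no_output_____"
],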
[
"## Load Data\n\nLet's load some real network data to get a feel for the NetworkX API. This [dataset](http://konect.uni-koblenz.de/networks/moreno_seventh) comes from a study of 7th grade students.\n\n> This directed network contains proximity ratings between studetns from 29 seventh grade students from a school in Victoria. Among other questions the students were asked to nominate their preferred classmates for three different activities. A node represents a student. An edge between two nodes shows that the left student picked the right student as his answer. The edge weights are between 1 and 3 and show how often the left student chose the right student as his favourite.",
"_____no_output_____"
]
],
[
[
"G = cf.load_seventh_grader_network()",
"_____no_output_____"
]
],
[
[
"# Basic Network Statistics\nLet's first understand how many students and friendships are represented in the network.",
"_____no_output_____"
]
],
[
[
"# Who are represented in the network?\nG.nodes()",
"_____no_output_____"
]
],
[
[
"**API Note:** As of NetworkX 2.0, to select subset of nodes, you have to cast `G.nodes()` as a list, i.e. `list(G.nodes())[0:10]`",
"_____no_output_____"
],
[
"### Exercise\n\nCan you write a single line of code that returns the number of nodes in the graph? (1 min.)",
"_____no_output_____"
],
[
"Let's now figure out who is connected to who in the network",
"_____no_output_____"
]
],
[
[
"# Who is connected to who in the network?\nG.edges()",
"_____no_output_____"
]
],
[
[
"**API Note:** As of NetworkX 2.0, to select subset of edges, you have to cast `G.edges()` as a list, i.e. `list(G.edges())[0:10]`",
"_____no_output_____"
],
[
"### Exercise\n\nCan you write a single line of code that returns the number of relationships represented? (1 min.)",
"_____no_output_____"
],
[
"## Concept\n\nA **network**, more technically known as a **graph**, is comprised of:\n\n- a set of nodes\n- joined by a set of edges\n\nThey can be represented as two lists:\n\n1. A **node list**: a list of 2-tuples where the first element of each tuple is the representation of the node, and the second element is a dictionary of metadata associated with the node.\n2. An **edge list**: a list of 3-tuples where the first two elements are the nodes that are connected together, and the third element is a dictionary of metadata associated with the edge.",
"_____no_output_____"
],
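[
"As a minimal illustrative sketch (not from the original notebook), those two lists can be fed straight into a graph constructor; the people and counts below are invented:\n\n    node_list = [('alice', {'gender': 'female'}), ('bob', {'gender': 'male'})]\n    edge_list = [('alice', 'bob', {'count': 2})]\n\n    G_small = nx.DiGraph()\n    G_small.add_nodes_from(node_list)\n    G_small.add_edges_from(edge_list)",
"_____no_output_____"
],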
[
"Since this is a social network of people, there'll be attributes for each individual, such as a student's gender. We can grab that data off from the attributes that are stored with each node.",
"_____no_output_____"
]
],
[
[
"# Let's get a list of nodes with their attributes.\nlist(G.nodes(data=True))\n\n# NetworkX will return a list of tuples in the form (node_id, attribute_dictionary) ",
"_____no_output_____"
]
],
[
[
"### Exercise\n\nCan you count how many males and females are represented in the graph? (3 min.)\n\nHint: You may want to use the Counter object from the collections module.",
"_____no_output_____"
]
],
[
[
"from collections import Counter\nmf_counts = Counter([d['_________'] for _, _ in G._____(data=_____)])\n\ndef test_answer(mf_counts):\n assert mf_counts['female'] == 17\n assert mf_counts['male'] == 12\n \ntest_answer(mf_counts)",
"_____no_output_____"
]
],
[
[
"Edges can also store attributes in their attribute dictionary.",
"_____no_output_____"
]
],
[
[
"G.edges(data=True)",
"_____no_output_____"
]
],
[
[
"In this synthetic social network, the number of times the left student indicated that the right student was their favourite is stored in the \"count\" variable.",
"_____no_output_____"
],
[
"### Exercise\n\nCan you figure out the maximum times any student rated another student as their favourite? (3 min.)",
"_____no_output_____"
]
],
[
[
"# Answer\ncounts = [d['_____'] for _, _, _ in G._______(_________)]\nmaxcount = max(_________)\n\ndef test_maxcount(maxcount):\n assert maxcount == 3\n \ntest_maxcount(maxcount)",
"_____no_output_____"
]
],
[
[
"### Exercise\n\nWe found out that there are two individuals that we left out of the network, individual no. 30 and 31. They are one male (30) and one female (31), and they are a pair that just love hanging out with one another and with individual 7 (`count=3`), in both directions per pair. Add this information to the graph. (5 min.)\n\nIf you need more help, check out https://networkx.readthedocs.io/en/stable/tutorial/index.html",
"_____no_output_____"
]
],
[
[
"# Answer: Follow the coding pattern.\nG.add_node(30, gender='male')\n\n\nG.add_edge(30, 31, count=3)\n",
"_____no_output_____"
]
],
[
[
"Verify that you have added in the edges and nodes correctly by running the following cell.",
"_____no_output_____"
]
],
[
[
"def test_graph_integrity(G):\n assert 30 in G.nodes()\n assert 31 in G.nodes()\n assert G.node[30]['gender'] == 'male'\n assert G.node[31]['gender'] == 'female'\n assert G.has_edge(30, 31)\n assert G.has_edge(30, 7)\n assert G.has_edge(31, 7)\n assert G.edges[30, 7]['count'] == 3\n assert G.edges[7, 30]['count'] == 3\n assert G.edges[31, 7]['count'] == 3\n assert G.edges[7, 31]['count'] == 3\n assert G.edges[30, 31]['count'] == 3\n assert G.edges[31, 30]['count'] == 3\n print('All tests passed.')\n \ntest_graph_integrity(G)",
"_____no_output_____"
]
],
[
[
"**API Note:** Pre-NetworkX 2.0, the syntax for accessing any particular edge's metadata was `G.edge[node1][node2][key]`. \n\nPost-NetworkX 2.0, the syntax has changed to: `G.edges[node1, node2][key]`.",
"_____no_output_____"
],
[
"### Exercise (break-time)\n\nIf you would like a challenge during the break, try figuring out which students have \"unrequited\" friendships, that is, they have rated another student as their favourite at least once, but that other student has not rated them as their favourite at least once.\n\nSpecifically, get a list of edges for which the reverse edge is not present.\n\n**Hint:** You may need the class method `G.has_edge(n1, n2)`. This returns whether a graph has an edge between the nodes `n1` and `n2`.",
"_____no_output_____"
]
],
[
[
"unrequitted_friendships = []\n# Fill in your answer below.\n\n\n\nassert len(unrequitted_friendships) == 124",
"_____no_output_____"
]
],
[
[
"In a previous session at ODSC East 2018, a few other class participants attempted this problem. You can find their solutions in the Instructor version of this notebook.",
"_____no_output_____"
],
[
"## Tests\n\nA note about the tests: Testing is good practice when writing code. Well-crafted assertion statements help you program defensivel, by forcing you to explicitly state your assumptions about the code or data.\n\nFor more references on defensive programming, check out Software Carpentry's website: http://swcarpentry.github.io/python-novice-inflammation/08-defensive/\n\nFor more information on writing tests for your data, check out these slides from a lightning talk I gave at Boston Python and SciPy 2015: http://j.mp/data-test",
"_____no_output_____"
],
[
"# Coding Patterns\n\nThese are some recommended coding patterns when doing network analysis using NetworkX, which stem from my roughly two years of experience with the package.",
"_____no_output_____"
],
[
"## Iterating using List Comprehensions\nI would recommend that you use the following for compactness: \n\n [d['attr'] for n, d in G.nodes(data=True)]\n\nAnd if the node is unimportant, you can do:\n\n [d['attr'] for _, d in G.nodes(data=True)]",
"_____no_output_____"
],
[
"## Iterating over Edges using List Comprehensions\n\nA similar pattern can be used for edges:\n\n [n2 for n1, n2, d in G.edges(data=True)]\n\nor\n\n [n2 for _, n2, d in G.edges(data=True)]\n\nIf the graph you are constructing is a directed graph, with a \"source\" and \"sink\" available, then I would recommend the following pattern:\n\n [(sc, sk) for sc, sk, d in G.edges(data=True)]\n\nor \n\n [d['attr'] for sc, sk, d in G.edges(data=True)]",
"_____no_output_____"
],
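[
"For example, with the student graph used in this notebook (a minimal sketch using its `gender` node attribute and `count` edge attribute):\n\n    genders = [d['gender'] for _, d in G.nodes(data=True)]\n    counts = [d['count'] for _, _, d in G.edges(data=True)]",
"_____no_output_____"
],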
[
"# Drawing Graphs\n\nAs illustrated above, we can draw graphs using the `nx.draw()` function. The most popular format for drawing graphs is the **node-link diagram**.",
"_____no_output_____"
],
[
"## Hairballs\n\nNodes are circles and lines are edges. Nodes more tightly connected with one another are clustered together. Large graphs end up looking like hairballs.",
"_____no_output_____"
]
],
[
[
"nx.draw(G)",
"_____no_output_____"
]
],
[
[
"If the network is small enough to visualize, and the node labels are small enough to fit in a circle, then you can use the `with_labels=True` argument.",
"_____no_output_____"
]
],
[
[
"nx.draw(G, with_labels=True)",
"_____no_output_____"
]
],
[
[
"However, note that if the number of nodes in the graph gets really large, node-link diagrams can begin to look like massive hairballs. This is undesirable for graph visualization.\n\n## Matrix Plot\n\nInstead, we can use a **matrix** to represent them. The nodes are on the x- and y- axes, and a filled square represent an edge between the nodes. This is done by using the `MatrixPlot` object from `nxviz`.",
"_____no_output_____"
]
],
[
[
"from nxviz import MatrixPlot\n\nm = MatrixPlot(G)\nm.draw()\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Arc Plot\n\nThe Arc Plot is the basis of the next set of rational network visualizations.",
"_____no_output_____"
]
],
[
[
"from nxviz import ArcPlot\n\na = ArcPlot(G, node_color='gender', node_grouping='gender')\na.draw()",
"_____no_output_____"
]
],
[
[
"## Circos Plot\n\nLet's try another visualization, the **Circos plot**. We can order the nodes in the Circos plot according to the node ID, but any other ordering is possible as well. Edges are drawn between two nodes.\n\nCredit goes to Justin Zabilansky (MIT) for the implementation, Jon Charest for subsequent improvements, and `nxviz` contributors for further development.",
"_____no_output_____"
]
],
[
[
"from nxviz import CircosPlot\n\nc = CircosPlot(G, node_color='gender', node_grouping='gender')\nc.draw()\nplt.savefig('images/seventh.png', dpi=300)",
"_____no_output_____"
]
],
[
[
"This visualization helps us highlight nodes that there are poorly connected, and others that are strongly connected.\n\n## Hive Plot\n\nNext up, let's try Hive Plots. HivePlots are not yet implemented in `nxviz` just yet, so we're going to be using the old `hiveplot` API for this. When HivePlots have been migrated over to `nxviz`, its API will resemble that of the CircosPlot's. ",
"_____no_output_____"
]
],
[
[
"from hiveplot import HivePlot\n\nnodes = dict()\nnodes['male'] = [n for n,d in G.nodes(data=True) if d['gender'] == 'male']\nnodes['female'] = [n for n,d in G.nodes(data=True) if d['gender'] == 'female']\n\nedges = dict()\nedges['group1'] = G.edges(data=True)\n\nnodes_cmap = dict()\nnodes_cmap['male'] = 'blue'\nnodes_cmap['female'] = 'red'\n\nedges_cmap = dict()\nedges_cmap['group1'] = 'black'",
"_____no_output_____"
],
[
"h = HivePlot(nodes, edges, nodes_cmap, edges_cmap)\nh.draw()",
"_____no_output_____"
]
],
[
[
"Hive plots allow us to divide our nodes into sub-groups, and visualize the within- and between-group connectivity.",
"_____no_output_____"
]
]
]
| [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
]
|
ec6b158608ddde4277ca6781bbcfed54c196c255 | 34,875 | ipynb | Jupyter Notebook | 1-Lessons/Lesson05/dev_src/OriginalPowerpoint/scraps/Lab5-WorksheetOnly.ipynb | dustykat/engr-1330-psuedo-course | 3e7e31a32a1896fcb1fd82b573daa5248e465a36 | [
"CC0-1.0"
]
| null | null | null | 1-Lessons/Lesson05/dev_src/OriginalPowerpoint/scraps/Lab5-WorksheetOnly.ipynb | dustykat/engr-1330-psuedo-course | 3e7e31a32a1896fcb1fd82b573daa5248e465a36 | [
"CC0-1.0"
]
| null | null | null | 1-Lessons/Lesson05/dev_src/OriginalPowerpoint/scraps/Lab5-WorksheetOnly.ipynb | dustykat/engr-1330-psuedo-course | 3e7e31a32a1896fcb1fd82b573daa5248e465a36 | [
"CC0-1.0"
]
| null | null | null | 87.1875 | 16,764 | 0.84648 | [
[
[
"## Full name: Farhang Forghanparast\n## R#: 321654987\n## HEX: 0x132c10cb\n## Title of the notebook\n## Date: 9/3/2020",
"_____no_output_____"
],
[
"## Example\n\nCreate the AVERAGE function for three values and test it for these values:\n- 3,4,5\n- 10,100,1000\n- -5,15,5",
"_____no_output_____"
]
],
[
[
"def AVERAGE3(x,y,z) : #define the function \"AVERAGE3\"\n Ave = (x+y+z)/3 #computes the average\n return Ave",
"_____no_output_____"
],
[
"print(AVERAGE3(3,4,5))\nprint(AVERAGE3(10,100,1000))\nprint(AVERAGE3(-5,15,5))",
"4.0\n370.0\n5.0\n"
]
],
[
[
"## Example\n\nCreate the FC function to convert Fahrenhiet to Celsius and test it for these values:\n- 32\n- 15\n- 100\n\n*hint: Formula-(°F − 32) × 5/9 = °C",
"_____no_output_____"
]
],
[
[
"def FC(x) : #define the function \"AVERAGE3\"\n C = (x - 32)*5/9\n return C",
"_____no_output_____"
],
[
"print(FC(32))\nprint(FC(15))\nprint(FC(100))",
"0.0\n-9.444444444444445\n37.77777777777778\n"
]
],
[
[
"## Exercise 1\n\nCreate the function $$f(x) = e^x - 10 cos(x) - 100$$ as a function (i.e. use the `def` keyword)\n\n def name(parameters) :\n operations on parameters\n ...\n ...\n return (value, or null)\n\nThen apply your function to the value.\n\nUse your function to complete the table below:\n\n| x | f(x) |\n|---:|---:|\n| 0.0 | |\n| 1.50 | |\n| 2.00 | |\n| 2.25 | |\n| 3.0 | |\n| 4.25 | |\n",
"_____no_output_____"
],
[
"## Example \nUse the plotting script and create a function that draws a straight line between two points.",
"_____no_output_____"
]
],
[
[
"def Line():\n from matplotlib import pyplot as plt # import the plotting library from matplotlibplt.show()\n x1 = input('Please enter x value for point 1')\n y1 = input('Please enter y value for point 1')\n x2 = input('Please enter x value for point 2')\n y2 = input('Please enter y value for point 2')\n xlist = [x1,x2]\n ylist = [y1,y2]\n plt.plot( xlist, ylist, color ='orange', marker ='*', linestyle ='solid') \n #plt.title(strtitle)# add a title\n plt.ylabel(\"Y-axis\")# add a label to the x and y-axes\n plt.xlabel(\"X-axis\")\n plt.show() # display the plot\n return #null return\n",
"_____no_output_____"
],
[
"Line()",
"Please enter x value for point 1 1\nPlease enter y value for point 1 1\nPlease enter x value for point 2 2\nPlease enter y value for point 2 2\n"
]
],
[
[
"## Example- Lets have some fun! \nCopy the wrapper script for the `plotAline()` function, and modify the copy to create a plot of\n$$ x = 16sin^3(t) $$\n$$ y = 13cos(t) - 5cos(2t) - 2cos(3t) - cos(4t) $$\nfor t raging from [0,2$\\Pi$] (inclusive).\n\nLabel the plot and the plot axes.\n",
"_____no_output_____"
]
],
[
[
"from matplotlib import pyplot as plt # import the plotting library from matplotlibplt.show()\nimport numpy as np # import NumPy: for large, multi-dimensional arrays and matrices, along with high-level mathematical functions to operate on these arrays.\npi = np.pi #pi value from the np package\nt= np.linspace(0,2*pi,360)# the NumPy function np.linspace is similar to the range()\n\nx = 16*np.sin(t)**3\ny = 13*np.cos(t) - 5*np.cos(2*t) - 2*np.cos(3*t) - np.cos(4*t)\n\nplt.plot( x, y, color ='purple', marker ='.', linestyle ='solid') \nplt.ylabel(\"Y-axis\")# add a label to the x and y-axes\nplt.xlabel(\"X-axis\")\nplt.axis('equal') #sets equal axis ratios\nplt.title(\"A Hopeless Romantic's Curve\")# add a title\nplt.show() # display the plot",
"_____no_output_____"
]
],
[
[
"## Exercise 2\nCopy the wrapper script for the `plotAline()` function, and modify the copy to create a plot of\n$$ y = x^2 $$\nfor x raging from 0 to 9 (inclusive) in steps of 1.\n\nLabel the plot and the plot axes.\n",
"_____no_output_____"
],
[
"## Exercise 3 \nUse your function from Exercise 1. \n\n$$f(x) = e^x - 10 cos(x) - 100$$ \n\nAnd make a plot where $x$ ranges from 0 to 15 in increments of 0.25. Label the plot and the plot axes.",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
]
|
ec6b1b6201c193738d91cc84f1c1890173317fa1 | 21,313 | ipynb | Jupyter Notebook | tests/notebooks/data_loading.ipynb | gabmis/scVI | 02045f343a23bb5a27f1d3d2e41708a5c5914681 | [
"MIT"
]
| 1 | 2021-03-05T08:27:05.000Z | 2021-03-05T08:27:05.000Z | tests/notebooks/data_loading.ipynb | gabmis/scVI | 02045f343a23bb5a27f1d3d2e41708a5c5914681 | [
"MIT"
]
| null | null | null | tests/notebooks/data_loading.ipynb | gabmis/scVI | 02045f343a23bb5a27f1d3d2e41708a5c5914681 | [
"MIT"
]
| null | null | null | 33.24961 | 555 | 0.617698 | [
[
[
"# Data Loading Tutorial",
"_____no_output_____"
]
],
[
[
"cd ../..",
"/Users/yiningliu/research/scVI\n"
],
[
"save_path = 'data/'",
"_____no_output_____"
],
[
"from scvi.dataset import LoomDataset, CsvDataset, Dataset10X, AnnDataset\nimport urllib.request\nimport os\nfrom scvi.dataset import BrainLargeDataset, CortexDataset, PbmcDataset, RetinaDataset, HematoDataset, CbmcDataset, BrainSmallDataset, SmfishDataset",
"_____no_output_____"
]
],
[
[
"## Generic Datasets\n`scvi v0.1.3` supports dataset loading for the following three generic file formats: \n* `.loom` files\n* `.csv` files \n* `.h5ad` files\n* datasets from `10x` website \n\nMost of the dataset loading instances implemented in scvi use a positional argument `filename` and an optional argument `save_path` (value by default: `data/`). Files will be downloaded or searched for at the location `os.path.join(save_path, filename)`, make sure this path is valid when you specify the arguments.",
"_____no_output_____"
],
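[
"For instance (a small sketch; `mydata.loom` is just a hypothetical file name used for illustration):\n\n    import os\n    os.path.join('data/', 'mydata.loom')   # -> 'data/mydata.loom'",
"_____no_output_____"
],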
[
"### Loading a `.loom` file\nAny `.loom` file can be loaded with initializing `LoomDataset` with `filename`.\n\nOptional parameters: \n* `save_path`: save path (default to be `data/`) of the file\n* `url`: url the dataset if the file needs to be downloaded from the web\n* `new_n_genes`: the number of subsampling genes - set it to be `False` to turn off subsampling\n* `subset_genes`: a list of gene names for subsampling",
"_____no_output_____"
]
],
[
[
"# Loading a remote dataset \nremote_loom_dataset = LoomDataset(\"osmFISH_SScortex_mouse_all_cell.loom\", \n save_path=save_path, \n url='http://linnarssonlab.org/osmFISH/osmFISH_SScortex_mouse_all_cells.loom')",
"Downloading file at data/osmFISH_SScortex_mouse_all_cell.loom\nPreprocessing dataset\nFinished preprocessing dataset\n"
],
[
"# Loading a local dataset \nlocal_loom_dataset = LoomDataset(\"osmFISH_SScortex_mouse_all_cell.loom\", \n save_path=save_path)",
"File data/osmFISH_SScortex_mouse_all_cell.loom already downloaded\nPreprocessing dataset\nFinished preprocessing dataset\n"
]
],
[
[
"### Loading a `.csv` file \nAny `.csv` file can be loaded with initializing `CsvDataset` with `filename`.\n\nOptional parameters: \n* `save_path`: save path (default to be `data/`) of the file\n* `url`: url of the dataset if the file needs to be downloaded from the web\n* `compression`: set `compression` as `.gz`, `.bz2`, `.zip`, or `.xz` to load a zipped `csv` file \n* `new_n_genes`: the number of subsampling genes - set it to be `False` to turn off subsampling\n* `subset_genes`: a list of gene names for subsampling \n\nNote: `CsvDataset` currently only supoorts `.csv` files that are genes by cells. ",
"_____no_output_____"
],
[
"If the dataset has already been downloaded at the location `save_path`, it will not be downloaded again.",
"_____no_output_____"
]
],
[
[
"# Loading a remote dataset \nremote_csv_dataset = CsvDataset(\"GSE100866_CBMC_8K_13AB_10X-RNA_umi.csv.gz\",\n save_path=save_path, \n compression='gzip', \n url = \"https://www.ncbi.nlm.nih.gov/geo/download/?acc=GSE100866&format=file&file=GSE100866%5FCBMC%5F8K%5F13AB%5F10X%2DRNA%5Fumi%2Ecsv%2Egz\")",
"Downloading file at data/GSE100866_CBMC_8K_13AB_10X-RNA_umi.csv.gz\nPreprocessing dataset\nFinished preprocessing dataset\nDownsampling from 36280 to 600 genes\n"
],
[
"# Loading a local dataset \nlocal_csv_dataset = CsvDataset(\"GSE100866_CBMC_8K_13AB_10X-RNA_umi.csv.gz\", \n save_path=save_path, \n compression='gzip') ",
"File data/GSE100866_CBMC_8K_13AB_10X-RNA_umi.csv.gz already downloaded\nPreprocessing dataset\nFinished preprocessing dataset\nDownsampling from 36280 to 600 genes\n"
]
],
[
[
"### Loading a `.h5ad` file\n[AnnData](http://anndata.readthedocs.io/en/latest/) objects can be stored in `.h5ad` format. Any `.h5ad` file can be loaded with initializing `AnnDataset` with `filename`.\n\nOptional parameters: \n* `save_path`: save path (default to be `data/`) of the file\n* `url`: url the dataset if the file needs to be downloaded from the web\n* `new_n_genes`: the number of subsampling genes - set it to be `False` to turn off subsampling\n* `subset_genes`: a list of gene names for subsampling ",
"_____no_output_____"
]
],
[
[
"# Loading a local dataset \nlocal_ann_dataset = AnnDataset(\"TM_droplet_mat.h5ad\", \n save_path = save_path) ",
"File data/TM_droplet_mat.h5ad already downloaded\nPreprocessing dataset\nFinished preprocessing dataset\n"
]
],
[
[
"### Loading a file from `10x` website ",
"_____no_output_____"
],
[
"If the dataset has already been downloaded at the location `save_path`, it will not be downloaded again.",
"_____no_output_____"
],
[
"`10x` has published several datasets on their [website](https://www.10xgenomics.com). \nInitialize `Dataset10X` by passing in the dataset name of one of the following datasets that `scvi` currently supports: `frozen_pbmc_donor_a`, `frozen_pbmc_donor_b`, `frozen_pbmc_donor_c`, `pbmc8k`, `pbmc4k`, `t_3k`, `t_4k`, and `neuron_9k`. \n\nOptional parameters: \n* `save_path`: save path (default to be `data/`) of the file\n* `type`: set `type` (default to be `filtered`) to be `filtered` or `raw` to choose one from the two datasets that's available on `10X`\n* `new_n_genes`: the number of subsampling genes - set it to be `False` to turn off subsampling",
"_____no_output_____"
]
],
[
[
"tenX_dataset = Dataset10X(\"neuron_9k\", save_path=save_path)",
"Downloading file at data/10X/neuron_9k/filtered_gene_bc_matrices.tar.gz\nPreprocessing dataset\nExtracting tar file\nFinished preprocessing dataset\nDownsampling from 27998 to 3000 genes\n"
]
],
[
[
"### Loading local `10x` data ",
"_____no_output_____"
],
[
"It is also possible to create a Dataset object from 10X data saved locally. Initialize Dataset10X by passing in the optional remote argument as False to specify you're loading local data and give the name of the directory that contains the gene expression matrix and gene names of the data as well as the path to this directory.\nIf your data (the genes.tsv and matrix.mtx files) is located inside the directory 'mm10' which is located at 'data/10X/neuron_9k/filtered_gene_bc_matrices/'. Then filename should have the value 'mm10' and save_path should be the path to the directory containing 'mm10'.",
"_____no_output_____"
]
],
[
[
"local_10X_dataset = Dataset10X('mm10', save_path=os.path.join(save_path, '10X/neuron_9k/filtered_gene_bc_matrices/'), \n remote=False)",
"Preprocessing dataset\nFinished preprocessing dataset\n"
]
],
[
[
"## Built-In Datasets ",
"_____no_output_____"
],
[
"We've also implemented seven built-in datasets to make it easier to reproduce results from the scVI paper. \n\n* **PBMC**: 12,039 human peripheral blood mononuclear cells profiled with 10x; \n* **RETINA**: 27,499 mouse retinal bipolar neurons, profiled in two batches using the Drop-Seq technology; \n* **HEMATO**: 4,016 cells from two batches that were profiled using in-drop; \n* **CBMC**: 8,617 cord blood mononuclear cells profiled using 10x along with, for each cell, 13 well-characterized mononuclear antibodies; \n* **BRAIN SMALL**: 9,128 mouse brain cells profiled using 10x. \n* **BRAIN LARGE**: 1.3 million mouse brain cells profiled using 10x;\n* **CORTEX**: 3,005 mouse Cortex cells profiled using the Smart-seq2 protocol, with the addition of UMI\n* **SMFISH**: 4,462 mouse Cortex cells profiled using the osmFISH protocol\n* **DROPSEQ**: 71,639 mouse Cortex cells profiled using the Drop-Seq technology\n* **STARMAP**: 3,722 mouse Cortex cells profiled using the STARmap technology",
"_____no_output_____"
],
[
"### Loading `STARMAP` dataset\n`StarmapDataset` consists of 3722 cells profiled in 3 batches. The cells come with spatial coordinates of their location inside the tissue from which they were extracted and cell type labels retrieved by the authors ofthe original publication.\n\nReference: X.Wang et al., Science10.1126/science.aat5691 (2018)",
"_____no_output_____"
],
[
"### Loading `DROPSEQ` dataset\n`DropseqDataset` consists of 71,639 mouse Cortex cells profiled using the Drop-Seq technology. To facilitate comparison with other methods we use a random filtered set of 15000 cells and then keep only a filtered set of 6000 highly variable genes. Cells have cell type annotaions and even sub-cell type annotations inferred by the authors of the original publication.\n\nReference: https://www.biorxiv.org/content/biorxiv/early/2018/04/10/299081.full.pdf",
"_____no_output_____"
],
[
"### Loading `SMFISH` dataset\n`SmfishDataset` consists of 4,462 mouse cortex cells profiled using the OsmFISH protocol. The cells come with spatial coordinates of their location inside the tissue from which they were extracted and cell type labels retrieved by the authors of the original publication.\n\nReference: Simone Codeluppi, Lars E Borm, Amit Zeisel, Gioele La Manno, Josina A van Lunteren, Camilla I Svensson, and Sten Linnarsson. Spatial organization of the somatosensory cortex revealed by cyclic smFISH. bioRxiv, 2018.",
"_____no_output_____"
]
],
[
[
"smfish_dataset = SmfishDataset(save_path=save_path)",
"File data/osmFISH_SScortex_mouse_all_cell.loom already downloaded\nPreprocessing smFISH dataset\nFinished preprocessing smFISH dataset\n"
]
],
[
[
"### Loading `BRAIN-LARGE` dataset\n\n<font color='red'>Loading BRAIN-LARGE requires at least 32 GB memory!</font>\n\n`BrainLargeDataset` consists of 1.3 million mouse brain cells, spanning the cortex, hippocampus and subventricular zone, and profiled with 10x chromium. We use this dataset to demonstrate the scalability of scVI. It can be used to demonstrate the scalability of scVI. \n\nReference: 10x genomics (2017). URL https://support.10xgenomics.com/single-cell-gene-expression/datasets. ",
"_____no_output_____"
]
],
[
[
"brain_large_dataset = BrainLargeDataset(save_path=save_path) ",
"Downloading file at data/genomics.h5\nPreprocessing Brain Large data\n720 genes subsampled\n1306127 cells subsampled\nFinished preprocessing data\n"
]
],
[
[
"### Loading `CORTEX` dataset\n`CortexDataset` consists of 3,005 mouse cortex cells profiled with the Smart-seq2 protocol, with the addition of UMI. To facilitate com- parison with other methods, we use a filtered set of 558 highly variable genes. The `CortexDataset` exhibits a clear high-level subpopulation struc- ture, which has been inferred by the authors of the original publication using computational tools and annotated by inspection of specific genes or transcriptional programs. Similar levels of annotation are provided with the `PbmcDataset` and `RetinaDataset`. \n\nReference: Zeisel, A. et al. Cell types in the mouse cortex and hippocampus revealed by single-cell rna-seq. Science 347, 1138–1142 (2015). ",
"_____no_output_____"
]
],
[
[
"cortex_dataset = CortexDataset(save_path=save_path) ",
"Downloading file at data/expression.bin\nPreprocessing Cortex data\nFinished preprocessing Cortex data\n"
]
],
[
[
"### Loading `PBMC` dataset\n`PbmcDataset` consists of 12,039 human peripheral blood mononu- clear cells profiled with 10x. \n\nReference: Zheng, G. X. Y. et al. Massively parallel digital transcriptional profiling of single cells. Nature Communications 8, 14049 (2017). ",
"_____no_output_____"
]
],
[
[
"pbmc_dataset = PbmcDataset(save_path=save_path) ",
"Downloading file at data/10X/pbmc8k/filtered_gene_bc_matrices.tar.gz\nPreprocessing dataset\nExtracting tar file\nFinished preprocessing dataset\nDownsampling from 33694 to 3000 genes\nDownloading file at data/10X/pbmc4k/filtered_gene_bc_matrices.tar.gz\nPreprocessing dataset\nExtracting tar file\nFinished preprocessing dataset\nDownsampling from 33694 to 3000 genes\nKeeping 2903 genes\n"
]
],
[
[
"### Loading `RETINA` dataset \n`RetinaDataset` includes 27,499 mouse retinal bipolar neu- rons, profiled in two batches using the Drop-Seq technology. \n\nReference: Shekhar, K. et al. Comprehensive classification of retinal bipolar neurons by single-cell transcriptomics. Cell 166, 1308–1323.e30 (2017). ",
"_____no_output_____"
]
],
[
[
"retina_dataset = RetinaDataset(save_path=save_path)",
"Downloading file at data/retina.loom\nPreprocessing dataset\nFinished preprocessing dataset\n"
]
],
[
[
"### Loading `HEMATO` dataset \n`HematoDataset` includes 4,016 cells from two batches that were profiled using in-drop. This data provides a snapshot of hematopoietic progenitor cells differentiating into various lineages. We use this dataset as an example for cases where gene expression varies in a continuous fashion (along pseudo-temporal axes) rather than forming discrete subpopulations. \n\nReference: Tusi, B. K. et al. Population snapshots predict early haematopoietic and erythroid hierarchies. Nature 555, 54–60 (2018).",
"_____no_output_____"
]
],
[
[
"hemato_dataset = HematoDataset(save_path=os.path.join(save_path, 'HEMATO/')) ",
"Downloading data.zip\nDownloading file at data/HEMATO/bBM.raw_umifm_counts.csv.gz\nPreprocessing Hemato data\nFinished preprocessing Hemato data\n"
]
],
[
[
"### Loading `CBMC` dataset\n`CbmcDataset` includes 8,617 cord blood mononuclear cells pro- filed using 10x along with, for each cell, 13 well-characterized mononuclear antibodies. We used this dataset to analyze how the latent spaces inferred by dimensionality-reduction algorithms summarize protein marker abundance.\n\nReference: Stoeckius, M. et al. Simultaneous epitope and transcriptome measurement in single cells. Nature Methods 14, 865–868 (2017).",
"_____no_output_____"
]
],
[
[
"cbmc_dataset = CbmcDataset(save_path=os.path.join(save_path, \"citeSeq/\"))",
"Downloading file at data/citeSeq/cbmc/cbmc_rna.csv.gz\nDownloading file at data/citeSeq/cbmc/cbmc_adt.csv.gz\nDownloading file at data/citeSeq/cbmc/cbmc_adt_centered.csv.gz\nPreprocessing data\nSelecting only HUMAN genes (20400 / 36280)\nFinish preprocessing data\n"
]
],
[
[
"### Loading `BRAIN-SMALL` dataset\n`BrainSmallDataset` consists of 9,128 mouse brain cells profiled using 10x. This dataset is used as a complement to PBMC for our study of zero abundance and quality control metrics correlation with our generative posterior parameters.\n\nReference: ",
"_____no_output_____"
]
],
[
[
"brain_small_dataset = BrainSmallDataset(save_path=save_path)",
"File data/10X/neuron_9k/filtered_gene_bc_matrices.tar.gz already downloaded\nPreprocessing dataset\nFinished preprocessing dataset\nDownsampling from 27998 to 3000 genes\n"
],
[
"def allow_notebook_for_test():\n print(\"Testing the data loading notebook\")",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
]
|
ec6b21496f51a0f7d829de9d8db476a99e316b0f | 58,596 | ipynb | Jupyter Notebook | ClassExercises/Week5_HashingTheory/HashPerformance.ipynb | ursinus-cs371-s2022/CoursePage | f721a2208f8b951f15335a929fb4f96e53fb56db | [
"Apache-2.0"
]
| null | null | null | ClassExercises/Week5_HashingTheory/HashPerformance.ipynb | ursinus-cs371-s2022/CoursePage | f721a2208f8b951f15335a929fb4f96e53fb56db | [
"Apache-2.0"
]
| null | null | null | ClassExercises/Week5_HashingTheory/HashPerformance.ipynb | ursinus-cs371-s2022/CoursePage | f721a2208f8b951f15335a929fb4f96e53fb56db | [
"Apache-2.0"
]
| null | null | null | 272.539535 | 29,320 | 0.922452 | [
[
[
"# Words Hashing Experiment\n\nBelow are the results of plotting the average of the max number of elements in a hack bucket/bin as the number of buckets scales with the number of objects in the hash table",
"_____no_output_____"
]
],
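[
[
"For context, this matches the classic \"balls into bins\" analysis: when $n$ keys are hashed uniformly into $n$ buckets, the expected maximum bucket size grows like $\\ln(n)/\\ln\\ln(n)$ and the expected fraction of empty buckets tends to $1/e \\approx 0.37$, which is why the code below compares the measured curves against $3\\log(N)/\\log\\log(N)$ and $N/e$.",
"_____no_output_____"
]
],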
[
[
"import numpy as np\nimport scipy.io as sio\nimport matplotlib.pyplot as plt\nfrom scipy.signal import medfilt\nfrom hashtable import *\nfrom strwrapper import *\n\nnp.random.seed(0)\nfin = open(\"words.txt\")\nwords = [StrWrapper(w) for w in fin.readlines()]\nwords = [words[i] for i in np.random.permutation(len(words))]\nfin.close()\n\nnums = []\navgs = []\nmaxes = []\nnempty = []",
"_____no_output_____"
],
[
"for i in range(0, 200000, 10):\n nums.append(i)\n dictionary = HashTable(i+1)\n for w in words[0:i+1]:\n dictionary.add(w)\n lens = [dictionary.bin_len(k) for k in range(i+1)]\n maxi = np.max(lens)\n avgi = np.mean(lens)\n avgs.append(avgi)\n maxes.append(maxi)\n nempty.append(np.sum(np.array(lens) == 0))\n if i%10000 == 0:\n print(i, \".\", end=\"\")",
"0 .10000 .20000 .30000 .40000 .50000 .60000 .70000 .80000 .90000 .100000 .110000 .120000 .130000 .140000 .150000 .160000 .170000 .180000 .190000 ."
],
[
"import scipy.io as sio\nsio.savemat(\"hashexperiment.mat\", {\"nums\":nums, \"avgs\":avgs, \"maxes\":maxes, \"nempty\":nempty})",
"_____no_output_____"
],
[
"nums = np.array(nums)\ny = medfilt(maxes, 5) # Median filter\nwin = 1000\nprint(\"y.size = \", y.size)\ny = np.convolve(y, np.ones(win), 'same')/win # Running average\nN1 = 100\nN2 = min(len(maxes), len(nums))-win\nx = range(0, 200000, 10)\nplt.figure(figsize=(9, 5))\nplt.plot(x, y, linewidth=4)\nplt.plot(x, 3*np.log(x)/np.log(np.log(x)), linewidth=4, linestyle='--')\nplt.xlim([0, 180000])\nplt.xlabel(\"Number of Words\")\nplt.ylabel(\"Max Number of Words in Bin\")\nplt.legend([\"Average Per Bin\", \"$3 \\\\log(N) / \\\\log\\\\log(N)$\"])",
"y.size = 20000\n"
],
[
"y = np.convolve(nempty, np.ones(win), 'same')/win\nplt.figure(figsize=(9, 5))\nplt.plot(x, y, linewidth=4)\nplt.plot(x, x/np.exp(1))\nplt.xlabel(\"Number of Bins\")\nplt.ylabel(\"Empty Bins\")\nplt.xlim([0, 180000])\nplt.legend([\"Average Number of Empty Bins\", \"$N/e$\"])",
"_____no_output_____"
]
]
]
| [
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
]
|
ec6b22572afdcc0657a299aab27cbfef5dfe93d9 | 5,648 | ipynb | Jupyter Notebook | 4. Inventory Management System with JSON/4. Saving Record/Inventory Management System - Saving Record on JSON.ipynb | AshishJangra27/Data-Science-Live-Course-GeeksForGeeks | 4fefa9c855dd515a974ee4c0d9a41886e3c0c1f8 | [
"Apache-2.0"
]
| null | null | null | 4. Inventory Management System with JSON/4. Saving Record/Inventory Management System - Saving Record on JSON.ipynb | AshishJangra27/Data-Science-Live-Course-GeeksForGeeks | 4fefa9c855dd515a974ee4c0d9a41886e3c0c1f8 | [
"Apache-2.0"
]
| null | null | null | 4. Inventory Management System with JSON/4. Saving Record/Inventory Management System - Saving Record on JSON.ipynb | AshishJangra27/Data-Science-Live-Course-GeeksForGeeks | 4fefa9c855dd515a974ee4c0d9a41886e3c0c1f8 | [
"Apache-2.0"
]
| null | null | null | 26.148148 | 335 | 0.380489 | [
[
[
"import json\n\nrecord = {1001: {'Name': \"5 Star\" , \"Price\" : 10 , \"Qn\" : 200},\n 1002: {'Name': \"Bar-One\" , \"Price\" : 20 , \"Qn\" : 100 },\n 1003: {'Name': \"Candy\" , \"Price\" : 2 , \"Qn\" : 1000},\n 1004: {'Name': \"Chocolate Cake\" , \"Price\" : 550, \"Qn\" : 8 },\n 1005: {'Name': \"Blueberry Cake\" , \"Price\" : 650, \"Qn\" : 5 }}",
"_____no_output_____"
],
[
"print(\"--------------------MENU---------------------\")\nfor key in record.keys():\n print(key, record[key]['Name'], record[key]['Price'], record[key]['Name'])\nprint(\"---------------------------------------------\")\nprint('')\n\nui_pr = int(input(\"Enter product ID : \"))\nui_qn = int(input(\"Enter Quantiry : \"))\n\nprint(\"---------------------------------------------\")\nprint('')\n\nprint(\"Name : \", record[ui_pr][\"Name\"])\nprint(\"Price (Rs): \", record[ui_pr][\"Price\"])\nprint(\"Quantity : \", ui_qn)\nprint(\"---------------------------------------------\")\nprint(\"Billing : \", ui_qn * record[ui_pr][\"Price\"], \"Rs\")\nprint(\"---------------------------------------------\")\n\nrecord[ui_pr]['Qn'] = record[ui_pr]['Qn'] - ui_qn\n\njs = json.dumps(record)\n\nfd = open('Record.json','w')\nfd.write(js)\nfd.close()\n\nprint('')\nprint(\"---------------------------------------------\")\nprint(\" Thanks for your order, Inventory Updated! \")\nprint(\"---------------------------------------------\")",
"--------------------MENU---------------------\n(1001, '5 Star', 10, '5 Star')\n(1002, 'Bar-One', 20, 'Bar-One')\n(1003, 'Candy', 2, 'Candy')\n(1004, 'Chocolate Cake', 550, 'Chocolate Cake')\n(1005, 'Blueberry Cake', 650, 'Blueberry Cake')\n---------------------------------------------\n\nEnter product ID : 1005\nEnter Quantiry : 2\n---------------------------------------------\n\n('Name : ', 'Blueberry Cake')\n('Price (Rs): ', 650)\n('Quantity : ', 2)\n---------------------------------------------\n('Billing : ', 1300, 'Rs')\n---------------------------------------------\n\n---------------------------------------------\n Thanks for your order, Inventory Updated! \n---------------------------------------------\n"
],
[
"record",
"_____no_output_____"
],
[
"type(record)",
"_____no_output_____"
],
[
"js",
"_____no_output_____"
],
[
"type(js)",
"_____no_output_____"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
ec6b30c764798f0327484e4b9c89469d100bf02b | 94,456 | ipynb | Jupyter Notebook | resultados/8.Git.ipynb | jacsonrbinf/minicurso-mineracao-interativa | 79b479f5fa68c317902e069826977748b00bbae9 | [
"MIT"
]
| 2 | 2019-10-30T04:33:32.000Z | 2019-10-30T04:36:29.000Z | resultados/8.Git.ipynb | jacsonrbinf/minicurso-mineracao-interativa | 79b479f5fa68c317902e069826977748b00bbae9 | [
"MIT"
]
| null | null | null | resultados/8.Git.ipynb | jacsonrbinf/minicurso-mineracao-interativa | 79b479f5fa68c317902e069826977748b00bbae9 | [
"MIT"
]
| 4 | 2019-10-29T23:05:00.000Z | 2022-03-13T18:17:31.000Z | 41.555653 | 13,636 | 0.485845 | [
[
[
"Para entrar no modo apresentação, execute a seguinte célula e pressione `-`",
"_____no_output_____"
]
],
[
[
"%reload_ext slide",
"_____no_output_____"
]
],
[
[
"<span class=\"notebook-slide-start\"/>\n\n# Git\n\nEste notebook apresenta os seguintes tópicos:\n\n- [Git](#Git)\n- [Exercício 9](#Exerc%C3%ADcio-9)\n- [Pandas](#Pandas)\n- [Exercício 10](#Exerc%C3%ADcio-10)",
"_____no_output_____"
],
[
"Outra fonte de informações de um repositório de software é o repositório do sistema de controle de versões.\n\nPelo controle de versões, conseguimos ter acesso a todos os arquivos de todas as versões, todas as mensagens de commit, branches, e colaboradores.\n\nNesta parte do minicurso, faremos a mineração dessas informações.",
"_____no_output_____"
],
[
"No caso do Git, ao clonar um repositório, ficamos com uma cópia local do que está lá. Portanto, começamos a mineração com um clone e não precisamos de nenhum proxy. <span class=\"notebook-slide-extra\" data-count=\"1\"/>",
"_____no_output_____"
]
],
[
[
"!git clone https://github.com/gems-uff/sapos",
"Cloning into 'sapos'...\nremote: Enumerating objects: 22, done.\u001b[K\nremote: Counting objects: 100% (22/22), done.\u001b[K\nremote: Compressing objects: 100% (19/19), done.\u001b[K\nremote: Total 12954 (delta 4), reused 11 (delta 3), pack-reused 12932\nReceiving objects: 100% (12954/12954), 10.41 MiB | 6.43 MiB/s, done.\nResolving deltas: 100% (8011/8011), done.\n"
]
],
[
[
"Com o repositório clonado, podemos usar comandos do git para extrair informações. <span class=\"notebook-slide-extra\" data-count=\"2\"/>",
"_____no_output_____"
]
],
[
[
"%cd sapos",
"/home/joao/projects/tutorial/sapos\n"
],
[
"!git branch -r",
" \u001b[31morigin/HEAD\u001b[m -> origin/master\r\n \u001b[31morigin/bugfixes\u001b[m\r\n \u001b[31morigin/hotfixes\u001b[m\r\n \u001b[31morigin/master\u001b[m\r\n \u001b[31morigin/reports\u001b[m\r\n"
]
],
[
[
"Essas informações também podem ser obtidas para tratarmos usando variáveis do Python.\n\nA seguir tentamos descobrir qual é o commit de cada um desses branches. <span class=\"notebook-slide-extra\" data-count=\"2\"/>",
"_____no_output_____"
]
],
[
[
"git_branch_output = !git branch -r\nbranches = [\n branch.strip().split(' ')[0].split('/')[1]\n for branch in git_branch_output\n]\nbranches",
"_____no_output_____"
],
[
"branch_commit = {}\nfor branch in branches:\n __ = !git checkout $branch\n commit = !git show --pretty=format:\"%h\" --no-patch\n branch_commit[branch] = commit\n__ = !git checkout master\nbranch_commit",
"_____no_output_____"
]
],
[
[
"Usamos `__ = !...` para evitar a exibição do output do comando de sistema. O IPython imprime o output quando bang expressions são usadas isoladas e retorna o output quando elas são usadas em atribuições.\n\nNote que apenas o branch `reports` está em um commit diferente. <span class=\"notebook-slide-scroll\" data-position=\"-1\"/>",
"_____no_output_____"
],
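[
"A minimal sketch of the difference (any shell command works; `echo` is used here only for illustration):\n\n    !echo hello          # prints the output\n    out = !echo hello    # suppresses printing; `out` holds the output lines\n    out[0]               # -> 'hello'",
"_____no_output_____"
],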
[
"## Exercício 9\n\nFaça a mesma operação para obter o código dos commits de tags e salve na variável `tag_commit`. <span class=\"notebook-slide-extra\" data-count=\"1\"/>",
"_____no_output_____"
]
],
[
[
"tags = !git tag\n...",
"_____no_output_____"
]
],
[
[
"Agora vamos agrupar as tags por versões minor e ordenar as versões patch. <span class=\"notebook-slide-extra\" data-count=\"1\"/>",
"_____no_output_____"
]
],
[
[
"from itertools import groupby\ngroups = groupby(tags, lambda x: x.rsplit(\".\", 1)[0])\nminor_tags = {}\nfor minor, elements in groups:\n minor_tags[minor] = sorted(\n elements,\n key=lambda x: int(x.split('-')[0].split('.')[-1])\n )\nminor_tags['4.3']",
"_____no_output_____"
]
],
[
[
"Fazendo o mesmo para agrupar versões major. <span class=\"notebook-slide-extra\" data-count=\"1\"/>",
"_____no_output_____"
]
],
[
[
"groups = groupby(minor_tags, lambda x: x.rsplit(\".\", 1)[0])\nmajor_tags = {}\nfor major, elements in groups:\n major_tags[major] = sorted(\n elements,\n key=lambda x: int(x.split('-')[0].split('.')[-1])\n )\nmajor_tags['4']",
"_____no_output_____"
]
],
[
[
"Com isso, podemos escolher versões major (e.g., 3 e 4) e obter a última versão patch para cada minor delas. <span class=\"notebook-slide-extra\" data-count=\"1\"/>",
"_____no_output_____"
]
],
[
[
"last_patch_for_v3v4 = {\n minor: minor_tags[minor][-1]\n for minor in major_tags['3'] + major_tags['4']\n}\nlast_patch_for_v3v4",
"_____no_output_____"
]
],
[
[
"Agora queremos ver a evolução de linhas de código para as versões selecionadas. Para isso, vamos percorrer o dicionário fazendo checkout de cada versão, carregar o número de linhas usando `cloc` e parsear o resultado para extrair as colunas para construir linhas de uma tabela. <span class=\"notebook-slide-extra\" data-count=\"1\"/>",
"_____no_output_____"
]
],
[
[
"from collections import defaultdict\ncolumns = {\"id\"}\nrows = []\n\nfor minor, tag in last_patch_for_v3v4.items():\n __ = !git checkout $tag\n lines = !cloc .\n filtered_lines = lines[lines.index(\"-\" * 79) + 3:]\n commit_result = defaultdict(int)\n commit_result[\"id\"] = minor\n for line in filtered_lines:\n if not line.startswith(\"-\"):\n split = line.split()\n language = split[0]\n commit_result[language + \"_files\"] = int(split[1])\n commit_result[language + \"_blank\"] = int(split[2])\n commit_result[language + \"_comment\"] = int(split[3])\n commit_result[language + \"_code\"] = int(split[4])\n columns |= {\n language + \"_files\", language + \"_blank\",\n language + \"_comment\", language + \"_code\"\n }\n rows.append(commit_result)",
"_____no_output_____"
]
],
[
[
"## Pandas\nPodemos usar `pandas` para construir a tabela a partir da lista de dicionários. <span class=\"notebook-slide-extra\" data-count=\"1\"/>",
"_____no_output_____"
]
],
[
[
"import pandas as pd\ndf = pd.DataFrame(rows)\ndf",
"_____no_output_____"
]
],
[
[
"O `pandas` permite descrever a tabela com o método `.describe()`. <span class=\"notebook-slide-extra\" data-count=\"1\"/>",
"_____no_output_____"
]
],
[
[
"df.describe()",
"_____no_output_____"
]
],
[
[
"Além disso, é possível fazer seleções nos dados. <span class=\"notebook-slide-extra\" data-count=\"1\"/>",
"_____no_output_____"
]
],
[
[
"df[df[\"Ruby_code\"] > 25000]",
"_____no_output_____"
]
],
[
[
"## Exercício 10\n\nSelecione as versões que usam CoffeeScript e as versões que não usam XML. <span class=\"notebook-slide-extra\" data-count=\"2\"/>\n",
"_____no_output_____"
]
],
[
[
"with_coffee = ...\nwith_coffee",
"_____no_output_____"
],
[
"without_xml = ...\nwithout_xml",
"_____no_output_____"
]
],
[
[
"Além de selecionar linhas, podemos selecionar colunas. <span class=\"notebook-slide-extra\" data-count=\"1\"/>",
"_____no_output_____"
]
],
[
[
"columns = ['SUM:_files', 'SUM:_blank', 'SUM:_comment', 'SUM:_code']\nndf = df[columns]\nndf",
"_____no_output_____"
]
],
[
[
"O `pandas` também oferece algumas funções que facilitam a geração de gráficos. <span class=\"notebook-slide-extra\" data-count=\"2\"/>",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nndf.boxplot()",
"_____no_output_____"
],
[
"df.set_index(\"id\")[\"Ruby_code\"].plot()",
"_____no_output_____"
]
],
[
[
"É possível aplicar operações em colunas e criar novas colunas. <span class=\"notebook-slide-extra\" data-count=\"1\"/>",
"_____no_output_____"
]
],
[
[
"df.loc[:, \"tag\"] = df[\"id\"].apply(lambda minor: last_patch_for_v3v4[minor])",
"_____no_output_____"
],
[
"df",
"_____no_output_____"
]
],
[
[
"Existem muitas outras operações que podem ser vistas na documentação: https://pandas.pydata.org/pandas-docs/stable/. <span class=\"notebook-slide-scroll\" data-position=\"-1\"/>",
"_____no_output_____"
],
[
"Continua: [9.Pygit2.ipynb](9.Pygit2.ipynb)",
"_____no_output_____"
],
[
" \n\n \n\n \n\n \n\n \n\n \n\n \n\n \n\n \n\n \n\n \n\n \n\n \n\n \n\n \n\n \n\n \n\n \n\n \n\n \n\n \n\n\n \n\n \n\n \n\n",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
]
]
|
ec6b34ef3d4f1b532735c72ffebf52ab23c9349c | 2,007 | ipynb | Jupyter Notebook | galata/test/jupyterlab/notebooks/toc_notebook.ipynb | AmaranthosLabs/jupyterlab | b80475fbd409385eae2017fbb82c72c638fd6959 | [
"BSD-3-Clause"
]
| 11,496 | 2016-10-12T21:02:20.000Z | 2022-03-31T17:09:23.000Z | galata/test/jupyterlab/notebooks/toc_notebook.ipynb | AmaranthosLabs/jupyterlab | b80475fbd409385eae2017fbb82c72c638fd6959 | [
"BSD-3-Clause"
]
| 10,587 | 2016-10-12T21:22:34.000Z | 2022-03-31T22:44:58.000Z | galata/test/jupyterlab/notebooks/toc_notebook.ipynb | AmaranthosLabs/jupyterlab | b80475fbd409385eae2017fbb82c72c638fd6959 | [
"BSD-3-Clause"
]
| 2,612 | 2016-10-13T12:56:28.000Z | 2022-03-30T17:03:04.000Z | 16.317073 | 73 | 0.489287 | [
[
[
"# Test Notebook",
"_____no_output_____"
],
[
"## Sub title 1",
"_____no_output_____"
],
[
"### Sub sub title",
"_____no_output_____"
],
[
"## Sub title 2",
"_____no_output_____"
],
[
"### Sub sub title 1",
"_____no_output_____"
],
[
"### Sub sub title 2",
"_____no_output_____"
],
[
"## IPython: tools for interactive and parallel computing in Python.",
"_____no_output_____"
]
],
[
[
"from IPython.display import Image",
"_____no_output_____"
]
],
[
[
"## Image",
"_____no_output_____"
]
],
[
[
"Image(\"WidgetArch.png\", width=70, height=100)",
"_____no_output_____"
],
[
"2 + 2",
"_____no_output_____"
],
[
"import math\nmath.pi / 2",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
]
|
ec6b3beace1ffbf50eb88b451920198aa9f54372 | 4,992 | ipynb | Jupyter Notebook | mpcontribs-portal/notebooks/contribs.materialsproject.org/screening_inorganic_pv.ipynb | lbianchi-lbl/MPContribs | 4d2dd404dd0972fbb6f3446db0adc7521e8932fd | [
"MIT"
]
| null | null | null | mpcontribs-portal/notebooks/contribs.materialsproject.org/screening_inorganic_pv.ipynb | lbianchi-lbl/MPContribs | 4d2dd404dd0972fbb6f3446db0adc7521e8932fd | [
"MIT"
]
| null | null | null | mpcontribs-portal/notebooks/contribs.materialsproject.org/screening_inorganic_pv.ipynb | lbianchi-lbl/MPContribs | 4d2dd404dd0972fbb6f3446db0adc7521e8932fd | [
"MIT"
]
| null | null | null | 26.83871 | 99 | 0.480369 | [
[
[
"import os, json\nfrom pathlib import Path\nfrom pandas import DataFrame\nfrom mpcontribs.client import Client\nfrom unflatten import unflatten",
"_____no_output_____"
],
[
"client = Client()",
"_____no_output_____"
]
],
[
[
"**Load raw data**",
"_____no_output_____"
]
],
[
[
"name = \"screening_inorganic_pv\"\nindir = Path(\"/Users/patrick/gitrepos/mp/mpcontribs-data/ThinFilmPV\")\nfiles = {\n \"summary\": \"SUMMARY.json\",\n \"absorption\": \"ABSORPTION-CLIPPED.json\",\n \"dos\": \"DOS.json\",\n \"formulae\": \"FORMATTED-FORMULAE.json\"\n}\ndata = {}\n\nfor k, v in files.items():\n path = indir / v\n with path.open(mode=\"r\") as f:\n data[k] = json.load(f)\n \nfor k, v in data.items():\n print(k, len(v))",
"_____no_output_____"
]
],
[
[
"**Prepare contributions**",
"_____no_output_____"
]
],
[
[
"config = {\n \"SLME_500_nm\": {\"path\": \"SLME.500nm\", \"unit\": \"%\"},\n \"SLME_1000_nm\": {\"path\": \"SLME.1000nm\", \"unit\": \"%\"},\n \"E_g\": {\"path\": \"ΔE.corrected\", \"unit\": \"eV\"},\n \"E_g_d\": {\"path\": \"ΔE.direct\", \"unit\": \"eV\"},\n \"E_g_da\": {\"path\": \"ΔE.dipole\", \"unit\": \"eV\"},\n \"m_e\": {\"path\": \"mᵉ\", \"unit\": \"mₑ\"},\n \"m_h\": {\"path\": \"mʰ\", \"unit\": \"mₑ\"}\n}\ncolumns = {c[\"path\"]: c[\"unit\"] for c in config.values()}\ncontributions = []\n\nfor mp_id, d in data[\"summary\"].items():\n formula = data[\"formulae\"][mp_id].replace(\"<sub>\", \"\").replace(\"</sub>\", \"\")\n contrib = {\"project\": name, \"identifier\": mp_id, \"data\": {\"formula\": formula}}\n cdata = {v[\"path\"]: f'{d[k]} {v[\"unit\"]}' for k, v in config.items()}\n contrib[\"data\"] = unflatten(cdata)\n \n df_abs = DataFrame(data=data[\"absorption\"][mp_id])\n df_abs.columns = [\"hν [eV]\", \"α [cm⁻¹]\"]\n df_abs.set_index(\"hν [eV]\", inplace=True)\n df_abs.columns.name = \"\" # legend name\n df_abs.attrs[\"name\"] = \"absorption\"\n df_abs.attrs[\"title\"] = \"optical absorption spectrum\"\n df_abs.attrs[\"labels\"] = {\"variable\": \"\", \"value\": \"α [cm⁻¹]\"}\n\n df_dos = DataFrame(data=data[\"dos\"][mp_id])\n df_dos.columns = ['E [eV]', 'DOS [eV⁻¹]']\n df_dos.set_index(\"E [eV]\", inplace=True)\n df_dos.columns.name = \"\" # legend name\n df_dos.attrs[\"name\"] = \"DOS\"\n df_dos.attrs[\"title\"] = \"electronic density of states\"\n df_dos.attrs[\"labels\"] = {\"variable\": \"\", \"value\": \"DOS [eV⁻¹]\"}\n\n contrib[\"tables\"] = [df_abs, df_dos]\n contributions.append(contrib)\n \nlen(contributions)",
"_____no_output_____"
]
],
[
[
"**Submit contributions**",
"_____no_output_____"
]
],
[
[
"client.delete_contributions(name)\nclient.init_columns(name, columns)\nclient.submit_contributions(contributions[:5])",
"_____no_output_____"
]
],
[
[
"**Retrieve and plot tables**",
"_____no_output_____"
]
],
[
[
"contribs = client.get_contributions(name)",
"_____no_output_____"
],
[
"tables = [client.get_table(md5) for md5 in contribs[\"tables\"]] ",
"_____no_output_____"
],
[
"df = tables[0]\ndf.plot(**df.attrs)",
"_____no_output_____"
]
]
]
| [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
]
|
ec6b436acfa7c102e697b284dfee255934296ae4 | 12,913 | ipynb | Jupyter Notebook | train_notebook/melgan.ipynb | asdryau/descriptor-transformer | 8d519e1b8ba3c1094ddaba65e94955b5ebb0bf26 | [
"MIT"
]
| 1 | 2021-06-24T15:18:38.000Z | 2021-06-24T15:18:38.000Z | train_notebook/melgan.ipynb | asdryau/descriptor-transformer | 8d519e1b8ba3c1094ddaba65e94955b5ebb0bf26 | [
"MIT"
]
| null | null | null | train_notebook/melgan.ipynb | asdryau/descriptor-transformer | 8d519e1b8ba3c1094ddaba65e94955b5ebb0bf26 | [
"MIT"
]
| 1 | 2021-04-25T15:15:38.000Z | 2021-04-25T15:15:38.000Z | 38.204142 | 363 | 0.521413 | [
[
[
"<a href=\"https://colab.research.google.com/github/buganart/descriptor-transformer/blob/main/train_notebook/melgan.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"#@markdown Before starting please save the notebook in your drive by clicking on `File -> Save a copy in drive`",
"_____no_output_____"
],
[
"#@markdown Check GPU, should be a Tesla V100\n!nvidia-smi -L\nimport os\nprint(f\"We have {os.cpu_count()} CPU cores.\")",
"_____no_output_____"
],
[
"#@markdown Mount google drive\nfrom google.colab import drive\nfrom google.colab import output\ndrive.mount('/content/drive')\n\nfrom pathlib import Path\nif not Path(\"/content/drive/My Drive/IRCMS_GAN_collaborative_database\").exists():\n raise RuntimeError(\n \"Shortcut to our shared drive folder doesn't exits.\\n\\n\"\n \"\\t1. Go to the google drive web UI\\n\"\n \"\\t2. Right click shared folder IRCMS_GAN_collaborative_database and click \\\"Add shortcut to Drive\\\"\"\n )\n\ndef clear_on_success(msg=\"Ok!\"):\n if _exit_code == 0:\n output.clear()\n print(msg)",
"_____no_output_____"
],
[
"#@markdown Install wandb and log in\n%pip install wandb\noutput.clear()\nimport wandb\nfrom pathlib import Path\nwandb_drive_netrc_path = Path(\"drive/My Drive/colab/.netrc\")\nwandb_local_netrc_path = Path(\"/root/.netrc\")\nif wandb_drive_netrc_path.exists():\n import shutil\n\n print(\"Wandb .netrc file found, will use that to log in.\")\n shutil.copy(wandb_drive_netrc_path, wandb_local_netrc_path)\nelse:\n print(\n f\"Wandb config not found at {wandb_drive_netrc_path}.\\n\"\n f\"Using manual login.\\n\\n\"\n f\"To use auto login in the future, finish the manual login first and then run:\\n\\n\"\n f\"\\t!mkdir -p '{wandb_drive_netrc_path.parent}'\\n\"\n f\"\\t!cp {wandb_local_netrc_path} '{wandb_drive_netrc_path}'\\n\\n\"\n f\"Then that file will be used to login next time.\\n\"\n )\n\n!wandb login\noutput.clear()\nprint(\"ok!\")",
"_____no_output_____"
]
],
[
[
"# Description\n\nThis notebook is used for training melgan and log results to the wandb project \"demiurge/melgan-neurips\". The [buganart/melgan-neurips](https://github.com/buganart/melgan-neurips) code is based on the [descriptinc/melgan-neurips repository](https://github.com/descriptinc/melgan-neurips).\n\nTo start training the melgan, user will need to specify **audio_db_dir** to locate a music folder in the mounted Google Drive. All the data in the folder will be used for training and evaluating the model traininig process. **experiment_dir** and **melgan_output_dir** are the path where the data generated from the melgan training process is saved. \n\nIn case the run is stopped, and the user want to resume such run, please specify wandb run id in the **resume_run_id**. For all the training arguments, please see [descriptinc/melgan-neurips repository](https://github.com/descriptinc/melgan-neurips).",
"_____no_output_____"
]
],
[
[
"#@title Configuration\n\n#@markdown Directories can be found via file explorer on the left by navigating into `drive` to the desired folders. \n#@markdown Then right-click and `Copy path`.\n# audio_db_dir = \"/content/drive/My Drive/AUDIO DATABASE/RAW Sessions/Roberto Studio Material\" #@param {type:\"string\"}\naudio_db_dir = \"/content/drive/My Drive/AUDIO DATABASE/TESTING\" #@param {type:\"string\"}\nexperiment_dir = \"/content/drive/My Drive/IRCMS_GAN_collaborative_database/Experiments/colab-violingan/melgan\" #@param {type:\"string\"}\nmelgan_output_dir = \"/content/drive/My Drive/IRCMS_GAN_collaborative_database/Experiments/colab-violingan/melgan-outputs\" #@param {type:\"string\"}\n\n#@markdown ### Resumption of previous runs\n#@markdown Optional resumption arguments below, leaving both empty will start a new run from scratch. \n#@markdown - The ID can be found on wandb. \n#@markdown - It's 8 characters long and may contain a-z letters and digits (for example `1t212ycn`).\n\n#@markdown Resume a previous run \nresume_run_id = \"\" #@param {type:\"string\"}\n#@markdown Load initial weights from a previous run to start a new run.\nload_from_run_id = \"\" #@param {type:\"string\"}\n\n#@markdown train argument\nn_mel_channels = 80 #@param {type: \"integer\"}\nngf = 32 #@param {type: \"integer\"}\nn_residual_layers = 3 #@param {type: \"integer\"}\n\nndf = 16 #@param {type: \"integer\"}\nnum_D = 3 #@param {type: \"integer\"}\nn_layers_D = 4 #@param {type: \"integer\"}\ndownsamp_factor = 4 #@param {type: \"integer\"}\n#@markdown - ratios should be list in string format, with product of elements = 256?\nratios = \"[8,8,2,2]\" #@param {type: \"string\"}\n\nlambda_feat = 10 #@param {type: \"integer\"}\n#cond_disc: action=\"store_true\"\nlearning_rate = 1e-4 #@param {type: \"number\"}\npad_mode = \"reflect\" #@param [\"reflect\", \"replicate\"]\n\nbatch_size = 16 #@param {type: \"integer\"}\nseq_len = 8192 #@param {type: \"integer\"}\nsampling_rate = 44100 #@param {type: \"integer\"}\n\nepochs = 3000 #@param {type: \"integer\"}\nlog_interval = 100 #@param {type: \"integer\"}\nsave_interval = 1000 #@param {type: \"integer\"}\nn_test_samples = 8 #@param {type: \"integer\"}\n\nnotes = \"\" #@param {type: \"string\"}\n\nimport re\nfrom pathlib import Path\n\naudio_db_dir = Path(audio_db_dir)\nmelgan_output_dir = Path(melgan_output_dir)\nexperiment_dir = Path(experiment_dir)\n\n#check ratios\nimport numpy as np\nratios_str = ratios\nratios_str = ratios_str.replace(\" \", \"\")\nratios_str = ratios_str.strip(\"][\").split(\",\")\nratios_str = [int(i) for i in ratios_str]\nratios_str = np.array(ratios_str)\n\n\nfor path in [experiment_dir, melgan_output_dir]:\n path.mkdir(parents=True, exist_ok=True)\n\nif not audio_db_dir.exists():\n raise RuntimeError(f\"audio_db_dir {audio_db_dir} does not exists.\")\n\nif resume_run_id and load_from_run_id:\n raise RuntimeError(\"Only set `resume_run_id` or `load_from_run_id`.\")\n\ndef check_wandb_id(run_id):\n if run_id and not re.match(r\"^[\\da-z]{8}$\", run_id):\n raise RuntimeError(\n \"Run ID needs to be 8 characters long and contain only letters a-z and digits.\\n\"\n f\"Got \\\"{run_id}\\\"\"\n )\n\ncheck_wandb_id(resume_run_id)\ncheck_wandb_id(load_from_run_id)",
"_____no_output_____"
],
[
"#@title Clone melgan repo\n\n!git clone https://github.com/buganart/melgan-neurips",
"_____no_output_____"
],
[
"#@title Install Dependencies\n\n%cd /content/melgan-neurips\n%pip install -r requirements.txt\nclear_on_success(\"Dependencies installed.\")",
"_____no_output_____"
],
[
"#@title Copy audio files to runtime\n\nlocal_wav_dir = Path(\"/content/wavs/\")\n!find \"{audio_db_dir}\" -maxdepth 1 -type f | xargs -t -d \"\\n\" -I'%%' -P 10 -n 1 rsync -a '%%' \"$local_wav_dir\"/\nclear_on_success(\"All files copied to this runtime.\")\n",
"_____no_output_____"
],
[
"#@title Split train/test dataset\n\n# os.environ[\"WANDB_MODE\"] = \"dryrun\"\n!python split_dataset.py --data_path \"$local_wav_dir\"\n\nprint(\"TRAIN FILES\")\n!head -n3 train_files.txt\nprint('...')\n!tail -n3 train_files.txt\n\nprint()\nprint(\"TEST FILES\")\n!head -n3 test_files.txt\nprint('...')\n!tail -n3 test_files.txt",
"_____no_output_____"
],
[
"#@title TRAIN\n\n# This done a bit weirdly because setting PYTHONPATH=$PWD removes variables afterwards. A colab bug, maybe.\n!env PYTHONPATH=\"$(pwd)\" python scripts/train.py \\\n--save_path \"$experiment_dir\" \\\n--data_path . \\\n--resume_run_id \"$resume_run_id\" \\\n--load_from_run_id \"$load_from_run_id\" \\\n--n_mel_channels \"$n_mel_channels\" \\\n--ngf \"$ngf\" \\\n--n_residual_layers \"$n_residual_layers\" \\\n--ndf \"$ndf\" \\\n--num_D \"$num_D\" \\\n--n_layers_D \"$n_layers_D\" \\\n--downsamp_factor \"$downsamp_factor\" \\\n--ratios \"$ratios\" \\\n--lambda_feat \"$lambda_feat\" \\\n--learning_rate \"$learning_rate\" \\\n--pad_mode \"$pad_mode\" \\\n--batch_size \"$batch_size\" \\\n--seq_len \"$seq_len\" \\\n--sampling_rate \"$sampling_rate\" \\\n--epochs \"$epochs\" \\\n--log_interval \"$log_interval\" \\\n--save_interval \"$save_interval\" \\\n--n_test_samples \"$n_test_samples\" \\\n--notes \"$notes\"",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
ec6b56c91d8a43ed42da6cb8474bcaa993a7125f | 717,789 | ipynb | Jupyter Notebook | outliers/outliers.ipynb | Prikshit7766/ml-class | daef6cabe1b386ba765c01ce4d93c6b9ac59d3b6 | [
"Apache-2.0"
]
| null | null | null | outliers/outliers.ipynb | Prikshit7766/ml-class | daef6cabe1b386ba765c01ce4d93c6b9ac59d3b6 | [
"Apache-2.0"
]
| null | null | null | outliers/outliers.ipynb | Prikshit7766/ml-class | daef6cabe1b386ba765c01ce4d93c6b9ac59d3b6 | [
"Apache-2.0"
]
| null | null | null | 162.727046 | 146,206 | 0.859719 | [
[
[
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns",
"_____no_output_____"
],
[
"#create a random distribution of numbers\n#create a function for that!!!\ndef create_number(mu=54,sigma=15,num_samples=100,seed=42):\n np.random.seed(seed)\n #With the seed reset (every time), the same set of numbers will appear every time.\n sample_numbers=np.random.normal(loc=mu,scale=sigma,size=num_samples)\n #random sample from a normal(gaussian) distribution with\n sample_numbers=np.round(sample_numbers,decimals=0)\n return sample_numbers",
"_____no_output_____"
],
[
"samples=create_number()\nsamples",
"_____no_output_____"
],
[
"sns.displot(samples,bins=20)\n\n\n",
"_____no_output_____"
],
[
"sns.boxplot(samples)#box plot (due to valid data points)",
"C:\\Users\\HP\\Anaconda3\\envs\\ml_Test\\lib\\site-packages\\seaborn\\_decorators.py:43: FutureWarning: Pass the following variable as a keyword arg: x. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation.\n FutureWarning\n"
],
[
"ser=pd.Series(samples)#trying to take samples as panda series\nser",
"_____no_output_____"
],
[
"#to see Iqr\nser.describe()\n",
"_____no_output_____"
],
[
"IQR=60.25-45.00\nlower_limit=45.00-1.5*IQR\nIQR",
"_____no_output_____"
],
[
"upper_limit = 60.25+1.5*IQR",
"_____no_output_____"
],
[
"lower_limit",
"_____no_output_____"
],
[
"upper_limit",
"_____no_output_____"
],
[
"#to drop outliers\nser[ser>lower_limit]",
"_____no_output_____"
],
[
"#another method to find lower and upper limit\nq75,q25=np.percentile(samples,[75,25])\nIQR=q75-q25\nIQR\n",
"_____no_output_____"
],
[
"IQR",
"_____no_output_____"
],
[
"lower_limit=q25-1.5*IQR",
"_____no_output_____"
],
[
"lower_limit",
"_____no_output_____"
],
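[
"#a reusable version of the IQR rule used above (added sketch, not part of the original notebook):\n#it computes the quartiles with np.percentile and keeps only values inside [Q1-1.5*IQR, Q3+1.5*IQR]\ndef filter_iqr_outliers(values, k=1.5):\n    q75, q25 = np.percentile(values, [75, 25])\n    iqr = q75 - q25\n    lower, upper = q25 - k*iqr, q75 + k*iqr\n    #boolean mask keeps only the in-range values\n    return values[(values >= lower) & (values <= upper)]\n\nfilter_iqr_outliers(samples)",
"_____no_output_____"
],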
[
"#now we take that data set and typo handel that data set into\nwith open (r'C:\\Users\\HP\\OneDrive\\Desktop\\ml\\modular\\ml-class\\Data\\DataDictionary_AmesHousing.txt',\"r\") as f:\n print(f.read())",
"NAME: AmesHousing.txt\nTYPE: Population\nSIZE: 2930 observations, 82 variables\nARTICLE TITLE: Ames Iowa: Alternative to the Boston Housing Data Set\n\nDESCRIPTIVE ABSTRACT: Data set contains information from the Ames Assessor’s Office used in computing assessed values for individual residential properties sold in Ames, IA from 2006 to 2010.\n\nSOURCES: \nAmes, Iowa Assessor’s Office \n\nVARIABLE DESCRIPTIONS:\nTab characters are used to separate variables in the data file. The data has 82 columns which include 23 nominal, 23 ordinal, 14 discrete, and 20 continuous variables (and 2 additional observation identifiers).\n\nOrder (Discrete): Observation number\n\nPID (Nominal): Parcel identification number - can be used with city web site for parcel review. \n\nMS SubClass (Nominal): Identifies the type of dwelling involved in the sale.\t\n\n 020\t1-STORY 1946 & NEWER ALL STYLES\n 030\t1-STORY 1945 & OLDER\n 040\t1-STORY W/FINISHED ATTIC ALL AGES\n 045\t1-1/2 STORY - UNFINISHED ALL AGES\n 050\t1-1/2 STORY FINISHED ALL AGES\n 060\t2-STORY 1946 & NEWER\n 070\t2-STORY 1945 & OLDER\n 075\t2-1/2 STORY ALL AGES\n 080\tSPLIT OR MULTI-LEVEL\n 085\tSPLIT FOYER\n 090\tDUPLEX - ALL STYLES AND AGES\n 120\t1-STORY PUD (Planned Unit Development) - 1946 & NEWER\n 150\t1-1/2 STORY PUD - ALL AGES\n 160\t2-STORY PUD - 1946 & NEWER\n 180\tPUD - MULTILEVEL - INCL SPLIT LEV/FOYER\n 190\t2 FAMILY CONVERSION - ALL STYLES AND AGES\n\nMS Zoning (Nominal): Identifies the general zoning classification of the sale.\n\t\t\n A\tAgriculture\n C\tCommercial\n FV\tFloating Village Residential\n I\tIndustrial\n RH\tResidential High Density\n RL\tResidential Low Density\n RP\tResidential Low Density Park \n RM\tResidential Medium Density\n\t\nLot Frontage (Continuous): Linear feet of street connected to property\n\nLot Area (Continuous): Lot size in square feet\n\nStreet (Nominal): Type of road access to property\n\n Grvl\tGravel\t\n Pave\tPaved\n \t\nAlley (Nominal): Type of alley access to property\n\n Grvl\tGravel\n Pave\tPaved\n NA \tNo alley access\n\t\t\nLot Shape (Ordinal): General shape of property\n\n Reg\tRegular\t\n IR1\tSlightly irregular\n IR2\tModerately Irregular\n IR3\tIrregular\n \nLand Contour (Nominal): Flatness of the property\n\n Lvl\tNear Flat/Level\t\n Bnk\tBanked - Quick and significant rise from street grade to building\n HLS\tHillside - Significant slope from side to side\n Low\tDepression\n\t\t\nUtilities (Ordinal): Type of utilities available\n\t\t\n AllPub\tAll public Utilities (E,G,W,& S)\t\n NoSewr\tElectricity, Gas, and Water (Septic Tank)\n NoSeWa\tElectricity and Gas Only\n ELO\tElectricity only\t\n\t\nLot Config (Nominal): Lot configuration\n\n Inside\tInside lot\n Corner\tCorner lot\n CulDSac\tCul-de-sac\n FR2\tFrontage on 2 sides of property\n FR3\tFrontage on 3 sides of property\n\t\nLand Slope (Ordinal): Slope of property\n\t\t\n Gtl\tGentle slope\n Mod\tModerate Slope\t\n Sev\tSevere Slope\n\t\nNeighborhood (Nominal): Physical locations within Ames city limits (map available)\n\n Blmngtn\tBloomington Heights\n Blueste\tBluestem\n BrDale\tBriardale\n BrkSide\tBrookside\n ClearCr\tClear Creek\n CollgCr\tCollege Creek\n Crawfor\tCrawford\n Edwards\tEdwards\n Gilbert\tGilbert\n Greens\tGreens\n GrnHill\tGreen Hills\n IDOTRR\tIowa DOT and Rail Road\n Landmrk\tLandmark\n MeadowV\tMeadow Village\n Mitchel\tMitchell\n Names\tNorth Ames\n NoRidge\tNorthridge\n NPkVill\tNorthpark Villa\n NridgHt\tNorthridge Heights\n NWAmes\tNorthwest Ames\n OldTown\tOld Town\n SWISU\tSouth & West of Iowa State 
University\n Sawyer\tSawyer\n SawyerW\tSawyer West\n Somerst\tSomerset\n StoneBr\tStone Brook\n Timber\tTimberland\n Veenker\tVeenker\n\t\t\t\nCondition 1 (Nominal): Proximity to various conditions\n\t\n Artery\tAdjacent to arterial street\n Feedr\tAdjacent to feeder street\t\n Norm\tNormal\t\n RRNn\tWithin 200' of North-South Railroad\n RRAn\tAdjacent to North-South Railroad\n PosN\tNear positive off-site feature--park, greenbelt, etc.\n PosA\tAdjacent to postive off-site feature\n RRNe\tWithin 200' of East-West Railroad\n RRAe\tAdjacent to East-West Railroad\n\t\nCondition 2 (Nominal): Proximity to various conditions (if more than one is present)\n\t\t\n Artery\tAdjacent to arterial street\n Feedr\tAdjacent to feeder street\t\n Norm\tNormal\t\n RRNn\tWithin 200' of North-South Railroad\n RRAn\tAdjacent to North-South Railroad\n PosN\tNear positive off-site feature--park, greenbelt, etc.\n PosA\tAdjacent to postive off-site feature\n RRNe\tWithin 200' of East-West Railroad\n RRAe\tAdjacent to East-West Railroad\n\t\nBldg Type (Nominal): Type of dwelling\n\t\t\n 1Fam\tSingle-family Detached\t\n 2FmCon\tTwo-family Conversion; originally built as one-family dwelling\n Duplx\tDuplex\n TwnhsE\tTownhouse End Unit\n TwnhsI\tTownhouse Inside Unit\n\t\nHouse Style (Nominal): Style of dwelling\n\t\n 1Story\tOne story\n 1.5Fin\tOne and one-half story: 2nd level finished\n 1.5Unf\tOne and one-half story: 2nd level unfinished\n 2Story\tTwo story\n 2.5Fin\tTwo and one-half story: 2nd level finished\n 2.5Unf\tTwo and one-half story: 2nd level unfinished\n SFoyer\tSplit Foyer\n SLvl\tSplit Level\n\t\nOverall Qual (Ordinal): Rates the overall material and finish of the house\n\n 10\tVery Excellent\n 9\tExcellent\n 8\tVery Good\n 7\tGood\n 6\tAbove Average\n 5\tAverage\n 4\tBelow Average\n 3\tFair\n 2\tPoor\n 1\tVery Poor\n\t\nOverall Cond (Ordinal): Rates the overall condition of the house\n\n 10\tVery Excellent\n 9\tExcellent\n 8\tVery Good\n 7\tGood\n 6\tAbove Average\t\n 5\tAverage\n 4\tBelow Average\t\n 3\tFair\n 2\tPoor\n 1\tVery Poor\n\t\t\nYear Built (Discrete): Original construction date\n\nYear Remod/Add (Discrete): Remodel date (same as construction date if no remodeling or additions)\n\nRoof Style (Nominal): Type of roof\n\n Flat\tFlat\n Gable\tGable\n Gambrel\tGabrel (Barn)\n Hip\tHip\n Mansard\tMansard\n Shed\tShed\n\t\t\nRoof Matl (Nominal): Roof material\n\n ClyTile\tClay or Tile\n CompShg\tStandard (Composite) Shingle\n Membran\tMembrane\n Metal\tMetal\n Roll\tRoll\n Tar&Grv\tGravel & Tar\n WdShake\tWood Shakes\n WdShngl\tWood Shingles\n\t\t\nExterior 1 (Nominal): Exterior covering on house\n\n AsbShng\tAsbestos Shingles\n AsphShn\tAsphalt Shingles\n BrkComm\tBrick Common\n BrkFace\tBrick Face\n CBlock\tCinder Block\n CemntBd\tCement Board\n HdBoard\tHard Board\n ImStucc\tImitation Stucco\n MetalSd\tMetal Siding\n Other\tOther\n Plywood\tPlywood\n PreCast\tPreCast\t\n Stone\tStone\n Stucco\tStucco\n VinylSd\tVinyl Siding\n Wd Sdng\tWood Siding\n WdShing\tWood Shingles\n\t\nExterior 2 (Nominal): Exterior covering on house (if more than one material)\n\n AsbShng\tAsbestos Shingles\n AsphShn\tAsphalt Shingles\n BrkComm\tBrick Common\n BrkFace\tBrick Face\n CBlock\tCinder Block\n CemntBd\tCement Board\n HdBoard\tHard Board\n ImStucc\tImitation Stucco\n MetalSd\tMetal Siding\n Other\tOther\n Plywood\tPlywood\n PreCast\tPreCast\n Stone\tStone\n Stucco\tStucco\n VinylSd\tVinyl Siding\n Wd Sdng\tWood Siding\n WdShing\tWood Shingles\n\t\nMas Vnr Type (Nominal): Masonry veneer type\n\n BrkCmn\tBrick 
Common\n BrkFace\tBrick Face\n CBlock\tCinder Block\n None\tNone\n Stone\tStone\n\t\nMas Vnr Area (Continuous): Masonry veneer area in square feet\n\nExter Qual (Ordinal): Evaluates the quality of the material on the exterior \n\t\t\n Ex\tExcellent\n Gd\tGood\n TA\tAverage/Typical\n Fa\tFair\n Po\tPoor\n\t\t\nExter Cond (Ordinal): Evaluates the present condition of the material on the exterior\n\t\t\n Ex\tExcellent\n Gd\tGood\n TA\tAverage/Typical\n Fa\tFair\n Po\tPoor\n\t\t\nFoundation (Nominal): Type of foundation\n\t\t\n BrkTil\tBrick & Tile\n CBlock\tCinder Block\n PConc\tPoured Contrete\t\n Slab\tSlab\n Stone\tStone\n Wood\tWood\n\t\t\nBsmt Qual (Ordinal): Evaluates the height of the basement\n\n Ex\tExcellent (100+ inches)\t\n Gd\tGood (90-99 inches)\n TA\tTypical (80-89 inches)\n Fa\tFair (70-79 inches)\n Po\tPoor (<70 inches\n NA\tNo Basement\n\t\t\nBsmt Cond (Ordinal): Evaluates the general condition of the basement\n\n Ex\tExcellent\n Gd\tGood\n TA\tTypical - slight dampness allowed\n Fa\tFair - dampness or some cracking or settling\n Po\tPoor - Severe cracking, settling, or wetness\n NA\tNo Basement\n\t\nBsmt Exposure\t(Ordinal): Refers to walkout or garden level walls\n\n Gd\tGood Exposure\n Av\tAverage Exposure (split levels or foyers typically score average or above)\t\n Mn\tMimimum Exposure\n No\tNo Exposure\n NA\tNo Basement\n\t\nBsmtFin Type 1\t(Ordinal): Rating of basement finished area\n\n GLQ\tGood Living Quarters\n ALQ\tAverage Living Quarters\n BLQ\tBelow Average Living Quarters\t\n Rec\tAverage Rec Room\n LwQ\tLow Quality\n Unf\tUnfinshed\n NA\tNo Basement\n\t\t\nBsmtFin SF 1 (Continuous): Type 1 finished square feet\n\nBsmtFinType 2\t(Ordinal): Rating of basement finished area (if multiple types)\n\n GLQ\tGood Living Quarters\n ALQ\tAverage Living Quarters\n BLQ\tBelow Average Living Quarters\t\n Rec\tAverage Rec Room\n LwQ\tLow Quality\n Unf\tUnfinshed\n NA\tNo Basement\n\nBsmtFin SF 2 (Continuous): Type 2 finished square feet\n\nBsmt Unf SF (Continuous): Unfinished square feet of basement area\n\nTotal Bsmt SF (Continuous): Total square feet of basement area\n\nHeating\t(Nominal): Type of heating\n\t\t\n Floor\tFloor Furnace\n GasA\tGas forced warm air furnace\n GasW\tGas hot water or steam heat\n Grav\tGravity furnace\t\n OthW\tHot water or steam heat other than gas\n Wall\tWall furnace\n\t\t\nHeatingQC (Ordinal): Heating quality and condition\n\n Ex\tExcellent\n Gd\tGood\n TA\tAverage/Typical\n Fa\tFair\n Po\tPoor\n\t\t\nCentral Air (Nominal): Central air conditioning\n\n N\tNo\n Y\tYes\n\t\t\nElectrical (Ordinal): Electrical system\n\n SBrkr\tStandard Circuit Breakers & Romex\n FuseA\tFuse Box over 60 AMP and all Romex wiring (Average)\t\n FuseF\t60 AMP Fuse Box and mostly Romex wiring (Fair)\n FuseP\t60 AMP Fuse Box and mostly knob & tube wiring (poor)\n Mix\tMixed\n\t\t\n1st Flr SF (Continuous): First Floor square feet\n \n2nd Flr SF (Continuous)\t: Second floor square feet\n\nLow Qual Fin SF (Continuous): Low quality finished square feet (all floors)\n\nGr Liv Area (Continuous): Above grade (ground) living area square feet\n\nBsmt Full Bath (Discrete): Basement full bathrooms\n\nBsmt Half Bath (Discrete): Basement half bathrooms\n\nFull Bath (Discrete): Full bathrooms above grade\n\nHalf Bath (Discrete): Half baths above grade\n\nBedroom (Discrete): Bedrooms above grade (does NOT include basement bedrooms)\n\nKitchen (Discrete): Kitchens above grade\n\nKitchenQual (Ordinal): Kitchen quality\n\n Ex\tExcellent\n Gd\tGood\n TA\tTypical/Average\n Fa\tFair\n 
Po\tPoor\n \t\nTotRmsAbvGrd\t(Discrete): Total rooms above grade (does not include bathrooms)\n\nFunctional (Ordinal): Home functionality (Assume typical unless deductions are warranted)\n\n Typ\tTypical Functionality\n Min1\tMinor Deductions 1\n Min2\tMinor Deductions 2\n Mod\tModerate Deductions\n Maj1\tMajor Deductions 1\n Maj2\tMajor Deductions 2\n Sev\tSeverely Damaged\n Sal\tSalvage only\n\t\t\nFireplaces (Discrete): Number of fireplaces\n\nFireplaceQu (Ordinal): Fireplace quality\n\n Ex\tExcellent - Exceptional Masonry Fireplace\n Gd\tGood - Masonry Fireplace in main level\n TA\tAverage - Prefabricated Fireplace in main living area or Masonry Fireplace in basement\n Fa\tFair - Prefabricated Fireplace in basement\n Po\tPoor - Ben Franklin Stove\n NA\tNo Fireplace\n\t\t\nGarage Type (Nominal): Garage location\n\t\t\n 2Types\tMore than one type of garage\n Attchd\tAttached to home\n Basment\tBasement Garage\n BuiltIn\tBuilt-In (Garage part of house - typically has room above garage)\n CarPort\tCar Port\n Detchd\tDetached from home\n NA\tNo Garage\n\t\t\nGarage Yr Blt (Discrete): Year garage was built\n\t\t\nGarage Finish (Ordinal)\t: Interior finish of the garage\n\n Fin\tFinished\n RFn\tRough Finished\t\n Unf\tUnfinished\n NA\tNo Garage\n\t\t\nGarage Cars (Discrete): Size of garage in car capacity\n\nGarage Area (Continuous): Size of garage in square feet\n\nGarage Qual (Ordinal): Garage quality\n\n Ex\tExcellent\n Gd\tGood\n TA\tTypical/Average\n Fa\tFair\n Po\tPoor\n NA\tNo Garage\n\t\t\nGarage Cond (Ordinal): Garage condition\n\n Ex\tExcellent\n Gd\tGood\n TA\tTypical/Average\n Fa\tFair\n Po\tPoor\n NA\tNo Garage\n\t\t\nPaved Drive (Ordinal): Paved driveway\n\n Y\tPaved \n P\tPartial Pavement\n N\tDirt/Gravel\n\t\t\nWood Deck SF (Continuous): Wood deck area in square feet\n\nOpen Porch SF (Continuous): Open porch area in square feet\n\nEnclosed Porch (Continuous): Enclosed porch area in square feet\n\n3-Ssn Porch (Continuous): Three season porch area in square feet\n\nScreen Porch (Continuous): Screen porch area in square feet\n\nPool Area (Continuous): Pool area in square feet\n\nPool QC (Ordinal): Pool quality\n\t\t\n Ex\tExcellent\n Gd\tGood\n TA\tAverage/Typical\n Fa\tFair\n NA\tNo Pool\n\t\t\nFence (Ordinal): Fence quality\n\t\t\n GdPrv\tGood Privacy\n MnPrv\tMinimum Privacy\n GdWo\tGood Wood\n MnWw\tMinimum Wood/Wire\n NA\tNo Fence\n\t\nMisc Feature (Nominal): Miscellaneous feature not covered in other categories\n\t\t\n Elev\tElevator\n Gar2\t2nd Garage (if not described in garage section)\n Othr\tOther\n Shed\tShed (over 100 SF)\n TenC\tTennis Court\n NA\tNone\n\t\t\nMisc Val (Continuous): $Value of miscellaneous feature\n\nMo Sold (Discrete): Month Sold (MM)\n\nYr Sold (Discrete): Year Sold (YYYY)\n\nSale Type (Nominal): Type of sale\n\t\t\n WD \tWarranty Deed - Conventional\n CWD\tWarranty Deed - Cash\n VWD\tWarranty Deed - VA Loan\n New\tHome just constructed and sold\n COD\tCourt Officer Deed/Estate\n Con\tContract 15% Down payment regular terms\n ConLw\tContract Low Down payment and low interest\n ConLI\tContract Low Interest\n ConLD\tContract Low Down\n Oth\tOther\n\t\t\nSale Condition (Nominal): Condition of sale\n\n Normal\tNormal Sale\n Abnorml\tAbnormal Sale - trade, foreclosure, short sale\n AdjLand\tAdjoining Land Purchase\n Alloca\tAllocation - two linked properties with separate deeds, typically condo with a garage unit\t\n Family\tSale between family members\n Partial\tHome was not completed when last assessed (associated with New Homes)\n\t\t\nSalePrice 
(Continuous): Sale price $$\n\nSPECIAL NOTES:\nThere are 5 observations that an instructor may wish to remove from the data set before giving it to students (a plot of SALE PRICE versus GR LIV AREA will indicate them quickly). Three of them are true outliers (Partial Sales that likely don’t represent actual market values) and two of them are simply unusual sales (very large houses priced relatively appropriately). I would recommend removing any houses with more than 4000 square feet from the data set (which eliminates these 5 unusual observations) before assigning it to students.\n\nSTORY BEHIND THE DATA:\nThis data set was constructed for the purpose of an end of semester project for an undergraduate regression course. The original data (obtained directly from the Ames Assessor’s Office) is used for tax assessment purposes but lends itself directly to the prediction of home selling prices. The type of information contained in the data is similar to what a typical home buyer would want to know before making a purchase and students should find most variables straightforward and understandable.\n\nPEDAGOGICAL NOTES:\nInstructors unfamiliar with multiple regression may wish to use this data set in conjunction with an earlier JSE paper that reviews most of the major issues found in regression modeling: \n\nKuiper , S. (2008), “Introduction to Multiple Regression: How Much Is Your Car Worth?”, Journal of Statistics Education Volume 16, Number 3 (2008).\n\nOutside of the general issues associated with multiple regression discussed in this article, this particular data set offers several opportunities to discuss how the purpose of a model might affect the type of modeling done. User of this data may also want to review another JSE article related directly to real estate pricing:\n\nPardoe , I. (2008), “Modeling home prices using realtor data”, Journal of Statistics Education Volume 16, Number 2 (2008).\n\nOne issue is in regards to homoscedasticity and assumption violations. The graph included in the article appears to indicate heteroscedasticity with variation increasing with sale price and this problem is evident in many simple home pricing models that focus only on house and lot sizes. Though this violation can be alleviated by transforming the response variable (sale price), the resulting equation yields difficult to interpret fitted values (selling price in log or square root dollars). This situation gives the instructor the opportunity to talk about the costs (biased estimators, incorrect statistical tests, etc.) and benefits (ease of use) of not correcting this assumption violation. If the purpose in building the model is simply to allow a typical buyer or real estate agent to sit down and estimate the selling price of a house, such transformations may be unnecessary or inappropriate for the task at hand. This issue could also open into a discussion on the contrasts and comparisons between data mining, predictive models, and formal statistical inference.\n\nA second issue closely related to the intended use of the model, is the handling of outliers and unusual observations. In general, I instruct my students to never throw away data points simply because they do not match a priori expectations (or other data points). I strongly make this point in the situation where data are being analyzed for research purposes that will be shared with a larger audience. 
Alternatively, if the purpose is to once again create a common use model to estimate a “typical” sale, it is in the modeler’s best interest to remove any observations that do not seem typical (such as foreclosures or family sales).\n\nREFERENCES:\nIndividual homes within the data set can be referenced directly from the Ames City Assessor webpage via the Parcel ID (PID) found in the data set. Note these are nominal values (non-numeric) so preceding 0’s must be included in the data entry field on the website. Access to the database can be gained from the Ames site (http://www.cityofames.org/assessor/) by clicking on “property search” or by accessing the Beacon (http://beacon.schneidercorp.com/Default.aspx) website and inputting Iowa and Ames in the appropriate fields. A city map showing the location of all the neighborhoods is also available on the Ames site and can be accessed by clicking on “Maps” and then “Residential Assessment Neighborhoods (City of Ames Only)”.\n\nSUBMITTED BY:\nDean De Cock\nTruman State University\n100 E. Normal St., Kirksville, MO, 63501\[email protected]\n\n\n"
],
[
"df=pd.read_csv(r\"C:\\Users\\HP\\OneDrive\\Desktop\\ml\\modular\\ml-class\\Data\\ames.csv\")\ndf.head()\n\n",
"_____no_output_____"
],
[
"#check the corelation function\ndf.corr().head()",
"_____no_output_____"
],
[
"#correlation w.r.t salesPrice\ndf.corr()['SalePrice'].sort_values()",
"_____no_output_____"
],
[
"#find the highly correlated with sales price ",
"_____no_output_____"
],
[
"sns.scatterplot(x=\"Overall.Qual\",y=\"SalePrice\",data=df,color=\"red\")",
"_____no_output_____"
],
[
"sns.scatterplot(x=\"Gr.Liv.Area\",y=\"SalePrice\",data=df,color=\"red\")",
"_____no_output_____"
],
[
"#identfy outliers\n#then removed outliers\ndf[(df[\"Overall.Qual\"]>8)&(df[\"SalePrice\"]<200000)]",
"_____no_output_____"
],
[
"df[(df[\"Gr.Liv.Area\"]>4000)&(df[\"SalePrice\"]<300000)]",
"_____no_output_____"
],
[
"drop_ind=df[(df[\"Gr.Liv.Area\"]>4000)&(df[\"SalePrice\"]<300000)].index",
"_____no_output_____"
],
[
"df=df.drop(drop_ind,axis=0)",
"_____no_output_____"
],
[
"#outliers are removed\nsns.scatterplot(x=\"Overall.Qual\",y=\"SalePrice\",data=df,color=\"red\")",
"_____no_output_____"
],
[
"sns.scatterplot(x=\"Gr.Liv.Area\",y=\"SalePrice\",data=df,color=\"red\")",
"_____no_output_____"
],
[
"#to save the model \n#df.to_csv('ames_no_outliers.csv')\ndf.to_csv(os.path.join(r'C:\\Users\\HP\\OneDrive\\Desktop\\ml\\modular\\ml-class\\Data','ames_no_outliers.csv'))\n",
"_____no_output_____"
],
[
"#now we are using data set without outliers \ndf=pd.read_csv(r\"C:\\Users\\HP\\OneDrive\\Desktop\\ml\\modular\\ml-class\\Data\\ames_no_outliers.csv\")",
"_____no_output_____"
],
[
"df.head()",
"_____no_output_____"
],
[
"#to see the basic features ie. how many values are missing in the data set.\n",
"_____no_output_____"
],
[
"df.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 2927 entries, 0 to 2926\nData columns (total 83 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Unnamed: 0 2927 non-null int64 \n 1 Order 2927 non-null int64 \n 2 PID 2927 non-null int64 \n 3 MS.SubClass 2927 non-null int64 \n 4 MS.Zoning 2927 non-null object \n 5 Lot.Frontage 2437 non-null float64\n 6 Lot.Area 2927 non-null int64 \n 7 Street 2927 non-null object \n 8 Alley 198 non-null object \n 9 Lot.Shape 2927 non-null object \n 10 Land.Contour 2927 non-null object \n 11 Utilities 2927 non-null object \n 12 Lot.Config 2927 non-null object \n 13 Land.Slope 2927 non-null object \n 14 Neighborhood 2927 non-null object \n 15 Condition.1 2927 non-null object \n 16 Condition.2 2927 non-null object \n 17 Bldg.Type 2927 non-null object \n 18 House.Style 2927 non-null object \n 19 Overall.Qual 2927 non-null int64 \n 20 Overall.Cond 2927 non-null int64 \n 21 Year.Built 2927 non-null int64 \n 22 Year.Remod.Add 2927 non-null int64 \n 23 Roof.Style 2927 non-null object \n 24 Roof.Matl 2927 non-null object \n 25 Exterior.1st 2927 non-null object \n 26 Exterior.2nd 2927 non-null object \n 27 Mas.Vnr.Type 2904 non-null object \n 28 Mas.Vnr.Area 2904 non-null float64\n 29 Exter.Qual 2927 non-null object \n 30 Exter.Cond 2927 non-null object \n 31 Foundation 2927 non-null object \n 32 Bsmt.Qual 2847 non-null object \n 33 Bsmt.Cond 2847 non-null object \n 34 Bsmt.Exposure 2844 non-null object \n 35 BsmtFin.Type.1 2847 non-null object \n 36 BsmtFin.SF.1 2926 non-null float64\n 37 BsmtFin.Type.2 2846 non-null object \n 38 BsmtFin.SF.2 2926 non-null float64\n 39 Bsmt.Unf.SF 2926 non-null float64\n 40 Total.Bsmt.SF 2926 non-null float64\n 41 Heating 2927 non-null object \n 42 Heating.QC 2927 non-null object \n 43 Central.Air 2927 non-null object \n 44 Electrical 2926 non-null object \n 45 X1st.Flr.SF 2927 non-null int64 \n 46 X2nd.Flr.SF 2927 non-null int64 \n 47 Low.Qual.Fin.SF 2927 non-null int64 \n 48 Gr.Liv.Area 2927 non-null int64 \n 49 Bsmt.Full.Bath 2925 non-null float64\n 50 Bsmt.Half.Bath 2925 non-null float64\n 51 Full.Bath 2927 non-null int64 \n 52 Half.Bath 2927 non-null int64 \n 53 Bedroom.AbvGr 2927 non-null int64 \n 54 Kitchen.AbvGr 2927 non-null int64 \n 55 Kitchen.Qual 2927 non-null object \n 56 TotRms.AbvGrd 2927 non-null int64 \n 57 Functional 2927 non-null object \n 58 Fireplaces 2927 non-null int64 \n 59 Fireplace.Qu 1505 non-null object \n 60 Garage.Type 2770 non-null object \n 61 Garage.Yr.Blt 2768 non-null float64\n 62 Garage.Finish 2768 non-null object \n 63 Garage.Cars 2926 non-null float64\n 64 Garage.Area 2926 non-null float64\n 65 Garage.Qual 2768 non-null object \n 66 Garage.Cond 2768 non-null object \n 67 Paved.Drive 2927 non-null object \n 68 Wood.Deck.SF 2927 non-null int64 \n 69 Open.Porch.SF 2927 non-null int64 \n 70 Enclosed.Porch 2927 non-null int64 \n 71 X3Ssn.Porch 2927 non-null int64 \n 72 Screen.Porch 2927 non-null int64 \n 73 Pool.Area 2927 non-null int64 \n 74 Pool.QC 12 non-null object \n 75 Fence 572 non-null object \n 76 Misc.Feature 105 non-null object \n 77 Misc.Val 2927 non-null int64 \n 78 Mo.Sold 2927 non-null int64 \n 79 Yr.Sold 2927 non-null int64 \n 80 Sale.Type 2927 non-null object \n 81 Sale.Condition 2927 non-null object \n 82 SalePrice 2927 non-null int64 \ndtypes: float64(11), int64(29), object(43)\nmemory usage: 1.9+ MB\n"
],
[
"df=df.drop([\"Unnamed: 0\",\"PID\"],axis=1)",
"_____no_output_____"
],
[
"len(df.columns)",
"_____no_output_____"
],
[
"df.shape",
"_____no_output_____"
],
[
"df.head()\n",
"_____no_output_____"
],
[
"df.isnull()",
"_____no_output_____"
],
[
"df.isnull().sum()#number of null value",
"_____no_output_____"
],
[
"len(df)",
"_____no_output_____"
],
[
"(df.isnull().sum()/len(df))*100#percentage of null value",
"_____no_output_____"
],
[
"#problem of pandas that it shows less null value so we have to make seprate function for missing values",
"_____no_output_____"
],
[
"def percent_missing(df):\n percent_nan=(df.isnull().sum()/len(df))*100\n percent_nan=percent_nan[percent_nan>0].sort_values()\n #percentage misiing more than 0 percent\n return percent_nan",
"_____no_output_____"
],
[
"percent_nan=percent_missing(df)\npercent_nan",
"_____no_output_____"
],
[
"#to visualise\n",
"_____no_output_____"
],
[
"plt.figure(figsize=(5,3),dpi=200)",
"_____no_output_____"
],
[
"sns.barplot(x=percent_nan.index,y=percent_nan)\n",
"_____no_output_____"
],
[
"plt.xticks(rotation=90)#to rotate the graph\nsns.barplot(x=percent_nan.index,y=percent_nan)",
"_____no_output_____"
],
[
"plt.figure(figsize=(5,3),dpi=200)\nplt.ylim(0,1)# limit the graph o to 1 percent of data\nsns.barplot(x=percent_nan.index,y=percent_nan)\nplt.xticks(rotation=90)#to rotate the graph",
"_____no_output_____"
],
[
"#if few rowa are missing then you can drop those rows and full columns is empty then you can drop it\n#drop those values\npercent_nan[percent_nan>1]#drop the percentage between 0 to 1 not permenant",
"_____no_output_____"
],
[
"#to check how many rows are empty",
"_____no_output_____"
],
[
"df[df['Electrical'].isnull()]",
"_____no_output_____"
],
[
"df[df['Garage.Area'].isnull()]",
"_____no_output_____"
],
[
"# now drop permenantly\ndf=df.dropna(axis=0,subset=[\"Electrical\",\"Garage.Area\"])",
"_____no_output_____"
],
[
"#now run the function to find the missing values \npercent_nan=percent_missing(df)\npercent_nan",
"_____no_output_____"
],
[
"percent_nan=percent_missing(df)\npercent_nan",
"_____no_output_____"
],
[
"#some time missing values are not error it is like some feature that\n#like in this case bsmtCond\n# BSMT Numeric Columns - fillna \"0\"\nbsmt_num_cols = ['BsmtFin.SF.1','BsmtFin.SF.2', 'Bsmt.Unf.SF', 'Total.Bsmt.SF','Bsmt.Full.Bath', 'Bsmt.Half.Bath']\ndf[bsmt_num_cols] = df[bsmt_num_cols].fillna(0) #0 Imputation\n\n#BSMT Ctegorical Columns - fill \"None\"\nbsmt_str_cols = ['Bsmt.Qual', 'Bsmt.Cond', 'Bsmt.Exposure', 'BsmtFin.Type.1', 'BsmtFin.Type.2']\ndf[bsmt_str_cols] = df[bsmt_str_cols].fillna('None') #None Imputation\n",
"_____no_output_____"
],
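[
"#the same pattern (numeric NaN -> 0, categorical NaN -> a placeholder label) is repeated below for the\n#masonry, fireplace and garage columns; a small helper capturing it (added sketch, not in the original notebook):\ndef fill_feature_absent(df, num_cols=None, str_cols=None, label='None'):\n    if num_cols:\n        df[num_cols] = df[num_cols].fillna(0)\n    if str_cols:\n        df[str_cols] = df[str_cols].fillna(label)\n    return df",
"_____no_output_____"
],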
[
"percent_nan=percent_missing(df)\npercent_nan",
"_____no_output_____"
],
[
"plt.figure(figsize=(5,3),dpi=200)\nplt.ylim(0,1)# limit the graph o to 1 percent of data\nsns.barplot(x=percent_nan.index,y=percent_nan)\nplt.xticks(rotation=90)#to rotate the graph",
"_____no_output_____"
],
[
"df[\"Mas.Vnr.Type\"]=df[\"Mas.Vnr.Type\"].fillna(\"none\")",
"_____no_output_____"
],
[
"df[\"Mas.Vnr.Area\"]=df[\"Mas.Vnr.Area\"].fillna(\"0\")",
"_____no_output_____"
],
[
"percent_nan=percent_missing(df)\npercent_nan",
"_____no_output_____"
],
[
"plt.figure(figsize=(5,3),dpi=200)\nplt.ylim(0,1)# limit the graph o to 1 percent of data\nsns.barplot(x=percent_nan.index,y=percent_nan)\nplt.xticks(rotation=90)#to rotate the graph",
"_____no_output_____"
],
[
"gar_str_cols=[\"Garage.Type\",\"Garage.Finish\",\"Garage.Qual\",\"Garage.Cond\"]",
"_____no_output_____"
],
[
"df[gar_str_cols]=df[gar_str_cols].fillna(\"none\")",
"_____no_output_____"
],
[
"percent_nan=percent_missing(df)\npercent_nan",
"_____no_output_____"
],
[
"plt.figure(figsize=(5,3),dpi=200)\n\nsns.barplot(x=percent_nan.index,y=percent_nan)\nplt.xticks(rotation=90)#to rotate the graph",
"_____no_output_____"
],
[
"df[\"Garage.Yr.Blt\"]=df[\"Garage.Yr.Blt\"].fillna(0)",
"_____no_output_____"
],
[
"percent_nan=percent_missing(df)\npercent_nan",
"_____no_output_____"
],
[
"plt.figure(figsize=(5,3),dpi=200)\n\nsns.barplot(x=percent_nan.index,y=percent_nan)\nplt.xticks(rotation=90)#to rotate the graph",
"_____no_output_____"
],
[
"#now there are huge missing value in Pool.QC,Misc.Feature,Alley,Fence so we drop these columns ",
"_____no_output_____"
],
[
"df=df.drop([\"Pool.QC\",\"Misc.Feature\",\"Alley\",\"Fence\"],axis=1)",
"_____no_output_____"
],
[
"percent_nan=percent_missing(df)\npercent_nan",
"_____no_output_____"
],
[
"plt.figure(figsize=(5,3),dpi=200)\n\nsns.barplot(x=percent_nan.index,y=percent_nan)\nplt.xticks(rotation=90)#to rotate the graph",
"_____no_output_____"
],
[
"\ndf[\"Fireplace.Qu\"].value_counts()\n#shows that it have categorical features",
"_____no_output_____"
],
[
"df[\"Fireplace.Qu\"]=df[\"Fireplace.Qu\"].fillna(\"none\")",
"_____no_output_____"
],
[
"percent_nan=percent_missing(df)\npercent_nan",
"_____no_output_____"
],
[
"plt.figure(figsize=(5,3),dpi=200)\n\nsns.barplot(x=percent_nan.index,y=percent_nan)\nplt.xticks(rotation=90)#to rotate the graph",
"_____no_output_____"
],
[
"#Lot.Frontage meaning depending on geographic location\n#need domin knowledge for that",
"_____no_output_____"
],
[
"plt.figure(figsize=(8,11),dpi=200)\n\nsns.boxplot(x=\"Lot.Frontage\",y=\"Neighborhood\",data=df)\n",
"_____no_output_____"
],
[
"# now we can do numeric imputation in the form of mean imputation",
"_____no_output_____"
],
[
"\n\ndf.groupby(\"Neighborhood\")[\"Lot.Frontage\"].mean()",
"_____no_output_____"
],
[
"df[\"Lot.Frontage\"]=df.groupby(\"Neighborhood\")[\"Lot.Frontage\"].transform(lambda value:value.fillna(value.mean()))",
"_____no_output_____"
],
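[
"#quick illustration (added sketch, not in the original notebook) of what groupby().transform() does above:\n#within each group the NaNs are replaced by that group's own mean rather than the global mean\ndemo = pd.DataFrame({'grp': ['a', 'a', 'b', 'b'], 'val': [1.0, np.nan, 10.0, np.nan]})\ndemo['val'] = demo.groupby('grp')['val'].transform(lambda v: v.fillna(v.mean()))\ndemo  #the NaN in group 'a' becomes 1.0 and the NaN in group 'b' becomes 10.0",
"_____no_output_____"
],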
[
"df.isnull().sum()",
"_____no_output_____"
],
[
"#these are very small values so we can drop them or we can fill 0",
"_____no_output_____"
],
[
"df[\"Lot.Frontage\"]=df[\"Lot.Frontage\"].fillna(0)",
"_____no_output_____"
],
[
"percent_nan=percent_missing(df)\npercent_nan",
"_____no_output_____"
],
[
"plt.figure(figsize=(5,3),dpi=200)\n\nsns.barplot(x=percent_nan.index,y=percent_nan)\nplt.xticks(rotation=90)#to rotate the graph",
"_____no_output_____"
],
[
"#this error showa that graph is empty",
"_____no_output_____"
],
[
"df.to_csv(os.path.join(r'C:\\Users\\HP\\OneDrive\\Desktop\\ml\\modular\\ml-class\\Data','ames_no_outliers(no missing value).csv'))",
"_____no_output_____"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
ec6b572958e30702379a2c355149a9b6234d2398 | 11,442 | ipynb | Jupyter Notebook | notebooks/DeepSky_database.ipynb | Skydipper/CNN-tests | 43c80bc1871b13c64035e07cda64a744575e61e7 | [
"MIT"
]
| 7 | 2020-02-10T17:23:42.000Z | 2022-03-30T16:09:07.000Z | notebooks/DeepSky_database.ipynb | Skydipper/CNN-tests | 43c80bc1871b13c64035e07cda64a744575e61e7 | [
"MIT"
]
| 1 | 2020-02-10T16:56:20.000Z | 2020-02-10T17:00:20.000Z | notebooks/DeepSky_database.ipynb | Skydipper/CNN-tests | 43c80bc1871b13c64035e07cda64a744575e61e7 | [
"MIT"
]
| 3 | 2020-09-03T23:10:48.000Z | 2021-08-01T08:35:48.000Z | 35.534161 | 163 | 0.449397 | [
[
[
"# Database for Deep Learning with SkyDL\n**Setup software libraries**",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\nimport os\nimport sqlalchemy\nfrom sqlalchemy import Column, Integer, BigInteger, Float, Text, String, Boolean, DateTime\nfrom sqlalchemy.dialects.postgresql import JSON\nfrom shapely.geometry import shape",
"_____no_output_____"
]
],
[
[
"## Database\n\nWe will create a Database to save all the attributes that we will generate all through the pipeline",
"_____no_output_____"
]
],
[
[
"def create_db_table(table_path, columns, dtypes):\n if not os.path.exists(table_path):\n dictionary = dict(zip(columns, dtypes))\n dtypes = np.dtype([(k, v) for k, v in dictionary.items()]) \n \n data = np.empty(0, dtype=dtypes)\n df = pd.DataFrame(data)\n \n df.to_csv(table_path, sep=';', quotechar='\\'',index=True, index_label='id')\n else:\n df = pd.read_csv(table_path, sep=';', quotechar='\\'').drop(columns='id')\n \n return df",
"_____no_output_____"
],
[
"if not os.path.exists('Database'):\n os.makedirs('Database')\n \ndatasets = create_db_table('Database/dataset.csv', \n columns = ['slug', 'name', 'bands', 'rgb_bands', 'provider'], \n dtypes = [str, str, list, list, str]\n )\n\nimages = create_db_table('Database/image.csv', \n columns = ['dataset_id', 'bands_selections', 'scale', 'init_date',\n 'end_date', 'bands_min_max', 'norm_type', 'geostore_id'], \n dtypes = [int, list, float, str, str, str, str, str]\n )\n\nmodels = create_db_table('Database/model.csv', \n columns = ['model_name', 'model_type', 'model_output', 'model_description', 'output_image_id'], \n dtypes = [str, str, str, str, int]\n )\n \nversions = create_db_table('Database/model_versions.csv', \n columns = ['model_id', 'model_architecture', 'input_image_id', 'output_image_id', 'geostore_id', 'kernel_size', 'sample_size', \n 'training_params', 'version', 'data_status', 'training_status', 'eeified', 'deployed'], \n dtypes = [int, str, int, int, str, int, int, str, int, str, str, bool, bool] \n )",
"_____no_output_____"
]
],
[
[
"### Connecting Pandas to a Database with SQLAlchemy ([tutorial](https://hackersandslackers.com/connecting-pandas-to-a-sql-database-with-sqlalchemy/))\n\n#### Create an engine\n\nAn `engine` is an object used to connect to databases using the information in our URI.",
"_____no_output_____"
]
],
[
[
"engine = sqlalchemy.create_engine('postgresql://postgres:[email protected]:5432/geomodels')",
"_____no_output_____"
]
],
[
[
"#### Create SQL tables from DataFrames",
"_____no_output_____"
]
],
[
[
"def df_to_db(df, table_name):\n if table_name == \"dataset\":\n df.to_sql(\"dataset\",\n engine,\n if_exists='replace',\n schema='public',\n index=True,\n index_label='id',\n chunksize=500,\n dtype={\"slug\": Text,\n \"name\": Text,\n \"bands\": Text,\n \"bands\": Text,\n \"provider\": Text})\n if table_name == \"image\":\n df.to_sql(\"image\",\n engine,\n if_exists='replace',\n schema='public',\n index=True,\n index_label='id',\n chunksize=500,\n dtype={\"dataset_id \": Integer,\n \"bands_selections\": Text,\n \"scale\": Float,\n \"init_date\": Text,\n \"end_date\": Text,\n \"bands_min_max\": JSON,\n \"norm_type\": Text,\n \"geostore_id\": Text})\n \n if table_name == \"model\":\n df.to_sql(\"model\",\n engine,\n if_exists='replace',\n schema='public',\n index=True,\n index_label='id',\n chunksize=500,\n dtype={\"model_name\": Text,\n \"model_type\": Text,\n \"model_output\": Text,\n \"model_description\": Text,\n \"output_image_id\": Integer})\n \n if table_name == \"model_versions\":\n df.to_sql(\"model_versions\",\n engine,\n if_exists='replace',\n schema='public',\n index=True,\n index_label='id',\n chunksize=500,\n dtype={\"model_id\": Integer,\n \"model_architecture\": Text,\n \"input_image_id\": Integer,\n \"output_image_id\": Integer,\n \"geostore_id\": Text,\n \"kernel_size\": BigInteger,\n \"sample_size\": BigInteger,\n \"training_params\": JSON,\n \"version\": BigInteger,\n \"data_status\": Text,\n \"training_status\": Text,\n \"eeified\": Boolean,\n \"deployed\": Boolean}) ",
"_____no_output_____"
]
],
[
[
"**Read DataFrames**",
"_____no_output_____"
]
],
[
[
"if not engine.dialect.has_table(engine, \"dataset\"):\n datasets = pd.read_csv('Database/dataset.csv', sep=';', quotechar='\\'').drop(columns='id')\nif not engine.dialect.has_table(engine, \"image\"):\n images = pd.read_csv('Database/image.csv', sep=';', quotechar='\\'').drop(columns='id')\nif not engine.dialect.has_table(engine, \"model\"):\n models = pd.read_csv('Database/model.csv', sep=';', quotechar='\\'').drop(columns='id')\nif not engine.dialect.has_table(engine, \"model_versions\"):\n versions = pd.read_csv('Database/model_versions.csv', sep=';', quotechar='\\'').drop(columns='id')",
"_____no_output_____"
]
],
[
[
"**Save SQL tables**",
"_____no_output_____"
]
],
[
[
"if not engine.dialect.has_table(engine, \"dataset\"):\n df_to_db(datasets, \"dataset\")\nif not engine.dialect.has_table(engine, \"image\"):\n df_to_db(images, \"image\")\nif not engine.dialect.has_table(engine, \"model\"):\n df_to_db(models, \"model\")\nif not engine.dialect.has_table(engine, \"model_versions\"):\n df_to_db(versions, \"model_versions\")",
"_____no_output_____"
]
],
[
[
"### Populate `dataset` table",
"_____no_output_____"
]
],
[
[
"slugs_list = [\"Sentinel-2-Top-of-Atmosphere-Reflectance\",\n \"Landsat-7-Surface-Reflectance\",\n \"Landsat-8-Surface-Reflectance\",\n \"USDA-NASS-Cropland-Data-Layers\",\n \"USGS-National-Land-Cover-Database\",\n \"Lake-Water-Quality-100m\"]",
"_____no_output_____"
],
[
"c = Skydipper.Collection(search=' '.join(slugs_list), object_type=['dataset'], app=['skydipper'], limit=10)\nc",
"_____no_output_____"
],
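[
"# The next cell calls df_from_query() and df_to_csv(), which are not defined in the cells shown here.\n# A minimal sketch of what they might look like, assuming the engine and the CSV layout used above\n# (these are assumptions, not the original implementations):\ndef df_from_query(table_name):\n    # read the table back from the database and drop the stored index column\n    return pd.read_sql_table(table_name, engine, schema='public').drop(columns='id')\n\ndef df_to_csv(df, table_name):\n    # mirror the table to the local CSV backup created by create_db_table\n    df.to_csv(f'Database/{table_name}.csv', sep=';', quotechar='\\'', index=True, index_label='id')",
"_____no_output_____"
],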
[
"# Read table\ndatasets = df_from_query('dataset')\n\nfor collection in slugs_list:\n\n ds = Skydipper.Dataset(id_hash=collection)\n name = ds.attributes.get('name')\n provider = ds.attributes.get('provider')\n\n bands = [str(ee_collection_specifics.ee_bands(collection))]\n rgb_bands = [str(ee_collection_specifics.ee_bands_rgb(collection))]\n\n\n dictionary = dict(zip(list(datasets.keys()), [collection, name, bands, rgb_bands, provider]))\n \n if (datasets['slug'] == collection).any():\n datasets = datasets\n else:\n datasets = datasets.append(pd.DataFrame(dictionary), ignore_index = True)\n \n # Save table\n df_to_csv(datasets, \"dataset\")\n df_to_db(datasets, \"dataset\")\n \ndatasets",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
]
|