Dataset schema, reconstructed from the flattened column listing (for string columns the statistics are string lengths; for numeric columns, value ranges; "1 class" means a single distinct value; sequence columns carry no statistics):

| column                                     | dtype    | statistics       |
|--------------------------------------------|----------|------------------|
| hexsha                                     | string   | lengths 40 / 40  |
| size                                       | int64    | 6 / 14.9M        |
| ext                                        | string   | 1 class          |
| lang                                       | string   | 1 class          |
| max_stars_repo_path                        | string   | lengths 6 / 260  |
| max_stars_repo_name                        | string   | lengths 6 / 119  |
| max_stars_repo_head_hexsha                 | string   | lengths 40 / 41  |
| max_stars_repo_licenses                    | sequence | n/a              |
| max_stars_count                            | int64    | 1 / 191k         |
| max_stars_repo_stars_event_min_datetime    | string   | lengths 24 / 24  |
| max_stars_repo_stars_event_max_datetime    | string   | lengths 24 / 24  |
| max_issues_repo_path                       | string   | lengths 6 / 260  |
| max_issues_repo_name                       | string   | lengths 6 / 119  |
| max_issues_repo_head_hexsha                | string   | lengths 40 / 41  |
| max_issues_repo_licenses                   | sequence | n/a              |
| max_issues_count                           | int64    | 1 / 67k          |
| max_issues_repo_issues_event_min_datetime  | string   | lengths 24 / 24  |
| max_issues_repo_issues_event_max_datetime  | string   | lengths 24 / 24  |
| max_forks_repo_path                        | string   | lengths 6 / 260  |
| max_forks_repo_name                        | string   | lengths 6 / 119  |
| max_forks_repo_head_hexsha                 | string   | lengths 40 / 41  |
| max_forks_repo_licenses                    | sequence | n/a              |
| max_forks_count                            | int64    | 1 / 105k         |
| max_forks_repo_forks_event_min_datetime    | string   | lengths 24 / 24  |
| max_forks_repo_forks_event_max_datetime    | string   | lengths 24 / 24  |
| avg_line_length                            | float64  | 2 / 1.04M        |
| max_line_length                            | int64    | 2 / 11.2M        |
| alphanum_fraction                          | float64  | 0 / 1            |
| cells                                      | sequence | n/a              |
| cell_types                                 | sequence | n/a              |
| cell_type_groups                           | sequence | n/a              |
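The scalar columns are per-file GitHub metadata; `cells`, `cell_types`, and `cell_type_groups` carry the serialized notebook content itself. To make the constraints in the table concrete, here is a small hypothetical sketch that checks one row against the reported statistics. The `check_row` function and every check inside it are assumptions derived only from the table above; they are not part of any dataset tooling.

```python
# Hypothetical sketch: sanity-check a row (a plain dict keyed by the column
# names above) against the statistics reported in the schema table.
def check_row(row: dict) -> list:
    problems = []
    if len(row["hexsha"]) != 40:                      # hexsha length is fixed at 40
        problems.append("hexsha is not a 40-character hash")
    if row["ext"] != "ipynb":                         # ext has a single class
        problems.append("unexpected file extension")
    if not (0.0 <= row["alphanum_fraction"] <= 1.0):  # reported range is [0, 1]
        problems.append("alphanum_fraction outside [0, 1]")
    # cell_type_groups groups consecutive cells of one type, and cell_types
    # holds one entry per group, so the two sequences should align.
    if len(row["cell_types"]) != len(row["cell_type_groups"]):
        problems.append("cell_types and cell_type_groups lengths disagree")
    return problems
```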
Row 1
hexsha: e7cf77fbf27aaed1441a18e3bdfed1d80bc6ff1c
size: 22,639
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: travaux_diriges/TD1 - Intro a pyTorch - Partie 1.ipynb
max_stars_repo_name: pgermain/cours2018-Intro_aux_r-seaux_de_neurones
max_stars_repo_head_hexsha: bec6bb9e9ff40fbac0e29469fe038b3c10beebf9
max_stars_repo_licenses: [ "CC-BY-4.0" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: travaux_diriges/TD1 - Intro a pyTorch - Partie 1.ipynb
max_issues_repo_name: pgermain/cours2018-Intro_aux_r-seaux_de_neurones
max_issues_repo_head_hexsha: bec6bb9e9ff40fbac0e29469fe038b3c10beebf9
max_issues_repo_licenses: [ "CC-BY-4.0" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: travaux_diriges/TD1 - Intro a pyTorch - Partie 1.ipynb
max_forks_repo_name: pgermain/cours2018-Intro_aux_r-seaux_de_neurones
max_forks_repo_head_hexsha: bec6bb9e9ff40fbac0e29469fe038b3c10beebf9
max_forks_repo_licenses: [ "CC-BY-4.0" ]
max_forks_count: 2
max_forks_repo_forks_event_min_datetime: 2018-10-23T14:22:25.000Z
max_forks_repo_forks_event_max_datetime: 2020-11-19T23:36:33.000Z
avg_line_length: 23.680962
max_line_length: 227
alphanum_fraction: 0.500508
cells, cell_types, cell_type_groups: serialized notebook content follows
[ [ [ "$\\newcommand{\\xbf}{{\\bf x}}\n\\newcommand{\\ybf}{{\\bf y}}\n\\newcommand{\\wbf}{{\\bf w}}\n\\newcommand{\\Ibf}{\\mathbf{I}}\n\\newcommand{\\Xbf}{\\mathbf{X}}\n\\newcommand{\\Rbb}{\\mathbb{R}}\n\\newcommand{\\vec}[1]{\\left[\\begin{array}{c}#1\\end{array}\\right]}\n$\n\n# Introduction aux réseaux de neurones : TD #1 (partie 1)\nMatériel de cours rédigé par Pascal Germain, 2018\n************", "_____no_output_____" ] ], [ [ "%matplotlib inline\nfrom matplotlib import pyplot as plt\nimport numpy as np\nfrom math import sqrt\nimport aidecours", "_____no_output_____" ] ], [ [ "## La libraire pyTorch\n\nhttps://pytorch.org/", "_____no_output_____" ] ], [ [ "import torch", "_____no_output_____" ] ], [ [ "### Les tenseurs", "_____no_output_____" ] ], [ [ "torch.tensor?", "_____no_output_____" ] ], [ [ "Un tenseur peut contenir un scalaire", "_____no_output_____" ] ], [ [ "a = torch.tensor(1.5)\na", "_____no_output_____" ], [ "a + 2", "_____no_output_____" ], [ "a.item()", "_____no_output_____" ] ], [ [ "Les tenseurs contenant des vecteurs ou des matrices se comportent similairement aux *array numpy*.", "_____no_output_____" ] ], [ [ "v = torch.tensor([1,2,3])\nv", "_____no_output_____" ], [ "torch.sum(v)", "_____no_output_____" ], [ "u = torch.tensor([1.,2.,3.])\nu", "_____no_output_____" ], [ "torch.log(u)", "_____no_output_____" ], [ "u[0]", "_____no_output_____" ], [ "u[1:]", "_____no_output_____" ], [ "np.array(u)", "_____no_output_____" ], [ "M = torch.tensor([[1.,2.,3.], [4, 5, 6]])\nM", "_____no_output_____" ], [ "M.shape", "_____no_output_____" ], [ "2 * M + 1", "_____no_output_____" ], [ "M @ u", "_____no_output_____" ], [ "torch.ones((2, 3))", "_____no_output_____" ], [ "torch.zeros((3, 2))", "_____no_output_____" ], [ "torch.rand((3, 4))", "_____no_output_____" ], [ "torch.randn((3, 4))", "_____no_output_____" ] ], [ [ "**ATTENTION:** Les *tenseurs pyTorch* sont plus capricieux sur le type des variables que les *array numpy*.", "_____no_output_____" ] ], [ [ "v = np.array([.3, .6, .9])\nv.dtype", "_____no_output_____" ], [ "w = np.array([-1, 3, 8])\nw.dtype", "_____no_output_____" ], [ "v_tensor = torch.from_numpy(v)\nv_tensor.dtype", "_____no_output_____" ], [ "w_tensor = torch.from_numpy(w)\nw_tensor.dtype", "_____no_output_____" ], [ "print('v:', v.dtype)\nprint('w:', w.dtype)\n\nresult = v @ w\nprint('v @ w:', result.dtype)\nresult", "_____no_output_____" ], [ "print('v_tensor:', w_tensor.dtype)\nprint('w_tensor:', v_tensor.dtype)\nresult = v_tensor @ w_tensor\nprint('v_tensor @ w_tensor:', result.dtype)", "_____no_output_____" ], [ "w_tensor = torch.tensor(w, dtype=torch.float64)\nw_tensor", "_____no_output_____" ], [ "print('v_tensor:', v_tensor.dtype)\nprint('w_tensor:', w_tensor.dtype)\nresult = v_tensor @ w_tensor\nprint('v_tensor @ x_tensor:', result.dtype)", "_____no_output_____" ] ], [ [ "### Dérivation automatique", "_____no_output_____" ], [ "Lors de l'initialisation d'un tenseur, l'argument `requires_grad=True` indique que nous désirons calculer le gradient des variables contenues dans le tenseur.", "_____no_output_____" ] ], [ [ "x = torch.tensor(3., requires_grad=True)", "_____no_output_____" ] ], [ [ "Le graphe de calcul est alors bâti au fur et à mesure des opérations impliquant les tenseurs.", "_____no_output_____" ] ], [ [ "F = x ** 2", "_____no_output_____" ] ], [ [ "La fonction `F.backward()` parcours le graphe de calcul en sens inverse et calcule le gradient de la fonction $F$ selon les variables voulues.", "_____no_output_____" ] ], [ [ "F.backward()", 
"_____no_output_____" ] ], [ [ "Après avoir exécuté la fonction `backward()`, l'attribut `grad` des tenseurs impliqués dans le calcul contient la valeur du gradient calculé au point courant. Ici, on aura la valeur :\n\n$$\\left[\\frac{\\partial F(x)}{\\partial x}\\right]_{x=3} = \\big[\\,2\\,x\\,\\big]_{x=3} = 6$$", "_____no_output_____" ] ], [ [ "x.grad", "_____no_output_____" ] ], [ [ "Illustrons le fonctionnement de la dérivation par quelques autres exemples", "_____no_output_____" ] ], [ [ "x = torch.linspace(-1, 1, 11, requires_grad=True)\nx", "_____no_output_____" ], [ "quad = x @ x\nquad", "_____no_output_____" ], [ "quad.backward()", "_____no_output_____" ], [ "x.grad", "_____no_output_____" ], [ "a = torch.tensor(-3., requires_grad=True)\nb = torch.tensor(2., requires_grad=True)\nm = a*b\nm.backward()\nprint('a.grad =', a.grad)\nprint('b.grad =', b.grad)", "_____no_output_____" ], [ "a = torch.tensor(-3., requires_grad=True)\nb = torch.tensor(2., requires_grad=True)\nm = 2*a + b\nm.backward()\nprint('a.grad =', a.grad)\nprint('b.grad =', b.grad)", "_____no_output_____" ], [ "a = torch.tensor(3., requires_grad=True)\nb = torch.tensor(2., requires_grad=False)\nm = a ** b\nm.backward()\nprint('a.grad =', a.grad)\nprint('b.grad =', b.grad)", "_____no_output_____" ], [ "a = torch.tensor(-3., requires_grad=True)\nb = torch.tensor(2., requires_grad=True)\nc = torch.tensor(4., requires_grad=True)\nm1 = (a + b)\nm2 = m1 * c\nm2.backward()\nprint('a.grad =', a.grad)\nprint('b.grad =', b.grad)\nprint('c.grad =', c.grad)", "_____no_output_____" ], [ "vecteur_a = torch.tensor([-1., 2, 3], requires_grad=True)\nvecteur_b = torch.ones(3, requires_grad=True)\nproduit = vecteur_a @ vecteur_b\nproduit.backward()\nprint('vecteur_a =', vecteur_a, '; vecteur_a.grad =', vecteur_a.grad)\nprint('vecteur_b =', vecteur_b, '; vecteur_b.grad =', vecteur_b.grad)\nprint('produit =', produit.item())", "_____no_output_____" ], [ "vecteur_a = torch.tensor([1., 4, 9], requires_grad=True)\nresult = torch.sum(torch.sqrt(vecteur_a))\nresult.backward()\nprint('vecteur_a =', vecteur_a, '; vecteur_a.grad =', vecteur_a.grad)\nprint('result =', result.item())", "_____no_output_____" ] ], [ [ "### Descente de gradient", "_____no_output_____" ], [ "Commencons par un exemple en une dimension.\n\n$$f(x) = x^2 - x + 3$$", "_____no_output_____" ] ], [ [ "def fonction_maison(x):\n return x**2 - x + 3\n\nx = np.linspace(-2, 2)\nplt.plot(x, fonction_maison(x) )\nplt.plot((.5,),(fonction_maison(.5)), 'r*');", "_____no_output_____" ], [ "eta = .4 # Pas de gradient\nT = 20 # Nombre d'itérations\n\n# Initialisation aléatoire \nx = torch.randn(1, requires_grad=True)\n\nfor t in range(T):\n \n # Calcul de la fonction objectif\n val = fonction_maison(x)\n \n # Calcul des gradients\n val.backward()\n \n print('Interation', t+1, ': x =', x.item(), '; f(x) =', val.item(), '; f\\'(x) =', x.grad.item())\n \n # Mise à jour de la variable x\n with torch.no_grad():\n x -= eta * x.grad\n \n # Remise à zéro du gradient\n x.grad.zero_()\n", "_____no_output_____" ] ], [ [ "Reprenons l'exemple des moindres carrés présentés dans les transparents du cours.\n\n$$\\min_\\wbf \\left[\\frac1n \\sum_{i=1}^n (\\wbf\\cdot\\xbf_i- y_i)^2\\right].$$ ", "_____no_output_____" ] ], [ [ "def moindre_carres_objectif(x, y, w): \n return np.mean((x @ w - y) ** 2)", "_____no_output_____" ], [ "x = np.array([(1,1), (0,-1), (2,.5)])\ny = np.array([-1, 3, 2])", "_____no_output_____" ], [ "fonction_objectif = lambda w: moindre_carres_objectif(x, y, 
w)\naidecours.show_2d_function(fonction_objectif, -5, 5, .5)", "_____no_output_____" ], [ "w_opt = np.linalg.inv(x.T @ x) @ x.T @ y\n\naidecours.show_2d_function(fonction_objectif, -5, 5, .5, optimal=w_opt)", "_____no_output_____" ] ], [ [ "Nous créons une classe `moindre_carres` avec un fonctionnement semblable aux algorithmes de *scikit-learn* qui résout le problème des moindres carrés par descente de gradient, en utilisant les fonctionnalités de *pyTorch*", "_____no_output_____" ] ], [ [ "class moindre_carres:\n def __init__(self, eta=0.4, nb_iter=50, seed=None):\n self.eta=eta\n self.nb_iter=nb_iter\n self.seed = seed\n \n def fit(self, x, y):\n if self.seed is not None:\n torch.manual_seed(seed)\n \n x = torch.tensor(x, dtype=torch.float32)\n y = torch.tensor(y, dtype=torch.float32) \n\n n, d = x.shape\n self.w = torch.randn(d, requires_grad=True)\n \n self.w_list = list() # Servira à garder une trace de la descente de gradient\n self.obj_list = list()\n \n for t in range(self.nb_iter+1):\n \n loss = torch.mean((x @ self.w - y) ** 2)\n \n self.w_list.append(np.array(self.w.detach()))\n self.obj_list.append(loss.item()) \n if t == self.nb_iter: break \n \n with torch.no_grad():\n loss.backward()\n self.w -= self.eta * self.w.grad\n \n self.w.grad.zero_()\n \n def predict(self, x):\n x = torch.tensor(x, dtype=torch.float32)\n pred = x @ self.w.detach()\n return pred.numpy()", "_____no_output_____" ] ], [ [ "Exécution de l'algorithme.", "_____no_output_____" ] ], [ [ "eta = 0.4 # taille du pas\nnb_iter = 20 # nombre d'itérations\n\nalgo = moindre_carres(eta, nb_iter)\nalgo.fit(x, y)", "_____no_output_____" ], [ "fig, axes = plt.subplots(1, 2, figsize=(14.5, 4))\naidecours.sgd_trajectoire(algo.w_list, fonction_objectif, w_opt=w_opt, ax=axes[0])\naidecours.sgd_courbe_objectif(algo.obj_list, ax=axes[1], obj_opt=fonction_objectif(w_opt))", "_____no_output_____" ] ], [ [ "## Exercice\n\nDans cet exercice, nous vous demandons de vous inspirer de la classe `moindre_carrees` ci-haut et de l'adapter au problème de la régression logistique présenté dans les transparents du cours.", "_____no_output_____" ] ], [ [ "from sklearn.datasets import make_blobs\nxx, yy = make_blobs(n_samples=100, centers=2, n_features=2, cluster_std=1, random_state=0)\n\naidecours.show_2d_dataset(xx, yy)", "_____no_output_____" ] ], [ [ "Illustrons la fonction à optimiser (avec $\\lambda=0.01$):\n \n$$\n\\frac1n \\sum_{i=1}^n - y_i \\wbf\\cdot\\xbf_i + \\log(1+e^{\\wbf\\cdot\\xbf_i})+ \\frac\\rho2\\|\\wbf\\|^2\\,.\n$$ ", "_____no_output_____" ] ], [ [ "def sigmoid(x):\n return 1 / (1+np.exp(-x))\n\ndef calc_perte_logistique(w, x, y, rho):\n pred = sigmoid(x @ w)\n pred[y==0] = 1-pred[y==0]\n return np.mean(-np.log(pred)) + rho*w @ w/2\n\nfct_objectif = lambda w: calc_perte_logistique(w, xx, yy, 0.01)\naidecours.show_2d_function(fct_objectif, -4, 4, .05)", "_____no_output_____" ] ], [ [ "Compléter le code de la classe suivante. 
", "_____no_output_____" ] ], [ [ "class regression_logistique:\n def __init__(self, rho=.01, eta=0.4, nb_iter=50, seed=None):\n self.rho = rho\n self.eta = eta\n self.nb_iter = nb_iter\n self.seed = seed\n \n def fit(self, x, y):\n if self.seed is not None:\n torch.manual_seed(seed)\n \n x = torch.tensor(x, dtype=torch.float32)\n y = torch.tensor(y, dtype=torch.float32) \n\n n, d = x.shape\n self.w = torch.randn(d, requires_grad=True)\n \n self.w_list = list() # Servira à garder une trace de la descente de gradient\n self.obj_list = list()\n \n for t in range(self.nb_iter+1):\n pass # Compléter\n \n def predict(self, x):\n x = torch.tensor(x, dtype=torch.float32)\n pred = x @ self.w.detach()\n return np.array(pred.numpy() > .5, dtype=np.int)", "_____no_output_____" ] ], [ [ "Exécuter le code suivant pour vérifier le bon fonctionnement de votre algorithme. Essayer ensuite de varier les paramètres `rho`, `eta` et `nb_iter` afin d'évaluer leur impact sur le résultat obtenu.", "_____no_output_____" ] ], [ [ "rho = 0.01\neta = 0.4 # taille du pas\nnb_iter = 20 # nombre d'itérations\n\nalgo = regression_logistique(rho, eta, nb_iter)\nalgo.fit(xx, yy)\n\nfig, axes = plt.subplots(1, 3, figsize=(16, 4))\naidecours.sgd_trajectoire(algo.w_list, fct_objectif, -4, 4, .05, ax=axes[0])\naidecours.sgd_courbe_objectif(algo.obj_list, ax=axes[1])\naidecours.show_2d_predictions(xx, yy, algo.predict, ax=axes[2]);", "_____no_output_____" ] ], [ [ "Reprenons l'exercice précédent en ajoutant l'apprentissange d'un *biais* à la régression logistique:\n\n$$\n\\frac1n \\sum_{i=1}^n - y_i (\\wbf\\cdot\\xbf_i+b) + \\log(1+e^{\\wbf\\cdot\\xbf_i+b})+ \\frac\\rho2\\|\\wbf\\|^2\\,.\n$$ ", "_____no_output_____" ] ], [ [ "class regression_logistique_avec_biais:\n def __init__(self, rho=.01, eta=0.4, nb_iter=50, seed=None):\n self.rho = rho\n self.eta = eta\n self.nb_iter = nb_iter\n self.seed = seed\n \n def fit(self, x, y):\n if self.seed is not None:\n torch.manual_seed(seed)\n \n x = torch.tensor(x, dtype=torch.float32)\n y = torch.tensor(y, dtype=torch.float32) \n\n n, d = x.shape\n self.w = torch.randn(d, requires_grad=True)\n self.b = torch.zeros(1, requires_grad=True)\n \n self.w_list = list() # Servira à garder une trace de la descente de gradient\n self.obj_list = list()\n \n for t in range(self.nb_iter+1):\n pass # Compléter\n \n def predict(self, x):\n x = torch.tensor(x, dtype=torch.float32)\n pred = x @ self.w.detach() + self.b.item()\n return np.array(pred.numpy() > .5, dtype=np.int)", "_____no_output_____" ], [ "rho = 0.01\neta = 0.4 # taille du pas\nnb_iter = 20 # nombre d'itérations\n\nalgo = regression_logistique_avec_biais(rho, eta, nb_iter)\nalgo.fit(xx, yy)\n\nfig, axes = plt.subplots(1, 2, figsize=(12, 4))\naidecours.sgd_courbe_objectif(algo.obj_list, ax=axes[0])\naidecours.show_2d_predictions(xx, yy, algo.predict, ax=axes[1]);", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
Row 2
hexsha: e7cf7aab0f6fde004a355b2244b0ce8aa144aea6
size: 8,567
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: JavaScripts/Image/CenterPivotIrrigationDetector.ipynb
max_stars_repo_name: OIEIEIO/earthengine-py-notebooks
max_stars_repo_head_hexsha: 5d6c5cdec0c73bf02020ee17d42c9e30d633349f
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: 1,008
max_stars_repo_stars_event_min_datetime: 2020-01-27T02:03:18.000Z
max_stars_repo_stars_event_max_datetime: 2022-03-24T10:42:14.000Z
max_issues_repo_path: JavaScripts/Image/CenterPivotIrrigationDetector.ipynb
max_issues_repo_name: rafatieppo/earthengine-py-notebooks
max_issues_repo_head_hexsha: 99fbc4abd1fb6ba41e3d8a55f8911217353a3237
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: 8
max_issues_repo_issues_event_min_datetime: 2020-02-01T20:18:18.000Z
max_issues_repo_issues_event_max_datetime: 2021-11-23T01:48:02.000Z
max_forks_repo_path: JavaScripts/Image/CenterPivotIrrigationDetector.ipynb
max_forks_repo_name: rafatieppo/earthengine-py-notebooks
max_forks_repo_head_hexsha: 99fbc4abd1fb6ba41e3d8a55f8911217353a3237
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: 325
max_forks_repo_forks_event_min_datetime: 2020-01-27T02:03:36.000Z
max_forks_repo_forks_event_max_datetime: 2022-03-25T20:33:33.000Z
avg_line_length: 46.814208
max_line_length: 1,031
alphanum_fraction: 0.590872
cells, cell_types, cell_type_groups: serialized notebook content follows
[ [ [ "<table class=\"ee-notebook-buttons\" align=\"left\">\n <td><a target=\"_blank\" href=\"https://github.com/giswqs/earthengine-py-notebooks/tree/master/JavaScripts/Image/CenterPivotIrrigationDetector.ipynb\"><img width=32px src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" /> View source on GitHub</a></td>\n <td><a target=\"_blank\" href=\"https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/JavaScripts/Image/CenterPivotIrrigationDetector.ipynb\"><img width=26px src=\"https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png\" />Notebook Viewer</a></td>\n <td><a target=\"_blank\" href=\"https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/JavaScripts/Image/CenterPivotIrrigationDetector.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" /> Run in Google Colab</a></td>\n</table>", "_____no_output_____" ], [ "## Install Earth Engine API and geemap\nInstall the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://github.com/giswqs/geemap). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`.\nThe following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet.\n\n**Important note**: A key difference between folium and ipyleaflet is that ipyleaflet is built upon ipywidgets and allows bidirectional communication between the front-end and the backend enabling the use of the map to capture user input, while folium is meant for displaying static data only ([source](https://blog.jupyter.org/interactive-gis-in-jupyter-with-ipyleaflet-52f9657fa7a)). Note that [Google Colab](https://colab.research.google.com/) currently does not support ipyleaflet ([source](https://github.com/googlecolab/colabtools/issues/60#issuecomment-596225619)). Therefore, if you are using geemap with Google Colab, you should use [`import geemap.eefolium`](https://github.com/giswqs/geemap/blob/master/geemap/eefolium.py). If you are using geemap with [binder](https://mybinder.org/) or a local Jupyter notebook server, you can use [`import geemap`](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py), which provides more functionalities for capturing user input (e.g., mouse-clicking and moving).", "_____no_output_____" ] ], [ [ "# Installs geemap package\nimport subprocess\n\ntry:\n import geemap\nexcept ImportError:\n print('geemap package not installed. Installing ...')\n subprocess.check_call([\"python\", '-m', 'pip', 'install', 'geemap'])\n\n# Checks whether this notebook is running on Google Colab\ntry:\n import google.colab\n import geemap.eefolium as geemap\nexcept:\n import geemap\n\n# Authenticates and initializes Earth Engine\nimport ee\n\ntry:\n ee.Initialize()\nexcept Exception as e:\n ee.Authenticate()\n ee.Initialize() ", "_____no_output_____" ] ], [ [ "## Create an interactive map \nThe default basemap is `Google MapS`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/basemaps.py) can be added using the `Map.add_basemap()` function. 
", "_____no_output_____" ] ], [ [ "Map = geemap.Map(center=[40,-100], zoom=4)\nMap", "_____no_output_____" ] ], [ [ "## Add Earth Engine Python script ", "_____no_output_____" ] ], [ [ "# Add Earth Engine dataset\n# Center-pivot Irrigation Detector.\n#\n# Finds circles that are 500m in radius.\nMap.setCenter(-106.06, 37.71, 12)\n\n# A nice NDVI palette.\npalette = [\n 'FFFFFF', 'CE7E45', 'DF923D', 'F1B555', 'FCD163', '99B718',\n '74A901', '66A000', '529400', '3E8601', '207401', '056201',\n '004C00', '023B01', '012E01', '011D01', '011301']\n\n# Just display the image with the palette.\nimage = ee.Image('LANDSAT/LC08/C01/T1_TOA/LC08_034034_20170608')\nndvi = image.normalizedDifference(['B5','B4'])\n\nMap.addLayer(ndvi, {'min': 0, 'max': 1, 'palette': palette}, 'Landsat NDVI')\n\n# Find the difference between convolution with circles and squares.\n# This difference, in theory, will be strongest at the center of\n# circles in the image. This region is filled with circular farms\n# with radii on the order of 500m.\nfarmSize = 500; # Radius of a farm, in meters.\ncircleKernel = ee.Kernel.circle(farmSize, 'meters')\nsquareKernel = ee.Kernel.square(farmSize, 'meters')\ncircles = ndvi.convolve(circleKernel)\nsquares = ndvi.convolve(squareKernel)\ndiff = circles.subtract(squares)\n\n# Scale by 100 and find the best fitting pixel in each neighborhood.\ndiff = diff.abs().multiply(100).toByte()\nmax = diff.focal_max({'radius': farmSize * 1.8, 'units': 'meters'})\n# If a pixel isn't the local max, set it to 0.\nlocal = diff.where(diff.neq(max), 0)\nthresh = local.gt(2)\n\n# Here, we highlight the maximum differences as \"Kernel Peaks\"\n# and draw them in red.\npeaks = thresh.focal_max({'kernel': circleKernel})\nMap.addLayer(peaks.updateMask(peaks), {'palette': 'FF3737'}, 'Kernel Peaks')\n\n# Detect the edges of the features. Discard the edges with lower intensity.\ncanny = ee.Algorithms.CannyEdgeDetector(ndvi, 0)\ncanny = canny.gt(0.3)\n\n# Create a \"ring\" kernel from two circular kernels.\ninner = ee.Kernel.circle(farmSize - 20, 'meters', False, -1)\nouter = ee.Kernel.circle(farmSize + 20, 'meters', False, 1)\nring = outer.add(inner, True)\n\n# Highlight the places where the feature edges best match the circle kernel.\ncenters = canny.convolve(ring).gt(0.5).focal_max({'kernel': circleKernel})\nMap.addLayer(centers.updateMask(centers), {'palette': '4285FF'}, 'Ring centers')\n", "_____no_output_____" ] ], [ [ "## Display Earth Engine data layers ", "_____no_output_____" ] ], [ [ "Map.addLayerControl() # This line is not needed for ipyleaflet-based Map.\nMap", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
Row 3
hexsha: e7cf874b2b97b675e03ca7ba097f337f92060726
size: 329,753
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: Task2/Task_2_TSF(UnSupervised Learning-Prediction).ipynb
max_stars_repo_name: HaseebRajput007/The-Spark-Foundation-Internship
max_stars_repo_head_hexsha: c46ecce371c0baae17703829c2f7566469adbe3f
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: 1
max_stars_repo_stars_event_min_datetime: 2021-12-30T15:08:16.000Z
max_stars_repo_stars_event_max_datetime: 2021-12-30T15:08:16.000Z
max_issues_repo_path: Task2/Task_2_TSF(UnSupervised Learning-Prediction).ipynb
max_issues_repo_name: HaseebRajput007/The-Spark-Foundation-Internship
max_issues_repo_head_hexsha: c46ecce371c0baae17703829c2f7566469adbe3f
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: Task2/Task_2_TSF(UnSupervised Learning-Prediction).ipynb
max_forks_repo_name: HaseebRajput007/The-Spark-Foundation-Internship
max_forks_repo_head_hexsha: c46ecce371c0baae17703829c2f7566469adbe3f
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: 4
max_forks_repo_forks_event_min_datetime: 2021-07-06T18:38:45.000Z
max_forks_repo_forks_event_max_datetime: 2022-01-07T14:24:09.000Z
avg_line_length: 353.812232
max_line_length: 94,832
alphanum_fraction: 0.92303
cells, cell_types, cell_type_groups: serialized notebook content follows
[ [ [ "## Technical TASK 2 :- Prediction using UnSupervised ML\nIn this task, we are going to predict the optimum number of clusters from the given iris dataset and represent it visually. This includes unsupervised learning.\n#### Task Completed for The Sparks Foundation Internship Program\n#### Data Science & Business Analytics Internship Task_2\n### Author: Muhammad Haseeb Aslam ", "_____no_output_____" ], [ "## Step 0: Importing Libraries needed to perform task", "_____no_output_____" ] ], [ [ "# Importing all the libraries needed in this notebook\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom sklearn import datasets", "_____no_output_____" ] ], [ [ "## Step 1 : Reading the data-set ", "_____no_output_____" ] ], [ [ "# Loading and Reading the iris dataset \n# Data available at the link - 'https://bit.ly/3kXTdox'\n\ndata = pd.read_csv('Iris.csv')\nprint('Data import successfull')", "Data import successfull\n" ], [ "data.head(10) # loads the first five rows", "_____no_output_____" ], [ "data.tail() # loads the last five rows", "_____no_output_____" ], [ "# Checking for NaN values\ndata.isna().sum()", "_____no_output_____" ] ], [ [ "NaN standing for not a number, is a numeric data type used to represent any value that is undefined or unpresentable. For example, 0/0 is undefined as a real number and is, therefore, represented by NaN. So, in this dataset, we don't have such values.", "_____no_output_____" ] ], [ [ "# Checking statistical description\ndata.describe()", "_____no_output_____" ] ], [ [ "### Now, let's check for unique classes in the dataset.", "_____no_output_____" ] ], [ [ "print(data.Species.nunique())\nprint(data.Species.value_counts())", "3\nIris-versicolor 50\nIris-virginica 50\nIris-setosa 50\nName: Species, dtype: int64\n" ] ], [ [ "## Step 2: Data Visualization", "_____no_output_____" ] ], [ [ "sns.set(style = 'whitegrid')\niris = sns.load_dataset('iris');\nax = sns.stripplot(x ='species',y = 'sepal_length',data = iris);\nplt.title('Iris Dataset')\nplt.show()", "_____no_output_____" ], [ "sns.boxplot(x='species',y='sepal_width',data=iris)\nplt.title(\"Iris Dataset\")\nplt.show()", "_____no_output_____" ], [ "sns.boxplot(x='species',y='petal_width',data=iris)\nplt.title(\"Iris Dataset\")\nplt.show()", "_____no_output_____" ], [ "sns.boxplot(x='species',y='petal_length',data=iris)\nplt.title(\"Iris Dataset\")\nplt.show()", "_____no_output_____" ], [ "# Count plot\nsns.countplot(x='species', data=iris, palette=\"OrRd\")\nplt.title(\"Count of different species in Iris dataset\")\nplt.show()", "_____no_output_____" ], [ "#This is needed for the analysis of two variables, for determining the empirical relationship between them.\nsns.heatmap(data.corr(), annot=True,cmap='RdYlGn')\nplt.title(\"Heat-Map\")\nplt.show()", "_____no_output_____" ], [ "iris1 = data.corr() #finding correlation between variables of iris dataset\nfig,ax=plt.subplots(figsize=(10,10))\nsns.heatmap(iris1,vmin=0,vmax=1,square=True,annot=True,linewidth=1)", "_____no_output_____" ] ], [ [ "Heatmap is a two-dimensional graphical representation of data where the individual values that are contained in a matrix are represented as colors. 
Or we can also say that these Heat maps display numeric tabular data where the cells are colored depending upon the contained value.\n\nHeat maps are great for making trends in this kind of data more readily apparent, particularly when the data is ordered and there is clustering.\n\nThe columns with the correlation 1 are the best correlated and vice versa.", "_____no_output_____" ] ], [ [ "import seaborn as sns\n\nsns.set(style=\"ticks\", color_codes=True)\niris = sns.load_dataset(\"iris\")\ng = sns.pairplot(iris)\n\n\nimport matplotlib.pyplot as plt\nplt.show()", "_____no_output_____" ] ], [ [ "\nPairplots are a really simple way to visualize relationships between each variable. It produces a matrix of relationships between each variable in the data for an instant examination of our data.", "_____no_output_____" ], [ "## Step 3 : Finding the optimum number of clusters using k-means clustering", "_____no_output_____" ] ], [ [ "# Finding the optimum number of clusters using k-means\n\nx = data.iloc[:,[0,1,2,3]].values\n\nfrom sklearn.cluster import KMeans\nwcss = []\nfor i in range(1,11):\n kmeans = KMeans(n_clusters=i, init='k-means++', max_iter=300, n_init=10, random_state=0)\n kmeans.fit(x)\n ## appending the WCSS to the list (kmeans.inertia_ returns the WCSS value for an initialized cluster)\n wcss.append(kmeans.inertia_) \n print('k:',i ,\"wcss:\",kmeans.inertia_)", "k: 1 wcss: 281831.54466666654\nk: 2 wcss: 70581.3808\nk: 3 wcss: 31320.711199999994\nk: 4 wcss: 17762.657226173542\nk: 5 wcss: 11423.238080088988\nk: 6 wcss: 7909.5306730769225\nk: 7 wcss: 5881.448116883118\nk: 8 wcss: 4562.780079365082\nk: 9 wcss: 3579.7205612745106\nk: 10 wcss: 2968.4914326653743\n" ], [ "# Plotting the results onto a line graph, allowing us to observe 'The elbow'\n\nplt.plot(range(1,11),wcss)\nplt.title('The Elbow Method')\nplt.xlabel('Number of Clusters')\nplt.ylabel('WCSS')\nplt.show()", "_____no_output_____" ] ], [ [ "We can see that after 3 the drop in WCSS is minimal. So we choose 3 as the optimal number of clusters.", "_____no_output_____" ], [ "## Step 4 : Initializing K-Means With Optimum Number Of Clusters", "_____no_output_____" ] ], [ [ "# Fitting K-Means to the Dataset \nkmeans = KMeans(n_clusters = 3, init = 'k-means++',max_iter = 300, n_init = 10, random_state = 0)\n\n# Returns a label for each data point based on the number of clusters\ny_kmeans = kmeans.fit_predict(x)", "_____no_output_____" ] ], [ [ "## Step 5 : Predicting Values", "_____no_output_____" ] ], [ [ "y_kmeans", "_____no_output_____" ] ], [ [ "## Step 6 : Visualizing the Clusters", "_____no_output_____" ] ], [ [ "# Visualising the clusters\nplt.figure(figsize=(10,10))\nplt.scatter(x[y_kmeans==0,0],x[y_kmeans==0,1],s=100,c='red',label='Iris-setosa')\nplt.scatter(x[y_kmeans==1,0],x[y_kmeans==1,1],s=100,c='blue',label='Iris-versicolour')\nplt.scatter(x[y_kmeans==2,0],x[y_kmeans==2,1],s=100,c='green',label='Iris-virginica')\n\n# Plotting the centroids of the clusters\nplt.scatter(kmeans.cluster_centers_[:,0],kmeans.cluster_centers_[:,1],s=100,c='yellow',label='Centroids')\nplt.title('Iris Flower Clusters')\nplt.xlabel('Sepal Length in cm')\nplt.ylabel('Petal Length in cm')\nplt.legend()\nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
Row 4
hexsha: e7cf87b85dbf8019cf1958d70850e71fc7724cb4
size: 17,991
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: notebooks/21 - Advanced.ipynb
max_stars_repo_name: projetinho-bioinfo/biopython-notebook
max_stars_repo_head_hexsha: 09c1bb8ebc58baa7eb986d970f85f4c969805588
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: 105
max_stars_repo_stars_event_min_datetime: 2015-07-13T15:34:55.000Z
max_stars_repo_stars_event_max_datetime: 2022-03-13T17:05:12.000Z
max_issues_repo_path: notebooks/21 - Advanced.ipynb
max_issues_repo_name: projetinho-bioinfo/biopython-notebook
max_issues_repo_head_hexsha: 09c1bb8ebc58baa7eb986d970f85f4c969805588
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: 23
max_issues_repo_issues_event_min_datetime: 2015-12-05T19:24:02.000Z
max_issues_repo_issues_event_max_datetime: 2020-12-04T16:21:47.000Z
max_forks_repo_path: notebooks/21 - Advanced.ipynb
max_forks_repo_name: projetinho-bioinfo/biopython-notebook
max_forks_repo_head_hexsha: 09c1bb8ebc58baa7eb986d970f85f4c969805588
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: 88
max_forks_repo_forks_event_min_datetime: 2015-08-27T21:31:58.000Z
max_forks_repo_forks_event_max_datetime: 2022-02-13T08:54:45.000Z
avg_line_length: 27.635945
max_line_length: 185
alphanum_fraction: 0.530265
cells, cell_types, cell_type_groups: serialized notebook content follows
[ [ [ "**Source of the materials**: Biopython cookbook (adapted)\n<font color='red'>Status: Draft</font>", "_____no_output_____" ], [ "<font color='red'>Consider looking at the Substitution Matrices example in [Chapter 19 - Cookbook](19 - Cookbook - Cool things to do with it.ipynb#Substitution-Matrices)</font>\n\n# Advanced\n\n[Parser Design](#Parser-Design)\n\n[Substitution Matrices](#Substitution-Matrices)\n\n[FreqTable](#FreqTable)\n\n## Parser Design\n\nMany of the older Biopython parsers were built around an event-oriented\ndesign that includes Scanner and Consumer objects.\n\nScanners take input from a data source and analyze it line by line,\nsending off an event whenever it recognizes some information in the\ndata. For example, if the data includes information about an organism\nname, the scanner may generate an `organism_name` event whenever it\nencounters a line containing the name.\n\nConsumers are objects that receive the events generated by Scanners.\nFollowing the previous example, the consumer receives the\n`organism_name` event, and the processes it in whatever manner necessary\nin the current application.\n\nThis is a very flexible framework, which is advantageous if you want to\nbe able to parse a file format into more than one representation. For\nexample, the `Bio.GenBank` module uses this to construct either\n`SeqRecord` objects or file-format-specific record objects.\n\nMore recently, many of the parsers added for `Bio.SeqIO` and\n`Bio.AlignIO` take a much simpler approach, but only generate a single\nobject representation (`SeqRecord` and `MultipleSeqAlignment` objects\nrespectively). In some cases the `Bio.SeqIO` parsers actually wrap\nanother Biopython parser - for example, the `Bio.SwissProt` parser\nproduces SwissProt format specific record objects, which get converted\ninto `SeqRecord` objects.\n\n## Substitution Matrices\n\n\n### SubsMat\n\nThis module provides a class and a few routines for generating\nsubstitution matrices, similar to BLOSUM or PAM matrices, but based on\nuser-provided data. Additionally, you may select a matrix from\nMatrixInfo.py, a collection of established substitution matrices. The\n`SeqMat` class derives from a dictionary:\n\n", "_____no_output_____" ], [ "```\nclass SeqMat(dict)\n\n```", "_____no_output_____" ], [ "\nThe dictionary is of the form `{(i1,j1):n1, (i1,j2):n2,...,(ik,jk):nk}`\nwhere i, j are alphabet letters, and n is a value.\n\n1. Attributes\n\n 1. `self.alphabet`: a class as defined in Bio.Alphabet\n\n 2. `self.ab_list`: a list of the alphabet’s letters, sorted. Needed\n mainly for internal purposes\n\n2. Methods\n\n", "_____no_output_____" ], [ "```\n__init__(self,data=None,alphabet=None, mat_name='', build_later=0):\n\n```", "_____no_output_____" ], [ "\n 1. `data`: can be either a dictionary, or another\n SeqMat instance.\n\n 2. `alphabet`: a Bio.Alphabet instance. If not provided,\n construct an alphabet from data.\n\n 3. `mat_name`: matrix name, such as “BLOSUM62” or “PAM250”\n\n 4. `build_later`: default false. If true, user may supply only\n alphabet and empty dictionary, if intending to build the\n matrix later. this skips the sanity check of alphabet\n size vs. matrix size.\n\n", "_____no_output_____" ], [ "```\nentropy(self,obs_freq_mat)\n\n```", "_____no_output_____" ], [ "\n 1. `obs_freq_mat`: an observed frequency matrix. 
Returns the\n matrix’s entropy, based on the frequency in `obs_freq_mat`.\n The matrix instance should be LO or SUBS.\n\n", "_____no_output_____" ], [ "```\nsum(self)\n\n```", "_____no_output_____" ], [ "\n Calculates the sum of values for each letter in the matrix’s\n alphabet, and returns it as a dictionary of the form\n `{i1: s1, i2: s2,...,in:sn}`, where:\n\n - i: an alphabet letter;\n\n - s: sum of all values in a half-matrix for that letter;\n\n - n: number of letters in alphabet.\n\n", "_____no_output_____" ], [ "```\nprint_mat(self,f,format=\"%4d\",bottomformat=\"%4s\",alphabet=None)\n\n```", "_____no_output_____" ], [ "\n prints the matrix to file handle f. `format` is the format field\n for the matrix values; `bottomformat` is the format field for\n the bottom row, containing matrix letters. Example output for a\n 3-letter alphabet matrix:\n\n", "_____no_output_____" ], [ "```\nA 23\n B 12 34\n C 7 22 27\n A B C\n\n```", "_____no_output_____" ], [ "\n The `alphabet` optional argument is a string of all characters\n in the alphabet. If supplied, the order of letters along the\n axes is taken from the string, rather than by\n alphabetical order.\n\n3. Usage\n\n The following section is laid out in the order by which most people\n wish to generate a log-odds matrix. Of course, interim matrices can\n be generated and investigated. Most people just want a log-odds\n matrix, that’s all.\n\n 1. Generating an Accepted Replacement Matrix\n\n Initially, you should generate an accepted replacement\n matrix (ARM) from your data. The values in ARM are the counted\n number of replacements according to your data. The data could be\n a set of pairs or multiple alignments. So for instance if\n Alanine was replaced by Cysteine 10 times, and Cysteine by\n Alanine 12 times, the corresponding ARM entries would be:\n\n", "_____no_output_____" ], [ "```\n('A','C'): 10, ('C','A'): 12\n\n```", "_____no_output_____" ], [ "\n as order doesn’t matter, user can already provide only one\n entry:\n\n", "_____no_output_____" ], [ "```\n('A','C'): 22\n\n```", "_____no_output_____" ], [ "\n A SeqMat instance may be initialized with either a full (first\n method of counting: 10, 12) or half (the latter method, 22)\n matrices. A full protein alphabet matrix would be of the size\n 20x20 = 400. A half matrix of that alphabet would be 20x20/2 +\n 20/2 = 210. That is because same-letter entries don’t change.\n (The matrix diagonal). Given an alphabet size of N:\n\n 1. Full matrix size: N\\*N\n\n 2. Half matrix size: N(N+1)/2\n\n The SeqMat constructor automatically generates a half-matrix, if\n a full matrix is passed. If a half matrix is passed, letters in\n the key should be provided in alphabetical order: (’A’,’C’) and\n not (’C’,A’).\n\n At this point, if all you wish to do is generate a log-odds\n matrix, please go to the section titled Example of Use. The\n following text describes the nitty-gritty of internal functions,\n to be used by people who wish to investigate their\n nucleotide/amino-acid frequency data more thoroughly.\n\n 2. Generating the observed frequency matrix (OFM)\n\n Use:\n\n", "_____no_output_____" ], [ "```python\nOFM = SubsMat._build_obs_freq_mat(ARM)\n\n```", "_____no_output_____" ], [ "\n The OFM is generated from the ARM, only instead of replacement\n counts, it contains replacement frequencies.\n\n 3. Generating an expected frequency matrix (EFM)\n\n Use:\n\n", "_____no_output_____" ], [ "```\nEFM = SubsMat._build_exp_freq_mat(OFM,exp_freq_table)\n\n```", "_____no_output_____" ], [ "\n 1. 
`exp_freq_table`: should be a FreqTable instance. See\n section \\[sec:freq\\_table\\] for detailed information\n on FreqTable. Briefly, the expected frequency table has the\n frequencies of appearance for each member of the alphabet.\n It is implemented as a dictionary with the alphabet letters\n as keys, and each letter’s frequency as a value. Values sum\n to 1.\n\n The expected frequency table can (and generally should) be\n generated from the observed frequency matrix. So in most cases\n you will generate `exp_freq_table` using:\n\n", "_____no_output_____" ], [ "```\nfrom Bio import SubsMat\nfrom Bio.SubsMat import _build_obs_freq_mat\nOFM = _build_obs_freq_mat(ARM)\nexp_freq_table = SubsMat._exp_freq_table_from_obs_freq(OFM)\nEFM = SubsMat._build_exp_freq_mat(OFM, exp_freq_table)\n```", "_____no_output_____" ], [ "\n But you can supply your own `exp_freq_table`, if you wish\n\n 4. Generating a substitution frequency matrix (SFM)\n\n Use:\n\n", "_____no_output_____" ], [ "```\nSFM = SubsMat._build_subs_mat(OFM,EFM)\n\n```", "_____no_output_____" ], [ "\n Accepts an OFM, EFM. Provides the division product of the\n corresponding values.\n\n 5. Generating a log-odds matrix (LOM)\n\n Use:\n\n", "_____no_output_____" ], [ "```\nLOM=SubsMat._build_log_odds_mat(SFM[,logbase=10,factor=10.0,round_digit=1])\n\n```", "_____no_output_____" ], [ "\n 1. Accepts an SFM.\n\n 2. `logbase`: base of the logarithm used to generate the\n log-odds values.\n\n 3. `factor`: factor used to multiply the log-odds values. Each\n entry is generated by log(LOM\\[key\\])\\*factor And rounded to\n the `round_digit` place after the decimal point,\n if required.\n\n4. Example of use\n\n As most people would want to generate a log-odds matrix, with\n minimum hassle, SubsMat provides one function which does it all:\n\n", "_____no_output_____" ], [ "```\nmake_log_odds_matrix(acc_rep_mat,exp_freq_table=None,logbase=10,\n factor=10.0,round_digit=0):\n\n```", "_____no_output_____" ], [ "\n 1. `acc_rep_mat`: user provided accepted replacements matrix\n\n 2. `exp_freq_table`: expected frequencies table. Used if provided,\n if not, generated from the `acc_rep_mat`.\n\n 3. `logbase`: base of logarithm for the log-odds matrix. Default\n base 10.\n\n 4. `round_digit`: number after decimal digit to which result should\n be rounded. Default zero.\n\n## FreqTable\n\n", "_____no_output_____" ], [ "```\nFreqTable.FreqTable(UserDict.UserDict)\n\n```", "_____no_output_____" ], [ "\n1. Attributes:\n\n 1. `alphabet`: A Bio.Alphabet instance.\n\n 2. `data`: frequency dictionary\n\n 3. `count`: count dictionary (in case counts are provided).\n\n2. Functions:\n\n 1. `read_count(f)`: read a count file from stream f. Then convert\n to frequencies.\n\n 2. `read_freq(f)`: read a frequency data file from stream f. Of\n course, we then don’t have the counts, but it is usually the\n letter frequencies which are interesting.\n\n3. Example of use: The expected count of the residues in the database\n is sitting in a file, whitespace delimited, in the following format\n (example given for a 3-letter alphabet):\n\n", "_____no_output_____" ], [ "```\nA 35\n B 65\n C 100\n\n```", "_____no_output_____" ], [ "\n And will be read using the\n `FreqTable.read_count(file_handle)` function.\n\n An equivalent frequency file:\n\n", "_____no_output_____" ], [ "```\nA 0.175\n B 0.325\n C 0.5\n\n```", "_____no_output_____" ], [ "\n Conversely, the residue frequencies or counts can be passed as\n a dictionary. 
Example of a count dictionary (3-letter alphabet):\n\n", "_____no_output_____" ], [ "```\n{'A': 35, 'B': 65, 'C': 100}\n\n```", "_____no_output_____" ], [ "\n Which means that an expected data count would give a 0.5 frequency\n for ’C’, a 0.325 probability of ’B’ and a 0.175 probability of ’A’\n out of 200 total, sum of A, B and C)\n\n A frequency dictionary for the same data would be:\n\n", "_____no_output_____" ], [ "```\n{'A': 0.175, 'B': 0.325, 'C': 0.5}\n\n```", "_____no_output_____" ], [ "\n Summing up to 1.\n\n When passing a dictionary as an argument, you should indicate\n whether it is a count or a frequency dictionary. Therefore the\n FreqTable class constructor requires two arguments: the dictionary\n itself, and FreqTable.COUNT or FreqTable.FREQ indicating counts or\n frequencies, respectively.\n\n Read expected counts. readCount will already generate the\n frequencies Any one of the following may be done to geerate the\n frequency table (ftab):\n\n", "_____no_output_____" ], [ "```python\nfrom Bio.SubsMat import *\nftab = FreqTable.FreqTable(my_frequency_dictionary, FreqTable.FREQ)\nftab = FreqTable.FreqTable(my_count_dictionary, FreqTable.COUNT)\nftab = FreqTable.read_count(open('myCountFile'))\nftab = FreqTable.read_frequency(open('myFrequencyFile'))\n```", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
Row 5
hexsha: e7cf8c5a707c1b3a484efd9f1dab2e300b29a4ed
size: 113,878
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: 02_tools-and-packages/01_plot-packages.ipynb
max_stars_repo_name: FelixBleimund/py-algorithms-4-automotive-engineering
max_stars_repo_head_hexsha: 8aa46aa6ee4ffb48ee6de1121d28413abcf92f82
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: 02_tools-and-packages/01_plot-packages.ipynb
max_issues_repo_name: FelixBleimund/py-algorithms-4-automotive-engineering
max_issues_repo_head_hexsha: 8aa46aa6ee4ffb48ee6de1121d28413abcf92f82
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: 02_tools-and-packages/01_plot-packages.ipynb
max_forks_repo_name: FelixBleimund/py-algorithms-4-automotive-engineering
max_forks_repo_head_hexsha: 8aa46aa6ee4ffb48ee6de1121d28413abcf92f82
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
avg_line_length: 178.212833
max_line_length: 29,188
alphanum_fraction: 0.909368
cells, cell_types, cell_type_groups: serialized notebook content follows
[ [ [ "# Required to load webpages\nfrom IPython.display import IFrame", "_____no_output_____" ] ], [ [ "[Table of contents](../toc.ipynb)\n\n# Plotting packages\n\n* Until now, we were just able to return results via `print()` command.\n* However, as humans depend very much on vision, data visualization is urgently needed.", "_____no_output_____" ], [ "## Matplotlib\n\n<img src=\"https://github.com/matplotlib/matplotlib/raw/master/doc/_static/logo2.png\" alt=\"Matplotlib\" width=\"350\" align=\"right\">\n\n* Very common and widely used is the [matplotlib](https://matplotlib.org/) package.\n* Matplotlib provides with `pyplot` module very similar plotting commands as Matlab.\n* Matplotlib is packaged and available through pip and conda.\n* Let's create our first plot.", "_____no_output_____" ] ], [ [ "from matplotlib import pyplot as plt\n%matplotlib inline", "_____no_output_____" ] ], [ [ "* The first line of code import the pyplot module, which is very convenient for most simple plots.\n* The second line is a special Jupyter magic command to present the plots instantly.", "_____no_output_____" ], [ "### Basic plotting", "_____no_output_____" ] ], [ [ "# First, we create some data to plot, which is not that handy with lists.\nx = [i for i in range(-20, 20)]\ny = [x[i]**2 for i in range(0,40)]", "_____no_output_____" ], [ "# Now the plot command\nplt.plot(x, y)\n\nplt.show()", "_____no_output_____" ], [ "# Here the same plot with axis labels\nplt.plot(x, y)\nplt.xlabel(\"samples\")\nplt.ylabel(\"$x^2$\") # You can use LaTeX's math support\n\nplt.show()", "_____no_output_____" ], [ "# Some line style arguments and a title\nplt.plot(x, y, \"k--\")\nplt.xlabel(\"samples\")\nplt.ylabel(\"$x^2$\") # You can use LaTeX's math support\nplt.title(\"My parabola\")\n\nplt.show()", "_____no_output_____" ] ], [ [ "### Multiple plots in one figure\n\nTo present two time series, we will create an additional list of values.", "_____no_output_____" ] ], [ [ "import math\ny2 = [math.sin(x[i]) for i in range(0,40)]", "_____no_output_____" ], [ "plt.subplot(2, 1, 1)\nplt.plot(x, y, 'o-')\nplt.title('A tale of 2 subplots')\nplt.ylabel('$x^2$')\n\nplt.subplot(2, 1, 2)\nplt.plot(x, y2, '.-')\nplt.xlabel('samples')\nplt.ylabel('$\\sin{x}$')\n\nplt.show()", "_____no_output_____" ] ], [ [ "* I guess you got the concept here.\n* Also basic statistical plots are simple to generate.", "_____no_output_____" ] ], [ [ "# Generate random numbers in a list\nimport random\n\ny3 = [random.gammavariate(alpha=1.2, beta=2.3) for i in range(0, 5000)]", "_____no_output_____" ], [ "plt.hist(y3, bins=40)\nplt.xlabel('Data')\nplt.ylabel('Probability density')\nplt.title(r'Histogram of Gammadist: $\\alpha=1.2$, $\\beta=2.3$')\n\nplt.show()", "_____no_output_____" ] ], [ [ "### Exercise: Matplotlib (10 minutes)\n\n<img src=\"../_static/exercise.png\" alt=\"Exercise\" width=\"75\" align=\"left\">\n\n\nHere the task:\n\n* Activate your local Python environment and install matplotlib with `conda install matplotlib`.\n* Either work in Jupyter notebook, in Ipython shell, or Pycharm.\n* Create a list of 1500 data points.\n* Add a second list of 1500 data points with Gaussian distribution $\\mathcal{N}\\sim(0, 1)$.\n* Create a figure with a histogram of the data.", "_____no_output_____" ], [ "### Solution\n\nPlease find one possible solution in [`solution_plotting.py`](solution_plotting.py) file.", "_____no_output_____" ] ], [ [ "%run solution_plotting.py\nplt.show()", "_____no_output_____" ] ], [ [ "### More plot types\n\n* Many other plots 
are as simple as the examples above.\n* Other typical plots are \n * Bar plots\n * 3D plots\n * Pie charts\n * Scatter plots\n * ...\n* You can find many more plot examples in the [Matlotlib gallery](https://matplotlib.org/gallery/index.html)", "_____no_output_____" ] ], [ [ "IFrame(src='https://matplotlib.org/gallery/index.html', width=700, height=600)", "_____no_output_____" ] ], [ [ "## Seaborn\n\n* Seaborn is a statistical plot library for Python.\n* It is based on matplotlib.\n* Please find it's documentation here [https://seaborn.pydata.org/](https://seaborn.pydata.org/).\n* You can install it with `conda install seaborn`.\n* Next comes a snapshot of seaborn's example gallery [https://seaborn.pydata.org/examples/index.html](https://seaborn.pydata.org/examples/index.html).", "_____no_output_____" ] ], [ [ "IFrame(src='https://seaborn.pydata.org/examples/index.html', width=700, height=600)", "_____no_output_____" ] ], [ [ "## Bokeh\n\n<img src=\"https://static.bokeh.org/logos/logotype.svg\" alt=\"Bokeh\" width=\"350\" align=\"right\">\n\n* Add to \"static\" plots, interactive plots and dashboards can be build with Bokeh library.\n* Interactive plots are ideal if you want to visualize large data sets.\n* Real time information is visible like server load,...\n* Boekh is easy to install via conda and pip.\n* Please find here the Bokeh gallery [https://docs.bokeh.org/en/latest/docs/gallery.html](https://docs.bokeh.org/en/latest/docs/gallery.html).\n* And in next cell, an interactive plot of bokeh website is presented.", "_____no_output_____" ] ], [ [ "IFrame(src='https://demo.bokeh.org/crossfilter', width=700, height=600)", "_____no_output_____" ] ], [ [ "## Plotly\n\n* Plotly is another very simple to use interactive plot library.\n* You can find more detail in [https://plot.ly/python/](https://plot.ly/python/).\n* Next, some examples from plotly gallery are presented.", "_____no_output_____" ] ], [ [ "IFrame(src='https://plot.ly/python/', width=700, height=600)", "_____no_output_____" ] ], [ [ "## I a nutshell\n\n* There are many very powerful plot libraries available for Python.\n* The chance that someone wrote a template for your plotting task is very high.\n* Just look in the galleries and you will find a great starting point.", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
Row 6
hexsha: e7cfaa01ba8f2055c6c30972e4ea89ffacd35da9
size: 21,190
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: labs/01_python/laboratorio_03.ipynb
max_stars_repo_name: andresmontecinos12/mat281_portfolio
max_stars_repo_head_hexsha: 38036b441349f68f3d1224cf592864d84d26490b
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: 1
max_stars_repo_stars_event_min_datetime: 2020-10-01T16:06:54.000Z
max_stars_repo_stars_event_max_datetime: 2020-10-01T16:06:54.000Z
max_issues_repo_path: labs/01_python/laboratorio_03.ipynb
max_issues_repo_name: andresmontecinos12/mat281_portfolio
max_issues_repo_head_hexsha: 38036b441349f68f3d1224cf592864d84d26490b
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: labs/01_python/laboratorio_03.ipynb
max_forks_repo_name: andresmontecinos12/mat281_portfolio
max_forks_repo_head_hexsha: 38036b441349f68f3d1224cf592864d84d26490b
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
avg_line_length: 23.808989
max_line_length: 212
alphanum_fraction: 0.402832
cells, cell_types, cell_type_groups: serialized notebook content follows
[ [ [ "<img src=\"images/usm.jpg\" width=\"480\" height=\"240\" align=\"left\"/>", "_____no_output_____" ], [ "# MAT281 - Laboratorio N°03\n\n## Objetivos de la clase\n\n* Reforzar los conceptos básicos de pandas.", "_____no_output_____" ], [ "## Contenidos\n\n* [Problema 01](#p1)\n", "_____no_output_____" ], [ "## Problema 01\n\n\n<img src=\"https://imagenes.universia.net/gc/net/images/practicas-empleo/p/pr/pro/profesiones-con-el-avance-de-la-tecnologia.jpg\" width=\"480\" height=\"360\" align=\"center\"/>\n\n\nEL conjunto de datos se denomina `ocupation.csv`, el cual contiene información tal como: edad ,sexo, profesión, etc.\n\nLo primero es cargar el conjunto de datos y ver las primeras filas que lo componen:", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport os", "_____no_output_____" ], [ "# cargar datos\ndf = pd.read_csv(os.path.join(\"data\",\"ocupation.csv\"), sep=\"|\").set_index('user_id')\ndf.head()\n", "_____no_output_____" ] ], [ [ "El objetivo es tratar de obtener la mayor información posible de este conjunto de datos. Para cumplir este objetivo debe resolver las siguientes problemáticas:", "_____no_output_____" ], [ "1. ¿Cuál es el número de observaciones en el conjunto de datos?", "_____no_output_____" ] ], [ [ "\nprint('El número de observaciones es de',df.shape[0],'elementos')", "El número de observaciones es de 943 elementos\n" ] ], [ [ "2. ¿Cuál es el número de columnas en el conjunto de datos?", "_____no_output_____" ] ], [ [ "print('El número de columnas es',df.shape[1])", "El número de columnas es 4\n" ] ], [ [ "3. Imprime el nombre de todas las columnas", "_____no_output_____" ] ], [ [ "list(df)", "_____no_output_____" ] ], [ [ "4. Imprima el índice del dataframe", "_____no_output_____" ] ], [ [ "print(\"index:\")\nprint(df.index)", "index:\nInt64Index([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,\n ...\n 934, 935, 936, 937, 938, 939, 940, 941, 942, 943],\n dtype='int64', name='user_id', length=943)\n" ] ], [ [ "5. ¿Cuál es el tipo de datos de cada columna?", "_____no_output_____" ] ], [ [ "df.dtypes", "_____no_output_____" ] ], [ [ "6. Resumir el conjunto de datos", "_____no_output_____" ] ], [ [ "df.describe()", "_____no_output_____" ] ], [ [ "7. Resume conjunto de datos con todas las columnas", "_____no_output_____" ] ], [ [ "df.describe(include='all')", "_____no_output_____" ] ], [ [ "8. Imprimir solo la columna de **occupation**.", "_____no_output_____" ] ], [ [ "df['occupation'].head()", "_____no_output_____" ], [ "print(df['occupation'])", "user_id\n1 technician\n2 other\n3 writer\n4 technician\n5 other\n ... \n939 student\n940 administrator\n941 student\n942 librarian\n943 student\nName: occupation, Length: 943, dtype: object\n" ] ], [ [ "9. ¿Cuántas ocupaciones diferentes hay en este conjunto de datos?", "_____no_output_____" ] ], [ [ "a=df['occupation'].unique() #Ocupación\n\nprint('En total hay',len(a),'categorias de ocupaciones')\n", "En total hay 21 categorias de ocupaciones\n" ], [ "list(a)", "_____no_output_____" ] ], [ [ "10. ¿Cuál es la ocupación más frecuente?", "_____no_output_____" ] ], [ [ "a=df['occupation'].unique() #Ocupación\n\nfrec=pd.Series()\nfor ocupa in a:\n df_aux = df.loc[lambda x: x['occupation'] == ocupa]\n #print(df_aux)\n frec[ocupa] = len(df_aux)\n\nfrec\n", "<ipython-input-438-901d77bb1c0e>:3: DeprecationWarning: The default dtype for empty Series will be 'object' instead of 'float64' in a future version. 
Specify a dtype explicitly to silence this warning.\n frec=pd.Series()\n" ], [ "# dataframe with list\ndf_list1 = pd.DataFrame([], columns = ['ocupaciones'])\ndf_list1['ocupaciones']=frec\nfrec_mas=frec[frec==frec.max()]\nfrec_mas\n\nprint('la ocupación más frecuete es:',frec_mas)", "la ocupación más frecuete es: student 196\ndtype: int64\n" ] ], [ [ "11. ¿Cuál es la edad media de los usuarios?", "_____no_output_____" ] ], [ [ "import math\n\n\nprint('La edad media de los usuarios es :',math.floor(df['age'].mean()))", "La edad mediade los usuarios es : 34\n" ] ], [ [ "12. ¿Cuál es la edad con menos ocurrencia?", "_____no_output_____" ] ], [ [ "b=df['age'].unique()\nfrec_edad=pd.Series()\n#nba_position_duration = pd.Series()\nfor edad in b:\n df_aux = df.loc[lambda x: x['age'] == edad]\n edad_str=str(edad)\n frec_edad[edad_str] = len(df_aux)", "<ipython-input-441-34841642a62d>:2: DeprecationWarning: The default dtype for empty Series will be 'object' instead of 'float64' in a future version. Specify a dtype explicitly to silence this warning.\n frec_edad=pd.Series()\n" ], [ "# dataframe with list\ndf_list = pd.DataFrame([], columns = [\"edad\"])\ndf_list['edad']=frec_edad\nedad_menos=frec_edad[frec_edad==frec_edad.min()]", "_____no_output_____" ], [ "print('Las edades menos concurridas son: 7,66,11,10 y 73 años')\nedad_menos", "Las edades menos concurridas son: 7,66,11,10 y 73 años\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
Row 7
hexsha: e7cfaf26cdf2a33969b9aa3f91679c5f614e1b65
size: 201,720
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: _notebooks/2020-06-29-Visualising-text-data-with-unsupervised-learning.ipynb
max_stars_repo_name: theo-r/datablog
max_stars_repo_head_hexsha: 88c510c48b5bbbe4a4a6a9faeb417c6683c38105
max_stars_repo_licenses: [ "Apache-2.0" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: _notebooks/2020-06-29-Visualising-text-data-with-unsupervised-learning.ipynb
max_issues_repo_name: theo-r/datablog
max_issues_repo_head_hexsha: 88c510c48b5bbbe4a4a6a9faeb417c6683c38105
max_issues_repo_licenses: [ "Apache-2.0" ]
max_issues_count: 3
max_issues_repo_issues_event_min_datetime: 2021-03-30T11:17:31.000Z
max_issues_repo_issues_event_max_datetime: 2022-02-26T08:46:28.000Z
max_forks_repo_path: _notebooks/2020-06-29-Visualising-text-data-with-unsupervised-learning.ipynb
max_forks_repo_name: theo-r/datablog
max_forks_repo_head_hexsha: 88c510c48b5bbbe4a4a6a9faeb417c6683c38105
max_forks_repo_licenses: [ "Apache-2.0" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
avg_line_length: 58.300578
max_line_length: 30,312
alphanum_fraction: 0.489743
cells, cell_types, cell_type_groups: serialized notebook content follows
[ [ [ "# Visualising text data with unsupervised learning\n- branch: master\n- badges: false\n- comments: true\n- author: Theo Rutter\n- image: images/plot.png\n- categories: [python, tfidf, pca, clustering, plotly, nlp]", "_____no_output_____" ] ], [ [ "# hide\n!pip install reed", "Requirement already satisfied: reed in /usr/local/lib/python3.6/dist-packages (0.0.3)\n" ], [ "# hide\nimport re\nimport json\nfrom html.parser import HTMLParser\n\nimport numpy as np", "_____no_output_____" ], [ "# hide\nclass MyHTMLParser(HTMLParser):\n\n def __init__(self):\n self.string = ''\n super().__init__()\n\n def handle_data(self, data):\n self.string = self.string + ' ' + data\n return (data)\n\n def return_data(self):\n return self.string.strip().replace(' ', ' ')", "_____no_output_____" ], [ "# hide\nfrom googleapiclient.discovery import build\nimport io, os\nfrom googleapiclient.http import MediaIoBaseDownload\nfrom google.colab import auth\n\nauth.authenticate_user()\n\ndrive_service = build('drive', 'v3')\nresults = drive_service.files().list(\n q=\"name = 'creds.json'\", fields=\"files(id)\").execute()\ncreds = results.get('files', [])\nresults = drive_service.files().list(\n q=\"name = 'job_data.csv'\", fields=\"files(id)\").execute()\ndata = results.get('files', [])\n\nfilename = \"/content/.reed/creds.json\"\nos.makedirs(os.path.dirname(filename), exist_ok=True)\n\nrequest = drive_service.files().get_media(fileId=creds[0]['id'])\nfh = io.FileIO(filename, 'wb')\ndownloader = MediaIoBaseDownload(fh, request)\ndone = False\nwhile done is False:\n status, done = downloader.next_chunk()\n print(\"Download %d%%.\" % int(status.progress() * 100))\nos.chmod(filename, 600)\n\nfilename = \"/content/data/job_data.csv\"\nos.makedirs(os.path.dirname(filename), exist_ok=True)\n\nrequest = drive_service.files().get_media(fileId=data[0]['id'])\nfh = io.FileIO(filename, 'wb')\ndownloader = MediaIoBaseDownload(fh, request)\ndone = False\nwhile done is False:\n status, done = downloader.next_chunk()\n print(\"Download %d%%.\" % int(status.progress() * 100))\nos.chmod(filename, 600)", "Download 100%.\nDownload 100%.\n" ], [ "# hide\nfilename = \"/content/.reed/creds.json\"\nwith open(filename, 'r') as f:\n creds = json.load(f)\n \napi_key=creds['API_KEY']", "_____no_output_____" ] ], [ [ "## Intro\n\nVisualisation is an essential part of working with data. Whether we are delivering regular reports to decision-makers, presenting the results of a regression model to a room of stakeholders, or creating a real-time dashboard for the wider business, we are using data to tell a story, and visuals bring these stories to life.\n\nIn this post we are going to explore a number of ways to visualise text data in the context of analysing job posts for three data roles: data scientists, data engineers and data analysts. These roles have a lot in common but are fundamentally different; will the content of the job descriptions for these roles reflect the differences between them? And how can we visualise these differences?\n\n## Getting the data\n\nFirst we need to collect some job descriptions to analyse. There are plenty of job search APIs out there to choose from and we also have the option to scrape directly from job websites. I ended up using the Reed developer API because of the simple process of signing up for credentials. 
I wrote a python wrapper for the API which I will be using to extract a collection of relevant job posts; you can find it [here](https://pypi.org/project/reed/).\n\nIt's a simple process to fetch the jobs matching a keyword search query. The response is a list of json objects which in this case are identical to python dictionaries, so we can easily place the data in a pandas dataframe. ", "_____no_output_____" ] ], [ [ "import pandas as pd\nfrom reed import ReedClient\n\nclient = ReedClient(api_key=api_key)\nsearch_params = {\n    'keywords': 'data+scientist|data+engineer|data+analyst', \n    'resultsToTake': 600\n}\nresponse = client.search(**search_params)\ndf = pd.DataFrame(response)\ndf.shape", "_____no_output_____" ] ], [ [ "Our search returned 524 jobs in total; let's clean up the job titles and see what we have.", "_____no_output_____" ] ], [ [ "def clean_title(title_str): \n    # lowercase, trim and collapse double spaces\n    return title_str.lower().strip().replace('  ', ' ')\n\ndf['jobTitle'] = [clean_title(x) for x in df.jobTitle]\ndf.groupby('jobTitle').size().sort_values(ascending=False).head(10)", "_____no_output_____" ] ], [ [ "We're going to remove all the posts which don't have one of the three most common titles; what we're left with should be broadly similar in terms of representing mid-level roles within each category. ", "_____no_output_____" ] ], [ [ "accepted_titles = ['data scientist', 'data engineer', 'data analyst']\ndf = df[[x in accepted_titles for x in df.jobTitle]].set_index('jobId')", "_____no_output_____" ] ], [ [ "The job search API gives a truncated version of the job descriptions, so if we want the complete text we'll have to take each individual job id and pull down the details using the job details function one-by-one. To make this process easier I created a dictionary with the three job titles as keys and the items consisting of the list of ids corresponding to each title.", "_____no_output_____" ] ], [ [ "groups = df.groupby('jobTitle').groups\ngroups.keys()", "_____no_output_____" ] ], [ [ "Now we can loop over the dictionary, pulling down the description from each job in turn. There's another complication in that the job descriptions are returned as html documents. We're only interested in the text data, so we're going to have to parse the html to extract the information we want. We can wrap this process inside a function which we call for each id in our dictionary. ", "_____no_output_____" ] ], [ [ "def get_job_desc(job_type, job_id, client, html_parser):\n    # use the parser passed in rather than the global instance\n    desc_html = client.job_details(job_id)['jobDescription']\n    html_parser.feed(desc_html)\n    desc_str = html_parser.return_data()\n    # reset parser string\n    html_parser.string = ''\n    return dict(job=job_type, job_id=job_id, desc=desc_str)\n    \nparser = MyHTMLParser()\njob_descriptions = []\nfor title in groups:\n    for id in groups[title]:\n        job_desc_dict = get_job_desc(title, id, client, parser)\n        job_descriptions.append(job_desc_dict)\n    \ndf = pd.DataFrame(job_descriptions)\ndf.head()", "_____no_output_____" ] ], [ [ "In order to visualise the content of the job descriptions we need a numerical representation of the text. One way to do this is with a bag-of-words approach, which consists of separating the text in our documents into tokens and counting the appearances of each token in each document. Before we do this there are some errands we need to run first. \n\nSklearn's CountVectorizer class performs the tokenization step for us, but we need to be careful when using it. By default it splits the text on punctuation marks and discards tokens less than two characters long. 
For this reason, the word 'Ph.D', which is frequently used, would be split into the two meaningless tokens 'Ph' and 'D', for example. The important term 'R', denoting the programming language, would also be discarded. We can remedy these issues and other similar ones using pandas string methods. At the same time we'll remove any rows with duplicate job descriptions.", "_____no_output_____" ] ], [ [ "# hide\nfilename = \"/content/data/job_data.csv\"\ndf = pd.read_csv(filename, index_col=0)", "_____no_output_____" ], [ "df = df[~df.duplicated('desc')]\n\ncicd_pat = \"([Cc][Ii]/[Cc][Dd])\"\nphd_pat = \"[Pp][Hh].?[Dd]\"\nr_pat = '(\\sR\\W)'\n\ndf['desc'] = (df.desc.str.replace(',000', 'k')\n              .str.replace('\\xa0', ' ')\n              .str.replace(phd_pat, 'PHD')\n              .str.replace(r_pat, ' RStudio ')\n              .str.replace(cicd_pat, ' CICD')\n              .str.replace('Modis', ''))", "_____no_output_____" ] ], [ [ "Now we're finally at a stage where our data is ready to be analysed. After whittling the results down to mid-level roles we were left with 133 unique jobs with a roughly even split between the three roles. ", "_____no_output_____" ], [ "When we fit the CountVectorizer transform to an array containing each job description what we get is a document-term matrix: the rows are the job descriptions and the columns are all the words which were found in the collection of documents. The contents of the matrix are the word frequencies. ", "_____no_output_____" ] ], [ [ "from sklearn.feature_extraction.text import CountVectorizer\ncount_vectorizer = CountVectorizer(stop_words='english')\ntext_data = np.array(df['desc'])\ncount_matrix = count_vectorizer.fit_transform(text_data)\ncount_matrix_df = pd.DataFrame(count_matrix.toarray(), columns=count_vectorizer.get_feature_names())\ncount_matrix_df", "_____no_output_____" ] ], [ [ "How can we use this implementation to visualise the three different roles? 
A simple option is to look at the most frequent words across the three classes: if we group the documents into the three roles, sum the word frequencies and sort them in descending order, we'll see which words appear the most in each.", "_____no_output_____" ] ], [ [ "# hide_input\nfrom plotly.subplots import make_subplots\nimport plotly.graph_objects as go\n\ncount_matrix_df['job'] = df.job\ncounts_by_job = (count_matrix_df.groupby('job')\n                 .sum()\n                 .transpose()\n                 .reset_index()\n                 .rename(columns={'index': 'word'}))\n\nda_top = counts_by_job[['word', 'data analyst']].sort_values('data analyst', ascending=False)[:20]\nde_top = counts_by_job[['word', 'data engineer']].sort_values('data engineer', ascending=False)[:20]\nds_top = counts_by_job[['word', 'data scientist']].sort_values('data scientist', ascending=False)[:20]\n\ncolours_dict = {'data analyst': '#636efa', 'data scientist': '#00cc96', 'data engineer': '#EF553B'}\n\nfig = make_subplots(rows=1, \n                    cols=3, \n                    subplot_titles=(\"DS top words\", \"DE top words\", \"DA top words\"),\n                    x_title='frequency'\n                   )\n\nfig.add_trace(go.Bar(\n    y=ds_top['word'],\n    x=ds_top['data scientist'],\n    orientation='h',\n    name='ds',\n    marker=dict(color=colours_dict['data scientist'])),\n    row=1, col=1\n)\n\nfig.add_trace(go.Bar(\n    y=de_top['word'],\n    x=de_top['data engineer'],\n    orientation='h',\n    name='de',\n    marker=dict(color=colours_dict['data engineer'])),\n    row=1, col=2\n)\n\nfig.add_trace(go.Bar(\n    y=da_top['word'],\n    x=da_top['data analyst'],\n    orientation='h',\n    name='da',\n    marker=dict(color=colours_dict['data analyst'])),\n    row=1, col=3\n)\n\nfig.update_layout(\n    yaxis1=dict(autorange=\"reversed\"),\n    yaxis2=dict(autorange=\"reversed\"),\n    yaxis3=dict(autorange=\"reversed\"),\n    height=600, width=1000,\n    showlegend=False\n)\nfig.show()", "_____no_output_____" ] ], [ [ "We can spot a few themes, such as the emphasis on machine learning in DS job descriptions, while SQL and reporting feature prominently in DA posts. The plot corresponding to data engineer roles is not as insightful. The appearance of Python, SQL and Azure is promising, but there is no sign of ETL pipelines and a lower prevalence of cloud-based systems than we'd expect. And why does 'scientist' turn up so much? Across the three plots we can also see another problem with this simple word frequency approach: the most insightful terms are diluted by a saturation of words which are common across the three roles and thus essentially useless, such as 'data', 'experience', 'role' and 'team'. \n\nWe need a metric which ranks frequent words highly but suppresses words which appear in lots of the documents in our corpus. Luckily for us a metric exists which does exactly as we require. TF-IDF, or term-frequency inverse-document-frequency, takes the term frequency of a word in a document and multiplies it by the inverse document frequency of that word, essentially damping those words which appear across a large proportion of the documents. 
Again sklearn has our back and can perform this process for us.", "_____no_output_____" ] ], [ [ "from sklearn.feature_extraction.text import TfidfVectorizer\nmax_df = 0.6\nmin_df = 2\nvectorizer = TfidfVectorizer(stop_words='english', max_df=max_df, min_df=min_df)\ntext = np.array(df['desc'])\ntfidf_matrix = vectorizer.fit_transform(text)\ntfidf = pd.DataFrame(tfidf_matrix.toarray(), columns=vectorizer.get_feature_names())\ntfidf", "_____no_output_____" ] ], [ [ "As before the TfidfVectorizer returns a term-document matrix, but instead of consisting of word frequencies we have tfidf values for each term-document pair. The parameters 'min_df' and 'max_df' are constraints on the document frequency of the words in our vocabulary. We have used a 'min_df' of 2, so any words that do not appear in at least two documents are thrown out. 'Max_df' is a float which corresponds to a proportion of the documents, so here if any words appear in more than 60 percent of the documents they too are discarded. In the same way as before, we can sum the tfidf values within the three classes and plot the highest aggregate values to hopefully extract the most important terms to characterise the three job types.", "_____no_output_____" ] ], [ [ "# hide_input\ntfidf_with_job = tfidf.copy()\ntfidf_with_job['job_type'] = df.job\ncounts_by_job = (tfidf_with_job.groupby('job_type')\n .sum()\n .transpose()\n .reset_index()\n .rename(columns={'index': 'word'}))\n\nda_top = counts_by_job[['word', 'data analyst']].sort_values('data analyst', ascending=False)[:20]\nde_top = counts_by_job[['word', 'data engineer']].sort_values('data engineer', ascending=False)[:20]\nds_top = counts_by_job[['word', 'data scientist']].sort_values('data scientist', ascending=False)[:20]\n\ncolours_dict = {'data analyst': '#636efa', 'data scientist': '#00cc96', 'data engineer': '#EF553B'}\n\nfig = make_subplots(rows=1, \n cols=3, \n subplot_titles=(\"DS top words\", \"DE top words\", \"DA top words\"),\n x_title='tfidf score'\n )\n\nfig.add_trace(go.Bar(\n y=ds_top['word'],\n x=ds_top['data scientist'],\n orientation='h',\n marker=dict(color=colours_dict['data scientist'])),\n row=1, col=1\n)\n\nfig.add_trace(go.Bar(\n y=de_top['word'],\n x=de_top['data engineer'],\n orientation='h',\n marker=dict(color=colours_dict['data engineer'])),\n row=1, col=2\n)\n\nfig.add_trace(go.Bar(\n y=da_top['word'],\n x=da_top['data analyst'],\n orientation='h',\n marker=dict(color=colours_dict['data analyst'])),\n row=1, col=3\n)\n\nfig.update_layout(\n yaxis1=dict(autorange=\"reversed\"),\n yaxis2=dict(autorange=\"reversed\"),\n yaxis3=dict(autorange=\"reversed\"),\n height=600, width=1000,\n showlegend=False\n)\nfig.show()", "_____no_output_____" ] ], [ [ "This is a big improvement over our first attempt. There are far fewer generic terms because of the introduction of the inverse-document weighting and the document frequency constraints. The data engineering terms are also closer to our expectations than they were before. \n\nWhat are the key insights we can take from these plots? For data scientists machine learning is the most distinguishing feature. R and Python both appear with the latter ranked slightly higher, at least in this sample. We can also see that doctorates are important in data scientist posts but not in the other two roles.\n\nOur picture of data engineering posts is clearer from this version of the plot. 
Cloud technologies, ETL pipelines and databases feature heavily, and python seems to be the scripting language of choice within this sample of posts. The impression given by this visual is that tools and technologies are to data engineering what algorithms and models are to data science.\n\nFinally, data analyst positions appear to be characterised more by aptitudes and skills than by specific technologies or technical backgrounds. SQL and excel feature heavily, but apart from these tools the key terms seem to describe a general analytical mindset and an ability to support other business functions through reporting and analysis.", "_____no_output_____", "## Plotting job descriptions via dimensionality reduction\n\nSo far we've used the TF-IDF vectors to gain insight into the key words that describe the different data roles. We haven't, however, used any unsupervised learning as we promised at the beginning. Let's sort that out now. \n\nUsing TF-IDF we have created a 2038-dimensional representation of the job descriptions in our sample. As it is a bit of a struggle to visualise anything in more than three dimensions, it is common practice to manipulate high-dimensional data into a two or three dimensional space so that it can be visualised and structural patterns in the data can be more easily found. In this case we are going to use Principal Component Analysis, or PCA for short, to project our data onto the two dimensions which retain the highest variability. This will allow us to create a scatterplot of the data.", "_____no_output_____" ] ], [ [ "from sklearn.decomposition import PCA\npca = PCA(n_components=2)\nX = pca.fit_transform(tfidf)\nX_df = pd.DataFrame(X)\nX_df['job'] = df.job.values\nX_df = X_df.rename(columns={0: 'pc_0', 1: 'pc_1'})\nX_df.sample(5)", "_____no_output_____" ], [ "pca.explained_variance_", "_____no_output_____" ] ], [ [ "Using only a two dimensional representation of the TF-IDF vectors we retain just over half of the variance in our data. The following is a scatterplot of the first two principal components; each point is a job description.", "_____no_output_____" ] ], [ [ "import plotly.express as px\nfig = px.scatter(X_df, x=\"pc_0\", y=\"pc_1\", color=\"job\", hover_data=[\"pc_0\", \"pc_1\", X_df.index])\nfig.update_layout(title='first two principal components of tfidf matrix')\nfig.show()", "_____no_output_____" ] ], [ [ "Remarkably, the three different job titles are almost perfectly separated into clusters in this two dimensional representation. This is significant because the PCA algorithm has no idea that our dataset contains three distinct classes, but by simply projecting the data onto the coordinate axes retaining the most variability the algorithm has managed to almost perfectly separate them. We can go one step further and demonstrate that the three classes are well-separated by applying k-means clustering to the first two principal components. 
The result is the following.", "_____no_output_____" ] ], [ [ "from sklearn.cluster import KMeans\nkmeans = KMeans(n_clusters=3)\n# X is the data matrix of the first two PC's\nkmeans.fit(X)\ny_kmeans = kmeans.predict(X)\ncenters = kmeans.cluster_centers_", "_____no_output_____" ], [ "# hide_input\nfig = make_subplots(rows=2, cols=1, shared_xaxes=False, vertical_spacing=0.15,\n                    subplot_titles=(\"actual job titles\",\"k means clustered job titles\"))\n\nfig1 = px.scatter(X_df, x=\"pc_0\", y=\"pc_1\", color=\"job\", hover_data=[\"pc_0\", \"pc_1\", X_df.index])\ntrace1, trace2, trace3 = fig1['data'][0], fig1['data'][1], fig1['data'][2]\ntrace1['name'] = 'data analyst'\ntrace2['name'] = 'data engineer'\ntrace3['name'] = 'data scientist'\n\nplotly_colours = px.colors.qualitative.Plotly\nc = [plotly_colours[x] for x in y_kmeans]\n\nfig2 = px.scatter(X_df, x='pc_0', y='pc_1', color=c, hover_data=['job', 'pc_0', 'pc_1', X_df.index])\ntrace4, trace5, trace6 = fig2['data'][0], fig2['data'][1], fig2['data'][2]\ntrace4['name'] = 'data analyst'\ntrace5['name'] = 'data engineer'\ntrace6['name'] = 'data scientist'\n\ntemp_str = '<br>job=%{customdata[0]}' + \\\n           '<br>pc_0=%{customdata[1]}' + \\\n           '<br>pc_1=%{customdata[2]}' + \\\n           '<br>index=%{customdata[3]}' + \\\n           '<extra></extra>'\n\ntrace4['hovertemplate'] = temp_str\ntrace5['hovertemplate'] = temp_str\ntrace6['hovertemplate'] = temp_str\n\ntrace4['legendgroup'] = 'job=data analyst'\ntrace4['showlegend'] = False\ntrace5['legendgroup'] = 'job=data engineer'\ntrace5['showlegend'] = False\ntrace6['legendgroup'] = 'job=data scientist'\ntrace6['showlegend'] = False\n\nfig.add_trace(trace1, row=1, col=1)\nfig.add_trace(trace2, row=1, col=1)\nfig.add_trace(trace3, row=1, col=1)\nfig.add_trace(trace4, row=2, col=1)\nfig.add_trace(trace5, row=2, col=1)\nfig.add_trace(trace6, row=2, col=1)\n\nfig.add_trace(go.Scatter(\n    x=centers[:, 0],\n    y=centers[:, 1],\n    name='cluster centers',\n    mode='markers',\n    marker=dict(size=[15, 15, 15],\n                color=[2, 2, 2],\n                opacity=0.7)),\n    row=2, col=1\n)\n\n# fig['data'][6]['showlegend'] = False\n\nfig.update_layout(height=800, legend=dict(x=1, y=0.5))\nfig.layout['xaxis']['title'] = 'pc_0'\nfig.layout['xaxis2']['title'] = 'pc_0'\nfig.layout['yaxis']['title'] = 'pc_1'\nfig.layout['yaxis2']['title'] = 'pc_1'\nfig.show()", "_____no_output_____" ] ], [ [ "Let's recap what we've achieved here. We started with a collection of raw text documents, each one a description of a job from one of three different titles. Using term-frequency inverse-document-frequency we converted each description into a fixed length vector of TF-IDF values. Then using PCA we projected our new 2038-dimensional data matrix onto the two coordinate axes of highest variability so we could produce a scatterplot of the data. The result was a clear visual separation into three clusters corresponding to the three job titles, which we have now shown are nearly perfectly recreated using a simple clustering algorithm.\n\nThe cool part about this is not that we have accurately classified each TF-IDF vector with its job title. There are lots of supervised approaches that would likely be more effective if that were our goal. No, the interesting part is thinking about how these simple unsupervised methods can be applied to problems where we *don't* have access to labelled data, as we did here. For example, given an unlabelled text dataset we could take the same steps, compute the TF-IDF values, plot the first two principal components and look for clusters. 
Then by comparing the most important words in each cluster via the TF-IDF values we might uncover patterns in the data that we weren't aware of. What clusters might we find if we applied this process to restaurant reviews? Or house listings?\n\nHopefully some of these ideas for visualising and investigating text data have piqued your interest and you've learned some of the differences between data science, engineering and analysis roles along the way. For further reading, Jake VanderPlas has released great content for [free](https://jakevdp.github.io/PythonDataScienceHandbook/) covering TF-IDF statistics, PCA and clustering among other topics.", "_____no_output_____" ] ] ]
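The cluster-inspection idea sketched in the closing paragraph can be made concrete in a few lines. This is a hypothetical follow-up, not part of the original post; it assumes the `tfidf` DataFrame and the `y_kmeans` labels computed in the cells above:

```python
# Surface the highest mean TF-IDF terms inside each k-means cluster;
# assumes `tfidf` (documents x terms) and `y_kmeans` from the cells above.
labelled = tfidf.copy()
labelled['cluster'] = y_kmeans
for cluster_id, group in labelled.groupby('cluster'):
    top_terms = group.drop(columns='cluster').mean().nlargest(10)
    print(f"cluster {cluster_id}:", ', '.join(top_terms.index))
```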
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
e7cfb5772966f7756869b73fe81104cefd94530d
3,116
ipynb
Jupyter Notebook
hpc_ai/ai_science_climate/English/python/jupyter_notebook/Tropical_Cyclone_Intensity_Estimation/Downloading_Images.ipynb
UCL-RITS/gpubootcamp
992cbef4739c0853803bbfa5787a2d75d6ab327f
[ "Apache-2.0" ]
null
null
null
hpc_ai/ai_science_climate/English/python/jupyter_notebook/Tropical_Cyclone_Intensity_Estimation/Downloading_Images.ipynb
UCL-RITS/gpubootcamp
992cbef4739c0853803bbfa5787a2d75d6ab327f
[ "Apache-2.0" ]
null
null
null
hpc_ai/ai_science_climate/English/python/jupyter_notebook/Tropical_Cyclone_Intensity_Estimation/Downloading_Images.ipynb
UCL-RITS/gpubootcamp
992cbef4739c0853803bbfa5787a2d75d6ab327f
[ "Apache-2.0" ]
null
null
null
37.095238
228
0.57285
[ [ [ "## Downloading Images in Bulk from U.S. Naval Archives\n\nThe Image Dataset can be found here [Here](https://www.nrlmry.navy.mil/tcdat/) \n\nThe Text Dataset can be found and downloaded from [Here]( https://www.nhc.noaa.gov/data/#hurdat)", "_____no_output_____" ], [ "```python3\n#Libraries to Scrap Text Data and Recurse Through the Directory and Fetch only the Required Data\nfrom bs4 import BeautifulSoup\nimport requests\nimport subprocess\nlimit = 16 # 16 Concurrent parallel Downloads to Speed them up\n\npage_link = 'https://www.nrlmry.navy.mil/tcdat/'\nparallel=0\nparallel_links=[]\ntextContent = []\nfor i in range(20): # Loop through tc01 to tc19\n new_page_link = page_link+'tc'+str(int(i/10))+str(int(i%10))+'/'\n print(new_page_link)\n links=[]\n links.append(new_page_link+'ATL/') # Get Atlantic Cyclones \n for j in links: # Loop through all Cyclones and Download Only IR images in 1Km range\n print(j)\n page_response = requests.get(j,timeout=10)\n page_content = BeautifulSoup(page_response.content, \"html.parser\")\n for k in range(5,len(page_content.find_all(\"a\"))-1): \n text_output = page_content.find_all(\"a\")[k].text\n parallel_links.append(j+text_output+'ir/geo/1km/') \n if(int(len(parallel_links)==limit)): # Get Batch of 16 and Put them to Download in Parallel \n p1 = subprocess.Popen([\"echo\"]+parallel_links,stdout=subprocess.PIPE)\n p2 = subprocess.Popen([\"xargs\" ,\"-n\",\"1\",\"-P\",str(int(len(parallel_links))),'wget','-r','-np','-e','robots=off','-R','index.html*'],stdin=p1.stdout, stdout=subprocess.PIPE,stderr=subprocess.PIPE)\n p1.stdout.close() # Allow p1 to receive a SIGPIPE if p2 exits.\n output,error = p2.communicate()\n parallel_links=[]\n```", "_____no_output_____" ], [ "### Warning !! \n\n#### The Cell Type has been changed to Markdown so that you don't accidentally run the code.\n\n#### It is not recommended to run the code until you fully understand the code and it's consequences , It can download huge amount of data (~ 10's of GB's ) thereby filling your computer's memory", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown" ] ]
e7cfc1e0527a46d626d286bb91705c6a4aa7d519
109,322
ipynb
Jupyter Notebook
blocking/learning_blocking_dynamics.ipynb
geflaspohler/deep-OTD
0daec276669776952b5142149007175b8a3c4d87
[ "MIT" ]
null
null
null
blocking/learning_blocking_dynamics.ipynb
geflaspohler/deep-OTD
0daec276669776952b5142149007175b8a3c4d87
[ "MIT" ]
null
null
null
blocking/learning_blocking_dynamics.ipynb
geflaspohler/deep-OTD
0daec276669776952b5142149007175b8a3c4d87
[ "MIT" ]
null
null
null
145.568575
21,712
0.863394
[ [ [ "# Data proessing imports\nimport h5py\nimport numpy as np\nimport os\nimport pdb\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D \nimport time\nimport pickle", "_____no_output_____" ], [ "# Torch Imports\nimport torch\nimport numpy as np\nfrom torchvision import datasets\nimport torchvision.transforms as transforms\nfrom skimage import io, transform\nfrom torch.utils.data import Dataset, DataLoader", "_____no_output_____" ], [ "# Set up output directories \nmodel_dir = '/Users/quetzal/Documents/code/deep-OTD/blocking/blocking_models'\ndata_dir = '/Users/quetzal/Documents/code/blocking_project'\nfigure_dir = '/Users/quetzal/Documents/code/deep-OTD/blocking/blocking_figures'\n\n\nif not os.path.exists(model_dir):\n os.mkdir(model_dir)\nif not os.path.exists(data_dir):\n os.mkdir(data_dir) \nif not os.path.exists(figure_dir):\n os.mkdir(figure_dir) ", "_____no_output_____" ], [ "# Read in all 40 days of experimental data into a single 4D tensor\nexp_days = 40\n\n# Get the metadata (e.g., lat, long) from a single example data file\ntemp_day = 1\ntemp_filename = 'Z_5day_day' + str(temp_day) + '_20190730.mat'\nmeta_data = h5py.File(os.path.join(data_dir, temp_filename), 'r')\ndata_shape = np.array(meta_data['Zluck']).shape\nprint(\"Data shape:\", data_shape)\n\n# # Construct the 40 x 127 x 43 x 7079 data file, corresponding to day, lat,\n# # lon, trail_no values, of top-layer quasigeostrophic potential vorticity\nexp_data = np.zeros((exp_days, data_shape[0], data_shape[1], data_shape[2]));\nfor day in range(1, exp_days+1):\n filename = 'Z_5day_day' + str(day) + '_20190730.mat'\n print('Loading file for day ' + str(day), \"from\", filename)\n data = h5py.File(os.path.join(data_dir, filename), 'r');\n exp_data[day-1, :, :, :] = data['Zluck']; ", "Data shape: (7079, 43, 127)\nLoading file for day 1 from Z_5day_day1_20190730.mat\nLoading file for day 2 from Z_5day_day2_20190730.mat\nLoading file for day 3 from Z_5day_day3_20190730.mat\nLoading file for day 4 from Z_5day_day4_20190730.mat\nLoading file for day 5 from Z_5day_day5_20190730.mat\nLoading file for day 6 from Z_5day_day6_20190730.mat\nLoading file for day 7 from Z_5day_day7_20190730.mat\nLoading file for day 8 from Z_5day_day8_20190730.mat\nLoading file for day 9 from Z_5day_day9_20190730.mat\nLoading file for day 10 from Z_5day_day10_20190730.mat\nLoading file for day 11 from Z_5day_day11_20190730.mat\nLoading file for day 12 from Z_5day_day12_20190730.mat\nLoading file for day 13 from Z_5day_day13_20190730.mat\nLoading file for day 14 from Z_5day_day14_20190730.mat\nLoading file for day 15 from Z_5day_day15_20190730.mat\nLoading file for day 16 from Z_5day_day16_20190730.mat\nLoading file for day 17 from Z_5day_day17_20190730.mat\nLoading file for day 18 from Z_5day_day18_20190730.mat\nLoading file for day 19 from Z_5day_day19_20190730.mat\nLoading file for day 20 from Z_5day_day20_20190730.mat\nLoading file for day 21 from Z_5day_day21_20190730.mat\nLoading file for day 22 from Z_5day_day22_20190730.mat\nLoading file for day 23 from Z_5day_day23_20190730.mat\nLoading file for day 24 from Z_5day_day24_20190730.mat\nLoading file for day 25 from Z_5day_day25_20190730.mat\nLoading file for day 26 from Z_5day_day26_20190730.mat\nLoading file for day 27 from Z_5day_day27_20190730.mat\nLoading file for day 28 from Z_5day_day28_20190730.mat\nLoading file for day 29 from Z_5day_day29_20190730.mat\nLoading file for day 30 from Z_5day_day30_20190730.mat\nLoading file for day 31 from 
Z_5day_day31_20190730.mat\nLoading file for day 32 from Z_5day_day32_20190730.mat\nLoading file for day 33 from Z_5day_day33_20190730.mat\nLoading file for day 34 from Z_5day_day34_20190730.mat\nLoading file for day 35 from Z_5day_day35_20190730.mat\nLoading file for day 36 from Z_5day_day36_20190730.mat\nLoading file for day 37 from Z_5day_day37_20190730.mat\nLoading file for day 38 from Z_5day_day38_20190730.mat\nLoading file for day 39 from Z_5day_day39_20190730.mat\nLoading file for day 40 from Z_5day_day40_20190730.mat\n" ], [ "# Relevent constants\nNUM_DAYS = exp_data.shape[0]\nNUM_TRIALS = exp_data.shape[1]\nNUM_LAT = exp_data.shape[2]\nNUM_LON = exp_data.shape[3]\n\n# Find the mean and std of data for normalization\ndata_mean = np.mean(exp_data, axis=None)\ndata_std = np.std(exp_data, axis=None)\n\n# Truncate latitude and longitude data; convoluation layers behave badly with origonal 43 x 127 dimensions\nLAT_TRUNC = 40\nLON_TRUNC = 124\nlon = meta_data['lon0'][0:LON_TRUNC]\nlat = meta_data['lon0'][0:LAT_TRUNC]\nlon_cord, lat_cord = np.meshgrid(lon, lat); ", "_____no_output_____" ], [ "class BlockingDataset(Dataset):\n \"\"\" Custom dataset class for reading in the blocking data\n\n Args:\n data (np.array): the exp_data array, which should have be a 4D numpy tensor \n containing DAY x TRAIL x LAT x LON\n \"\"\"\n def __init__(self, data, DAY_BK=20, DAY_BF=0, DAY_AF=5, transform=None):\n self.DAY_BK = DAY_BK\n self.DAY_BF = DAY_BF\n self.DAY_AF = DAY_AF\n \n self.cur_state, self.next_state, self.block_status = self.split_data_labels_timeseries(data)\n \n self.transform = transform\n \n\n def __getitem__(self, index):\n cur = self.cur_state[index]\n nxt = self.next_state[index]\n dif = (self.next_state[index] - self.cur_state[index])\n bks = self.block_status[index]\n \n sample = {'cur_state': cur, 'next_state': nxt, 'diff': dif, 'bs': bks}\n\n # Apply relevent transformations\n if self.transform:\n sample = self.transform(sample)\n return sample\n \n def __len__(self):\n return self.cur_state.shape[0]\n \n def split_data_labels_timeseries(self, data):\n \"\"\" Splits data into \"current timestep\" and \"next timestep\" training pairs, where\n every other sample is added to either the current or next array\n\n Args:\n data (np.array): the exp_data array, which should have be a 4D numpy tensor \n containing DAY x TRAIL x LAT x LON\n \"\"\" \n duration = data.shape[0]\n num_trials = data.shape[1]\n \n # Each current and next array will be half the length of the origonal data array\n # This also could be implemented so that every sample is used as a current and \n # and a next training example\n cur_data = np.zeros((duration // 2 * num_trials, data.shape[2], data.shape[3]))\n next_data = np.zeros((duration // 2 * num_trials, data.shape[2], data.shape[3]))\n block_status = np.zeros((duration // 2 * num_trials,))\n\n i = 0 \n for trial in range(num_trials):\n for day in range(0, duration, 2): # Increase days by 2\n cur_data[i, :, :] = data[day, trial, :, :]\n next_data[i, :, :] = data[day+1, trial, :, :]\n if day < self.DAY_BK - self.DAY_BF or day > self.DAY_BK + self.DAY_AF:\n block_status[i] = False\n else:\n block_status[i] = True\n i += 1\n\n return cur_data, next_data, block_status", "_____no_output_____" ], [ "# Define data transformations\n\nclass Rescale(object):\n \"\"\"Rescale the image in a sample to a given size.\n\n Args:\n output_size (tuple or int): Desired output size. If tuple, output is\n matched to output_size. 
If int, smaller of image edges is matched\n        to output_size keeping aspect ratio the same.\n    \"\"\"\n\n    def __init__(self, output_size):\n        assert isinstance(output_size, (tuple))\n        self.output_size = output_size\n\n    def __call__(self, sample):\n        cur_state, next_state, diff, bs = sample['cur_state'], sample['next_state'], sample['diff'], sample['bs']\n\n        new_h, new_w = self.output_size\n        new_h, new_w = int(new_h), int(new_w)\n\n        cs = transform.resize(cur_state, (new_h, new_w))\n        ns = transform.resize(next_state, (new_h, new_w))\n        df = transform.resize(diff, (new_h, new_w))\n\n        return {'cur_state': cs, 'next_state': ns, 'diff': df, 'bs': bs}\n    \nclass AddDummyDim(object):\n    \"\"\" Adds a dummy dimension, so that each example is a 3D tensor, with dimensions (1, *, *)\n    For compatibility with pytorch convolution.\n    \"\"\"\n    def __init__(self):\n        pass\n\n    def __call__(self, sample):\n        cur_state, next_state, diff, bs = sample['cur_state'], sample['next_state'], sample['diff'], sample['bs']\n        \n        cur_state = cur_state.reshape(1, cur_state.shape[0], cur_state.shape[1]) \n        next_state = next_state.reshape(1, next_state.shape[0], next_state.shape[1]) \n        diff = diff.reshape(1, diff.shape[0], diff.shape[1]) \n\n        return {'cur_state': cur_state, 'next_state': next_state, 'diff': diff, 'bs': bs}\n    \nclass Normalize(object):\n    \"\"\"Normalize a QGPV field to zero mean, unit variance.\n\n    Args:\n        mean (float or double): dataset mean\n        std (float or double): dataset standard deviation\n    \"\"\"\n    def __init__(self, mean, std):\n        self.mean = mean\n        self.std = std\n\n    def __call__(self, sample):\n        cur_state, next_state, diff, bs = sample['cur_state'], sample['next_state'], sample['diff'], sample['bs']\n        \n        cur_state = (cur_state - self.mean) / self.std\n        next_state = (next_state - self.mean) / self.std \n        diff = next_state - cur_state\n\n        return {'cur_state': cur_state, 'next_state': next_state, 'diff': diff, 'bs': bs}\n    ", "_____no_output_____" ], [ "NUM_TRIALS = 500\n\n# Compose relevant data transformations: normalize -> rescale -> add dummy dim\ntrans = transforms.Compose([Normalize(data_mean, data_std), Rescale((40, 124)), AddDummyDim()])\n\n# Create training and testing datasets, each with a fraction of the original data \nblocking_dataset_train = BlockingDataset(data=exp_data[:, 0:NUM_TRIALS//2, :, :], transform=trans)\nblocking_dataset_test = BlockingDataset(data=exp_data[:, NUM_TRIALS//2:NUM_TRIALS//2 + NUM_TRIALS//2, :, :], transform=trans) # TODO modify to take the rest of the dataset", "_____no_output_____" ], [ "# Create training and testing dataloaders\nnum_workers = 0\nbatch_size = 5\ndataloader_train = torch.utils.data.DataLoader(blocking_dataset_train, shuffle=True, batch_size=batch_size, num_workers=num_workers)\ndataloader_test = torch.utils.data.DataLoader(blocking_dataset_test, shuffle=True, batch_size=batch_size, num_workers=num_workers)", "_____no_output_____" ], [ "from matplotlib import colors\n\ndef show_blocking_batch(sample_batched):\n    \"\"\"Visualize a batch of data\n\n    Args:\n        sample_batched (torch.tensor): a batched data example, dimension BATCHSIZE x LAT X LON\n    \"\"\" \n    # Unpack and squeeze to remove dummy dimension \n    cur_state_batch, next_state_batch, diff_batch = \\\n        np.squeeze(sample_batched['cur_state']), \\\n        np.squeeze(sample_batched['next_state']), \\\n        np.squeeze(sample_batched['diff'])\n    \n    batch_size = len(cur_state_batch)\n    \n    # Find max and min diff values, for consistent visualization \n    diff_min = np.min(diff_batch.numpy(), axis=None)\n    diff_max = 
np.max(diff_batch.numpy(), axis=None)\n\n    # Plotting \n    for i in range(batch_size):\n        ax1 = plt.subplot(3, batch_size, i+1)\n        ax2 = plt.subplot(3, batch_size, i+1+batch_size)\n        ax3 = plt.subplot(3, batch_size, i+1+2*batch_size)\n\n        plt.tight_layout()\n        \n        ax1.axis('off')\n        ax2.axis('off')\n        ax3.axis('off')\n\n        s = ax1.contourf(lon_cord, lat_cord, cur_state_batch[i, :, :], cmap=plt.cm.viridis)\n        s = ax2.contourf(lon_cord, lat_cord, next_state_batch[i, :, :], cmap=plt.cm.viridis)\n        s = ax3.contourf(lon_cord, lat_cord, diff_batch[i, :, :], cmap=plt.cm.bwr, vmin=diff_min, vmax=diff_max)\n\n    plt.show()\n    plt.close()", "_____no_output_____" ], [ "# Plot a few batches of data\nfor i_batch, sample_batched in enumerate(dataloader_train):\n    print(i_batch, sample_batched['cur_state'].size(),\n          sample_batched['next_state'].size())\n    show_blocking_batch(sample_batched)\n    \n    # Only display the first few sampled batches\n    if i_batch > 2:\n        break", "0 torch.Size([5, 1, 40, 124]) torch.Size([5, 1, 40, 124])\n" ], [ "# Define convolutional autoencoder module \nimport torch.nn as nn\nimport torch.nn.functional as F\n\n# define the NN architecture\nclass ConvAutoencoder(nn.Module):\n    def __init__(self):\n        super(ConvAutoencoder, self).__init__()\n        ## encoder layers ##\n        # conv layer (depth from 1 --> 16), 3x3 kernels\n        self.conv1 = nn.Conv2d(1, 16, 3, padding=1)  \n        # conv layer (depth from 16 --> 4), 3x3 kernels\n        self.conv2 = nn.Conv2d(16, 4, 3, padding=1)\n        # pooling layer to reduce x-y dims by two; kernel and stride of 2\n        self.pool = nn.MaxPool2d(2, 2)\n        \n        ## decoder layers ##\n        ## a kernel of 2 and a stride of 2 will increase the spatial dims by 2\n        self.t_conv1 = nn.ConvTranspose2d(4, 16, 2, stride=2)\n        self.t_conv2 = nn.ConvTranspose2d(16, 1, 2, stride=2)\n\n    def forward(self, x):\n        ## encode ##\n        # add hidden layers with relu activation function\n        # and maxpooling after\n        x = F.relu(self.conv1(x))\n        x = self.pool(x)\n        # add second hidden layer\n        x = F.relu(self.conv2(x))\n        x = self.pool(x)  # compressed representation\n\n        ## decode ##\n        # add transpose conv layers, with relu activation function\n        x = F.relu(self.t_conv1(x))\n        # output layer (could add sigmoid for scaling from 0 to 1)\n        x = self.t_conv2(x)\n        return x\n\n# initialize the NN\nmodel = ConvAutoencoder()\nprint(model)", "ConvAutoencoder(\n  (conv1): Conv2d(1, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n  (conv2): Conv2d(16, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n  (pool): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n  (t_conv1): ConvTranspose2d(4, 16, kernel_size=(2, 2), stride=(2, 2))\n  (t_conv2): ConvTranspose2d(16, 1, kernel_size=(2, 2), stride=(2, 2))\n)\n" ], [ "# Specify objective and optimizer for CAE\n# specify loss function\ncriterion = nn.MSELoss()\n\n# specify optimizer\noptimizer = torch.optim.Adam(model.parameters(), lr=0.001)", "_____no_output_____" ], [ "# number of epochs to train the model\nn_epochs = 30\n\nloss_progress = [0]*n_epochs\nval_progress = [0]*n_epochs\n\n\nfor epoch in range(1, n_epochs+1):\n    # monitor training loss\n    train_loss = 0.0\n    val_loss = 0.0\n\n    ###################\n    # train the model #\n    ###################\n    for data in dataloader_train:\n        # unpack the batch (no labels needed; no need to flatten images)\n        cur_state, next_state, diff = data['cur_state'], data['next_state'], data['diff']\n        # clear the gradients of all optimized variables\n        optimizer.zero_grad()\n        # forward pass: compute predicted outputs by passing inputs to the model\n        outputs = 
model(cur_state.float())\n        # calculate the loss\n        loss = criterion(outputs, diff.float())\n        # backward pass: compute gradient of the loss with respect to model parameters\n        loss.backward()\n        # perform a single optimization step (parameter update)\n        optimizer.step()\n\n        # update running training loss\n        train_loss += loss.item()*cur_state.size(0) # Multiply by the batch size\n    \n    ########################################\n    # run current model on validation data #\n    ########################################\n    with torch.no_grad():\n        for data in dataloader_test: \n            cur_state, next_state, diff = data['cur_state'], data['next_state'], data['diff'] \n            # Validation outputs\n            outputs = model(cur_state.float())\n            loss = criterion(outputs, diff.float())\n            val_loss += loss.item()*cur_state.size(0) # Multiply by the batch size\n\n    # print avg training statistics \n    train_loss = train_loss/len(dataloader_train)\n    val_loss = val_loss/len(dataloader_test)\n\n    loss_progress[epoch-1] = train_loss\n    val_progress[epoch-1] = val_loss\n\n    print('Epoch: {} \\tTraining Loss: {:.10f} \\tValidation Loss: {:.10f}'.format(\n        epoch, \n        train_loss,\n        val_loss\n        ))", "_____no_output_____" ], [ "# Plot validation and training loss with epoch\nplt.plot(loss_progress)\nplt.plot(val_progress)", "_____no_output_____" ], [ "# Save the model\nif not os.path.exists(model_dir):\n    os.mkdir(model_dir)\nfh = open(os.path.join(model_dir, 'blocking_model_epochs' + str(n_epochs) + '.pickle'), 'wb')\npickle.dump([model, dataloader_train, dataloader_test], fh)", "_____no_output_____" ], [ "# Load the model\nLOAD = False # Flag for safety, don't override\nif LOAD:\n    fh = open(os.path.join(model_dir, 'blocking_model_epochs' + str(n_epochs) + '.pickle'), 'rb')\n    model_, dataloader_train_, dataloader_test_ = pickle.load(fh)", "_____no_output_____" ], [ "from matplotlib import colors\n\ndef show_predictions(sample, model):\n    \"\"\"Visualize a batch of data\n\n    Args:\n        sample (torch.tensor): a batched data example, dimension BATCHSIZE x LAT X LON\n        model (torch.nn): the convolutional autoencoder model\n    \"\"\" \n    # use the sample passed in rather than the global variable\n    cur_state_batch, next_state_batch, diff_batch = \\\n        sample['cur_state'], \\\n        sample['next_state'], \\\n        sample['diff']\n    \n    with torch.no_grad():\n        outputs_batch = model(cur_state_batch.float())\n    \n    cur_state_batch = np.squeeze(cur_state_batch)\n    next_state_batch = np.squeeze(next_state_batch)\n    diff_batch = np.squeeze(diff_batch)\n    outputs_batch = np.squeeze(outputs_batch)\n    \n    min_diff = np.min([diff_batch.numpy(), outputs_batch.numpy()])\n    max_diff = np.max([diff_batch.numpy(), outputs_batch.numpy()])\n\n\n    batch_size = len(cur_state_batch) \n    for i in range(batch_size):\n        ax1 = plt.subplot(4, batch_size, i+1)\n        ax2 = plt.subplot(4, batch_size, i+1+batch_size)\n        ax3 = plt.subplot(4, batch_size, i+1+2*batch_size)\n        ax4 = plt.subplot(4, batch_size, i+1+3*batch_size)\n\n        plt.tight_layout()\n        \n        ax1.axis('off')\n        ax2.axis('off')\n        ax3.axis('off')\n        ax4.axis('off')\n        \n        s1 = ax1.contourf(lon_cord, lat_cord, cur_state_batch[i, :, :], cmap=plt.cm.viridis)\n        s2 = ax2.contourf(lon_cord, lat_cord, diff_batch[i, :, :], cmap=plt.cm.bwr, vmin=min_diff, vmax=max_diff)\n        s3 = ax3.contourf(lon_cord, lat_cord, outputs_batch[i, :, :], cmap=plt.cm.bwr, vmin=min_diff, vmax=max_diff)\n        s4 = ax4.contourf(lon_cord, lat_cord, np.abs(diff_batch[i, :, :] - outputs_batch[i, :, :]), cmap=plt.cm.bwr, vmin=min_diff, vmax=max_diff)\n\n\n    plt.show()\n    plt.close()", "_____no_output_____" ], [ "########################################\n# 
run current model on validation data #\n########################################\nfor i, data in enumerate(dataloader_test): \n print(i, \"Cur State\", \"Diff\", \"Pred Diff\", \"Pred Error\")\n show_predictions(data, model)\n \n if i > 4:\n break", "_____no_output_____" ] ] ]
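A quick shape check makes the truncation choice above concrete: the two 2x2 max-pools halve the spatial dimensions twice and the two stride-2 transpose convolutions double them back, so the 43 x 127 fields are cropped to 40 x 124 (multiples of 4) to survive the round trip. A minimal sketch, assuming the `ConvAutoencoder` defined above:

```python
import torch

model = ConvAutoencoder()
x = torch.randn(1, 1, 40, 124)      # one normalized QGPV field
with torch.no_grad():
    out = model(x)                  # 40x124 -> 20x62 -> 10x31 -> 20x62 -> 40x124
print(x.shape, '->', out.shape)     # both torch.Size([1, 1, 40, 124])
```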
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7cfc9d913ab98288e76a1d3c0481ea1d3c60eee
134,546
ipynb
Jupyter Notebook
notebooks/run_pipeline.ipynb
lisc119/Soiling-Loss-Analysis
979424a9b355a0ad6196492d105d1905844cd238
[ "MIT" ]
null
null
null
notebooks/run_pipeline.ipynb
lisc119/Soiling-Loss-Analysis
979424a9b355a0ad6196492d105d1905844cd238
[ "MIT" ]
null
null
null
notebooks/run_pipeline.ipynb
lisc119/Soiling-Loss-Analysis
979424a9b355a0ad6196492d105d1905844cd238
[ "MIT" ]
null
null
null
232.778547
43,092
0.922933
[ [ [ "# Processing pipeline - PV solar cell output data", "_____no_output_____" ], [ "## Modify system path and import pipeline module", "_____no_output_____" ] ], [ [ "from sys import path as syspath\nsyspath.insert(1, '../src/')", "_____no_output_____" ], [ "import pandas as pd\nfrom scripts import (process_data, calculations,\n apply_filters, plotting)", "_____no_output_____" ] ], [ [ "## Define paths to data files (output, environmental, & panel capacity)", "_____no_output_____" ] ], [ [ "# PARK 1 #\nroot_path = \"../data/raw/New_data/\"\npower_filepath = root_path + \"SolarPark1_Jun_2019_Jun2020_string_production.csv\"\nenvironment_filepath = root_path + \"SolarPark1_Jun_2019_Jun2020_environmental.csv\"\ncapacity_filepath = root_path + \"Solarpark_1_string_capacity.csv\"", "_____no_output_____" ], [ "# # PARK 2 #\n# root_path = \"../data/raw/New_data/\"\n# power_filepath, \\\n# capacity_filepath, \\\n# environment_filepath = [root_path + i for i in ['SolarPark2_Oct_2019_Oct2020_string_production.csv',\n# 'Solarpark_2_CB_capacity.csv',\n# 'Solarpark2_Oct_2019_Oct2020_environmental.csv']]", "_____no_output_____" ] ], [ [ "#### Set output directory for files to be saved", "_____no_output_____" ] ], [ [ "working_dir = \"../data/temp/park1/\" # will save all interim files here\n# working_dir = \"../data/temp/park2/\" # will save all interim files here", "_____no_output_____" ] ], [ [ "## Run processing pipeline", "_____no_output_____" ], [ "##### Note that two dataframes are created in the process by the function.\n\n- **Dataframe 1**: the original output with NAs filtered (by irradiance) & environmental data optionally appended.\n \n *Set keep_env_info to True if you want to keep the environmental data in this dataframe.*\n ", "_____no_output_____" ], [ "- **Dataframe 2**: calculated performance ratios with NAs filtered", "_____no_output_____" ] ], [ [ "df, df_PR = process_data.preprocess_data(\n power_filepath,\n environment_filepath,\n capacity_filepath,\n yearly_degradation_rate = 0.005,\n keep_env_info = False,\n save_dir = working_dir\n)", "Data read successfully.\n\nColumns renamed.\nMerged DFs.\nAdjusting expected power by degradation rate of: 0.5%/year...\nCalculated performance ratio.\n(112584, 330)\n(57991, 330)\nCleaned dataframes.\n\n\nSaving dataframes...\n\tSaving ../data/temp/park1/preprocessing/df_output.csv...\n\tDone.\n\tSaving ../data/temp/park1/preprocessing/df_PR.csv...\n\tDone.\nDONE.\n" ] ], [ [ "## Drop worst strings based on UMAP clustering", "_____no_output_____" ], [ "### (Need to cluster first and save labels of worst strings to filter out)", "_____no_output_____" ] ], [ [ "cluster_filepath = '../data/processed/park1/park1_string_clusters_filtered.csv'", "_____no_output_____" ], [ "df_clusters = pd.read_csv(cluster_filepath, delimiter=',')\n\nbottom_cluster = df_clusters['bottom'].dropna().tolist()\n\ndf_PR = df_PR.drop(\n columns = [\n i+\"_(kW)\" for i in bottom_cluster] + [\n col for col in df_PR.columns.to_list(\n ) if col.startswith(\n \"ST 2.7\") or col.startswith(\n \"ST 2.5.4\")or col.startswith(\n \"ST 4.4.1\") or col.startswith(\n \"ST 4.5.2\")])", "_____no_output_____" ] ], [ [ "## Filter best time periods & strings", "_____no_output_____" ], [ "Best time window needs to first be determined by ...", "_____no_output_____" ] ], [ [ "#plotting.plot_EPI_daily_window(apply_filters.add_temporal(df_PR, drop_extra = False))\n#should do this AFTER filtering strings...?", "_____no_output_____" ], [ "#plotting.plot_EPI_dpm(apply_filters.add_temporal(df_PR, 
drop_extra = False))", "_____no_output_____" ], [ "#plotting.plot_EPI_sd_daily_windows(apply_filters.add_temporal(df_PR, drop_extra = False))", "_____no_output_____" ] ], [ [ "### Set decided time window", "_____no_output_____" ] ], [ [ "peak_hour_start = \"15\" #park 1 - \"15\", park 2 - \"16\"\npeak_hour_end = \"19\" #park 1 - \"19\", park 2 - \"18\"", "_____no_output_____" ] ], [ [ "### Run filtering steps", "_____no_output_____" ] ], [ [ "df_BDfilt_dayfilt_hour, \\\ndf_BDfilt_dayfilt_day, \\\ndebug = apply_filters.filter_data(df_PR,\n window_start = peak_hour_start,\n window_end = peak_hour_end,\n save_dir = working_dir)", "Beginning filtering...\nFiltering time window (15-19)...\n" ], [ "df_BDfilt_dayfilt_day.mean(axis=1).plot(legend=False)", "_____no_output_____" ], [ "df_BDfilt_dayfilt_day.mean(axis=1).plot(legend=False)", "_____no_output_____" ] ], [ [ "### Estimate soiling", "_____no_output_____" ] ], [ [ "tmp = pd.read_csv(\"../data/processed/park1/df_park1_allfilters_hourly_timemasked.csv\")\ntmp['datetime'] = pd.to_datetime(tmp['datetime'])\ntmp = tmp.set_index('datetime')", "_____no_output_____" ], [ "tmp.median().plot(figsize = (15,6), color = \"blue\", alpha = 0.5, ms=20, use_index = True, style ='.', ylim = (0.94,1))\ndf_BDfilt_dayfilt_hour.median().plot(use_index=False, color = \"red\", style = \".\", ms=20, alpha = 0.5)", "_____no_output_____" ], [ "tmp.median(axis=0).mean() - df_BDfilt_dayfilt_hour.median(axis=0).median()", "_____no_output_____" ], [ "4.406552374713879e-05 * 100", "_____no_output_____" ] ] ]
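The window filtering above delegates to `apply_filters`, whose source is not included in this notebook. For reference, a minimal hour-of-day mask over a `DatetimeIndex` captures the core idea; this is an illustrative sketch only (the real module likely does more, and it uses integer hours where the notebook passes strings):

```python
def mask_peak_window(frame, start_hour=15, end_hour=19):
    # Keep rows of a DatetimeIndex'd DataFrame whose hour is in [start, end).
    hours = frame.index.hour
    return frame[(hours >= start_hour) & (hours < end_hour)]

# e.g. df_peak = mask_peak_window(df_PR, 15, 19)
```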
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
e7cfd7a77510ffb06a418e0022c1b9d896d2f9da
19,171
ipynb
Jupyter Notebook
index.ipynb
DavidLeoni/softpython-en
24578eca312cb55934857f35d958117bd015453b
[ "CC-BY-4.0" ]
null
null
null
index.ipynb
DavidLeoni/softpython-en
24578eca312cb55934857f35d958117bd015453b
[ "CC-BY-4.0" ]
16
2020-10-24T15:16:59.000Z
2022-03-19T04:05:48.000Z
index.ipynb
DavidLeoni/softpython-en
24578eca312cb55934857f35d958117bd015453b
[ "CC-BY-4.0" ]
1
2021-10-30T18:09:14.000Z
2021-10-30T18:09:14.000Z
37.887352
536
0.617808
[ [ [ ".. meta::\n\n :description: Introductive guide to coding, data cleaning and analysis for Python 3, with many worked exercises.", "_____no_output_____" ] ], [ [ "# SoftPython", "_____no_output_____" ] ], [ [ "% Latex fixes, DO NOT DELETE THIS !!!!\n\n% need to demote chapters to sections because sphinx promotes them *just in the home* :-/\n% https://tex.stackexchange.com/questions/161259/macro-for-promoting-sections-to-chapters\n% for alternatives, see https://texfaq.org/FAQ-patch\n\n\\let\\truechapter\\chapter\n\\let\\truesection\\section\n\\let\\truesubsection\\subsection\n\\let\\truesubsubsection\\subsubsection\n\\let\\truethesection\\thesection\n\n\\let\\chapter\\truesection\n\\let\\section\\truesubsection\n% suspend number printing , according to https://tex.stackexchange.com/a/80114\n\\renewcommand\\thesection{}\n\n\\truesection{Preface}", "_____no_output_____" ] ], [ [ "**Introductive guide to coding, data cleaning and analysis for Python 3, with many worked exercises.**\n\n<div class=\"alert alert-warning\">\n \n**WARNING: THIS ENGLISH VERSION IS IN-PROGRESS**\n \nCompletion is due by end of 2021\n \nComplete Italian version is here: [it.softpython.org](https://it.softpython.org)\n</div>", "_____no_output_____" ] ], [ [ "<p>\nDOWNLOAD: &nbsp;&nbsp;<a href=\"https://en.softpython.org/softpython-en.pdf\" target=\"_blank\">PDF</a>\n&nbsp;&nbsp;<a href=\"http://en.softpython.org/softpython-en.epub\" target=\"_blank\"> EPUB </a>\n&nbsp;&nbsp;<a href=\"https://github.com/DavidLeoni/softpython-en/archive/en.softpython.org.zip\" target=\"_blank\"> HTML </a>\n&nbsp;&nbsp; <a href=\"https://github.com/DavidLeoni/softpython-en\" target=\"_blank\"> Github </a>\n</p>\n<br/>\n<br/>\n", "_____no_output_____" ] ], [ [ "Nowadays, more and more decisions are taken upon factual and objective data. All disciplines, from engineering to social sciences, require to elaborate data and extract actionable information by analysing heterogenous sources. 
This book of practical exercises gives an introduction to coding and data processing using [Python](https://www.python.org), a programming language popular both in the industry and in research environments.", "_____no_output_____" ] ], [ [ "<a id=\"news\"></a>", "_____no_output_____" ] ], [ [ "## News\n\n**October 15, 2021**: added [formats challenges](formats/formats4-chal.ipynb), moved graph formats and binary relations to [relational data](#relational-data) section\n\n**October 14, 2021** added [functions](#functions), [matrix lists challenges](matrices-lists/matrices-lists3-chal.ipynb) and [mixed structures challenges ](mixed-structures/mixed-structures2-chal.ipynb)\n\n**October 8, 2021** added [for](for/for8-chal.ipynb), [while](while/while2-chal.ipynb), [sequences](sequences/sequences2-chal.ipynb) challenges\n\n**October 7, 2021**: added [sets](sets/sets2-chal.ipynb), [dictionary](dictionaries/dictionaries5-chal.ipynb), [if](if/if2-chal.ipynb) challenges\n\n**October 1, 2021**: added [lists](lists/lists5-chal.ipynb) and [tuples](tuples/tuples2-chal.ipynb) challenges\n\n**September 30, 2021**: added [string challenges](strings/strings5-chal.ipynb)\n\n**September 22, 2021**:\n\n- major update, added new exercises and pages\n- added [worked projects](#C---Worked-projects) section\n\n**October 3, 2020**: updated [References](references.ipynb) page\n\nOld news: [link](changelog.ipynb)", "_____no_output_____" ] ], [ [ "% Latex: fix for book structure\n\n\\let\\thesection\\truethesection\n\\truechapter{Overview}", "_____no_output_____" ] ], [ [ "## Intended audience\n\nThis book can be useful for both novices who never really programmed before, and for students with more techical background, who a desire to know about about data extraction, cleaning, analysis and visualization (among used frameworks there are Pandas, Numpy and Jupyter editor). We will try to process data in a practical way, without delving into more advanced considerations about algorithmic complexity and data structures. To overcome issues and guarantee concrete didactical results, we will present step-by-step tutorials. ", "_____no_output_____" ] ], [ [ "<a id=\"contents\"></a>", "_____no_output_____" ] ], [ [ "## Contents\n\n* [Overview](overview.ipynb): Approach and goals", "_____no_output_____" ] ], [ [ "<a id=\"foundations\"></a>", "_____no_output_____" ] ], [ [ "### A - Foundations\n\n1. [Installation](installation.ipynb)\n1. [Tools and scripts](tools/tools-sol.ipynb)", "_____no_output_____" ] ], [ [ "<a id=\"data-types\"></a>\n<a id=\"basics\"></a>\n<a id=\"strings\"></a>\n<a id=\"tuples\"></a>\n<a id=\"sets\"></a>\n<a id=\"dictionaries\"></a>", "_____no_output_____" ] ], [ [ "### A.1 Data types\n\n1. Basics: [1. variables and integers](basics/basics1-ints-sol.ipynb) &nbsp;&nbsp;[2. booleans](basics/basics2-bools-sol.ipynb) &nbsp;&nbsp;[3. real numbers](basics/basics3-floats-sol.ipynb) &nbsp;&nbsp;[4. challenges](basics/basics4-chal.ipynb)\n\n1. Strings: &nbsp;&nbsp;[1. intro](strings/strings1-sol.ipynb) &nbsp;&nbsp;[2. operators](strings/strings2-sol.ipynb) &nbsp;&nbsp;[3. basic methods](strings/strings3-sol.ipynb) &nbsp;&nbsp;[4. search methods](strings/strings4-sol.ipynb)&nbsp;&nbsp; [5. challenges](strings/strings5-chal.ipynb)\n \n1. Lists: &nbsp;&nbsp;[1. intro](lists/lists1-sol.ipynb) &nbsp;&nbsp;[2. operators](lists/lists2-sol.ipynb) &nbsp;&nbsp;[3. basic methods](lists/lists3-sol.ipynb) &nbsp;&nbsp;[4. search methods](lists/lists4-sol.ipynb) &nbsp;&nbsp;[5. challenges](lists/lists5-chal.ipynb)\n \n1. 
Tuples: &nbsp;&nbsp;[1. intro](tuples/tuples1-sol.ipynb) &nbsp;&nbsp;[2. challenges](tuples/tuples2-chal.ipynb)\n\n1. Sets: &nbsp;&nbsp;[1. intro](sets/sets1-sol.ipynb) &nbsp;&nbsp;[2. challenges](sets/sets2-chal.ipynb)\n\n1. Dictionaries: &nbsp;&nbsp;[1. intro](dictionaries/dictionaries1-sol.ipynb) &nbsp;&nbsp;[2. operators](dictionaries/dictionaries2-sol.ipynb) &nbsp;&nbsp;[3. methods](dictionaries/dictionaries3-sol.ipynb) &nbsp;&nbsp;[4. special classes](dictionaries/dictionaries4-sol.ipynb) &nbsp;&nbsp;[5. challenges](dictionaries/dictionaries5-chal.ipynb)", "_____no_output_____" ] ], [ [ "<a id=\"control-flow\"></a>\n<a id=\"if\"></a>\n<a id=\"for\"></a>\n<a id=\"while\"></a>\n<a id=\"sequences\"></a>", "_____no_output_____" ] ], [ [ "### A.2 Control flow\n\n1. If conditionals: &nbsp;&nbsp;[1. intro](if/if1-sol.ipynb) &nbsp;&nbsp; [2. challenges](if/if2-chal.ipynb) \n \n1. For loops: &nbsp;&nbsp;[1. intro](for/for1-intro-sol.ipynb) &nbsp;&nbsp;[2. strings](for/for2-strings-sol.ipynb) &nbsp;&nbsp;[3. lists](for/for3-lists-sol.ipynb) &nbsp;&nbsp;[4. tuples](for/for4-tuples-sol.ipynb) &nbsp;&nbsp;[5. sets](for/for5-sets-sol.ipynb) &nbsp;&nbsp;[6. dictionaries](for/for6-dictionaries-sol.ipynb) \n \n &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;[7. nested for](for/for7-nested-sol.ipynb) &nbsp;&nbsp;[8. challenges](for/for8-chal.ipynb)\n\n1. While loops: &nbsp;&nbsp;[1. intro](while/while1-sol.ipynb) &nbsp;&nbsp;[2. challenges](while/while2-chal.ipynb)\n1. Sequences and comprehensions: &nbsp;&nbsp;[1. intro](sequences/sequences1-sol.ipynb) &nbsp;&nbsp;[2. challenges](sequences/sequences2-chal.ipynb)", "_____no_output_____" ] ], [ [ "<a id=\"algorithms\"></a>\n<a id=\"functions\"></a>\n<a id=\"matrices-lists\"></a>\n<a id=\"mixed-structures\"></a>\n<a id=\"matrices-numpy\"></a>", "_____no_output_____" ] ], [ [ "### A.3 Algorithms\n\n1. Functions: &nbsp;&nbsp;[1. intro](functions/fun1-intro-sol.ipynb) &nbsp;&nbsp;[2. error handling and testing](functions/fun2-errors-and-testing-sol.ipynb)\n\n &nbsp;&nbsp;&nbsp; &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;[3. strings](functions/fun3-strings-sol.ipynb) &nbsp;&nbsp;[4. lists](functions/fun4-lists-sol.ipynb) &nbsp;&nbsp;[5. tuples](functions/fun5-tuples-sol.ipynb) &nbsp;&nbsp;[6. sets](functions/fun6-sets-sol.ipynb)<!--&nbsp;&nbsp;[7. dictionaries](functions/fun7-dictionaries-sol.ipynb) &nbsp;&nbsp;[8. challenges](functions/fun8-chal.ipynb)-->\n \n1. Matrices - list of lists: &nbsp;&nbsp;[1. intro](matrices-lists/matrices-lists1-sol.ipynb) &nbsp;&nbsp;[2. other exercises](matrices-lists/matrices-lists2-sol.ipynb) &nbsp;&nbsp; [3. challenges](matrices-lists/matrices-lists3-chal.ipynb)\n\n1. Mixed structures: &nbsp;&nbsp;[1. intro](mixed-structures/mixed-structures1-sol.ipynb) &nbsp;&nbsp; [2. challenges](mixed-structures/mixed-structures2-chal.ipynb)\n\n1. Matrices - numpy: &nbsp;&nbsp;[1. intro](matrices-numpy/matrices-numpy1-sol.ipynb) &nbsp;&nbsp;[2. exercises](matrices-numpy/matrices-numpy2-sol.ipynb) &nbsp;&nbsp; <!-- [3. challenges](matrices-numpy/matrices-numpy3-chal.ipynb) -->", "_____no_output_____" ] ], [ [ "<a id=\"data-analysis\"></a>\n<a id=\"formats\"></a>\n<a id=\"visualization\"></a>\n<a id=\"pandas\"></a>\n<a id=\"binary-relations\"></a>\n<a id=\"relational-data\"></a>", "_____no_output_____" ] ], [ [ "### B - Data analysis\n\n1. Data formats: &nbsp;&nbsp;[1. 
line files](formats/formats1-lines-sol.ipynb) &nbsp;&nbsp;[2. CSV files](formats/formats2-csv-sol.ipynb) &nbsp;&nbsp;[3. JSON files](formats/formats3-json-sol.ipynb) &nbsp;&nbsp;[4. challenges](formats/formats4-chal.ipynb) \n1. Visualization: &nbsp;&nbsp; [1. intro](visualization/visualization1-sol.ipynb) &nbsp;&nbsp; [2. challenges](visualization/visualization2-chal.ipynb) &nbsp;&nbsp; [images](visualization/visualization-images-sol.ipynb)\n1. Analytics with Pandas: [1. intro](pandas/pandas1-sol.ipynb) &nbsp;&nbsp; [2. exercises](pandas/pandas2-sol.ipynb) &nbsp;&nbsp; [3. challenge](pandas/pandas3-chal.ipynb)\n1. Relational data: [1. intro](relational/relational1-intro-sol.ipynb) &nbsp;&nbsp; [2. binary relations](relational/relational2-binrel-sol.ipynb) <!--&nbsp;&nbsp; [3. challenge](relational/relational3-chal.ipynb)-->", "_____no_output_____" ] ], [ [ "<a id=\"applications\"></a>", "_____no_output_____" ] ], [ [ "### C - Applications\n\n<a id=\"applications\"></a>", "_____no_output_____" ] ], [ [ "<a id=\"worked-projects\"></a>", "_____no_output_____" ] ], [ [ "### D - Worked projects\n\n<a id=\"worked-projects\"></a>\n\n<!--\nProjects as exercises (with solution), involving some raw data preprocessing, simple analysis and final chart display. \n-->", "_____no_output_____" ] ], [ [ ".. \n .. toctree::\n :maxdepth: 2\n :glob:\n :reversed:\n\n toc-worked-projects.rst", "_____no_output_____" ], [ "<a id=\"appendix\"></a>", "_____no_output_____" ] ], [ [ "### E - Appendix\n\n* [Commandments](commandments.ipynb)\n* [References](references.ipynb)", "_____no_output_____" ] ], [ [ "<a id=\"authors\"></a>", "_____no_output_____" ] ], [ [ "## Author\n \n**David Leoni**: Software engineer specialized in data integration and the semantic web, he has built applications in the open data and medical domains in Italy and abroad. He frequently collaborates with the University of Trento for teaching activities in various departments. Since 2019 he has been president of the CoderDolomiti Association, where, along with Marco Caresia, he manages the volunteering movement CoderDojo Trento to teach creative coding to kids. <br/>\nEmail: [[email protected]](mailto:[email protected]) &ensp; Website: [davidleoni.it](https://davidleoni.it)\n\n### Contributors\n\n**Marco Caresia** (2017 Autumn Edition assistant @DISI, University of Trento): He has been an informatics teacher at Scuola Professionale Einaudi of Bolzano. He is president of the Trentino Alto Adige Südtirol delegation of the Associazione Italiana Formatori and vice president of the CoderDolomiti Association.\n\n**Alessio Zamboni** (2018 March Edition assistant @Sociology Department, University of Trento): Data scientist and software engineer with experience in NLP, GIS and knowledge management. He has collaborated on numerous research projects, gaining experience in Europe and Asia. He strongly believes that _'Programming is a work of art'_.\n\n**Massimiliano Luca** (2019 Summer Edition teacher @Sociology Department, University of Trento): He loves learning new technologies every day. He is particularly interested in knowledge representation, data integration, data modeling and computational social science. 
He firmly believes it is vital to introduce youngsters to computer science, and has been mentoring at Coder Dojo DISI Master.", "_____no_output_____" ] ], [ [ "<a id=\"license\"></a>", "_____no_output_____" ] ], [ [ "## License\n\nThe making of this website and related courses was funded mainly by the [Department of Information Engineering and Computer Science (DISI)](https://www.disi.unitn.it), University of Trento, and also by the [Sociology](https://www.sociologia.unitn.it/en) and [Mathematics](https://www.maths.unitn.it/en) departments.\n\n![unitn-843724](_static/img/third-parties/disi-unitn-en-logo-468-153.png)\n\n\n![cc-by-7172829](_static/img/cc-by.png)\n\nAll the material on this website is distributed under the CC-BY 4.0 International Attribution license [https://creativecommons.org/licenses/by/4.0/deed.en](https://creativecommons.org/licenses/by/4.0/deed.en) \n\nBasically, you can freely redistribute and modify the content; just remember to cite the University of Trento and [the authors](https://en.softpython.org/index.html#Author) \n\nTechnical notes: all website pages are easily modifiable Jupyter notebooks, which were converted to web pages with [NBSphinx](https://nbsphinx.readthedocs.io) using the [Jupman](https://github.com/DavidLeoni/jupman) template. Text sources are on Github at [https://github.com/DavidLeoni/softpython-en](https://github.com/DavidLeoni/softpython-en)", "_____no_output_____" ] ], [ [ "<a id=\"acknowledgments\"></a>", "_____no_output_____" ] ], [ [ "\n## Acknowledgments\n\nWe thank in particular Professor Alberto Montresor of the Department of Information Engineering and Computer Science, University of Trento, for making possible the first courses from which this material was born, and the project Trentino Open Data ([dati.trentino.it](https://dati.trentino.it)) for the numerous datasets provided.\n\n![dati-trentino-9327234823487](_static/img/third-parties/dati-trentino-small.png)\n\nNumerous other institutions and companies that have contributed material and ideas over time are cited [on this page](thanks.ipynb)", "_____no_output_____" ] ], [ [ "% Latex: Restores all the previous substitutions \n% DO NOT DELETE THIS !!!!\n \n\\let\\chapter\\truechapter\n\\let\\section\\truesection\n\\let\\subsection\\truesubsection\n\\let\\subsubsection\\truesubsubsection", "_____no_output_____" ] ] ]
[ "raw", "markdown", "raw", "markdown", "raw", "markdown", "raw", "markdown", "raw", "markdown", "raw", "markdown", "raw", "markdown", "raw", "markdown", "raw", "markdown", "raw", "markdown", "raw", "markdown", "raw", "markdown", "raw", "markdown", "raw", "markdown", "raw", "markdown", "raw", "markdown", "raw", "markdown", "raw" ]
[ [ "raw" ], [ "markdown" ], [ "raw" ], [ "markdown" ], [ "raw" ], [ "markdown" ], [ "raw" ], [ "markdown" ], [ "raw" ], [ "markdown" ], [ "raw" ], [ "markdown" ], [ "raw" ], [ "markdown" ], [ "raw" ], [ "markdown" ], [ "raw" ], [ "markdown" ], [ "raw" ], [ "markdown" ], [ "raw" ], [ "markdown" ], [ "raw" ], [ "markdown" ], [ "raw" ], [ "markdown" ], [ "raw", "raw" ], [ "markdown" ], [ "raw" ], [ "markdown" ], [ "raw" ], [ "markdown" ], [ "raw" ], [ "markdown" ], [ "raw" ] ]
e7cfda3801440de115e6a8d1dcc84499dc7ea493
235,729
ipynb
Jupyter Notebook
dog_app.ipynb
theCydonian/Dog-App
f6f32bf305e1862fdb67d0f425aed175bfd93527
[ "MIT" ]
null
null
null
dog_app.ipynb
theCydonian/Dog-App
f6f32bf305e1862fdb67d0f425aed175bfd93527
[ "MIT" ]
null
null
null
dog_app.ipynb
theCydonian/Dog-App
f6f32bf305e1862fdb67d0f425aed175bfd93527
[ "MIT" ]
null
null
null
155.699472
102,784
0.85401
[ [ [ "# Artificial Intelligence Nanodegree\n\n## Convolutional Neural Networks\n\n## Project: Write an Algorithm for a Dog Identification App \n\n---\n\nIn this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! \n\n> **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the iPython Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to \\n\",\n \"**File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.\n\nIn addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.\n\n>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.\n\nThe rubric contains _optional_ \"Stand Out Suggestions\" for enhancing the project beyond the minimum requirements. If you decide to pursue the \"Stand Out Suggestions\", you should include the code in this IPython notebook.\n\n\n\n---\n### Why We're Here \n\nIn this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). \n\n![Sample Dog Output](images/sample_dog_output.png)\n\nIn this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience!\n\n### The Road Ahead\n\nWe break the notebook into separate steps. 
Feel free to use the links below to navigate the notebook.\n\n* [Step 0](#step0): Import Datasets\n* [Step 1](#step1): Detect Humans\n* [Step 2](#step2): Detect Dogs\n* [Step 3](#step3): Create a CNN to Classify Dog Breeds (from Scratch)\n* [Step 4](#step4): Use a CNN to Classify Dog Breeds (using Transfer Learning)\n* [Step 5](#step5): Create a CNN to Classify Dog Breeds (using Transfer Learning)\n* [Step 6](#step6): Write your Algorithm\n* [Step 7](#step7): Test Your Algorithm\n\n---\n<a id='step0'></a>\n## Step 0: Import Datasets\n\n### Import Dog Dataset\n\nIn the code cell below, we import a dataset of dog images. We populate a few variables through the use of the `load_files` function from the scikit-learn library:\n- `train_files`, `valid_files`, `test_files` - numpy arrays containing file paths to images\n- `train_targets`, `valid_targets`, `test_targets` - numpy arrays containing onehot-encoded classification labels \n- `dog_names` - list of string-valued dog breed names for translating labels", "_____no_output_____" ] ], [ [ "from sklearn.datasets import load_files \nfrom keras.utils import np_utils\nimport numpy as np\nfrom glob import glob\n\n# define function to load train, test, and validation datasets\ndef load_dataset(path):\n data = load_files(path)\n dog_files = np.array(data['filenames'])\n dog_targets = np_utils.to_categorical(np.array(data['target']), 133)\n return dog_files, dog_targets\n\n# load train, test, and validation datasets\ntrain_files, train_targets = load_dataset('dogImages/train')\nvalid_files, valid_targets = load_dataset('dogImages/valid')\ntest_files, test_targets = load_dataset('dogImages/test')\n\n# load list of dog names\ndog_names = [item[20:-1] for item in sorted(glob(\"dogImages/train/*/\"))]\n\n# print statistics about the dataset\nprint('There are %d total dog categories.' % len(dog_names))\nprint('There are %s total dog images.\\n' % len(np.hstack([train_files, valid_files, test_files])))\nprint('There are %d training dog images.' % len(train_files))\nprint('There are %d validation dog images.' % len(valid_files))\nprint('There are %d test dog images.'% len(test_files))", "Using TensorFlow backend.\n" ] ], [ [ "### Import Human Dataset\n\nIn the code cell below, we import a dataset of human images, where the file paths are stored in the numpy array `human_files`.", "_____no_output_____" ] ], [ [ "import random\nrandom.seed(8675309)\n\n# load filenames in shuffled human dataset\nhuman_files = np.array(glob(\"lfw/*/*\"))\nrandom.shuffle(human_files)\n\n# print statistics about the dataset\nprint('There are %d total human images.' % len(human_files))", "There are 13233 total human images.\n" ] ], [ [ "---\n<a id='step1'></a>\n## Step 1: Detect Humans\n\nWe use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). 
We have downloaded one of these detectors and stored it in the `haarcascades` directory.\n\nIn the next code cell, we demonstrate how to use this detector to find human faces in a sample image.", "_____no_output_____" ] ], [ [ "import cv2 \nimport matplotlib.pyplot as plt \n%matplotlib inline \n\n# extract pre-trained face detector\nface_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml')\n\n# load color (BGR) image\nimg = cv2.imread(human_files[3])\n# convert BGR image to grayscale\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n# find faces in image\nfaces = face_cascade.detectMultiScale(gray)\n\n# print number of faces detected in the image\nprint('Number of faces detected:', len(faces))\n\n# get bounding box for each detected face\nfor (x,y,w,h) in faces:\n # add bounding box to color image\n cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)\n \n# convert BGR image to RGB for plotting\ncv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n# display the image, along with bounding box\nplt.imshow(cv_rgb)\nplt.show()", "('Number of faces detected:', 1)\n" ] ], [ [ "Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. \n\nIn the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box.\n\n### Write a Human Face Detector\n\nWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below.", "_____no_output_____" ] ], [ [ "# returns \"True\" if face is detected in image stored at img_path\ndef face_detector(img_path):\n img = cv2.imread(img_path)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n faces = face_cascade.detectMultiScale(gray)\n return len(faces) > 0", "_____no_output_____" ] ], [ [ "### (IMPLEMENTATION) Assess the Human Face Detector\n\n__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. \n- What percentage of the first 100 images in `human_files` have a detected human face? \n- What percentage of the first 100 images in `dog_files` have a detected human face? \n\nIdeally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. 
We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`.\n\n__Answer:__ \ndetected in dog images: 0.12;\ndetected in human images: 1.0", "_____no_output_____" ] ], [ [ "human_files_short = human_files[:100]\ndog_files_short = train_files[:100]\n# Do NOT modify the code above this line.\ntrueDog = 0.0\nfalseDog = 0.0\n\ntrueHum = 0.0\nfalseHum = 0.0\n\n# count face detections on the human images\nfor X in human_files_short:\n    if face_detector(X):\n        trueHum += 1.0\n    else:\n        falseHum += 1.0\n\n# count face detections on the dog images\nfor X in dog_files_short:\n    if face_detector(X):\n        falseDog += 1.0\n    else:\n        trueDog += 1.0\n\nprint \"detected in dog images: \" + str(falseDog/(trueDog+falseDog))\nprint \"detected in human images: \" + str(trueHum/(trueHum+falseHum))", "detected in dog images: 0.12\ndetected in human images: 1.0\n" ] ], [ [ "__Question 2:__ This algorithmic choice necessitates that we communicate to the user that we accept human images only when they provide a clear view of a face (otherwise, we risk having unnecessarily frustrated users!). In your opinion, is this a reasonable expectation to pose on the user? If not, can you think of a way to detect humans in images that does not necessitate an image with a clearly presented face?\n\n__Answer:__\nI think it is completely reasonable to require that faces be shown, because the face is such a defining feature for both humans and dogs; without it, the entire premise of suggesting the dog breed most similar to a person is pointless.\n\nWe suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on each of the datasets.", "_____no_output_____" ] ], [ [ "## (Optional) TODO: Report the performance of another \n## face detection algorithm on the LFW dataset\n### Feel free to use as many code cells as needed.", "_____no_output_____" ] ], [ [ "---\n<a id='step2'></a>\n## Step 2: Detect Dogs\n\nIn this section, we use a pre-trained [ResNet-50](http://ethereon.github.io/netscope/#/gist/db945b393d40bfa26006) model to detect dogs in images. Our first line of code downloads the ResNet-50 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). Given an image, this pre-trained ResNet-50 model returns a prediction (derived from the available categories in ImageNet) for the object that is contained in the image.", "_____no_output_____" ] ], [ [ "from keras.applications.resnet50 import ResNet50\n\n# define ResNet50 model\nResNet50_model = ResNet50(weights='imagenet')", "_____no_output_____" ] ], [ [ "### Pre-process the Data\n\nWhen using TensorFlow as backend, Keras CNNs require a 4D array (which we'll also refer to as a 4D tensor) as input, with shape\n\n$$\n(\\text{nb_samples}, \\text{rows}, \\text{columns}, \\text{channels}),\n$$\n\nwhere `nb_samples` corresponds to the total number of images (or samples), and `rows`, `columns`, and `channels` correspond to the number of rows, columns, and channels for each image, respectively. 
\n\nThe `path_to_tensor` function below takes a string-valued file path to a color image as input and returns a 4D tensor suitable for supplying to a Keras CNN. The function first loads the image and resizes it to a square image that is $224 \\times 224$ pixels. Next, the image is converted to an array, which is then resized to a 4D tensor. In this case, since we are working with color images, each image has three channels. Likewise, since we are processing a single image (or sample), the returned tensor will always have shape\n\n$$\n(1, 224, 224, 3).\n$$\n\nThe `paths_to_tensor` function takes a numpy array of string-valued image paths as input and returns a 4D tensor with shape \n\n$$\n(\\text{nb_samples}, 224, 224, 3).\n$$\n\nHere, `nb_samples` is the number of samples, or number of images, in the supplied array of image paths. It is best to think of `nb_samples` as the number of 3D tensors (where each 3D tensor corresponds to a different image) in your dataset!", "_____no_output_____" ] ], [ [ "from keras.preprocessing import image \nfrom tqdm import tqdm\n\ndef path_to_tensor(img_path):\n # loads RGB image as PIL.Image.Image type\n img = image.load_img(img_path, target_size=(224, 224))\n # convert PIL.Image.Image type to 3D tensor with shape (224, 224, 3)\n x = image.img_to_array(img)\n # convert 3D tensor to 4D tensor with shape (1, 224, 224, 3) and return 4D tensor\n return np.expand_dims(x, axis=0)\n\ndef paths_to_tensor(img_paths):\n list_of_tensors = [path_to_tensor(img_path) for img_path in tqdm(img_paths)]\n return np.vstack(list_of_tensors)", "_____no_output_____" ] ], [ [ "### Making Predictions with ResNet-50\n\nGetting the 4D tensor ready for ResNet-50, and for any other pre-trained model in Keras, requires some additional processing. First, the RGB image is converted to BGR by reordering the channels. All pre-trained models have the additional normalization step that the mean pixel (expressed in RGB as $[103.939, 116.779, 123.68]$ and calculated from all pixels in all images in ImageNet) must be subtracted from every pixel in each image. This is implemented in the imported function `preprocess_input`. If you're curious, you can check the code for `preprocess_input` [here](https://github.com/fchollet/keras/blob/master/keras/applications/imagenet_utils.py).\n\nNow that we have a way to format our image for supplying to ResNet-50, we are now ready to use the model to extract the predictions. This is accomplished with the `predict` method, which returns an array whose $i$-th entry is the model's predicted probability that the image belongs to the $i$-th ImageNet category. This is implemented in the `ResNet50_predict_labels` function below.\n\nBy taking the argmax of the predicted probability vector, we obtain an integer corresponding to the model's predicted object class, which we can identify with an object category through the use of this [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). 
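\n\nAs an optional sanity check, the `decode_predictions` helper (imported in the next code cell) maps a raw prediction vector back to human-readable ImageNet labels. A minimal sketch, assuming `path_to_tensor` from above and a hypothetical image path:\n\n    # hedged example: print the top-3 ImageNet labels for one image\n    img = preprocess_input(path_to_tensor('images/sample_dog_output.png'))  # hypothetical path\n    preds = ResNet50_model.predict(img)\n    print(decode_predictions(preds, top=3)[0])  # list of (class_id, label, probability) tuples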
", "_____no_output_____" ] ], [ [ "from keras.applications.resnet50 import preprocess_input, decode_predictions\n\ndef ResNet50_predict_labels(img_path):\n # returns prediction vector for image located at img_path\n img = preprocess_input(path_to_tensor(img_path))\n return np.argmax(ResNet50_model.predict(img))", "_____no_output_____" ] ], [ [ "### Write a Dog Detector\n\nWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained ResNet-50 model, we need only check if the `ResNet50_predict_labels` function above returns a value between 151 and 268 (inclusive).\n\nWe use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not).", "_____no_output_____" ] ], [ [ "### returns \"True\" if a dog is detected in the image stored at img_path\ndef dog_detector(img_path):\n prediction = ResNet50_predict_labels(img_path)\n return ((prediction <= 268) & (prediction >= 151)) ", "_____no_output_____" ] ], [ [ "### (IMPLEMENTATION) Assess the Dog Detector\n\n__Question 3:__ Use the code cell below to test the performance of your `dog_detector` function. \n- What percentage of the images in `human_files_short` have a detected dog? \n- What percentage of the images in `dog_files_short` have a detected dog?\n\n__Answer:__ \ndetected in dog images: 1.0;\ndetected in human images: 0.01", "_____no_output_____" ] ], [ [ "trueDog = 0.0\nfalseDog = 0.0\n\ntrueHum = 0.0\nfalseHum = 0.0\n\n# human stuff\nfor X in human_files_short:\n if dog_detector(X):\n falseHum += 1.0;\n else:\n trueHum += 1.0;\n\n# dog stuff\nfor X in dog_files_short:\n if dog_detector(X):\n trueDog += 1.0;\n else:\n falseDog += 1.0;\n\nprint \"detected in dog images: \" + str(trueDog/(trueDog+falseDog))\nprint \"detected in human images: \" + str(falseHum/(trueHum+falseHum))", "_____no_output_____" ] ], [ [ "---\n<a id='step3'></a>\n## Step 3: Create a CNN to Classify Dog Breeds (from Scratch)\n\nNow that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 1%. In Step 5 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.\n\nBe careful with adding too many trainable layers! More parameters means longer training, which means you are more likely to need a GPU to accelerate the training process. Thankfully, Keras provides a handy estimate of the time that each epoch is likely to take; you can extrapolate this estimate to figure out how long it will take for your algorithm to train. \n\nWe mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have great difficulty in distinguishing between a Brittany and a Welsh Springer Spaniel. 
\n\nBrittany | Welsh Springer Spaniel\n- | - \n<img src=\"images/Brittany_02625.jpg\" width=\"100\"> | <img src=\"images/Welsh_springer_spaniel_08203.jpg\" width=\"200\">\n\nIt is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). \n\nCurly-Coated Retriever | American Water Spaniel\n- | -\n<img src=\"images/Curly-coated_retriever_03896.jpg\" width=\"200\"> | <img src=\"images/American_water_spaniel_00648.jpg\" width=\"200\">\n\n\nLikewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. \n\nYellow Labrador | Chocolate Labrador | Black Labrador\n- | - | -\n<img src=\"images/Labrador_retriever_06457.jpg\" width=\"150\"> | <img src=\"images/Labrador_retriever_06455.jpg\" width=\"240\"> | <img src=\"images/Labrador_retriever_06449.jpg\" width=\"220\">\n\nWe also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. \n\nRemember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun! \n\n### Pre-process the Data\n\nWe rescale the images by dividing every pixel in every image by 255.", "_____no_output_____" ] ], [ [ "from PIL import ImageFile \nImageFile.LOAD_TRUNCATED_IMAGES = True 
\n\n# pre-process the data for Keras\ntrain_tensors = paths_to_tensor(train_files).astype('float32')/255\nvalid_tensors = paths_to_tensor(valid_files).astype('float32')/255\ntest_tensors = paths_to_tensor(test_files).astype('float32')/255", "_____no_output_____" ] ], [ [ "### (IMPLEMENTATION) Model Architecture\n\nCreate a CNN to classify dog breed. At the end of your code cell block, summarize the layers of your model by executing the line:\n\n    model.summary()\n\nWe have imported some Python modules to get you started, but feel free to import as many modules as you need. If you end up getting stuck, here's a hint that specifies a model that trains relatively fast on CPU and attains >1% test accuracy in 5 epochs:\n\n![Sample CNN](images/sample_cnn.png)\n    \n__Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. If you chose to use the hinted architecture above, describe why you think that CNN architecture should work well for the image classification task.\n\n__Answer:__ \nI started by researching what techniques great architectures for image recognition use. I took some of those techniques and made a very complex model. One of the major ones was to use progressively larger dropout rates as the number of filters increased. It overfitted a lot, so I messed around with the layer layout to try to reduce it. Eventually, I decided to reduce the number of filters greatly, and that produced my final model. This model got 6% accuracy. This model is still overfitting fairly early and has room for improvement. 
I may try to improve it later.", "_____no_output_____" ] ], [ [ "from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D\nfrom keras.layers import Dropout, Flatten, Dense\nfrom keras.models import Sequential\n\nmodel = Sequential()\n\nmodel.add(Conv2D(filters = 16, kernel_size = 3, padding=\"same\", input_shape=(224, 224, 3)))\nmodel.add(Conv2D(filters = 32, kernel_size = 3, padding=\"same\", input_shape=(224, 224, 3)))\nmodel.add(MaxPooling2D(pool_size=2))\nmodel.add(Dropout(0.15))\n\nmodel.add(Conv2D(filters = 32, kernel_size = 3, padding=\"same\", input_shape=(224, 224, 3)))\nmodel.add(MaxPooling2D(pool_size=2))\nmodel.add(Conv2D(filters = 48, kernel_size = 3, padding=\"same\", input_shape=(224, 224, 3)))\nmodel.add(MaxPooling2D(pool_size=2))\nmodel.add(Dropout(0.2))\n\nmodel.add(Flatten())\n\nmodel.add(Dense(512, activation='relu'))\nmodel.add(Dropout(0.3))\nmodel.add(Dense(256, activation='relu'))\nmodel.add(Dropout(0.1))\n\nmodel.add(Dense(133, activation='softmax'))\n\nmodel.summary()", "_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d_1 (Conv2D) (None, 224, 224, 16) 448 \n_________________________________________________________________\nconv2d_2 (Conv2D) (None, 224, 224, 32) 4640 \n_________________________________________________________________\nmax_pooling2d_2 (MaxPooling2 (None, 112, 112, 32) 0 \n_________________________________________________________________\ndropout_1 (Dropout) (None, 112, 112, 32) 0 \n_________________________________________________________________\nconv2d_3 (Conv2D) (None, 112, 112, 32) 9248 \n_________________________________________________________________\nmax_pooling2d_3 (MaxPooling2 (None, 56, 56, 32) 0 \n_________________________________________________________________\nconv2d_4 (Conv2D) (None, 56, 56, 48) 13872 \n_________________________________________________________________\nmax_pooling2d_4 (MaxPooling2 (None, 28, 28, 48) 0 \n_________________________________________________________________\ndropout_2 (Dropout) (None, 28, 28, 48) 0 \n_________________________________________________________________\nflatten_1 (Flatten) (None, 37632) 0 \n_________________________________________________________________\ndense_1 (Dense) (None, 512) 19268096 \n_________________________________________________________________\ndropout_3 (Dropout) (None, 512) 0 \n_________________________________________________________________\ndense_2 (Dense) (None, 256) 131328 \n_________________________________________________________________\ndropout_4 (Dropout) (None, 256) 0 \n_________________________________________________________________\ndense_3 (Dense) (None, 133) 34181 \n=================================================================\nTotal params: 19,461,813\nTrainable params: 19,461,813\nNon-trainable params: 0\n_________________________________________________________________\n" ] ], [ [ "### Compile the Model", "_____no_output_____" ] ], [ [ "model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])", "_____no_output_____" ] ], [ [ "### (IMPLEMENTATION) Train the Model\n\nTrain your model in the code cell below. Use model checkpointing to save the model that attains the best validation loss.\n\nYou are welcome to [augment the training data](https://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html), but this is not a requirement. 
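\n\nIf you do experiment with augmentation, Keras' `ImageDataGenerator` is one option. A minimal sketch (the specific shift and flip settings below are assumptions for illustration, not project requirements):\n\n    from keras.preprocessing.image import ImageDataGenerator\n\n    # randomly shift and mirror training images on the fly\n    datagen = ImageDataGenerator(width_shift_range=0.1,\n                                 height_shift_range=0.1,\n                                 horizontal_flip=True)\n    datagen.fit(train_tensors)\n    # then train with, e.g.: model.fit_generator(datagen.flow(train_tensors, train_targets, batch_size=20), ...)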
", "_____no_output_____" ] ], [ [ "from keras.callbacks import ModelCheckpoint \n\n### TODO: specify the number of epochs that you would like to use to train the model.\n\nepochs = 100 # high so I can always manually stop\n\n### Do NOT modify the code below this line.\n\ncheckpointer = ModelCheckpoint(filepath='saved_models/weights.best.from_scratch.hdf5', \n verbose=1, save_best_only=True)\n\nmodel.fit(train_tensors, train_targets, \n validation_data=(valid_tensors, valid_targets),\n epochs=epochs, batch_size=20, callbacks=[checkpointer], verbose=1)", "_____no_output_____" ] ], [ [ "### Load the Model with the Best Validation Loss", "_____no_output_____" ] ], [ [ "model.load_weights('saved_models/weights.best.from_scratch.hdf5')", "_____no_output_____" ] ], [ [ "### Test the Model\n\nTry out your model on the test dataset of dog images. Ensure that your test accuracy is greater than 1%.", "_____no_output_____" ] ], [ [ "# get index of predicted dog breed for each image in test set\ndog_breed_predictions = [np.argmax(model.predict(np.expand_dims(tensor, axis=0))) for tensor in test_tensors]\n\n# report test accuracy\ntest_accuracy = 100*np.sum(np.array(dog_breed_predictions)==np.argmax(test_targets, axis=1))/len(dog_breed_predictions)\nprint('Test accuracy: %.4f%%' % test_accuracy)", "Test accuracy: 6.0000%\n" ] ], [ [ "---\n<a id='step4'></a>\n## Step 4: Use a CNN to Classify Dog Breeds\n\nTo reduce training time without sacrificing accuracy, we show you how to train a CNN using transfer learning. In the following step, you will get a chance to use transfer learning to train your own CNN.\n\n### Obtain Bottleneck Features", "_____no_output_____" ] ], [ [ "bottleneck_features = np.load('bottleneck_features/DogVGG16Data.npz')\ntrain_VGG16 = bottleneck_features['train']\nvalid_VGG16 = bottleneck_features['valid']\ntest_VGG16 = bottleneck_features['test']", "_____no_output_____" ] ], [ [ "### Model Architecture\n\nThe model uses the the pre-trained VGG-16 model as a fixed feature extractor, where the last convolutional output of VGG-16 is fed as input to our model. 
We only add a global average pooling layer and a fully connected layer, where the latter contains one node for each dog category and is equipped with a softmax.", "_____no_output_____" ] ], [ [ "VGG16_model = Sequential()\nVGG16_model.add(GlobalAveragePooling2D(input_shape=train_VGG16.shape[1:]))\nVGG16_model.add(Dense(133, activation='softmax'))\n\nVGG16_model.summary()", "_____no_output_____" ] ], [ [ "### Compile the Model", "_____no_output_____" ] ], [ [ "VGG16_model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])", "_____no_output_____" ] ], [ [ "### Train the Model", "_____no_output_____" ] ], [ [ "checkpointer = ModelCheckpoint(filepath='saved_models/weights.best.VGG16.hdf5', \n verbose=1, save_best_only=True)\n\nVGG16_model.fit(train_VGG16, train_targets, \n validation_data=(valid_VGG16, valid_targets),\n epochs=20, batch_size=20, callbacks=[checkpointer], verbose=1)", "Train on 6680 samples, validate on 835 samples\nEpoch 1/20\n6680/6680 [==============================] - 89s 13ms/step - loss: 11.8201 - acc: 0.1313 - val_loss: 10.1389 - val_acc: 0.2240\n\nEpoch 00001: val_loss improved from inf to 10.13894, saving model to saved_models/weights.best.VGG16.hdf5\nEpoch 2/20\n6680/6680 [==============================] - 10s 2ms/step - loss: 9.2538 - acc: 0.3043 - val_loss: 8.9575 - val_acc: 0.3222\n\nEpoch 00002: val_loss improved from 10.13894 to 8.95749, saving model to saved_models/weights.best.VGG16.hdf5\nEpoch 3/20\n6680/6680 [==============================] - 10s 2ms/step - loss: 8.4209 - acc: 0.3877 - val_loss: 8.6163 - val_acc: 0.3557\n\nEpoch 00003: val_loss improved from 8.95749 to 8.61632, saving model to saved_models/weights.best.VGG16.hdf5\nEpoch 4/20\n6680/6680 [==============================] - 10s 2ms/step - loss: 8.0421 - acc: 0.4325 - val_loss: 8.2879 - val_acc: 0.3772\n\nEpoch 00004: val_loss improved from 8.61632 to 8.28791, saving model to saved_models/weights.best.VGG16.hdf5\nEpoch 5/20\n6680/6680 [==============================] - 12s 2ms/step - loss: 7.7844 - acc: 0.4624 - val_loss: 8.1717 - val_acc: 0.4048\n\nEpoch 00005: val_loss improved from 8.28791 to 8.17172, saving model to saved_models/weights.best.VGG16.hdf5\nEpoch 6/20\n6680/6680 [==============================] - 11s 2ms/step - loss: 7.6280 - acc: 0.4835 - val_loss: 8.0697 - val_acc: 0.4096\n\nEpoch 00006: val_loss improved from 8.17172 to 8.06973, saving model to saved_models/weights.best.VGG16.hdf5\nEpoch 7/20\n6680/6680 [==============================] - 11s 2ms/step - loss: 7.4649 - acc: 0.5015 - val_loss: 8.0886 - val_acc: 0.4096\n\nEpoch 00007: val_loss did not improve from 8.06973\nEpoch 8/20\n6680/6680 [==============================] - 11s 2ms/step - loss: 7.3254 - acc: 0.5120 - val_loss: 7.9434 - val_acc: 0.4168\n\nEpoch 00008: val_loss improved from 8.06973 to 7.94338, saving model to saved_models/weights.best.VGG16.hdf5\nEpoch 9/20\n6680/6680 [==============================] - 10s 2ms/step - loss: 7.1596 - acc: 0.5250 - val_loss: 7.7321 - val_acc: 0.4240\n\nEpoch 00009: val_loss improved from 7.94338 to 7.73213, saving model to saved_models/weights.best.VGG16.hdf5\nEpoch 10/20\n6680/6680 [==============================] - 10s 2ms/step - loss: 6.9381 - acc: 0.5419 - val_loss: 7.5836 - val_acc: 0.4395\n\nEpoch 00010: val_loss improved from 7.73213 to 7.58365, saving model to saved_models/weights.best.VGG16.hdf5\nEpoch 11/20\n6680/6680 [==============================] - 10s 2ms/step - loss: 6.7472 - acc: 0.5557 - val_loss: 7.4644 - val_acc: 0.4467\n\nEpoch 
00011: val_loss improved from 7.58365 to 7.46442, saving model to saved_models/weights.best.VGG16.hdf5\nEpoch 12/20\n6680/6680 [==============================] - 10s 2ms/step - loss: 6.6441 - acc: 0.5686 - val_loss: 7.3440 - val_acc: 0.4491\n\nEpoch 00012: val_loss improved from 7.46442 to 7.34403, saving model to saved_models/weights.best.VGG16.hdf5\nEpoch 13/20\n6680/6680 [==============================] - 10s 2ms/step - loss: 6.5786 - acc: 0.5749 - val_loss: 7.2727 - val_acc: 0.4635\n\nEpoch 00013: val_loss improved from 7.34403 to 7.27274, saving model to saved_models/weights.best.VGG16.hdf5\nEpoch 14/20\n6680/6680 [==============================] - 11s 2ms/step - loss: 6.3946 - acc: 0.5844 - val_loss: 7.2446 - val_acc: 0.4611\n\nEpoch 00014: val_loss improved from 7.27274 to 7.24463, saving model to saved_models/weights.best.VGG16.hdf5\nEpoch 15/20\n6680/6680 [==============================] - 11s 2ms/step - loss: 6.2405 - acc: 0.5940 - val_loss: 7.1072 - val_acc: 0.4695\n\nEpoch 00015: val_loss improved from 7.24463 to 7.10718, saving model to saved_models/weights.best.VGG16.hdf5\nEpoch 16/20\n6680/6680 [==============================] - 11s 2ms/step - loss: 6.1987 - acc: 0.6039 - val_loss: 7.0344 - val_acc: 0.4754\n\nEpoch 00016: val_loss improved from 7.10718 to 7.03437, saving model to saved_models/weights.best.VGG16.hdf5\nEpoch 17/20\n6680/6680 [==============================] - 10s 2ms/step - loss: 6.1235 - acc: 0.6072 - val_loss: 6.9543 - val_acc: 0.4754\n\nEpoch 00017: val_loss improved from 7.03437 to 6.95427, saving model to saved_models/weights.best.VGG16.hdf5\nEpoch 18/20\n6680/6680 [==============================] - 11s 2ms/step - loss: 6.0022 - acc: 0.6138 - val_loss: 6.8999 - val_acc: 0.4754\n\nEpoch 00018: val_loss improved from 6.95427 to 6.89992, saving model to saved_models/weights.best.VGG16.hdf5\nEpoch 19/20\n6680/6680 [==============================] - 10s 2ms/step - loss: 5.8735 - acc: 0.6213 - val_loss: 6.7769 - val_acc: 0.4826\n\nEpoch 00019: val_loss improved from 6.89992 to 6.77690, saving model to saved_models/weights.best.VGG16.hdf5\nEpoch 20/20\n6680/6680 [==============================] - 10s 2ms/step - loss: 5.7552 - acc: 0.6341 - val_loss: 6.6512 - val_acc: 0.5018\n\nEpoch 00020: val_loss improved from 6.77690 to 6.65119, saving model to saved_models/weights.best.VGG16.hdf5\n" ] ], [ [ "### Load the Model with the Best Validation Loss", "_____no_output_____" ] ], [ [ "VGG16_model.load_weights('saved_models/weights.best.VGG16.hdf5')", "_____no_output_____" ] ], [ [ "### Test the Model\n\nNow, we can use the CNN to test how well it identifies breed within our test dataset of dog images. 
We print the test accuracy below.", "_____no_output_____" ] ], [ [ "# get index of predicted dog breed for each image in test set\nVGG16_predictions = [np.argmax(VGG16_model.predict(np.expand_dims(feature, axis=0))) for feature in test_VGG16]\n\n# report test accuracy\ntest_accuracy = 100*np.sum(np.array(VGG16_predictions)==np.argmax(test_targets, axis=1))/len(VGG16_predictions)\nprint('Test accuracy: %.4f%%' % test_accuracy)", "Test accuracy: 48.0000%\n" ] ], [ [ "### Predict Dog Breed with the Model", "_____no_output_____" ] ], [ [ "from extract_bottleneck_features import *\n\ndef VGG16_predict_breed(img_path):\n    # extract bottleneck features\n    bottleneck_feature = extract_VGG16(path_to_tensor(img_path))\n    # obtain predicted vector\n    predicted_vector = VGG16_model.predict(bottleneck_feature)\n    # return dog breed that is predicted by the model\n    return dog_names[np.argmax(predicted_vector)]", "_____no_output_____" ] ], [ [ "---\n<a id='step5'></a>\n## Step 5: Create a CNN to Classify Dog Breeds (using Transfer Learning)\n\nYou will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set.\n\nIn Step 4, we used transfer learning to create a CNN using VGG-16 bottleneck features. In this section, you must use the bottleneck features from a different pre-trained model. To make things easier for you, we have pre-computed the features for all of the networks that are currently available in Keras:\n- [VGG-19](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/DogVGG19Data.npz) bottleneck features\n- [ResNet-50](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/DogResnet50Data.npz) bottleneck features\n- [Inception](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/DogInceptionV3Data.npz) bottleneck features\n- [Xception](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/DogXceptionData.npz) bottleneck features\n\nThe files are encoded as such:\n\n    Dog{network}Data.npz\n    \nwhere `{network}`, in the above filename, can be one of `VGG19`, `Resnet50`, `InceptionV3`, or `Xception`. Pick one of the above architectures, download the corresponding bottleneck features, and store the downloaded file in the `bottleneck_features/` folder in the repository.\n\n### (IMPLEMENTATION) Obtain Bottleneck Features\n\nIn the code block below, extract the bottleneck features corresponding to the train, test, and validation sets by running the following:\n\n    bottleneck_features = np.load('bottleneck_features/Dog{network}Data.npz')\n    train_{network} = bottleneck_features['train']\n    valid_{network} = bottleneck_features['valid']\n    test_{network} = bottleneck_features['test']", "_____no_output_____" ] ], [ [ "bottleneck_features_VGG19 = np.load('bottleneck_features/DogVGG19Data.npz')\ntrain_VGG19 = bottleneck_features_VGG19['train']\nvalid_VGG19 = bottleneck_features_VGG19['valid']\ntest_VGG19 = bottleneck_features_VGG19['test']", "_____no_output_____" ] ], [ [ "### (IMPLEMENTATION) Model Architecture\n\nCreate a CNN to classify dog breed. At the end of your code cell block, summarize the layers of your model by executing the line:\n\n    <your model's name>.summary()\n   \n__Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem.\n\n__Answer:__ \n\nI started with a relu and a tanh hidden layer with a couple of dropouts between them. This gave decent performance (low 70%s). 
I then proceeded to increase complexity while managing overfitting with the dropout layers. After many tests, I was able to break 80% accuracy. Switching to the sgd optimizer also helped. I might try to improve it in the future.\n\nFinal accuracy: 81.0000%", "_____no_output_____" ] ], [ [ "VGG19_model = Sequential()\nVGG19_model.add(GlobalAveragePooling2D(input_shape=train_VGG19.shape[1:]))\nVGG19_model.add(Dense(760, activation=\"relu\"))\nVGG19_model.add(Dropout(0.5))\nVGG19_model.add(Dense(256, activation=\"tanh\"))\nVGG19_model.add(Dropout(0.4))\nVGG19_model.add(Dense(133, activation=\"softmax\"))\nVGG19_model.summary()", "_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nglobal_average_pooling2d_1 ( (None, 512) 0 \n_________________________________________________________________\ndense_4 (Dense) (None, 760) 389880 \n_________________________________________________________________\ndropout_5 (Dropout) (None, 760) 0 \n_________________________________________________________________\ndense_5 (Dense) (None, 256) 194816 \n_________________________________________________________________\ndropout_6 (Dropout) (None, 256) 0 \n_________________________________________________________________\ndense_6 (Dense) (None, 133) 34181 \n=================================================================\nTotal params: 618,877\nTrainable params: 618,877\nNon-trainable params: 0\n_________________________________________________________________\n" ] ], [ [ "### (IMPLEMENTATION) Compile the Model", "_____no_output_____" ] ], [ [ "VGG19_model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])", "_____no_output_____" ] ], [ [ "### (IMPLEMENTATION) Train the Model\n\nTrain your model in the code cell below. Use model checkpointing to save the model that attains the best validation loss. \n\nYou are welcome to [augment the training data](https://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html), but this is not a requirement. ", "_____no_output_____" ] ], [ [ "from keras.callbacks import ModelCheckpoint 
\ncheckpoint = ModelCheckpoint(filepath='saved_models/weights.best.VGG19.hdf5', verbose=1, save_best_only=True)\n\nVGG19_model.fit(train_VGG19, train_targets, validation_data=(valid_VGG19, valid_targets), epochs=50, batch_size=20, callbacks=[checkpoint], verbose=1)\n# high number of epochs for manual stop", "_____no_output_____" ] ], [ [ "### (IMPLEMENTATION) Load the Model with the Best Validation Loss", "_____no_output_____" ] ], [ [ "VGG19_model.load_weights('saved_models/weights.best.VGG19.hdf5')", "_____no_output_____" ] ], [ [ "### (IMPLEMENTATION) Test the Model\n\nTry out your model on the test dataset of dog images. Ensure that your test accuracy is greater than 60%.", "_____no_output_____" ] ], [ [ "# get index of predicted dog breed for each image in test set\nVGG19_predictions = [np.argmax(VGG19_model.predict(np.expand_dims(feature, axis=0))) for feature in test_VGG19]\n\n# report test accuracy\ntest_accuracy = 100*np.sum(np.array(VGG19_predictions)==np.argmax(test_targets, axis=1))/len(VGG19_predictions)\nprint('Test accuracy: %.4f%%' % test_accuracy)", "Test accuracy: 81.0000%\n" ] ], [ [ "### (IMPLEMENTATION) Predict Dog Breed with the Model\n\nWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan_hound`, etc) that is predicted by your model. 
\n\nSimilar to the analogous function in Step 4, your function should have three steps:\n1. Extract the bottleneck features corresponding to the chosen CNN model.\n2. Supply the bottleneck features as input to the model to return the predicted vector. Note that the argmax of this prediction vector gives the index of the predicted dog breed.\n3. Use the `dog_names` array defined in Step 0 of this notebook to return the corresponding breed.\n\nThe functions to extract the bottleneck features can be found in `extract_bottleneck_features.py`, and they have been imported in an earlier code cell. To obtain the bottleneck features corresponding to your chosen CNN architecture, you need to use the function\n\n    extract_{network}\n    \nwhere `{network}`, in the above filename, should be one of `VGG19`, `Resnet50`, `InceptionV3`, or `Xception`.", "_____no_output_____" ] ], [ [ "import extract_bottleneck_features\n\ndef getBreed(path):\n    # extract bottleneck features\n    bottleneck_feature = extract_bottleneck_features.extract_VGG19(path_to_tensor(path))\n    # obtain predicted vector\n    predicted_vector = VGG19_model.predict(bottleneck_feature)\n    # return dog breed that is predicted by the model\n    return dog_names[np.argmax(predicted_vector)]\n    \nprint getBreed(\"/Users/Luke_MacBook/Downloads/dog-project/dogImages/train/023.Bernese_mountain_dog/Bernese_mountain_dog_01619.jpg\")", "Bernese_mountain_dog\n" ] ], [ [ "---\n<a id='step6'></a>\n## Step 6: Write your Algorithm\n\nWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,\n- if a __dog__ is detected in the image, return the predicted breed.\n- if a __human__ is detected in the image, return the resembling dog breed.\n- if __neither__ is detected in the image, provide output that indicates an error.\n\nYou are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 5 to predict dog breed. 
\n\nSome sample output for our algorithm is provided below, but feel free to design your own user experience!\n\n![Sample Human Output](images/sample_human_output.png)\n\n\n### (IMPLEMENTATION) Write your Algorithm", "_____no_output_____" ] ], [ [ "def printImg(path):\n    # load the image and convert BGR to RGB for plotting\n    img = cv2.imread(path)\n    cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n    # display the image\n    plt.imshow(cv_rgb)\n    plt.show()", "_____no_output_____" ], [ "def whatIs(path, error_mode):\n    # classify the image as containing a dog, a human, or neither\n    dog = dog_detector(path)\n    human = face_detector(path)\n    if not dog and not human:\n        if error_mode == \"terminate\":\n            raise ValueError(\"Neither Human nor Dog Detected. 
Please input a new image\")\n        elif error_mode == \"continue\":\n            print \"Neither Human nor Dog Detected.\"\n            return None\n    if dog:\n        return \"dog\"\n    else:\n        return \"human\"", "_____no_output_____" ], [ "def finalAlg(path, error_mode='terminate'):\n    if error_mode != \"terminate\" and error_mode != \"continue\":\n        raise ValueError(\"error_mode can only be set to terminate or continue\")  # \"continue\" just prints and does not raise an error\n    entity_type = whatIs(path, error_mode)\n    if entity_type == \"dog\":\n        print \"Whats up Dog!\"\n        printImg(path)\n        print(\"You are a...\")\n        print(str(getBreed(path)))\n        print(\"\\n\")\n    elif entity_type == \"human\":\n        print \"Hello, Human!\"\n        printImg(path)\n        print(\"You look like a...\")\n        print(str(getBreed(path)))\n        print(\"\\n\")\n    else:\n        print \"Hello!\"\n        printImg(path)\n        print(\"You look like a...\")\n        print(str(getBreed(path)))\n        print(\"\\n\")", "_____no_output_____" ] ], [ [ "---\n<a id='step7'></a>\n## Step 7: Test Your Algorithm\n\nIn this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that __you__ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog?\n\n### (IMPLEMENTATION) Test Your Algorithm on Sample Images!\n\nTest your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images. \n\n__Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm.\n\n__Answer:__ \nMuch better. For dogs, it is fairly accurate, and for humans, I can clearly see similar features between humans and the dogs they were identified as.\n\nI could potentially improve the algorithm by leveraging both dog and human checks for greater accuracy.\nI could also add camera input to make it more usable.\nFinally, I could potentially eliminate more overfitting to get a more accurate model.", "_____no_output_____" ] ], [ [ "# dogs\nfinalAlg(\"dogImages/test/002.Afghan_hound/Afghan_hound_00151.jpg\")\nfinalAlg(\"dogImages/train/023.Bernese_mountain_dog/Bernese_mountain_dog_01619.jpg\")\nfinalAlg(\"dogImages/train/080.Greater_swiss_mountain_dog/Greater_swiss_mountain_dog_05466.jpg\")\n\n# humans\nfinalAlg(\"lfw/Aaron_Guiel/Aaron_Guiel_0001.jpg\")\nfinalAlg(\"lfw/Will_Ferrell/Will_Ferrell_0001.jpg\")\n\n# other\nfinalAlg(\"weirdPics/KingDedede.jpg\", error_mode=\"continue\")\nfinalAlg(\"weirdPics/furry.jpg\", error_mode=\"continue\")", "Whats up Dog!\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
e7d01e729ced47dca62d3e5eaac7a9e7a6864951
59,660
ipynb
Jupyter Notebook
matrix_one/day5.ipynb
korespo/dw_matrix
71c8ec5072bd02b4f3ec831b890dde33f8f876a5
[ "MIT" ]
null
null
null
matrix_one/day5.ipynb
korespo/dw_matrix
71c8ec5072bd02b4f3ec831b890dde33f8f876a5
[ "MIT" ]
null
null
null
matrix_one/day5.ipynb
korespo/dw_matrix
71c8ec5072bd02b4f3ec831b890dde33f8f876a5
[ "MIT" ]
null
null
null
59,660
59,660
0.623885
[ [ [ "!pip install eli5", "Collecting eli5\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/97/2f/c85c7d8f8548e460829971785347e14e45fa5c6617da374711dec8cb38cc/eli5-0.10.1-py2.py3-none-any.whl (105kB)\n\r\u001b[K |███ | 10kB 16.5MB/s eta 0:00:01\r\u001b[K |██████▏ | 20kB 1.8MB/s eta 0:00:01\r\u001b[K |█████████▎ | 30kB 2.3MB/s eta 0:00:01\r\u001b[K |████████████▍ | 40kB 1.7MB/s eta 0:00:01\r\u001b[K |███████████████▌ | 51kB 1.9MB/s eta 0:00:01\r\u001b[K |██████████████████▋ | 61kB 2.3MB/s eta 0:00:01\r\u001b[K |█████████████████████▊ | 71kB 2.5MB/s eta 0:00:01\r\u001b[K |████████████████████████▊ | 81kB 2.6MB/s eta 0:00:01\r\u001b[K |███████████████████████████▉ | 92kB 3.0MB/s eta 0:00:01\r\u001b[K |███████████████████████████████ | 102kB 2.9MB/s eta 0:00:01\r\u001b[K |████████████████████████████████| 112kB 2.9MB/s \n\u001b[?25hRequirement already satisfied: scikit-learn>=0.18 in /usr/local/lib/python3.6/dist-packages (from eli5) (0.22.1)\nRequirement already satisfied: graphviz in /usr/local/lib/python3.6/dist-packages (from eli5) (0.10.1)\nRequirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from eli5) (1.12.0)\nRequirement already satisfied: tabulate>=0.7.7 in /usr/local/lib/python3.6/dist-packages (from eli5) (0.8.6)\nRequirement already satisfied: numpy>=1.9.0 in /usr/local/lib/python3.6/dist-packages (from eli5) (1.17.5)\nRequirement already satisfied: jinja2 in /usr/local/lib/python3.6/dist-packages (from eli5) (2.11.1)\nRequirement already satisfied: attrs>16.0.0 in /usr/local/lib/python3.6/dist-packages (from eli5) (19.3.0)\nRequirement already satisfied: scipy in /usr/local/lib/python3.6/dist-packages (from eli5) (1.4.1)\nRequirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.6/dist-packages (from scikit-learn>=0.18->eli5) (0.14.1)\nRequirement already satisfied: MarkupSafe>=0.23 in /usr/local/lib/python3.6/dist-packages (from jinja2->eli5) (1.1.1)\nInstalling collected packages: eli5\nSuccessfully installed eli5-0.10.1\n" ], [ "import pandas as pd\nimport numpy as np\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.ensemble import RandomForestRegressor\n\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.model_selection import cross_val_score\n\nimport eli5\nfrom eli5.sklearn import PermutationImportance\n\nfrom ast import literal_eval\nfrom tqdm import tqdm_notebook", "_____no_output_____" ], [ "cd \"/content/drive/My Drive/Colab Notebooks/dw_matrix\"", "/content/drive/My Drive/Colab Notebooks/dw_matrix\n" ], [ "ls data/", "men_shoes.csv\n" ], [ "df = pd.read_csv('data/men_shoes.csv', low_memory=False)", "_____no_output_____" ], [ "def run_model(feats, model=DecisionTreeRegressor(max_depth=5)):\n x = df[ feats ].values\n y = df['prices_amountmin'].values\n\n scores = cross_val_score(model, x, y, scoring='neg_mean_absolute_error')\n return np.mean(scores), np.std(scores)", "_____no_output_____" ], [ "df['brand_cat'] = df['brand'].map(lambda x: str(x).lower()).factorize()[0]\nrun_model(['brand_cat'])", "_____no_output_____" ], [ "model = RandomForestRegressor(max_depth=5, n_estimators=100, random_state=0)\nrun_model(['brand_cat'], model)", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "df.features.head().values[0]", "_____no_output_____" ], [ "str_dict = '[{\"key\":\"Gender\",\"value\":[\"Men\"]},{\"key\":\"Shoe Size\",\"value\":[\"M\"]},{\"key\":\"Shoe Category\",\"value\":[\"Men\\'s Shoes\"]},{\"key\":\"Color\",\"value\":[\"Multicolor\"]},{\"key\":\"Manufacturer Part 
Number\",\"value\":[\"8190-W-NAVY-7.5\"]},{\"key\":\"Brand\",\"value\":[\"Josmo\"]}]'\n\nliteral_eval(str_dict)[0]['value'][0]", "_____no_output_____" ], [ "def parse_features(x):\n output_dict = {}\n if str(x) == 'nan': return output_dict\n\n features = literal_eval(x.replace('\\\\\"', '\"'))\n for item in features: \n key = item['key'].lower().strip()\n value = item['value'][0].lower().strip()\n\n output_dict[key] = value\n\n return output_dict\n\ndf['features_parsed'] = df['features'].map(parse_features)", "_____no_output_____" ], [ "keys = set()\n\ndf['features_parsed'].map( lambda x: keys.update(x.keys()) )\nlen(keys)", "_____no_output_____" ], [ "df.features_parsed.head().values", "_____no_output_____" ], [ "def get_name_feat(key):\n return 'feat_' + key\n\nfor key in tqdm_notebook(keys):\n df[get_name_feat(key)] = df.features_parsed.map(lambda feats: feats[key] if key in feats else np.nan)", "_____no_output_____" ], [ "df.columns", "_____no_output_____" ], [ "keys_stat = {}\n\nfor key in keys:\n keys_stat[key] = df[ False == df[get_name_feat(key)].isnull() ].shape[0] / df.shape[0] * 100", "_____no_output_____" ], [ "{k:v for k,v in keys_stat.items() if v > 30}", "_____no_output_____" ], [ "df['feat_brand_cat'] = df['feat_brand'].factorize()[0]\ndf['feat_color_cat'] = df['feat_color'].factorize()[0]\ndf['feat_gender_cat'] = df['feat_gender'].factorize()[0]\ndf['feat_manufacturer part number_cat'] = df['feat_manufacturer part number'].factorize()[0]\ndf['feat_material_cat'] = df['feat_material'].factorize()[0]\n\ndf['feat_sport_cat'] = df['feat_sport'].factorize()[0]\ndf['feat_style_cat'] = df['feat_style'].factorize()[0]\n\nfor key in keys:\n df[get_name_feat(key) + '_cat'] = df[get_name_feat(key)].factorize()[0]\n \n", "_____no_output_____" ], [ "df['brand'] = df['brand'].map(lambda x: str(x).lower() )\n\ndf[ df.brand == df.feat_brand].shape", "_____no_output_____" ], [ "feats = ['']", "_____no_output_____" ], [ "model = RandomForestRegressor(max_depth=5, n_estimators=100)\nrun_model(['brand_cat'], model)", "_____no_output_____" ], [ "feats_cat = [x for x in df.columns if 'cat' in x]\nfeats_cat", "_____no_output_____" ], [ "df['weight'].unique()", "_____no_output_____" ], [ "\n\nfeats = ['brand_cat', 'feat_metal type_cat', 'feat_shape_cat', 'feat_brand_cat', 'feat_gender_cat', 'feat_material_cat', 'feat_style_cat']\n#feats += feats_cat\n#feats = list(set(feats))\n\nmodel = RandomForestRegressor(max_depth=5, n_estimators=100)\n\nresult = run_model(feats, model)\nprint(result)", "(-57.29886406353087, 4.203991334577988)\n" ], [ "x = df[feats].values\ny = df['prices_amountmin'].values\n\nm = RandomForestRegressor(max_depth=5, n_estimators=100, random_state=0)\nm.fit(x, y)\n\nprint(result)\nperm = PermutationImportance(m, random_state=1).fit(x, y)\neli5.show_weights(perm, feature_names=feats)", "(-57.29886406353087, 4.203991334577988)\n" ], [ "df['brand'].value_counts(normalize=True)", "_____no_output_____" ], [ "df[ df['brand'] == 'nike'].features_parsed.sample(5).values", "_____no_output_____" ], [ "!git add matrix_one/day5.ipynb\n!git commit -m \"Read Men's Shoe Prices dataset from data.world\"", "_____no_output_____" ], [ "ls matrix_one/", "day3.ipynb day4.ipynb day5.ipynb Womens_shoes.ipynb\n" ], [ "", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7d0211ec5511fac2e2f374b80ffecbf6af7eaf8
641,572
ipynb
Jupyter Notebook
examples/rasterio.ipynb
sackh/python-geospatial
cf9c2267e4c92572aa6d94cef1529d7783ff6d66
[ "MIT" ]
474
2019-01-01T21:08:40.000Z
2022-03-29T08:52:29.000Z
examples/rasterio.ipynb
prachisarode95/python-geospatial
cf9c2267e4c92572aa6d94cef1529d7783ff6d66
[ "MIT" ]
1
2021-08-31T16:53:04.000Z
2021-08-31T16:53:04.000Z
examples/rasterio.ipynb
prachisarode95/python-geospatial
cf9c2267e4c92572aa6d94cef1529d7783ff6d66
[ "MIT" ]
111
2019-01-29T14:22:37.000Z
2022-03-11T01:57:25.000Z
799.965087
176,960
0.938311
[ [ [ "# Advanced features in Rasterio\n\nhttps://gist.github.com/sgillies/7e5cd548110a5b4d45ac1a1d93cb17a3\n\n[Rasterio](https://mapbox.github.io/rasterio/) is an open source Python package that wraps [GDAL](http://www.gdal.org/) in idiomatic Python functions and classes.\n\nThe last pre-release of Rasterio has five advanced features that are useful for developing cloud-native applications.\n\n1. Quick overviews of GeoTIFFs in the cloud\n2. Quick subsets of GeoTIFFs in the cloud\n3. Lazy warping of GeoTIFFs in the cloud\n4. Formatted files in RAM\n5. Datasets in zipped streams\n\nPlease note these features already exist in the latest version of the GDAL library. What Rasterio does, for the first time, is make them into solid Python patterns.\n\nThis notebook is a demonstration of these patterns.\n\n## Notebook requirements\n\nThis notebook uses f-strings and requires Python 3.6. It will probably work with other Python 3 versions if the f-strings are replaced by `str.format()` calls. My team has switched to Python 3.6 this year and we're glad we did.\n\nI recommend that you run this notebook in an isolated Python environment. My preference is for one created with venv. Install the latest pre-release of Rasterio with its S3-related extras.\n\n```\npython3.6 -m venv rasterio-advanced-features\nsource rasterio-advanced-features/bin/activate\n(rasterio-advanced-features) $ pip install --pre rasterio[s3]\n(rasterio-advanced-features) $ pip install mercantile jupyter\n```\n\nIf you're a conda user, do the following.\n\n```\nconda create -n rasterio-advanced-features python=3.6 boto3 jupyter\nconda install -c conda-forge mercantile\nconda install -c conda-forge/label/dev rasterio\n```\n\nYou will need an AWS account and credentials to run the scripts in this notebook. An AWS account is free and doesn't take long to set up: https://aws.amazon.com/account/.\n\n## Rasterio documentation\n\nThis notebook glosses over basic usage of Rasterio and discusses several advanced usage patterns. Please consult the documentation of the Rasterio package for help with basic usage: https://mapbox.github.io/rasterio/.\n\n## Quick tour of the AWS Landsat PDS\n\nWe're going to use the [AWS Landsat PDS](https://aws.amazon.com/public-datasets/landsat/) as a source of data for this notebook. From the site:\n\n> Landsat 8 data is available for anyone to use via Amazon S3. All Landsat 8 scenes are available from the start of imagery capture. All new Landsat 8 scenes are made available each day, often within hours of production.\n\n> The Landsat program is a joint effort of the U.S. Geological Survey and NASA. First launched in 1972, the Landsat series of satellites has produced the longest, continuous record of Earth’s land surface as seen from space. NASA is in charge of developing remote-sensing instruments and spacecraft, launching the satellites, and validating their performance. USGS develops the associated ground systems, then takes ownership and operates the satellites, as well as managing data reception, archiving, and distribution. Since late 2008, Landsat data have been made available to all users free of charge. Carefully calibrated Landsat imagery provides the U.S. 
and the world with a long-term, consistent inventory of vitally important global resources.\nAWS has made Landsat 8 data freely available on Amazon S3 so that anyone can use our on-demand computing resources to perform analysis and create new products without needing to worry about the cost of storing Landsat data or the time required to download it.\n\n> Landsat data is in AWS S3 bucket named *landsat-pds* and is organized by *path*, *row*, and *scene*. The scene id also has the path and row encoded in it. If you know the scene you're interested in – by searching the USGS Earth Explorer site, or via James Bridle's [Landsat Tumblr](http://laaaaaaandsat.tumblr.com/) – you can extract the path and row and construct an AWS S3 prefix that lets you find all the objects associated with that scene using boto3.", "_____no_output_____" ], [ "If you don't have AWS credentials set in your environment, you can set them in the block below. If you do, delete the block. Be careful to remove your credentials from the notebook before sharing it with anyone else.", "_____no_output_____" ] ], [ [ "%env AWS_ACCESS_KEY_ID= AWS_SECRET_ACCESS_KEY=", "env: AWS_ACCESS_KEY_ID=AWS_SECRET_ACCESS_KEY=\n" ] ], [ [ "In the script below we will use the AWS boto3 module to examine the structure of the Landsat Public Dataset. `LC08_L1TP_139045_20170304_20170316_01_T1` is a Landsat scene ID with a standard pattern.", "_____no_output_____" ] ], [ [ "import re\n\nscene = 'LC08_L1TP_139045_20170304_20170316_01_T1'\npath, row = re.match(r'LC08_L1TP_(\\d{3})(\\d{3})', scene).groups()\nprefix = f'c1/L8/{path}/{row}/{scene}'\n\nimport boto3\n\nfor objsum in boto3.resource('s3').Bucket('landsat-pds').objects.filter(Prefix=prefix):\n print(objsum.bucket_name, objsum.key, objsum.size)", "landsat-pds c1/L8/139/045/LC08_L1TP_139045_20170304_20170316_01_T1/LC08_L1TP_139045_20170304_20170316_01_T1_ANG.txt 117122\nlandsat-pds c1/L8/139/045/LC08_L1TP_139045_20170304_20170316_01_T1/LC08_L1TP_139045_20170304_20170316_01_T1_B1.TIF 50091654\nlandsat-pds c1/L8/139/045/LC08_L1TP_139045_20170304_20170316_01_T1/LC08_L1TP_139045_20170304_20170316_01_T1_B1.TIF.ovr 6454401\nlandsat-pds c1/L8/139/045/LC08_L1TP_139045_20170304_20170316_01_T1/LC08_L1TP_139045_20170304_20170316_01_T1_B10.TIF 48758134\nlandsat-pds c1/L8/139/045/LC08_L1TP_139045_20170304_20170316_01_T1/LC08_L1TP_139045_20170304_20170316_01_T1_B10.TIF.ovr 7379918\nlandsat-pds c1/L8/139/045/LC08_L1TP_139045_20170304_20170316_01_T1/LC08_L1TP_139045_20170304_20170316_01_T1_B10_wrk.IMD 11597\nlandsat-pds c1/L8/139/045/LC08_L1TP_139045_20170304_20170316_01_T1/LC08_L1TP_139045_20170304_20170316_01_T1_B11.TIF 46753150\nlandsat-pds c1/L8/139/045/LC08_L1TP_139045_20170304_20170316_01_T1/LC08_L1TP_139045_20170304_20170316_01_T1_B11.TIF.ovr 7202288\nlandsat-pds c1/L8/139/045/LC08_L1TP_139045_20170304_20170316_01_T1/LC08_L1TP_139045_20170304_20170316_01_T1_B11_wrk.IMD 11597\nlandsat-pds c1/L8/139/045/LC08_L1TP_139045_20170304_20170316_01_T1/LC08_L1TP_139045_20170304_20170316_01_T1_B1_wrk.IMD 11597\nlandsat-pds c1/L8/139/045/LC08_L1TP_139045_20170304_20170316_01_T1/LC08_L1TP_139045_20170304_20170316_01_T1_B2.TIF 51415509\nlandsat-pds c1/L8/139/045/LC08_L1TP_139045_20170304_20170316_01_T1/LC08_L1TP_139045_20170304_20170316_01_T1_B2.TIF.ovr 6776630\nlandsat-pds c1/L8/139/045/LC08_L1TP_139045_20170304_20170316_01_T1/LC08_L1TP_139045_20170304_20170316_01_T1_B2_wrk.IMD 11597\nlandsat-pds c1/L8/139/045/LC08_L1TP_139045_20170304_20170316_01_T1/LC08_L1TP_139045_20170304_20170316_01_T1_B3.TIF 55266974\nlandsat-pds 
c1/L8/139/045/LC08_L1TP_139045_20170304_20170316_01_T1/LC08_L1TP_139045_20170304_20170316_01_T1_B3.TIF.ovr 7207728\nlandsat-pds c1/L8/139/045/LC08_L1TP_139045_20170304_20170316_01_T1/LC08_L1TP_139045_20170304_20170316_01_T1_B3_wrk.IMD 11597\nlandsat-pds c1/L8/139/045/LC08_L1TP_139045_20170304_20170316_01_T1/LC08_L1TP_139045_20170304_20170316_01_T1_B4.TIF 59330924\nlandsat-pds c1/L8/139/045/LC08_L1TP_139045_20170304_20170316_01_T1/LC08_L1TP_139045_20170304_20170316_01_T1_B4.TIF.ovr 7682022\nlandsat-pds c1/L8/139/045/LC08_L1TP_139045_20170304_20170316_01_T1/LC08_L1TP_139045_20170304_20170316_01_T1_B4_wrk.IMD 11597\nlandsat-pds c1/L8/139/045/LC08_L1TP_139045_20170304_20170316_01_T1/LC08_L1TP_139045_20170304_20170316_01_T1_B5.TIF 63037553\nlandsat-pds c1/L8/139/045/LC08_L1TP_139045_20170304_20170316_01_T1/LC08_L1TP_139045_20170304_20170316_01_T1_B5.TIF.ovr 8033890\nlandsat-pds c1/L8/139/045/LC08_L1TP_139045_20170304_20170316_01_T1/LC08_L1TP_139045_20170304_20170316_01_T1_B5_wrk.IMD 11597\nlandsat-pds c1/L8/139/045/LC08_L1TP_139045_20170304_20170316_01_T1/LC08_L1TP_139045_20170304_20170316_01_T1_B6.TIF 65134525\nlandsat-pds c1/L8/139/045/LC08_L1TP_139045_20170304_20170316_01_T1/LC08_L1TP_139045_20170304_20170316_01_T1_B6.TIF.ovr 8283172\nlandsat-pds c1/L8/139/045/LC08_L1TP_139045_20170304_20170316_01_T1/LC08_L1TP_139045_20170304_20170316_01_T1_B6_wrk.IMD 11597\nlandsat-pds c1/L8/139/045/LC08_L1TP_139045_20170304_20170316_01_T1/LC08_L1TP_139045_20170304_20170316_01_T1_B7.TIF 64089521\nlandsat-pds c1/L8/139/045/LC08_L1TP_139045_20170304_20170316_01_T1/LC08_L1TP_139045_20170304_20170316_01_T1_B7.TIF.ovr 8161327\nlandsat-pds c1/L8/139/045/LC08_L1TP_139045_20170304_20170316_01_T1/LC08_L1TP_139045_20170304_20170316_01_T1_B7_wrk.IMD 11597\nlandsat-pds c1/L8/139/045/LC08_L1TP_139045_20170304_20170316_01_T1/LC08_L1TP_139045_20170304_20170316_01_T1_B8.TIF 229147981\nlandsat-pds c1/L8/139/045/LC08_L1TP_139045_20170304_20170316_01_T1/LC08_L1TP_139045_20170304_20170316_01_T1_B8.TIF.ovr 28702520\nlandsat-pds c1/L8/139/045/LC08_L1TP_139045_20170304_20170316_01_T1/LC08_L1TP_139045_20170304_20170316_01_T1_B8_wrk.IMD 11597\nlandsat-pds c1/L8/139/045/LC08_L1TP_139045_20170304_20170316_01_T1/LC08_L1TP_139045_20170304_20170316_01_T1_B9.TIF 38573789\nlandsat-pds c1/L8/139/045/LC08_L1TP_139045_20170304_20170316_01_T1/LC08_L1TP_139045_20170304_20170316_01_T1_B9.TIF.ovr 3510656\nlandsat-pds c1/L8/139/045/LC08_L1TP_139045_20170304_20170316_01_T1/LC08_L1TP_139045_20170304_20170316_01_T1_B9_wrk.IMD 11597\nlandsat-pds c1/L8/139/045/LC08_L1TP_139045_20170304_20170316_01_T1/LC08_L1TP_139045_20170304_20170316_01_T1_BQA.TIF 1921058\nlandsat-pds c1/L8/139/045/LC08_L1TP_139045_20170304_20170316_01_T1/LC08_L1TP_139045_20170304_20170316_01_T1_BQA.TIF.ovr 367874\nlandsat-pds c1/L8/139/045/LC08_L1TP_139045_20170304_20170316_01_T1/LC08_L1TP_139045_20170304_20170316_01_T1_BQA_wrk.IMD 11597\nlandsat-pds c1/L8/139/045/LC08_L1TP_139045_20170304_20170316_01_T1/LC08_L1TP_139045_20170304_20170316_01_T1_MTL.json 10565\nlandsat-pds c1/L8/139/045/LC08_L1TP_139045_20170304_20170316_01_T1/LC08_L1TP_139045_20170304_20170316_01_T1_MTL.txt 8695\nlandsat-pds c1/L8/139/045/LC08_L1TP_139045_20170304_20170316_01_T1/LC08_L1TP_139045_20170304_20170316_01_T1_thumb_large.jpg 148845\nlandsat-pds c1/L8/139/045/LC08_L1TP_139045_20170304_20170316_01_T1/LC08_L1TP_139045_20170304_20170316_01_T1_thumb_small.jpg 8090\nlandsat-pds c1/L8/139/045/LC08_L1TP_139045_20170304_20170316_01_T1/index.html 5391\n" ] ], [ [ "## There's a web browser in GDAL\n\nEach of the .TIF 
files in the landsat-pds bucket is a georeferenced raster dataset formatted as a [cloud optimized GeoTIFF](https://trac.osgeo.org/gdal/wiki/CloudOptimizedGeoTIFF). A GeoTIFF is a TIFF with extra tags specifying spatial reference systems and coordinates and can be accompanied by reduced-resolution .ovr files as well as other auxiliary files.\n\nThere is a browser in the latest version of GDAL that can navigate these TIFFs and auxiliary files like a web browser navigates linked HTML documents. HTTP and the GeoTIFF format replace specialized geospatial raster services like [WCS](http://www.opengeospatial.org/standards/wcs) in the workflow presented in this notebook.\n\nBy using GDAL's browser, Rasterio can open and query cloud-optimized GeoTIFFs *without* prior download.\n\n## Quick overviews of GeoTIFFs\n\nRasterio affords us quickly-generated overviews of Landsat PDS GeoTIFFs. In the script below we will open a GeoTIFF stored on S3 (and identified by an AWS CLI style 's3://' URI), read a 10:1 overview of its data as a Numpy ndarray, and display the array. Note that we use no temporary file to do so.\n\nThe S3 object with the name ending in `B4.TIF` corresponds to the red band of the Landsat imager.", "_____no_output_____" ] ], [ [ "import rasterio\n\nwith rasterio.open(f's3://landsat-pds/{prefix}/{scene}_B4.TIF') as src:\n    arr = src.read(out_shape=(src.height//10, src.width//10))\n\n%matplotlib inline\n\nfrom matplotlib import pyplot as plt\n\nplt.imshow(arr[0])\nplt.show()", "_____no_output_____" ] ], [ [ "## A look backstage\n\nNot only is there a web browser in a Rasterio dataset object, it's a sophisticated web browser that uses HTTP range requests to download the least number of bytes required to execute `src.read()` with the given parameters. With a little extra configuration we can see exactly how few bytes.\n\nWe will read and display a 10:1 overview as we did for band 4. The S3 object in the listing above with the name ending in `B5.TIF` corresponds to the near-infrared (NIR) band of the Landsat imager.\n\nThe little extra configuration needed is to use a rasterio environment with `CPL_CURL_VERBOSE=True` as the context for opening and reading a Landsat PDS GeoTIFF.", "_____no_output_____" ] ], [ [ "with rasterio.Env(CPL_CURL_VERBOSE=True):\n    with rasterio.open(f's3://landsat-pds/{prefix}/{scene}_B5.TIF') as src:\n        arr = src.read(out_shape=(src.height//10, src.width//10))\n\nplt.imshow(arr[0])\nplt.show()", "_____no_output_____" ] ], [ [ "Within a `rasterio.Env` context with `CPL_CURL_VERBOSE=True`, the GDAL functions called by `rasterio.open()` and `src.read()` will print HTTP request and response details as you would see if you used `curl -v`.\n\nA dissected transcript follows. In the transcript, we can see that 5 HTTP requests are made to display the 10:1 overview image of band 5.\n\nFirst we see the request GDAL makes for only the first 16384 (2^14) bytes of the 63 MB B5.TIF GeoTIFF file.\nPlease note: I've removed sensitive headers from the transcript. 
Yours will be different.\n\n```\n* Couldn't find host landsat-pds.s3.amazonaws.com in the .netrc file; using defaults\n* Connection 1 seems to be dead!\n* Closing connection 1\n* Trying 52.218.160.34...\n* TCP_NODELAY set\n* Connected to landsat-pds.s3.amazonaws.com (52.218.160.34) port 443 (#2)\n* SSL re-using session ID\n* TLS 1.2 connection using TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n* Server certificate: *.s3.amazonaws.com\n* Server certificate: DigiCert Baltimore CA-2 G2\n* Server certificate: Baltimore CyberTrust Root\n> GET /c1/L8/139/045/LC08_L1TP_139045_20170304_20170316_01_T1/LC08_L1TP_139045_20170304_20170316_01_T1_B5.TIF HTTP/1.1\nHost: landsat-pds.s3.amazonaws.com\nRange: bytes=0-16383\nAccept: */*\n\n< HTTP/1.1 206 Partial Content\n< Date: Thu, 07 Dec 2017 17:02:07 GMT\n< Last-Modified: Sat, 29 Apr 2017 11:24:24 GMT\n< ETag: \"8dfaba5cd40136c31959b411038760ab\"\n< Accept-Ranges: bytes\n< Content-Range: bytes 0-16383/63037553\n< Content-Type: image/tiff\n< Content-Length: 16384\n< Server: AmazonS3\n<\n* Connection #2 to host landsat-pds.s3.amazonaws.com left intact\n```\n\nAt this point you could get the `profile` attribute of the dataset object we've named `src` and GDAL wouldn't need to download any more bytes to provide the dataset metadata. Thanks to the TIFF format's consolidation of metadata in the head of the file and [HTTP range requests](https://tools.ietf.org/html/rfc7233), we only need to read 0.03% of the file to know its dimensions, data type, spatial extent, and coordinate reference system.\n\nCalling `src.read()` triggers 3 more HTTP requests by GDAL. The third is for the first 16384 (2^14) bytes of the 8 MB .ovr file that GDAL discovered when it fetched the directory listing. In our case, the array returned by ``src.read()`` will come entirely from the .ovr file. For a reference on GeoTIFF overviews, see http://www.gdal.org/frmt_gtiff.html.\n\n```\n* Couldn't find host landsat-pds.s3.amazonaws.com in the .netrc file; using defaults\n* Found bundle for host landsat-pds.s3.amazonaws.com: 0x109a5a9a0 [can pipeline]\n* Re-using existing connection! (#2) with host landsat-pds.s3.amazonaws.com\n* Connected to landsat-pds.s3.amazonaws.com (52.218.160.34) port 443 (#2)\n> GET /c1/L8/139/045/LC08_L1TP_139045_20170304_20170316_01_T1/LC08_L1TP_139045_20170304_20170316_01_T1_B5.TIF.ovr HTTP/1.1\nHost: landsat-pds.s3.amazonaws.com\nRange: bytes=0-16383\nAccept: */*\n\n< HTTP/1.1 206 Partial Content\n< Date: Thu, 07 Dec 2017 17:02:07 GMT\n< Last-Modified: Sat, 29 Apr 2017 11:23:56 GMT\n< ETag: \"d6777167ccd6141537b5e9d71abdcfeb\"\n< Accept-Ranges: bytes\n< Content-Range: bytes 0-16383/8033890\n< Content-Type: application/octet-stream\n< Content-Length: 16384\n< Server: AmazonS3\n<\n* Connection #2 to host landsat-pds.s3.amazonaws.com left intact\n```\n\nThe fourth and fifth requests are for overview imagery stored near the tail of the .ovr file, as you can see in the request and response `Content-Range` headers.\n\n```\n* Couldn't find host landsat-pds.s3.amazonaws.com in the .netrc file; using defaults\n* Found bundle for host landsat-pds.s3.amazonaws.com: 0x109a5a9a0 [can pipeline]\n* Re-using existing connection! 
(#2) with host landsat-pds.s3.amazonaws.com\n* Connected to landsat-pds.s3.amazonaws.com (52.218.160.34) port 443 (#2)\n> GET /c1/L8/139/045/LC08_L1TP_139045_20170304_20170316_01_T1/LC08_L1TP_139045_20170304_20170316_01_T1_B5.TIF.ovr HTTP/1.1\nHost: landsat-pds.s3.amazonaws.com\nRange: bytes=7110656-7438335\nAccept: */*\n\n< HTTP/1.1 206 Partial Content\n< Date: Thu, 07 Dec 2017 17:02:07 GMT\n< Last-Modified: Sat, 29 Apr 2017 11:23:56 GMT\n< ETag: \"d6777167ccd6141537b5e9d71abdcfeb\"\n< Accept-Ranges: bytes\n< Content-Range: bytes 7110656-7438335/8033890\n< Content-Type: application/octet-stream\n< Content-Length: 327680\n< Server: AmazonS3\n<\n* Connection #2 to host landsat-pds.s3.amazonaws.com left intact\n* Couldn't find host landsat-pds.s3.amazonaws.com in the .netrc file; using defaults\n* Found bundle for host landsat-pds.s3.amazonaws.com: 0x109a5a9a0 [can pipeline]\n* Re-using existing connection! (#2) with host landsat-pds.s3.amazonaws.com\n* Connected to landsat-pds.s3.amazonaws.com (52.218.160.34) port 443 (#2)\n> GET /c1/L8/139/045/LC08_L1TP_139045_20170304_20170316_01_T1/LC08_L1TP_139045_20170304_20170316_01_T1_B5.TIF.ovr HTTP/1.1\nHost: landsat-pds.s3.amazonaws.com\nRange: bytes=7438336-8033889\nAccept: */*\n\n< HTTP/1.1 206 Partial Content\n< Date: Thu, 07 Dec 2017 17:02:07 GMT\n< Last-Modified: Sat, 29 Apr 2017 11:23:56 GMT\n< ETag: \"d6777167ccd6141537b5e9d71abdcfeb\"\n< Accept-Ranges: bytes\n< Content-Range: bytes 7438336-8033889/8033890\n< Content-Type: application/octet-stream\n< Content-Length: 595554\n< Server: AmazonS3\n<\n* Connection #2 to host landsat-pds.s3.amazonaws.com left intact\n```\n\nWe got a 10:1 overview of one band of one Landsat scene and downloaded only 1/9th of its .ovr file – which is itself only about 1/8th of the target file.", "_____no_output_____" ], [ "## Quick subsets of GeoTIFFs\n\nHaving seen how GeoTIFF overviews work in the cloud, let's look at access to full resolution subsets of imagery.\n\nHere we'll read imagery from a random block of the GeoTIFF. 
Please note that blocks near the south and east edges of the GeoTIFF may be smaller than 512 x 512 pixels.", "_____no_output_____" ] ], [ [ "import random\n\nwith rasterio.Env(CPL_CURL_VERBOSE=True):\n    with rasterio.open(f's3://landsat-pds/{prefix}/{scene}_B5.TIF') as src:\n        ij, window = random.choice(list(src.block_windows()))\n        print(ij, window)\n        arr = src.read(window=window)\n\nplt.imshow(arr[0])\nplt.show()", "(5, 7) Window(col_off=3584, row_off=2560, width=512, height=512)\n" ] ], [ [ "## Backstage again\n\nHere is the transcript.\n\n```\n* Hostname landsat-pds.s3.amazonaws.com was found in DNS cache\n* Trying 52.218.208.122...\n* TCP_NODELAY set\n* Connected to landsat-pds.s3.amazonaws.com (52.218.208.122) port 443 (#5)\n* SSL re-using session ID\n* TLS 1.2 connection using TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n* Server certificate: *.s3.amazonaws.com\n* Server certificate: DigiCert Baltimore CA-2 G2\n* Server certificate: Baltimore CyberTrust Root\n> GET /c1/L8/139/045/LC08_L1TP_139045_20170304_20170316_01_T1/LC08_L1TP_139045_20170304_20170316_01_T1_B5.TIF HTTP/1.1\nHost: landsat-pds.s3.amazonaws.com\nRange: bytes=44646400-44957695\nAccept: */*\n\n< HTTP/1.1 206 Partial Content\n< Date: Thu, 07 Dec 2017 18:17:20 GMT\n< Last-Modified: Sat, 29 Apr 2017 11:24:24 GMT\n< ETag: \"8dfaba5cd40136c31959b411038760ab\"\n< Accept-Ranges: bytes\n< Content-Range: bytes 44646400-44957695/63037553\n< Content-Type: image/tiff\n< Content-Length: 311296\n< Server: AmazonS3\n<\n* Connection #5 to host landsat-pds.s3.amazonaws.com left intact\n```\n\nWe've downloaded only 311 KB (every block of the GeoTIFF is DEFLATE compressed) of a 63 MB file to get that piece of data.\n\nGDAL's GeoTIFF browser caches the data in the fetched byte ranges, so you may not see any curl transcripts from subsequent requests. Finer control of caching will be a feature of GDAL 2.3 and future versions of Rasterio.", "_____no_output_____" ], [ "## Lazy warping: WarpedVRT\n\nThe Landsat PDS data is georeferenced using the [Universal Transverse Mercator (UTM) system](https://en.wikipedia.org/wiki/Universal_Transverse_Mercator_coordinate_system). To use it in another system such as Web Mercator (also known as 'epsg:3857', as in Google Maps, Mapbox, etc.) it must be reprojected or *warped*.\n\nRasterio has an abstraction for reprojection that also does not require prior download of the GeoTIFF file and, as with the previously discussed patterns, fetches the least number of bytes required to fill the reading window.\n\nIn the script below we fetch imagery from the B5.TIF (georeferenced to UTM Zone 45) that intersects with the zoom level 11 Web Mercator tile at the center of the GeoTIFF. We will use functions from the mercantile module to get the bounds of that tile.", "_____no_output_____" ] ], [ [ "import mercantile\n\nfrom rasterio.vrt import WarpedVRT\n\nwith rasterio.open(f's3://landsat-pds/{prefix}/{scene}_B5.TIF') as src:\n\n    lng, lat = src.lnglat()\n    tile = mercantile.tile(lng, lat, 11)\n    merc_bounds = mercantile.xy_bounds(tile)\n\n    with WarpedVRT(src, dst_crs='epsg:3857') as vrt:\n\n        window = vrt.window(*merc_bounds)\n        arr_transform = vrt.window_transform(window)\n        arr = vrt.read(window=window)\n\nplt.imshow(arr[0])\nplt.show()", "_____no_output_____" ] ], [ [ "## Formatted files in RAM: MemoryFile\n\nRaster data processing often involves temporary files. 
For example, in making a set of Web Mercator tiles from a differently projected raster dataset we may use a temporary GeoTIFF dataset to hold the result of a warp operation and then transform this into a JPEG, PNG, or WebP for use on the web.\n\nPython has a `NamedTemporaryFile` class in its `tempfile` module that is well suited for this task. It has a visible name in the filesystem, which GDAL requires, and is automatically cleaned up when no longer used. Its usage in a raster-processing program is something like we see in the script below.", "_____no_output_____" ] ], [ [ "from tempfile import NamedTemporaryFile\n\ncount, height, width = arr.shape\ndtype = arr.dtype\n\nwith NamedTemporaryFile() as temp:\n    with rasterio.open(temp.name, 'w', driver='GTiff', dtype=dtype,\n                       count=count, height=height, width=width,\n                       transform=arr_transform) as dst:\n        dst.write(arr)\n\n    temp_bytes = temp.read()", "_____no_output_____" ] ], [ [ "We can see an indicator that we have a TIFF file in `temp` if we print the first few bytes.", "_____no_output_____" ] ], [ [ "print(temp_bytes[:20])", "b'II*\\x00\\x08\\x00\\x00\\x00\\r\\x00\\x00\\x01\\x03\\x00\\x01\\x00\\x00\\x00\\\\\\x02'\n" ] ], [ [ "Let’s say you want to write a program like this that will run on a computer with a very limited filesystem or no filesystem at all. Python has an in-memory binary file-like class, `io.BytesIO`, but, unlike `NamedTemporaryFile`, instances of `BytesIO` lack the name GDAL needs to access data. To solve this problem, Rasterio provides a new class, `io.MemoryFile`, a drop-in replacement for Python’s `NamedTemporaryFile` which keeps its bytes in a virtual file in an in-memory filesystem that GDAL can access, not on disk.\n\nThe usage of `MemoryFile` is modeled after Python’s `zipfile.ZipFile` class. 
`MemoryFile.open` returns a dataset object, just as `rasterio.open` does.", "_____no_output_____" ] ], [ [ "from rasterio.io import MemoryFile\n\nwith MemoryFile() as temp:\n    with temp.open(driver='GTiff', dtype=dtype,\n                   count=count, height=height, width=width,\n                   transform=arr_transform) as dst:\n        dst.write(arr)\n    tiff_bytes = temp.read()\n\nprint(tiff_bytes[:20])", "b'II*\\x00\\x08\\x00\\x00\\x00\\r\\x00\\x00\\x01\\x03\\x00\\x01\\x00\\x00\\x00\\\\\\x02'\n" ] ], [ [ "A `MemoryFile` can also be used to access datasets contained in a stream of bytes.", "_____no_output_____" ] ], [ [ "with MemoryFile(temp_bytes) as temp:\n    with temp.open() as src:\n        print(src.profile)", "{'driver': 'GTiff', 'dtype': 'uint16', 'nodata': None, 'width': 604, 'height': 604, 'count': 1, 'crs': None, 'transform': Affine(32.374971311808814, 0.0, 9666532.345057327,\n       0.0, -32.374971311808814, 2485120.663606849), 'tiled': False, 'interleave': 'band'}\n" ] ], [ [ "Below is an example of downloading an entire Landsat PDS GeoTIFF to a stream of bytes and then opening the stream of bytes.", "_____no_output_____" ] ], [ [ "from io import BytesIO\n\ns3 = boto3.resource('s3')\nbucket = s3.Bucket('landsat-pds')\nobj = bucket.Object(f'{prefix}/{scene}_B4.TIF')\n\nwith BytesIO() as temp:\n    obj.download_fileobj(temp)\n    \n    temp.seek(0)\n    with MemoryFile(temp) as memfile:\n        with memfile.open() as src:\n            print(src.profile)", "{'driver': 'GTiff', 'dtype': 'uint16', 'nodata': None, 'width': 7611, 'height': 7771, 'count': 1, 'crs': CRS({'init': 'epsg:32645'}), 'transform': Affine(30.0, 0.0, 382185.0,\n       0.0, -30.0, 2512515.0), 'blockxsize': 512, 'blockysize': 512, 'tiled': True, 'compress': 'deflate', 'interleave': 'band'}\n" ] ], [ [ "## Zip files in memory\n\nRasterio can read datasets within zipped streams of bytes. Zipfiles are commonly used in the GIS domain to package legacy multi-file formats like shapefiles (a shapefile is actually an ensemble of .shp, .dbf, .shx, .prj, and other files) or virtual raster files (VRT) and the rasters they reference.\n\nBelow we'll fetch a VRT and JPEG pair from the Rasterio GitHub repo and package them in an in-memory zip file to simulate a zip file such as one that a server might accept as an upload.", "_____no_output_____" ] ], [ [ "import io\nimport zipfile\n\nimport requests\n\ntemp = io.BytesIO()\n\nwith zipfile.ZipFile(temp, 'w') as pkg:\n    res = requests.get('https://raw.githubusercontent.com/mapbox/rasterio/master/tests/data/389225main_sw_1965_1024.jpg')\n    pkg.writestr('389225main_sw_1965_1024.jpg', res.content)\n    res = requests.get('https://raw.githubusercontent.com/mapbox/rasterio/master/tests/data/white-gemini-iv.vrt')\n    pkg.writestr('white-gemini-iv.vrt', res.content)", "_____no_output_____" ] ], [ [ "We then read the zipped VRT file. You must rewind the `BytesIO` file because its current position has been left at its end.", "_____no_output_____" ] ], [ [ "from rasterio.io import ZipMemoryFile\n\ntemp.seek(0)\nwith ZipMemoryFile(temp) as zipmemfile:\n    with zipmemfile.open('white-gemini-iv.vrt') as src:\n        rgb = src.read()\n\nimport numpy\n\nplt.imshow(numpy.rollaxis(rgb, 0, 3))\nplt.show()", "/Users/sean/envs/rio-blog-post/lib/python3.6/site-packages/rasterio/io.py:157: NotGeoreferencedWarning: Dataset has no geotransform set. 
Default transform will be applied (Affine.identity())\n return DatasetReader(vsi_path, driver=driver, **kwargs)\n" ] ], [ [ "Using code like that above, you can build web services that accept zipped shapefiles or zipped VRT and rasters and process them without extracting them to your filesystem.", "_____no_output_____" ], [ "## Conclusion\n\nRasterio turns 5 GDAL features into solid, idiomatic Python patterns suited for building applications that run in the cloud.\n\n1. Quick overviews of GeoTIFFs in the cloud\n2. Quick subsets of GeoTIFFs in the cloud\n3. Lazy warping of GeoTIFFs in the cloud\n4. Formatted files in RAM\n5. Datasets in zipped streams", "_____no_output_____" ], [ "## Acknowledgements\n\nGDAL's [virtual file systems](http://www.gdal.org/gdal_virtual_file_systems.html), upon which Rasterio's `MemoryFile` and `ZipMemoryFile` are based, and virtually warped dataset feature, used by `WarpedVRT`, were written by Frank Warmerdam.\n\nEven Rouault is the author of GDAL's curl-based HTTP virtual filesystem and the GeoTIFF \"browser\" that powers cloud-optimized GeoTIFFs in Rasterio. Mapbox is proud to be a sponsor of early work on GDAL's browser.\n\nRasterio's binary wheels are made possible by the tools and wisdom of the Python wheel-builders community: https://mail.python.org/mailman/listinfo/wheel-builders. Matthew Brett's [delocate](https://github.com/matthew-brett/delocate) has been particularly useful.\n\nRasterio is written by programmers at companies and organizations such as Mapbox, the Conservation Biology Institute, Planet, the U.S. Geological Survey, Continuum Analytics, and DigitalGlobe. See https://github.com/mapbox/rasterio/graphs/contributors for the complete list of contributors.", "_____no_output_____" ] ] ]
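The five patterns above compose naturally. The sketch below, a hypothetical `render_tile` helper that is not part of the notebook, warps one Web Mercator tile of a remote GeoTIFF and serializes it entirely in memory; the uint8 rescale and the GTiff output driver are illustrative choices.

```python
import numpy as np
import mercantile
import rasterio
from rasterio.io import MemoryFile
from rasterio.vrt import WarpedVRT

def render_tile(url, zoom=11, size=256):
    """Return one warped Web Mercator tile of `url` as in-memory GeoTIFF bytes."""
    with rasterio.open(url) as src:                       # range requests, no prior download
        lng, lat = src.lnglat()
        bounds = mercantile.xy_bounds(mercantile.tile(lng, lat, zoom))
        with WarpedVRT(src, dst_crs='epsg:3857') as vrt:  # lazy warping
            window = vrt.window(*bounds)
            arr = vrt.read(1, window=window, out_shape=(size, size))
    # rescale the uint16 band to uint8 for display-oriented use
    arr = np.interp(arr, (arr.min(), arr.max()), (0, 255)).astype('uint8')
    with MemoryFile() as memfile:                         # formatted file in RAM
        with memfile.open(driver='GTiff', dtype='uint8', count=1,
                          height=size, width=size) as dst:
            dst.write(arr, 1)
        return memfile.read()

# tile_bytes = render_tile(f's3://landsat-pds/{prefix}/{scene}_B5.TIF')
```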
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ] ]
e7d0217beb496ff6d7b2518e3850e7e960c8bf1e
8,176
ipynb
Jupyter Notebook
03- Calling Functions.ipynb
HaoRay/cookie-python
e5892bc346a4731f8c5165b27e2fe6cd48b6a2e2
[ "CNRI-Python" ]
21
2015-02-07T05:00:19.000Z
2022-01-19T10:36:07.000Z
03- Calling Functions.ipynb
HaoRay/cookie-python
e5892bc346a4731f8c5165b27e2fe6cd48b6a2e2
[ "CNRI-Python" ]
1
2016-04-17T08:49:19.000Z
2016-04-17T08:49:19.000Z
03- Calling Functions.ipynb
howardabrams/cookie-python
e5892bc346a4731f8c5165b27e2fe6cd48b6a2e2
[ "CNRI-Python" ]
16
2015-09-15T09:57:48.000Z
2020-11-22T17:30:44.000Z
35.547826
989
0.563234
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
e7d022458eb200de230f592721fd51cc41a55d53
3,497
ipynb
Jupyter Notebook
numpy_cosine_similarity.ipynb
Wanibzh29/hello-world-python
66f930d84801dbb074d96ba785904269bab05895
[ "MIT" ]
null
null
null
numpy_cosine_similarity.ipynb
Wanibzh29/hello-world-python
66f930d84801dbb074d96ba785904269bab05895
[ "MIT" ]
1
2020-06-30T08:09:32.000Z
2020-06-30T08:09:32.000Z
numpy_cosine_similarity.ipynb
Wanibzh29/hello-world-python
66f930d84801dbb074d96ba785904269bab05895
[ "MIT" ]
1
2020-04-30T07:54:48.000Z
2020-04-30T07:54:48.000Z
18.213542
73
0.426079
[ [ [ "import numpy as np\nfrom numpy import dot\nfrom numpy.linalg import norm", "_____no_output_____" ], [ "data = { \n 'A': [1, 1, 1, 0, 0, 0], \n 'B': [1, 1, 1, 0, 0, 0], \n 'C': [1, 1, 0, 1, 0, 0], \n 'D': [1, 0, 0, 1, 1, 0], \n 'E': [0, 0, 0, 1, 1, 1], \n}", "_____no_output_____" ], [ "A = data['A']\nB = data['B']\nC = data['C']\nD = data['D']\nE = data['E']", "_____no_output_____" ], [ "A", "_____no_output_____" ], [ "norm(A)", "_____no_output_____" ], [ "B", "_____no_output_____" ], [ "dot(A, B)", "_____no_output_____" ], [ "print(\"cos(A, B)=%.2f\" % np.divide(dot(A, B), norm(A) * norm(B)))", "cos(A, B)=1.00\n" ], [ "cos_similarity = dot(A, B)/(norm(A)*norm(B))\nprint(\"cos(A, B)=%.2f\" % cos_similarity)\n\ncos_similarity = dot(A, C)/(norm(A)*norm(C))\nprint(\"cos(A, C)=%.2f\" % cos_similarity)\n\ncos_similarity = dot(A, D)/(norm(A)*norm(D))\nprint(\"cos(A, D)=%.2f\" % cos_similarity)\n\ncos_similarity = dot(A, E)/(norm(A)*norm(E))\nprint(\"cos(A, E)=%.2f\" % cos_similarity)", "cos(A, B)=1.00\ncos(A, C)=0.67\ncos(A, D)=0.33\ncos(A, E)=0.00\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7d0226c7805d6e8a579fe4498f8ac7b77c69db0
52,278
ipynb
Jupyter Notebook
08 ML0101EN-Clas-SVM-cancer-py-v1.ipynb
barathevergreen/Machine_Learning_with_python_sklearn_scipy_IBM_Projects
4b20462b6d8588ef2f5f015cb4be2d46b544b4f2
[ "BSD-4-Clause-UC" ]
null
null
null
08 ML0101EN-Clas-SVM-cancer-py-v1.ipynb
barathevergreen/Machine_Learning_with_python_sklearn_scipy_IBM_Projects
4b20462b6d8588ef2f5f015cb4be2d46b544b4f2
[ "BSD-4-Clause-UC" ]
null
null
null
08 ML0101EN-Clas-SVM-cancer-py-v1.ipynb
barathevergreen/Machine_Learning_with_python_sklearn_scipy_IBM_Projects
4b20462b6d8588ef2f5f015cb4be2d46b544b4f2
[ "BSD-4-Clause-UC" ]
null
null
null
61.359155
14,792
0.738609
[ [ [ "<a href=\"https://www.bigdatauniversity.com\"><img src=\"https://ibm.box.com/shared/static/cw2c7r3o20w9zn8gkecaeyjhgw3xdgbj.png\" width=\"400\" align=\"center\"></a>\n\n<h1 align=center><font size=\"5\"> SVM (Support Vector Machines)</font></h1>", "_____no_output_____" ], [ "In this notebook, you will use SVM (Support Vector Machines) to build and train a model using human cell records, and classify cells to whether the samples are benign or malignant.\n\nSVM works by mapping data to a high-dimensional feature space so that data points can be categorized, even when the data are not otherwise linearly separable. A separator between the categories is found, then the data is transformed in such a way that the separator could be drawn as a hyperplane. Following this, characteristics of new data can be used to predict the group to which a new record should belong.", "_____no_output_____" ], [ "<h1>Table of contents</h1>\n\n<div class=\"alert alert-block alert-info\" style=\"margin-top: 20px\">\n <ol>\n <li><a href=\"#load_dataset\">Load the Cancer data</a></li>\n <li><a href=\"#modeling\">Modeling</a></li>\n <li><a href=\"#evaluation\">Evaluation</a></li>\n <li><a href=\"#practice\">Practice</a></li>\n </ol>\n</div>\n<br>\n<hr>", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport pylab as pl\nimport numpy as np\nimport scipy.optimize as opt\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import train_test_split\n%matplotlib inline \nimport matplotlib.pyplot as plt", "_____no_output_____" ] ], [ [ "<h2 id=\"load_dataset\">Load the Cancer data</h2>\nThe example is based on a dataset that is publicly available from the UCI Machine Learning Repository (Asuncion and Newman, 2007)[http://mlearn.ics.uci.edu/MLRepository.html]. The dataset consists of several hundred human cell sample records, each of which contains the values of a set of cell characteristics. The fields in each record are:\n\n|Field name|Description|\n|--- |--- |\n|ID|Clump thickness|\n|Clump|Clump thickness|\n|UnifSize|Uniformity of cell size|\n|UnifShape|Uniformity of cell shape|\n|MargAdh|Marginal adhesion|\n|SingEpiSize|Single epithelial cell size|\n|BareNuc|Bare nuclei|\n|BlandChrom|Bland chromatin|\n|NormNucl|Normal nucleoli|\n|Mit|Mitoses|\n|Class|Benign or malignant|\n\n<br>\n<br>\n\nFor the purposes of this example, we're using a dataset that has a relatively small number of predictors in each record. To download the data, we will use `!wget` to download it from IBM Object Storage. \n__Did you know?__ When it comes to Machine Learning, you will likely be working with large datasets. As a business, where can you host your data? IBM is offering a unique opportunity for businesses, with 10 Tb of IBM Cloud Object Storage: [Sign up now for free](http://cocl.us/ML0101EN-IBM-Offer-CC)", "_____no_output_____" ] ], [ [ "#Click here and press Shift+Enter\n!pip install wget\n!wget -O cell_samples.csv https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/cell_samples.csv", "Requirement already satisfied: wget in /Users/baraths/opt/anaconda3/lib/python3.7/site-packages (3.2)\n/bin/sh: wget: command not found\n" ] ], [ [ "### Load Data From CSV File ", "_____no_output_____" ] ], [ [ "cell_df = pd.read_csv(\"cell_samples (1).csv\")\ncell_df.head()", "_____no_output_____" ] ], [ [ "The ID field contains the patient identifiers. The characteristics of the cell samples from each patient are contained in fields Clump to Mit. 
The values are graded from 1 to 10, with 1 being the closest to benign.\n\nThe Class field contains the diagnosis, as confirmed by separate medical procedures, as to whether the samples are benign (value = 2) or malignant (value = 4).\n\nLet's look at the distribution of the classes based on Clump thickness and Uniformity of cell size:", "_____no_output_____" ] ], [ [ "#Taking only 50 values to understand\n#Giving values in ax and applying ax in another plot to see together (Axes - oo style plot)\n#Give Label to automatically show legend\n\n#only Malignant(Class = 4)\nax = cell_df[cell_df['Class'] == 4][0:50].plot(kind='scatter', x='Clump', y='UnifSize', color='DarkBlue', label='malignant')\n\n#only benign(Class = 2)\n#plot together\ncell_df[cell_df['Class'] == 2][0:50].plot(kind='scatter', x='Clump', y='UnifSize', color='Yellow', label='benign', ax=ax)\nplt.show()", "_____no_output_____" ] ], [ [ "## Data pre-processing and selection", "_____no_output_____" ], [ "Let's first look at the columns' data types:", "_____no_output_____" ] ], [ [ "cell_df.dtypes", "_____no_output_____" ] ], [ [ "It looks like the __BareNuc__ column includes some values that are not numerical. We can convert them to int wherever values are available:\n\n__Note:__\nerrors kwarg in to_numeric:\n\n{‘ignore’, ‘raise’, ‘coerce’}, default ‘raise’\n\nIf ‘raise’, then invalid parsing will raise an exception.\n\nIf ‘coerce’, then invalid parsing will be set as __NaN__.\n\nIf ‘ignore’, then invalid parsing will return the input.", "_____no_output_____" ] ], [ [ "#Convert to numeric and put NaN for values wherever not present:\ncell_df = cell_df[pd.to_numeric(cell_df['BareNuc'], errors='coerce').notnull()]\ncell_df['BareNuc'] = cell_df['BareNuc'].astype('int')\ncell_df.dtypes", "_____no_output_____" ], [ "feature_df = cell_df[['Clump', 'UnifSize', 'UnifShape', 'MargAdh', 'SingEpiSize', 'BareNuc', 'BlandChrom', 'NormNucl', 'Mit']]\nX = np.asarray(feature_df)\nX[0:5]", "_____no_output_____" ] ], [ [ "We want the model to predict the value of Class (that is, benign (=2) or malignant (=4)). As this field can have one of only two possible values, we need to change its measurement level to reflect this.", "_____no_output_____" ] ], [ [ "cell_df['Class'] = cell_df['Class'].astype('int')\ny = np.asarray(cell_df['Class'])\ny [0:5]", "_____no_output_____" ] ], [ [ "## Train/Test dataset", "_____no_output_____" ], [ "Okay, we split our dataset into train and test sets:", "_____no_output_____" ] ], [ [ "X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=4)\nprint ('Train set:', X_train.shape,  y_train.shape)\nprint ('Test set:', X_test.shape,  y_test.shape)", "Train set: (546, 9) (546,)\nTest set: (137, 9) (137,)\n" ] ], [ [ "<h2 id=\"modeling\">Modeling (SVM with Scikit-learn)</h2>", "_____no_output_____" ], [ "The SVM algorithm offers a choice of kernel functions for performing its processing. Basically, mapping data into a higher dimensional space is called kernelling. The mathematical function used for the transformation is known as the kernel function, and can be of different types, such as:\n\n    1.Linear\n    2.Polynomial\n    3.Radial basis function (RBF)\n    4.Sigmoid\nEach of these functions has its characteristics, its pros and cons, and its equation, but as there's no easy way of knowing which function performs best with any given dataset, we usually choose different functions in turn and compare the results. 
Let's just use the default, RBF (Radial Basis Function) for this lab.", "_____no_output_____" ] ], [ [ "#We use Support vector classifier from Support Vector Machine:\nfrom sklearn import svm\nclf = svm.SVC(kernel='rbf') # even if you don't specify, the default is 'rbf'\nclf.fit(X_train, y_train) ", "_____no_output_____" ] ], [ [ "After being fitted, the model can then be used to predict new values:", "_____no_output_____" ] ], [ [ "yhat = clf.predict(X_test)\nyhat [0:5]", "_____no_output_____" ] ], [ [ "<h2 id=\"evaluation\">Evaluation</h2>", "_____no_output_____" ] ], [ [ "from sklearn.metrics import classification_report, confusion_matrix\nimport itertools", "_____no_output_____" ], [ "def plot_confusion_matrix(cm, classes,\n                          normalize=False,\n                          title='Confusion matrix',\n                          cmap=plt.cm.Blues):\n    \"\"\"\n    This function prints and plots the confusion matrix.\n    Normalization can be applied by setting `normalize=True`.\n    \"\"\"\n    if normalize:\n        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n        print(\"Normalized confusion matrix\")\n    else:\n        print('Confusion matrix, without normalization')\n\n    print(cm)\n\n    plt.imshow(cm, interpolation='nearest', cmap=cmap)\n    plt.title(title)\n    plt.colorbar()\n    tick_marks = np.arange(len(classes))\n    plt.xticks(tick_marks, classes, rotation=45)\n    plt.yticks(tick_marks, classes)\n\n    fmt = '.2f' if normalize else 'd'\n    thresh = cm.max() / 2.\n    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n        plt.text(j, i, format(cm[i, j], fmt),\n                 horizontalalignment=\"center\",\n                 color=\"white\" if cm[i, j] > thresh else \"black\")\n\n    plt.tight_layout()\n    plt.ylabel('True label')\n    plt.xlabel('Predicted label')", "_____no_output_____" ], [ "# Compute confusion matrix\ncnf_matrix = confusion_matrix(y_test, yhat, labels=[2,4])\nnp.set_printoptions(precision=2)\n\nprint (classification_report(y_test, yhat))\n\n# Plot non-normalized confusion matrix\nplt.figure()\nplot_confusion_matrix(cnf_matrix, classes=['Benign(2)','Malignant(4)'],normalize= False,  title='Confusion matrix')", "              precision    recall  f1-score   support\n\n           2       1.00      0.94      0.97        90\n           4       0.90      1.00      0.95        47\n\n    accuracy                           0.96       137\n   macro avg       0.95      0.97      0.96       137\nweighted avg       0.97      0.96      0.96       137\n\nConfusion matrix, without normalization\n[[85  5]\n [ 0 47]]\n" ] ], [ [ "You can also easily use the __f1_score__ from the sklearn library:", "_____no_output_____" ] ], [ [ "from sklearn.metrics import f1_score\nf1_score(y_test, yhat, average='weighted') ", "_____no_output_____" ] ], [ [ "Let's try the jaccard index for accuracy:", "_____no_output_____" ] ], [ [ "from sklearn.metrics import jaccard_similarity_score\njaccard_similarity_score(y_test, yhat)", "/Users/baraths/opt/anaconda3/lib/python3.7/site-packages/sklearn/metrics/_classification.py:664: FutureWarning: jaccard_similarity_score has been deprecated and replaced with jaccard_score. It will be removed in version 0.23. This implementation has surprising behavior for binary and multiclass classification tasks.\n  FutureWarning)\n" ] ], [ [ "<h2 id=\"practice\">Practice</h2>\nCan you rebuild the model, but this time with a __linear__ kernel? You can use the __kernel='linear'__ option when you define the svm. 
How does the accuracy change with the new kernel function?", "_____no_output_____" ] ], [ [ "# write your code here\nclf2 = svm.SVC(kernel='linear')\nclf2.fit(X_train, y_train) \nyhat2 = clf2.predict(X_test)\nprint(\"Avg F1-score: %.4f\" % f1_score(y_test, yhat2, average='weighted'))\nprint(\"Jaccard score: %.4f\" % jaccard_similarity_score(y_test, yhat2))", "Avg F1-score: 0.9639\nJaccard score: 0.9635\n" ] ], [ [ "Double-click __here__ for the solution.\n\n<!-- Your answer is below:\n    \nclf2 = svm.SVC(kernel='linear')\nclf2.fit(X_train, y_train) \nyhat2 = clf2.predict(X_test)\nprint(\"Avg F1-score: %.4f\" % f1_score(y_test, yhat2, average='weighted'))\nprint(\"Jaccard score: %.4f\" % jaccard_similarity_score(y_test, yhat2))\n\n-->", "_____no_output_____" ], [ "<h2>Want to learn more?</h2>\n\nIBM SPSS Modeler is a comprehensive analytics platform that has many machine learning algorithms. It has been designed to bring predictive intelligence to decisions made by individuals, by groups, by systems – by your enterprise as a whole. A free trial is available through this course, available here: <a href=\"http://cocl.us/ML0101EN-SPSSModeler\">SPSS Modeler</a>\n\nAlso, you can use Watson Studio to run these notebooks faster with bigger datasets. Watson Studio is IBM's leading cloud solution for data scientists, built by data scientists. With Jupyter notebooks, RStudio, Apache Spark and popular libraries pre-packaged in the cloud, Watson Studio enables data scientists to collaborate on their projects without having to install anything. Join the fast-growing community of Watson Studio users today with a free account at <a href=\"https://cocl.us/ML0101EN_DSX\">Watson Studio</a>\n\n<h3>Thanks for completing this lesson!</h3>\n\n<h4>Author: <a href=\"https://ca.linkedin.com/in/saeedaghabozorgi\">Saeed Aghabozorgi</a></h4>\n<p><a href=\"https://ca.linkedin.com/in/saeedaghabozorgi\">Saeed Aghabozorgi</a>, PhD is a Data Scientist in IBM with a track record of developing enterprise level applications that substantially increases clients’ ability to turn data into actionable knowledge. He is a researcher in data mining field and expert in developing advanced analytic methods like machine learning and statistical modelling on large datasets.</p>\n\n<hr>\n\n<p>Copyright &copy; 2018 <a href=\"https://cocl.us/DX0108EN_CC\">Cognitive Class</a>. This notebook and its source code are released under the terms of the <a href=\"https://bigdatauniversity.com/mit-license/\">MIT License</a>.</p>", "_____no_output_____" ] ] ]
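Since the lab suggests trying kernels in turn, here is a small sketch looping over all four kernel functions listed above with default hyperparameters; it is an illustration rather than a tuned benchmark, and it reuses the train/test split already defined.

```python
for kernel in ('linear', 'poly', 'rbf', 'sigmoid'):
    model = svm.SVC(kernel=kernel)   # all other hyperparameters left at their defaults
    model.fit(X_train, y_train)
    pred = model.predict(X_test)
    print("%-8s avg F1-score: %.4f" % (kernel, f1_score(y_test, pred, average='weighted')))
```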
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
e7d02c524c6ed838f568b57b31d9c7949ac0383c
21,773
ipynb
Jupyter Notebook
4_Python for Data Science, AI & Development/PY0101EN-2-3-Dictionaries.ipynb
lebinh97/IBM-DataScience-Capstone
efe0881be047fc2b0518f80cf42c059280598187
[ "FSFAP" ]
35
2021-07-29T16:15:16.000Z
2022-03-20T16:14:38.000Z
4_Python for Data Science, AI & Development/PY0101EN-2-3-Dictionaries.ipynb
lebinh97/IBM-DataScience-Capstone
efe0881be047fc2b0518f80cf42c059280598187
[ "FSFAP" ]
1
2021-05-05T19:54:15.000Z
2021-05-05T19:54:15.000Z
4_Python for Data Science, AI & Development/PY0101EN-2-3-Dictionaries.ipynb
lebinh97/IBM-DataScience-Capstone
efe0881be047fc2b0518f80cf42c059280598187
[ "FSFAP" ]
40
2021-07-25T19:13:25.000Z
2022-03-25T17:55:42.000Z
23.666304
716
0.505626
[ [ [ "<center>\n <img src=\"https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-PY0101EN-SkillsNetwork/IDSNlogo.png\" width=\"300\" alt=\"cognitiveclass.ai logo\" />\n</center>\n\n# Dictionaries in Python\n\nEstimated time needed: **20** minutes\n\n## Objectives\n\nAfter completing this lab you will be able to:\n\n- Work with libraries in Python, including operations\n", "_____no_output_____" ], [ "<h2>Table of Contents</h2>\n<div class=\"alert alert-block alert-info\" style=\"margin-top: 20px\">\n <ul>\n <li>\n <a href=\"#dic\">Dictionaries</a>\n <ul>\n <li><a href=\"content\">What are Dictionaries?</a></li>\n <li><a href=\"key\">Keys</a></li>\n </ul>\n </li>\n <li>\n <a href=\"#quiz\">Quiz on Dictionaries</a>\n </li>\n </ul>\n\n</div>\n\n<hr>\n", "_____no_output_____" ], [ "<h2 id=\"Dic\">Dictionaries</h2>\n", "_____no_output_____" ], [ "<h3 id=\"content\">What are Dictionaries?</h3>\n", "_____no_output_____" ], [ "A dictionary consists of keys and values. It is helpful to compare a dictionary to a list. Instead of the numerical indexes such as a list, dictionaries have keys. These keys are the keys that are used to access values within a dictionary.\n", "_____no_output_____" ], [ "<img src=\"https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-PY0101EN-SkillsNetwork/labs/Module%202/images/DictsList.png\" width=\"650\" />\n", "_____no_output_____" ], [ "An example of a Dictionary <code>Dict</code>:\n", "_____no_output_____" ] ], [ [ "# Create the dictionary\n\nDict = {\"key1\": 1, \"key2\": \"2\", \"key3\": [3, 3, 3], \"key4\": (4, 4, 4), ('key5'): 5, (0, 1): 6}\nDict", "_____no_output_____" ] ], [ [ "The keys can be strings:\n", "_____no_output_____" ] ], [ [ "# Access to the value by the key\n\na = Dict[\"key1\"]\nb = Dict[\"key4\"]\n\nprint(a)\nprint(b)", "1\n(4, 4, 4)\n" ] ], [ [ "Keys can also be any immutable object such as a tuple: \n", "_____no_output_____" ] ], [ [ "# Access to the value by the key\n\nDict[(0, 1)]", "_____no_output_____" ] ], [ [ " Each key is separated from its value by a colon \"<code>:</code>\". Commas separate the items, and the whole dictionary is enclosed in curly braces. An empty dictionary without any items is written with just two curly braces, like this \"<code>{}</code>\".\n", "_____no_output_____" ] ], [ [ "# Create a sample dictionary\n\nrelease_year_dict = {\"Thriller\": \"1982\", \"Back in Black\": \"1980\", \\\n \"The Dark Side of the Moon\": \"1973\", \"The Bodyguard\": \"1992\", \\\n \"Bat Out of Hell\": \"1977\", \"Their Greatest Hits (1971-1975)\": \"1976\", \\\n \"Saturday Night Fever\": \"1977\", \"Rumours\": \"1977\"}\nrelease_year_dict", "_____no_output_____" ] ], [ [ "In summary, like a list, a dictionary holds a sequence of elements. Each element is represented by a key and its corresponding value. Dictionaries are created with two curly braces containing keys and values separated by a colon. For every key, there can only be one single value, however, multiple keys can hold the same value. Keys can only be strings, numbers, or tuples, but values can be any data type.\n", "_____no_output_____" ], [ "It is helpful to visualize the dictionary as a table, as in the following image. 
"_____no_output_____" ], [ "It is helpful to visualize the dictionary as a table, as in the following image. The first column represents the keys, the second column represents the values.\n", "_____no_output_____" ], [ "<img src=\"https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-PY0101EN-SkillsNetwork/labs/Module%202/images/DictsStructure.png\" width=\"650\" />\n", "_____no_output_____" ], [ "<h3 id=\"key\">Keys</h3>\n", "_____no_output_____" ], [ "You can retrieve a value based on its key (here, the album name):\n", "_____no_output_____" ] ], [ [ "# Get value by key\n\nrelease_year_dict['Thriller'] ", "_____no_output_____" ] ], [ [ "This corresponds to: \n", "_____no_output_____" ], [ "<img src=\"https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-PY0101EN-SkillsNetwork/labs/Module%202/images/DictsKeyOne.png\" width=\"500\" />\n", "_____no_output_____" ], [ "Similarly for <b>The Bodyguard</b>:\n", "_____no_output_____" ] ], [ [ "# Get value by key\n\nrelease_year_dict['The Bodyguard'] ", "_____no_output_____" ] ], [ [ "<img src=\"https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-PY0101EN-SkillsNetwork/labs/Module%202/images/DictsKeyTwo.png\" width=\"500\" />\n", "_____no_output_____" ], [ "Now let us retrieve the keys of the dictionary using the method <code>keys()</code>:\n", "_____no_output_____" ] ], [ [ "# Get all the keys in dictionary\n\nrelease_year_dict.keys() ", "_____no_output_____" ] ], [ [ "You can retrieve the values using the method <code>values()</code>:\n", "_____no_output_____" ] ], [ [ "# Get all the values in dictionary\n\nrelease_year_dict.values() ", "_____no_output_____" ] ], [ [ "We can add an entry:\n", "_____no_output_____" ] ], [ [ "# Add a new key-value pair to the dictionary\n\nrelease_year_dict['Graduation'] = '2007'\nrelease_year_dict", "_____no_output_____" ] ], [ [ "We can delete an entry: \n", "_____no_output_____" ] ], [ [ "# Delete entries by key\n\ndel(release_year_dict['Thriller'])\ndel(release_year_dict['Graduation'])\nrelease_year_dict", "_____no_output_____" ] ], [ [ "We can verify whether a key is in the dictionary: \n", "_____no_output_____" ] ], [ [ "# Verify the key is in the dictionary\n\n'The Bodyguard' in release_year_dict", "_____no_output_____" ] ], [ [ "<hr>\n", "_____no_output_____" ], [ "<h2 id=\"quiz\">Quiz on Dictionaries</h2>\n", "_____no_output_____" ], [ "<b>You will need this dictionary for the next two questions:</b>\n", "_____no_output_____" ] ], [ [ "# Question sample dictionary\n\nsoundtrack_dic = {\"The Bodyguard\":\"1992\", \"Saturday Night Fever\":\"1977\"}\nsoundtrack_dic ", "_____no_output_____" ] ], [ [ "a) In the dictionary <code>soundtrack_dic</code>, what are the keys?\n", "_____no_output_____" ] ], [ [ "# Write your code below and press Shift+Enter to execute\nsoundtrack_dic.keys()", "_____no_output_____" ] ], [ [ "<details><summary>Click here for the solution</summary>\n\n```python\nsoundtrack_dic.keys() # The Keys \"The Bodyguard\" and \"Saturday Night Fever\" \n\n```\n\n</details>\n", "_____no_output_____" ], [ "b) In the dictionary <code>soundtrack_dic</code>, what are the values?\n", "_____no_output_____" ] ], [ [ "# Write your code below and press Shift+Enter to execute\nsoundtrack_dic.values()", "_____no_output_____" ] ], [ [ "<details><summary>Click here for the solution</summary>\n\n```python\nsoundtrack_dic.values() # The values are \"1992\" and \"1977\"\n\n```\n\n</details>\n", "_____no_output_____" ], [ "<hr>\n",
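"_____no_output_____" ], [ "As an optional aside before the remaining questions, here is a minimal sketch of two further dictionary operations that this lab does not cover, reusing <code>soundtrack_dic</code> from above (the fallback string <code>'unknown'</code> is an arbitrary placeholder):\n\n```python\n# .get() looks up a key but returns a default instead of raising KeyError\nprint(soundtrack_dic.get('The Bodyguard', 'unknown'))   # 1992\nprint(soundtrack_dic.get('Graduation', 'unknown'))      # unknown\n\n# .items() yields (key, value) pairs, which is convenient for looping\nfor album, year in soundtrack_dic.items():\n    print(album, 'was released in', year)\n```\n",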
"_____no_output_____" ], [ "The Albums <b>Back in Black</b>, <b>The Bodyguard</b> and <b>Thriller</b> have the following music recording sales in millions 50, 50 and 65 respectively:\n", "_____no_output_____" ], [ "a) Create a dictionary <code>album_sales_dict</code> where the keys are the album name and the sales in millions are the values. \n", "_____no_output_____" ] ], [ [ "# Write your code below and press Shift+Enter to execute\nalbum_sales_dict = { \"Back in Black\" : 50, \"The Bodyguard\" : 50, \"Thriller\" : 65}\nalbum_sales_dict", "_____no_output_____" ] ], [ [ "<details><summary>Click here for the solution</summary>\n\n```python\nalbum_sales_dict = {\"The Bodyguard\":50, \"Back in Black\":50, \"Thriller\":65}\n\n```\n\n</details>\n", "_____no_output_____" ], [ "b) Use the dictionary to find the total sales of <b>Thriller</b>:\n", "_____no_output_____" ] ], [ [ "# Write your code below and press Shift+Enter to execute\nalbum_sales_dict[\"Thriller\"]", "_____no_output_____" ] ], [ [ "<details><summary>Click here for the solution</summary>\n\n```python\nalbum_sales_dict[\"Thriller\"]\n\n```\n\n</details>\n", "_____no_output_____" ], [ "c) Find the names of the albums from the dictionary using the method <code>keys()</code>:\n", "_____no_output_____" ] ], [ [ "# Write your code below and press Shift+Enter to execute\nalbum_sales_dict.keys()", "_____no_output_____" ] ], [ [ "<details><summary>Click here for the solution</summary>\n\n```python\nalbum_sales_dict.keys()\n\n```\n\n</details>\n", "_____no_output_____" ], [ "d) Find the values of the recording sales from the dictionary using the method <code>values</code>:\n", "_____no_output_____" ] ], [ [ "# Write your code below and press Shift+Enter to execute\nalbum_sales_dict.values()", "_____no_output_____" ] ], [ [ "<details><summary>Click here for the solution</summary>\n\n```python\nalbum_sales_dict.values()\n\n```\n\n</details>\n", "_____no_output_____" ], [ "<hr>\n<h2>The last exercise!</h2>\n<p>Congratulations, you have completed your first lesson and hands-on lab in Python. However, there is one more thing you need to do. The Data Science community encourages sharing work. The best way to share and showcase your work is to share it on GitHub. By sharing your notebook on GitHub you are not only building your reputation with fellow data scientists, but you can also show it off when applying for a job. Even though this was your first piece of work, it is never too early to start building good habits. So, please read and follow <a href=\"https://cognitiveclass.ai/blog/data-scientists-stand-out-by-sharing-your-notebooks/\" target=\"_blank\">this article</a> to learn how to share your work.\n<hr>\n", "_____no_output_____" ], [ "## Author\n\n<a href=\"https://www.linkedin.com/in/joseph-s-50398b136/\" target=\"_blank\">Joseph Santarcangelo</a>\n\n## Other contributors\n\n<a href=\"www.linkedin.com/in/jiahui-mavis-zhou-a4537814a\">Mavis Zhou</a>\n\n## Change Log\n\n| Date (YYYY-MM-DD) | Version | Changed By | Change Description |\n| ----------------- | ------- | ------------- | ------------------------------------------------------------------- |\n| 2020-09-09 | 2.1 | Malika Singla | Updated the variable soundtrack_dict to soundtrack_dic in Questions |\n| 2020-08-26 | 2.0 | Lavanya | Moved lab to course repo in GitLab |\n| | | | |\n| | | | |\n\n## <h3 align=\"center\"> © IBM Corporation 2020. All rights reserved. <h3/>\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ] ]
e7d04255e4c24be6f2bf1f7687d242e2a5d1a4e5
680,626
ipynb
Jupyter Notebook
Capstone Project - The Battle of the Neighborhoods - London Neighborhood Clustering.ipynb
ZRQ-rikkie/coursera-python
3be75d9da2ed86ca9bff8d231d26667540c7cc52
[ "Apache-2.0" ]
11
2020-07-13T16:29:07.000Z
2022-03-11T17:44:18.000Z
Capstone Project - The Battle of the Neighborhoods - London Neighborhood Clustering.ipynb
YashkumarNavadiya/Applied_Data_Science_Capstone
df8265ce2e9ca09c7116cb2d5bc5e9949fae59b2
[ "MIT" ]
null
null
null
Capstone Project - The Battle of the Neighborhoods - London Neighborhood Clustering.ipynb
YashkumarNavadiya/Applied_Data_Science_Capstone
df8265ce2e9ca09c7116cb2d5bc5e9949fae59b2
[ "MIT" ]
4
2021-09-15T03:47:13.000Z
2022-03-22T21:26:56.000Z
54.929061
40,172
0.540787
[ [ [ "## Capstone Project - The Battle of the Neighborhoods \n### Applied Data Science Capstone by IBM/Coursera", "_____no_output_____" ], [ "## Table of contents\n* [Introduction: Business Problem](#introduction)\n* [Data](#data)\n* [Methodology](#methodology)\n* [Analysis](#analysis)\n* [Results and Discussion](#results)\n* [Conclusion](#conclusion)", "_____no_output_____" ], [ "## Introduction: Business Problem <a name=\"introduction\"></a>", "_____no_output_____" ], [ "This project aims to select the safest borough in London based on the **total crimes**, explore the **neighborhoods** of that borough to find the **10 most common venues** in each neighborhood and finally cluster the neighborhoods using **k-mean clustering**.\n\nThis report will be targeted to people who are looking to **relocate to London**. Inorder to finalise a neighborhood to hunt for an apartment, **safety** is considered as a top concern when moving to a new place. If you don’t feel safe in your own home, you’re not going to be able to enjoy living there. The **crime statistics** will provide an insight into this issue.\n\nWe will focus on the safest borough and explore its neighborhoods and the 10 most common venues in each neighborhood so that the best neighborhood suited to an individual's needs can be selected.", "_____no_output_____" ], [ "## Data <a name=\"data\"></a>\n\nBased on definition of our problem, factors that will influence our decision are:\n* The total number of crimes commited in each of the borough during the last year.\n* The most common venues in each of the neighborhood in the safest borough selected.\n\nFollowing data sources will be needed to extract/generate the required information:\n\n- [**Part 1**: Preprocessing a real world data set from Kaggle showing the London Crimes from 2008 to 2016](#part1): A dataset consisting of the crime statistics of each borough in London obtained from Kaggle\n- [**Part 2**: Scraping additional information of the different Boroughs in London from a Wikipedia page.](#part2): More information regarding the boroughs of London is scraped using the Beautifulsoup library\n- [**Part 3**: Creating a new dataset of the Neighborhoods of the safest borough in London and generating their co-ordinates.](#part3): Co-ordinate of neighborhood will be obtained using **Google Maps API geocoding**\n", "_____no_output_____" ], [ "### Part 1: Preprocessing a real world data set from Kaggle showing the London Crimes from 2008 to 2016<a name=\"part1\"></a>\n\n\n#### London Crime Data \n\nAbout this file\n\n- lsoa_code: code for Lower Super Output Area in Greater London.\n- borough: Common name for London borough.\n- major_category: High level categorization of crime\n- minor_category: Low level categorization of crime within major category.\n- value: monthly reported count of categorical crime in given borough\n- year: Year of reported counts, 2008-2016\n- month: Month of reported counts, 1-12\n\nData set URL: https://www.kaggle.com/jboysen/london-crime\n", "_____no_output_____" ], [ "#### Import necessary libraries", "_____no_output_____" ] ], [ [ "import requests # library to handle requests\nimport pandas as pd # library for data analsysis\nimport numpy as np # library to handle data in a vectorized manner\nimport random # library for random number generation\nfrom bs4 import BeautifulSoup # library for web scrapping \n\n#!conda install -c conda-forge geocoder --yes\nimport geocoder\n\n#!conda install -c conda-forge geopy --yes \nfrom geopy.geocoders import Nominatim # module 
"_____no_output_____" ], [ "### Part 1: Preprocessing a real world data set from Kaggle showing the London Crimes from 2008 to 2016<a name=\"part1\"></a>\n\n\n#### London Crime Data \n\nAbout this file\n\n- lsoa_code: code for Lower Super Output Area in Greater London.\n- borough: Common name for London borough.\n- major_category: High level categorization of crime\n- minor_category: Low level categorization of crime within major category.\n- value: monthly reported count of categorical crime in given borough\n- year: Year of reported counts, 2008-2016\n- month: Month of reported counts, 1-12\n\nData set URL: https://www.kaggle.com/jboysen/london-crime\n", "_____no_output_____" ], [ "#### Import necessary libraries", "_____no_output_____" ] ], [ [ "import requests # library to handle requests\nimport pandas as pd # library for data analysis\nimport numpy as np # library to handle data in a vectorized manner\nimport random # library for random number generation\nfrom bs4 import BeautifulSoup # library for web scraping \n\n#!conda install -c conda-forge geocoder --yes\nimport geocoder\n\n#!conda install -c conda-forge geopy --yes \nfrom geopy.geocoders import Nominatim # module to convert an address into latitude and longitude values\n\n# libraries for displaying images\nfrom IPython.display import Image \nfrom IPython.core.display import HTML \n \n# transforming json file into a pandas dataframe library\nfrom pandas.io.json import json_normalize\n\n#!conda install -c conda-forge folium=0.5.0 --yes\nimport folium # plotting library\n\nprint('Folium installed')\nprint('Libraries imported.')\n", "Folium installed\nLibraries imported.\n" ] ], [ [ "#### Define Foursquare Credentials and Version\nMake sure that you have created a Foursquare developer account and have your credentials handy", "_____no_output_____" ] ], [ [ "CLIENT_ID = 'R01LINGO2WC45KLRLKT3ZHU2QENAO2IPRK2N2ELOHRNK4P3K' # your Foursquare ID\nCLIENT_SECRET = '4JT1TWRMXMPLX5IOKNBAFU3L3ARXK4D5JJDPFK1CLRZM2ZVW' # your Foursquare Secret\n\nVERSION = '20180604'\nLIMIT = 30\n\nprint('Your credentials:')\nprint('CLIENT_ID: ' + CLIENT_ID)\nprint('CLIENT_SECRET:' + CLIENT_SECRET)", "Your credentials:\nCLIENT_ID: R01LINGO2WC45KLRLKT3ZHU2QENAO2IPRK2N2ELOHRNK4P3K\nCLIENT_SECRET:4JT1TWRMXMPLX5IOKNBAFU3L3ARXK4D5JJDPFK1CLRZM2ZVW\n" ] ], [ [ "#### Read in the dataset", "_____no_output_____" ] ], [ [ "# Read in the data \ndf = pd.read_csv(\"london_crime_by_lsoa.csv\")", "_____no_output_____" ], [ "# View the top rows of the dataset\ndf.head()", "_____no_output_____" ] ], [ [ "#### Accessing the most recent crime rates (2016)", "_____no_output_____" ] ], [ [ "# Taking only the most recent year (2016) and dropping the rest\ndf.drop(df.index[df['year'] != 2016], inplace = True)\n\n# Removing all the entries where the crime count is zero \ndf = df[df.value != 0]\n\n# Reset the index and dropping the previous index\ndf = df.reset_index(drop=True)", "_____no_output_____" ], [ "# Shape of the data frame\ndf.shape", "_____no_output_____" ], [ "# View the top of the dataset \ndf.head()", "_____no_output_____" ] ], [ [ "#### Change the column names ", "_____no_output_____" ] ], [ [ "df.columns = ['LSOA_Code', 'Borough','Major_Category','Minor_Category','No_of_Crimes','Year','Month']\ndf.head()", "_____no_output_____" ], [ "# View the information of the dataset \ndf.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 392042 entries, 0 to 392041\nData columns (total 7 columns):\nLSOA_Code 392042 non-null object\nBorough 392042 non-null object\nMajor_Category 392042 non-null object\nMinor_Category 392042 non-null object\nNo_of_Crimes 392042 non-null int64\nYear 392042 non-null int64\nMonth 392042 non-null int64\ndtypes: int64(3), object(4)\nmemory usage: 20.9+ MB\n" ] ], [ [ "#### Number of crime records in each Borough", "_____no_output_____" ] ], [ [ "df['Borough'].value_counts()", "_____no_output_____" ] ], [ [ "#### Number of crime records per major category", "_____no_output_____" ] ], [ [ "df['Major_Category'].value_counts()", "_____no_output_____" ] ], [ [ "#### Pivoting the table to view the no. of crimes for each major category in each Borough ", "_____no_output_____" ] ], [ [ "London_crime = pd.pivot_table(df,values=['No_of_Crimes'],\n index=['Borough'],\n columns=['Major_Category'],\n aggfunc=np.sum,fill_value=0)\nLondon_crime.head()", "_____no_output_____" ], [ "# Reset the index\nLondon_crime.reset_index(inplace = True)", "_____no_output_____" ], [ "# Total crimes per Borough\nLondon_crime['Total'] = London_crime.sum(axis=1)\nLondon_crime.head(33)", "_____no_output_____" ] ],
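[ [ "Note: because <code>pivot_table</code> was called with a list for <code>values</code>, the result carries a two-level <code>MultiIndex</code> on its columns (<code>No_of_Crimes</code> on top, the crime categories underneath), which the next step flattens. Here is a minimal, self-contained sketch of what that flattening does (the tiny frame below is made up purely for illustration):\n\n```python\nimport pandas as pd\n\n# a toy frame with two-level columns, mimicking the pivot result\ntoy = pd.DataFrame({('No_of_Crimes', 'Drugs'): [1], ('No_of_Crimes', 'Robbery'): [2]})\nprint(list(toy.columns))  # [('No_of_Crimes', 'Drugs'), ('No_of_Crimes', 'Robbery')]\n\n# joining each tuple of labels into one string flattens the MultiIndex\ntoy.columns = toy.columns.map(''.join)\nprint(list(toy.columns))  # ['No_of_CrimesDrugs', 'No_of_CrimesRobbery']\n```\n", "_____no_output_____" ] ],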
[ [ "#### Removing the multi index so that it will be easier to merge", "_____no_output_____" ] ], [ [ "London_crime.columns = London_crime.columns.map(''.join)\nLondon_crime.head()", "_____no_output_____" ] ], [ [ "#### Renaming the columns", "_____no_output_____" ] ], [ [ "London_crime.columns = ['Borough','Burglary', 'Criminal Damage','Drugs','Other Notifiable Offences',\n 'Robbery','Theft and Handling','Violence Against the Person','Total']\nLondon_crime.head()", "_____no_output_____" ], [ "# Shape of the data set \nLondon_crime.shape", "_____no_output_____" ], [ "# View the Columns in the data frame\n# London_crime.columns.tolist()", "_____no_output_____" ] ], [ [ "### Part 2: Scraping additional information of the different Boroughs in London from a Wikipedia page <a name=\"part2\"></a>\n \n**Using BeautifulSoup to scrape the latitude and longitude of the boroughs in London**\n\nURL: https://en.wikipedia.org/wiki/List_of_London_boroughs", "_____no_output_____" ] ], [ [ "# getting data from the internet\nwikipedia_link='https://en.wikipedia.org/wiki/List_of_London_boroughs'\nraw_wikipedia_page= requests.get(wikipedia_link).text\n\n# using BeautifulSoup to parse the HTML/XML code\nsoup = BeautifulSoup(raw_wikipedia_page,'xml')\nprint(soup.prettify())\n", "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<!DOCTYPE html>\n<html class=\"client-nojs\" dir=\"ltr\" lang=\"en\">\n <head>\n <meta charset=\"UTF-8\"/>\n <title>\n List of London boroughs - Wikipedia\n </title>\n <script>\n document.documentElement.className = document.documentElement.className.replace( /(^|\\s)client-nojs(\\s|$)/, \"$1client-js$2\" );\n </script>\n <script>\n (window.RLQ=window.RLQ||[]).push(function(){mw.config.set({\"wgCanonicalNamespace\":\"\",\"wgCanonicalSpecialPageName\":false,\"wgNamespaceNumber\":0,\"wgPageName\":\"List_of_London_boroughs\",\"wgTitle\":\"List of London boroughs\",\"wgCurRevisionId\":881899861,\"wgRevisionId\":881899861,\"wgArticleId\":28092685,\"wgIsArticle\":true,\"wgIsRedirect\":false,\"wgAction\":\"view\",\"wgUserName\":null,\"wgUserGroups\":[\"*\"],\"wgCategories\":[\"Use dmy dates from August 2015\",\"Use British English from August 2015\",\"Lists of coordinates\",\"Geographic coordinate lists\",\"Articles with Geo\",\"London boroughs\",\"Lists of places in 
London\"],\"wgBreakFrames\":false,\"wgPageContentLanguage\":\"en\",\"wgPageContentModel\":\"wikitext\",\"wgSeparatorTransformTable\":[\"\",\"\"],\"wgDigitTransformTable\":[\"\",\"\"],\"wgDefaultDateFormat\":\"dmy\",\"wgMonthNames\":[\"\",\"January\",\"February\",\"March\",\"April\",\"May\",\"June\",\"July\",\"August\",\"September\",\"October\",\"November\",\"December\"],\"wgMonthNamesShort\":[\"\",\"Jan\",\"Feb\",\"Mar\",\"Apr\",\"May\",\"Jun\",\"Jul\",\"Aug\",\"Sep\",\"Oct\",\"Nov\",\"Dec\"],\"wgRelevantPageName\":\"List_of_London_boroughs\",\"wgRelevantArticleId\":28092685,\"wgRequestId\":\"XMbUCwpAAEUAACKj1V4AAABQ\",\"wgCSPNonce\":false,\"wgIsProbablyEditable\":true,\"wgRelevantPageIsProbablyEditable\":true,\"wgRestrictionEdit\":[],\"wgRestrictionMove\":[],\"wgFlaggedRevsParams\":{\"tags\":{}},\"wgStableRevisionId\":null,\"wgMediaViewerOnClick\":true,\"wgMediaViewerEnabledByDefault\":true,\"wgPopupsReferencePreviews\":false,\"wgPopupsConflictsWithNavPopupGadget\":false,\"wgVisualEditor\":{\"pageLanguageCode\":\"en\",\"pageLanguageDir\":\"ltr\",\"pageVariantFallbacks\":\"en\",\"usePageImages\":true,\"usePageDescriptions\":true},\"wgMFDisplayWikibaseDescriptions\":{\"search\":true,\"nearby\":true,\"watchlist\":true,\"tagline\":false},\"wgRelatedArticles\":null,\"wgRelatedArticlesUseCirrusSearch\":true,\"wgRelatedArticlesOnlyUseCirrusSearch\":false,\"wgWMESchemaEditAttemptStepOversample\":false,\"wgPoweredByHHVM\":true,\"wgULSCurrentAutonym\":\"English\",\"wgNoticeProject\":\"wikipedia\",\"wgCentralNoticeCookiesToDelete\":[],\"wgCentralNoticeCategoriesUsingLegacy\":[\"Fundraising\",\"fundraising\"],\"wgWikibaseItemId\":\"Q6577004\",\"wgCentralAuthMobileDomain\":false,\"wgEditSubmitButtonLabelPublish\":true});mw.loader.state({\"ext.gadget.charinsert-styles\":\"ready\",\"ext.globalCssJs.user.styles\":\"ready\",\"ext.globalCssJs.site.styles\":\"ready\",\"site.styles\":\"ready\",\"noscript\":\"ready\",\"user.styles\":\"ready\",\"ext.globalCssJs.user\":\"ready\",\"ext.globalCssJs.site\":\"ready\",\"user\":\"ready\",\"user.options\":\"ready\",\"user.tokens\":\"loading\",\"ext.cite.styles\":\"ready\",\"mediawiki.legacy.shared\":\"ready\",\"mediawiki.legacy.commonPrint\":\"ready\",\"mediawiki.toc.styles\":\"ready\",\"wikibase.client.init\":\"ready\",\"ext.visualEditor.desktopArticleTarget.noscript\":\"ready\",\"ext.uls.interlanguage\":\"ready\",\"ext.wikimediaBadges\":\"ready\",\"ext.3d.styles\":\"ready\",\"mediawiki.skinning.interface\":\"ready\",\"skins.vector.styles\":\"ready\"});mw.loader.implement(\"user.tokens@0tffind\",function($,jQuery,require,module){/*@nomin*/mw.user.tokens.set({\"editToken\":\"+\\\\\",\"patrolToken\":\"+\\\\\",\"watchToken\":\"+\\\\\",\"csrfToken\":\"+\\\\\"});\n});RLPAGEMODULES=[\"ext.cite.ux-enhancements\",\"site\",\"mediawiki.page.startup\",\"mediawiki.page.ready\",\"jquery.tablesorter\",\"mediawiki.toc\",\"mediawiki.searchSuggest\",\"ext.gadget.teahouse\",\"ext.gadget.ReferenceTooltips\",\"ext.gadget.watchlist-notice\",\"ext.gadget.DRN-wizard\",\"ext.gadget.charinsert\",\"ext.gadget.refToolbar\",\"ext.gadget.extra-toolbar-buttons\",\"ext.gadget.switcher\",\"ext.centralauth.centralautologin\",\"mmv.head\",\"mmv.bootstrap.autostart\",\"ext.popups\",\"ext.visualEditor.desktopArticleTarget.init\",\"ext.visualEditor.targetLoader\",\"ext.eventLogging\",\"ext.wikimediaEvents\",\"ext.navigationTiming\",\"ext.uls.eventlogger\",\"ext.uls.init\",\"ext.uls.compactlinks\",\"ext.uls.interface\",\"ext.quicksurveys.init\",\"ext.centralNotice.geoIP\",\"ext.centralNotice.startUp\",\"s
kins.vector.js\"];mw.loader.load(RLPAGEMODULES);});\n </script>\n <link href=\"/w/load.php?lang=en&amp;modules=ext.3d.styles%7Cext.cite.styles%7Cext.uls.interlanguage%7Cext.visualEditor.desktopArticleTarget.noscript%7Cext.wikimediaBadges%7Cmediawiki.legacy.commonPrint%2Cshared%7Cmediawiki.skinning.interface%7Cmediawiki.toc.styles%7Cskins.vector.styles%7Cwikibase.client.init&amp;only=styles&amp;skin=vector\" rel=\"stylesheet\"/>\n <script async=\"\" src=\"/w/load.php?lang=en&amp;modules=startup&amp;only=scripts&amp;skin=vector\"/>\n <meta content=\"\" name=\"ResourceLoaderDynamicStyles\"/>\n <link href=\"/w/load.php?lang=en&amp;modules=ext.gadget.charinsert-styles&amp;only=styles&amp;skin=vector\" rel=\"stylesheet\"/>\n <link href=\"/w/load.php?lang=en&amp;modules=site.styles&amp;only=styles&amp;skin=vector\" rel=\"stylesheet\"/>\n <meta content=\"MediaWiki 1.34.0-wmf.1\" name=\"generator\"/>\n <meta content=\"origin\" name=\"referrer\"/>\n <meta content=\"origin-when-crossorigin\" name=\"referrer\"/>\n <meta content=\"origin-when-cross-origin\" name=\"referrer\"/>\n <link href=\"android-app://org.wikipedia/http/en.m.wikipedia.org/wiki/List_of_London_boroughs\" rel=\"alternate\"/>\n <link href=\"/w/index.php?title=List_of_London_boroughs&amp;action=edit\" rel=\"alternate\" title=\"Edit this page\" type=\"application/x-wiki\"/>\n <link href=\"/w/index.php?title=List_of_London_boroughs&amp;action=edit\" rel=\"edit\" title=\"Edit this page\"/>\n <link href=\"/static/apple-touch/wikipedia.png\" rel=\"apple-touch-icon\"/>\n <link href=\"/static/favicon/wikipedia.ico\" rel=\"shortcut icon\"/>\n <link href=\"/w/opensearch_desc.php\" rel=\"search\" title=\"Wikipedia (en)\" type=\"application/opensearchdescription+xml\"/>\n <link href=\"//en.wikipedia.org/w/api.php?action=rsd\" rel=\"EditURI\" type=\"application/rsd+xml\"/>\n <link href=\"//creativecommons.org/licenses/by-sa/3.0/\" rel=\"license\"/>\n <link href=\"https://en.wikipedia.org/wiki/List_of_London_boroughs\" rel=\"canonical\"/>\n <link href=\"//login.wikimedia.org\" rel=\"dns-prefetch\"/>\n <link href=\"//meta.wikimedia.org\" rel=\"dns-prefetch\"/>\n <!--[if lt IE 9]><script src=\"/w/load.php?lang=qqx&amp;modules=html5shiv&amp;only=scripts&amp;skin=fallback&amp;sync=1\"></script><![endif]-->\n </head>\n <body class=\"mediawiki ltr sitedir-ltr mw-hide-empty-elt ns-0 ns-subject mw-editable page-List_of_London_boroughs rootpage-List_of_London_boroughs skin-vector action-view\">\n <div class=\"noprint\" id=\"mw-page-base\"/>\n <div class=\"noprint\" id=\"mw-head-base\"/>\n <div class=\"mw-body\" id=\"content\" role=\"main\">\n <a id=\"top\"/>\n <div class=\"mw-body-content\" id=\"siteNotice\">\n <!-- CentralNotice -->\n </div>\n <div class=\"mw-indicators mw-body-content\">\n </div>\n <h1 class=\"firstHeading\" id=\"firstHeading\" lang=\"en\">\n List of London boroughs\n </h1>\n <div class=\"mw-body-content\" id=\"bodyContent\">\n <div class=\"noprint\" id=\"siteSub\">\n From Wikipedia, the free encyclopedia\n </div>\n <div id=\"contentSub\"/>\n <div id=\"jump-to-nav\"/>\n <a class=\"mw-jump-link\" href=\"#mw-head\">\n Jump to navigation\n </a>\n <a class=\"mw-jump-link\" href=\"#p-search\">\n Jump to search\n </a>\n <div class=\"mw-content-ltr\" dir=\"ltr\" id=\"mw-content-text\" lang=\"en\">\n <div class=\"mw-parser-output\">\n <p class=\"mw-empty-elt\">\n </p>\n <p class=\"mw-empty-elt\">\n </p>\n <div class=\"thumb tright\">\n <div class=\"thumbinner\" style=\"width:302px;\">\n <a class=\"image\" href=\"/wiki/File:London-boroughs.svg\">\n 
<img alt=\"\" class=\"thumbimage\" data-file-height=\"386\" data-file-width=\"489\" decoding=\"async\" height=\"237\" src=\"//upload.wikimedia.org/wikipedia/commons/thumb/2/29/London-boroughs.svg/300px-London-boroughs.svg.png\" srcset=\"//upload.wikimedia.org/wikipedia/commons/thumb/2/29/London-boroughs.svg/450px-London-boroughs.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/2/29/London-boroughs.svg/600px-London-boroughs.svg.png 2x\" width=\"300\"/>\n </a>\n <div class=\"thumbcaption\">\n <div class=\"magnify\">\n <a class=\"internal\" href=\"/wiki/File:London-boroughs.svg\" title=\"Enlarge\"/>\n </div>\n Map of the 32 London boroughs and the City of London.\n </div>\n </div>\n </div>\n <p>\n This is a list of\n <a href=\"/wiki/Districts_of_England\" title=\"Districts of England\">\n local authority districts\n </a>\n within\n <a href=\"/wiki/Greater_London\" title=\"Greater London\">\n Greater London\n </a>\n , including 32\n <a href=\"/wiki/London_boroughs\" title=\"London boroughs\">\n London boroughs\n </a>\n and the\n <a href=\"/wiki/City_of_London\" title=\"City of London\">\n City of London\n </a>\n . The London boroughs were all created on 1 April 1965. Upon creation, twelve were designated\n <a href=\"/wiki/Inner_London\" title=\"Inner London\">\n Inner London\n </a>\n boroughs and the remaining twenty were designated\n <a href=\"/wiki/Outer_London\" title=\"Outer London\">\n Outer London\n </a>\n boroughs. The\n <a href=\"/wiki/Office_for_National_Statistics\" title=\"Office for National Statistics\">\n Office for National Statistics\n </a>\n has amended the designations of three boroughs for statistics purposes only. Three boroughs have been granted the designation\n <a class=\"mw-redirect\" href=\"/wiki/Royal_borough\" title=\"Royal borough\">\n royal borough\n </a>\n and one has\n <a href=\"/wiki/City_status_in_the_United_Kingdom\" title=\"City status in the United Kingdom\">\n city status\n </a>\n . 
For planning purposes, in addition to the boroughs and City there are also two active development corporations, the\n <a href=\"/wiki/London_Legacy_Development_Corporation\" title=\"London Legacy Development Corporation\">\n London Legacy Development Corporation\n </a>\n and\n <a href=\"/wiki/Old_Oak_and_Park_Royal_Development_Corporation\" title=\"Old Oak and Park Royal Development Corporation\">\n Old Oak and Park Royal Development Corporation\n </a>\n .\n </p>\n <div class=\"toc\" id=\"toc\">\n <input class=\"toctogglecheckbox\" id=\"toctogglecheckbox\" role=\"button\" style=\"display:none\" type=\"checkbox\"/>\n <div class=\"toctitle\" dir=\"ltr\" lang=\"en\">\n <h2>\n Contents\n </h2>\n <span class=\"toctogglespan\">\n <label class=\"toctogglelabel\" for=\"toctogglecheckbox\"/>\n </span>\n </div>\n <ul>\n <li class=\"toclevel-1 tocsection-1\">\n <a href=\"#List_of_boroughs_and_local_authorities\">\n <span class=\"tocnumber\">\n 1\n </span>\n <span class=\"toctext\">\n List of boroughs and local authorities\n </span>\n </a>\n </li>\n <li class=\"toclevel-1 tocsection-2\">\n <a href=\"#City_of_London\">\n <span class=\"tocnumber\">\n 2\n </span>\n <span class=\"toctext\">\n City of London\n </span>\n </a>\n </li>\n <li class=\"toclevel-1 tocsection-3\">\n <a href=\"#See_also\">\n <span class=\"tocnumber\">\n 3\n </span>\n <span class=\"toctext\">\n See also\n </span>\n </a>\n </li>\n <li class=\"toclevel-1 tocsection-4\">\n <a href=\"#Notes\">\n <span class=\"tocnumber\">\n 4\n </span>\n <span class=\"toctext\">\n Notes\n </span>\n </a>\n </li>\n <li class=\"toclevel-1 tocsection-5\">\n <a href=\"#References\">\n <span class=\"tocnumber\">\n 5\n </span>\n <span class=\"toctext\">\n References\n </span>\n </a>\n </li>\n <li class=\"toclevel-1 tocsection-6\">\n <a href=\"#External_links\">\n <span class=\"tocnumber\">\n 6\n </span>\n <span class=\"toctext\">\n External links\n </span>\n </a>\n </li>\n </ul>\n </div>\n <h2>\n <span class=\"mw-headline\" id=\"List_of_boroughs_and_local_authorities\">\n List of boroughs and local authorities\n </span>\n <span class=\"mw-editsection\">\n <span class=\"mw-editsection-bracket\">\n [\n </span>\n <a href=\"/w/index.php?title=List_of_London_boroughs&amp;action=edit&amp;section=1\" title=\"Edit section: List of boroughs and local authorities\">\n edit\n </a>\n <span class=\"mw-editsection-bracket\">\n ]\n </span>\n </span>\n </h2>\n <table class=\"wikitable sortable\" style=\"font-size:100%\" width=\"100%\">\n <tbody>\n <tr>\n <th>\n Borough\n </th>\n <th>\n Inner\n </th>\n <th>\n Status\n </th>\n <th>\n Local authority\n </th>\n <th>\n Political control\n </th>\n <th>\n Headquarters\n </th>\n <th>\n Area (sq mi)\n </th>\n <th>\n Population (2013 est)\n <sup class=\"reference\" id=\"cite_ref-1\">\n <a href=\"#cite_note-1\">\n [1]\n </a>\n </sup>\n </th>\n <th>\n Co-ordinates\n </th>\n <th>\n <span style=\"background:#67BCD3\">\n Nr. 
in map\n </span>\n </th>\n </tr>\n <tr>\n <td>\n <a href=\"/wiki/London_Borough_of_Barking_and_Dagenham\" title=\"London Borough of Barking and Dagenham\">\n Barking and Dagenham\n </a>\n <sup class=\"reference\" id=\"cite_ref-2\">\n <a href=\"#cite_note-2\">\n [note 1]\n </a>\n </sup>\n </td>\n <td>\n </td>\n <td>\n </td>\n <td>\n <a href=\"/wiki/Barking_and_Dagenham_London_Borough_Council\" title=\"Barking and Dagenham London Borough Council\">\n Barking and Dagenham London Borough Council\n </a>\n </td>\n <td>\n <a href=\"/wiki/Labour_Party_(UK)\" title=\"Labour Party (UK)\">\n Labour\n </a>\n </td>\n <td>\n <a class=\"new\" href=\"/w/index.php?title=Barking_Town_Hall&amp;action=edit&amp;redlink=1\" title=\"Barking Town Hall (page does not exist)\">\n Town Hall\n </a>\n , 1 Town Square\n </td>\n <td>\n 13.93\n </td>\n <td>\n 194,352\n </td>\n <td>\n <span class=\"plainlinks nourlexpansion\">\n <a class=\"external text\" href=\"//tools.wmflabs.org/geohack/geohack.php?pagename=List_of_London_boroughs&amp;params=51.5607_N_0.1557_E_region:GB_type:city&amp;title=Barking+and+Dagenham\">\n <span class=\"geo-nondefault\">\n <span class=\"geo-dms\" title=\"Maps, aerial photos, and other data for this location\">\n <span class=\"latitude\">\n 51°33′39″N\n </span>\n <span class=\"longitude\">\n 0°09′21″E\n </span>\n </span>\n </span>\n <span class=\"geo-multi-punct\">\n  / \n </span>\n <span class=\"geo-default\">\n <span class=\"vcard\">\n <span class=\"geo-dec\" title=\"Maps, aerial photos, and other data for this location\">\n 51.5607°N 0.1557°E\n </span>\n <span style=\"display:none\">\n  /\n <span class=\"geo\">\n 51.5607; 0.1557\n </span>\n </span>\n <span style=\"display:none\">\n  (\n <span class=\"fn org\">\n Barking and Dagenham\n </span>\n )\n </span>\n </span>\n </span>\n </a>\n </span>\n </td>\n <td>\n 25\n </td>\n </tr>\n <tr>\n <td>\n <a href=\"/wiki/London_Borough_of_Barnet\" title=\"London Borough of Barnet\">\n Barnet\n </a>\n </td>\n <td>\n </td>\n <td>\n </td>\n <td>\n <a href=\"/wiki/Barnet_London_Borough_Council\" title=\"Barnet London Borough Council\">\n Barnet London Borough Council\n </a>\n </td>\n <td>\n <a href=\"/wiki/Conservative_Party_(UK)\" title=\"Conservative Party (UK)\">\n Conservative\n </a>\n </td>\n <td>\n <a class=\"new\" href=\"/w/index.php?title=North_London_Business_Park&amp;action=edit&amp;redlink=1\" title=\"North London Business Park (page does not exist)\">\n North London Business Park\n </a>\n , Oakleigh Road South\n </td>\n <td>\n 33.49\n </td>\n <td>\n 369,088\n </td>\n <td>\n <span class=\"plainlinks nourlexpansion\">\n <a class=\"external text\" href=\"//tools.wmflabs.org/geohack/geohack.php?pagename=List_of_London_boroughs&amp;params=51.6252_N_0.1517_W_region:GB_type:city&amp;title=Barnet\">\n <span class=\"geo-nondefault\">\n <span class=\"geo-dms\" title=\"Maps, aerial photos, and other data for this location\">\n <span class=\"latitude\">\n 51°37′31″N\n </span>\n <span class=\"longitude\">\n 0°09′06″W\n </span>\n </span>\n </span>\n <span class=\"geo-multi-punct\">\n  / \n </span>\n <span class=\"geo-default\">\n <span class=\"vcard\">\n <span class=\"geo-dec\" title=\"Maps, aerial photos, and other data for this location\">\n 51.6252°N 0.1517°W\n </span>\n <span style=\"display:none\">\n  /\n <span class=\"geo\">\n 51.6252; -0.1517\n </span>\n </span>\n <span style=\"display:none\">\n  (\n <span class=\"fn org\">\n Barnet\n </span>\n )\n </span>\n </span>\n </span>\n </a>\n </span>\n </td>\n <td>\n 31\n </td>\n </tr>\n <tr>\n <td>\n <a 
href=\"/wiki/London_Borough_of_Bexley\" title=\"London Borough of Bexley\">\n Bexley\n </a>\n </td>\n <td>\n </td>\n <td>\n </td>\n <td>\n <a href=\"/wiki/Bexley_London_Borough_Council\" title=\"Bexley London Borough Council\">\n Bexley London Borough Council\n </a>\n </td>\n <td>\n <a href=\"/wiki/Conservative_Party_(UK)\" title=\"Conservative Party (UK)\">\n Conservative\n </a>\n </td>\n <td>\n <a class=\"new\" href=\"/w/index.php?title=Civic_Offices&amp;action=edit&amp;redlink=1\" title=\"Civic Offices (page does not exist)\">\n Civic Offices\n </a>\n , 2 Watling Street\n </td>\n <td>\n 23.38\n </td>\n <td>\n 236,687\n </td>\n <td>\n <span class=\"plainlinks nourlexpansion\">\n <a class=\"external text\" href=\"//tools.wmflabs.org/geohack/geohack.php?pagename=List_of_London_boroughs&amp;params=51.4549_N_0.1505_E_region:GB_type:city&amp;title=Bexley\">\n <span class=\"geo-nondefault\">\n <span class=\"geo-dms\" title=\"Maps, aerial photos, and other data for this location\">\n <span class=\"latitude\">\n 51°27′18″N\n </span>\n <span class=\"longitude\">\n 0°09′02″E\n </span>\n </span>\n </span>\n <span class=\"geo-multi-punct\">\n  / \n </span>\n <span class=\"geo-default\">\n <span class=\"vcard\">\n <span class=\"geo-dec\" title=\"Maps, aerial photos, and other data for this location\">\n 51.4549°N 0.1505°E\n </span>\n <span style=\"display:none\">\n  /\n <span class=\"geo\">\n 51.4549; 0.1505\n </span>\n </span>\n <span style=\"display:none\">\n  (\n <span class=\"fn org\">\n Bexley\n </span>\n )\n </span>\n </span>\n </span>\n </a>\n </span>\n </td>\n <td>\n 23\n </td>\n </tr>\n <tr>\n <td>\n <a href=\"/wiki/London_Borough_of_Brent\" title=\"London Borough of Brent\">\n Brent\n </a>\n </td>\n <td>\n </td>\n <td>\n </td>\n <td>\n <a href=\"/wiki/Brent_London_Borough_Council\" title=\"Brent London Borough Council\">\n Brent London Borough Council\n </a>\n </td>\n <td>\n <a href=\"/wiki/Labour_Party_(UK)\" title=\"Labour Party (UK)\">\n Labour\n </a>\n </td>\n <td>\n <a href=\"/wiki/Brent_Civic_Centre\" title=\"Brent Civic Centre\">\n Brent Civic Centre\n </a>\n , Engineers Way\n </td>\n <td>\n 16.70\n </td>\n <td>\n 317,264\n </td>\n <td>\n <span class=\"plainlinks nourlexpansion\">\n <a class=\"external text\" href=\"//tools.wmflabs.org/geohack/geohack.php?pagename=List_of_London_boroughs&amp;params=51.5588_N_0.2817_W_region:GB_type:city&amp;title=Brent\">\n <span class=\"geo-nondefault\">\n <span class=\"geo-dms\" title=\"Maps, aerial photos, and other data for this location\">\n <span class=\"latitude\">\n 51°33′32″N\n </span>\n <span class=\"longitude\">\n 0°16′54″W\n </span>\n </span>\n </span>\n <span class=\"geo-multi-punct\">\n  / \n </span>\n <span class=\"geo-default\">\n <span class=\"vcard\">\n <span class=\"geo-dec\" title=\"Maps, aerial photos, and other data for this location\">\n 51.5588°N 0.2817°W\n </span>\n <span style=\"display:none\">\n  /\n <span class=\"geo\">\n 51.5588; -0.2817\n </span>\n </span>\n <span style=\"display:none\">\n  (\n <span class=\"fn org\">\n Brent\n </span>\n )\n </span>\n </span>\n </span>\n </a>\n </span>\n </td>\n <td>\n 12\n </td>\n </tr>\n <tr>\n <td>\n <a href=\"/wiki/London_Borough_of_Bromley\" title=\"London Borough of Bromley\">\n Bromley\n </a>\n </td>\n <td>\n </td>\n <td>\n </td>\n <td>\n <a href=\"/wiki/Bromley_London_Borough_Council\" title=\"Bromley London Borough Council\">\n Bromley London Borough Council\n </a>\n </td>\n <td>\n <a href=\"/wiki/Conservative_Party_(UK)\" title=\"Conservative Party (UK)\">\n Conservative\n 
</a>\n </td>\n <td>\n <a class=\"new\" href=\"/w/index.php?title=Bromley_Civic_Centre&amp;action=edit&amp;redlink=1\" title=\"Bromley Civic Centre (page does not exist)\">\n Civic Centre\n </a>\n , Stockwell Close\n </td>\n <td>\n 57.97\n </td>\n <td>\n 317,899\n </td>\n <td>\n <span class=\"plainlinks nourlexpansion\">\n <a class=\"external text\" href=\"//tools.wmflabs.org/geohack/geohack.php?pagename=List_of_London_boroughs&amp;params=51.4039_N_0.0198_E_region:GB_type:city&amp;title=Bromley\">\n <span class=\"geo-nondefault\">\n <span class=\"geo-dms\" title=\"Maps, aerial photos, and other data for this location\">\n <span class=\"latitude\">\n 51°24′14″N\n </span>\n <span class=\"longitude\">\n 0°01′11″E\n </span>\n </span>\n </span>\n <span class=\"geo-multi-punct\">\n  / \n </span>\n <span class=\"geo-default\">\n <span class=\"vcard\">\n <span class=\"geo-dec\" title=\"Maps, aerial photos, and other data for this location\">\n 51.4039°N 0.0198°E\n </span>\n <span style=\"display:none\">\n  /\n <span class=\"geo\">\n 51.4039; 0.0198\n </span>\n </span>\n <span style=\"display:none\">\n  (\n <span class=\"fn org\">\n Bromley\n </span>\n )\n </span>\n </span>\n </span>\n </a>\n </span>\n </td>\n <td>\n 20\n </td>\n </tr>\n <tr>\n <td>\n <a href=\"/wiki/London_Borough_of_Camden\" title=\"London Borough of Camden\">\n Camden\n </a>\n </td>\n <td>\n <img alt=\"☑\" data-file-height=\"600\" data-file-width=\"600\" decoding=\"async\" height=\"20\" src=\"//upload.wikimedia.org/wikipedia/en/thumb/f/fb/Yes_check.svg/20px-Yes_check.svg.png\" srcset=\"//upload.wikimedia.org/wikipedia/en/thumb/f/fb/Yes_check.svg/30px-Yes_check.svg.png 1.5x, //upload.wikimedia.org/wikipedia/en/thumb/f/fb/Yes_check.svg/40px-Yes_check.svg.png 2x\" width=\"20\"/>\n <span style=\"display:none\">\n Y\n </span>\n </td>\n <td>\n </td>\n <td>\n <a href=\"/wiki/Camden_London_Borough_Council\" title=\"Camden London Borough Council\">\n Camden London Borough Council\n </a>\n </td>\n <td>\n <a href=\"/wiki/Labour_Party_(UK)\" title=\"Labour Party (UK)\">\n Labour\n </a>\n </td>\n <td>\n <a href=\"/wiki/Camden_Town_Hall\" title=\"Camden Town Hall\">\n Camden Town Hall\n </a>\n , Judd Street\n </td>\n <td>\n 8.40\n </td>\n <td>\n 229,719\n </td>\n <td>\n <span class=\"plainlinks nourlexpansion\">\n <a class=\"external text\" href=\"//tools.wmflabs.org/geohack/geohack.php?pagename=List_of_London_boroughs&amp;params=51.529_N_0.1255_W_region:GB_type:city&amp;title=Camden\">\n <span class=\"geo-nondefault\">\n <span class=\"geo-dms\" title=\"Maps, aerial photos, and other data for this location\">\n <span class=\"latitude\">\n 51°31′44″N\n </span>\n <span class=\"longitude\">\n 0°07′32″W\n </span>\n </span>\n </span>\n <span class=\"geo-multi-punct\">\n  / \n </span>\n <span class=\"geo-default\">\n <span class=\"vcard\">\n <span class=\"geo-dec\" title=\"Maps, aerial photos, and other data for this location\">\n 51.5290°N 0.1255°W\n </span>\n <span style=\"display:none\">\n  /\n <span class=\"geo\">\n 51.5290; -0.1255\n </span>\n </span>\n <span style=\"display:none\">\n  (\n <span class=\"fn org\">\n Camden\n </span>\n )\n </span>\n </span>\n </span>\n </a>\n </span>\n </td>\n <td>\n 11\n </td>\n </tr>\n <tr>\n <td>\n <a href=\"/wiki/London_Borough_of_Croydon\" title=\"London Borough of Croydon\">\n Croydon\n </a>\n </td>\n <td>\n </td>\n <td>\n </td>\n <td>\n <a href=\"/wiki/Croydon_London_Borough_Council\" title=\"Croydon London Borough Council\">\n Croydon London Borough Council\n </a>\n </td>\n <td>\n <a 
href=\"/wiki/Labour_Party_(UK)\" title=\"Labour Party (UK)\">\n Labour\n </a>\n </td>\n <td>\n <a class=\"new\" href=\"/w/index.php?title=Bernard_Weatherill_House&amp;action=edit&amp;redlink=1\" title=\"Bernard Weatherill House (page does not exist)\">\n Bernard Weatherill House\n </a>\n , Mint Walk\n </td>\n <td>\n 33.41\n </td>\n <td>\n 372,752\n </td>\n <td>\n <span class=\"plainlinks nourlexpansion\">\n <a class=\"external text\" href=\"//tools.wmflabs.org/geohack/geohack.php?pagename=List_of_London_boroughs&amp;params=51.3714_N_0.0977_W_region:GB_type:city&amp;title=Croydon\">\n <span class=\"geo-nondefault\">\n <span class=\"geo-dms\" title=\"Maps, aerial photos, and other data for this location\">\n <span class=\"latitude\">\n 51°22′17″N\n </span>\n <span class=\"longitude\">\n 0°05′52″W\n </span>\n </span>\n </span>\n <span class=\"geo-multi-punct\">\n  / \n </span>\n <span class=\"geo-default\">\n <span class=\"vcard\">\n <span class=\"geo-dec\" title=\"Maps, aerial photos, and other data for this location\">\n 51.3714°N 0.0977°W\n </span>\n <span style=\"display:none\">\n  /\n <span class=\"geo\">\n 51.3714; -0.0977\n </span>\n </span>\n <span style=\"display:none\">\n  (\n <span class=\"fn org\">\n Croydon\n </span>\n )\n </span>\n </span>\n </span>\n </a>\n </span>\n </td>\n <td>\n 19\n </td>\n </tr>\n <tr>\n <td>\n <a href=\"/wiki/London_Borough_of_Ealing\" title=\"London Borough of Ealing\">\n Ealing\n </a>\n </td>\n <td>\n </td>\n <td>\n </td>\n <td>\n <a href=\"/wiki/Ealing_London_Borough_Council\" title=\"Ealing London Borough Council\">\n Ealing London Borough Council\n </a>\n </td>\n <td>\n <a href=\"/wiki/Labour_Party_(UK)\" title=\"Labour Party (UK)\">\n Labour\n </a>\n </td>\n <td>\n <a class=\"new\" href=\"/w/index.php?title=Perceval_House,_Ealing&amp;action=edit&amp;redlink=1\" title=\"Perceval House, Ealing (page does not exist)\">\n Perceval House\n </a>\n , 14-16 Uxbridge Road\n </td>\n <td>\n 21.44\n </td>\n <td>\n 342,494\n </td>\n <td>\n <span class=\"plainlinks nourlexpansion\">\n <a class=\"external text\" href=\"//tools.wmflabs.org/geohack/geohack.php?pagename=List_of_London_boroughs&amp;params=51.513_N_0.3089_W_region:GB_type:city&amp;title=Ealing\">\n <span class=\"geo-nondefault\">\n <span class=\"geo-dms\" title=\"Maps, aerial photos, and other data for this location\">\n <span class=\"latitude\">\n 51°30′47″N\n </span>\n <span class=\"longitude\">\n 0°18′32″W\n </span>\n </span>\n </span>\n <span class=\"geo-multi-punct\">\n  / \n </span>\n <span class=\"geo-default\">\n <span class=\"vcard\">\n <span class=\"geo-dec\" title=\"Maps, aerial photos, and other data for this location\">\n 51.5130°N 0.3089°W\n </span>\n <span style=\"display:none\">\n  /\n <span class=\"geo\">\n 51.5130; -0.3089\n </span>\n </span>\n <span style=\"display:none\">\n  (\n <span class=\"fn org\">\n Ealing\n </span>\n )\n </span>\n </span>\n </span>\n </a>\n </span>\n </td>\n <td>\n 13\n </td>\n </tr>\n <tr>\n <td>\n <a href=\"/wiki/London_Borough_of_Enfield\" title=\"London Borough of Enfield\">\n Enfield\n </a>\n </td>\n <td>\n </td>\n <td>\n </td>\n <td>\n <a href=\"/wiki/Enfield_London_Borough_Council\" title=\"Enfield London Borough Council\">\n Enfield London Borough Council\n </a>\n </td>\n <td>\n <a href=\"/wiki/Labour_Party_(UK)\" title=\"Labour Party (UK)\">\n Labour\n </a>\n </td>\n <td>\n <a class=\"new\" href=\"/w/index.php?title=Enfield_Civic_Centre&amp;action=edit&amp;redlink=1\" title=\"Enfield Civic Centre (page does not exist)\">\n Civic Centre\n </a>\n , 
Silver Street\n </td>\n <td>\n 31.74\n </td>\n <td>\n 320,524\n </td>\n <td>\n <span class=\"plainlinks nourlexpansion\">\n <a class=\"external text\" href=\"//tools.wmflabs.org/geohack/geohack.php?pagename=List_of_London_boroughs&amp;params=51.6538_N_0.0799_W_region:GB_type:city&amp;title=Enfield\">\n <span class=\"geo-nondefault\">\n <span class=\"geo-dms\" title=\"Maps, aerial photos, and other data for this location\">\n <span class=\"latitude\">\n 51°39′14″N\n </span>\n <span class=\"longitude\">\n 0°04′48″W\n </span>\n </span>\n </span>\n <span class=\"geo-multi-punct\">\n  / \n </span>\n <span class=\"geo-default\">\n <span class=\"vcard\">\n <span class=\"geo-dec\" title=\"Maps, aerial photos, and other data for this location\">\n 51.6538°N 0.0799°W\n </span>\n <span style=\"display:none\">\n  /\n <span class=\"geo\">\n 51.6538; -0.0799\n </span>\n </span>\n <span style=\"display:none\">\n  (\n <span class=\"fn org\">\n Enfield\n </span>\n )\n </span>\n </span>\n </span>\n </a>\n </span>\n </td>\n <td>\n 30\n </td>\n </tr>\n <tr>\n <td>\n <a href=\"/wiki/Royal_Borough_of_Greenwich\" title=\"Royal Borough of Greenwich\">\n Greenwich\n </a>\n <sup class=\"reference\" id=\"cite_ref-3\">\n <a href=\"#cite_note-3\">\n [note 2]\n </a>\n </sup>\n </td>\n <td>\n <img alt=\"☑\" data-file-height=\"600\" data-file-width=\"600\" decoding=\"async\" height=\"20\" src=\"//upload.wikimedia.org/wikipedia/en/thumb/f/fb/Yes_check.svg/20px-Yes_check.svg.png\" srcset=\"//upload.wikimedia.org/wikipedia/en/thumb/f/fb/Yes_check.svg/30px-Yes_check.svg.png 1.5x, //upload.wikimedia.org/wikipedia/en/thumb/f/fb/Yes_check.svg/40px-Yes_check.svg.png 2x\" width=\"20\"/>\n <span style=\"display:none\">\n Y\n </span>\n <sup class=\"reference\" id=\"cite_ref-note2_4-0\">\n <a href=\"#cite_note-note2-4\">\n [note 3]\n </a>\n </sup>\n </td>\n <td>\n <a class=\"mw-redirect\" href=\"/wiki/Royal_borough\" title=\"Royal borough\">\n Royal\n </a>\n </td>\n <td>\n <a href=\"/wiki/Greenwich_London_Borough_Council\" title=\"Greenwich London Borough Council\">\n Greenwich London Borough Council\n </a>\n </td>\n <td>\n <a href=\"/wiki/Labour_Party_(UK)\" title=\"Labour Party (UK)\">\n Labour\n </a>\n </td>\n <td>\n <a href=\"/wiki/Woolwich_Town_Hall\" title=\"Woolwich Town Hall\">\n Woolwich Town Hall\n </a>\n , Wellington Street\n </td>\n <td>\n 18.28\n </td>\n <td>\n 264,008\n </td>\n <td>\n <span class=\"plainlinks nourlexpansion\">\n <a class=\"external text\" href=\"//tools.wmflabs.org/geohack/geohack.php?pagename=List_of_London_boroughs&amp;params=51.4892_N_0.0648_E_region:GB_type:city&amp;title=Greenwich\">\n <span class=\"geo-nondefault\">\n <span class=\"geo-dms\" title=\"Maps, aerial photos, and other data for this location\">\n <span class=\"latitude\">\n 51°29′21″N\n </span>\n <span class=\"longitude\">\n 0°03′53″E\n </span>\n </span>\n </span>\n <span class=\"geo-multi-punct\">\n  / \n </span>\n <span class=\"geo-default\">\n <span class=\"vcard\">\n <span class=\"geo-dec\" title=\"Maps, aerial photos, and other data for this location\">\n 51.4892°N 0.0648°E\n </span>\n <span style=\"display:none\">\n  /\n <span class=\"geo\">\n 51.4892; 0.0648\n </span>\n </span>\n <span style=\"display:none\">\n  (\n <span class=\"fn org\">\n Greenwich\n </span>\n )\n </span>\n </span>\n </span>\n </a>\n </span>\n </td>\n <td>\n 22\n </td>\n </tr>\n <tr>\n <td>\n <a href=\"/wiki/London_Borough_of_Hackney\" title=\"London Borough of Hackney\">\n Hackney\n </a>\n </td>\n <td>\n <img alt=\"☑\" data-file-height=\"600\" 
data-file-width=\"600\" decoding=\"async\" height=\"20\" src=\"//upload.wikimedia.org/wikipedia/en/thumb/f/fb/Yes_check.svg/20px-Yes_check.svg.png\" srcset=\"//upload.wikimedia.org/wikipedia/en/thumb/f/fb/Yes_check.svg/30px-Yes_check.svg.png 1.5x, //upload.wikimedia.org/wikipedia/en/thumb/f/fb/Yes_check.svg/40px-Yes_check.svg.png 2x\" width=\"20\"/>\n <span style=\"display:none\">\n Y\n </span>\n </td>\n <td>\n </td>\n <td>\n <a href=\"/wiki/Hackney_London_Borough_Council\" title=\"Hackney London Borough Council\">\n Hackney London Borough Council\n </a>\n </td>\n <td>\n <a href=\"/wiki/Labour_Party_(UK)\" title=\"Labour Party (UK)\">\n Labour\n </a>\n </td>\n <td>\n <a class=\"new\" href=\"/w/index.php?title=Hackney_Town_Hall&amp;action=edit&amp;redlink=1\" title=\"Hackney Town Hall (page does not exist)\">\n Hackney Town Hall\n </a>\n , Mare Street\n </td>\n <td>\n 7.36\n </td>\n <td>\n 257,379\n </td>\n <td>\n <span class=\"plainlinks nourlexpansion\">\n <a class=\"external text\" href=\"//tools.wmflabs.org/geohack/geohack.php?pagename=List_of_London_boroughs&amp;params=51.545_N_0.0553_W_region:GB_type:city&amp;title=Hackney\">\n <span class=\"geo-nondefault\">\n <span class=\"geo-dms\" title=\"Maps, aerial photos, and other data for this location\">\n <span class=\"latitude\">\n 51°32′42″N\n </span>\n <span class=\"longitude\">\n 0°03′19″W\n </span>\n </span>\n </span>\n <span class=\"geo-multi-punct\">\n  / \n </span>\n <span class=\"geo-default\">\n <span class=\"vcard\">\n <span class=\"geo-dec\" title=\"Maps, aerial photos, and other data for this location\">\n 51.5450°N 0.0553°W\n </span>\n <span style=\"display:none\">\n  /\n <span class=\"geo\">\n 51.5450; -0.0553\n </span>\n </span>\n <span style=\"display:none\">\n  (\n <span class=\"fn org\">\n Hackney\n </span>\n )\n </span>\n </span>\n </span>\n </a>\n </span>\n </td>\n <td>\n 9\n </td>\n </tr>\n <tr>\n <td>\n <a href=\"/wiki/London_Borough_of_Hammersmith_and_Fulham\" title=\"London Borough of Hammersmith and Fulham\">\n Hammersmith and Fulham\n </a>\n <sup class=\"reference\" id=\"cite_ref-5\">\n <a href=\"#cite_note-5\">\n [note 4]\n </a>\n </sup>\n </td>\n <td>\n <img alt=\"☑\" data-file-height=\"600\" data-file-width=\"600\" decoding=\"async\" height=\"20\" src=\"//upload.wikimedia.org/wikipedia/en/thumb/f/fb/Yes_check.svg/20px-Yes_check.svg.png\" srcset=\"//upload.wikimedia.org/wikipedia/en/thumb/f/fb/Yes_check.svg/30px-Yes_check.svg.png 1.5x, //upload.wikimedia.org/wikipedia/en/thumb/f/fb/Yes_check.svg/40px-Yes_check.svg.png 2x\" width=\"20\"/>\n <span style=\"display:none\">\n Y\n </span>\n </td>\n <td>\n </td>\n <td>\n <a href=\"/wiki/Hammersmith_and_Fulham_London_Borough_Council\" title=\"Hammersmith and Fulham London Borough Council\">\n Hammersmith and Fulham London Borough Council\n </a>\n </td>\n <td>\n <a href=\"/wiki/Labour_Party_(UK)\" title=\"Labour Party (UK)\">\n Labour\n </a>\n </td>\n <td>\n <a class=\"new\" href=\"/w/index.php?title=Hammersmith_and_Fulham_Town_Hall&amp;action=edit&amp;redlink=1\" title=\"Hammersmith and Fulham Town Hall (page does not exist)\">\n Town Hall\n </a>\n , King Street\n </td>\n <td>\n 6.33\n </td>\n <td>\n 178,685\n </td>\n <td>\n <span class=\"plainlinks nourlexpansion\">\n <a class=\"external text\" href=\"//tools.wmflabs.org/geohack/geohack.php?pagename=List_of_London_boroughs&amp;params=51.4927_N_0.2339_W_region:GB_type:city&amp;title=Hammersmith+and+Fulham\">\n <span class=\"geo-nondefault\">\n <span class=\"geo-dms\" title=\"Maps, aerial photos, and other data for this 
">
| Borough | Inner | Status | Local authority | Political control | Headquarters | Area (sq mi) | Population (2011 est) | Co-ordinates | Nr. in map |
|---|---|---|---|---|---|---|---|---|---|
| Hammersmith and Fulham | … | … | … | … | … | … | … | 51.4927°N 0.2339°W | 4 |
| Haringey | [note 3] | | Haringey London Borough Council | Labour | Civic Centre, High Road | 11.42 | 263,386 | 51.6000°N 0.1119°W | 29 |
| Harrow | | | Harrow London Borough Council | Labour | Civic Centre, Station Road | 19.49 | 243,372 | 51.5898°N 0.3346°W | 32 |
| Havering | | | Havering London Borough Council | Conservative (council NOC) | Town Hall, Main Road | 43.35 | 242,080 | 51.5812°N 0.1837°E | 24 |
| Hillingdon | | | Hillingdon London Borough Council | Conservative | Civic Centre, High Street | 44.67 | 286,806 | 51.5441°N 0.4760°W | 33 |
| Hounslow | | | Hounslow London Borough Council | Labour | Civic Centre, Lampton Road | 21.61 | 262,407 | 51.4746°N 0.3680°W | 14 |
| Islington | ☑ | | Islington London Borough Council | Labour | Municipal Offices, 222 Upper Street | 5.74 | 215,667 | 51.5416°N 0.1022°W | 10 |
| Kensington and Chelsea | ☑ | Royal | Kensington and Chelsea London Borough Council | Conservative | The Town Hall, Hornton Street | 4.68 | 155,594 | 51.5020°N 0.1947°W | 3 |
| Kingston upon Thames | | Royal | Kingston upon Thames London Borough Council | Liberal Democrat | Guildhall, High Street | 14.38 | 166,793 | 51.4085°N 0.3064°W | 16 |
| Lambeth | ☑ | | Lambeth London Borough Council | Labour | Lambeth Town Hall, Brixton Hill | 10.36 | 314,242 | 51.4607°N 0.1163°W | 6 |
| Lewisham | ☑ | | Lewisham London Borough Council | Labour | Town Hall, 1 Catford Road | 13.57 | 286,180 | 51.4452°N 0.0209°W | 21 |
| Merton | | | Merton London Borough Council | Labour | Civic Centre, London Road | 14.52 | 203,223 | 51.4014°N 0.1958°W | 17 |
| Newham | [note 3] | | Newham London Borough Council | Labour | Newham Dockside, 1000 Dockside Road | 13.98 | 318,227 | 51.5077°N 0.0469°E | 27 |
| Redbridge | | | Redbridge London Borough Council | Labour | Town Hall, 128-142 High Road | 21.78 | 288,272 | 51.5590°N 0.0741°E | 26 |
| Richmond upon Thames | | | Richmond upon Thames London Borough Council | Liberal Democrat | Civic Centre, 44 York Street | 22.17 | 191,365 | 51.4479°N 0.3260°W | 15 |
| Southwark | ☑ | | Southwark London Borough Council | Labour | 160 Tooley Street | 11.14 | 298,464 | 51.5035°N 0.0804°W | 7 |
| Sutton | | | Sutton London Borough Council | Liberal Democrat | Civic Offices, St Nicholas Way | 16.93 | 195,914 | 51.3618°N 0.1945°W | 18 |
| Tower Hamlets | ☑ | | Tower Hamlets London Borough Council | Labour | Town Hall, Mulberry Place, 5 Clove Crescent | 7.63 | 272,890 | 51.5099°N 0.0059°W | 8 |
| Waltham Forest | | | Waltham Forest London Borough Council | Labour | Waltham Forest Town Hall, Forest Road | 14.99 | 265,797 | 51.5908°N 0.0134°W | 28 |
| Wandsworth | ☑ | | Wandsworth London Borough Council | Conservative | The Town Hall, Wandsworth High Street | 13.23 | 310,516 | 51.4567°N 0.1910°W | 5 |
| Westminster | ☑ | City | Westminster City Council | Conservative | Westminster City Hall, 64 Victoria Street | 8.29 | 226,841 | 51.4973°N 0.1372°W | 2 |

## City of London

The City of London is the 33rd principal division of Greater London, but it is not a London borough.

| ‘Borough’ | Inner | Status | Local authority | Political control | Headquarters | Area (sq mi) | Population (2011 est) | Co-ordinates | Nr. in map |
|---|---|---|---|---|---|---|---|---|---|
| City of London | (☑) [note 5] | Sui generis; City; Ceremonial county | Corporation of London; Inner Temple; Middle Temple | ? | Guildhall | 1.12 | 7,000 | 51.5155°N 0.0922°W | 1 |
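A minimal sketch of how tables like the two above can be pulled straight into DataFrames; this is an illustrative assumption, not tooling from the page itself, and it presumes pandas plus an HTML parsing backend (lxml, or bs4 with html5lib) are installed and the article URL is reachable.

```python
import pandas as pd

URL = "https://en.wikipedia.org/wiki/List_of_London_boroughs"

# match= keeps only tables whose text contains the given string; on this page
# that should select the 32-borough table and the single-row City of London
# table (an assumption about the current revision's layout).
tables = pd.read_html(URL, match="Local authority")
boroughs, city_of_london = tables[0], tables[1]

print(boroughs.shape)   # one row per borough
print(boroughs.head())
```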
## See also

- Political make-up of London borough councils
- List of areas of London
- Subdivisions of England

## Notes

1. Renamed from London Borough of Barking 1 January 1980. "No. 48021". *The London Gazette*. 4 December 1979. p. 15280.
2. Royal borough from 2012.
3. Haringey and Newham are Inner London for statistics; Greenwich is Outer London for statistics.
4. Renamed from London Borough of Hammersmith 1 April 1979. "No. 47771". *The London Gazette*. 13 February 1979. p. 2095.
5. The City of London was not part of the County of London and is not a London Borough, but can be counted to Inner London.
## External links

- [London Councils: List of inner/outer London boroughs](https://web.archive.org/web/20101010011530/http://londoncouncils.gov.uk/londonlocalgovernment/londonboroughs.htm)
- [London Boroughs Map](http://londonboroughsmap.co.uk/)
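The co-ordinates shown in the tables are also carried in machine-readable form in the page's markup: each row holds a hidden `<span class="geo">` with the decimal pair (e.g. `51.6000; -0.1119` for Haringey), which is easier to parse than the visible degrees-minutes-seconds text. A short sketch of extracting those pairs, assuming `requests` and `beautifulsoup4` are available:

```python
import requests
from bs4 import BeautifulSoup

URL = "https://en.wikipedia.org/wiki/List_of_London_boroughs"
soup = BeautifulSoup(requests.get(URL).text, "html.parser")

coords = {}
for geo in soup.select("span.geo"):
    # the borough name sits in the <span class="fn org"> that follows each pair
    name = geo.find_next("span", class_="fn org").get_text(strip=True)
    lat, lon = (float(part) for part in geo.get_text(strip=True).split(";"))
    coords[name] = (lat, lon)

print(coords.get("Haringey"))   # (51.6, -0.1119)
```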
navbox-odd\" style=\"text-align:left;border-left-width:2px;border-left-style:solid;width:100%;padding:0px\">\n <div style=\"padding:0em 0.25em\">\n <ul>\n <li>\n <b>\n <a href=\"/wiki/Greater_London_Authority\" title=\"Greater London Authority\">\n Greater London Authority\n </a>\n :\n </b>\n <a href=\"/wiki/London_Assembly\" title=\"London Assembly\">\n London Assembly\n </a>\n </li>\n <li>\n <a href=\"/wiki/Mayor_of_London\" title=\"Mayor of London\">\n Mayor of London\n </a>\n </li>\n </ul>\n </div>\n </td>\n </tr>\n <tr>\n <th class=\"navbox-group\" scope=\"row\" style=\"width:1%\">\n <a href=\"/wiki/London_boroughs\" title=\"London boroughs\">\n Boroughs\n </a>\n </th>\n <td class=\"navbox-list navbox-even\" style=\"text-align:left;border-left-width:2px;border-left-style:solid;width:100%;padding:0px\">\n <div style=\"padding:0em 0.25em\">\n <ul>\n <li>\n <b>\n <a href=\"/wiki/London_Councils\" title=\"London Councils\">\n London Councils\n </a>\n :\n </b>\n <a href=\"/wiki/London_Borough_of_Barking_and_Dagenham\" title=\"London Borough of Barking and Dagenham\">\n Barking and Dagenham\n </a>\n </li>\n <li>\n <a href=\"/wiki/London_Borough_of_Barnet\" title=\"London Borough of Barnet\">\n Barnet\n </a>\n </li>\n <li>\n <a href=\"/wiki/London_Borough_of_Bexley\" title=\"London Borough of Bexley\">\n Bexley\n </a>\n </li>\n <li>\n <a href=\"/wiki/London_Borough_of_Brent\" title=\"London Borough of Brent\">\n Brent\n </a>\n </li>\n <li>\n <a href=\"/wiki/London_Borough_of_Bromley\" title=\"London Borough of Bromley\">\n Bromley\n </a>\n </li>\n <li>\n <a href=\"/wiki/London_Borough_of_Camden\" title=\"London Borough of Camden\">\n Camden\n </a>\n </li>\n <li>\n <a href=\"/wiki/London_Borough_of_Croydon\" title=\"London Borough of Croydon\">\n Croydon\n </a>\n </li>\n <li>\n <a href=\"/wiki/London_Borough_of_Ealing\" title=\"London Borough of Ealing\">\n Ealing\n </a>\n </li>\n <li>\n <a href=\"/wiki/London_Borough_of_Enfield\" title=\"London Borough of Enfield\">\n Enfield\n </a>\n </li>\n <li>\n <a href=\"/wiki/Royal_Borough_of_Greenwich\" title=\"Royal Borough of Greenwich\">\n Greenwich\n </a>\n </li>\n <li>\n <a href=\"/wiki/London_Borough_of_Hackney\" title=\"London Borough of Hackney\">\n Hackney\n </a>\n </li>\n <li>\n <a href=\"/wiki/London_Borough_of_Hammersmith_and_Fulham\" title=\"London Borough of Hammersmith and Fulham\">\n Hammersmith and Fulham\n </a>\n </li>\n <li>\n <a href=\"/wiki/London_Borough_of_Haringey\" title=\"London Borough of Haringey\">\n Haringey\n </a>\n </li>\n <li>\n <a href=\"/wiki/London_Borough_of_Harrow\" title=\"London Borough of Harrow\">\n Harrow\n </a>\n </li>\n <li>\n <a href=\"/wiki/London_Borough_of_Havering\" title=\"London Borough of Havering\">\n Havering\n </a>\n </li>\n <li>\n <a href=\"/wiki/London_Borough_of_Hillingdon\" title=\"London Borough of Hillingdon\">\n Hillingdon\n </a>\n </li>\n <li>\n <a href=\"/wiki/London_Borough_of_Hounslow\" title=\"London Borough of Hounslow\">\n Hounslow\n </a>\n </li>\n <li>\n <a href=\"/wiki/London_Borough_of_Islington\" title=\"London Borough of Islington\">\n Islington\n </a>\n </li>\n <li>\n <a href=\"/wiki/Royal_Borough_of_Kensington_and_Chelsea\" title=\"Royal Borough of Kensington and Chelsea\">\n Kensington and Chelsea\n </a>\n </li>\n <li>\n <a href=\"/wiki/Royal_Borough_of_Kingston_upon_Thames\" title=\"Royal Borough of Kingston upon Thames\">\n Kingston upon Thames\n </a>\n </li>\n <li>\n <a href=\"/wiki/London_Borough_of_Lambeth\" title=\"London Borough of Lambeth\">\n Lambeth\n </a>\n 
...[remainder of the dumped page source omitted: London-boroughs navbox links, MediaWiki parser limit reports, category links, site navigation and search widgets, footer, and inline scripts]...\n </body>\n</html>\n"
  ],
  [
    "# extracting the raw table inside that webpage\ntable = soup.find_all('table', {'class':'wikitable sortable'})\nprint(table)",
    "[<table class=\"wikitable sortable\" style=\"font-size:100%\" width=\"100%\">\n<tbody><tr>\n<th>Borough\n</th>\n<th>Inner\n</th>\n<th>Status\n</th>\n<th>Local authority\n</th>\n<th>Political control\n</th>\n<th>Headquarters\n</th>\n<th>Area (sq mi)\n</th>\n<th>Population (2013 est)<sup class=\"reference\" id=\"cite_ref-1\"><a href=\"#cite_note-1\">[1]</a></sup>\n</th>\n<th>Co-ordinates\n</th>\n<th><span style=\"background:#67BCD3\"> Nr. in map </span>\n</th></tr>\n<tr>\n<td><a href=\"/wiki/London_Borough_of_Barking_and_Dagenham\" title=\"London Borough of Barking and Dagenham\">Barking and Dagenham</a> <sup class=\"reference\" id=\"cite_ref-2\"><a href=\"#cite_note-2\">[note 1]</a></sup>\n</td>\n<td>\n</td>\n<td>\n</td>\n<td><a href=\"/wiki/Barking_and_Dagenham_London_Borough_Council\" title=\"Barking and Dagenham London Borough Council\">Barking and Dagenham London Borough Council</a>\n</td>\n<td><a href=\"/wiki/Labour_Party_(UK)\" title=\"Labour Party (UK)\">Labour</a>\n</td>\n<td><a class=\"new\" href=\"/w/index.php?title=Barking_Town_Hall&amp;action=edit&amp;redlink=1\" title=\"Barking Town Hall (page does not exist)\">Town Hall</a>, 1 Town Square\n</td>\n<td>13.93\n</td>\n<td>194,352\n</td>\n<td><span class=\"plainlinks nourlexpansion\"><a class=\"external text\" href=\"//tools.wmflabs.org/geohack/geohack.php?pagename=List_of_London_boroughs&amp;params=51.5607_N_0.1557_E_region:GB_type:city&amp;title=Barking+and+Dagenham\"><span class=\"geo-nondefault\"><span class=\"geo-dms\" title=\"Maps, aerial photos, and other data for this location\"><span class=\"latitude\">51°33′39″N</span> <span class=\"longitude\">0°09′21″E</span></span></span><span class=\"geo-multi-punct\"> / </span><span class=\"geo-default\"><span class=\"vcard\"><span class=\"geo-dec\" title=\"Maps, aerial photos, and other data for this location\">51.5607°N 0.1557°E</span><span style=\"display:none\"> / <span class=\"geo\">51.5607; 0.1557</span></span><span style=\"display:none\"> (<span class=\"fn org\">Barking and Dagenham</span>)</span></span></span></a></span>\n</td>\n<td>25\n</td></tr>
...[30 further borough rows, Barnet through Wandsworth, omitted for brevity; each repeats the same ten fields (borough, inner-London flag, status, local authority, political control, headquarters, area, 2013 population, co-ordinates, map number)]...\n<tr>\n<td><a href=\"/wiki/City_of_Westminster\" title=\"City of Westminster\">Westminster</a>\n</td>\n<td><img alt=\"☑\" data-file-height=\"600\" data-file-width=\"600\" decoding=\"async\" height=\"20\" src=\"//upload.wikimedia.org/wikipedia/en/thumb/f/fb/Yes_check.svg/20px-Yes_check.svg.png\" srcset=\"//upload.wikimedia.org/wikipedia/en/thumb/f/fb/Yes_check.svg/30px-Yes_check.svg.png 1.5x, //upload.wikimedia.org/wikipedia/en/thumb/f/fb/Yes_check.svg/40px-Yes_check.svg.png 2x\" width=\"20\"/><span style=\"display:none\">Y</span>\n</td>\n<td><a href=\"/wiki/City_status_in_the_United_Kingdom\" title=\"City status in the 
United Kingdom\">City</a>\n</td>\n<td><a href=\"/wiki/Westminster_City_Council\" title=\"Westminster City Council\">Westminster City Council</a>\n</td>\n<td><a href=\"/wiki/Conservative_Party_(UK)\" title=\"Conservative Party (UK)\">Conservative</a>\n</td>\n<td><a class=\"new\" href=\"/w/index.php?title=Westminster_City_Hall&amp;action=edit&amp;redlink=1\" title=\"Westminster City Hall (page does not exist)\">Westminster City Hall</a>, 64 Victoria Street\n</td>\n<td>8.29\n</td>\n<td>226,841\n</td>\n<td><span class=\"plainlinks nourlexpansion\"><a class=\"external text\" href=\"//tools.wmflabs.org/geohack/geohack.php?pagename=List_of_London_boroughs&amp;params=51.4973_N_0.1372_W_region:GB_type:city&amp;title=Westminster\"><span class=\"geo-nondefault\"><span class=\"geo-dms\" title=\"Maps, aerial photos, and other data for this location\"><span class=\"latitude\">51°29′50″N</span> <span class=\"longitude\">0°08′14″W</span></span></span><span class=\"geo-multi-punct\"> / </span><span class=\"geo-default\"><span class=\"vcard\"><span class=\"geo-dec\" title=\"Maps, aerial photos, and other data for this location\">51.4973°N 0.1372°W</span><span style=\"display:none\"> / <span class=\"geo\">51.4973; -0.1372</span></span><span style=\"display:none\"> (<span class=\"fn org\">Westminster</span>)</span></span></span></a></span>\n</td>\n<td>2\n</td></tr></tbody></table>, <table class=\"wikitable sortable\" style=\"font-size:95%\" width=\"100%\">\n<tbody><tr>\n<th width=\"100px\"><i>‘Borough’</i>\n</th>\n<th>Inner\n</th>\n<th width=\"100px\">Status\n</th>\n<th>Local authority\n</th>\n<th>Political control\n</th>\n<th width=\"120px\">Headquarters\n</th>\n<th>Area<br/>(sq mi)\n</th>\n<th>Population<br/>(2011 est)\n</th>\n<th width=\"20px\">Co-ordinates\n</th>\n<th><span style=\"background:#67BCD3\"> Nr. in<br/>map </span>\n</th></tr>\n<tr>\n<td><a href=\"/wiki/City_of_London\" title=\"City of London\">City of London</a>\n</td>\n<td>(<img alt=\"☑\" data-file-height=\"600\" data-file-width=\"600\" decoding=\"async\" height=\"20\" src=\"//upload.wikimedia.org/wikipedia/en/thumb/f/fb/Yes_check.svg/20px-Yes_check.svg.png\" srcset=\"//upload.wikimedia.org/wikipedia/en/thumb/f/fb/Yes_check.svg/30px-Yes_check.svg.png 1.5x, //upload.wikimedia.org/wikipedia/en/thumb/f/fb/Yes_check.svg/40px-Yes_check.svg.png 2x\" width=\"20\"/><span style=\"display:none\">Y</span>)<br/><sup class=\"reference\" id=\"cite_ref-6\"><a href=\"#cite_note-6\">[note 5]</a></sup>\n</td>\n<td><i><a href=\"/wiki/Sui_generis\" title=\"Sui generis\">Sui generis</a></i>;<br/><a href=\"/wiki/City_status_in_the_United_Kingdom\" title=\"City status in the United Kingdom\">City</a>;<br/><a href=\"/wiki/Ceremonial_counties_of_England\" title=\"Ceremonial counties of England\">Ceremonial county</a>\n</td>\n<td><a class=\"mw-redirect\" href=\"/wiki/Corporation_of_London\" title=\"Corporation of London\">Corporation of London</a>;<br/><a href=\"/wiki/Inner_Temple\" title=\"Inner Temple\">Inner Temple</a>;<br/><a href=\"/wiki/Middle_Temple\" title=\"Middle Temple\">Middle Temple</a>\n</td>\n<td>? 
\n</td>\n<td><a href=\"/wiki/Guildhall,_London\" title=\"Guildhall, London\">Guildhall</a>\n</td>\n<td>1.12\n</td>\n<td>7,000\n</td>\n<td><span class=\"plainlinks nourlexpansion\"><a class=\"external text\" href=\"//tools.wmflabs.org/geohack/geohack.php?pagename=List_of_London_boroughs&amp;params=51.5155_N_0.0922_W_region:GB_type:city&amp;title=City+of+London\"><span class=\"geo-nondefault\"><span class=\"geo-dms\" title=\"Maps, aerial photos, and other data for this location\"><span class=\"latitude\">51°30′56″N</span> <span class=\"longitude\">0°05′32″W</span></span></span><span class=\"geo-multi-punct\"> / </span><span class=\"geo-default\"><span class=\"vcard\"><span class=\"geo-dec\" title=\"Maps, aerial photos, and other data for this location\">51.5155°N 0.0922°W</span><span style=\"display:none\"> / <span class=\"geo\">51.5155; -0.0922</span></span><span style=\"display:none\"> (<span class=\"fn org\">City of London</span>)</span></span></span></a></span>\n</td>\n<td>1\n</td></tr></tbody></table>]\n" ] ], [ [ "#### Converting the table into a data frame ", "_____no_output_____" ] ], [ [ "London_table = pd.read_html(str(table[0]), index_col=None, header=0)[0]\nLondon_table.head()", "_____no_output_____" ] ], [ [ "#### The second table on the site contains the addition Borough i.e. City of London", "_____no_output_____" ] ], [ [ "# Read in the second table \nLondon_table1 = pd.read_html(str(table[1]), index_col=None, header=0)[0]\n\n# Rename the columns to match the previous table to append the tables.\n\nLondon_table1.columns = ['Borough','Inner','Status','Local authority','Political control',\n 'Headquarters','Area (sq mi)','Population (2013 est)[1]','Co-ordinates','Nr. in map']\n\n# View the table\nLondon_table1", "_____no_output_____" ] ], [ [ "#### Append the data frame together", "_____no_output_____" ] ], [ [ "# A continuous index value will be maintained \n# across the rows in the new appended data frame. \n\nLondon_table = London_table.append(London_table1, ignore_index = True) \nLondon_table.head()", "_____no_output_____" ] ], [ [ "#### Check if the last row was appended correctly", "_____no_output_____" ] ], [ [ "London_table.tail()", "_____no_output_____" ] ], [ [ "#### View the information of the data set", "_____no_output_____" ] ], [ [ "London_table.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 33 entries, 0 to 32\nData columns (total 10 columns):\nBorough 33 non-null object\nInner 15 non-null object\nStatus 5 non-null object\nLocal authority 33 non-null object\nPolitical control 33 non-null object\nHeadquarters 33 non-null object\nArea (sq mi) 33 non-null float64\nPopulation (2013 est)[1] 33 non-null int64\nCo-ordinates 33 non-null object\nNr. 
[ [ "#### Removing unnecessary strings from the data set", "_____no_output_____" ] ], [ [ "London_table = London_table.replace('note 1','', regex=True) \nLondon_table = London_table.replace('note 2','', regex=True) \nLondon_table = London_table.replace('note 3','', regex=True) \nLondon_table = London_table.replace('note 4','', regex=True) \nLondon_table = London_table.replace('note 5','', regex=True) \n\n# View the top of the data set\nLondon_table.head()", "_____no_output_____" ] ], [ [ "#### Check the type of the newly created table", "_____no_output_____" ] ], [ [ "type(London_table)", "_____no_output_____" ], [ "# Shape of the data frame\nLondon_table.shape", "_____no_output_____" ] ], [ [ "#### Check if the Borough names in both data frames match.", "_____no_output_____" ] ], [ [ "set(df.Borough) - set(London_table.Borough)", "_____no_output_____" ] ], [ [ "These 3 boroughs don't match because of the stray \"[]\" symbols left behind by the footnote markers ", "_____no_output_____" ], [ "#### Find the index of the Boroughs that didn't match", "_____no_output_____" ] ], [ [ "print(\"The index of first borough is\",London_table.index[London_table['Borough'] == 'Barking and Dagenham []'].tolist())\nprint(\"The index of second borough is\",London_table.index[London_table['Borough'] == 'Greenwich []'].tolist())\nprint(\"The index of third borough is\",London_table.index[London_table['Borough'] == 'Hammersmith and Fulham []'].tolist())", "The index of first borough is [0]\nThe index of second borough is [9]\nThe index of third borough is [11]\n" ] ], [ [ "#### Changing the Borough names to match the other data frame", "_____no_output_____" ] ], [ [ "London_table.iloc[0,0] = 'Barking and Dagenham'\nLondon_table.iloc[9,0] = 'Greenwich'\nLondon_table.iloc[11,0] = 'Hammersmith and Fulham'", "_____no_output_____" ] ], [ [ "#### Check if the Borough names in both data sets match", "_____no_output_____" ] ], [ [ "set(df.Borough) - set(London_table.Borough)", "_____no_output_____" ] ], [ [ "The Borough names in both data frames now match ", "_____no_output_____" ], [ "#### We can combine both the data frames together", "_____no_output_____" ] ], [ [ "Ld_crime = pd.merge(London_crime, London_table, on='Borough')\nLd_crime.head(10)", "_____no_output_____" ], [ "Ld_crime.shape", "_____no_output_____" ], [ "set(df.Borough) - set(Ld_crime.Borough)", "_____no_output_____" ] ], [ [ "#### Rearranging the Columns ", "_____no_output_____" ] ], [ [ "# List of Column names of the data frame \nlist(Ld_crime)", "_____no_output_____" ], [ "columnsTitles = ['Borough','Local authority','Political control','Headquarters',\n 'Area (sq mi)','Population (2013 est)[1]',\n 'Inner','Status',\n 'Burglary','Criminal Damage','Drugs','Other Notifiable Offences',\n 'Robbery','Theft and Handling','Violence Against the Person','Total','Co-ordinates']\n\nLd_crime = Ld_crime.reindex(columns=columnsTitles)\n\nLd_crime = Ld_crime[['Borough','Local authority','Political control','Headquarters',\n 'Area (sq mi)','Population (2013 est)[1]','Co-ordinates',\n 'Burglary','Criminal Damage','Drugs','Other Notifiable Offences',\n 'Robbery','Theft and Handling','Violence Against the Person','Total']]\n\nLd_crime.head()", "_____no_output_____" ] ],
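Matching the borough names by eye worked here, but pandas can verify the merge above mechanically. This is a sketch, assuming `London_crime` and `London_table` as built in the preceding cells (the `validate` argument requires pandas 0.21 or later):

```python
import pandas as pd

# An outer join with an indicator column tags every row 'both',
# 'left_only' or 'right_only'; anything not 'both' is a name mismatch.
check = pd.merge(London_crime, London_table, on='Borough',
                 how='outer', indicator=True)
print(check.loc[check['_merge'] != 'both', 'Borough'])

# validate='one_to_one' raises MergeError if either side has
# duplicate borough names, instead of silently multiplying rows.
Ld_crime = pd.merge(London_crime, London_table, on='Borough',
                    validate='one_to_one')
```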
[ [ "## Methodology <a name=\"methodology\"></a>", "_____no_output_____" ], [ "The methodology in this project consists of two parts:\n- [Exploratory Data Analysis](#EDA): Visualise the crime rates in the London boroughs to identify the safest borough, and extract the neighborhoods in that borough to find the 10 most common venues in each neighborhood.\n\n\n- [Modelling](#modelling): To help people find similar neighborhoods in the safest borough, we will cluster the neighborhoods using K-means clustering, an unsupervised machine learning algorithm that partitions data into a predefined number of clusters. We will use a cluster size of 5 for this project, clustering the 15 neighborhoods into 5 clusters. The reason for conducting K-means clustering is to group neighborhoods with similar venues together, so that people can shortlist areas of interest based on the venues/amenities around each neighborhood.\n ", "_____no_output_____" ], [ "### Exploratory Data Analysis <a name=\"EDA\"></a>", "_____no_output_____" ], [ "#### Descriptive statistics of the data", "_____no_output_____" ] ], [ [ "London_crime.describe()", "_____no_output_____" ], [ "# use the inline backend to generate the plots within the browser\n%matplotlib inline \n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n\nmpl.style.use('ggplot') # optional: for ggplot-like style\n\n# check for latest version of Matplotlib\nprint ('Matplotlib version: ', mpl.__version__) # >= 2.0.0\n\n# Matplotlib and associated plotting modules\nimport matplotlib.cm as cm\nimport matplotlib.colors as colors", "Matplotlib version: 2.1.2\n" ] ], [ [ "#### Check if the column names are strings ", "_____no_output_____" ] ], [ [ "Ld_crime.columns = list(map(str, Ld_crime.columns))\n\n# let's check the column labels types now\nall(isinstance(column, str) for column in Ld_crime.columns)", "_____no_output_____" ] ], [ [ "#### Sort the total crimes in descending order to see the 5 boroughs with the highest number of crimes ", "_____no_output_____" ] ], [ [ "Ld_crime.sort_values(['Total'], ascending = False, axis = 0, inplace = True )\n\ndf_top5 = Ld_crime.head() \ndf_top5", "_____no_output_____" ] ], [ [ "#### Visualize the five boroughs with the highest number of crimes ", "_____no_output_____" ] ], [ [ "df_tt = df_top5[['Borough','Total']]\n\ndf_tt.set_index('Borough',inplace = True)\n\nax = df_tt.plot(kind='bar', figsize=(10, 6), rot=0)\n\nax.set_ylabel('Number of Crimes') # add y-label to the plot\nax.set_xlabel('Borough') # add x-label to the plot\nax.set_title('London Boroughs with the Highest no. of crime') # add title to the plot\n\n# Annotate each bar with its total crime count.\n\nfor p in ax.patches:\n ax.annotate(np.round(p.get_height(),decimals=2), \n (p.get_x()+p.get_width()/2., p.get_height()), \n ha='center', \n va='center', \n xytext=(0, 10), \n textcoords='offset points',\n fontsize = 14\n )\n\nplt.show()", "_____no_output_____" ] ], [ [ "### We'll steer clear of these places :)", "_____no_output_____" ], [ "#### Sort the total crimes in ascending order to see the 5 boroughs with the least number of crimes ", "_____no_output_____" ] ], [ [ "Ld_crime.sort_values(['Total'], ascending = True, axis = 0, inplace = True )\n\ndf_bot5 = Ld_crime.head() \ndf_bot5", "_____no_output_____" ] ],
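The annotation loop in the chart above is repeated verbatim in the charts that follow. A small helper would keep those cells shorter; this is a sketch, and the name `annotate_bars` is ours, not the notebook's:

```python
import numpy as np

def annotate_bars(ax, fontsize=14):
    """Write each bar's height just above it (hypothetical helper)."""
    for p in ax.patches:
        ax.annotate(np.round(p.get_height(), decimals=2),
                    (p.get_x() + p.get_width() / 2., p.get_height()),
                    ha='center', va='center',
                    xytext=(0, 10), textcoords='offset points',
                    fontsize=fontsize)

# Usage: ax = df_tt.plot(kind='bar'); annotate_bars(ax); plt.show()
```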
[ [ "#### Visualize the five boroughs with the least number of crimes ", "_____no_output_____" ] ], [ [ "df_bt = df_bot5[['Borough','Total']]\n\ndf_bt.set_index('Borough',inplace = True)\n\nax = df_bt.plot(kind='bar', figsize=(10, 6), rot=0)\n\nax.set_ylabel('Number of Crimes') # add y-label to the plot\nax.set_xlabel('Borough') # add x-label to the plot\nax.set_title('London Boroughs with the least no. of crime') # add title to the plot\n\n# Annotate each bar with its total crime count.\n\nfor p in ax.patches:\n ax.annotate(np.round(p.get_height(),decimals=2), \n (p.get_x()+p.get_width()/2., p.get_height()), \n ha='center', \n va='center', \n xytext=(0, 10), \n textcoords='offset points',\n fontsize = 14\n )\n\nplt.show()", "_____no_output_____" ] ], [ [ "The borough City of London has the lowest no. of crimes recorded for the year 2016. Looking into the details of the borough: ", "_____no_output_____" ] ], [ [ "df_col = df_bot5[df_bot5['Borough'] == 'City of London']\ndf_col = df_col[['Borough','Total','Area (sq mi)','Population (2013 est)[1]']]\ndf_col", "_____no_output_____" ] ], [ [ "#### As per the Wikipedia page, the City of London is the 33rd principal division of Greater London, but it is not a London borough. \nURL: https://en.wikipedia.org/wiki/List_of_London_boroughs\n\n#### Hence we will focus on the next borough with the least crime, i.e. Kingston upon Thames\n\n### Visualizing the different types of crimes in the borough 'Kingston upon Thames'", "_____no_output_____" ] ], [ [ "df_bc1 = df_bot5[df_bot5['Borough'] == 'Kingston upon Thames']\n\ndf_bc = df_bc1[['Borough','Burglary','Criminal Damage','Drugs','Other Notifiable Offences',\n 'Robbery','Theft and Handling','Violence Against the Person']]\n\n\ndf_bc.set_index('Borough',inplace = True)\n\nax = df_bc.plot(kind='bar', figsize=(10, 6), rot=0)\n\nax.set_ylabel('Number of Crimes') # add y-label to the plot\nax.set_xlabel('Borough') # add x-label to the plot\nax.set_title('Crimes by category in Kingston upon Thames') # add title to the plot\n\n# Annotate each bar with its crime count.\n\nfor p in ax.patches:\n ax.annotate(np.round(p.get_height(),decimals=2), \n (p.get_x()+p.get_width()/2., p.get_height()), \n ha='center', \n va='center', \n xytext=(0, 10), \n textcoords='offset points',\n fontsize = 14\n )\n\nplt.show()\n", "_____no_output_____" ] ], [ [ "We can conclude that Kingston upon Thames is the safest borough when compared to the other boroughs in London. ", "_____no_output_____" ], [ "### Part 3: Creating a new dataset of the neighborhoods of the safest borough in London and generating their co-ordinates <a name=\"part3\"></a>\n\n\n\nThe list of neighborhoods in the Royal Borough of Kingston upon Thames was found on a wikipedia page: https://en.wikipedia.org/wiki/List_of_districts_in_the_Royal_Borough_of_Kingston_upon_Thames", "_____no_output_____" ] ],
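The next cell builds the neighborhood table by writing the borough name out fifteen times and filling the coordinate columns with empty strings. A more compact construction, shown as a sketch with the same column names, lets pandas broadcast the scalar borough name and uses NaN as the placeholder for values still to be geocoded:

```python
import numpy as np
import pandas as pd

neighborhoods = ['Berrylands', 'Canbury', 'Chessington', 'Coombe', 'Hook',
                 'Kingston upon Thames', 'Kingston Vale', 'Malden Rushett',
                 'Motspur Park', 'New Malden', 'Norbiton', 'Old Malden',
                 'Seething Wells', 'Surbiton', 'Tolworth']

# The scalar borough name is broadcast to every row; NaN marks
# the latitude/longitude values that the geocoder will fill in.
kut_neig = pd.DataFrame({'Neighborhood': neighborhoods,
                         'Borough': 'Kingston upon Thames',
                         'Latitude': np.nan,
                         'Longitude': np.nan})
```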
<a name=\"part3\"></a>\n\n\n\nThe list of Neighborhoods in the Royal Borough of Kingston upon Thames was found on a wikipedia page: https://en.wikipedia.org/wiki/List_of_districts_in_the_Royal_Borough_of_Kingston_upon_Thames", "_____no_output_____" ] ], [ [ "Neighborhood = ['Berrylands','Canbury','Chessington','Coombe','Hook','Kingston upon Thames',\n'Kingston Vale','Malden Rushett','Motspur Park','New Malden','Norbiton',\n'Old Malden','Seething Wells','Surbiton','Tolworth']\n\nBorough = ['Kingston upon Thames','Kingston upon Thames','Kingston upon Thames','Kingston upon Thames',\n 'Kingston upon Thames','Kingston upon Thames','Kingston upon Thames','Kingston upon Thames',\n 'Kingston upon Thames','Kingston upon Thames','Kingston upon Thames','Kingston upon Thames',\n 'Kingston upon Thames','Kingston upon Thames','Kingston upon Thames']\n\nLatitude = ['','','','','','','','','','','','','','','']\nLongitude = ['','','','','','','','','','','','','','','']\n\ndf_neigh = {'Neighborhood': Neighborhood,'Borough':Borough,'Latitude': Latitude,'Longitude':Longitude}\nkut_neig = pd.DataFrame(data=df_neigh, columns=['Neighborhood', 'Borough', 'Latitude', 'Longitude'], index=None)\n\nkut_neig", "_____no_output_____" ] ], [ [ "#### Find the Co-ordiantes of each Neighborhood in the Kingston upon Thames Neighborhood", "_____no_output_____" ] ], [ [ "Latitude = []\nLongitude = []\n\nfor i in range(len(Neighborhood)):\n address = '{},London,United Kingdom'.format(Neighborhood[i])\n geolocator = Nominatim(user_agent=\"London_agent\")\n location = geolocator.geocode(address)\n Latitude.append(location.latitude)\n Longitude.append(location.longitude)\nprint(Latitude, Longitude)", "[51.3937811, 51.41749865, 51.358336, 51.4194499, 51.3678984, 51.4096275, 51.43185, 51.3410523, 51.3909852, 51.4053347, 51.4099994, 51.382484, 51.3926421, 51.3937557, 51.3788758] [-0.2848024, -0.305552805049262, -0.2986216, -0.2653985, -0.3071453, -0.3062621, -0.2581379, -0.3190757, -0.2488979, -0.2634066, -0.2873963, -0.2590897, -0.3143662, -0.3033105, -0.2828604]\n" ], [ "df_neigh = {'Neighborhood': Neighborhood,'Borough':Borough,'Latitude': Latitude,'Longitude':Longitude}\nkut_neig = pd.DataFrame(data=df_neigh, columns=['Neighborhood', 'Borough', 'Latitude', 'Longitude'], index=None)\n\nkut_neig", "_____no_output_____" ] ], [ [ "#### Get the co-ordinates of Berrylands, London, United Kingdom (The center neighborhood of Kingston upon Thames)", "_____no_output_____" ] ], [ [ "address = 'Berrylands, London, United Kingdom'\n\ngeolocator = Nominatim(user_agent=\"ld_explorer\")\nlocation = geolocator.geocode(address)\nlatitude = location.latitude\nlongitude = location.longitude\nprint('The geograpical coordinate of Berrylands, London are {}, {}.'.format(latitude, longitude))", "The geograpical coordinate of London are 51.3937811, -0.2848024.\n" ] ], [ [ "#### Visualize the Neighborhood of Kingston upon Thames Borough", "_____no_output_____" ] ], [ [ "# create map of New York using latitude and longitude values\nmap_lon = folium.Map(location=[latitude, longitude], zoom_start=12)\n\n# add markers to map\nfor lat, lng, borough, neighborhood in zip(kut_neig['Latitude'], kut_neig['Longitude'], kut_neig['Borough'], kut_neig['Neighborhood']):\n label = '{}, {}'.format(neighborhood, borough)\n label = folium.Popup(label, parse_html=True)\n folium.CircleMarker(\n [lat, lng],\n radius=5,\n popup=label,\n color='blue',\n fill=True,\n fill_color='#3186cc',\n fill_opacity=0.7,\n parse_html=False).add_to(map_lon) \n \nmap_lon", 
"_____no_output_____" ] ], [ [ "### Modelling <a name=\"modelling\"></a>\n\n- Finding all the venues within a 500 meter radius of each neighborhood.\n- Perform one hot ecoding on the venues data.\n- Grouping the venues by the neighborhood and calculating their mean.\n- Performing a K-means clustering (Defining K = 5)", "_____no_output_____" ], [ "#### Create a function to extract the venues from each Neighborhood", "_____no_output_____" ] ], [ [ "def getNearbyVenues(names, latitudes, longitudes, radius=500):\n \n venues_list=[]\n for name, lat, lng in zip(names, latitudes, longitudes):\n print(name)\n \n # create the API request URL\n url = 'https://api.foursquare.com/v2/venues/explore?&client_id={}&client_secret={}&v={}&ll={},{}&radius={}&limit={}'.format(\n CLIENT_ID, \n CLIENT_SECRET, \n VERSION, \n lat, \n lng, \n radius, \n LIMIT)\n \n # make the GET request\n results = requests.get(url).json()[\"response\"]['groups'][0]['items']\n \n # return only relevant information for each nearby venue\n venues_list.append([(\n name, \n lat, \n lng, \n v['venue']['name'], \n v['venue']['location']['lat'], \n v['venue']['location']['lng'], \n v['venue']['categories'][0]['name']) for v in results])\n\n nearby_venues = pd.DataFrame([item for venue_list in venues_list for item in venue_list])\n nearby_venues.columns = ['Neighborhood', \n 'Neighborhood Latitude', \n 'Neighborhood Longitude', \n 'Venue', \n 'Venue Latitude', \n 'Venue Longitude', \n 'Venue Category']\n \n return(nearby_venues)", "_____no_output_____" ], [ "kut_venues = getNearbyVenues(names=kut_neig['Neighborhood'],\n latitudes=kut_neig['Latitude'],\n longitudes=kut_neig['Longitude']\n )\n", "Berrylands\nCanbury\nChessington\nCoombe\nHook\nKingston upon Thames\nKingston Vale\nMalden Rushett\nMotspur Park\nNew Malden\nNorbiton\nOld Malden\nSeething Wells\nSurbiton\nTolworth\n" ], [ "print(kut_venues.shape)\nkut_venues.head()", "(170, 7)\n" ], [ "kut_venues.groupby('Neighborhood').count()", "_____no_output_____" ], [ "print('There are {} uniques categories.'.format(len(kut_venues['Venue Category'].unique())))", "There are 65 uniques categories.\n" ] ], [ [ "#### One hot encoding\n\nURL: https://hackernoon.com/what-is-one-hot-encoding-why-and-when-do-you-have-to-use-it-e3c6186d008f", "_____no_output_____" ] ], [ [ "# one hot encoding\nkut_onehot = pd.get_dummies(kut_venues[['Venue Category']], prefix=\"\", prefix_sep=\"\")\n\n# add neighborhood column back to dataframe\nkut_onehot['Neighborhood'] = kut_venues['Neighborhood'] \n\n# move neighborhood column to the first column\nfixed_columns = [kut_onehot.columns[-1]] + list(kut_onehot.columns[:-1])\nkut_onehot = kut_onehot[fixed_columns]\n\nkut_onehot.head()", "_____no_output_____" ] ], [ [ "#### Grouping rows by neighborhood and by taking the mean of the frequency of occurrence of each category", "_____no_output_____" ] ], [ [ "kut_grouped = kut_onehot.groupby('Neighborhood').mean().reset_index()\nkut_grouped", "_____no_output_____" ], [ "kut_grouped.shape", "_____no_output_____" ], [ "num_top_venues = 5\n\nfor hood in kut_grouped['Neighborhood']:\n print(\"----\"+hood+\"----\")\n temp = kut_grouped[kut_grouped['Neighborhood'] == hood].T.reset_index()\n temp.columns = ['venue','freq']\n temp = temp.iloc[1:]\n temp['freq'] = temp['freq'].astype(float)\n temp = temp.round({'freq': 2})\n print(temp.sort_values('freq', ascending=False).reset_index(drop=True).head(num_top_venues))\n print('\\n')", "----Berrylands----\n venue freq\n0 Bus Stop 0.25\n1 Gym / Fitness Center 0.25\n2 Park 0.25\n3 
Café 0.25\n4 Pub 0.00\n\n\n----Canbury----\n venue freq\n0 Pub 0.31\n1 Park 0.08\n2 Hotel 0.08\n3 Indian Restaurant 0.08\n4 Fish & Chips Shop 0.08\n\n\n----Chessington----\n venue freq\n0 Fast Food Restaurant 1.0\n1 Asian Restaurant 0.0\n2 Portuguese Restaurant 0.0\n3 Hardware Store 0.0\n4 Hotel 0.0\n\n\n----Hook----\n venue freq\n0 Bakery 0.25\n1 Indian Restaurant 0.25\n2 Fish & Chips Shop 0.25\n3 Convenience Store 0.25\n4 Asian Restaurant 0.00\n\n\n----Kingston Vale----\n venue freq\n0 Grocery Store 0.25\n1 Soccer Field 0.25\n2 Bar 0.25\n3 Italian Restaurant 0.25\n4 Platform 0.00\n\n\n----Kingston upon Thames----\n venue freq\n0 Café 0.13\n1 Coffee Shop 0.13\n2 Sushi Restaurant 0.07\n3 Burger Joint 0.07\n4 Pub 0.07\n\n\n----Malden Rushett----\n venue freq\n0 Pub 0.25\n1 Restaurant 0.25\n2 Convenience Store 0.25\n3 Garden Center 0.25\n4 Park 0.00\n\n\n----Motspur Park----\n venue freq\n0 Bus Stop 0.2\n1 Gym 0.2\n2 Restaurant 0.2\n3 Park 0.2\n4 Soccer Field 0.2\n\n\n----New Malden----\n venue freq\n0 Gym 0.17\n1 Indian Restaurant 0.17\n2 Gastropub 0.17\n3 Sushi Restaurant 0.17\n4 Supermarket 0.17\n\n\n----Norbiton----\n venue freq\n0 Indian Restaurant 0.11\n1 Italian Restaurant 0.07\n2 Platform 0.07\n3 Food 0.07\n4 Pub 0.07\n\n\n----Old Malden----\n venue freq\n0 Pub 0.33\n1 Train Station 0.33\n2 Food 0.33\n3 Market 0.00\n4 Platform 0.00\n\n\n----Seething Wells----\n venue freq\n0 Indian Restaurant 0.17\n1 Coffee Shop 0.13\n2 Italian Restaurant 0.09\n3 Pub 0.09\n4 Café 0.09\n\n\n----Surbiton----\n venue freq\n0 Coffee Shop 0.17\n1 Pub 0.13\n2 Supermarket 0.07\n3 Breakfast Spot 0.07\n4 Grocery Store 0.03\n\n\n----Tolworth----\n venue freq\n0 Grocery Store 0.20\n1 Pharmacy 0.13\n2 Bus Stop 0.07\n3 Furniture / Home Store 0.07\n4 Pizza Place 0.07\n\n\n" ] ], [ [ "#### Create a data frame of the venues \nFunction to sort the venues in descending order.", "_____no_output_____" ] ], [ [ "def return_most_common_venues(row, num_top_venues):\n row_categories = row.iloc[1:]\n row_categories_sorted = row_categories.sort_values(ascending=False)\n \n return row_categories_sorted.index.values[0:num_top_venues]", "_____no_output_____" ] ], [ [ "Create the new dataframe and display the top 10 venues for each neighborhood", "_____no_output_____" ] ], [ [ "num_top_venues = 10\n\nindicators = ['st', 'nd', 'rd']\n\n# create columns according to number of top venues\ncolumns = ['Neighborhood']\nfor ind in np.arange(num_top_venues):\n try:\n columns.append('{}{} Most Common Venue'.format(ind+1, indicators[ind]))\n except:\n columns.append('{}th Most Common Venue'.format(ind+1))\n\n# create a new dataframe\nneighborhoods_venues_sorted = pd.DataFrame(columns=columns)\nneighborhoods_venues_sorted['Neighborhood'] = kut_grouped['Neighborhood']\n\nfor ind in np.arange(kut_grouped.shape[0]):\n neighborhoods_venues_sorted.iloc[ind, 1:] = return_most_common_venues(kut_grouped.iloc[ind, :], num_top_venues)\n\nneighborhoods_venues_sorted.head()", "_____no_output_____" ] ], [ [ "### Clustering similar neighborhoods together using k - means clustering", "_____no_output_____" ] ], [ [ "# import k-means from clustering stage\nfrom sklearn.cluster import KMeans\n\n# set number of clusters\nkclusters = 5\n\nkut_grouped_clustering = kut_grouped.drop('Neighborhood', 1)\n\n# run k-means clustering\nkmeans = KMeans(n_clusters=kclusters, random_state=0).fit(kut_grouped_clustering)\n\n# check cluster labels generated for each row in the dataframe\nkmeans.labels_[0:10] ", "_____no_output_____" ], [ "# add clustering 
labels\nneighborhoods_venues_sorted.insert(0, 'Cluster Labels', kmeans.labels_)\n\nkut_merged = kut_neig\n\n# merge kut_grouped with kut_neig to add latitude/longitude for each neighborhood\nkut_merged = kut_merged.join(neighborhoods_venues_sorted.set_index('Neighborhood'), on='Neighborhood')\n\nkut_merged.head() # check the last columns!", "_____no_output_____" ], [ "kut_merged.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 15 entries, 0 to 14\nData columns (total 15 columns):\nNeighborhood 15 non-null object\nBorough 15 non-null object\nLatitude 15 non-null float64\nLongitude 15 non-null float64\nCluster Labels 14 non-null float64\n1st Most Common Venue 14 non-null object\n2nd Most Common Venue 14 non-null object\n3rd Most Common Venue 14 non-null object\n4th Most Common Venue 14 non-null object\n5th Most Common Venue 14 non-null object\n6th Most Common Venue 14 non-null object\n7th Most Common Venue 14 non-null object\n8th Most Common Venue 14 non-null object\n9th Most Common Venue 14 non-null object\n10th Most Common Venue 14 non-null object\ndtypes: float64(3), object(12)\nmemory usage: 1.8+ KB\n" ], [ "# Dropping the row with the NaN value \nkut_merged.dropna(inplace = True)", "_____no_output_____" ], [ "kut_merged.shape", "_____no_output_____" ], [ "kut_merged['Cluster Labels'] = kut_merged['Cluster Labels'].astype(int)", "_____no_output_____" ], [ "kut_merged.info()", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 14 entries, 0 to 14\nData columns (total 15 columns):\nNeighborhood 14 non-null object\nBorough 14 non-null object\nLatitude 14 non-null float64\nLongitude 14 non-null float64\nCluster Labels 14 non-null int64\n1st Most Common Venue 14 non-null object\n2nd Most Common Venue 14 non-null object\n3rd Most Common Venue 14 non-null object\n4th Most Common Venue 14 non-null object\n5th Most Common Venue 14 non-null object\n6th Most Common Venue 14 non-null object\n7th Most Common Venue 14 non-null object\n8th Most Common Venue 14 non-null object\n9th Most Common Venue 14 non-null object\n10th Most Common Venue 14 non-null object\ndtypes: float64(2), int64(1), object(12)\nmemory usage: 1.8+ KB\n" ] ], [ [ "### Visualize the clusters", "_____no_output_____" ] ], [ [ "# create map\nmap_clusters = folium.Map(location=[latitude, longitude], zoom_start=11.5)\n\n# set color scheme for the clusters\nx = np.arange(kclusters)\nys = [i + x + (i*x)**2 for i in range(kclusters)]\ncolors_array = cm.rainbow(np.linspace(0, 1, len(ys)))\nrainbow = [colors.rgb2hex(i) for i in colors_array]\n\n# add markers to the map\nmarkers_colors = []\nfor lat, lon, poi, cluster in zip(kut_merged['Latitude'], kut_merged['Longitude'], kut_merged['Neighborhood'], kut_merged['Cluster Labels']):\n label = folium.Popup(str(poi) + ' Cluster ' + str(cluster), parse_html=True)\n folium.CircleMarker(\n [lat, lon],\n radius=8,\n popup=label,\n color=rainbow[cluster-1],\n fill=True,\n fill_color=rainbow[cluster-1],\n fill_opacity=0.5).add_to(map_clusters)\n \nmap_clusters", "_____no_output_____" ] ], [ [ "Each cluster is color-coded for ease of presentation. We can see that the majority of the neighborhoods fall in the red cluster, which is the first cluster. Three neighborhoods have their own cluster (blue, purple and yellow); these are clusters two, three and five. The green cluster, the fourth, consists of two neighborhoods. 
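
The cluster count k = 5 was fixed by hand in the Methodology section. A quick elbow check is one way to sanity-test that choice; this is a sketch, assuming the `kut_grouped_clustering` frame prepared above:

```python
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt

# Within-cluster sum of squares (inertia) for a range of k;
# a visible 'elbow' in the curve suggests a reasonable cluster count.
inertias = []
ks = range(1, 11)
for k in ks:
    km = KMeans(n_clusters=k, random_state=0).fit(kut_grouped_clustering)
    inertias.append(km.inertia_)

plt.plot(list(ks), inertias, marker='o')
plt.xlabel('k (number of clusters)')
plt.ylabel('Inertia')
plt.show()
```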
", "_____no_output_____" ], [ "## Analysis <a name=\"analysis\"></a>\n\nAnalyse each of the clusters to identify the characteristics of each cluster and the neighborhoods in them.", "_____no_output_____" ], [ "#### Examine the first cluster", "_____no_output_____" ] ], [ [ "kut_merged[kut_merged['Cluster Labels'] == 0]", "_____no_output_____" ] ], [ [ "The cluster one is the biggest cluster with 9 of the 15 neighborhoods in the borough Kingston upon Thames. Upon closely examining these neighborhoods we can see that the most common venues in these neighborhoods are Restaurants, Pubs, Cafe, Supermarkets, and stores.", "_____no_output_____" ], [ "#### Examine the second cluster", "_____no_output_____" ] ], [ [ "kut_merged[kut_merged['Cluster Labels'] == 1]", "_____no_output_____" ] ], [ [ "The second cluster has one neighborhood which consists of Venues such as Restaurants, Golf courses, and wine shops. ", "_____no_output_____" ], [ "#### Examine the third cluster", "_____no_output_____" ] ], [ [ "kut_merged[kut_merged['Cluster Labels'] == 2]", "_____no_output_____" ] ], [ [ "The third cluster has one neighborhood which consists of Venues such as Train stations, Restaurants, and Furniture shops. ", "_____no_output_____" ], [ "#### Examine the forth cluster", "_____no_output_____" ] ], [ [ "kut_merged[kut_merged['Cluster Labels'] == 3]", "_____no_output_____" ] ], [ [ "The fourth cluster has two neighborhoods in it, these neighborhoods have common venues such as Parks, Gym/Fitness centers, Bus Stops, Restaurants, Electronics Stores and Soccer fields etc. \n", "_____no_output_____" ], [ "#### Examine the fifth cluster", "_____no_output_____" ] ], [ [ "kut_merged[kut_merged['Cluster Labels'] == 4]", "_____no_output_____" ] ], [ [ "The fifth cluster has one neighborhood which consists of Venues such as Grocery shops, Bars, Restaurants, Furniture shops, and Department stores.", "_____no_output_____" ], [ "## Results and Discussion <a name=\"results\"></a>", "_____no_output_____" ], [ "The aim of this project is to help people who want to relocate to the safest borough in London, expats can chose the neighborhoods to which they want to relocate based on the most common venues in it. For example if a person is looking for a neighborhood with good connectivity and public transportation we can see that Clusters 3 and 4 have Train stations and Bus stops as the most common venues. If a person is looking for a neighborhood with stores and restaurants in a close proximity then the neighborhoods in the first cluster is suitable. For a family I feel that the neighborhoods in Cluster 4 are more suitable dues to the common venues in that cluster, these neighborhoods have common venues such as Parks, Gym/Fitness centers, Bus Stops, Restaurants, Electronics Stores and Soccer fields which is ideal for a family. ", "_____no_output_____" ], [ "## Conclusion <a name=\"conclusion\"></a>", "_____no_output_____" ], [ "This project helps a person get a better understanding of the neighborhoods with respect to the most common venues in that neighborhood. It is always helpful to make use of technology to stay one step ahead i.e. finding out more about places before moving into a neighborhood. We have just taken safety as a primary concern to shortlist the borough of London. The future of this project includes taking other factors such as cost of living in the areas into consideration to shortlist the borough based on safety and a predefined budget. ", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
e7d04ad747deee3a13373defd9f50132c8ae9f9a
779,465
ipynb
Jupyter Notebook
nbs/dl1/lesson2-download.ipynb
technophile21/course-v3
2bd1ce79c3f0cfe04a7c16773417d3f20f45522b
[ "Apache-2.0" ]
null
null
null
nbs/dl1/lesson2-download.ipynb
technophile21/course-v3
2bd1ce79c3f0cfe04a7c16773417d3f20f45522b
[ "Apache-2.0" ]
null
null
null
nbs/dl1/lesson2-download.ipynb
technophile21/course-v3
2bd1ce79c3f0cfe04a7c16773417d3f20f45522b
[ "Apache-2.0" ]
null
null
null
568.952555
446,052
0.93134
[ [ [ "# Creating your own dataset from Google Images\n\n*by: Francisco Ingham and Jeremy Howard. Inspired by [Adrian Rosebrock](https://www.pyimagesearch.com/2017/12/04/how-to-create-a-deep-learning-dataset-using-google-images/)*", "_____no_output_____" ], [ "In this tutorial we will see how to easily create an image dataset through Google Images. **Note**: You will have to repeat these steps for any new category you want to Google (e.g once for dogs and once for cats).", "_____no_output_____" ] ], [ [ "from fastai.vision import *", "_____no_output_____" ] ], [ [ "## Get a list of URLs", "_____no_output_____" ], [ "### Search and scroll", "_____no_output_____" ], [ "Go to [Google Images](http://images.google.com) and search for the images you are interested in. The more specific you are in your Google Search, the better the results and the less manual pruning you will have to do.\n\nScroll down until you've seen all the images you want to download, or until you see a button that says 'Show more results'. All the images you scrolled past are now available to download. To get more, click on the button, and continue scrolling. The maximum number of images Google Images shows is 700.\n\nIt is a good idea to put things you want to exclude into the search query, for instance if you are searching for the Eurasian wolf, \"canis lupus lupus\", it might be a good idea to exclude other variants:\n\n \"canis lupus lupus\" -dog -arctos -familiaris -baileyi -occidentalis\n\nYou can also limit your results to show only photos by clicking on Tools and selecting Photos from the Type dropdown.", "_____no_output_____" ], [ "### Download into file", "_____no_output_____" ], [ "Now you must run some Javascript code in your browser which will save the URLs of all the images you want for you dataset.\n\nPress <kbd>Ctrl</kbd><kbd>Shift</kbd><kbd>J</kbd> in Windows/Linux and <kbd>Cmd</kbd><kbd>Opt</kbd><kbd>J</kbd> in Mac, and a small window the javascript 'Console' will appear. That is where you will paste the JavaScript commands.\n\nYou will need to get the urls of each of the images. You can do this by running the following commands:\n\n```javascript\nurls = Array.from(document.querySelectorAll('.rg_di .rg_meta')).map(el=>JSON.parse(el.textContent).ou);\nwindow.open('data:text/csv;charset=utf-8,' + escape(urls.join('\\n')));\n```", "_____no_output_____" ], [ "### Create directory and upload urls file into your server", "_____no_output_____" ], [ "Choose an appropriate name for your labeled images. You can run these steps multiple times to create different labels.", "_____no_output_____" ] ], [ [ "folder = 'black'\nfile = 'urls_black.txt'", "_____no_output_____" ], [ "folder = 'teddys'\nfile = 'urls_teddys.txt'", "_____no_output_____" ], [ "folder = 'grizzly'\nfile = 'urls_grizzly.txt'", "_____no_output_____" ] ], [ [ "You will need to run this cell once per each category.", "_____no_output_____" ] ], [ [ "path = Path('data/bears')\ndest = path/folder\ndest.mkdir(parents=True, exist_ok=True)", "_____no_output_____" ], [ "path.ls()", "_____no_output_____" ] ], [ [ "Finally, upload your urls file. You just need to press 'Upload' in your working directory and select your file, then click 'Upload' for each of the displayed files.\n\n![uploaded file](images/download_images/upload.png)", "_____no_output_____" ], [ "## Download images", "_____no_output_____" ], [ "Now you will need to download your images from their respective urls.\n\nfast.ai has a function that allows you to do just that. 
You just have to specify the urls filename as well as the destination folder and this function will download and save all images that can be opened. Images that cannot be opened will not be saved.\n\nLet's download our images! Notice you can choose a maximum number of images to be downloaded. In this case we will not download all the urls.\n\nYou will need to run this line once for every category.", "_____no_output_____" ] ], [ [ "classes = ['teddys','grizzly','black']", "_____no_output_____" ], [ "download_images(path/file, dest, max_pics=200)", "_____no_output_____" ], [ "# If you have problems downloading, try with `max_workers=0` to see exceptions:\ndownload_images(path/file, dest, max_pics=20, max_workers=0)", "_____no_output_____" ] ], [ [ "Then we can remove any images that can't be opened:", "_____no_output_____" ] ], [ [ "for c in classes:\n print(c)\n verify_images(path/c, delete=True, max_size=500)", "teddys\n" ] ], [ [ "## View data", "_____no_output_____" ] ], [ [ "np.random.seed(42)\ndata = ImageDataBunch.from_folder(path, train=\".\", valid_pct=0.2,\n ds_tfms=get_transforms(), size=224, num_workers=4).normalize(imagenet_stats)", "_____no_output_____" ], [ "# If you already cleaned your data, run this cell instead of the one before\n# np.random.seed(42)\n# data = ImageDataBunch.from_csv(\".\", folder=\".\", valid_pct=0.2, csv_labels='cleaned.csv',\n# ds_tfms=get_transforms(), size=224, num_workers=4).normalize(imagenet_stats)", "_____no_output_____" ] ], [ [ "Good! Let's take a look at some of our pictures then.", "_____no_output_____" ] ], [ [ "data.classes", "_____no_output_____" ], [ "data.show_batch(rows=3, figsize=(7,8))", "_____no_output_____" ], [ "data.classes, data.c, len(data.train_ds), len(data.valid_ds)", "_____no_output_____" ] ], [ [ "## Train model", "_____no_output_____" ] ], [ [ "learn = cnn_learner(data, models.resnet34, metrics=error_rate)", "_____no_output_____" ], [ "learn.fit_one_cycle(4)", "_____no_output_____" ], [ "learn.save('stage-1')", "_____no_output_____" ], [ "learn.unfreeze()", "_____no_output_____" ], [ "learn.lr_find()", "_____no_output_____" ], [ "learn.recorder.plot()", "_____no_output_____" ], [ "learn.fit_one_cycle(2, max_lr=slice(3e-5,3e-4))", "_____no_output_____" ], [ "learn.save('stage-2')", "_____no_output_____" ] ], [ [ "## Interpretation", "_____no_output_____" ] ], [ [ "learn.load('stage-2');", "_____no_output_____" ], [ "interp = ClassificationInterpretation.from_learner(learn)", "_____no_output_____" ], [ "interp.plot_confusion_matrix()", "_____no_output_____" ] ], [ [ "## Cleaning Up\n\nSome of our top losses aren't due to bad performance by our model. There are images in our data set that shouldn't be.\n\nUsing the `ImageCleaner` widget from `fastai.widgets` we can prune our top losses, removing photos that don't belong.", "_____no_output_____" ] ], [ [ "from fastai.widgets import *", "_____no_output_____" ] ], [ [ "First we need to get the file paths from our top_losses. We can do this with `.from_toplosses`. 
We then feed the top losses indexes and corresponding dataset to `ImageCleaner`.\n\nNotice that the widget will not delete images directly from disk but it will create a new csv file `cleaned.csv` from where you can create a new ImageDataBunch with the corrected labels to continue training your model.", "_____no_output_____" ] ], [ [ "ds, idxs = DatasetFormatter().from_toplosses(learn, ds_type=DatasetType.Valid)", "_____no_output_____" ], [ "ImageCleaner(ds, idxs, path)", "_____no_output_____" ] ], [ [ "Flag photos for deletion by clicking 'Delete'. Then click 'Next Batch' to delete flagged photos and keep the rest in that row. `ImageCleaner` will show you a new row of images until there are no more to show; in this case, it keeps going until there are no images left from `top_losses`.", "_____no_output_____" ], [ "You can also find duplicates in your dataset and delete them! To do this, you need to run `.from_similars` to get the potential duplicates' ids and then run `ImageCleaner` with `duplicates=True`. The API works in a similar way as with misclassified images: just choose the ones you want to delete and click 'Next Batch' until there are no more images left.", "_____no_output_____" ] ], [ [ "ds, idxs = DatasetFormatter().from_similars(learn, ds_type=DatasetType.Valid)", "Getting activations...\n" ], [ "ImageCleaner(ds, idxs, path, duplicates=True)", "_____no_output_____" ] ], [ [ "Remember to recreate your ImageDataBunch from your `cleaned.csv` to include the changes you made in your data!", "_____no_output_____" ], [ "## Putting your model in production", "_____no_output_____" ], [ "First things first, let's export the content of our `Learner` object for production:", "_____no_output_____" ] ], [ [ "learn.export()", "_____no_output_____" ] ], [ [ "This will create a file named 'export.pkl' in the directory where we were working that contains everything we need to deploy our model (the model, the weights, but also some metadata like the classes or the transforms/normalization used).", "_____no_output_____" ], [ "You probably want to use CPU for inference, except at massive scale (and you almost certainly don't need to train in real-time). If you don't have a GPU, that happens automatically. 
You can test your model on CPU like so:", "_____no_output_____" ] ], [ [ "defaults.device = torch.device('cpu')", "_____no_output_____" ], [ "img = open_image(path/'black'/'00000021.jpg')\nimg", "_____no_output_____" ] ], [ [ "We create our `Learner` in the production environment like this, just make sure that `path` contains the file 'export.pkl' from before.", "_____no_output_____" ] ], [ [ "learn = load_learner(path)", "_____no_output_____" ], [ "pred_class,pred_idx,outputs = learn.predict(img)\npred_class", "_____no_output_____" ] ], [ [ "So you might create a route something like this ([thanks](https://github.com/simonw/cougar-or-not) to Simon Willison for the structure of this code):\n\n```python\[email protected](\"/classify-url\", methods=[\"GET\"])\nasync def classify_url(request):\n bytes = await get_bytes(request.query_params[\"url\"])\n img = open_image(BytesIO(bytes))\n _,_,losses = learner.predict(img)\n return JSONResponse({\n \"predictions\": sorted(\n zip(learner.data.classes, map(float, losses)),\n key=lambda p: p[1],\n reverse=True\n )\n })\n```\n\n(This example is for the [Starlette](https://www.starlette.io/) web app toolkit.)", "_____no_output_____" ], [ "## Things that can go wrong", "_____no_output_____" ], [ "- Most of the time things will train fine with the defaults\n- There's not much you really need to tune (despite what you've heard!)\n- Most likely are\n - Learning rate\n - Number of epochs", "_____no_output_____" ], [ "### Learning rate (LR) too high", "_____no_output_____" ] ], [ [ "learn = cnn_learner(data, models.resnet34, metrics=error_rate)", "_____no_output_____" ], [ "learn.fit_one_cycle(1, max_lr=0.5)", "Total time: 00:13\nepoch train_loss valid_loss error_rate \n1 12.220007 1144188288.000000 0.765957 (00:13)\n\n" ] ], [ [ "### Learning rate (LR) too low", "_____no_output_____" ] ], [ [ "learn = cnn_learner(data, models.resnet34, metrics=error_rate)", "_____no_output_____" ] ], [ [ "Previously we had this result:\n\n```\nTotal time: 00:57\nepoch train_loss valid_loss error_rate\n1 1.030236 0.179226 0.028369 (00:14)\n2 0.561508 0.055464 0.014184 (00:13)\n3 0.396103 0.053801 0.014184 (00:13)\n4 0.316883 0.050197 0.021277 (00:15)\n```", "_____no_output_____" ] ], [ [ "learn.fit_one_cycle(5, max_lr=1e-5)", "Total time: 01:07\nepoch train_loss valid_loss error_rate\n1 1.349151 1.062807 0.609929 (00:13)\n2 1.373262 1.045115 0.546099 (00:13)\n3 1.346169 1.006288 0.468085 (00:13)\n4 1.334486 0.978713 0.453901 (00:13)\n5 1.320978 0.978108 0.446809 (00:13)\n\n" ], [ "learn.recorder.plot_losses()", "_____no_output_____" ] ], [ [ "As well as taking a really long time, it's getting too many looks at each image, so it may overfit.", "_____no_output_____" ], [ "### Too few epochs", "_____no_output_____" ] ], [ [ "learn = cnn_learner(data, models.resnet34, metrics=error_rate, pretrained=False)", "_____no_output_____" ], [ "learn.fit_one_cycle(1)", "Total time: 00:14\nepoch train_loss valid_loss error_rate\n1 0.602823 0.119616 0.049645 (00:14)\n\n" ] ], [ [ "### Too many epochs", "_____no_output_____" ] ], [ [ "np.random.seed(42)\ndata = ImageDataBunch.from_folder(path, train=\".\", valid_pct=0.9, bs=32, \n ds_tfms=get_transforms(do_flip=False, max_rotate=0, max_zoom=1, max_lighting=0, max_warp=0\n ),size=224, num_workers=4).normalize(imagenet_stats)", "_____no_output_____" ], [ "learn = cnn_learner(data, models.resnet50, metrics=error_rate, ps=0, wd=0)\nlearn.unfreeze()", "_____no_output_____" ], [ "learn.fit_one_cycle(40, slice(1e-6,1e-4))", "Total time: 06:39\nepoch 
train_loss valid_loss error_rate\n1 1.513021 1.041628 0.507326 (00:13)\n2 1.290093 0.994758 0.443223 (00:09)\n3 1.185764 0.936145 0.410256 (00:09)\n4 1.117229 0.838402 0.322344 (00:09)\n5 1.022635 0.734872 0.252747 (00:09)\n6 0.951374 0.627288 0.192308 (00:10)\n7 0.916111 0.558621 0.184982 (00:09)\n8 0.839068 0.503755 0.177656 (00:09)\n9 0.749610 0.433475 0.144689 (00:09)\n10 0.678583 0.367560 0.124542 (00:09)\n11 0.615280 0.327029 0.100733 (00:10)\n12 0.558776 0.298989 0.095238 (00:09)\n13 0.518109 0.266998 0.084249 (00:09)\n14 0.476290 0.257858 0.084249 (00:09)\n15 0.436865 0.227299 0.067766 (00:09)\n16 0.457189 0.236593 0.078755 (00:10)\n17 0.420905 0.240185 0.080586 (00:10)\n18 0.395686 0.255465 0.082418 (00:09)\n19 0.373232 0.263469 0.080586 (00:09)\n20 0.348988 0.258300 0.080586 (00:10)\n21 0.324616 0.261346 0.080586 (00:09)\n22 0.311310 0.236431 0.071429 (00:09)\n23 0.328342 0.245841 0.069597 (00:10)\n24 0.306411 0.235111 0.064103 (00:10)\n25 0.289134 0.227465 0.069597 (00:09)\n26 0.284814 0.226022 0.064103 (00:09)\n27 0.268398 0.222791 0.067766 (00:09)\n28 0.255431 0.227751 0.073260 (00:10)\n29 0.240742 0.235949 0.071429 (00:09)\n30 0.227140 0.225221 0.075092 (00:09)\n31 0.213877 0.214789 0.069597 (00:09)\n32 0.201631 0.209382 0.062271 (00:10)\n33 0.189988 0.210684 0.065934 (00:09)\n34 0.181293 0.214666 0.073260 (00:09)\n35 0.184095 0.222575 0.073260 (00:09)\n36 0.194615 0.229198 0.076923 (00:10)\n37 0.186165 0.218206 0.075092 (00:09)\n38 0.176623 0.207198 0.062271 (00:10)\n39 0.166854 0.207256 0.065934 (00:10)\n40 0.162692 0.206044 0.062271 (00:09)\n\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
e7d0578d3388ec06a393ec6291030756012edd5e
56,991
ipynb
Jupyter Notebook
tutorials/W0D1_PythonWorkshop1/student/W0D1_Tutorial1.ipynb
bgalbraith/course-content
3db3bbba0fee7af1def2a67e34be073c43434f4a
[ "CC-BY-4.0", "BSD-3-Clause" ]
null
null
null
tutorials/W0D1_PythonWorkshop1/student/W0D1_Tutorial1.ipynb
bgalbraith/course-content
3db3bbba0fee7af1def2a67e34be073c43434f4a
[ "CC-BY-4.0", "BSD-3-Clause" ]
1
2021-06-16T05:41:08.000Z
2021-06-16T05:41:08.000Z
tutorials/W0D1_PythonWorkshop1/student/W0D1_Tutorial1.ipynb
bgalbraith/course-content
3db3bbba0fee7af1def2a67e34be073c43434f4a
[ "CC-BY-4.0", "BSD-3-Clause" ]
null
null
null
33.702543
588
0.584724
[ [ [ "<a href=\"https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W0D1_PythonWorkshop1/student/W0D1_Tutorial1.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# Tutorial: LIF Neuron - Part I\n**Week 0, Day 1: Python Workshop 1**\n\n**By Neuromatch Academy**\n\n__Content creators:__ Marco Brigham and the [CCNSS](https://www.ccnss.org/) team\n\n__Content reviewers:__ Michael Waskom, Karolina Stosio, Spiros Chavlis", "_____no_output_____" ], [ "---\n## Tutorial objectives\nNMA students, you are going to use Python skills to advance your understanding of neuroscience. Just like two legs that support and strengthen each other. One has \"Python\" written in it, and the other has \"Neuro\". And step-by-step they go.\n\n&nbsp; \n\nIn this notebook, we'll practice basic operations with Python variables, control flow, plotting, and a sneak peek at `np.array`, the workhorse of scientific computation in Python.\n\n&nbsp; \n\nEach new concept in Python will unlock a different aspect of our implementation of a **Leaky Integrate-and-Fire (LIF)** neuron. And as if it couldn't get any better, we'll visualize the evolution of its membrane potential in time, and extract its statistical properties!\n\n&nbsp; \n\nWell then, let's start our walk today!", "_____no_output_____" ], [ "---\n## Imports and helper functions\nPlease execute the cell(s) below to initialize the notebook environment.", "_____no_output_____" ] ], [ [ "# Import libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom IPython.display import YouTubeVideo", "_____no_output_____" ], [ "# @title Figure settings\n\n%config InlineBackend.figure_format = 'retina'\n\nplt.style.use(\"https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/nma.mplstyle\")", "_____no_output_____" ] ], [ [ "---\n## Neuron model\nA *membrane equation* and a *reset condition* define our *leaky-integrate-and-fire (LIF)* neuron:\n\n\n\\begin{align*}\n\\\\\n&\\tau_m\\,\\frac{d}{dt}\\,V(t) = E_{L} - V(t) + R\\,I(t) &\\text{if }\\quad V(t) \\leq V_{th}\\\\\n\\\\\n&V(t) = V_{reset} &\\text{otherwise}\\\\\n\\\\\n\\end{align*}\n\nwhere $V(t)$ is the membrane potential, $\\tau_m$ is the membrane time constant, $E_{L}$ is the leak potential, $R$ is the membrane resistance, $I(t)$ is the synaptic input current, $V_{th}$ is the firing threshold, and $V_{reset}$ is the reset voltage. 
We can also write $V_m$ for membrane potential, which is very convenient for plot labels.\n\nThe membrane equation is an *ordinary differential equation (ODE)* that describes the time evolution of membrane potential $V(t)$ in response to synaptic input and leaking of charge across the cell membrane.\n\n**Note that, in this tutorial, the neuron model will not implement a spiking mechanism.**", "_____no_output_____" ] ], [ [ "# @title Video: Synaptic input\nvideo = YouTubeVideo(id='UP8rD2AwceM', width=854, height=480, fs=1)\nprint(\"Video available at https://youtube.com/watch?v=\" + video.id)\nvideo", "_____no_output_____" ] ], [ [ "### Exercise 1\nWe start by defining and initializing the main simulation variables.\n\n**Suggestions**\n* Modify the code below to print the simulation parameters", "_____no_output_____" ] ], [ [ "# t_max = 150e-3   # second\n# dt = 1e-3        # second\n# tau = 20e-3      # second\n# el = -60e-3      # volt\n# vr = -70e-3      # volt\n# vth = -50e-3     # volt\n# r = 100e6        # ohm\n# i_mean = 25e-11  # ampere\n\n# print(t_max, dt, tau, el, vr, vth, r, i_mean)", "_____no_output_____" ] ], [ [ "**SAMPLE OUTPUT**\n\n```\n0.15 0.001 0.02 -0.06 -0.07 -0.05 100000000.0 2.5e-10\n```", "_____no_output_____", "[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W0D1_PythonWorkshop1/solutions/W0D1_Tutorial1_Solution_4adeccd3.py)\n\n", "_____no_output_____", "### Exercise 2\n![synaptic input](https://github.com/mpbrigham/colaboratory-figures/raw/master/nma/python-for-nma/synaptic_input.png)\n\nWe start with a sinusoidal model to simulate the synaptic input $I(t)$ given by:\n\\begin{align*}\n\\\\\nI(t)=I_{mean}\\left(1+\\sin\\left(\\frac{2 \\pi}{0.01}\\,t\\right)\\right)\\\\\n\\\\\n\\end{align*}\n\nCompute the values of synaptic input $I(t)$ between $t=0$ and $t=0.009$ with step $\\Delta t=0.001$.\n\n**Suggestions**\n* Loop variable `step` for 10 steps (`step` takes values from `0` to `9`)\n* At each time step\n  * Compute the value of `t` with variables `step` and `dt`\n  * Compute the value of `i`\n  * Print `i`\n* Use `np.pi` and `np.sin` for evaluating $\\pi$ and $\\sin(\\cdot)$, respectively", "_____no_output_____" ] ], [ [ "# initialize t\nt = 0\n\n# loop for 10 steps, variable 'step' takes values from 0 to 9\nfor step in range(10):\n    t = step * dt\n    i = ...\n    print(i)", "_____no_output_____" ] ], [ [ "**SAMPLE OUTPUT**\n\n```\n2.5e-10\n3.969463130731183e-10\n4.877641290737885e-10\n4.877641290737885e-10\n3.9694631307311837e-10\n2.5000000000000007e-10\n1.0305368692688176e-10\n1.2235870926211617e-11\n1.223587092621159e-11\n1.0305368692688186e-10\n```", "_____no_output_____", "[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W0D1_PythonWorkshop1/solutions/W0D1_Tutorial1_Solution_943bc60a.py)\n\n", "_____no_output_____", "### Exercise 3\nPrint formatting is handy for displaying simulation parameters in a clean and organized form. Python 3.6 introduced the new string formatting [f-strings](https://www.python.org/dev/peps/pep-0498). 
Since we are dealing with type `float` variables, we use `f'{x:.3f}'` for formatting `x` to three decimal places, and `f'{x:.4e}'` for four decimal places in exponential notation.\n```\nx = 3.14159265e-1\nprint(f'{x:.3f}')\n--> 0.314\n\nprint(f'{x:.4e}')\n--> 3.1416e-01\n```\n\nRepeat the loop from the previous exercise and print the `t` values with three decimal places, and the synaptic input $I(t)$ with four decimal places in exponential notation.\n\nFor additional formatting options with f-strings see [here](http://zetcode.com/python/fstring/).\n\n**Suggestions**\n* Print `t` and `i` with the help of *f-string* formatting", "_____no_output_____" ] ], [ [ "# initialize step_end\nstep_end = 10\n\n# loop for step_end steps\nfor step in range(step_end):\n    t = step * dt\n    i = ...\n    print(...)", "_____no_output_____" ] ], [ [ "**SAMPLE OUTPUT**\n\n```\n0.000 2.5000e-10\n0.001 3.9695e-10\n0.002 4.8776e-10\n0.003 4.8776e-10\n0.004 3.9695e-10\n0.005 2.5000e-10\n0.006 1.0305e-10\n0.007 1.2236e-11\n0.008 1.2236e-11\n0.009 1.0305e-10\n```", "_____no_output_____", "[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W0D1_PythonWorkshop1/solutions/W0D1_Tutorial1_Solution_cae53962.py)\n\n", "_____no_output_____", "## ODE integration without spikes\nIn the next exercises, we simulate the evolution of the membrane equation in discrete time steps, with a sufficiently small $\\Delta t$.\n\nWe start by writing the time derivative $d/dt\\,V(t)$ in the membrane equation without taking the limit $\\Delta t \\to 0$:\n\n\\begin{align*}\n\\\\\n\\tau_m\\,\\frac{V\\left(t+\\Delta t\\right)-V\\left(t\\right)}{\\Delta t} &= E_{L} - V(t) + R\\,I(t) \\qquad\\qquad (1)\\\\\n\\\\\n\\end{align*}\n\nThe value of membrane potential $V\\left(t+\\Delta t\\right)$ can be expressed in terms of its previous value $V(t)$ by simple algebraic manipulation. For *small enough* values of $\\Delta t$, this provides a good approximation of the continuous-time integration.\n\nThis operation is an integration since we obtain a sequence $\\{V(t), V(t+\\Delta t), V(t+2\\Delta t),...\\}$ starting from the ODE. Notice how the ODE describes the evolution of $\\frac{d}{dt}\\,V(t)$, the derivative of $V(t)$, but not directly the evolution of $V(t)$. For the evolution of $V(t)$ we need to integrate the ODE, and in this tutorial, we will do a discrete-time integration using the Euler method. See [Numerical methods for ordinary differential equations](https://en.wikipedia.org/wiki/Numerical_methods_for_ordinary_differential_equations) for additional details.", "_____no_output_____" ] ], [ [ "# @title Video: Discrete time integration\nvideo = YouTubeVideo(id='kyCbeR28AYQ', width=854, height=480, fs=1)\nprint(\"Video available at https://youtube.com/watch?v=\" + video.id)\nvideo", "_____no_output_____" ] ], [ [ "### Exercise 4\nCompute the values of $V(t)$ between $t=0$ and $t=0.01$ with step $\\Delta t=0.001$ and $V(0)=E_L$.\n\nWe will write a `for` loop from scratch in this exercise. The following three formulations are all equivalent and loop for three steps:\n```\nfor step in [0, 1, 2]:\n  print(step)\n\nfor step in range(3):\n  print(step)\n\nstart = 0\nend = 3\nstepsize = 1\n\nfor step in range(start, end, stepsize):\n  print(step)\n```\n\n\n**Suggestions**\n* Reorganize Eq. 
(1) to isolate $V\\left(t+\\Delta t\\right)$ on the left side, and express it as a function of $V(t)$ and the other terms\n* Initialize the membrane potential variable `v` to leak potential `el`\n* Loop variable `step` for `10` steps\n* At each time step\n  * Compute the current value of `t`, `i`\n  * Print the current value of `t` and `v`\n  * Update the value of `v`", "_____no_output_____" ] ], [ [ "# initialize step_end and v\nstep_end = 10\nv = el\n\n# loop for step_end steps\nfor step in range(step_end):\n    t = step * dt\n    i = ...\n    print(...)\n    v = ...", "_____no_output_____" ] ], [ [ "**SAMPLE OUTPUT**\n\n```\n0.000 -6.0000e-02\n0.001 -5.8750e-02\n0.002 -5.6828e-02\n0.003 -5.4548e-02\n0.004 -5.2381e-02\n0.005 -5.0778e-02\n0.006 -4.9989e-02\n0.007 -4.9974e-02\n0.008 -5.0414e-02\n0.009 -5.0832e-02\n```", "_____no_output_____", "[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W0D1_PythonWorkshop1/solutions/W0D1_Tutorial1_Solution_95c91766.py)\n\n", "_____no_output_____" ] ], [ [ "# @title Video: Plotting\nfrom IPython.display import YouTubeVideo\nvideo = YouTubeVideo(id='BOh8CsuTFkY', width=854, height=480, fs=1)\nprint(\"Video available at https://youtube.com/watch?v=\" + video.id)\nvideo", "_____no_output_____" ] ], [ [ "### Exercise 5\n![synaptic input discrete](https://github.com/mpbrigham/colaboratory-figures/raw/master/nma/python-for-nma/synaptic_input_discrete.png)\n\nPlot the values of $I(t)$ between $t=0$ and $t=0.024$.\n\n**Suggestions**\n* Increase `step_end`\n* Initialize the figure with `plt.figure`, and set the title and the x and y labels with `plt.title`, `plt.xlabel` and `plt.ylabel`, respectively\n* Replace the printing command `print` with the plotting command `plt.plot` with argument `'ko'` (short version for `color='k'` and `marker='o'`) for small black dots\n* Use `plt.show()` at the end to display the plot", "_____no_output_____" ] ], [ [ "# initialize step_end\nstep_end = 25\n\n# initialize the figure\nplt.figure()\n# Complete these lines and uncomment\n# plt.title(...)\n# plt.xlabel(...)\n# plt.ylabel(...)\n\n# loop for step_end steps\nfor step in range(step_end):\n    t = step * dt\n    i = ...\n    # Complete this line and uncomment\n    # plt.plot(...)\n\n# plt.show()", "_____no_output_____" ] ], [ [ "[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W0D1_PythonWorkshop1/solutions/W0D1_Tutorial1_Solution_23446a7e.py)\n\n*Example output:*\n\n<img alt='Solution hint' align='left' width=559 height=416 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W0D1_PythonWorkshop1/static/W0D1_Tutorial1_Solution_23446a7e_0.png>\n\n", "_____no_output_____", "### Exercise 6\nPlot the values of $V(t)$ between $t=0$ and $t=t_{max}$.\n\n**Suggestions**\n* Compute the required number of steps with `int(t_max/dt)`\n* Use the plotting command for small(er) black dots with argument `'k.'`", "_____no_output_____" ] ], [ [ "# initialize step_end and v\nstep_end = int(t_max / dt)\nv = el\n\n# initialize the figure\nplt.figure()\nplt.title('$V_m$ with sinusoidal I(t)')\nplt.xlabel('time (s)')\nplt.ylabel('$V_m$ (V)');\n\n# loop for step_end steps\nfor step in range(step_end):\n    t = step * dt\n    i = ...\n    # Complete this line and uncomment\n    # plt.plot(...)\n\n    v = ...\n\nt = t + dt\n# Complete this line and uncomment\n# plt.plot(...)\nplt.show()", "_____no_output_____" ] ], [ [ "[*Click for 
solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W0D1_PythonWorkshop1/solutions/W0D1_Tutorial1_Solution_1046fd94.py)\n\n*Example output:*\n\n<img alt='Solution hint' align='left' width=560 height=416 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W0D1_PythonWorkshop1/static/W0D1_Tutorial1_Solution_1046fd94_0.png>\n\n", "_____no_output_____", "---\n## Random synaptic input\nFrom the perspective of neurons, synaptic input is random (or stochastic). We'll improve the synaptic input model by introducing a random input current with statistical properties similar to those of the previous exercise:\n\n\\begin{align*}\n\\\\\nI(t)=I_{mean}\\left(1+0.1\\sqrt{\\frac{t_{max}}{\\Delta t}}\\,\\xi(t)\\right)\\qquad\\text{with }\\xi(t)\\sim U(-1,1)\\\\\n\\\\\n\\end{align*}\n\nwhere $U(-1,1)$ is the [uniform distribution](https://en.wikipedia.org/wiki/Uniform_distribution_(continuous)) with support $x\\in[-1,1]$.\n\nRandom synaptic input $I(t)$ results in a random time course for $V(t)$.", "_____no_output_____", "### Exercise 7\nPlot the values of $V(t)$ between $t=0$ and $t=t_{max}-\\Delta t$ with random input $I(t)$.\n\nInitialize the (pseudo) random number generator (RNG) to a fixed value to obtain the same random input each time.\n\nThe function `np.random.seed()` initializes the RNG, and `np.random.random()` generates samples from the uniform distribution between `0` and `1`.\n\n**Suggestions**\n* Use `np.random.seed()` to initialize the RNG to `0`\n* Use `np.random.random()` to generate random input in the range `[0,1]` at each timestep\n* Multiply the random input by an appropriate factor to expand the range to `[-1,1]`\n* Verify that $V(t)$ has a random time course by changing the initial RNG value\n* Alternatively, comment out the RNG initialization by typing `CTRL` + `/` in the relevant line", "_____no_output_____" ] ], [ [ "# set random number generator\nnp.random.seed(2020)\n\n# initialize step_end and v\nstep_end = int(t_max / dt)\nv = el\n\n# initialize the figure\nplt.figure()\nplt.title('$V_m$ with random I(t)')\nplt.xlabel('time (s)')\nplt.ylabel('$V_m$ (V)')\n\n# loop for step_end steps\nfor step in range(step_end):\n    t = step * dt\n    # Complete this line and uncomment\n    # plt.plot(...)\n\n    i = ...\n    v = ...\n\nplt.show()", "_____no_output_____" ] ], [ [ "[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W0D1_PythonWorkshop1/solutions/W0D1_Tutorial1_Solution_41355f96.py)\n\n*Example output:*\n\n<img alt='Solution hint' align='left' width=560 height=416 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W0D1_PythonWorkshop1/static/W0D1_Tutorial1_Solution_41355f96_1.png>\n\n", "_____no_output_____", "## Ensemble statistics\nMultiple runs of the previous exercise may give the impression of periodic regularity in the evolution of $V(t)$. We'll collect the sample mean over $N=50$ realizations of $V(t)$ with random input to test such a hypothesis. 
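As a toy illustration of what averaging buys us (a minimal sketch with arbitrary array sizes, unrelated to the LIF parameters):\n```\n# averaging 50 realizations of U(-1,1) noise shrinks fluctuations well below 1\nnoise = 2 * np.random.random((50, 1000)) - 1\nprint(np.abs(np.mean(noise, axis=0)).max())\n```\n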
The sample mean, sample variance and sample autocovariance at times $\\left\\{t, s\\right\\}\\in[0,t_{max}]$, and for $N$ realizations $V_n(t)$ are given by:\n\n\\begin{align*}\n\\\\\n\\left\\langle V(t)\\right\\rangle &= \\frac{1}{N}\\sum_{n=1}^N V_n(t) & & \\text{sample mean}\\\\\n\\left\\langle (V(t)-\\left\\langle V(t)\\right\\rangle)^2\\right\\rangle &= \\frac{1}{N-1} \\sum_{n=1}^N \\left(V_n(t)-\\left\\langle V(t)\\right\\rangle\\right)^2 & & \\text{sample variance} \\\\\n\\left\\langle \\left(V(t)-\\left\\langle V(t)\\right\\rangle\\right)\\left(V(s)-\\left\\langle V(s)\\right\\rangle\\right)\\right\\rangle\n&= \\frac{1}{N-1} \\sum_{n=1}^N \\left(V_n(t)-\\left\\langle V(t)\\right\\rangle\\right)\\left(V_n(s)-\\left\\langle V(s)\\right\\rangle\\right) & & \\text{sample autocovariance}\\\\\n\\\\\n\\end{align*}", "_____no_output_____" ] ], [ [ "# @title Video: Ensemble statistics\nvideo = YouTubeVideo(id='4nIAS2oPEFI', width=854, height=480, fs=1)\nprint(\"Video available at https://youtube.com/watch?v=\" + video.id)\nvideo", "_____no_output_____" ] ], [ [ "### Exercise 8\nPlot multiple realizations ($N=50$) of $V(t)$ by storing in a list the voltage of each neuron at time $t$.\n\nKeep in mind that the plotting command `plt.plot(x, y)` requires `x` to have the same number of elements as `y`.\n\nMathematical symbols such as $\\alpha$ and $\\beta$ are specified as `$\\alpha$` and `$\\beta$` in [TeX markup](https://en.wikipedia.org/wiki/TeX). See additional details in [Writing mathematical expressions](https://matplotlib.org/3.2.2/tutorials/text/mathtext.html) in Matplotlib.\n\n**Suggestions**\n* Initialize a list `v_n` with `50` values of membrane leak potential `el`\n* At each time step:\n * Plot `v_n` with argument `'k.'` and parameter `alpha=0.05` to adjust the transparency (by default, `alpha=1`)\n * In the plot command, replace `t` from the previous exercises with a list of size `n` with values `t`\n * Loop over `50` realizations of random input\n * Update `v_n` with the values of $V(t)$\n\n* Why is there a black dot at $t=0$?", "_____no_output_____" ] ], [ [ "# set random number generator\nnp.random.seed(2020)\n\n# initialize step_end, n and v_n\nstep_end = int(t_max / dt)\nn = 50\n# Complete this line and uncomment\n# v_n = ...\n\n# initialize the figure\nplt.figure()\nplt.title('Multiple realizations of $V_m$')\nplt.xlabel('time (s)')\nplt.ylabel('$V_m$ (V)')\n\n# loop for step_end steps\nfor step in range(step_end):\n t = step * dt\n # Complete this line and uncomment\n # plt.plot(...)\n\n # loop for n steps\n for j in range(0, n):\n i = ...\n # Complete this line and uncomment\n # v_n[j] = ...\n\nplt.show()", "_____no_output_____" ] ], [ [ "[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W0D1_PythonWorkshop1/solutions/W0D1_Tutorial1_Solution_8b55f5dd.py)\n\n*Example output:*\n\n<img alt='Solution hint' align='left' width=558 height=413 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W0D1_PythonWorkshop1/static/W0D1_Tutorial1_Solution_8b55f5dd_0.png>\n\n", "_____no_output_____" ], [ "### Exercise 9\nAdd the sample mean $\\left\\langle V(t)\\right\\rangle=\\frac{1}{N}\\sum_{n=1}^N V_n(t)$ to the plot.\n\n**Suggestions**\n* At each timestep:\n * Compute and store in `v_mean` the sample mean $\\left\\langle V(t)\\right\\rangle$ by summing the values of list `v_n` with `sum` and dividing by `n`\n * Plot $\\left\\langle V(t)\\right\\rangle$ with `alpha=0.8` and argument `'C0.'` for blue (you can 
read more about [specifying colors](https://matplotlib.org/tutorials/colors/colors.html#sphx-glr-tutorials-colors-colors-py))\n  * Loop over `50` realizations of random input\n    * Update `v_n` with the values of $V(t)$", "_____no_output_____" ] ], [ [ "# set random number generator\nnp.random.seed(2020)\n\n# initialize step_end, n and v_n\nstep_end = int(t_max / dt)\nn = 50\nv_n = [el] * n\n\n# initialize the figure\nplt.figure()\nplt.title('Multiple realizations of $V_m$')\nplt.xlabel('time (s)')\nplt.ylabel('$V_m$ (V)')\n\n# loop for step_end steps\nfor step in range(step_end):\n    t = step * dt\n\n    v_mean = ...\n    # Complete these lines and uncomment\n    # plt.plot(...)\n    # plt.plot(...)\n\n    for j in range(0, n):\n        i = ...\n        v_n[j] = ...\n\nplt.show()", "_____no_output_____" ] ], [ [ "[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W0D1_PythonWorkshop1/solutions/W0D1_Tutorial1_Solution_98017570.py)\n\n*Example output:*\n\n<img alt='Solution hint' align='left' width=558 height=413 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W0D1_PythonWorkshop1/static/W0D1_Tutorial1_Solution_98017570_0.png>\n\n", "_____no_output_____", "### Exercise 10\nAdd the sample standard deviation $\\sigma(t)\\equiv\\sqrt{\\text{Var}\\left(t\\right)}$ to the plot, with sample variance $\\text{Var}(t) = \\frac{1}{N-1} \\sum_{n=1}^N \\left(V_n(t)-\\left\\langle V(t)\\right\\rangle\\right)^2$.\n\nUse a list comprehension to compute the sample variance `v_var`. Here's an example that initializes a list with the squares of `0` to `9`:\n```\nsquares = [x**2 for x in range(10)]\nprint(squares)\n--> [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]\n```\n\nWhy are we plotting $\\sigma(t)$ rather than $\\text{Var}(t)$? 
What are the units of each, and what are the units of $\\left\\langle V(t)\\right\\rangle$?\n\n**Suggestions**\n* At each timestep:\n  * Compute and store in `v_mean` the sample mean $\\left\\langle V(t)\\right\\rangle$\n  * Initialize a list `v_var_n` with the contribution of each $V_n(t)$ to $\\text{Var}\\left(t\\right)$ with a list comprehension over values of `v_n`\n  * Compute the sample variance `v_var` by summing the values of `v_var_n` with `sum` and dividing by `n-1`\n  * (alternative: loop over the values of `v_n`, add to `v_var` each contribution $V_n(t)$, and divide by `n-1` outside the loop)\n  * Compute the standard deviation `v_std` with the function `np.sqrt`\n  * Plot $\\left\\langle V(t)\\right\\rangle\\pm\\sigma(t)$ with `alpha=0.8` and argument `'C7.'`", "_____no_output_____" ] ], [ [ "# set random number generator\nnp.random.seed(2020)\n\n# initialize step_end, n and v_n\nstep_end = int(t_max / dt)\nn = 50\nv_n = [el] * n\n\n# initialize the figure\nplt.figure()\nplt.title('Multiple realizations of $V_m$')\nplt.xlabel('time (s)')\nplt.ylabel('$V_m$ (V)')\n\n# loop for step_end steps\nfor step in range(step_end):\n    t = step * dt\n\n    v_mean = ...\n    v_var_n = ...\n    v_var = ...\n    v_std = ...\n\n    # Complete these lines and uncomment\n    # plt.plot(...)\n    # plt.plot(...)\n    # plt.plot(...)\n    # plt.plot(...)\n\n    for j in range(0, n):\n        i = ...\n        v_n[j] = ...\n\nplt.show()", "_____no_output_____" ] ], [ [ "[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W0D1_PythonWorkshop1/solutions/W0D1_Tutorial1_Solution_9e048e4b.py)\n\n*Example output:*\n\n<img alt='Solution hint' align='left' width=558 height=413 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W0D1_PythonWorkshop1/static/W0D1_Tutorial1_Solution_9e048e4b_0.png>\n\n", "_____no_output_____", "---\n## Using NumPy\nThe next set of exercises introduces `np.array`, the workhorse from the scientific computation package [NumPy](https://numpy.org). Numpy arrays are the default for numerical data storage and computation, and they will let us separate computing steps from plotting.\n\n![NumPy package](https://github.com/mpbrigham/colaboratory-figures/raw/master/nma/python-for-nma/numpy_logo_small.png)\n\nIn the previous exercises, we updated plots inside the main loop and stored intermediate results in lists for plotting. The purpose was to simplify the earlier exercises as much as possible. However, there are very few scenarios where this technique is necessary, and you should avoid it in the future. Using numpy arrays will significantly simplify our coding narrative by computing inside the main loop and plotting afterward.\n\nLists are much more natural for storing data for purposes other than computation. For example, lists are handy for storing numerical indexes and text.", "_____no_output_____" ] ], [ [ "# @title Video: Using NumPy\nvideo = YouTubeVideo(id='ewyHKKa2_OU', width=854, height=480, fs=1)\nprint(\"Video available at https://youtube.com/watch?v=\" + video.id)\nvideo", "_____no_output_____" ] ], [ [ "### Exercise 11\nRewrite the single neuron plot with random input from _Exercise 7_ with numpy arrays. 
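Two numpy constructors do most of the setup in what follows (a quick sketch with toy arguments, independent of the exercise):\n```\nprint(np.linspace(0., 0.15, num=4))   # evenly spaced samples: [0.   0.05 0.1  0.15]\nprint(-60e-3 * np.ones(3))            # constant array: [-0.06 -0.06 -0.06]\n```\n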
The time range, voltage values, and synaptic current are initialized or pre-computed as numpy arrays before numerical integration.\n\n**Suggestions**\n* Use `np.linspace` to initialize a numpy array `t_range` with `num=step_end=150` values from `0` to `t_max`\n* Use `np.ones` to initialize a numpy array `v` with `step_end + 1` leak potential values `el`\n* Pre-compute `step_end` synaptic current values in numpy array `syn` with `np.random.random(step_end)` for `step_end` random numbers\n* Iterate for numerical integration of `v`\n* Since `v[0]=el`, we should iterate for `step_end` steps, for example by skipping `step=0`. Why?", "_____no_output_____" ] ], [ [ "# set random number generator\nnp.random.seed(2020)\n\n# initialize step_end, t_range, v and syn\nstep_end = int(t_max / dt) - 1\n# skip the endpoint to match Exercise 7 plot\nt_range = np.linspace(0, t_max, num=step_end, endpoint=False)\nv = el * np.ones(step_end)\nsyn = ...\n\n# loop for step_end - 1 steps\n# Complete these lines and uncomment\n# for step in range(1, step_end):\n # v[step] = ...\n\n\nplt.figure()\nplt.title('$V_m$ with random I(t)')\nplt.xlabel('time (s)')\nplt.ylabel('$V_m$ (V)')\n\nplt.plot(t_range, v, 'k.')\nplt.show()", "_____no_output_____" ] ], [ [ "[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W0D1_PythonWorkshop1/solutions/W0D1_Tutorial1_Solution_4427a815.py)\n\n*Example output:*\n\n<img alt='Solution hint' align='left' width=560 height=416 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W0D1_PythonWorkshop1/static/W0D1_Tutorial1_Solution_4427a815_0.png>\n\n", "_____no_output_____" ], [ "### Exercise 12\nLet's practice using `enumerate` to iterate over the indexes and values of the synaptic current array `syn`.\n\n**Suggestions**\n* Iterate indexes and values of `syn` with `enumerate` in the `for` loop\n* Plot `v` with argument `'k'` for displaying a line instead of dots", "_____no_output_____" ] ], [ [ "# set random number generator\nnp.random.seed(2020)\n\n# initialize step_end, t_range, v and syn\nstep_end = int(t_max / dt)\nt_range = np.linspace(0, t_max, num=step_end)\nv = el * np.ones(step_end)\nsyn = i_mean * (1 + 0.1 * (t_max / dt)**(0.5) * (2 * np.random.random(step_end) - 1))\n\n# loop for step_end values of syn\nfor step, i in enumerate(syn):\n # skip first iteration\n if step==0:\n continue\n # Complete this line and uncomment\n # v[step] = ...\n\nplt.figure()\nplt.title('$V_m$ with random I(t)')\nplt.xlabel('time (s)')\nplt.ylabel('$V_m$ (V)')\n\nplt.plot(t_range, v, 'k')\nplt.show()", "_____no_output_____" ] ], [ [ "[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W0D1_PythonWorkshop1/solutions/W0D1_Tutorial1_Solution_4139f63a.py)\n\n*Example output:*\n\n<img alt='Solution hint' align='left' width=560 height=416 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W0D1_PythonWorkshop1/static/W0D1_Tutorial1_Solution_4139f63a_0.png>\n\n", "_____no_output_____" ] ], [ [ "# @title Video: Aggregation\nvideo = YouTubeVideo(id='1ME-0rJXLFg', width=854, height=480, fs=1)\nprint(\"Video available at https://youtube.com/watch?v=\" + video.id)\nvideo", "_____no_output_____" ] ], [ [ "### Exercise 13\nPlot multiple realizations ($N=50$) of $V(t)$ by storing the voltage of each neuron at time $t$ in a numpy array.\n\n**Suggestions**\n* Initialize a numpy array `v_n` of shape `(n, step_end)` with membrane leak potential values `el`\n* 
Pre-compute synaptic current values in numpy array `syn` of shape `(n, step_end)`\n* Iterate `step_end` steps with a `for` loop for numerical integration\n* Plot results with a single plot command, by providing `v_n.T` to the plot function. `v_n.T` is the transposed version of `v_n` (with rows and columns swapped).", "_____no_output_____" ] ], [ [ "# set random number generator\nnp.random.seed(2020)\n\n# initialize step_end, n, t_range, v and syn\nstep_end = int(t_max / dt)\nn = 50\nt_range = np.linspace(0, t_max, num=step_end)\nv_n = el * np.ones([n, step_end])\nsyn = ...\n\n# loop for step_end - 1 steps\n# Complete these lines and uncomment\n# for step in range(1, step_end):\n   # v_n[:, step] = ...\n\n\n# initialize the figure\nplt.figure()\nplt.title('Multiple realizations of $V_m$')\nplt.xlabel('time (s)')\nplt.ylabel('$V_m$ (V)')\n\n# Complete this line and uncomment\n# plt.plot(...)\nplt.show()", "_____no_output_____" ] ], [ [ "[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W0D1_PythonWorkshop1/solutions/W0D1_Tutorial1_Solution_e8466b6b.py)\n\n*Example output:*\n\n<img alt='Solution hint' align='left' width=560 height=416 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W0D1_PythonWorkshop1/static/W0D1_Tutorial1_Solution_e8466b6b_0.png>\n\n", "_____no_output_____", "### Exercise 14\nAdd the sample mean $\\left\\langle V(t)\\right\\rangle$ and standard deviation $\\sigma(t)\\equiv\\sqrt{\\text{Var}\\left(t\\right)}$ to the plot.\n\n`np.mean(v_n, axis=0)` averages over rows (axis `0`, i.e. over the `n` neurons), returning one mean per time step\n\n`np.mean(v_n, axis=1)` averages over columns (axis `1`, i.e. over the time steps), returning one mean per neuron\n\n**Suggestions**\n* Use `np.mean` and `np.std` with `axis=0` to average over neurons\n* Use the `label` argument in `plt.plot` to specify the label of each trace. Label only the last voltage trace to avoid labeling all `N` of them.", "_____no_output_____" ] ], [ [ "# set random number generator\nnp.random.seed(2020)\n\n# initialize step_end, n, t_range, v and syn\nstep_end = int(t_max / dt)\nn = 50\nt_range = np.linspace(0, t_max, num=step_end)\nv_n = el * np.ones([n, step_end])\nsyn = i_mean * (1 + 0.1 * (t_max / dt)**(0.5) * (2 * np.random.random([n, step_end]) - 1))\n\n# loop for step_end - 1 steps\nfor step in range(1, step_end):\n    v_n[:,step] = v_n[:,step - 1] + (dt / tau) * (el - v_n[:, step - 1] + r * syn[:, step])\n\nv_mean = ...\nv_std = ...\n\n# initialize the figure\nplt.figure()\nplt.title('Multiple realizations of $V_m$')\nplt.xlabel('time (s)')\nplt.ylabel('$V_m$ (V)')\n\nplt.plot(t_range, v_n[:-1].T, 'k', alpha=0.3)\n\n# Complete these lines and uncomment\n# plt.plot(t_range, v_n[-1], 'k', alpha=0.3, label=...)\n# plt.plot(t_range, ..., 'C0', alpha=0.8, label='mean')\n# plt.plot(t_range, ..., 'C7', alpha=0.8)\n# plt.plot(t_range, ..., 'C7', alpha=0.8, label=...)\n\n#plt.legend()\nplt.show()", "_____no_output_____" ] ], [ [ "[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W0D1_PythonWorkshop1/solutions/W0D1_Tutorial1_Solution_061d112f.py)\n\n*Example output:*\n\n<img alt='Solution hint' align='left' width=560 height=416 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W0D1_PythonWorkshop1/static/W0D1_Tutorial1_Solution_061d112f_0.png>\n\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
e7d064d7a2f56b687c641911e7d019af06c9a590
20,049
ipynb
Jupyter Notebook
doc/source/tutorials/Networks.ipynb
marcelo-alves87/scikit-rf
0395d79649313ab2914623d4914e7d9aad63fc49
[ "BSD-3-Clause" ]
1
2020-05-05T06:07:58.000Z
2020-05-05T06:07:58.000Z
doc/source/tutorials/Networks.ipynb
marcelo-alves87/scikit-rf
0395d79649313ab2914623d4914e7d9aad63fc49
[ "BSD-3-Clause" ]
null
null
null
doc/source/tutorials/Networks.ipynb
marcelo-alves87/scikit-rf
0395d79649313ab2914623d4914e7d9aad63fc49
[ "BSD-3-Clause" ]
null
null
null
25.970207
347
0.571051
[ [ [ ".. _networks::\n\n|\n|\n\nDownload This Notebook: :download:`Networks.ipynb`\n", "_____no_output_____" ] ], [ [ "# Networks", "_____no_output_____" ], [ "## Introduction ", "_____no_output_____" ], [ "This tutorial gives an overview of the microwave network analysis \nfeatures of **skrf**. For this tutorial, and the rest of the scikit-rf documentation, it is assumed that **skrf** has been imported as `rf`. Whether or not you follow this convention in your own code is up to you.", "_____no_output_____" ] ], [ [ "import skrf as rf\nfrom pylab import *", "_____no_output_____" ] ], [ [ "If this produces an import error, please see [Installation ](Installation.ipynb).", "_____no_output_____" ], [ "## Creating Networks\n", "_____no_output_____" ], [ "**skrf** provides an object for a N-port microwave [Network](../api/network.rst). A [Network](../api/network.rst) can be created in a number of ways. One way is from data stored in a touchstone file. ", "_____no_output_____" ] ], [ [ "from skrf import Network, Frequency\n\nring_slot = Network('data/ring slot.s2p')", "_____no_output_____" ] ], [ [ "\t\nA short description of the network will be printed out if entered onto the command line\n\t", "_____no_output_____" ] ], [ [ "ring_slot", "_____no_output_____" ] ], [ [ "Networks can also be created by directly passing values for the `frequency`, `s`-parameters and port impedance `z0`. ", "_____no_output_____" ] ], [ [ "freq = Frequency(1,10,101,'ghz')\nntwk = Network(frequency=freq, s= [-1, 1j, 0], z0=50, name='slippy') \nntwk", "_____no_output_____" ] ], [ [ "\t\nSee [Network](../api/network.rst) for more information on network creation.", "_____no_output_____" ], [ "## Basic Properties\n\n\t\nThe basic attributes of a microwave [Network](../api/network.rst) are provided by the \nfollowing properties :\n\n* `Network.s` : Scattering Parameter matrix. \n* `Network.z0` : Port Characteristic Impedance matrix.\n* `Network.frequency` : Frequency Object. ", "_____no_output_____" ], [ "The [Network](../api/network.rst) object has numerous other properties and methods. If you are using IPython, then these properties and methods can be 'tabbed' out on the command line. ", "_____no_output_____" ], [ "\n\tIn [1]: ring_slot.s<TAB>\n\tring_slot.line.s ring_slot.s_arcl ring_slot.s_im\n\tring_slot.line.s11 ring_slot.s_arcl_unwrap ring_slot.s_mag\n\t...", "_____no_output_____" ], [ "\nAll of the network parameters are represented internally as complex `numpy.ndarray`. The s-parameters are of shape (nfreq, nport, nport)", "_____no_output_____" ] ], [ [ "shape(ring_slot.s)", "_____no_output_____" ] ], [ [ "## Slicing", "_____no_output_____" ], [ "You can slice the `Network.s` attribute any way you want.", "_____no_output_____" ] ], [ [ "ring_slot.s[:11,1,0] # get first 10 values of S21", "_____no_output_____" ] ], [ [ "Slicing by frequency can also be done directly on Network objects like so ", "_____no_output_____" ] ], [ [ "ring_slot[0:10] # Network for the first 10 frequency points", "_____no_output_____" ] ], [ [ "or with a human friendly string,", "_____no_output_____" ] ], [ [ "ring_slot['80-90ghz']", "_____no_output_____" ] ], [ [ "Notice that slicing directly on a Network **returns a Network**. 
So, a nice way to express slicing in both dimensions is ", "_____no_output_____" ] ], [ [ "ring_slot.s11['80-90ghz'] ", "_____no_output_____" ] ], [ [ "## Plotting ", "_____no_output_____", "Amongst other things, the methods of the [Network](../api/network.rst) class provide convenient ways to plot components of the network parameters, \n\n* `Network.plot_s_db` : plot magnitude of s-parameters in log scale\n* `Network.plot_s_deg` : plot phase of s-parameters in degrees\n* `Network.plot_s_smith` : plot complex s-parameters on Smith Chart\n* ...\n\nIf you would like to use skrf's plot styling,", "_____no_output_____" ] ], [ [ "%matplotlib inline \nrf.stylely()", "_____no_output_____" ] ], [ [ "\t\nTo plot all four s-parameters of the `ring_slot` on the Smith Chart:", "_____no_output_____" ] ], [ [ "ring_slot.plot_s_smith()", "_____no_output_____" ] ], [ [ "Combining this with the slicing features, ", "_____no_output_____" ] ], [ [ "from matplotlib import pyplot as plt\n\nplt.title('Ring Slot $S_{21}$')\n\nring_slot.s11.plot_s_db(label='Full Band Response')\nring_slot.s11['82-90ghz'].plot_s_db(lw=3,label='Band of Interest')", "_____no_output_____" ] ], [ [ "For more detailed information about plotting see [Plotting](Plotting.ipynb). ", "_____no_output_____", "\n## Operators\n\n### Arithmetic Operations \n\t\nElement-wise mathematical operations on the scattering parameter matrices are accessible through overloaded operators. To illustrate their usage, load a couple of Networks stored in the `data` module. ", "_____no_output_____" ] ], [ [ "from skrf.data import wr2p2_short as short \nfrom skrf.data import wr2p2_delayshort as delayshort \n\n\nshort - delayshort\nshort + delayshort\nshort * delayshort\nshort / delayshort\n", "_____no_output_____" ] ], [ [ "All of these operations return [Network](../api/network.rst) types. For example, to plot the complex difference between `short` and `delayshort`,", "_____no_output_____" ] ], [ [ "difference = (short - delayshort)\ndifference.plot_s_mag(label='Mag of difference')", "_____no_output_____" ] ], [ [ "Another common application is calculating the phase difference using the division operator,", "_____no_output_____" ] ], [ [ "(delayshort/short).plot_s_deg(label='Detrended Phase')", "_____no_output_____" ] ], [ [ "Linear operators can also be used with scalars or a `numpy.ndarray` that is the same length as the [Network](../api/network.rst). ", "_____no_output_____" ] ], [ [ "hopen = (short*-1)\nhopen.s[:3,...]", "_____no_output_____" ], [ "rando =  hopen *rand(len(hopen))\nrando.s[:3,...]", "_____no_output_____" ] ], [ [ ".. notice :: \t\n \n    Note that if you multiply a Network by a `numpy.ndarray`, be sure to place the array on the right side.", "_____no_output_____" ] ], [ [ "### Cascading and De-embedding\n\nCascading and de-embedding 2-port Networks can also be done through operators. The `cascade` function can be called through the power operator,  `**`. To calculate a new network that is the cascaded connection of the two individual Networks `line` and `short`, ", "_____no_output_____" ] ], [ [ "short = rf.data.wr2p2_short\nline = rf.data.wr2p2_line\ndelayshort = line ** short", "_____no_output_____" ] ], [ [ "De-embedding can be accomplished by cascading the *inverse* of a network. The inverse of a network is accessed through the property `Network.inv`. 
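Why does composing with the inverse undo a cascade? For 2-ports, cascading corresponds to multiplying wave-cascading (T) matrices, so schematically $T_{line}^{-1}\\,(T_{line}\\,T_{short}) = T_{short}$ (a sketch of the idea rather than skrf's literal implementation). 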
To de-embed the `short` from `delayshort`,", "_____no_output_____" ] ], [ [ "short_2 = line.inv ** delayshort\n\nshort_2 == short", "_____no_output_____" ] ], [ [ "Comparison operators also work with networks.", "_____no_output_____", "## Connecting Multi-ports \n\n**skrf** supports the connection of arbitrary ports of N-port networks. It accomplishes this using an algorithm called sub-network growth [[1]](#References), available through the function `connect()`. Terminating one port of an ideal 3-way splitter can be done like so,", "_____no_output_____" ] ], [ [ "tee = rf.data.tee\ntee", "_____no_output_____" ] ], [ [ "\t\n\nTo connect port `1` of the tee to port `0` of the delay short,", "_____no_output_____" ] ], [ [ "terminated_tee = rf.connect(tee,1,delayshort,0)\nterminated_tee", "_____no_output_____" ] ], [ [ "Note that this function takes into account port impedances. If two connected ports have different port impedances, an appropriate impedance mismatch is inserted.\n\t\n## Interpolation and Concatenation\n\nA common need is to change the number of frequency points of a [Network](../api/network.rst). For instance, to use the operators and cascading functions, the networks involved must have matching frequencies. If two networks have different frequency information, then an error will be raised, ", "_____no_output_____" ] ], [ [ "from skrf.data import wr2p2_line1 as line1\n\nline1", "_____no_output_____" ] ], [ [ "    line1+line\n    \n    ---------------------------------------------------------------------------\n    IndexError                                Traceback (most recent call last)\n    <ipython-input-49-82040f7eab08> in <module>()\n    ----> 1 line1+line\n\n    /home/alex/code/scikit-rf/skrf/network.py in __add__(self, other)\n        500 \n        501         if isinstance(other, Network):\n    --> 502             self.__compatable_for_scalar_operation_test(other)\n        503             result.s = self.s + other.s\n        504         else:\n\n    /home/alex/code/scikit-rf/skrf/network.py in __compatable_for_scalar_operation_test(self, other)\n        701         '''\n        702         if other.frequency  != self.frequency:\n    --> 703             raise IndexError('Networks must have same frequency. See `Network.interpolate`')\n        704 \n        705         if other.s.shape != self.s.shape:\n\n    IndexError: Networks must have same frequency. See `Network.interpolate`\n", "_____no_output_____", "\t\nThis problem can be solved by interpolating one of the Networks along the frequency axis using `Network.resample`. ", "_____no_output_____" ] ], [ [ "line1.resample(201)\nline1", "_____no_output_____" ] ], [ [ "And now we can operate on them,", "_____no_output_____" ] ], [ [ "line1 + line", "_____no_output_____" ] ], [ [ "You can also interpolate from a `Frequency` object. For example, ", "_____no_output_____" ] ], [ [ "line.interpolate_from_f(line1.frequency)", "_____no_output_____" ] ], [ [ "A related application is the need to combine Networks which cover different frequency ranges. Two Networks can be concatenated (aka stitched) together using `stitch`, which concatenates networks along their frequency axis. To combine a WR-2.2 Network with a WR-1.5 Network, \n ", "_____no_output_____" ] ], [ [ "from skrf.data import wr2p2_line, wr1p5_line\n\nbig_line = rf.stitch(wr2p2_line, wr1p5_line)\nbig_line", "_____no_output_____" ] ], [ [ "## Reading and Writing \n\n\nFor long-term data storage, **skrf** has support for reading and partial support for writing the [touchstone file format](http://en.wikipedia.org/wiki/Touchstone_file). 
Reading is accomplished with the Network initializer as shown above, and writing with the method `Network.write_touchstone()`.\n\nFor **temporary** data storage, **skrf** objects can be [pickled](http://docs.python.org/2/library/pickle.html) with the functions `skrf.io.general.read` and `skrf.io.general.write`. The reason to use temporary pickles over touchstones is that they store all attributes of a network, while touchstone files only store partial information. ", "_____no_output_____" ] ], [ [ "rf.write('data/myline.ntwk',line) # write out Network using pickle", "_____no_output_____" ], [ "ntwk = Network('data/myline.ntwk') # read Network using pickle", "_____no_output_____" ] ], [ [ ".. warning:: \n\t\n\tPickling methods can't support long-term data storage because they require the structure of the object being written to remain unchanged, something that cannot be guaranteed in future versions of skrf. (see http://docs.python.org/2/library/pickle.html) \n", "_____no_output_____" ] ], [ [ "Frequently there is an entire directory of files that need to be analyzed. `rf.read_all` creates Networks from all files in a directory quickly. To load all **skrf** files in the `data/` directory which contain the string `'wr2p2'`:", "_____no_output_____" ] ], [ [ "dict_o_ntwks = rf.read_all(rf.data.pwd, contains = 'wr2p2')\ndict_o_ntwks", "_____no_output_____" ] ], [ [ "Other times you know the list of files that need to be analyzed. `rf.read_all` also accepts a `files` parameter. This example file list contains only files within the same directory, but you can organize the files however best suits your application.", "_____no_output_____" ] ], [ [ "import os\n\ndict_o_ntwks_files = rf.read_all(files=[os.path.join(rf.data.pwd, test_file) for test_file in ['ntwk1.s2p', 'ntwk2.s2p']])\ndict_o_ntwks_files", "_____no_output_____" ] ], [ [ "## Other Parameters\t\n\nThis tutorial focuses on s-parameters, but other network representations are available as well. Impedance and Admittance Parameters can be accessed through the parameters `Network.z` and `Network.y`, respectively. Scalar components of complex parameters, such as `Network.z_re` and `Network.z_im`, along with the corresponding plotting methods, are available as well.\n\nOther parameters are only available for 2-port networks, such as wave cascading parameters (`Network.t`), and ABCD-parameters (`Network.a`)", "_____no_output_____" ] ], [ [ "ring_slot.z[:3,...]", "_____no_output_____" ], [ "ring_slot.plot_z_im(m=1,n=0)", "_____no_output_____" ] ], [ [ "## Conclusion\n\nThere are many more features of Networks that can be found in [networks](networks.rst).", "_____no_output_____", "## References\n\n\n[1] Compton, R.C., \"Perspectives in microwave circuit analysis,\" Circuits and Systems, 1989., Proceedings of the 32nd Midwest Symposium on, vol. 2, pp. 716-718, 14-16 Aug 1989. URL: http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=101955&isnumber=3167\n", "_____no_output_____" ] ] ]
[ "raw", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "raw", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "raw", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "raw" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "raw" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "raw" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ] ]
e7d0685c24bafd1c5d836248630904fdf24468f4
239,814
ipynb
Jupyter Notebook
Modulo2/.ipynb_checkpoints/Clase11_MapaLogistico-checkpoint.ipynb
ariadnagalindom/SimMat2018-2
608b7bca8f9a53bbac43076489c13cc59c936824
[ "MIT" ]
null
null
null
Modulo2/.ipynb_checkpoints/Clase11_MapaLogistico-checkpoint.ipynb
ariadnagalindom/SimMat2018-2
608b7bca8f9a53bbac43076489c13cc59c936824
[ "MIT" ]
null
null
null
Modulo2/.ipynb_checkpoints/Clase11_MapaLogistico-checkpoint.ipynb
ariadnagalindom/SimMat2018-2
608b7bca8f9a53bbac43076489c13cc59c936824
[ "MIT" ]
1
2019-01-28T16:47:14.000Z
2019-01-28T16:47:14.000Z
421.465729
94,940
0.935292
[ [ [ "# ¿Cómo crece una población? \n\n## Antes de empezar: llenar la siguiente encuesta.\n- https://forms.office.com/Pages/ResponsePage.aspx?id=8kgDb5jkyUWE9MbYHc_9_oplb4UZe4dMnU4bxi5xU55UQjlEQ1pLWElPOE9ON082RktFQVdRWEtPSS4u\n\n\n> El modelo más simple de crecimiento poblacional de organismos es $\\frac{dx}{dt}=rx$, donde $x(t)$ es la población en el tiempo $t$ y $r>0$ es la tasa de crecimiento.\n\n> Este modelo predice crecimiento exponencial $x(t)=x_0e^{rt}$ (solución de la ecuación diferencial) donde $x_0=x(0)$ es la población inicial. ¿Es esto válido?\n- Recordar que $\\lim_{t\\to\\infty}x(t)=x_0\\lim_{t\\to\\infty}e^{rt}=\\infty$.\n- Este modelo no tiene en cuenta entonces sobrepoblación ni recursos limitados.\n\n> En realidad la tasa de crecimiento no es una constante, sino que depende de la población $\\frac{dx}{dt}=\\mu(x)x$. Cuando $x$ es pequeña $\\mu(x)\\approx r$, como antes, pero cuando $x>1$ (población normalizada) $\\mu(x)<0$: la tasa de muerte es mayor a la tasa de nacimiento. Una forma matemática conveniente de modelar lo anterior es con una tasa de crecimiento $\\mu(x)$ decreciendo linealmente con $x$.\n\nReferencia:\n- Strogatz, Steven. *NONLINEAR DYNAMICS AND CHAOS*, ISBN: 9780813349107, (eBook disponible en biblioteca).", "_____no_output_____" ], [ "<img style=\"float: center;\" src=\"./poblacion.jpg\" width=\"450px\" height=\"250px\" />", "_____no_output_____" ], [ "## Ecuación Logística\nPrimero, veamos como luce $\\mu(x)$ con decrecimiento lineal respecto a la población x.\n\nComo queremos que $\\mu(0)=r$ y $\\mu(1)=0$, la línea recta que conecta estos puntos es... (graficar)", "_____no_output_____" ] ], [ [ "# Importar librerías necesarias\n", "_____no_output_____" ], [ "# Definir función mu(x)\n", "_____no_output_____" ], [ "# Graficar\n", "_____no_output_____" ] ], [ [ "___\nEntonces, con esta elección de $\\mu(x)=r(1-x)$, obtenemos la llamada **ecuación lógistica**, publicada por Pierre Verhulst en 1838.\n\n$$\\frac{dx}{dt} = r\\; x\\; (1- x)$$", "_____no_output_____" ], [ "** Solución a la ecuación diferencial ** \n\nLa ecuación diferencial inicial tiene *solución analítica*, \n$$ x(t) = \\frac{1}{1+ (\\frac{1}{x_{0}}- 1) e^{-rt}}.$$\n\n<font color = red> Ver en el tablero... </font>", "_____no_output_____" ], [ "Graficamos varias curvas de la solución analítica para $r = \\left[-1, 1\\right]$.", "_____no_output_____" ] ], [ [ "# Definir la solución analítica x(t,x0)\n\n\n# Vector de tiempo\n\n# Condicion inicial\n", "_____no_output_____" ], [ "# Graficar para diferentes r entre -1 y 1\n", "_____no_output_____" ] ], [ [ "Como podemos ver, la solución a está ecuación en el continuo nos puede ganantizar la extinción o bien un crecimiento descomunal, dependiendo del valor asignado a $r$. ", "_____no_output_____" ], [ "*Numéricamente*, ¿cómo resolveríamos esta ecuación? ", "_____no_output_____" ] ], [ [ "# Importamos función para integrar numéricamente ecuaciones diferenciales\n", "_____no_output_____" ], [ "# Definimos el campo de la ecuación diferencial\n", "_____no_output_____" ], [ "# Parámetro r\n\n# Condición inicial\n\n# Vector de tiempo\n\n# Solución\n", "_____no_output_____" ], [ "# Gráfico de la solución\n", "_____no_output_____" ] ], [ [ "### ¿Qué tan buena es la aproximación de la solución numérica?\nHay ecuaciones diferenciales ordinarias no lineales para las cuales es imposible obtener la solución exacta. 
In these cases, an approximate solution is evaluated numerically.\n\nFor the previous case it was possible to obtain the exact solution, which allows us to compare both solutions and assess how good the approximation given by the numerical solution is.\n\nLet us first look at this graphically", "_____no_output_____" ] ], [ [ "# Numerical solution\n\n# Exact solution\n", "_____no_output_____" ], [ "# Comparison plot\n", "_____no_output_____" ] ], [ [ "Graphically we see that the numerical solution is close to (coincides with) the exact solution. However, with this plot we cannot visualize how close one solution is to the other. What if we evaluate the error?", "_____no_output_____" ] ], [ [ "# Approximation error\n", "_____no_output_____" ], [ "# Plot of the error\n", "_____no_output_____" ] ], [ [ "So, **qualitatively** we already saw that the numerical solution is *good enough*. Even so, it is always good to quantify *how good* the approximation is. Several ways:\n- <font color=blue>Norm of the error</font>: we have the approximation error at certain points (specified by the time vector). This error is therefore a vector, and we can take its 2-norm\n\n$$||e||_2=\\sqrt{e[0]^2+\\dots+e[n-1]^2}$$", "_____no_output_____" ] ], [ [ "np.linalg.norm(error)", "_____no_output_____" ] ], [ [ "- <font color=blue>Mean squared error</font>: another way to quantify it is with the mean squared error\n\n$$e_{ms}=\\frac{e[0]^2+\\dots+e[n-1]^2}{n}$$", "_____no_output_____" ] ], [ [ "np.mean(error**2)", "_____no_output_____" ] ], [ [ "- <font color=blue>Integral of the squared error</font>: evaluates the accumulation of squared error. It can be computed with the following rectangular approximation of the integral\n\n$$e_{is}=\\int_{0}^{t_f}e(t)^2\\text{d}t\\approx \\left(e[0]^2+\\dots+e[n-1]^2\\right)h$$\n\nwhere $h$ is the step size of the time vector.", "_____no_output_____" ] ], [ [ "h = t[1]-t[0]\nnp.sum(error**2)*h", "_____no_output_____" ] ], [ [ "### Remarks on the logistic model\nThe model should not be taken literally. Rather, it should be interpreted metaphorically as saying that the population has a tendency to grow up to its cap, or else to disappear.\n\nThe logistic equation was tested in laboratory experiments on bacterial colonies under conditions of constant climate, food supply, and absence of predators. The experiments showed that the equation predicted the real behavior very well.\n\nOn the other hand, the prediction did not turn out as well for fruit flies, beetles, and other organisms with complex life cycles. In those cases immense fluctuations (oscillations) of the population were observed.", "_____no_output_____" ], [ "___\n## The Logistic Map\n> The logistic equation (logistic growth curve) is a model of growth that is continuous in time. A modification of the continuous equation into a discrete recurrence equation, known as the **logistic map**, is widely used.\n\nReference: \n- https://es.wikipedia.org/wiki/Aplicación_log%C3%ADstica\n- https://en.wikipedia.org/wiki/Logistic_map\n\n> If we replace the logistic equation by the difference equation: \n\n> $$x_{n+1} = r\\; x_{n}(1- x_{n}),$$\n\n> where $r$ is the maximum growth rate of the population and $x_{n}$ is the nth iterate. 
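As a quick taste of the dynamics before building the full picture (a sketch with an arbitrarily chosen $r$ in the period-2 window):\n```\nx = 0.2\nfor n in range(100):\n    x = 3.2 * x * (1 - x)   # iterate the map at r = 3.2\nprint(x)   # the tail of the orbit alternates between roughly 0.513 and 0.799\n```\n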
So, what we have to program is the following recursion\n\n> $$x_{n+1}^{(r)} = f_r(x_n^{(r)}) = rx_n^{(r)}(1-x_n^{(r)})$$", "_____no_output_____" ], [ "The following `gif` shows the first 63 iterations of the above equation for different values of $r$ varying between 2 and 4.\n\n<img style=\"float: center;\" src=\"https://upload.wikimedia.org/wikipedia/commons/1/1f/Logistic_map_animation.gif\" width=\"800px\" height=\"400px\" />\n\nTaken from https://upload.wikimedia.org/wikipedia/commons/1/1f/Logistic_map_animation.gif.\n\nNote that:\n- For $2<r<3$ the solutions settle at an equilibrium value.\n- For $3<r<1+\\sqrt{6}\\approx 3.44949$ the solutions oscillate between two values.\n- For $3.44949<r<3.54409$ the solutions oscillate among four values.\n- For $3.54409<r\\lesssim 3.56995$ the solutions oscillate among 8, 16, 32, ... values, and for $r\\gtrsim 3.56995$ the solutions exhibit **chaotic** behavior.\n\n<font color=red> Chaos: deterministic aperiodic behavior that is very sensitive to initial conditions. That is, small variations in those initial conditions can lead to large differences in future behavior</font>", "_____no_output_____" ], [ "**How can we capture this behavior in a single plot?**", "_____no_output_____" ] ], [ [ "# Definition of the logistic map function\ndef mapa_logistico(r, x):\n    return r * x * (1 - x)", "_____no_output_____" ], [ "# For one thousand values of r between 2.0 and 4.0\nn = 1000\nr = np.linspace(2.0, 4.0, n)", "_____no_output_____" ], [ "# We run 1000 iterations and keep the last 100 (capturing the final behavior)\niteraciones = 1000\nultimos = 100\n\n# The same initial condition for all cases. \nx = 1e-5 * np.ones(n)", "_____no_output_____" ], [ "# Plot\nplt.figure(figsize=(7, 5))\nfor i in np.arange(iteraciones):\n    x = mapa_logistico(r, x)\n    if i >= (iteraciones - ultimos):\n        plt.plot(r, x, ',k', alpha=.2)\nplt.xlim(np.min(r), np.max(r))\nplt.ylim(-.1, 1.1)\nplt.title(\"Bifurcation diagram\", fontsize=20)\nplt.xlabel('$r$', fontsize=18)\nplt.ylabel('$x$', fontsize=18)\nplt.show()", "_____no_output_____" ], [ "fig, (ax1, ax2) = plt.subplots(1, 2, sharex='col', sharey='row',figsize =(13,4.5))\nr = np.linspace(.5, 4.0, n)\nfor i in np.arange(iteraciones):\n    x = mapa_logistico(r, x)\n    if i >= (iteraciones - ultimos):\n        ax1.plot(r, x, '.k', alpha=1, ms = .1)\nr = np.linspace(2.5, 4.0, n)\nfor i in np.arange(iteraciones):\n    x = mapa_logistico(r, x)\n    if i >= (iteraciones - ultimos):\n        ax2.plot(r, x, '.k', alpha=1, ms = .1)\nax1.set_xlim(.4, 4)\nax1.set_ylim(-.1, 1.1)\nax2.set_xlim(2.5, 4)\nax2.set_ylim(-.1, 1.1)\nax1.set_ylabel('$x$', fontsize = 20)\nax1.set_xlabel('$r$', fontsize = 20)\nax2.set_xlabel('$r$', fontsize = 20)\nplt.show()", "_____no_output_____" ], [ "fig, (ax1, ax2) = plt.subplots(1, 2, sharex='col', sharey='row',figsize =(13,4.5))\nr = np.linspace(.5, 4.0, n)\nfor i in np.arange(iteraciones):\n    x = mapa_logistico(r, x)\n    if i >= (iteraciones - ultimos):\n        ax1.scatter(r, x, s = .1, cmap= 'inferno', c = x, lw = 0)\nr = np.linspace(2.5, 4.0, n)\nfor i in np.arange(iteraciones):\n    x = mapa_logistico(r, x)\n    if i >= (iteraciones - ultimos):\n        ax2.scatter(r, x, s = .1, cmap = 'inferno', c = x, lw = 0)\nax1.set_xlim(.4, 4)\nax1.set_ylim(-.1, 1.1)\nax2.set_xlim(2.5, 4)\nax2.set_ylim(-.1, 1.1)\nax1.set_ylabel('$x$', fontsize = 20)\nax1.set_xlabel('$r$', fontsize = 20)\nax2.set_xlabel('$r$', fontsize = 20)\nplt.show()", "_____no_output_____" ] ], [ [ "<script>\n  $(document).ready(function(){\n    $('div.prompt').hide();\n    
, [ [ "<footer id=\"attribution\" style=\"float:right; color:#808080; background:#fff;\">\nCreated with Jupyter by Lázaro Alonso. Modified by Esteban Jiménez Rodríguez.\n</footer>", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ] ]
e7d07f396147500ffda63f762c42d8c18ebc9ac0
226,195
ipynb
Jupyter Notebook
Pokemon/Pokemon.ipynb
ISSOH/Machine-Learning
163968f9487b8801147d065aeb7c1b6d07c416ea
[ "Apache-2.0" ]
null
null
null
Pokemon/Pokemon.ipynb
ISSOH/Machine-Learning
163968f9487b8801147d065aeb7c1b6d07c416ea
[ "Apache-2.0" ]
null
null
null
Pokemon/Pokemon.ipynb
ISSOH/Machine-Learning
163968f9487b8801147d065aeb7c1b6d07c416ea
[ "Apache-2.0" ]
null
null
null
87.435253
67,688
0.75621
[ [ [ "## Définition du Problème\nLe projet consiste à prédire le vainqueur de combats entre deux pokemons.", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport csv\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import r2_score\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.externals import joblib\n\nimport warnings\nwarnings.filterwarnings('always')\nwarnings.filterwarnings('ignore')\n\nimport matplotlib as mpl\nmpl.rc('axes', labelsize=14)\nmpl.rc('xtick', labelsize=12)\nmpl.rc('ytick', labelsize=12)", "_____no_output_____" ] ], [ [ "### Description des features\t\t\t\t\t\t\n<ul>\n <li>NUMERO: Numero</li>\n <li>NOM: Nom du Pokemon</li>\n <li>TYPE_1: Type primaire</li>\n <li>TYPE_2: Type Secondaire</li>\n <li>POINTS_DE_VIE: Point de vie</li>\n <li>POINTS_ATTAQUE: Niveau d'attaque</li>\n <li>POINTS_DEFFENCE: Niveau de defense</li>\n <li>POINTS_ATTAQUE_SPECIALE: Niveau d'attaque spéciale</li>\n <li>POINT_DEFENSE_SPECIALE: Niveau de spéciale spéciale</li>\n <li>POINTS_VITESSE: Vitesse</li>\n <li>NOMBRE_GENERATIONS : Numéro de la génération</li>\n <li>LEGENDAIRE: Le pokemon est il légendaire?</li>\n</ul>", "_____no_output_____" ], [ "## Acquisition des données", "_____no_output_____" ] ], [ [ "#Récupération des fichiers necessaires au modèle.\nimport os\n\nfileList = os.listdir(\"./datas\")\nfor file in fileList:\n print(file)", "combats.csv\ndataset.csv\npokedex.csv\ntests.csv\n" ], [ "pokemons = pd.read_csv(\"./datas/pokedex.csv\", encoding = \"ISO-8859-1\")", "_____no_output_____" ], [ "pokemons.head(10)", "_____no_output_____" ] ], [ [ "## Préparation et Nettoyage des données", "_____no_output_____" ] ], [ [ "pokemons.shape", "_____no_output_____" ], [ "pokemons.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 800 entries, 0 to 799\nData columns (total 12 columns):\nNUMERO 800 non-null int64\nNOM 799 non-null object\nTYPE_1 800 non-null object\nTYPE_2 413 non-null object\nPOINTS_DE_VIE 800 non-null int64\nPOINTS_ATTAQUE 800 non-null int64\nPOINTS_DEFFENCE 800 non-null int64\nPOINTS_ATTAQUE_SPECIALE 800 non-null int64\nPOINT_DEFENSE_SPECIALE 800 non-null int64\nPOINTS_VITESSE 800 non-null int64\nNOMBRE_GENERATIONS 800 non-null int64\nLEGENDAIRE 800 non-null object\ndtypes: int64(8), object(4)\nmemory usage: 75.1+ KB\n" ], [ "pokemons[pokemons['NOM'].isnull()]", "_____no_output_____" ], [ "pokemons['NOM'][62]=\"Colossinge\"", "_____no_output_____" ] ], [ [ "### Idenfication des features de catégorisation", "_____no_output_____" ] ], [ [ "cat_features = pokemons.select_dtypes(include=['object'])\ncat_features.head()", "_____no_output_____" ] ], [ [ "Nous allons nous concentrer sur ces features à l'exception du **NOM**.", "_____no_output_____" ] ], [ [ "#Nombre de pokemons de type primaire \n#sns.catplot(x='TYPE_1',data=pokemons, kind='count', height=3, aspect=1.5)\npokemons.TYPE_1.value_counts().plot.bar()", "_____no_output_____" ], [ "#Nombre de pokemons de type secondaire \npokemons.TYPE_2.value_counts().plot.bar()LEGENDAIRE", "_____no_output_____" ], [ "pokemons.LEGENDAIRE.value_counts().plot.bar()", "_____no_output_____" ], [ "#Transformation la feature de catégorisation LEGENDAIRE en donnée numerique \npokemons['LEGENDAIRE'] = (pokemons['LEGENDAIRE']==\"VRAI\").astype(int)", "_____no_output_____" ] ], [ [ "## Acquisition des données de combats", 
"_____no_output_____" ] ], [ [ "combats = pd.read_csv(\".datas/combats.csv\", encoding = \"ISO-8859-1\")\ncombats.head()", "_____no_output_____" ], [ "combats.columns", "_____no_output_____" ], [ "combats.shape", "_____no_output_____" ], [ "combats.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 50000 entries, 0 to 49999\nData columns (total 3 columns):\nPremier_Pokemon 50000 non-null int64\nSecond_Pokemon 50000 non-null int64\nPokemon_Gagnant 50000 non-null int64\ndtypes: int64(3)\nmemory usage: 1.1 MB\n" ] ], [ [ "## Feature engineering", "_____no_output_____" ], [ "Nous allons déterminer le nombre de combats par Pokémon. Pour cela nous devons caluler le nombre d'apparitions en premier position et le nombre de fois en seconde position.", "_____no_output_____" ] ], [ [ "nbreCombatsPremierePosition = combats.groupby('Premier_Pokemon').count()\nnbreCombatsPremierePosition.head(5)", "_____no_output_____" ], [ "nbreCombatsSecondePosition = combats.groupby('Second_Pokemon').count()\nnbreCombatsSecondePosition.head(5)", "_____no_output_____" ], [ "nbreTotalCombatsParPokemon = nbreCombatsPremierePosition+nbreCombatsSecondePosition\nnbreTotalCombatsParPokemon.head(8)", "_____no_output_____" ], [ "#Nombre de combats gagnés\nnbreCombatsGagnes = combats.groupby('Pokemon_Gagnant').count()\nnbreCombatsGagnes.head(5)", "_____no_output_____" ], [ "nbreCombatsGagnes.info()", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 783 entries, 1 to 800\nData columns (total 2 columns):\nPremier_Pokemon 783 non-null int64\nSecond_Pokemon 783 non-null int64\ndtypes: int64(2)\nmemory usage: 18.4 KB\n" ], [ "listePokemons = combats.groupby('Pokemon_Gagnant').count()\nlistePokemons.sort_index()\nlistePokemons['NBRE_COMBATS'] = nbreTotalCombatsParPokemon.Pokemon_Gagnant\nlistePokemons['NBRE_VICTOIRES'] = nbreCombatsGagnes.Premier_Pokemon\nlistePokemons['POURCENTAGE_VICTOIRES'] = nbreCombatsGagnes.Premier_Pokemon/nbreTotalCombatsParPokemon.Pokemon_Gagnant\nlistePokemons.head(5)", "_____no_output_____" ], [ "#Agrregate both dataframe to have global view into data\nnouveauPokedex = pokemons.merge(listePokemons,left_on='NUMERO', right_index=True, how='left')\nnouveauPokedex.head(5)", "_____no_output_____" ], [ "#Phase d'apprentissage\n#Decoupage des observations en jeu d'apprentissage et jeu de test\nnouveauPokedex.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 800 entries, 0 to 799\nData columns (total 17 columns):\nNUMERO 800 non-null int64\nNOM 800 non-null object\nTYPE_1 800 non-null object\nTYPE_2 413 non-null object\nPOINTS_DE_VIE 800 non-null int64\nPOINTS_ATTAQUE 800 non-null int64\nPOINTS_DEFFENCE 800 non-null int64\nPOINTS_ATTAQUE_SPECIALE 800 non-null int64\nPOINT_DEFENSE_SPECIALE 800 non-null int64\nPOINTS_VITESSE 800 non-null int64\nNOMBRE_GENERATIONS 800 non-null int64\nLEGENDAIRE 800 non-null int32\nPremier_Pokemon 783 non-null float64\nSecond_Pokemon 783 non-null float64\nNBRE_COMBATS 783 non-null float64\nNBRE_VICTOIRES 783 non-null float64\nPOURCENTAGE_VICTOIRES 783 non-null float64\ndtypes: float64(5), int32(1), int64(8), object(3)\nmemory usage: 103.2+ KB\n" ], [ "#What Pokemons Type should trainer have?\n#For TYPE_1\naxe_X = sns.countplot(x='TYPE_1', hue='LEGENDAIRE', data=nouveauPokedex)\nplt.xticks(rotation=90)\nplt.xlabel('TYPE_1')\nplt.ylabel('Total')\nplt.title('Pokemons par TYPE_1')\nplt.show()", "_____no_output_____" ], [ "#What Pokemons Type should trainer have?\n#For TYPE_2\naxe_X = sns.countplot(x='TYPE_2', hue='LEGENDAIRE', 
data=nouveauPokedex)\nplt.xticks(rotation=90)\nplt.xlabel('TYPE_2')\nplt.ylabel('Total')\nplt.title('Pokemon by TYPE_2')\nplt.show()", "_____no_output_____" ], [ "nouveauPokedex.describe()", "_____no_output_____" ], [ "# Which Pokemon types have the highest winning percentage?\nnouveauPokedex.groupby('TYPE_1').agg({'POURCENTAGE_VICTOIRES':'mean'}).sort_values(by='POURCENTAGE_VICTOIRES')", "_____no_output_____" ], [ "# Correlation between the features\ncorr = nouveauPokedex.loc[:,['TYPE_1','POINTS_DE_VIE','POINTS_ATTAQUE','POINTS_DEFFENCE','POINTS_ATTAQUE_SPECIALE',\n 'POINT_DEFENSE_SPECIALE','POINTS_VITESSE','LEGENDAIRE','POURCENTAGE_VICTOIRES']].corr()\nsns.heatmap(corr, annot=True, cmap='Greens')\nplt.title('Feature correlation')\nplt.show()\n", "_____no_output_____" ], [ "# Save the new Pokedex dataset\ndataset = nouveauPokedex\ndataset.to_csv(\"./datas/dataset.csv\", encoding = \"ISO-8859-1\", sep='\\t')", "_____no_output_____" ], [ "dataset = pd.read_csv(\"./datas/dataset.csv\", encoding = \"ISO-8859-1\", delimiter='\\t')", "_____no_output_____" ], [ "dataset.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 800 entries, 0 to 799\nData columns (total 18 columns):\nUnnamed: 0 800 non-null int64\nNUMERO 800 non-null int64\nNOM 800 non-null object\nTYPE_1 800 non-null object\nTYPE_2 413 non-null object\nPOINTS_DE_VIE 800 non-null int64\nPOINTS_ATTAQUE 800 non-null int64\nPOINTS_DEFFENCE 800 non-null int64\nPOINTS_ATTAQUE_SPECIALE 800 non-null int64\nPOINT_DEFENSE_SPECIALE 800 non-null int64\nPOINTS_VITESSE 800 non-null int64\nNOMBRE_GENERATIONS 800 non-null int64\nLEGENDAIRE 800 non-null int64\nPremier_Pokemon 783 non-null float64\nSecond_Pokemon 783 non-null float64\nNBRE_COMBATS 783 non-null float64\nNBRE_VICTOIRES 783 non-null float64\nPOURCENTAGE_VICTOIRES 783 non-null float64\ndtypes: float64(5), int64(10), object(3)\nmemory usage: 112.6+ KB\n" ], [ "dataset.shape", "_____no_output_____" ], [ "dataset.head(5)", "_____no_output_____" ], [ "# Drop every row with missing values\ndataset = dataset.dropna(axis=0, how ='any')", "_____no_output_____" ], [ "# Extract the explanatory variables\nX = dataset.iloc[:, 5:12].values\n# Extract the target variable\nY = dataset.iloc[:, 17].values \n\n# Build the training set and the test set\n", "_____no_output_____" ], [ " X_APPRENTISSAGE, X_VALIDATION, Y_APPRENTISSAGE, Y_VALIDATION = train_test_split(X, Y, test_size=0.2, random_state= 0)\nY_VALIDATION.shape", "_____no_output_____" ] ], [ [ "## Learning phase\nThis is a regression problem. 
We will use the following algorithms:\n<ul>\n <li>Linear regression</li>\n <li>Decision tree</li>\n <li>Random forest</li>\n</ul>", "_____no_output_____" ] ], [ [ "# Learning model\n\n# Linear regression algorithm\nalgorithme = LinearRegression()\n# Train the algorithm on the training data\nalgorithme.fit(X_APPRENTISSAGE, Y_APPRENTISSAGE)\n# Make predictions on our test set\npredictions = algorithme.predict(X_VALIDATION)\n# Compute the accuracy (R² score) of our algorithm\nprecision = r2_score(Y_VALIDATION, predictions)\nprecision\n", "_____no_output_____" ], [ "\n# Decision tree algorithm.\nalgorithme = DecisionTreeRegressor()\n# Train the algorithm on the training data\nalgorithme.fit(X_APPRENTISSAGE, Y_APPRENTISSAGE)\n# Make predictions on our test set\npredictions = algorithme.predict(X_VALIDATION)\n# Compute the accuracy (R² score) of our algorithm\nprecision = r2_score(Y_VALIDATION, predictions)\nprecision\n", "_____no_output_____" ], [ "# Random forest algorithm.\nalgorithme = RandomForestRegressor()\n# Train the algorithm on the training data\nalgorithme.fit(X_APPRENTISSAGE, Y_APPRENTISSAGE)\n# Make predictions on our test set\npredictions = algorithme.predict(X_VALIDATION)\n# Compute the accuracy (R² score) of our algorithm\nprecision = r2_score(Y_VALIDATION, predictions)\n\n# Save the trained model to a file, since it achieves the highest accuracy.\nfile = './modele/modele_pokemon.mod'\njoblib.dump(algorithme, file)\n\nprecision", "_____no_output_____" ], [ "# Function that looks up a Pokemon's information in the Pokedex from its number\n\ndef rechercheInformationPokemon(numeroPokemon, pokedex):\n    infosPokemon = []\n    for pokemon in pokedex:\n        if ( numeroPokemon == int(pokemon[0]) ):\n            infosPokemon = pokemon[1], pokemon[4], pokemon[5], pokemon[6], pokemon[7], pokemon[8], pokemon[9], pokemon[10]\n            break\n    return infosPokemon\n \n# Prediction function \ndef prediction(numeroPokemon1, numeroPokemon2, Pokedex):\n    pokemon_1 = rechercheInformationPokemon(numeroPokemon1,Pokedex)\n    pokemon_2 = rechercheInformationPokemon(numeroPokemon2,Pokedex)\n    \n    # Load the trained model (relative path, matching where the model was saved above) \n    modele_prediction = joblib.load('./modele/modele_pokemon.mod')\n    prediction_pokemon_1 = modele_prediction.predict([[pokemon_1[1], pokemon_1[2], pokemon_1[3], pokemon_1[4], pokemon_1[5],\n pokemon_1[6], pokemon_1[7]]])\n    prediction_pokemon_2 = modele_prediction.predict([[pokemon_2[1], pokemon_2[2], pokemon_2[3], pokemon_2[4], pokemon_2[5],\n pokemon_2[6], pokemon_2[7]]])\n    \n    print('COMBAT OPPOSANT ' + str(pokemon_1[0]) +' A ' + str(pokemon_2[0]))\n    print('----------Prediction des Pokemons--------')\n    print( str(pokemon_1[0]) + \" \" +str(prediction_pokemon_1) )\n    print( str(pokemon_2[0]) + \" \" +str(prediction_pokemon_2) )\n    \n    if prediction_pokemon_1 > prediction_pokemon_2:\n        print( str(pokemon_1[0]) +' est vainqueur')\n    else:\n        print( str(pokemon_2[0]) +' est vainqueur')\n\nwith open(\"./datas/pokedex.csv\", newline='') as csvfile:\n    pokedex=csv.reader(csvfile)\n    next(pokedex)\n    prediction(368, 598, pokedex)\n ", "COMBAT OPPOSANT Mangriff A Crapustule\n----------Prediction des Pokemons--------\nMangriff [0.70453906]\nCrapustule [0.56317528]\nMangriff est vainqueur\n" ] ]
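, [ [ "As a possible extension (a sketch of my own, not part of the original notebook), the three models could be compared more robustly with cross-validated R² scores rather than a single train/test split.", "_____no_output_____" ] ], [ [ "# Sketch: compare the three regressors with 5-fold cross-validated R² scores\nfrom sklearn.model_selection import cross_val_score\n\nfor modele in [LinearRegression(), DecisionTreeRegressor(), RandomForestRegressor()]:\n    scores = cross_val_score(modele, X, Y, cv=5, scoring='r2')\n    print(type(modele).__name__, scores.mean())", "_____no_output_____" ] ] ]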
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
e7d086baa3f96924c4d867c166b7dbeddb6650d7
438,361
ipynb
Jupyter Notebook
visualise-searches-over-time.ipynb
GLAM-Workbench/trove-newspapers
fe97fa512ef4bee83ff8778c8c652cfc54edf1c8
[ "MIT" ]
7
2019-04-11T23:16:33.000Z
2022-01-28T06:41:41.000Z
visualise-searches-over-time.ipynb
GLAM-Workbench/trove-newspapers
fe97fa512ef4bee83ff8778c8c652cfc54edf1c8
[ "MIT" ]
26
2020-03-24T17:20:11.000Z
2022-01-27T10:23:11.000Z
visualise-searches-over-time.ipynb
GLAM-Workbench/trove-newspapers
fe97fa512ef4bee83ff8778c8c652cfc54edf1c8
[ "MIT" ]
4
2019-06-04T07:41:27.000Z
2021-11-24T06:38:11.000Z
202.569778
115,480
0.627079
[ [ [ "# Visualise Trove newspaper searches over time\n\nYou know the feeling. You enter a query into [Trove's digitised newspapers](https://trove.nla.gov.au/newspaper/) search box and...\n\n![Trove search results screen capture](images/trove-newspaper-results.png)\n\nHmmm, **3 million results**, how do you make sense of that..?\n\nTrove tries to be as helpful as possible by ordering your results by relevance. This is great if you aim is to find a few interesting articles. But how can you get a sense of the complete results set? How can you *see* everything? Trove's web interface only shows you the first 2,000 articles matching your search. But by getting data directly from the [Trove API](https://help.nla.gov.au/trove/building-with-trove/api) we can go bigger. \n\nThis notebook helps you zoom out and explore how the number of newspaper articles in your results varies over time by using the `decade` and `year` facets. We'll then combine this approach with other search facets to see how we can slice a set of results up in different ways to investigate historical changes.\n\n1. [Setting things up](#1.-Setting-things-up)\n2. [Find the number of articles per year using facets](#2.-Find-the-number-of-articles-per-year-using-facets)\n3. [How many articles in total were published each year?](#3.-How-many-articles-in-total-were-published-each-year?)\n4. [Charting our search results as a proportion of total articles](#4.-Charting-our-search-results-as-a-proportion-of-total-articles)\n5. [Comparing multiple search terms over time](#5.-Comparing-multiple-search-terms-over-time)\n6. [Comparing a search term across different states](#6.-Comparing-a-search-term-across-different-states)\n7. [Comparing a search term across different newspapers](#7.-Comparing-a-search-term-across-different-newspapers)\n8. [Chart changes in illustration types over time](#8.-Chart-changes-in-illustration-types-over-time)\n9. [But what are we searching?](#9.-But-what-are-we-searching?)\n10. [Next steps](#10.-Next-steps)\n11. [Related resources](#11.-Related-resources)\n12. [Further reading](#12.-Further-reading)\n\nIf you're interested in exploring the possibilities examined in this notebook, but are feeling a bit intimidated by the code, skip to the [Related resources](#11.-Related-resources) section for some alternative starting points. But once you've got a bit of confidence, please come back here to learn more about how it all works!\n", "_____no_output_____" ], [ "<div class=\"alert alert-block alert-warning\">\n<p>If you haven't used one of these notebooks before, they're basically web pages in which you can write, edit, and run live code. They're meant to encourage experimentation, so don't feel nervous. Just try running a few cells and see what happens!</p>\n\n<p>\n Some tips:\n <ul>\n <li>Code cells have boxes around them.</li>\n <li>To run a code cell click on the cell and then hit <b>Shift+Enter</b>. The <b>Shift+Enter</b> combo will also move you to the next cell, so it's a quick way to work through the notebook.</li>\n <li>While a cell is running a <b>*</b> appears in the square brackets next to the cell. Once the cell has finished running the asterix will be replaced with a number.</li>\n <li>In most cases you'll want to start from the top of notebook and work your way down running each cell in turn. Later cells might depend on the results of earlier ones.</li>\n <li>To edit a code cell, just click on it and type stuff. 
Remember to run the cell once you've finished editing.</li>\n </ul>\n</p>\n\n<p><b>Is this thing on?</b> If you can't edit or run any of the code cells, you might be viewing a static (read only) version of this notebook. Click here to <a href=\"https://mybinder.org/v2/gh/GLAM-Workbench/trove-newspapers/master?filepath=visualise-searches-over-time.ipynb\">load a <b>live</b> version</a> running on Binder.</p>\n\n</div>", "_____no_output_____" ], [ "## 1. Setting things up", "_____no_output_____" ], [ "### Import what we need", "_____no_output_____" ] ], [ [ "import requests\nimport os\nimport ipywidgets as widgets\nfrom operator import itemgetter # used for sorting\nimport pandas as pd # makes manipulating the data easier\nimport altair as alt\nfrom requests.adapters import HTTPAdapter\nfrom requests.packages.urllib3.util.retry import Retry\nfrom tqdm.auto import tqdm\nfrom IPython.display import display, HTML, FileLink, clear_output\nimport math\nfrom collections import OrderedDict\nimport time\n\n# Make sure data directory exists\nos.makedirs('data', exist_ok=True)\n\n# Create a session that will automatically retry on server errors\ns = requests.Session()\nretries = Retry(total=5, backoff_factor=1, status_forcelist=[ 502, 503, 504 ])\ns.mount('http://', HTTPAdapter(max_retries=retries))\ns.mount('https://', HTTPAdapter(max_retries=retries))", "_____no_output_____" ] ], [ [ "### Enter a Trove API key\n\nWe're going to get our data from the Trove API. You'll need to get your own [Trove API key](http://help.nla.gov.au/trove/building-with-trove/api) and enter it below.", "_____no_output_____" ] ], [ [ "api_key = 'YOUR API KEY'\nprint('Your API key is: {}'.format(api_key))", "_____no_output_____" ] ], [ [ "## 2. Find the number of articles per year using facets\n\n<img src=\"images/trove-decade-facets.png\" width=\"200\" align=\"left\" style=\"border: 1px solid #d6d6d6; margin-right: 20px\">\n\nWhen you search for newspaper articles using Trove's web interface, the results appear alongside a column headed 'Refine your results'. This column displays summary data extracted from your search, such as the states in which articles were published and the newspapers that published them. In the web interface, you can use this data to filter your results, but using the API we can retrieve the raw data and use it to visualise the complete result set.\n\nHere you can see the decade facet, showing the number of newspaper articles published each decade. If you click on a decade, the interface displays the number of results per year. So sitting underneath the web interface is data that breaks down our search results by year. Let's use this data to visualise a search over time.", "_____no_output_____" ], [ "To get results by year from the Trove API, you need to set the `facet` parameter to `year`. However, this only works if you've also selected a specific decade using the `l-decade` parameter. In other words, you can only get one decade's worth of results at a time. To assemble the complete dataset, you need to loop through all the decades, requesting the `year` data for each decade in turn.\n\nLet's start with some basic parameters for our search.", "_____no_output_____" ] ], [ [ "# Basic parameters for Trove API\nparams = {\n 'facet': 'year', # Get the data aggregated by year.\n 'zone': 'newspaper',\n 'key': api_key,\n 'encoding': 'json',\n 'n': 0 # We don't need any records, just the facets!\n}", "_____no_output_____" ] ], [ [ "But what are we searching for? 
We need to supply a `q` parameter that includes our search terms. We can use pretty much anything that works in the Trove simple search box. This includes boolean operators, phrase searches, and proximity modifiers. But let's start with something simple. Feel free to modify the `q` value in the cell below.", "_____no_output_____" ] ], [ [ "# CHANGE THIS TO SEARCH FOR SOMETHING ELSE!\nparams['q'] = 'radio'", "_____no_output_____" ] ], [ [ "Let's define a couple of handy functions for getting facet data from the Trove API. ", "_____no_output_____" ] ], [ [ "def get_results(params):\n    '''\n    Get JSON response data from the Trove API.\n    Parameters:\n    params\n    Returns:\n    JSON formatted response data from Trove API \n    '''\n    response = s.get('https://api.trove.nla.gov.au/v2/result', params=params, timeout=30)\n    response.raise_for_status()\n    # print(response.url) # This shows us the url that's sent to the API\n    data = response.json()\n    return data\n\ndef get_facets(data):\n    '''\n    Loop through facets in Trove API response, saving terms and counts.\n    Parameters:\n    data - JSON formatted response data from Trove API \n    Returns:\n    A list of dictionaries containing: 'year', 'total_results'\n    '''\n    facets = []\n    try:\n        # The facets are buried a fair way down in the results\n        # Note that if you ask for more than one facet, you'll have to use the facet['name'] param to find the one you want\n        # In this case there's only one facet, so we can just grab the list of terms (which are in fact the results by year)\n        for term in data['response']['zone'][0]['facets']['facet']['term']:\n            \n            # Get the year and the number of results, and convert them to integers, before adding to our results\n            facets.append({'year': int(term['search']), 'total_results': int(term['count'])})\n        \n        # Sort facets by year\n        facets.sort(key=itemgetter('year'))\n    except TypeError:\n        pass\n    return facets", "_____no_output_____" ] ], [ [ "Now we'll define a function to loop through the decades, processing each in turn. \n\nTo loop through the decades we need to define start and end points. Trove includes newspapers from 1803 right through until the current decade. Note that Trove expects decades to be specified using the first three digits of a year – so the decade value for the 1800s is just `180`. So let's set our range by giving `180` and `201` to the function as our default `start_decade` and `end_decade` values. Also note that I'm defining them as numbers, not strings (no quotes around them!). 
This is so that we can use them to build a range.\n\nThis function returns a list of dictionaries with values for `year` and `total_results`.", "_____no_output_____" ] ], [ [ "def get_facet_data(params, start_decade=180, end_decade=201):\n    '''\n    Loop through the decades from 'start_decade' to 'end_decade',\n    getting the number of search results for each year from the year facet.\n    Combine all the results into a single list.\n    Parameters:\n    params - parameters to send to the API\n    start_decade\n    end_decade\n    Returns:\n    A list of dictionaries containing 'year', 'total_results' for the complete \n    period between the start and end decades.\n    '''\n    # Create a list to hold the facets data\n    facet_data = []\n    \n    # Loop through the decades\n    for decade in tqdm(range(start_decade, end_decade + 1)):\n        \n        # Avoid confusion by copying the params before we change anything.\n        search_params = params.copy()\n        \n        # Add decade value to params\n        search_params['l-decade'] = decade\n        \n        # Get the data from the API\n        data = get_results(search_params)\n        \n        # Get the facets from the data and add to facets_data\n        facet_data += get_facets(data)\n        \n        # Try not to go over API rate limit - increase if you get 403 errors\n        time.sleep(0.2)\n    \n    # Remove the progress bar (you can also set leave=False in tqdm, but that still leaves white space in Jupyter Lab)\n    clear_output()\n    return facet_data", "_____no_output_____" ], [ "# Call the function and save the results to a variable called facet_data\nfacet_data = get_facet_data(params)", "_____no_output_____" ] ], [ [ "For easy exploration, we'll convert the facet data into a [Pandas](https://pandas.pydata.org/) DataFrame.", "_____no_output_____" ] ], [ [ "# Convert our data to a dataframe called df\ndf = pd.DataFrame(facet_data)\n\n# Let's have a look at the first few rows of data\ndf.head()", "_____no_output_____" ] ], [ [ "Which year had the most results? We can use `idxmax()` to find out.", "_____no_output_____" ] ], [ [ "# Show the row that has the highest value in the 'total_results' column.\n# Use .idxmax to find the row with the highest value, then use .loc to get it\ndf.loc[df['total_results'].idxmax()]", "_____no_output_____" ] ], [ [ "Now let's display the data as a chart using [Altair](https://altair-viz.github.io/index.html).", "_____no_output_____" ] ], [ [ "alt.Chart(df).mark_line(point=True).encode(\n    # Years on the X axis\n    x=alt.X('year:Q', axis=alt.Axis(format='c', title='Year')),\n    \n    # Number of articles on the Y axis\n    y=alt.Y('total_results:Q', axis=alt.Axis(format=',d', title='Number of articles')),\n    \n    # Display details when you hover over a point\n    tooltip=[alt.Tooltip('year:Q', title='Year'), alt.Tooltip('total_results:Q', title='Articles', format=',')]\n    ).properties(width=700, height=400)", "_____no_output_____" ] ]
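, [ [ "If you want to keep a copy of what you've harvested, here's a quick sketch: save the dataframe as a CSV file and the chart as a standalone HTML page. The file names below are just examples.", "_____no_output_____" ] ], [ [ "# Sketch: save the harvested data and the chart (example file names)\ndf.to_csv('data/radio-by-year.csv', index=False)\n\nchart = alt.Chart(df).mark_line(point=True).encode(\n    x=alt.X('year:Q', axis=alt.Axis(format='c', title='Year')),\n    y=alt.Y('total_results:Q', axis=alt.Axis(format=',d', title='Number of articles'))\n    ).properties(width=700, height=400)\n\n# Altair can write charts out as self-contained HTML\nchart.save('radio-by-year.html')", "_____no_output_____" ] ]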
, [ [ "No surprise to see a sudden increase in the use of the word 'radio' in the early decades of the 20th century, but why do the results drop away after 1954? To find out we have to dig a bit deeper into Trove.", "_____no_output_____" ], [ "## 3. How many articles in total were published each year?", "_____no_output_____" ], [ "Ok, we've visualised a search in Trove's digitised newspapers. Our chart shows a clear change in the number of articles over time, but are we really observing a historical shift relating to the topic, or is this just because more newspapers were published at particular times? To explore this further, let's create another chart, but this time we'll search for *everything*. The way we do this is by setting the `q` parameter to ' ' – a single space.\n\nFirst let's get the data.", "_____no_output_____" ] ], [ [ "# Reset the 'q' parameter\n# Use an empty search (a single space) to get ALL THE ARTICLES\nparams['q'] = ' '\n\n# Get facet data for all articles\nall_facet_data = get_facet_data(params)", "_____no_output_____" ] ], [ [ "Now let's create the chart.", "_____no_output_____" ] ], [ [ "# Convert the results to a dataframe\ndf_total = pd.DataFrame(all_facet_data)\n\n# Make a chart \nalt.Chart(df_total).mark_line(point=True).encode(\n    # Display the years along the X axis\n    x=alt.X('year:Q', axis=alt.Axis(format='c', title='Year')),\n    \n    # Display the number of results on the Y axis (formatted using thousands separator)\n    y=alt.Y('total_results:Q', axis=alt.Axis(format=',d', title='Number of articles')),\n    \n    # Create a tooltip when you hover over a point to show the data for that year\n    tooltip=[alt.Tooltip('year:Q', title='Year'), alt.Tooltip('total_results:Q', title='Articles', format=',')]\n    ).properties(width=700, height=400)", "_____no_output_____" ] ], [ [ "This chart shows us the total number of newspaper articles in Trove for each year from 1803 to 2013. As you might expect, there's a steady increase in the number of articles published across the 19th century. But why is there such a notable peak in 1915, and why do the numbers drop away so suddenly in 1955? The answers are explored more fully in [this notebook](visualise-total-newspaper-articles-by-state-year.ipynb), but in short they're a reflection of digitisation priorities and copyright restrictions – they're artefacts of the environment in which Trove's newspapers are digitised.\n\nThe important point is that our original chart showing search results over time is distorted by these underlying features. Radios didn't suddenly go out of fashion in 1955!", "_____no_output_____" ], [ "## 4. Charting our search results as a proportion of total articles\n\nOne way of lessening the impact of these distortions is to show the number of search results as a proportion of the total number of articles available on Trove from that year. We've just harvested the total number of articles, so to get the proportion all we have to do is divide the original number of search results for each year by the total number of articles. Again, Pandas makes this sort of manipulation easy.\n\nBelow we'll define a function that takes two dataframes – the search results, and the total results – merges them, and then calculates what proportion of the total the search results represent.", "_____no_output_____" ] ], [ [ "def merge_df_with_total(df, df_total):\n    '''\n    Merge dataframes containing search results with the total number of articles by year.\n    This is a left join on the year column. 
The total number of articles will be added as a column to \n    the existing results.\n    Once merged, do some reorganisation and calculate the proportion of search results.\n    Parameters:\n    df - the search results in a dataframe\n    df_total - total number of articles per year in a dataframe\n    Returns:\n    A dataframe with the following columns - 'year', 'total_results', 'total_articles', 'proportion' \n    (plus any other columns that are in the search results dataframe).\n    '''\n    # Merge the two dataframes on the year column\n    df_merged = pd.merge(df, df_total, how='left', on='year')\n\n    # Rename the columns for convenience\n    df_merged.rename({'total_results_y': 'total_articles'}, inplace=True, axis='columns')\n    df_merged.rename({'total_results_x': 'total_results'}, inplace=True, axis='columns')\n\n    # Set blank values to zero to avoid problems\n    df_merged['total_results'] = df_merged['total_results'].fillna(0).astype(int)\n\n    # Calculate proportion by dividing the search results by the total articles\n    df_merged['proportion'] = df_merged['total_results'] / df_merged['total_articles']\n    return df_merged", "_____no_output_____" ] ], [ [ "Let's merge!", "_____no_output_____" ] ], [ [ "# Merge the search results with the total articles\ndf_merged = merge_df_with_total(df, df_total)\ndf_merged.head()", "_____no_output_____" ] ], [ [ "Now we have a new dataframe `df_merged` that includes both the raw number of search results for each year, and the proportion the results represent of the total number of articles on Trove. Let's create charts for both and look at the differences.", "_____no_output_____" ] ], [ [ "# This is the chart showing raw results -- it's the same as the one we created above (but a bit smaller)\nchart1 = alt.Chart(df).mark_line(point=True).encode(\n    x=alt.X('year:Q', axis=alt.Axis(format='c', title='Year')),\n    y=alt.Y('total_results:Q', axis=alt.Axis(format=',d', title='Number of articles')),\n    tooltip=[alt.Tooltip('year:Q', title='Year'), alt.Tooltip('total_results:Q', title='Articles', format=',')]\n    ).properties(width=700, height=250)\n\n# This is the new view, note that it's using the 'proportion' column for the Y axis\nchart2 = alt.Chart(df_merged).mark_line(point=True, color='red').encode(\n    x=alt.X('year:Q', axis=alt.Axis(format='c', title='Year')),\n    \n    # This time we're showing the proportion (formatted as a percentage) on the Y axis\n    y=alt.Y('proportion:Q', axis=alt.Axis(format='%', title='Proportion of articles')),\n    tooltip=[alt.Tooltip('year:Q', title='Year'), alt.Tooltip('proportion:Q', title='Proportion', format='%')],\n    \n    # Make the charts different colors\n    color=alt.value('orange')\n    ).properties(width=700, height=250)\n\n# This is a shorthand way of stacking the charts on top of each other\nchart1 & chart2", "_____no_output_____" ] ], [ [ "The overall shape of the two charts is similar, but there are some significant differences. Both show a dramatic increase after 1920, but the initial peaks are in different positions. The sudden drop-off after 1954 has gone, and we even have a new peak in 1963. Why 1963? The value of these sorts of visualisations is in the questions they prompt, rather than any claim to 'accuracy'. How meaningful are the post-1954 results? If we [break down the numbers by state](visualise-total-newspaper-articles-by-state-year.ipynb), we see that the post-1954 results are mostly from the ACT. It is a small, narrowly-focused sample. 
Reading these two charts in combination reminds us that the structure and content of a large corpus like Trove is not natural. While viewing the number of results over time can alert us to historical shifts, we have to be prepared to ask questions about how those results are generated, and what they represent.", "_____no_output_____" ], [ "## 5. Comparing multiple search terms over time\n\nAnother way of working around inconsistencies in the newspaper corpus is to *compare* search queries. While the total numbers could be misleading, the comparative numbers might still show us interesting shifts in usage or meaning. Once again, this is not something we can do through the web interface, but all we need to achieve this using the API is a few minor adjustments to our code.\n\nInstead of a single search query, this time we'll define a list of search queries. You can include as many queries as you want and, once again, the queries can be anything you'd type in the Trove search box.", "_____no_output_____" ] ], [ [ "# Create a list of queries\nqueries = [\n    'telegraph',\n    'radio',\n    'wireless'\n]", "_____no_output_____" ] ], [ [ "Now we'll define a new function that loops through each of the search terms, retrieving the facet data for each, and combining it all into a single dataframe.", "_____no_output_____" ] ], [ [ "def get_search_facets(params, queries):\n    '''\n    Process a list of search queries, gathering the facet data for each and combining the results into a single dataframe.\n    \n    Parameters:\n    params - basic parameters to send to the API\n    queries - a list of search queries\n    Returns:\n    A dataframe\n    '''\n    # This is where we'll store the individual dataframes\n    dfs = []\n    \n    # Make a copy of the basic parameters\n    these_params = params.copy()\n    \n    # Loop through the list of queries\n    for q in queries:\n        \n        # Set the 'q' parameter to the current search query\n        these_params['q'] = q\n        \n        # Get all the facet data for this search\n        facet_data = get_facet_data(these_params)\n        \n        # Convert the facet data into a dataframe\n        df = pd.DataFrame(facet_data)\n        \n        # Add a column with the search query -- this will enable us to distinguish between the results in the combined dataframe.\n        df['query'] = q\n        \n        # Add this df to our list\n        dfs.append(df)\n    \n    # Combine the dfs into one df using concat and return the result\n    return pd.concat(dfs)", "_____no_output_____" ] ], [ [ "Now we're ready to harvest some data!", "_____no_output_____" ] ], [ [ "df_queries = get_search_facets(params, queries)", "_____no_output_____" ] ], [ [ "Once again, it would be useful to have the number of search results as a proportion of the total articles, so let's use our merge function again to add the proportions.", "_____no_output_____" ] ], [ [ "df_queries_merged = merge_df_with_total(df_queries, df_total)", "_____no_output_____" ] ], [ [ "As we're repeating the same sorts of charts with different data, we might as well save ourselves some effort by creating a couple of reusable charting functions. 
One shows the raw numbers, and the other shows the proportions.", "_____no_output_____" ] ], [ [ "def make_chart_totals(df, category, category_title):\n    '''\n    Make a chart showing the raw number of search results over time.\n    Creates different coloured lines for each query or category.\n    Parameters:\n    df - a dataframe\n    category - the column containing the value that distinguishes multiple results sets (eg 'query' or 'state')\n    category_title - a nicely formatted title for the category to appear above the legend\n    '''\n    chart = alt.Chart(df).mark_line(point=True).encode(\n        \n        # Show the year on the X axis\n        x=alt.X('year:Q', axis=alt.Axis(format='c', title='Year')),\n        \n        # Show the total number of articles on the Y axis (with thousands separator)\n        y=alt.Y('total_results:Q', axis=alt.Axis(format=',d', title='Number of articles')),\n        \n        # Display query/category, year, and number of results on hover\n        tooltip=[alt.Tooltip('{}:N'.format(category), title=category_title), alt.Tooltip('year:Q', title='Year'), alt.Tooltip('total_results:Q', title='Articles', format=',')],\n        \n        # In these charts we're comparing results, so we're using color to distinguish between queries/categories\n        color=alt.Color('{}:N'.format(category), legend=alt.Legend(title=category_title))\n    ).properties(width=700, height=250)\n    return chart\n\n\ndef make_chart_proportions(df, category, category_title):\n    '''\n    Make a chart showing the proportion of search results over time.\n    Creates different coloured lines for each query or category.\n    Parameters:\n    df - a dataframe\n    category - the column containing the value that distinguishes multiple results sets (eg 'query' or 'state')\n    category_title - a nicely formatted title for the category to appear above the legend\n    '''\n    chart = alt.Chart(df).mark_line(point=True).encode(\n        # Show the year on the X axis\n        x=alt.X('year:Q', axis=alt.Axis(format='c', title='Year')),\n        \n        # Show the proportion of articles on the Y axis (formatted as percentage)\n        y=alt.Y('proportion:Q', axis=alt.Axis(format='%', title='Proportion of articles'), stack=None),\n        \n        # Display query/category, year, and proportion of results on hover\n        tooltip=[alt.Tooltip('{}:N'.format(category), title=category_title), alt.Tooltip('year:Q', title='Year'), alt.Tooltip('proportion:Q', title='Proportion', format='%')],\n        \n        # In these charts we're comparing results, so we're using color to distinguish between queries/categories\n        color=alt.Color('{}:N'.format(category), legend=alt.Legend(title=category_title))\n    ).properties(width=700, height=250)\n    return chart", "_____no_output_____" ] ], [ [ "Let's use the new functions to create charts for our queries.", "_____no_output_____" ] ], [ [ "# Chart total results\nchart3 = make_chart_totals(df_queries_merged, 'query', 'Search query')\n\n# Chart proportions\nchart4 = make_chart_proportions(df_queries_merged, 'query', 'Search query')\n\n# Shorthand way of concatenating the two charts (note there's only one legend)\nchart3 & chart4", "_____no_output_____" ] ], [ [ "Once again, it's interesting to compare the total results with the proportions. In this case, both point to something interesting happening around 1930. To explore this further we could use the [Trove Newspaper Harvester](https://glam-workbench.github.io/trove-harvester/) to assemble a dataset of articles from 1920 to 1940 for detailed analysis. You might also notice a little peak for 'wireless' around 2011 – new uses for old words!", "_____no_output_____" ], [ "## 6. 
Comparing a search term across different states\n\nAnother way of building comparisons over time is to use some of the other facets available in Trove to slice up our search results. For example, the `state` facet tells us the number of results per state. We might be able to use this to track differences in language, or regional interest in particular events.\n\nBecause we're combining three facets, `state` and `decade`/`year`, we need to think a bit about how we assemble the data. In this case we're only using one search query, but we're repeating this query across a number of different states. We're then getting the data for decade and year for each of the states.\n\nThe possible values for the `state` facet are:\n\n* ACT\n* New South Wales\n* Northern Territory\n* Queensland\n* South Australia\n* Tasmania\n* Victoria\n* Western Australia\n* National\n* International\n\nThere are some other ways of exploring and visualising the `state` facet in [Visualise the total number of newspaper articles in Trove by year and state](visualise-total-newspaper-articles-by-state-year.ipynb).\n\nLet's start by defining a list of states we want to compare...\n", "_____no_output_____" ] ], [ [ "# A list of state values that we'll supply to the state facet\nstates = [\n    'New South Wales',\n    'Victoria'\n]", "_____no_output_____" ] ], [ [ "...and our search query.", "_____no_output_____" ] ], [ [ "# Remember this time we're comparing a single search query across multiple states\nquery = 'Chinese'", "_____no_output_____" ] ], [ [ "As before, we'll display both the raw number of results, and the proportion this represents of the total number of articles. But what is the total number of articles in this case? While we could generate a proportion using the totals for each year across all of Trove's newspapers, it seems more useful to use the total number of articles for each state. Otherwise, states with more newspapers will dominate. This means we'll have to make some additional calls to the API to get the state totals as well as the search results.\n\nLet's create a couple of new functions. The main function `get_state_facets()` loops through the states in our list, gathering the year by year results. It's similar to the way we handled multiple queries, but this time there's an additional step. Once we have the search results, we use `get_state_totals()` to get the total number of articles published in that state for each year. 
Then we merge the search results and total articles as we did before.", "_____no_output_____" ] ], [ [ "def get_state_totals(state):\n    '''\n    Get the total number of articles for each year for the specified state.\n    Parameters:\n    state\n    Returns:\n    A list of dictionaries containing 'year', 'total_results'.\n    ''' \n    these_params = params.copy()\n    \n    # Set the q parameter to a single space to get everything\n    these_params['q'] = ' '\n    \n    # Set the state facet to the given state value\n    these_params['l-state'] = state\n    \n    # Get the year by year data\n    facet_data = get_facet_data(these_params)\n    return facet_data\n\n\ndef get_state_facets(params, states, query):\n    '''\n    Loop through the supplied list of states searching for the specified query and getting the year by year results.\n    Merges the search results with the total number of articles for that state.\n    Parameters:\n    params - basic parameters to send to the API\n    states - a list of states to apply using the state facet\n    query - the search query to use\n    Returns:\n    A dataframe \n    '''\n    dfs = []\n    these_params = params.copy()\n    \n    # Set the q parameter to the supplied query\n    these_params['q'] = query\n    \n    # Loop through the supplied list of states\n    for state in states:\n        \n        # Set the state facet to the current state value\n        these_params['l-state'] = state\n        \n        # Get year facets for this state & query\n        facet_data = get_facet_data(these_params)\n        \n        # Convert the results to a dataframe\n        df = pd.DataFrame(facet_data)\n        \n        # Get the total number of articles per year for this state\n        total_data = get_state_totals(state)\n        \n        # Convert the totals to a dataframe\n        df_total = pd.DataFrame(total_data)\n        \n        # Merge the two dataframes\n        df_merged = merge_df_with_total(df, df_total)\n        \n        # Add a state column to the dataframe and set its value to the current state\n        df_merged['state'] = state\n        \n        # Add this df to the list of dfs\n        dfs.append(df_merged)\n    \n    # Concatenate all the dataframes and return the result\n    return pd.concat(dfs)", "_____no_output_____" ] ], [ [ "Let's get the data!", "_____no_output_____" ] ], [ [ "df_states = get_state_facets(params, states, query)", "_____no_output_____" ] ], [ [ "And now chart the results, specifying `state` as the column to use for our category.", "_____no_output_____" ] ], [ [ "# Chart totals\nchart5 = make_chart_totals(df_states, 'state', 'State')\n\n# Chart proportions\nchart6 = make_chart_proportions(df_states, 'state', 'State')\n\n# Shorthand way of concatenating the two charts (note there's only one legend)\nchart5 & chart6", "_____no_output_____" ] ], [ [ "Showing the results as a proportion of the total articles for each state does seem to show up some interesting differences. Did 10% of newspaper articles published in Victoria in 1857 really mention 'Chinese'? That seems like something to investigate in more detail.\n\nAnother way of visualising the number of results per state is by using a map! See [Map newspaper results by state](Map-newspaper-results-by-state.ipynb) for a demonstration.", "_____no_output_____" ], [ "## 7. Comparing a search term across different newspapers\n\nFor a more fine-grained analysis, we might want to compare the contents of different newspapers – how did their coverage or language vary over time? To do this we can use Trove's `title` facet which, despite the name, limits your results to a particular newspaper.\n\nThe `title` facet expects a numeric newspaper identifier. You can look one up programmatically, as in the sketch below, or find it by hand.", "_____no_output_____" ] ]
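, [ [ "Here's a sketch of the programmatic approach, using the API's newspaper titles endpoint to search the list of digitised titles. I'm assuming the v2 response structure here; check the API documentation if it doesn't match.", "_____no_output_____" ] ], [ [ "# Sketch: look up a newspaper's numeric id from the list of digitised titles.\n# The response structure is assumed from the v2 API and may need adjusting.\ntitle_params = {'key': api_key, 'encoding': 'json'}\nresponse = s.get('https://api.trove.nla.gov.au/v2/newspaper/titles', params=title_params)\nfor newspaper in response.json()['response']['records']['newspaper']:\n    if 'canberra times' in newspaper['title'].lower():\n        print(newspaper['id'], newspaper['title'])", "_____no_output_____" ] ]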
, [ [ "Otherwise, the easiest way of finding the id number is to go to the [list of newspapers](https://trove.nla.gov.au/newspaper/about) and click on the one you're interested in. The id number will be in the url of the newspaper details page. For example, the url of the *Canberra Times* page is:\n\n`https://trove.nla.gov.au/newspaper/title/11`\n\nSo the id number is '11'.\n\nAs with previous examples, we'll create a list of the newspapers we want to use with the `title` facet. However, the id number on its own isn't going to be very useful in the legend of our chart, so we'll include the name of the newspaper as well. ", "_____no_output_____" ] ], [ [ "# Create a list of dictionaries, each with the 'id' and 'name' of a newspaper\nnewspapers = [\n    {'id': 1180, 'name': 'Sydney Sun'},\n    {'id': 35, 'name': 'Sydney Morning Herald'},\n    {'id': 1002, 'name': 'Tribune'}\n]", "_____no_output_____" ], [ "# Our search query we want to compare across newspapers\nquery = 'worker'", "_____no_output_____" ] ], [ [ "In this case the total number of articles we want to use in calculating the proportion of results is probably the total number of articles published in each particular newspaper. This should allow a more meaningful comparison between, for example, a weekly and a daily newspaper. As in the example above, we'll define a function to loop through the newspapers, and another to get the total number of articles for a given newspaper.", "_____no_output_____" ] ], [ [ "def get_newspaper_totals(newspaper_id):\n    '''\n    Get the total number of articles for each year for the specified newspaper.\n    Parameters:\n    newspaper_id - numeric Trove newspaper identifier\n    Returns:\n    A list of dictionaries containing 'year', 'total_results'.\n    '''\n    these_params = params.copy()\n    \n    # Set q to a single space for everything\n    these_params['q'] = ' '\n    \n    # Set the title facet to the newspaper_id\n    these_params['l-title'] = newspaper_id\n    \n    # Get all the year by year data\n    facet_data = get_facet_data(these_params)\n    return facet_data\n\ndef get_newspaper_facets(params, newspapers, query):\n    '''\n    Loop through the supplied list of newspapers searching for the specified query and getting the year by year results.\n    Merges the search results with the total number of articles for that newspaper.\n    Parameters:\n    params - basic parameters to send to the API\n    newspapers - a list of dictionaries with the id and name of a newspaper\n    query - the search query to use\n    Returns:\n    A dataframe \n    '''\n    dfs = []\n    these_params = params.copy()\n    \n    # Set the query\n    these_params['q'] = query\n    \n    # Loop through the list of newspapers\n    for newspaper in newspapers:\n        \n        # Set the title facet to the id of the current newspaper\n        these_params['l-title'] = newspaper['id']\n        \n        # Get the year by year results for this newspaper\n        facet_data = get_facet_data(these_params)\n        \n        # Convert to a dataframe\n        df = pd.DataFrame(facet_data)\n        \n        # Get the total number of articles published in this newspaper per year\n        total_data = get_newspaper_totals(newspaper['id'])\n        \n        # Convert to a dataframe\n        df_total = pd.DataFrame(total_data)\n        \n        # Merge the two dataframes\n        df_merged = merge_df_with_total(df, df_total)\n        \n        # Create a newspaper column and set its value to the name of the newspaper\n        df_merged['newspaper'] = newspaper['name']\n        \n        # Add the current dataframe to the list\n        dfs.append(df_merged)\n    \n    # Concatenate the dataframes and return the result\n    return pd.concat(dfs)", "_____no_output_____" ] ], [ [ "Let's get the data!", "_____no_output_____" ] ], 
[ [ "df_newspapers = get_newspaper_facets(params, newspapers, query)", "_____no_output_____" ] ], [ [ "And make some charts!", "_____no_output_____" ] ], [ [ "# Chart totals\nchart7 = make_chart_totals(df_newspapers, 'newspaper', 'Newspaper')\n\n# Chart proportions\nchart8 = make_chart_proportions(df_newspapers, 'newspaper', 'Newspaper')\n\n# Shorthand way of concatenating the two charts (note there's only one legend)\nchart7 & chart8", "_____no_output_____" ] ], [ [ "## 8. Chart changes in illustration types over time\n\nLet's try something a bit different and explore the *format* of articles rather than their text content. Trove includes a couple of facets that enable you to filter your search by type of illustration. First of all you have to set the `illustrated` facet to `true`, then you can specify a type of illustration using the `illtype` facet. Possible values include:\n\n* Photo\n* Cartoon\n* Illustration\n* Map\n* Graph\n\nFirst we'll create a list with all the illustration types we're interesed in.", "_____no_output_____" ] ], [ [ "ill_types = [\n 'Photo',\n 'Cartoon',\n 'Illustration',\n 'Map',\n 'Graph'\n]", "_____no_output_____" ] ], [ [ "Then we'll define a function to loop through the illustration types getting the year by year results of each.", "_____no_output_____" ] ], [ [ "def get_ill_facets(params, ill_types):\n '''\n Loop through the supplied list of illustration types getting the year by year results.\n Parameters:\n params - basic parameters to send to the API\n ill_types - a list of illustration types to use with the ill_type facet\n Returns:\n A dataframe \n '''\n dfs = []\n ill_params = params.copy()\n \n # No query! Set q to a single space for everything\n ill_params['q'] = ' '\n \n # Set the illustrated facet to true - necessary before setting ill_type\n ill_params['l-illustrated'] = 'true'\n \n # Loop through the illustration types\n for ill_type in ill_types:\n \n # Set the ill_type facet to the current illustration type\n ill_params['l-illtype'] = ill_type\n \n # Get the year by year data\n facet_data = get_facet_data(ill_params)\n \n # Convert to a dataframe\n df = pd.DataFrame(facet_data)\n \n # Create an ill_type column and set its value to the illustration type \n df['ill_type'] = ill_type\n \n # Add current df to the list of dfs\n dfs.append(df)\n \n # Concatenate all the dfs and return the result\n return pd.concat(dfs)", "_____no_output_____" ] ], [ [ "Get the data!", "_____no_output_____" ] ], [ [ "df_illtypes = get_ill_facets(params, ill_types)", "_____no_output_____" ] ], [ [ "To calculate proportions for these searches we'll just use the total number of articles across all of Trove we collected above. ", "_____no_output_____" ] ], [ [ "# Merge results with total articles and calculate proportions\ndf_illtypes_merged = merge_df_with_total(df_illtypes, df_total)", "_____no_output_____" ], [ "# Make total results chart \nchart9 = make_chart_totals(df_illtypes_merged, 'ill_type', 'Type')\n\n# Make proportions chart\nchart10 = make_chart_proportions(df_illtypes_merged, 'ill_type', 'Type')\n\n# Shorthand way of concatenating the two charts (note there's only one legend)\nchart9 & chart10", "_____no_output_____" ] ], [ [ "And there we have it – interesting to see the rapid increase in photos from the 1920s on.", "_____no_output_____" ], [ "## 9. But what are we searching?\n\nWe've seen that we can visualise Trove search results over time in a number of different ways. But what are we actually searching? 
In the last example, exploring illustration types, we sliced up the complete collection of Trove newspaper articles using the `ill_type` facet. This is a metadata field whose value is set by the people who processed the articles. It should be consistent, but we can't take these sorts of things for granted. Let's look at all the values in the `illtype` field.", "_____no_output_____" ] ], [ [ "ill_params = params.copy()\n\n# No query! Set q to a single space for everything\nill_params['q'] = ' '\n\n# Set the illustrated facet to true - necessary before setting ill_type\nill_params['l-illustrated'] = 'true'\nill_params['facet'] = 'illtype'\ndata = get_results(ill_params)\nfacets = []\nfor term in data['response']['zone'][0]['facets']['facet']['term']:\n    # Get the illustration type and the number of results, and convert the count to an integer, before adding to our results\n    facets.append({'ill_type': term['search'], 'total_results': int(term['count'])})\ndf_ill_types = pd.DataFrame(facets)\ndf_ill_types\n    ", "_____no_output_____" ] ]
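, [ [ "Notice that the list includes both 'Cartoon' and 'Cartoons'. If you wanted to treat near-duplicate labels like these as a single category, you could add a cleaning step of your own. A rough sketch (crude, as it just trims a trailing 's'):", "_____no_output_____" ] ], [ [ "# Sketch: merge near-duplicate facet labels such as 'Cartoon'/'Cartoons'\n# by trimming a trailing 's' and summing the counts\ndf_ill_types['label'] = df_ill_types['ill_type'].str.rstrip('s')\ndf_ill_types.groupby('label')['total_results'].sum()", "_____no_output_____" ] ]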
([Check out this notebook for more on rates of correction.](Analysing_OCR_corrections.ipynb)) There might also be differences as a result of the digitisation process itself – most newspapers were digitised from microfilm, rather than hardcopy. How might that affect the results? Once again, this visualisation doesn't provide answers, but it does suggest some avenues for further investigation. Most importantly though, it suggests ways in which the accuracy of our visualisations might themselves vary over time!\n\nOne final thing we should consider is that our visualisations all show the *number of articles* that match our search query, **not** the number of times our search terms appear in newspapers. So an article in which a search term appears once has the same value in our visualisation as an article in which a search terms appears 20 times. This might mean that periods of intensive discussion around particular words are being flattened, or even rendered invisible. We can't investigate this using Trove's own search interface. To examine word frequency in greater depth, we need to harvest the text content of all the articles we're interested in and use other tools to analyse their contents. That's what the [Trove Newspaper Harvester](https://glam-workbench.github.io/trove-harvester/) is for!", "_____no_output_____" ], [ "## 10. Next steps\n\nDon't just use my examples, try plugging different search terms and facet values into the examples above. \n\nIf you find something that you'd like to investigate in more detail, you can use the [Trove Newspaper Harvester](https://glam-workbench.github.io/trove-harvester/) to download newspaper articles in bulk. Once you have all the text and metadata, you can explore the articles using a variety of text analysis tools.", "_____no_output_____" ], [ "## 11. Related resources", "_____no_output_____" ], [ "### QueryPic\n\n[QueryPic](http://dhistory.org/querypic/) is an earlier tool I created for visualising newspaper searches in Trove (and Papers Past). It's great for creating and sharing quick visualisations. It's not quite as flexible as the approaches outlined here, but it's a good place to start your exploration of Trove data.\n\n\n### QueryPic Deconstructed\n\nThis is [a Jupyter notebook version of QueryPic](https://glam-workbench.github.io/trove-newspapers/#querypic-deconstructed) that extends some of its functionality. It's meant to be run in Appmode, so you don't have worry about the code. It reproduces most of what is available in this notebook, and adds some extra options for saving your charts. A good option if you're intimidated by the code, but want to try out some of the things described in this notebook.\n\n### Visualise a search in Papers Past\n\nDo something similar for New Zealand newspapers in Papers Past [using this notebook](https://glam-workbench.github.io/digitalnz/#visualise-a-search-in-papers-past).\n\n### Trove Newspaper Harvester\n\nOnce you've found something interesting in your visualisations, you can use the [Trove Newspaper Harvester](https://glam-workbench.github.io/trove-harvester/) to download the full text of thousands of articles for in-depth analysis.\n\n## 12. 
Further reading\n\n* Tim Sherratt, ['Seams and edges: dreams of aggregation, access, and discovery in a broken world'](http://discontents.com.au/seams-and-edges-dreams-of-aggregation-access-discovery-in-a-broken-world/), ALIA Online, 2015.\n\n* Tim Sherratt, ['Hacking heritage: understanding the limits of online access'](https://hcommons.org/deposits/item/hc:18733/), preprint of a chapter submitted for publication as part of *The Routledge International Handbook of New Digital Practices in Galleries, Libraries, Archives, Museums and Heritage Sites*, forthcoming 2019.", "_____no_output_____" ], [ "----\n\nCreated by [Tim Sherratt](https://timsherratt.org/) for the [GLAM Workbench](https://glam-workbench.github.io/). \nSupport this project by becoming a [GitHub sponsor](https://github.com/sponsors/wragge?o=esb).\n\nWork on this notebook was supported by the [Humanities, Arts and Social Sciences (HASS) Data Enhanced Virtual Lab](https://tinker.edu.au/).", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
e7d09e5dc0bb7a0ae6c2f8b49e86483fd12e1765
336,830
ipynb
Jupyter Notebook
notebooks/01_simulation_datasets/02_train-test_default/01_GTDB_test-val_genomes.ipynb
chrisLanderson/DeepMAsED
81cff4122be2fb91feeb7a36fa5d502bd57bedd8
[ "MIT" ]
23
2019-09-11T10:48:22.000Z
2021-10-06T19:59:07.000Z
notebooks/01_simulation_datasets/02_train-test_default/01_GTDB_test-val_genomes.ipynb
chrisLanderson/DeepMAsED
81cff4122be2fb91feeb7a36fa5d502bd57bedd8
[ "MIT" ]
8
2019-09-20T17:20:05.000Z
2020-12-02T09:41:19.000Z
notebooks/01_simulation_datasets/02_train-test_default/01_GTDB_test-val_genomes.ipynb
chrisLanderson/DeepMAsED
81cff4122be2fb91feeb7a36fa5d502bd57bedd8
[ "MIT" ]
8
2019-09-10T15:31:04.000Z
2021-11-17T00:16:18.000Z
149.237926
15,294
0.340967
[ [ [ "<h1>Table of Contents<span class=\"tocSkip\"></span></h1>\n<div class=\"toc\"><ul class=\"toc-item\"><li><span><a href=\"#Goal\" data-toc-modified-id=\"Goal-1\"><span class=\"toc-item-num\">1&nbsp;&nbsp;</span>Goal</a></span></li><li><span><a href=\"#Var\" data-toc-modified-id=\"Var-2\"><span class=\"toc-item-num\">2&nbsp;&nbsp;</span>Var</a></span></li><li><span><a href=\"#Init\" data-toc-modified-id=\"Init-3\"><span class=\"toc-item-num\">3&nbsp;&nbsp;</span>Init</a></span></li><li><span><a href=\"#Load\" data-toc-modified-id=\"Load-4\"><span class=\"toc-item-num\">4&nbsp;&nbsp;</span>Load</a></span></li><li><span><a href=\"#Summary\" data-toc-modified-id=\"Summary-5\"><span class=\"toc-item-num\">5&nbsp;&nbsp;</span>Summary</a></span></li><li><span><a href=\"#Filter\" data-toc-modified-id=\"Filter-6\"><span class=\"toc-item-num\">6&nbsp;&nbsp;</span>Filter</a></span><ul class=\"toc-item\"><li><span><a href=\"#Random-selection\" data-toc-modified-id=\"Random-selection-6.1\"><span class=\"toc-item-num\">6.1&nbsp;&nbsp;</span>Random selection</a></span><ul class=\"toc-item\"><li><span><a href=\"#Summary\" data-toc-modified-id=\"Summary-6.1.1\"><span class=\"toc-item-num\">6.1.1&nbsp;&nbsp;</span>Summary</a></span><ul class=\"toc-item\"><li><span><a href=\"#Taxonomy\" data-toc-modified-id=\"Taxonomy-6.1.1.1\"><span class=\"toc-item-num\">6.1.1.1&nbsp;&nbsp;</span>Taxonomy</a></span></li></ul></li></ul></li></ul></li><li><span><a href=\"#Split\" data-toc-modified-id=\"Split-7\"><span class=\"toc-item-num\">7&nbsp;&nbsp;</span>Split</a></span></li><li><span><a href=\"#Write\" data-toc-modified-id=\"Write-8\"><span class=\"toc-item-num\">8&nbsp;&nbsp;</span>Write</a></span></li><li><span><a href=\"#CheckM-value-histograms\" data-toc-modified-id=\"CheckM-value-histograms-9\"><span class=\"toc-item-num\">9&nbsp;&nbsp;</span>CheckM value histograms</a></span></li><li><span><a href=\"#Taxonomy-summary\" data-toc-modified-id=\"Taxonomy-summary-10\"><span class=\"toc-item-num\">10&nbsp;&nbsp;</span>Taxonomy summary</a></span></li><li><span><a href=\"#sessionInfo\" data-toc-modified-id=\"sessionInfo-11\"><span class=\"toc-item-num\">11&nbsp;&nbsp;</span>sessionInfo</a></span></li></ul></div>", "_____no_output_____" ], [ "# Goal\n\n* selecting genomes from the GTDB for testing and validation\n * randomly selecting", "_____no_output_____" ], [ "# Var", "_____no_output_____" ] ], [ [ "work_dir = '/ebio/abt3_projects/databases_no-backup/DeepMAsED/GTDB_ref_genomes/'\nmetadata_file = '/ebio/abt3_projects/databases_no-backup/GTDB/release86/metadata_1perGTDBSpec_gte50comp-lt5cont_wPath.tsv'\n", "_____no_output_____" ] ], [ [ "# Init", "_____no_output_____" ] ], [ [ "library(dplyr)\nlibrary(tidyr)\nlibrary(ggplot2)\n\nset.seed(8364)", "\nAttaching package: ‘dplyr’\n\nThe following objects are masked from ‘package:stats’:\n\n filter, lag\n\nThe following objects are masked from ‘package:base’:\n\n intersect, setdiff, setequal, union\n\n" ] ], [ [ "# Load", "_____no_output_____" ] ], [ [ "metadata = read.delim(metadata_file, sep='\\t') %>% \n dplyr::select(ncbi_organism_name, accession, scaffold_count,\n longest_scaffold, gc_percentage, total_gap_length,\n genome_size, n50_contigs, trna_count, checkm_completeness,\n checkm_contamination, ssu_count, ncbi_taxonomy, ssu_gg_taxonomy, \n gtdb_taxonomy, fasta_file_path)\n\nmetadata %>% nrow %>% print\nmetadata %>% head", "[1] 21276\n" ] ], [ [ "# Summary", "_____no_output_____" ] ], [ [ "metadata %>% summary", "_____no_output_____" ] ], [ [ "# Filter", 
"_____no_output_____" ] ], [ [ "# removing abnormal \nmetadata_f = metadata %>%\n filter(scaffold_count <= 100, \n total_gap_length < 100000,\n genome_size < 10000000,\n checkm_completeness >= 90,\n ssu_count < 20,\n fasta_file_path != '')\n\nmetadata_f %>% nrow", "_____no_output_____" ], [ "metadata_f %>% summary", "_____no_output_____" ] ], [ [ "## Random selection\n\nSelecting 2000 genomes, which will be split into training & testing", "_____no_output_____" ] ], [ [ "metadata_f = metadata_f %>%\n sample_n(2000) \n\nmetadata_f %>% nrow", "_____no_output_____" ] ], [ [ "### Summary", "_____no_output_____" ] ], [ [ "metadata_f %>% \n dplyr::select(scaffold_count, longest_scaffold, gc_percentage, total_gap_length,\n genome_size, n50_contigs, trna_count, checkm_completeness,\n checkm_contamination, ssu_count) %>%\n summary", "_____no_output_____" ] ], [ [ "#### Taxonomy", "_____no_output_____" ] ], [ [ "metadata_f_tax = metadata_f %>%\n dplyr::select(ncbi_taxonomy) %>%\n separate(ncbi_taxonomy, c('Domain', 'Phylum', 'Class', 'Order', 'Family', 'Genus', 'Species'), sep=';') \n\nmetadata_f_tax %>% head", "_____no_output_____" ], [ "metadata_f_tax %>%\n group_by(Domain) %>%\n summarize(n = n()) %>%\n ungroup() %>%\n arrange(-n)", "_____no_output_____" ], [ "metadata_f_tax %>%\n group_by(Domain, Phylum) %>%\n summarize(n = n()) %>%\n ungroup() %>%\n arrange(-n) %>%\n head(n=20)", "_____no_output_____" ], [ "metadata_f_tax = metadata_f %>%\n dplyr::select(gtdb_taxonomy) %>%\n separate(gtdb_taxonomy, c('Domain', 'Phylum', 'Class', 'Order', 'Family', 'Genus', 'Species'), sep=';') \n\nmetadata_f_tax %>% head", "_____no_output_____" ], [ "metadata_f_tax %>%\n group_by(Domain) %>%\n summarize(n = n()) %>%\n ungroup() %>%\n arrange(-n)", "_____no_output_____" ], [ "metadata_f_tax %>%\n group_by(Domain, Phylum) %>%\n summarize(n = n()) %>%\n ungroup() %>%\n arrange(-n) %>%\n head(n=20)", "_____no_output_____" ] ], [ [ "# Split", "_____no_output_____" ] ], [ [ "metadata_f_train = metadata_f %>% \n sample_n(1000)\n\nmetadata_f_train %>% nrow", "_____no_output_____" ], [ "metadata_f_test = metadata_f %>% \n anti_join(metadata_f_train, c('ncbi_organism_name', 'accession'))\n\nmetadata_f_test %>% nrow", "_____no_output_____" ], [ "# accidental overlap?\nmetadata_f_train %>%\n inner_join(metadata_f_test, c('ncbi_organism_name', 'accession')) %>%\n nrow", "_____no_output_____" ] ], [ [ "# Write", "_____no_output_____" ] ], [ [ "outF = file.path(work_dir, 'DeepMAsED_GTDB_genome-refs_train.tsv')\nmetadata_f_train %>%\n rename('Taxon' = ncbi_organism_name,\n 'Fasta' = fasta_file_path) %>%\n write.table(outF, sep='\\t', quote=FALSE, row.names=FALSE)\ncat('File written:', outF, '\\n')", "File written: /ebio/abt3_projects/databases_no-backup/DeepMAsED/GTDB_ref_genomes//DeepMAsED_GTDB_genome-refs_train.tsv \n" ], [ "outF = file.path(work_dir, 'DeepMAsED_GTDB_genome-refs_test.tsv')\nmetadata_f_test %>%\n rename('Taxon' = ncbi_organism_name,\n 'Fasta' = fasta_file_path) %>%\n write.table(outF, sep='\\t', quote=FALSE, row.names=FALSE)\ncat('File written:', outF, '\\n')", "File written: /ebio/abt3_projects/databases_no-backup/DeepMAsED/GTDB_ref_genomes//DeepMAsED_GTDB_genome-refs_test.tsv \n" ] ], [ [ "# CheckM value histograms\n\n* the distribution of checkM completeness/contamination for training & test", "_____no_output_____" ] ], [ [ "F = file.path(work_dir, 'DeepMAsED_GTDB_genome-refs_train.tsv')\nmetadata_f_train = read.delim(F, sep='\\t') %>%\n mutate(data_partition = 'Train')\nmetadata_f_train %>% dim %>% 
print\nmetadata_f_train %>% head(n=3)", "[1] 1000 17\n" ], [ "F = file.path(work_dir, 'DeepMAsED_GTDB_genome-refs_test.tsv')\nmetadata_f_test = read.delim(F, sep='\\t') %>%\n mutate(data_partition = 'Test')\nmetadata_f_test %>% dim %>% print\nmetadata_f_test %>% head(n=3)", "[1] 1000 17\n" ], [ "metadata_f = rbind(metadata_f_train, metadata_f_test)\nmetadata_f %>% head(n=3)", "_____no_output_____" ], [ "p = metadata_f %>%\n dplyr::select(Taxon, data_partition, checkm_completeness, checkm_contamination) %>%\n gather(checkm_stat, checkm_value, -Taxon, -data_partition) %>%\n mutate(data_partition = factor(data_partition, levels=c('Train', 'Test')),\n checkm_stat = ifelse(checkm_stat == 'checkm_completeness', 'Completenss', 'Contamination')) %>%\n ggplot(aes(data_partition, checkm_value)) +\n geom_boxplot() +\n labs(x='Data partition', y='') +\n facet_wrap(~ checkm_stat, scales='free_y') +\n theme_bw()\n\noptions(repr.plot.width=6, repr.plot.height=3)\nplot(p)", "_____no_output_____" ], [ "F = file.path(work_dir, 'DeepMAsED_GTDB_genome-refs_checkM-box.pdf')\nggsave(p, file=F, width=7, height=4)\ncat('File written:', F, '\\n')", "File written: /ebio/abt3_projects/databases_no-backup/DeepMAsED/GTDB_ref_genomes//DeepMAsED_GTDB_genome-refs_checkM-box.pdf \n" ], [ "metadata_f$checkm_completeness %>% summary %>% print\nmetadata_f$checkm_completeness %>% sd %>% print\nmetadata_f$checkm_contamination %>% summary %>% print\nmetadata_f$checkm_contamination %>% sd %>% print", " Min. 1st Qu. Median Mean 3rd Qu. Max. \n 90.09 98.68 99.44 98.77 99.89 100.00 \n[1] 1.846624\n Min. 1st Qu. Median Mean 3rd Qu. Max. \n 0.0000 0.0000 0.4800 0.7283 1.0600 4.9900 \n[1] 0.8609621\n" ] ], [ [ "# Taxonomy summary", "_____no_output_____" ] ], [ [ "F = file.path(work_dir, 'DeepMAsED_GTDB_genome-refs_train.tsv')\nmetadata_f_train = read.delim(F, sep='\\t') %>%\n mutate(data_partition = 'Train')\nmetadata_f_train %>% dim %>% print\nmetadata_f_train %>% head(n=3)", "[1] 1000 17\n" ], [ "F = file.path(work_dir, 'DeepMAsED_GTDB_genome-refs_test.tsv')\nmetadata_f_test = read.delim(F, sep='\\t') %>%\n mutate(data_partition = 'Test')\nmetadata_f_test %>% dim %>% print\nmetadata_f_test %>% head(n=3)", "[1] 1000 17\n" ], [ "metadata_f = rbind(metadata_f_train, metadata_f_test)\nmetadata_f %>% head(n=3)", "_____no_output_____" ], [ "metadata_f_tax = metadata_f %>%\n dplyr::select(data_partition, ncbi_taxonomy) %>%\n separate(ncbi_taxonomy, c('Domain', 'Phylum', 'Class', 'Order', 'Family', 'Genus', 'Species'), sep=';') \n\nmetadata_f_tax %>% head", "_____no_output_____" ], [ "# domain-level distribution\nmetadata_f_tax %>%\n group_by(data_partition, Domain) %>%\n summarize(n = n()) %>%\n ungroup()", "_____no_output_____" ], [ "# number of phyla represented \nmetadata_f_tax %>%\n filter(Phylum != 'p__') %>%\n .$Phylum %>% unique %>% length %>% print", "[1] 40\n" ], [ "metadata_f_tax %>%\n filter(Class != 'c__') %>%\n .$Class %>% unique %>% length %>% print", "[1] 63\n" ], [ "metadata_f_tax %>%\n filter(Genus != 'g__') %>%\n .$Genus %>% unique %>% length %>% print", "[1] 764\n" ] ], [ [ "# sessionInfo", "_____no_output_____" ] ], [ [ "sessionInfo()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
e7d0af6236cb23bbd90d63b3233291974669825c
15,167
ipynb
Jupyter Notebook
Problem 06.ipynb
flothesof/advent_of_code2018
38472701620d1700bdc135b3960b741401e39864
[ "MIT" ]
null
null
null
Problem 06.ipynb
flothesof/advent_of_code2018
38472701620d1700bdc135b3960b741401e39864
[ "MIT" ]
null
null
null
Problem 06.ipynb
flothesof/advent_of_code2018
38472701620d1700bdc135b3960b741401e39864
[ "MIT" ]
null
null
null
23.120427
196
0.413793
[ [ [ "This one is a matrix problem: let's use NumPy!", "_____no_output_____" ], [ "# Sample input part 1 ", "_____no_output_____" ], [ "Let's start with the sample.", "_____no_output_____" ] ], [ [ "sample = \"\"\"1, 1\n1, 6\n8, 3\n3, 4\n5, 5\n8, 9\"\"\"", "_____no_output_____" ], [ "sources = [tuple(map(int, line.split(', '))) for line in sample.split('\\n')]", "_____no_output_____" ], [ "sources", "_____no_output_____" ] ], [ [ "Let's write a function that returns a numpy grid of closest distances given a source and a grid size.", "_____no_output_____" ] ], [ [ "import numpy as np\n\ndef manhattan(source, grid_size):\n n, m = grid_size\n grid = np.empty(grid_size, dtype=int)\n X, Y = np.meshgrid(np.arange(n), np.arange(m))\n return np.abs(X - source[0]) + np.abs(Y - source[1])", "_____no_output_____" ], [ "manhattan(sources[0], (10, 10))", "_____no_output_____" ], [ "manhattan(sources[1], (10, 10))", "_____no_output_____" ], [ "a = manhattan(sources[0], (10, 10))\nb = manhattan(sources[1], (10, 10))\nnp.where(a < b, np.ones_like(a), np.ones_like(a) * 2) ", "_____no_output_____" ], [ "grid_size = (10, 10)\nnearest = np.ones(grid_size) * -1\nmin_dist = np.ones(grid_size, dtype=int) * 10000\nfor index, source in enumerate(sources):\n dist = manhattan(source, grid_size)\n nearest[dist == min_dist] = np.nan\n nearest = np.where(dist < min_dist, np.zeros_like(nearest) + index, nearest)\n min_dist = np.where(dist < min_dist, dist, min_dist)", "_____no_output_____" ], [ "nearest", "_____no_output_____" ] ], [ [ "If we set the infinite groups to zero that would be the 0, 2, 1 and 5 we would get:", "_____no_output_____" ] ], [ [ "infinites = [0, 2, 1, 5]\nfor infinite in infinites:\n nearest[nearest == infinite] = np.nan", "_____no_output_____" ], [ "for i in (set(range(len(sources))) - set(infinites)):\n print(i, np.nansum(nearest == i))", "3 9\n4 17\n" ] ], [ [ "# Part 1 for real ", "_____no_output_____" ] ], [ [ "sources = [tuple(map(int, line.split(', '))) for line in open('input06.txt').readlines()]", "_____no_output_____" ], [ "np.array(sources).min(axis=0)", "_____no_output_____" ], [ "np.array(sources).max(axis=0)", "_____no_output_____" ], [ "grid_size = (360, 360)\nnearest = np.ones(grid_size) * -1\nmin_dist = np.ones(grid_size, dtype=int) * 10000\nfor index, source in enumerate(sources):\n dist = manhattan(source, grid_size)\n nearest[dist == min_dist] = np.nan\n nearest = np.where(dist < min_dist, np.zeros_like(nearest) + index, nearest)\n min_dist = np.where(dist < min_dist, dist, min_dist)", "_____no_output_____" ], [ "nearest", "_____no_output_____" ] ], [ [ "The funny thing here is that this view of the matrix directly allows us to set the four corners as the infinites since it's the nearest neighbor to each corner that will be the infinite one!", "_____no_output_____" ] ], [ [ "infinites = [0, 9, 28, 37]\nfor infinite in infinites:\n nearest[nearest == infinite] = np.nan", "_____no_output_____" ], [ "for i in (set(range(len(sources))) - set(infinites)):\n print(i, np.nansum(nearest == i))", "1 1086\n2 2705\n3 2730\n4 1780\n5 871\n6 3873\n7 2996\n8 2303\n10 1015\n11 2972\n12 3982\n13 2310\n14 4114\n15 3428\n16 2392\n17 1759\n18 4732\n19 4491\n20 2214\n21 527\n22 3879\n23 735\n24 1352\n25 1627\n26 813\n27 1756\n29 946\n30 3532\n31 3117\n32 729\n33 2662\n34 2296\n35 5187\n36 318\n38 1062\n39 2902\n40 3255\n41 1632\n42 120\n43 2931\n44 1494\n45 2636\n46 1437\n47 356\n48 4050\n49 4493\n" ], [ "max(np.nansum(nearest == i) for i in (set(range(len(sources))) - set(infinites)))", 
"_____no_output_____" ] ], [ [ "# Part 2 sample ", "_____no_output_____" ] ], [ [ "sources = [tuple(map(int, line.split(', '))) for line in sample.split('\\n')]", "_____no_output_____" ], [ "X, Y = np.meshgrid(np.arange(10), np.arange(10))", "_____no_output_____" ], [ "total = np.zeros((10, 10))\nfor source in sources:\n total += np.abs(X - source[0]) + np.abs(Y - source[1])", "_____no_output_____" ], [ "np.where(total < 32, 1, 0)", "_____no_output_____" ], [ "np.sum(np.where(total < 32, 1, 0))", "_____no_output_____" ] ], [ [ "# Part 2 for real ", "_____no_output_____" ] ], [ [ "sources = [tuple(map(int, line.split(', '))) for line in open('input06.txt').readlines()]", "_____no_output_____" ], [ "grid_size = (360, 360)", "_____no_output_____" ], [ "X, Y = np.meshgrid(np.arange(360), np.arange(360))", "_____no_output_____" ], [ "total = np.zeros(grid_size)\nfor source in sources:\n total += np.abs(X - source[0]) + np.abs(Y - source[1])", "_____no_output_____" ], [ "np.sum(np.where(total < 10000, 1, 0))", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
e7d0c098201b394dd13958b63be84f98896278a4
142,772
ipynb
Jupyter Notebook
Chinook_1.ipynb
alinasr/business_questions_SQL_chinook_database
c81ab783ae17cfe26ac8004e83dbf883e473bcb3
[ "MIT" ]
null
null
null
Chinook_1.ipynb
alinasr/business_questions_SQL_chinook_database
c81ab783ae17cfe26ac8004e83dbf883e473bcb3
[ "MIT" ]
null
null
null
Chinook_1.ipynb
alinasr/business_questions_SQL_chinook_database
c81ab783ae17cfe26ac8004e83dbf883e473bcb3
[ "MIT" ]
null
null
null
138.748299
80,600
0.836866
[ [ [ "## Answering Business Questions using SQL for Chinook database\n\nIn this project we are going to explore and analyze chinook data base. we are going to use modified version of data base which we included in project directory. The Chinook database contains information about a fictional digital music shop - kind of like a mini-iTunes store\n\nhere is some questions that we are going to answer in this project\n\n1- what is the best genre \n2- employee performance \n3- best countries by sale \n4- how many purchases are whole album purchasing vs individual tracks \n\nFirst we are going to import essential libraries and connect to data base also in this cell we defined our necessary functions which we are going to use in our project. at the end of the cell we showed all tables and their names in this database in a table", "_____no_output_____" ] ], [ [ "import sqlite3\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib import cm\n%matplotlib inline\n\ndef run_query(q):\n with sqlite3.connect(\"chinook.db\") as conn:\n return pd.read_sql(q, conn)\n \n \ndef run_command(c):\n with sqlite3.connect(\"chinook.db\") as conn:\n conn.isolation_level = None\n conn.execute(c)\n \n\ndef show_tables():\n q ='''\n SELECT\n name,\n type\nFROM sqlite_master\nWHERE type IN (\"table\",\"view\");\n'''\n return run_query(q)\n \nshow_tables()", "_____no_output_____" ] ], [ [ "## Best Genres\n\nthe chinook signed a contract and they want to see which genre and artist is worse to by and invest for their online shop. in table blow we can see each artists and genre in this contract.\n\n\n| Artist Name| Genre | \n| --- | --- | \n| Regal | Hip-Hop |\n| --- | --- | \n| Red Tone\t | Punk |\n| --- | --- | \n| Meteor and the Girls\t | Pop |\n| --- | --- | \n| Slim Jim Bites | Blues |\n\nThen we are going to find the best sold genres in USA and recommend first three one which are in contract our business managers.", "_____no_output_____" ] ], [ [ "best_sold_genre = '''\n\nWITH USA_purches AS\n (SELECT il.*, c.country\n FROM customer c\n INNER JOIN invoice i ON i.customer_id = c.customer_id\n INNER JOIN invoice_line il ON il.invoice_id = i.invoice_id\n WHERE c.country = \"USA\"\n )\n\nSELECT\n g.name genre_name,\n SUM(up.quantity) total_sold,\n CAST(SUM(up.quantity) as float) / CAST((SELECT COUNT(quantity) FROM USA_purches) as float) sold_percentage\nFROM USA_purches up\nLEFT JOIN track t ON t.track_id = up.track_id\nLEFT JOIN genre g on g.genre_id = t.genre_id\nGROUP BY 1\nORDER by 2 DESC\nLIMIT 10\n\n'''\nrun_query(best_sold_genre)", "_____no_output_____" ], [ "top_genre_usa = run_query(best_sold_genre)\ntop_genre_usa.set_index(\"genre_name\",drop=True,inplace=True)\nax = top_genre_usa[\"total_sold\"].plot(kind='barh', title =\"Top genres in USA\", \n figsize=(8, 6), legend=False, fontsize=12, \n width = 0.75)\n\nax.annotate(\"test labele annoate\", (600, - 0.15))", "_____no_output_____" ] ], [ [ "As you can see in the table and figure above the best genre in with high sold tracks is ROCK then Alternative & Punk following with metal. But based on our list to choose first three albums we first need to select Alternative & Punk then Blues and then Pop. 
So we need to purchase for the store from artists :\n\n1- `Red Tone` \n2- `Slim Jim Bites` \n3- `Meteor and the Girls` \n\n## Employee performance\n\nin this section we are going to analyze each sales support agent sales performance as in the query and the code blow", "_____no_output_____" ] ], [ [ "employee_performance = '''\nSELECT \n e.first_name || \" \" || e.last_name employee_name,\n e.birthdate,\n e.hire_Date,\n SUM(i.total) total_dollar\nFROM employee e\nLEFT JOIN customer c ON c.support_rep_id = e.employee_id\nLEFT JOIN invoice i ON i.customer_id = c.customer_id\nWHERE e.title = \"Sales Support Agent\"\nGROUP BY 1\nORDER BY 4 DESC\n\n'''\n\nrun_query(employee_performance)", "_____no_output_____" ], [ "employee_dollar = run_query(employee_performance)\nemployee_dollar.set_index(\"employee_name\", inplace = True, drop = True)\nemployee_dollar[\"total_dollar\"].plot.barh(title = \"Employee performance for Sales Support Agents\",\n colormap=plt.cm.Accent)", "_____no_output_____" ] ], [ [ "As we can see in the table and figure above there are only a bit differences between total amount of dollar for each employee and if you look at their hire data the oldest one has most total dollar amount. there is no relation between their age and their performance in this particular analysis.\n\n## Sales by country\n\nwe are going to analyze sales by country like number of customer total sale by dollar for each country in the query blow", "_____no_output_____" ] ], [ [ "country_customer_sales = '''\n\nWITH customer_purches AS\n (\n SELECT \n c.country,\n c.customer_id,\n SUM(i.total) total,\n COUNT(distinct invoice_id) number_of_order\n FROM customer c\n LEFT JOIN invoice i ON i.customer_id = c.customer_id\n GROUP BY 2\n ),\n\n\n\n country_customer AS\n (\n SELECT \n SUM(number_of_order) number_of_order,\n SUM(total) total,\n SUM(total_customer) total_customer,\n CASE\n WHEN total_customer = 1 THEN \"other\"\n ELSE country\n END AS country \n FROM (\n SELECT country,\n COUNT(customer_id) total_customer,\n SUM(total) total,\n SUM(number_of_order) number_of_order\n FROM customer_purches\n GROUP by 1\n ORDER by 2 DESC \n ) \n GROUP by 4\n ORDER BY 3 DESC\n )\n \nSELECT \n country, \n total_customer,\n total total_dollar,\n CAST(total as float) / CAST(total_customer as float) customer_lifetime_value,\n number_of_order,\n CAST(total as float) / CAST(number_of_order as float) Average_order \nFROM (\n SELECT \n cc.*,\n CASE\n WHEN country = \"other\" THEN 1\n ELSE 0\n END AS sort\n FROM country_customer cc\n)\n\nORDER BY sort\n\n\n\n'''\n\n\n\n\n\nrun_query(country_customer_sales)", "_____no_output_____" ] ], [ [ "Also here in the code blow we are going to plot the results in the table above to have better understanding ", "_____no_output_____" ] ], [ [ "country_sales = run_query(country_customer_sales)\ncountry_sales.set_index(\"country\", drop=True, inplace=True)\ncolors = [plt.cm.tab20(i) for i in np.linspace(0, 1, country_sales.shape[0])]\n\n\nfig = plt.figure(figsize=(18, 14))\nfig.subplots_adjust(hspace=.5, wspace=.5)\n\n\n\nax1 = fig.add_subplot(2, 2, 1)\ncountry_sales_rename = country_sales[\"total_customer\"].copy().rename('')\ncountry_sales_rename.plot.pie(\n startangle=-90,\n counterclock=False,\n title=\"Number of customers for each country\",\n colormap=plt.cm.tab20,\n ax =ax1,\n fontsize = 14\n)\n\n\nax2 = fig.add_subplot(2, 2, 2)\n\naverage_order = country_sales[\"Average_order\"]\naverage_order.index.name = ''\ndifretional = ((average_order * 100) / average_order.mean()) 
-100\n\ndifretional.plot.barh(ax = ax2,\n title = \"Average_order differences from mean\",\n color = colors,\n fontsize = 14,\n width = 0.8\n )\n\nax3 = fig.add_subplot(2, 2, 3)\n\ncustomer_lifetime_value = country_sales[\"customer_lifetime_value\"]\ncustomer_lifetime_value.index.name = ''\ncustomer_lifetime_value.plot.bar(ax = ax3,\n title = \"customer lifetime value\",\n color = colors,\n fontsize = 14,\n width = 0.8\n )\n\nax4 = fig.add_subplot(2, 2, 4)\n\ntotal_dollar = country_sales[\"total_dollar\"]\ntotal_dollar.index.name = ''\ntotal_dollar.plot.bar(ax = ax4,\n title = \"Total salwes for each country $\",\n color = colors,\n fontsize = 14,\n width = 0.8\n )\n\n", "_____no_output_____" ] ], [ [ "as you can see in the plots and tables we have results very clearly for each country. most of customer and sales belong to first USA then Canada. But with looking at the `Average_order differences from mean` plot we could that there are opportunities in countries like `Czech Republic`, `United Kingdom` and `India`. but the data for these countries are very low 2 or 3 customer so we need more data in theses country if we are going to invest in them.\n\n## Album purchases or not\n\nThe chinook allows to customer purchases whole album and purchase a collection of one or more individual tracks.\n\nbut recently they want to make some changes in their purchasing policy and it is to purchase only the most popular tracks from each album from record companies, instead of purchasing every track from an album.\n\nwe need to find out how many of purchases are individual tracks vs whole albums, so the management could make decisions based on.\n\nthe query blow actually doing this. in this query for each invoice_id we compare if it is whole album purchases or not and we showed the result in the table", "_____no_output_____" ] ], [ [ "album_purches_or_not = '''\n\nWITH track_album AS \n (\n SELECT track_id,\n album_id\n FROM track \n ),\n first_track_album AS\n (SELECT il.invoice_id,\n il.track_id, \n t.album_id\n FROM invoice_line il\n LEFT JOIN track t ON t.track_id = il.track_id\n GROUP BY 1\n )\n \n \nSELECT\n album_purch,\n COUNT(invoice_id) invoice_number\n \nFROM\n ( \n SELECT\n fta.invoice_id,\n CASE\n WHEN \n (\n SELECT ta.track_id FROM track_album ta\n WHERE ta.album_id = fta.album_id \n \n \n EXCEPT\n \n SELECT il.track_id FROM invoice_line il\n WHERE fta.invoice_id = il.invoice_id\n \n ) IS NULL\n \n AND\n (\n SELECT il.track_id FROM invoice_line il\n WHERE fta.invoice_id = il.invoice_id\n \n \n EXCEPT\n \n SELECT ta.track_id FROM track_album ta\n WHERE ta.album_id = fta.album_id \n \n \n ) IS NULL\n THEN \"YES\"\n ELSE \"NO\"\n END AS album_purch\n FROM first_track_album fta\n )\n \n \nGROUP BY album_purch\n'''\n\n\nrun_query(album_purches_or_not)", "_____no_output_____" ] ], [ [ "as you can see in the table above most of the purchases are not whole album.\n\n\n## Conclusion\n\nin this project we explored and analyzed the fictional on-line music shop data to answer some business questions and we acquired these results\n\n1- the best genres that we need to include in our new deal is punk blues and pop and their corresponding artists in contract\n\n2- the employee performances in sales section are same for three of them and the differences is only because hiring date.\n\n3- the best countries in total customers and sale was USA then Canada. 
and there is business opportunities in `Czech Republic`, `United Kingdom` and `India` but we need more the data before we start.\n\n4- most of purchases are individual tracks vs in compare to whole album purchasing ", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
e7d0d30c2a2e4857496dbc79ae67d1251aa70bbb
37,542
ipynb
Jupyter Notebook
chapter_9_NLP/NLP_PySpark.ipynb
xhqing/machine-learning-with-pyspark
44cf40b9627589f9d0144b914581b752aaa94d00
[ "MIT" ]
null
null
null
chapter_9_NLP/NLP_PySpark.ipynb
xhqing/machine-learning-with-pyspark
44cf40b9627589f9d0144b914581b752aaa94d00
[ "MIT" ]
null
null
null
chapter_9_NLP/NLP_PySpark.ipynb
xhqing/machine-learning-with-pyspark
44cf40b9627589f9d0144b914581b752aaa94d00
[ "MIT" ]
1
2021-06-22T10:18:14.000Z
2021-06-22T10:18:14.000Z
31.258951
125
0.417346
[ [ [ "#create spark session\nfrom pyspark.sql import SparkSession\n\nspark=SparkSession.builder.appName('nlp').getOrCreate()", "_____no_output_____" ], [ "df=spark.createDataFrame([(1,'I really liked this movie'),\n (2,'I would recommend this movie to my friends'),\n (3,'movie was alright but acting was horrible'),\n (4,'I am never watching that movie ever again')],\n ['user_id','review'])", "_____no_output_____" ], [ "df.show(5,False)", "+-------+------------------------------------------+\n|user_id|review |\n+-------+------------------------------------------+\n|1 |I really liked this movie |\n|2 |I would recommend this movie to my friends|\n|3 |movie was alright but acting was horrible |\n|4 |I am never watching that movie ever again |\n+-------+------------------------------------------+\n\n" ], [ "# Tokenization", "_____no_output_____" ], [ "from pyspark.ml.feature import Tokenizer", "_____no_output_____" ], [ "tokenization=Tokenizer(inputCol='review',outputCol='tokens')", "_____no_output_____" ], [ "tokenized_df=tokenization.transform(df)", "_____no_output_____" ], [ "tokenized_df.show(4,False)", "+-------+------------------------------------------+---------------------------------------------------+\n|user_id|review |tokens |\n+-------+------------------------------------------+---------------------------------------------------+\n|1 |I really liked this movie |[i, really, liked, this, movie] |\n|2 |I would recommend this movie to my friends|[i, would, recommend, this, movie, to, my, friends]|\n|3 |movie was alright but acting was horrible |[movie, was, alright, but, acting, was, horrible] |\n|4 |I am never watching that movie ever again |[i, am, never, watching, that, movie, ever, again] |\n+-------+------------------------------------------+---------------------------------------------------+\n\n" ], [ "# stopwords removal ", "_____no_output_____" ], [ "from pyspark.ml.feature import StopWordsRemover", "_____no_output_____" ], [ "stopword_removal=StopWordsRemover(inputCol='tokens',outputCol='refined_tokens')", "_____no_output_____" ], [ "refined_df=stopword_removal.transform(tokenized_df)", "_____no_output_____" ], [ "refined_df.select(['user_id','tokens','refined_tokens']).show(10,False)", "+-------+---------------------------------------------------+----------------------------------+\n|user_id|tokens |refined_tokens |\n+-------+---------------------------------------------------+----------------------------------+\n|1 |[i, really, liked, this, movie] |[really, liked, movie] |\n|2 |[i, would, recommend, this, movie, to, my, friends]|[recommend, movie, friends] |\n|3 |[movie, was, alright, but, acting, was, horrible] |[movie, alright, acting, horrible]|\n|4 |[i, am, never, watching, that, movie, ever, again] |[never, watching, movie, ever] |\n+-------+---------------------------------------------------+----------------------------------+\n\n" ], [ "# Count Vectorizer", "_____no_output_____" ], [ "from pyspark.ml.feature import CountVectorizer", "_____no_output_____" ], [ "count_vec=CountVectorizer(inputCol='refined_tokens',outputCol='features')", "_____no_output_____" ], [ "cv_df=count_vec.fit(refined_df).transform(refined_df)", "_____no_output_____" ], [ "cv_df.select(['user_id','refined_tokens','features']).show(4,False)", "+-------+----------------------------------+--------------------------------+\n|user_id|refined_tokens |features |\n+-------+----------------------------------+--------------------------------+\n|1 |[really, liked, movie] |(11,[0,4,9],[1.0,1.0,1.0]) |\n|2 
|[recommend, movie, friends] |(11,[0,6,10],[1.0,1.0,1.0]) |\n|3 |[movie, alright, acting, horrible]|(11,[0,2,3,5],[1.0,1.0,1.0,1.0])|\n|4 |[never, watching, movie, ever] |(11,[0,1,7,8],[1.0,1.0,1.0,1.0])|\n+-------+----------------------------------+--------------------------------+\n\n" ], [ "count_vec.fit(refined_df).vocabulary", "_____no_output_____" ], [ "#Tf-idf", "_____no_output_____" ], [ "from pyspark.ml.feature import HashingTF,IDF", "_____no_output_____" ], [ "hashing_vec=HashingTF(inputCol='refined_tokens',outputCol='tf_features')", "_____no_output_____" ], [ "hashing_df=hashing_vec.transform(refined_df)", "_____no_output_____" ], [ "hashing_df.select(['user_id','refined_tokens','tf_features']).show(4,False)", "+-------+----------------------------------+-------------------------------------------------------+\n|user_id|refined_tokens |tf_features |\n+-------+----------------------------------+-------------------------------------------------------+\n|1 |[really, liked, movie] |(262144,[14,32675,155321],[1.0,1.0,1.0]) |\n|2 |[recommend, movie, friends] |(262144,[129613,155321,222394],[1.0,1.0,1.0]) |\n|3 |[movie, alright, acting, horrible]|(262144,[80824,155321,236263,240286],[1.0,1.0,1.0,1.0])|\n|4 |[never, watching, movie, ever] |(262144,[63139,155321,203802,245806],[1.0,1.0,1.0,1.0])|\n+-------+----------------------------------+-------------------------------------------------------+\n\n" ], [ "tf_idf_vec=IDF(inputCol='tf_features',outputCol='tf_idf_features')", "_____no_output_____" ], [ "tf_idf_df=tf_idf_vec.fit(hashing_df).transform(hashing_df)", "_____no_output_____" ], [ "tf_idf_df.select(['user_id','tf_idf_features']).show(4,False)", "+-------+----------------------------------------------------------------------------------------------------+\n|user_id|tf_idf_features |\n+-------+----------------------------------------------------------------------------------------------------+\n|1 |(262144,[14,32675,155321],[0.9162907318741551,0.9162907318741551,0.0]) |\n|2 |(262144,[129613,155321,222394],[0.9162907318741551,0.0,0.9162907318741551]) |\n|3 |(262144,[80824,155321,236263,240286],[0.9162907318741551,0.0,0.9162907318741551,0.9162907318741551])|\n|4 |(262144,[63139,155321,203802,245806],[0.9162907318741551,0.0,0.9162907318741551,0.9162907318741551])|\n+-------+----------------------------------------------------------------------------------------------------+\n\n" ], [ "# Classification ", "_____no_output_____" ], [ "text_df=spark.read.csv('Movie_reviews.csv',inferSchema=True,header=True,sep=',')", "_____no_output_____" ], [ "text_df.printSchema()", "root\n |-- Review: string (nullable = true)\n |-- Sentiment: string (nullable = true)\n\n" ], [ "text_df.count()", "_____no_output_____" ], [ "from pyspark.sql.functions import rand ", "_____no_output_____" ], [ "text_df.orderBy(rand()).show(10,False)", "+------------------------------------------------------------------------+---------+\n|Review |Sentiment|\n+------------------------------------------------------------------------+---------+\n|My dad's being stupid about brokeback mountain... |0 |\n|Ok brokeback mountain is such a horrible movie. |0 |\n|I love Brokeback Mountain. |1 |\n|He's like,'YEAH I GOT ACNE AND I LOVE BROKEBACK MOUNTAIN '.. |1 |\n|Harry Potter and the Sorcerer's Stone is great but I had forgotten what |1 |\n|\"Anyway, thats why I love \"\" Brokeback Mountain.\" |1 |\n|Which is why i said silent hill turned into reality coz i was hella like|1 |\n|Apparently the Da Vinci code sucks. 
|0 |\n|I am going to start reading the Harry Potter series again because that i|1 |\n|So as felicia's mom is cleaning the table, felicia grabs my keys and we |1 |\n+------------------------------------------------------------------------+---------+\nonly showing top 10 rows\n\n" ], [ "text_df=text_df.filter(((text_df.Sentiment =='1') | (text_df.Sentiment =='0')))", "_____no_output_____" ], [ "text_df.count()", "_____no_output_____" ], [ "text_df.groupBy('Sentiment').count().show()", "+---------+-----+\n|Sentiment|count|\n+---------+-----+\n| 0| 3081|\n| 1| 3909|\n+---------+-----+\n\n" ], [ "text_df.printSchema()", "root\n |-- Review: string (nullable = true)\n |-- Sentiment: string (nullable = true)\n\n" ], [ "text_df = text_df.withColumn(\"Label\", text_df.Sentiment.cast('float')).drop('Sentiment')", "_____no_output_____" ], [ "text_df.orderBy(rand()).show(10,False)", "+------------------------------------------------------------------------+-----+\n|Review |Label|\n+------------------------------------------------------------------------+-----+\n|I hate Harry Potter. |0.0 |\n|I am the only person in the world who thought Brokeback Mountain sucked.|0.0 |\n|Not because I hate Harry Potter, but because I am the type of person tha|0.0 |\n|Which is why i said silent hill turned into reality coz i was hella like|1.0 |\n|The Da Vinci Code sucked big time. |0.0 |\n|The Da Vinci Code sucked big time. |0.0 |\n|Ok brokeback mountain is such a horrible movie. |0.0 |\n|A / N: This is a gift for sivullinen who requested: ” I'd love some NC |1.0 |\n|, she helped me bobbypin my insanely cool hat to my head, and she laughe|0.0 |\n|I love the Da Vinci Code. |1.0 |\n+------------------------------------------------------------------------+-----+\nonly showing top 10 rows\n\n" ], [ "text_df.groupBy('label').count().show()", "+-----+-----+\n|label|count|\n+-----+-----+\n| 1.0| 3909|\n| 0.0| 3081|\n+-----+-----+\n\n" ], [ "# Add length to the dataframe\nfrom pyspark.sql.functions import length", "_____no_output_____" ], [ "text_df=text_df.withColumn('length',length(text_df['Review']))", "_____no_output_____" ], [ "text_df.orderBy(rand()).show(10,False)", "+------------------------------------------------------------------------+-----+------+\n|Review |Label|length|\n+------------------------------------------------------------------------+-----+------+\n|I have to say that I loved Brokeback Mountain. |1.0 |46 |\n|The Da Vinci Code is awesome!! |1.0 |30 |\n|Oh, and Brokeback Mountain was a terrible movie. |0.0 |48 |\n|man i loved brokeback mountain! |1.0 |31 |\n|Even though Brokeback Mountain is one of the most depressing movies, eve|0.0 |72 |\n|da vinci code sucks... |0.0 |22 |\n|Combining the opinion / review from Gary and Gin Zen, The Da Vinci Code |0.0 |71 |\n|da vinci code sucks... |0.0 |22 |\n|Finally feel up to making the long ass drive out to the Haunt tonight...|1.0 |72 |\n|the last stand and Mission Impossible 3 both were awesome movies. 
|1.0 |65 |\n+------------------------------------------------------------------------+-----+------+\nonly showing top 10 rows\n\n" ], [ "text_df.groupBy('Label').agg({'Length':'mean'}).show()", "+-----+-----------------+\n|Label| avg(Length)|\n+-----+-----------------+\n| 1.0|47.61882834484523|\n| 0.0|50.95845504706264|\n+-----+-----------------+\n\n" ], [ "# Data Cleaning", "_____no_output_____" ], [ "tokenization=Tokenizer(inputCol='Review',outputCol='tokens')", "_____no_output_____" ], [ "tokenized_df=tokenization.transform(text_df)", "_____no_output_____" ], [ "tokenized_df.show()", "+--------------------+-----+------+--------------------+\n| Review|Label|length| tokens|\n+--------------------+-----+------+--------------------+\n|The Da Vinci Code...| 1.0| 39|[the, da, vinci, ...|\n|this was the firs...| 1.0| 72|[this, was, the, ...|\n|i liked the Da Vi...| 1.0| 32|[i, liked, the, d...|\n|i liked the Da Vi...| 1.0| 32|[i, liked, the, d...|\n|I liked the Da Vi...| 1.0| 72|[i, liked, the, d...|\n|that's not even a...| 1.0| 72|[that's, not, eve...|\n|I loved the Da Vi...| 1.0| 72|[i, loved, the, d...|\n|i thought da vinc...| 1.0| 57|[i, thought, da, ...|\n|The Da Vinci Code...| 1.0| 45|[the, da, vinci, ...|\n|I thought the Da ...| 1.0| 51|[i, thought, the,...|\n|The Da Vinci Code...| 1.0| 68|[the, da, vinci, ...|\n|The Da Vinci Code...| 1.0| 62|[the, da, vinci, ...|\n|then I turn on th...| 1.0| 66|[then, i, turn, o...|\n|The Da Vinci Code...| 1.0| 34|[the, da, vinci, ...|\n|i love da vinci c...| 1.0| 24|[i, love, da, vin...|\n|i loved da vinci ...| 1.0| 23|[i, loved, da, vi...|\n|TO NIGHT:: THE DA...| 1.0| 52|[to, night::, the...|\n|THE DA VINCI CODE...| 1.0| 40|[the, da, vinci, ...|\n|Thing is, I enjoy...| 1.0| 38|[thing, is,, i, e...|\n|very da vinci cod...| 1.0| 38|[very, da, vinci,...|\n+--------------------+-----+------+--------------------+\nonly showing top 20 rows\n\n" ], [ "stopword_removal=StopWordsRemover(inputCol='tokens',outputCol='refined_tokens')", "_____no_output_____" ], [ "refined_text_df=stopword_removal.transform(tokenized_df)", "_____no_output_____" ], [ "refined_text_df.show()", "+--------------------+-----+------+--------------------+--------------------+\n| Review|Label|length| tokens| refined_tokens|\n+--------------------+-----+------+--------------------+--------------------+\n|The Da Vinci Code...| 1.0| 39|[the, da, vinci, ...|[da, vinci, code,...|\n|this was the firs...| 1.0| 72|[this, was, the, ...|[first, clive, cu...|\n|i liked the Da Vi...| 1.0| 32|[i, liked, the, d...|[liked, da, vinci...|\n|i liked the Da Vi...| 1.0| 32|[i, liked, the, d...|[liked, da, vinci...|\n|I liked the Da Vi...| 1.0| 72|[i, liked, the, d...|[liked, da, vinci...|\n|that's not even a...| 1.0| 72|[that's, not, eve...|[even, exaggerati...|\n|I loved the Da Vi...| 1.0| 72|[i, loved, the, d...|[loved, da, vinci...|\n|i thought da vinc...| 1.0| 57|[i, thought, da, ...|[thought, da, vin...|\n|The Da Vinci Code...| 1.0| 45|[the, da, vinci, ...|[da, vinci, code,...|\n|I thought the Da ...| 1.0| 51|[i, thought, the,...|[thought, da, vin...|\n|The Da Vinci Code...| 1.0| 68|[the, da, vinci, ...|[da, vinci, code,...|\n|The Da Vinci Code...| 1.0| 62|[the, da, vinci, ...|[da, vinci, code,...|\n|then I turn on th...| 1.0| 66|[then, i, turn, o...|[turn, light, rad...|\n|The Da Vinci Code...| 1.0| 34|[the, da, vinci, ...|[da, vinci, code,...|\n|i love da vinci c...| 1.0| 24|[i, love, da, vin...|[love, da, vinci,...|\n|i loved da vinci ...| 1.0| 23|[i, loved, da, vi...|[loved, da, vinci...|\n|TO 
NIGHT:: THE DA...| 1.0| 52|[to, night::, the...|[night::, da, vin...|\n|THE DA VINCI CODE...| 1.0| 40|[the, da, vinci, ...|[da, vinci, code,...|\n|Thing is, I enjoy...| 1.0| 38|[thing, is,, i, e...|[thing, is,, enjo...|\n|very da vinci cod...| 1.0| 38|[very, da, vinci,...|[da, vinci, code,...|\n+--------------------+-----+------+--------------------+--------------------+\nonly showing top 20 rows\n\n" ], [ "from pyspark.sql.functions import udf\nfrom pyspark.sql.types import IntegerType\nfrom pyspark.sql.functions import *", "_____no_output_____" ], [ "len_udf = udf(lambda s: len(s), IntegerType())\n\nrefined_text_df = refined_text_df.withColumn(\"token_count\", len_udf(col('refined_tokens')))\n", "_____no_output_____" ], [ "refined_text_df.orderBy(rand()).show(10)", "+--------------------+-----+------+--------------------+--------------------+-----------+\n| Review|Label|length| tokens| refined_tokens|token_count|\n+--------------------+-----+------+--------------------+--------------------+-----------+\n|da vinci code was...| 1.0| 37|[da, vinci, code,...|[da, vinci, code,...| 5|\n|Not because I hat...| 0.0| 72|[not, because, i,...|[hate, harry, pot...| 6|\n|I love Harry Potter.| 1.0| 20|[i, love, harry, ...|[love, harry, pot...| 3|\n|and I love Da Vin...| 1.0| 71|[and, i, love, da...|[love, da, vinci,...| 7|\n|Da Vinci Code = U...| 0.0| 72|[da, vinci, code,...|[da, vinci, code,...| 15|\n|Brokeback Mountai...| 1.0| 34|[brokeback, mount...|[brokeback, mount...| 3|\n|I think I hate Ha...| 0.0| 72|[i, think, i, hat...|[think, hate, har...| 9|\n|Harry Potter is b...| 1.0| 26|[harry, potter, i...|[harry, potter, b...| 3|\n|The Da Vinci Code...| 1.0| 30|[the, da, vinci, ...|[da, vinci, code,...| 4|\n|Combining the opi...| 0.0| 71|[combining, the, ...|[combining, opini...| 10|\n+--------------------+-----+------+--------------------+--------------------+-----------+\nonly showing top 10 rows\n\n" ], [ "count_vec=CountVectorizer(inputCol='refined_tokens',outputCol='features')", "_____no_output_____" ], [ "cv_text_df=count_vec.fit(refined_text_df).transform(refined_text_df)", "_____no_output_____" ], [ "cv_text_df.select(['refined_tokens','token_count','features','Label']).show(10)", "+--------------------+-----------+--------------------+-----+\n| refined_tokens|token_count| features|Label|\n+--------------------+-----------+--------------------+-----+\n|[da, vinci, code,...| 5|(2302,[0,1,4,43,2...| 1.0|\n|[first, clive, cu...| 9|(2302,[11,51,229,...| 1.0|\n|[liked, da, vinci...| 5|(2302,[0,1,4,53,3...| 1.0|\n|[liked, da, vinci...| 5|(2302,[0,1,4,53,3...| 1.0|\n|[liked, da, vinci...| 8|(2302,[0,1,4,53,6...| 1.0|\n|[even, exaggerati...| 6|(2302,[46,229,271...| 1.0|\n|[loved, da, vinci...| 8|(2302,[0,1,22,30,...| 1.0|\n|[thought, da, vin...| 7|(2302,[0,1,4,228,...| 1.0|\n|[da, vinci, code,...| 6|(2302,[0,1,4,33,2...| 1.0|\n|[thought, da, vin...| 7|(2302,[0,1,4,223,...| 1.0|\n+--------------------+-----------+--------------------+-----+\nonly showing top 10 rows\n\n" ], [ "#select data for building model\nmodel_text_df=cv_text_df.select(['features','token_count','Label'])", "_____no_output_____" ], [ "from pyspark.ml.feature import VectorAssembler", "_____no_output_____" ], [ "df_assembler = VectorAssembler(inputCols=['features','token_count'],outputCol='features_vec')\nmodel_text_df = df_assembler.transform(model_text_df)", "_____no_output_____" ], [ "model_text_df.printSchema()", "root\n |-- features: vector (nullable = true)\n |-- token_count: integer (nullable = true)\n |-- Label: float 
(nullable = true)\n |-- features_vec: vector (nullable = true)\n\n" ], [ "from pyspark.ml.classification import LogisticRegression", "_____no_output_____" ], [ "#split the data \ntraining_df,test_df=model_text_df.randomSplit([0.75,0.25])", "_____no_output_____" ], [ "training_df.groupBy('Label').count().show()", "+-----+-----+\n|Label|count|\n+-----+-----+\n| 1.0| 2979|\n| 0.0| 2335|\n+-----+-----+\n\n" ], [ "test_df.groupBy('Label').count().show()", "+-----+-----+\n|Label|count|\n+-----+-----+\n| 1.0| 930|\n| 0.0| 746|\n+-----+-----+\n\n" ], [ "log_reg=LogisticRegression(featuresCol='features_vec',labelCol='Label').fit(training_df)", "_____no_output_____" ], [ "results=log_reg.evaluate(test_df).predictions", "_____no_output_____" ], [ "results.show()", "+--------------------+-----------+-----+--------------------+--------------------+--------------------+----------+\n| features|token_count|Label| features_vec| rawPrediction| probability|prediction|\n+--------------------+-----------+-----+--------------------+--------------------+--------------------+----------+\n|(2302,[0,1,4,5,64...| 6| 1.0|(2303,[0,1,4,5,64...|[-17.272830422692...|[3.15141100218827...| 1.0|\n|(2302,[0,1,4,5,89...| 9| 1.0|(2303,[0,1,4,5,89...|[-5.3071943841355...|[0.00493137238287...| 1.0|\n|(2302,[0,1,4,5,30...| 5| 1.0|(2303,[0,1,4,5,30...|[-20.050569575912...|[1.95951356060452...| 1.0|\n|(2302,[0,1,4,5,44...| 5| 1.0|(2303,[0,1,4,5,44...|[-20.154922616911...|[1.76533984442990...| 1.0|\n|(2302,[0,1,4,5,82...| 6| 1.0|(2303,[0,1,4,5,82...|[-14.417812465440...|[5.47549475575723...| 1.0|\n|(2302,[0,1,4,11,1...| 6| 0.0|(2303,[0,1,4,11,1...|[19.1666519710833...|[0.99999999525726...| 0.0|\n|(2302,[0,1,4,11,4...| 7| 1.0|(2303,[0,1,4,11,4...|[-19.931332074913...|[2.20766138812030...| 1.0|\n|(2302,[0,1,4,12,1...| 8| 1.0|(2303,[0,1,4,12,1...|[0.94242754202479...|[0.71958974868604...| 0.0|\n|(2302,[0,1,4,12,1...| 5| 1.0|(2303,[0,1,4,12,1...|[-16.855454538304...|[4.78375669750487...| 1.0|\n|(2302,[0,1,4,12,3...| 8| 1.0|(2303,[0,1,4,12,3...|[-25.986485886596...|[5.17860248497715...| 1.0|\n|(2302,[0,1,4,12,3...| 5| 1.0|(2303,[0,1,4,12,3...|[-20.047155070146...|[1.96621576679026...| 1.0|\n|(2302,[0,1,4,12,3...| 5| 1.0|(2303,[0,1,4,12,3...|[-20.047155070146...|[1.96621576679026...| 1.0|\n|(2302,[0,1,4,12,3...| 5| 1.0|(2303,[0,1,4,12,3...|[-20.047155070146...|[1.96621576679026...| 1.0|\n|(2302,[0,1,4,12,3...| 5| 1.0|(2303,[0,1,4,12,3...|[-20.047155070146...|[1.96621576679026...| 1.0|\n|(2302,[0,1,4,12,3...| 5| 1.0|(2303,[0,1,4,12,3...|[-20.047155070146...|[1.96621576679026...| 1.0|\n|(2302,[0,1,4,12,3...| 5| 1.0|(2303,[0,1,4,12,3...|[-20.047155070146...|[1.96621576679026...| 1.0|\n|(2302,[0,1,4,12,3...| 5| 1.0|(2303,[0,1,4,12,3...|[-20.047155070146...|[1.96621576679026...| 1.0|\n|(2302,[0,1,4,12,3...| 5| 1.0|(2303,[0,1,4,12,3...|[-20.047155070146...|[1.96621576679026...| 1.0|\n|(2302,[0,1,4,12,3...| 5| 1.0|(2303,[0,1,4,12,3...|[-20.047155070146...|[1.96621576679026...| 1.0|\n|(2302,[0,1,4,12,3...| 5| 1.0|(2303,[0,1,4,12,3...|[-20.047155070146...|[1.96621576679026...| 1.0|\n+--------------------+-----------+-----+--------------------+--------------------+--------------------+----------+\nonly showing top 20 rows\n\n" ], [ "from pyspark.ml.evaluation import BinaryClassificationEvaluator\n", "_____no_output_____" ], [ "#confusion matrix\ntrue_postives = results[(results.Label == 1) & (results.prediction == 1)].count()\ntrue_negatives = results[(results.Label == 0) & (results.prediction == 0)].count()\nfalse_positives = 
results[(results.Label == 0) & (results.prediction == 1)].count()\nfalse_negatives = results[(results.Label == 1) & (results.prediction == 0)].count()", "_____no_output_____" ], [ "recall = float(true_postives)/(true_postives + false_negatives)\nprint(recall)", "0.986021505376344\n" ], [ "precision = float(true_postives) / (true_postives + false_positives)\nprint(precision)", "0.9572025052192067\n" ], [ "accuracy=float((true_postives+true_negatives) /(results.count()))\nprint(accuracy)", "0.9677804295942721\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7d0dd927b624168107386057ea61884e2e7f235
245,016
ipynb
Jupyter Notebook
when_to_buy_sp500.ipynb
tfiers/when-to-buy-spy
cc285e9a4b7d415425b445ce0fbd9a155f486466
[ "MIT" ]
2
2021-02-12T19:04:08.000Z
2021-02-16T18:45:22.000Z
when_to_buy_sp500.ipynb
tfiers/when-to-buy-spy
cc285e9a4b7d415425b445ce0fbd9a155f486466
[ "MIT" ]
null
null
null
when_to_buy_sp500.ipynb
tfiers/when-to-buy-spy
cc285e9a4b7d415425b445ce0fbd9a155f486466
[ "MIT" ]
null
null
null
931.619772
238,704
0.952897
[ [ [ "#!pip install yfinance", "_____no_output_____" ], [ "from tfiers.nb import *", "Preloading: numpy, matplotlib.pyplot, pandas, seaborn, janitor.\nImported `np`, `mpl`, `plt`, `sns`, `pd`\n" ], [ "import yfinance", "_____no_output_____" ], [ "ticker = yfinance.Ticker(\"SPY\")\nprice = ticker.history(period=\"5y\")['Close'];", "_____no_output_____" ], [ "T_short = 4 # in business days\nT_longg = 12\nprice_T_short_days_ago = price.shift(T_short)\nprice_T_longg_days_ago = price.shift(T_longg);", "_____no_output_____" ], [ "buy_short = price <= price_T_short_days_ago;\nbuy_longg = price <= price_T_longg_days_ago;", "_____no_output_____" ], [ "nans = pd.Series(np.nan, price.index)\nbuy_short_ = pd.concat((price[buy_short], nans[~buy_short])).sort_index()\nbuy_longg_ = pd.concat((price[buy_longg], nans[~buy_longg])).sort_index();", "_____no_output_____" ], [ "def plot(ax, date_offset):\n last_day = price.index[-1]\n t0 = last_day - pd.DateOffset(**date_offset)\n nans[t0:].asfreq('D').plot(color='black', label='', ax=ax)\n # hack to get nicely formatted xticks & labels\n # .. while not having gaps over the weekends in the price series\n # (which is what'd happen with `price.asfreq('D')`).\n price[t0:].plot(marker='.', ms=1.4, lw=0.7, label=\"S&P 500 closing price\", color='black', ax=ax)\n buy_short_[t0:].plot(label=f\"Price is lower than {T_short} business days ago\",\n marker='.', ms=3, ax=ax)\n buy_longg_[t0:].plot(label=f\"Price is lower than {T_longg} business days ago\",\n lw=4, alpha=0.3, color='C2', solid_capstyle='round', ax=ax)\n ax.set_xlabel(None)", "_____no_output_____" ], [ "from datetime import datetime\nmy_timezone = datetime.now().astimezone().tzinfo\nnow = datetime.now(my_timezone)\ntitle = f\"Report generated on {now:%a %d %b %Y, at %H:%M (UTC%z)}\";", "_____no_output_____" ], [ "m = \"main plot, medium duration\"\nl = \"long duration (zoom out)\"\ns = \"short duration (zoom in)\"\n\ndurations = {\n m: dict(years=1),\n l: dict(years=5),\n s: dict(months=3),\n}\n\nfig = plt.figure(**figsize(width=800, aspect=1.5))\naxes = fig.subplot_mosaic(\n [[m, s],\n [l, l]],\n gridspec_kw=dict(height_ratios=(1, 0.8), width_ratios=(1, 0.4))\n)\n\nfor key, ax in axes.items():\n plot(ax, durations[key])\n if key == m:\n ax.legend()\n \nfig.suptitle(title, size=8, color='grey', y=0.93, ha='left');", "_____no_output_____" ] ], [ [ "Remove existing figure file.", "_____no_output_____" ] ], [ [ "from pathlib import Path\ndesktop = Path(r\"C:\\Users\\tfiers\\Desktop\");\nfname_suffix = \" spy.png\"\nfor f in desktop.glob(f\"*{fname_suffix}\"):\n f.unlink()", "_____no_output_____" ], [ "if buy_longg[-1]:\n fname_prefix = \"💰💰BUY\"\nelif buy_short[-1]:\n fname_prefix = \"💰buy\"\nelse:\n fname_prefix = \"don't buy\"", "_____no_output_____" ], [ "fig.savefig(desktop / (fname_prefix + fname_suffix), );", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
e7d0e30acc7734a0ec84df9572b8235f2ce24f91
135,177
ipynb
Jupyter Notebook
SOSS/Final_extraction.ipynb
njcuk9999/jwst-mtl
81d3e7ec6adc5dae180cd9d3bff8e4a2a7292596
[ "MIT" ]
1
2022-02-04T13:59:18.000Z
2022-02-04T13:59:18.000Z
SOSS/Final_extraction.ipynb
njcuk9999/jwst-mtl
81d3e7ec6adc5dae180cd9d3bff8e4a2a7292596
[ "MIT" ]
12
2020-09-17T20:14:03.000Z
2022-03-21T21:16:43.000Z
SOSS/Final_extraction.ipynb
njcuk9999/jwst-mtl
81d3e7ec6adc5dae180cd9d3bff8e4a2a7292596
[ "MIT" ]
1
2020-09-18T15:25:52.000Z
2020-09-18T15:25:52.000Z
333.77037
104,960
0.93783
[ [ [ "# Final extraction\nThere are 2 options for a final extraction:\n1. Directly take the binned flux (see Tikhonov_extraction.ipynb)\n2. 2D decontamination + box-like extraction\n\nThis notebook focuses on the second options (the first option is shown in other notebooks)", "_____no_output_____" ], [ "## Imports", "_____no_output_____" ] ], [ [ "# Imports for plots\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import LogNorm #for better display of FITS images\n\n# Imports from standard packages\n# from scipy.interpolate import interp1d\nfrom astropy.io import fits\nimport numpy as np\n\n# Imports for extraction\nfrom extract.overlap import TrpzOverlap, TrpzBox\nfrom extract.throughput import ThroughputSOSS\nfrom extract.convolution import WebbKer", "_____no_output_____" ] ], [ [ "### Matplotlib defaults", "_____no_output_____" ] ], [ [ "%matplotlib inline", "_____no_output_____" ], [ "plt.rc('figure', figsize=(9,3))\nplt.rcParams[\"image.cmap\"] = \"inferno\"", "_____no_output_____" ] ], [ [ "## Read ref files", "_____no_output_____" ] ], [ [ "# List of orders to consider in the extraction\norder_list = [1, 2]\n\n#### Wavelength solution ####\nwave_maps = []\nwave_maps.append(fits.getdata(\"extract/Ref_files/wavelengths_m1.fits\"))\nwave_maps.append(fits.getdata(\"extract/Ref_files/wavelengths_m2.fits\"))\n\n#### Spatial profiles ####\nspat_pros = []\nspat_pros.append(fits.getdata(\"extract/Ref_files/spat_profile_m1.fits\").squeeze())\nspat_pros.append(fits.getdata(\"extract/Ref_files/spat_profile_m2.fits\").squeeze())\n\n# Convert data from fits files to float (fits precision is 1e-8)\nwave_maps = [wv.astype('float64') for wv in wave_maps]\nspat_pros = [p_ord.astype('float64') for p_ord in spat_pros]\n\n#### Throughputs ####\nthrpt_list = [ThroughputSOSS(order) for order in order_list]\n\n#### Convolution kernels ####\nker_list = [WebbKer(wv_map) for wv_map in wave_maps]\n\n# Put all inputs from reference files in a list\nref_files_args = [spat_pros, wave_maps, thrpt_list, ker_list]", "_____no_output_____" ] ], [ [ "## Load simulation", "_____no_output_____" ] ], [ [ "# Import custom function to read toy simulation\nfrom sys import path\npath.append(\"Fake_data\")\nfrom simu_utils import load_simu\n# Load a simulation\nsimu = load_simu(\"Fake_data/phoenix_teff_02300_scale_1.0e+02.fits\")\ndata = simu[\"data\"]", "_____no_output_____" ] ], [ [ "## Extraction", "_____no_output_____" ], [ "### Parameters\n(Example using with few inputs parameters)", "_____no_output_____" ] ], [ [ "params = {}\n\n# Map of expected noise (sig)\nbkgd_noise = 20.\n\n# Oversampling\nparams[\"n_os\"] = 3\n\n# Threshold on the spatial profile\nparams[\"thresh\"] = 1e-4", "_____no_output_____" ] ], [ [ "### Init extraction object\n(This can be done only once if the `n_os` doesn't change)", "_____no_output_____" ] ], [ [ "extra = TrpzOverlap(*ref_files_args, **params)", "_____no_output_____" ] ], [ [ "### Extract\nHere, we run a really simple extraction (only one step, no tikhonov). 
<br>\nIn reality, this is were the \"solver\" should be used to iterate and get the best extraction as possible.", "_____no_output_____" ] ], [ [ "# Noise estimate to weight the pixels\n# Poisson noise + background noise\nsig = np.sqrt(data + bkgd_noise**2)\n\n# Extract\nf_k = extra.extract(data=data, sig=sig)", "_____no_output_____" ] ], [ [ "## Quality estimate", "_____no_output_____" ], [ "### Rebuild the detector", "_____no_output_____" ] ], [ [ "rebuilt = extra.rebuild(f_k)", "_____no_output_____" ], [ "plt.figure(figsize=(16,2))\nplt.imshow((rebuilt-data)/sig, vmin=-3, vmax=3)\nplt.colorbar(label=\"Error relative to noise\")", "_____no_output_____" ] ], [ [ "## Decontaminate the 2d image\nGenerate a decontaminated image for each order", "_____no_output_____" ] ], [ [ "data_decont = []\nfor i_ord in range(extra.n_ord):\n # Rebuild the contaminating order\n rebuilt = extra.rebuild(f_k, orders=[i_ord])\n \n # Remove this order and save\n data_decont.append(data - rebuilt)\n\n# Flip so the first order is in first position\ndata_decont = np.flip(data_decont, axis=0)", "_____no_output_____" ] ], [ [ "## Get 1d spectrum with box-like extraction\nMore details for box-like extraction in Box_like_extraction.ipynb", "_____no_output_____" ] ], [ [ "# Use a single row for final wavelength bin\ngrid_box_list = [wv_map[50, :] for wv_map in wave_maps]\n# Keep well defined values and sort\ngrid_box_list = [np.unique(wv[wv > 0.])\n for wv in grid_box_list]\nf_bin_list = []\n\n# Iterate on each orders\nfor i_ord in range(extra.n_ord):\n # Reference files\n wv_map = extra.lam_list[i_ord]\n aperture = extra.p_list[i_ord]\n \n # Mask\n mask = np.isnan(data_decont[i_ord])\n \n # Define extraction object\n box_extra = TrpzBox(aperture, wv_map, box_width=30, mask=mask)\n \n # Extract the flux\n f_k = box_extra.extract(data=data_decont[i_ord])\n\n # Bin to pixels\n grid_box = grid_box_list[i_ord]\n _, f_bin = box_extra.bin_to_pixel(grid_pix=grid_box, f_k=f_k)\n \n # Save\n f_bin_list.append(f_bin)", "_____no_output_____" ] ], [ [ "The final output is in f_bin_list (for each orders)", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots(2, 1, sharex=True)\nfor i_ord in range(extra.n_ord):\n ax[i_ord].plot(grid_box_list[i_ord], f_bin_list[i_ord])\n \nax[0].set_ylabel(\"Counts\")\nax[1].set_xlabel(\"Wavelength [$\\mu m$]\")", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
e7d0e64522f94a58425d1ee5bf7b4c42348c0feb
533,464
ipynb
Jupyter Notebook
Churn-Prediction.ipynb
Leonardodsch/churn-prediction
40e02bbb9999cc2b7653d6b0b2aeb71913016cd0
[ "MIT" ]
null
null
null
Churn-Prediction.ipynb
Leonardodsch/churn-prediction
40e02bbb9999cc2b7653d6b0b2aeb71913016cd0
[ "MIT" ]
null
null
null
Churn-Prediction.ipynb
Leonardodsch/churn-prediction
40e02bbb9999cc2b7653d6b0b2aeb71913016cd0
[ "MIT" ]
null
null
null
87.295696
75,164
0.806591
[ [ [ "# 0.0 Imports", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn import metrics as m\nfrom sklearn.svm import SVC\nfrom imblearn import combine as c\nfrom boruta import BorutaPy\nfrom IPython.core.display import display, HTML\nimport inflection\nimport warnings\nimport joblib\nwarnings.filterwarnings('ignore')\nfrom scipy import stats\nimport requests\nfrom sklearn.dummy import DummyClassifier\nfrom sklearn.linear_model import LogisticRegression \nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom xgboost import XGBClassifier\nfrom sklearn.model_selection import RandomizedSearchCV\nfrom sklearn import metrics as m\nfrom sklearn.model_selection import train_test_split, StratifiedKFold\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.preprocessing import RobustScaler, MinMaxScaler, LabelEncoder", "_____no_output_____" ] ], [ [ "## 0.1 Helper Functions", "_____no_output_____" ] ], [ [ "def jupyter_settings():\n %matplotlib inline\n %pylab inline\n \n plt.style.use( 'bmh' )\n plt.rcParams['figure.figsize'] = [18, 9]\n plt.rcParams['font.size'] = 24\n\n display( HTML( '<style>.container { width:100% !important; }</style>') )\n pd.options.display.max_columns = None\n pd.options.display.max_rows = None\n pd.set_option( 'display.expand_frame_repr', False )\n pd.set_option('display.float_format', lambda x: '%.3f' % x)\n warnings.filterwarnings('ignore')\n sns.set()\n \njupyter_settings()\n\ndef barplot(a,b,data):\n plot = sns.barplot(x=a, y=b, data=data, edgecolor='k', palette='Blues');\n return plot\n\ndef cramer_v(x,y):\n cm = pd.crosstab(x,y).values\n n = cm.sum()\n r,k = cm.shape\n \n chi2 = stats.chi2_contingency(cm)[0]\n chi2corr = max(0, chi2 - (k-1)*(r-1)/(n-1))\n \n kcorr = k - (k-1)**2/(n-1)\n rcorr = r - (r-1)**2/(n-1)\n \n return np.sqrt((chi2corr/n) / (min(kcorr-1, rcorr-1)))\n\n\ndef ml_metrics(model_name, y_true, pred):\n \n accuracy = m.balanced_accuracy_score(y_true, pred)\n precision = m.precision_score(y_true, pred)\n recall = m.recall_score(y_true, pred)\n f1 = m.f1_score(y_true, pred)\n kappa = m.cohen_kappa_score(y_true, pred)\n \n return pd.DataFrame({'Balanced Accuracy': np.round(accuracy, 2), \n 'Precision': np.round(precision, 2), \n 'Recall': np.round(recall, 2),\n 'F1': np.round(f1, 2),\n 'Kappa': np.round(kappa, 2)}, index=[model_name])\n\n\ndef ml_results_cv(model_name, model, x, y):\n \n x = x.to_numpy()\n y = y.to_numpy()\n \n mms = MinMaxScaler()\n \n balanced_accuracy = []\n precision = []\n recall = []\n f1 = []\n kappa = []\n \n skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)\n \n for train_index, test_index in skf.split(x, y):\n x_train_cv, x_test_cv = x[train_index], x[test_index]\n y_train_cv, y_test_cv = y[train_index], y[test_index]\n \n x_train_cv = mms.fit_transform(x_train_cv)\n x_test_cv = mms.fit_transform(x_test_cv)\n \n model.fit(x_train_cv, y_train_cv)\n pred = model.predict(x_test_cv)\n\n balanced_accuracy.append(m.balanced_accuracy_score(y_test_cv, pred))\n precision.append(m.precision_score(y_test_cv, pred))\n recall.append(m.recall_score(y_test_cv, pred))\n f1.append(m.f1_score(y_test_cv, pred))\n kappa.append(m.cohen_kappa_score(y_test_cv, pred))\n \n acurracy_mean, acurracy_std = np.round(np.mean(balanced_accuracy), 2), 
np.round(np.std(balanced_accuracy),2)\n    precision_mean, precision_std = np.round(np.mean(precision),2), np.round(np.std(precision),2)\n    recall_mean, recall_std = np.round(np.mean(recall),2), np.round(np.std(recall),2)\n    f1_mean, f1_std = np.round(np.mean(f1),2), np.round(np.std(f1),2)\n    kappa_mean, kappa_std = np.round(np.mean(kappa),2), np.round(np.std(kappa),2)\n    \n    \n    return pd.DataFrame({\"Balanced Accuracy\": \"{} +/- {}\".format(acurracy_mean, acurracy_std),\n                          \"Precision\": \"{} +/- {}\".format(precision_mean, precision_std),\n                          \"Recall\": \"{} +/- {}\".format(recall_mean, recall_std),\n                          \"F1\": \"{} +/- {}\".format(f1_mean, f1_std),\n                          \"Kappa\": \"{} +/- {}\".format(kappa_mean, kappa_std)},\n                          index=[model_name])", "Populating the interactive namespace from numpy and matplotlib\n" ] ], [ [ "## 0.2 Loading Data", "_____no_output_____" ] ], [ [ "df_raw = pd.read_csv('data/churn.csv')", "_____no_output_____" ] ], [ [ "# 1.0 Data Description", "_____no_output_____" ] ], [ [ "df1 = df_raw.copy()", "_____no_output_____" ] ], [ [ "**RowNumber:** The row number\n\n**CustomerID:** Unique customer identifier\n\n**Surname:** The customer's surname.\n\n**CreditScore:** The customer's credit score in the consumer market.\n\n**Geography:** The country where the customer lives.\n\n**Gender:** The customer's gender.\n\n**Age:** The customer's age.\n\n**Tenure:** Number of years the customer has remained active.\n\n**Balance:** Amount of money the customer holds in their bank account.\n\n**NumOfProducts:** The number of products the customer has bought from the bank.\n\n**HasCrCard:** Indicates whether or not the customer has a credit card.\n\n**IsActiveMember:** Indicates whether the customer made at least one bank account transaction within 12 months.\n\n**EstimateSalary:** Estimate of the customer's monthly salary.\n\n**Exited:** Indicates whether or not the customer has churned.", "_____no_output_____" ], [ "## 1.1 Rename Columns", "_____no_output_____" ] ], [ [ "cols_old = ['RowNumber','CustomerId','Surname','CreditScore', 'Geography','Gender', 'Age', 'Tenure', 'Balance', 'NumOfProducts', 'HasCrCard',\n       'IsActiveMember', 'EstimatedSalary', 'Exited']", "_____no_output_____" ], [ "snakecase = lambda x: inflection.underscore(x)\n\ncols_new = list(map(snakecase, cols_old))", "_____no_output_____" ], [ "df1.columns = cols_new", "_____no_output_____" ], [ "df1.columns", "_____no_output_____" ] ], [ [ "## 1.2 Data Dimension", "_____no_output_____" ] ], [ [ "df1.shape", "_____no_output_____" ] ], [ [ "## 1.3 Data Types", "_____no_output_____" ] ], [ [ "df1.dtypes", "_____no_output_____" ] ], [ [ "## 1.4 Check NA", "_____no_output_____" ] ], [ [ "df1.isnull().sum()", "_____no_output_____" ] ], [ [ "## 1.5 Fillout NA", "_____no_output_____" ], [ "There are no NaN values in the dataset", "_____no_output_____" ], [ "## 1.6 Change Types", "_____no_output_____" ] ], [ [ "# changing the values 0 and 1 to 'yes' and 'no'. 
This will help with the data description and analysis.\n\ndf1['has_cr_card'] = df1['has_cr_card'].map({1:'yes', 0:'no'})\ndf1['is_active_member'] = df1['is_active_member'].map({1:'yes', 0:'no'})\ndf1['exited'] = df1['exited'].map({1:'yes',0:'no'})", "_____no_output_____" ] ], [ [ "## 1.7 Descriptive Statistics", "_____no_output_____" ], [ "### 1.7.1 Numerical Attributes", "_____no_output_____" ] ], [ [ "# Central tendency - mean, median\n# Dispersion - std, min, max, skew, kurtosis\n\nskew = df1.skew()\nkurtosis = df1.kurtosis()", "_____no_output_____" ], [ "metrics = pd.DataFrame(df1.describe().drop(['count','25%','75%']).T)\nmetrics = pd.concat([metrics, skew, kurtosis], axis=1)\nmetrics.columns = ['Mean','STD','Min','Median','Max','Skew','Kurtosis']\nmetrics", "_____no_output_____" ] ], [ [ "### 1.7.2 Categorical Attributes", "_____no_output_____" ] ], [ [ "cat_attributes = df1.select_dtypes(exclude=['int64', 'float64'])\ncat_attributes.apply(lambda x: x.unique().shape[0])", "_____no_output_____" ], [ "cat_attributes.describe()", "_____no_output_____" ] ], [ [ "# 2.0 Feature Engineering", "_____no_output_____" ] ], [ [ "df2 = df1.copy()", "_____no_output_____" ] ], [ [ "## 2.1 Mind Map Hypotheses", "_____no_output_____" ], [ "## 2.2 Hypotheses List", "_____no_output_____" ], [ "1. Women churn 30% more than men \n\n2. Customers with a credit score below 600 churn more\n\n3. Customers under 30 churn more\n\n4. Customers with a balance below the average churn more\n\n5. Customers with a salary above the average churn less\n\n6. Customers with a credit card and a credit score below 600 churn more\n\n7. Customers who remained active for more than 2 years churn less\n\n8. Customers who are not active churn more\n\n9. Customers with more than 1 product churn less\n\n10. 
Customers with a credit card who are active churn less\n", "_____no_output_____" ] ], [ [ "# 3.0 Variables Filtering", "_____no_output_____" ] ], [ [ "df3 = df2.copy()", "_____no_output_____" ] ], [ [ "## 3.1 Rows Filtering", "_____no_output_____" ], [ "All rows will be used for the analysis.", "_____no_output_____" ], [ "## 3.2 Columns Selection", "_____no_output_____" ] ], [ [ "# dropping columns that won't be useful\ndf3.drop(['row_number','customer_id','surname'], axis=1, inplace=True)", "_____no_output_____" ] ], [ [ "# 4.0 EDA ", "_____no_output_____" ] ], [ [ "df4 = df3.copy()", "_____no_output_____" ] ], [ [ "## 4.1 Univariate Analysis", "_____no_output_____" ], [ "### 4.1.1 Response Variable", "_____no_output_____" ] ], [ [ "sns.countplot(df4['exited'])", "_____no_output_____" ] ], [ [ "### 4.1.2 Numerical Variables", "_____no_output_____" ] ], [ [ "num_attributes = df4.select_dtypes(include=['int64','float64'])\n\nnum_attributes.hist(figsize=(15,10), bins=25);", "_____no_output_____" ] ], [ [ "### 4.1.3 Categorical Variables", "_____no_output_____" ] ], [ [ "cat_attributes = df4.select_dtypes(include='object')", "_____no_output_____" ], [ "j = 1\nfor i in cat_attributes:\n    plt.subplot(3,2,j)\n    sns.countplot(x=i, data=df4)\n    plt.tight_layout()\n    j += 1", "_____no_output_____" ] ], [ [ "## 4.2 Bivariate Analysis", "_____no_output_____" ], [ "### **H1.** Women churn 30% more than men \n**False!!** Women churn 27% more than men", "_____no_output_____" ] ], [ [ "aux = df4[['gender','exited']][df4['exited'] == 'yes'].groupby('gender').count().reset_index()\naux.sort_values(by='exited', ascending=True, inplace=True)\naux['growth'] = aux['exited'].pct_change()\naux", "_____no_output_____" ], [ "barplot('gender','exited', aux)", "_____no_output_____" ] ], [ [ "### H2. Customers with a credit score below 600 churn more\n**False!!** Customers with a credit score above 600 churn more", "_____no_output_____" ] ], [ [ "aux = df4[['credit_score','exited']][df4['exited'] == 'yes'].copy()\naux['credit_score'] = aux['credit_score'].apply(lambda x: '> 600' if x > 600 else '< 600' )\naux1 = aux[['credit_score','exited']].groupby('credit_score').count().reset_index()\naux1", "_____no_output_____" ], [ "barplot('credit_score','exited',aux1)", "_____no_output_____" ] ], [ [ "### H3. Customers under 30 churn more\n**False!!** Customers under 30 churn less", "_____no_output_____" ] ], [ [ "aux = df4[['age','exited']][df4['exited'] == 'yes'].copy()\naux['age'] = aux['age'].apply(lambda x: ' > 30' if x > 30 else ' < 30' )\naux1= aux[['age','exited']].groupby('age').count().reset_index()\naux1", "_____no_output_____" ], [ "barplot('age','exited', aux1)", "_____no_output_____" ] ], [ [ "### H4. Customers with a balance below the average churn more\n**False!!** Customers with a balance below the average churn less", "_____no_output_____" ] ], [ [ "balance_mean = df4['balance'].mean()\naux = df4[['balance','exited']][df4['exited'] =='yes'].copy()\naux['balance'] = aux['balance'].apply(lambda x: '> mean' if x > balance_mean else '< mean')\naux1 = aux[['balance','exited']].groupby('balance').count().reset_index()\naux1", "_____no_output_____" ], [ "barplot('balance','exited',aux1)", "_____no_output_____" ] ],
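[ [ "As an added sketch (not in the original notebook), the count comparisons above can also be expressed as a churn rate per group, which adjusts for group size:", "_____no_output_____" ] ], [ [ "# Added sketch: churn rate per category level; df4['exited'] is still 'yes'/'no' here.\ndef churn_rate_by(col, df=df4):\n    return df.groupby(col)['exited'].apply(lambda s: (s == 'yes').mean())\n\nchurn_rate_by('gender')", "_____no_output_____" ] ],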
[ [ "### H5. Customers with a salary above the average churn less\n**False!!** Customers with a salary above the average churn more", "_____no_output_____" ] ], [ [ "mean_salary = df4['estimated_salary'].mean()\naux = df4[['estimated_salary','exited']][df4['exited'] == 'yes'].copy()\naux['estimated_salary'] = aux['estimated_salary'].apply(lambda x: '> mean' if x > mean_salary else '< mean')\naux1 = aux[['estimated_salary','exited']].groupby('estimated_salary').count().reset_index()\naux1", "_____no_output_____" ], [ "barplot('estimated_salary','exited',aux1)", "_____no_output_____" ] ], [ [ "### H6. Customers with a credit card and a credit score below 600 churn more\n**False!!** Customers with a credit card and a score below 600 churn less", "_____no_output_____" ] ], [ [ "aux = df4[['credit_score','has_cr_card','exited']][(df4['exited'] == 'yes') & (df4['has_cr_card'] == 'yes')].copy()\naux['credit_score'] = aux['credit_score'].apply(lambda x: '> 600' if x > 600 else '< 600' )\naux1 = aux[['credit_score','exited']].groupby('credit_score').count().reset_index()\naux1", "_____no_output_____" ], [ "barplot('credit_score','exited',aux1)", "_____no_output_____" ] ], [ [ "### H7. Customers who remained active for more than 2 years churn less\n**False** Customers who remained active for more than 2 years churn more", "_____no_output_____" ] ], [ [ "aux = df4[['tenure','exited']][(df4['exited'] == 'yes')].copy()\naux['tenure'] = aux['tenure'].apply(lambda x: '> 2' if x > 2 else '< 2')\naux1 = aux[['tenure', 'exited']].groupby('tenure').count().reset_index()\naux1", "_____no_output_____" ], [ "barplot('tenure','exited',aux1)", "_____no_output_____" ] ], [ [ "### H8. Customers who are not active churn more\n**True**", "_____no_output_____" ] ], [ [ "aux = df4[['is_active_member','exited']][df4['exited'] == 'yes'].copy()\nsns.countplot(x='is_active_member', data=aux)", "_____no_output_____" ] ], [ [ "### H9. Customers with more than 1 product churn less\n**True**", "_____no_output_____" ] ], [ [ "aux = df4[['num_of_products','exited']][df4['exited']=='yes'].copy()\naux['num_of_products'] = df4['num_of_products'].apply(lambda x: '> 1' if x > 1 else '< 1')\naux1 = aux[['num_of_products','exited']].groupby('num_of_products').count().reset_index()\naux1", "_____no_output_____" ], [ "barplot('num_of_products','exited',aux1)", "_____no_output_____" ] ], [ [ "### H10. 
Customers with a credit card who are active churn less\n**False** Customers with a credit card who are active churn more", "_____no_output_____" ] ], [ [ "aux = df4[['is_active_member','exited','has_cr_card']][df4['exited'] == 'yes']\nsns.countplot(x='is_active_member', hue='has_cr_card', data=aux)", "_____no_output_____" ] ], [ [ "## 4.3 Multivariate Analysis", "_____no_output_____" ] ], [ [ "# changing back to numerical for use in numerical attributes analysis\ndf4['has_cr_card'] = df4['has_cr_card'].map({'yes':1, 'no':0})\ndf4['is_active_member'] = df4['is_active_member'].map({'yes':1, 'no':0})\ndf4['exited'] = df4['exited'].map({'yes':1, 'no':0})", "_____no_output_____" ] ], [ [ "### 4.3.1 Numerical Attributes", "_____no_output_____" ] ], [ [ "num_attributes = df4.select_dtypes(include=['int64','float64'])", "_____no_output_____" ], [ "correlation = num_attributes.corr(method='pearson')\n\nsns.heatmap(correlation, annot=True)", "_____no_output_____" ] ], [ [ "### 4.3.2 Categorical Attributes", "_____no_output_____" ] ], [ [ "a = df4.select_dtypes(include='object')\na.head()", "_____no_output_____" ], [ "# calculate cramer v\na1 = cramer_v(a['geography'], a['gender'])\na2 = cramer_v(a['geography'], a['geography'])\n\na3 = cramer_v(a['gender'], a['gender'])\na4 = cramer_v(a['gender'], a['geography'])\n\nd = pd.DataFrame({'geography': [a1,a2], 'gender': [a3,a4]})\nd.set_index(d.columns)", "_____no_output_____" ], [ "sns.heatmap(d, annot=True)", "_____no_output_____" ] ], [ [ "# 5.0 Data Preparation", "_____no_output_____" ] ], [ [ "df5 = df4.copy()", "_____no_output_____" ] ], [ [ "## 5.1 Split dataframe into training, test and validation datasets", "_____no_output_____" ] ], [ [ "X = df5.drop('exited', axis=1).copy()\ny = df5['exited'].copy()", "_____no_output_____" ], [ "# train dataset\nX_train, X_rem, y_train, y_rem = train_test_split(X,y,train_size=0.8, random_state=42, stratify=y)", "_____no_output_____" ], [ "# validation, test dataset\nX_valid, X_test, y_valid, y_test = train_test_split(X_rem, y_rem, test_size=0.5, random_state=42, stratify=y_rem)", "_____no_output_____" ], [ "X_test9 = X_test.copy()\ny_test9 = y_test.copy()", "_____no_output_____" ] ], [ [ "## 5.2 Rescaling", "_____no_output_____" ] ], [ [ "mms = MinMaxScaler()\nrs = RobustScaler()\n\n# credit score - min-max scaler\nX_train['credit_score'] = mms.fit_transform(X_train[['credit_score']].values)\nX_test['credit_score'] = mms.fit_transform(X_test[['credit_score']].values)\nX_valid['credit_score'] = mms.fit_transform(X_valid[['credit_score']].values)\n\n# age - robust scaler\nX_train['age'] = rs.fit_transform(X_train[['age']].values)\nX_test['age'] = rs.fit_transform(X_test[['age']].values)\nX_valid['age'] = rs.fit_transform(X_valid[['age']].values)\n\n# balance - min-max scaler\nX_train['balance'] = mms.fit_transform(X_train[['balance']].values)\nX_test['balance'] = mms.fit_transform(X_test[['balance']].values)\nX_valid['balance'] = mms.fit_transform(X_valid[['balance']].values)\n\n\n# estimated salary - min-max scaler\nX_train['estimated_salary'] = mms.fit_transform(X_train[['estimated_salary']].values)\nX_test['estimated_salary'] = mms.fit_transform(X_test[['estimated_salary']].values)\nX_valid['estimated_salary'] = mms.fit_transform(X_valid[['estimated_salary']].values)\n\n# tenure - min-max scaler\nX_train['tenure'] = mms.fit_transform(X_train[['tenure']].values)\nX_test['tenure'] = mms.fit_transform(X_test[['tenure']].values)\nX_valid['tenure'] = 
mms.fit_transform(X_valid[['tenure']].values)\n\n# num of products - min-max scaler\nX_train['num_of_products'] = mms.fit_transform(X_train[['num_of_products']].values)\nX_test['num_of_products'] = mms.fit_transform(X_test[['num_of_products']].values)\nX_valid['num_of_products'] = mms.fit_transform(X_valid[['num_of_products']].values)", "_____no_output_____" ] ], [ [ "## 5.3 Encoding", "_____no_output_____" ] ], [ [ "le = LabelEncoder()\n# gender\ndic = {'Female':0, 'Male':1}\nX_train['gender'] = X_train['gender'].map(dic)\nX_test['gender'] = X_test['gender'].map(dic)\nX_valid['gender'] = X_valid['gender'].map(dic)\n\n# geography\nX_train['geography'] = le.fit_transform(X_train['geography'])\nX_test['geography'] = le.fit_transform(X_test['geography'])\nX_valid['geography'] = le.fit_transform(X_valid['geography'])", "_____no_output_____" ] ], [ [ "# 6.0 Feature Selection", "_____no_output_____" ], [ "## 6.1 Boruta as feature selector", "_____no_output_____" ] ], [ [ "#X_boruta = X_train.values\n#y_boruta = y_train.values.ravel()", "_____no_output_____" ], [ "#rf = RandomForestClassifier(n_jobs=-1, class_weight='balanced')\n#boruta = BorutaPy(rf, n_estimators='auto', verbose=2, random_state=42)\n#boruta.fit(X_boruta, y_boruta)", "_____no_output_____" ], [ "#cols_selected = boruta.support_.tolist()", "_____no_output_____" ], [ "#cols_selected_boruta = X_train.iloc[:, cols_selected].columns.to_list()", "_____no_output_____" ], [ "cols_selected_boruta = ['age', 'balance', 'num_of_products']", "_____no_output_____" ] ], [ [ "## 6.2 Feature Importance", "_____no_output_____" ] ], [ [ "rf = RandomForestClassifier()\nrf.fit(X_train, y_train)\nimportance = rf.feature_importances_", "_____no_output_____" ], [ "for i,v in enumerate(importance):\n    print('Feature: %0d, Score: %.5f' % (i,v))\n# plot feature importance\nfeature_importance = pd.DataFrame({'feature':X_train.columns,\n              'feature_importance':importance}).sort_values('feature_importance', ascending=False).reset_index()\nsns.barplot(x='feature_importance', y='feature', data=feature_importance, orient='h', color='royalblue').set_title('Feature Importance');", "_____no_output_____" ], [ "cols_selected_importance = feature_importance['feature'].head(6).copy()\ncols_selected_importance = cols_selected_importance.tolist()", "_____no_output_____" ] ], [ [ "## 6.3 Columns Selected", "_____no_output_____" ], [ " - The columns selected to train the model will be the ones picked by Boruta plus the 6 best ranked by the Random Forest", "_____no_output_____" ] ], [ [ "cols_selected_importance", "_____no_output_____" ], [ "cols_selected_boruta", "_____no_output_____" ], [ "#cols_selected = ['age', 'balance', 'num_of_products', 'estimated_salary', 'credit_score','tenure']\ncols_selected = ['age', 'balance', 'num_of_products', 'estimated_salary', 'credit_score','tenure','is_active_member','gender','has_cr_card','geography']", "_____no_output_____" ] ], [ [ "# 7.0 Machine Learning Modeling", "_____no_output_____" ] ], [ [ "X_train = X_train[cols_selected]\nX_test = X_test[cols_selected]\nX_valid = X_valid[cols_selected]", "_____no_output_____" ] ], [ [ "## 7.1 Baseline Model", "_____no_output_____" ] ], [ [ "dummy = DummyClassifier()\ndummy.fit(X_train, y_train)\npred = dummy.predict(X_valid)", "_____no_output_____" ], [ "print(m.classification_report(y_valid, pred))", "              precision    recall  f1-score   support\n\n           0       0.80      1.00      0.89       796\n           1       0.00      0.00      0.00       204\n\n    accuracy                           0.80      1000\n   macro avg       0.40      0.50      0.44      1000\nweighted avg       0.63      0.80      0.71      1000\n\n" ], [ 
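"# Added illustration (not in the original notebook): confusion matrix for the\n# baseline predictions, using the sklearn.metrics module already imported as m.\nprint(m.confusion_matrix(y_valid, pred))", "_____no_output_____" ], [ 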
"dummy_result = ml_metrics('dummy', y_valid, pred)\ndummy_result", "_____no_output_____" ] ], [ [ "### Cross Validation", "_____no_output_____" ] ], [ [ "dummy_result_cv = ml_results_cv('dummy_CV', DummyClassifier(), X_train, y_train)\ndummy_result_cv", "_____no_output_____" ] ], [ [ "## 7.2 Logistic Regression", "_____no_output_____" ] ], [ [ "lg = LogisticRegression(class_weight='balanced')\nlg.fit(X_train, y_train)\npred = lg.predict(X_valid)", "_____no_output_____" ], [ "print(m.classification_report(y_valid, pred))", " precision recall f1-score support\n\n 0 0.91 0.70 0.79 796\n 1 0.39 0.73 0.51 204\n\n accuracy 0.71 1000\n macro avg 0.65 0.72 0.65 1000\nweighted avg 0.80 0.71 0.73 1000\n\n" ], [ "logistic_regression_result = ml_metrics('LogisticRegression', y_valid, pred)\nlogistic_regression_result", "_____no_output_____" ] ], [ [ "### Cross Validation", "_____no_output_____" ] ], [ [ "logistic_regression_result_cv = ml_results_cv('LogisticRegression_CV', LogisticRegression(class_weight='balanced'), X_train, y_train)\nlogistic_regression_result_cv", "_____no_output_____" ] ], [ [ "## 7.3 KNN", "_____no_output_____" ] ], [ [ "knn = KNeighborsClassifier()\nknn.fit(X_train, y_train)\npred = knn.predict(X_valid)", "_____no_output_____" ], [ "print(m.classification_report(y_valid, pred))", " precision recall f1-score support\n\n 0 0.84 0.96 0.89 796\n 1 0.63 0.29 0.40 204\n\n accuracy 0.82 1000\n macro avg 0.73 0.62 0.65 1000\nweighted avg 0.80 0.82 0.79 1000\n\n" ], [ "knn_result = ml_metrics('KNN', y_valid, pred)\nknn_result", "_____no_output_____" ] ], [ [ "### Cross Validaton", "_____no_output_____" ] ], [ [ "knn_result_cv = ml_results_cv('KNN_CV', KNeighborsClassifier(), X_train, y_train)\nknn_result_cv", "_____no_output_____" ] ], [ [ "## 7.4 Naive Bayes", "_____no_output_____" ] ], [ [ "nb = GaussianNB()\nnb.fit(X_train, y_train)\npred = nb.predict(X_valid)", "_____no_output_____" ], [ "print(m.classification_report(y_valid, pred))", " precision recall f1-score support\n\n 0 0.83 0.99 0.90 796\n 1 0.83 0.22 0.34 204\n\n accuracy 0.83 1000\n macro avg 0.83 0.60 0.62 1000\nweighted avg 0.83 0.83 0.79 1000\n\n" ], [ "naive_bayes_result = ml_metrics('Naive Bayes', y_valid, pred)\nnaive_bayes_result", "_____no_output_____" ] ], [ [ "### Cross Validation", "_____no_output_____" ] ], [ [ "naive_bayes_result_cv = ml_results_cv('Naive Bayes_CV', GaussianNB(), X_train, y_train)\nnaive_bayes_result_cv", "_____no_output_____" ] ], [ [ "## 7.5 SVC", "_____no_output_____" ] ], [ [ "svc = SVC(class_weight='balanced')\nsvc.fit(X_train, y_train)\npred = svc.predict(X_valid)", "_____no_output_____" ], [ "svc_result = ml_metrics('SVC', y_valid, pred)\nsvc_result", "_____no_output_____" ] ], [ [ "### Cross Validation", "_____no_output_____" ] ], [ [ "svc_result_cv = ml_results_cv('SVC_cv', SVC(class_weight='balanced'), X_train, y_train)\nsvc_result_cv", "_____no_output_____" ] ], [ [ "## 7.6 Random Forest", "_____no_output_____" ] ], [ [ "rf = RandomForestClassifier(class_weight='balanced')\nrf.fit(X_train, y_train)\npred = rf.predict(X_valid)", "_____no_output_____" ], [ "pred_proba = rf.predict_proba(X_valid)", "_____no_output_____" ], [ "print(m.classification_report(y_valid, pred))", " precision recall f1-score support\n\n 0 0.87 0.98 0.92 796\n 1 0.85 0.40 0.55 204\n\n accuracy 0.86 1000\n macro avg 0.86 0.69 0.73 1000\nweighted avg 0.86 0.86 0.84 1000\n\n" ], [ "rf_result = ml_metrics('Random Forest', y_valid, pred)\nrf_result", "_____no_output_____" ] ], [ [ "### Cross Validation", 
"_____no_output_____" ] ], [ [ "rf_result_cv = ml_results_cv('Random Forest_CV', RandomForestClassifier(class_weight='balanced'), X_train, y_train)\nrf_result_cv", "_____no_output_____" ] ], [ [ "## 7.7 XGBoost", "_____no_output_____" ] ], [ [ "xgb = XGBClassifier(scale_pos_weight=80, objective='binary:logistic', verbosity=0)\nxgb.fit(X_train, y_train)\npred = xgb.predict(X_valid)", "_____no_output_____" ], [ "xgb_result = ml_metrics('XGBoost', y_valid, pred)\nxgb_result", "_____no_output_____" ], [ "print(m.classification_report(y_valid, pred))", " precision recall f1-score support\n\n 0 0.92 0.67 0.77 796\n 1 0.37 0.78 0.51 204\n\n accuracy 0.69 1000\n macro avg 0.65 0.72 0.64 1000\nweighted avg 0.81 0.69 0.72 1000\n\n" ], [ "xgb_result = ml_metrics('XGBoost', y_valid, pred)\nxgb_result", "_____no_output_____" ] ], [ [ "### Cross Validation", "_____no_output_____" ] ], [ [ "xbg_result_cv = ml_results_cv('XGBoost_CV', XGBClassifier(scale_pos_weight=80, objective='binary:logistic', verbosity=0), X_train, y_train)\nxbg_result_cv", "_____no_output_____" ] ], [ [ "## 7.8 Results", "_____no_output_____" ] ], [ [ "df_results = pd.concat([dummy_result, logistic_regression_result, knn_result, naive_bayes_result, svc_result, rf_result, xgb_result])\ndf_results.style.highlight_max(color='lightgreen', axis=0)", "_____no_output_____" ] ], [ [ "## 7.9 Results Cross Validation", "_____no_output_____" ] ], [ [ "df_results_cv = pd.concat([dummy_result_cv, logistic_regression_result_cv, knn_result_cv, naive_bayes_result_cv, svc_result_cv, rf_result_cv, xbg_result_cv])\ndf_results_cv", "_____no_output_____" ] ], [ [ "# 8.0 Hyperparameter Fine Tuning", "_____no_output_____" ], [ "## 8.1 Random Search", "_____no_output_____" ] ], [ [ "# setting some parameters for testing\n\n# Number of trees in random forest\nn_estimators = [int(x) for x in np.linspace(start = 200, stop = 2000, num = 10)]\n# Maximum number of levels in tree\nmax_depth = [int(x) for x in np.linspace(10, 110, num = 11)]\nmax_depth.append(None)\n# eta\neta = [0.01,0.03]\n# subsample\nsubsample = [0.1,0.5,0.7]\n# cols sample\ncolssample_bytree = [0.3,0.7,0.9]\n# min_child_weight\nmin_child_weight = [3,8,15]\n\nrandom_grid = {'n_estimators': n_estimators,\n 'max_depth': max_depth,\n 'eta': eta,\n 'subsample': subsample,\n 'colssample_bytree': colssample_bytree,\n 'min_child_weight': min_child_weight}", "_____no_output_____" ], [ "xgb_grid = XGBClassifier()\nxgb_random = RandomizedSearchCV(estimator = xgb_grid, param_distributions = random_grid, n_iter = 100, cv = 3, verbose=2, random_state=42, n_jobs = -1)\n#xgb_random.fit(X_train, y_train)", "_____no_output_____" ], [ "#xgb_random.best_params_", "_____no_output_____" ], [ "best_params = {'subsample': 0.7,\n 'n_estimators': 1000,\n 'min_child_weight': 3,\n 'max_depth': 30,\n 'eta': 0.03,\n 'colssample_bytree': 0.7}", "_____no_output_____" ] ], [ [ "## 8.2 Results", "_____no_output_____" ] ], [ [ "xgb = XGBClassifier(objective='binary:logistic', n_estimators = 1000, eta=0.03, subsample = 0.7, min_child_weight = 3, max_depth = 30, colssample_bytree = 0.7, scale_pos_weight=80, verbosity=0)\nxgb.fit(X_train, y_train)\npred = xgb.predict(X_valid)", "_____no_output_____" ], [ "xgb_result = ml_metrics('XGBoost', y_valid, pred)\nxgb_result", "_____no_output_____" ] ], [ [ "### Cross Validaton", "_____no_output_____" ] ], [ [ "xgboost_cv = ml_results_cv('XGBoost_CV', XGBClassifier(objective='binary:logistic', n_estimators = 1000, eta=0.03, subsample = 0.7, min_child_weight = 3, max_depth = 30, 
colsample_bytree = 0.7, scale_pos_weight=80, verbosity=0), \n                          X_train, y_train)\nxgboost_cv", "_____no_output_____" ] ], [ [ "# 9.0 Conclusions", "_____no_output_____" ], [ "## 9.1 Final Model", "_____no_output_____" ] ], [ [ "# model performance with unseen data\nxgb_final_model = XGBClassifier(objective='binary:logistic', n_estimators = 1000, eta=0.03, subsample = 0.7, min_child_weight = 3, max_depth = 30, colsample_bytree = 0.7, scale_pos_weight=80, verbosity=0)\nxgb_final_model.fit(X_train, y_train)\npred_final = xgb_final_model.predict(X_test)\npred_final_proba = xgb_final_model.predict_proba(X_test)\n\nxgb_final_model_result = ml_metrics('XGBoost', y_test, pred_final)\nxgb_final_model_result", "_____no_output_____" ] ], [ [ "## 9.2 Business Questions", "_____no_output_____" ] ], [ [ "df9 = df2.copy()", "_____no_output_____" ] ], [ [ "### 9.2.1 What is TopBank's current churn rate? ", "_____no_output_____" ] ], [ [ "churn_rate = df9['exited'].value_counts(normalize=True).reset_index()\nchurn_rate['exited'] = churn_rate['exited']*100\nchurn_rate.columns = ['churn', 'exited (%)']\nchurn_rate", "_____no_output_____" ], [ "sns.countplot(df9['exited']).set_title('Churn Rate')", "_____no_output_____" ] ], [ [ "**TopBank's current churn rate is 20.4%**", "_____no_output_____" ], [ "### 9.2.2 What is the expected return, in terms of revenue, if the company uses the model to prevent customer churn?", "_____no_output_____" ], [ " - A sample of 1000 customers (10% of the dataset) was used to compute the financial return. \n - The model's final predictions were used for comparison against the actual data.\n ", "_____no_output_____" ] ], [ [ "aux = pd.concat([X_test9, y_test9], axis=1)\nmean_salary = df9['estimated_salary'].mean()", "_____no_output_____" ], [ "aux['pred_exited'] = pred_final", "_____no_output_____" ], [ "aux['client_return'] = aux['estimated_salary'].apply(lambda x: x*0.15 if x < mean_salary else x*0.20)", "_____no_output_____" ] ], [ [ " - Total return computed over all customers in the sample who churned", "_____no_output_____" ] ], [ [ "total_return = aux[aux['exited'] == 1]['client_return'].sum()\nprint('The total return of all customers who churned is ${}'.format(total_return))", "The total return of all customers who churned is $3658649.9845000003\n" ] ], [ [ "- Selecting the customers the model correctly predicted would churn. \n- If it were possible to keep all of these customers from churning, roughly 70% of the total amount computed above could be recovered. ", "_____no_output_____" ] ], [ [ "churn_return = aux[(aux['pred_exited'] == 1) & (aux['exited'] == 1)]['client_return'].sum()\nprint('The total return of the customers the model predicted would churn is ${}'.format(churn_return))", "The total return of the customers the model predicted would churn is $2540855.073\n" ] ],
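[ [ "As a quick added check (not in the original notebook), the \"roughly 70%\" share quoted above can be computed directly:", "_____no_output_____" ] ], [ [ "# Added check: share of the total churn return captured by the model.\nprint(churn_return / total_return)", "_____no_output_____" ] ],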
[ [ "### 9.2.3 Financial Incentive", "_____no_output_____" ], [ "One possible action to keep a customer from churning is to offer a discount coupon, or some other financial incentive, for them to renew the contract for another 12 months.\n- Which customers would you give the financial incentive to, and how large should it be, so as to maximize ROI (return on investment)? Keep in mind that the sum of the incentives cannot exceed $10,000.00", "_____no_output_____" ], [ "Still considering the sample of 1000 customers, it was possible to analyze each customer's churn probability according to the algorithm and decide how the financial incentive would be offered. After some analysis, the following strategies were defined (only customers the algorithm predicted as \"positive\" for churn were considered):\n\n- A cutoff point (threshold) of 0.95 was defined, i.e., each customer's churn probability was compared against this cutoff, and from that, \"groups\" that would receive the incentive were defined.\n\n    - Customers with a probability above 95% would not receive the incentive, since they were considered so likely to churn that it would be very hard to convince them to renew the contract even with a financial incentive. \n    - Customers with a probability above 90% and below 95% would receive an incentive of 250.\n    - Customers with a probability between 90% and 70% would receive an incentive of 200.\n    - Customers with a probability below 70% would receive an incentive of 100.", "_____no_output_____" ] ], [ [ "threshold = 0.95", "_____no_output_____" ], [ "proba_list = []\nfor i in range (len(pred_final_proba)):\n    proba = pred_final_proba[i][1]\n    proba_list.append(proba)", "_____no_output_____" ], [ "aux['pred_exited_proba'] = proba_list", "_____no_output_____" ], [ "aux2 = aux[(aux['exited'] == 1) & (aux['pred_exited'] ==1)]", "_____no_output_____" ], [ "aux2 = aux2[aux2['pred_exited_proba'] < threshold]", "_____no_output_____" ], [ "aux2.sample(10)", "_____no_output_____" ], [ "# set the incentive according to the churn probability \naux2['destinated_budget'] = aux2['pred_exited_proba'].apply(lambda x: 250 if x > 0.9 else 200 if ((x < 0.9) & (x > 0.7)) else 100 )", "_____no_output_____" ] ], [ [ "- Assuming it were possible to keep every customer who received the incentive from churning, and hence have them renew their contracts, it would be possible to obtain a financial return of $938,235.39", "_____no_output_____" ] ], [ [ "total_return = aux2['client_return'].sum()\nprint('The total financial return from the customers who received the incentive was $ {}'.format(total_return))", "The total financial return from the customers who received the incentive was $ 1602619.6835\n" ] ], [ [ "# 10.0 Deploy", "_____no_output_____" ] ], [ [ "#saving models\n\nfinal_model = XGBClassifier(objective='binary:logistic', n_estimators = 1000, eta=0.03, subsample = 0.7, min_child_weight = 3, \n                            max_depth = 30, colsample_bytree = 0.7, scale_pos_weight=80, verbosity=0)\nfinal_model.fit(X_train, y_train)\n\njoblib.dump(final_model, 'Model/final_model_XGB.joblib')", "_____no_output_____" ], [ "mm = MinMaxScaler()\nle = LabelEncoder()\n\njoblib.dump(mm, 'Parameters/scaler_mm.joblib')\njoblib.dump(le, 'Parameters/label_encoder.joblib')", "_____no_output_____" ] ] ],
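[ [ "Added check (not in the original notebook): the incentive budget constraint mentioned earlier can be verified against the assigned amounts:", "_____no_output_____" ] ], [ [ "# Added check: the sum of all incentives must stay under the 10,000 budget.\nprint(aux2['destinated_budget'].sum())", "_____no_output_____" ] ],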
[ [ "## 10.1 Churn Class", "_____no_output_____" ] ], [ [ "import joblib\nimport pandas as pd\nimport inflection\n\nclass Churn(object):\n    \n    def __init__(self):\n        self.scaler = joblib.load('Parameters/scaler_mm.joblib')\n        self.encoder_le = joblib.load('Parameters/label_encoder.joblib')\n    \n    def data_cleaning(self, df1):\n        # rename columns\n        cols_old = ['RowNumber', 'CustomerId', 'Surname', 'CreditScore', 'Geography',\n       'Gender', 'Age', 'Tenure', 'Balance', 'NumOfProducts', 'HasCrCard',\n       'IsActiveMember', 'EstimatedSalary', 'Exited']\n        \n        snakecase = lambda x: inflection.underscore(x)\n\n        cols_new = list(map(snakecase, cols_old))\n\n        df1.columns = cols_new\n        \n        return df1\n    \n    def feature_engineering(self, df2):\n        cols_drop = ['row_number','customer_id','surname']\n        df2 = df2.drop(cols_drop, axis=1)\n        \n        return df2\n    \n    def data_preparation(self, df3):\n        # rescaling\n        mm_columns = ['credit_score', 'age', 'balance', 'estimated_salary', 'tenure', 'num_of_products']\n        df3[mm_columns] = self.scaler.fit_transform(df3[mm_columns])\n        \n        df3['geography'] = self.encoder_le.fit_transform(df3['geography'])\n        \n        gender = {'Female':0, 'Male':1}\n        df3['gender'] = df3['gender'].map(gender)\n        \n        return df3\n    \n    def get_prediction(self, model, original_data, test_data):\n        \n        pred = model.predict(test_data)\n        \n        original_data['prediction'] = pred\n        \n        return original_data.to_json(orient='records', date_format='iso')", "_____no_output_____" ] ] ], [ [ "## 10.2 API Handler", "_____no_output_____" ] ], [ [ "import joblib\nimport pandas as pd\nfrom churn.Churn import Churn\nfrom flask import Flask, request, Response\n\nmodel = joblib.load('Model/final_model_XGB.joblib')\n\n# initialize API\napp = Flask(__name__)\n\[email protected]('/churn/predict', methods=['POST'])\n\ndef churn_predict():\n    test_json = request.get_json()\n    \n    if test_json: # there is data\n        if isinstance(test_json, dict): # unique example\n            test_raw = pd.DataFrame(test_json, index=[0])\n            \n        else: # multiple examples\n            test_raw = pd.DataFrame(test_json, columns=test_json[0].keys())\n            \n        \n        pipeline = Churn()\n\n        # data cleaning\n        df1 = pipeline.data_cleaning(test_raw)\n\n        # feature engineering\n        df2 = pipeline.feature_engineering(df1)\n\n        # data preparation\n        df3 = pipeline.data_preparation(df2)\n\n        # prediction\n        df_response = pipeline.get_prediction(model, test_raw, df3)\n\n        return df_response\n\n\n    else:\n        return Response('{}', status=200, mimetype='application/json')\n\nif __name__ == '__main__':\n    app.run('127.0.0.1') ", "_____no_output_____" ] ] ], [ [ "## 10.3 API Tester", "_____no_output_____" ] ], [ [ "df10 = pd.read_csv('data/churn.csv')", "_____no_output_____" ], [ "# convert dataframe to json\ndata = df10.to_json()", "_____no_output_____" ], [ "url = 'http://127.0.0.1:5000/churn/predict'\nheader = {'Content-type': 'application/json'}\n\nr = requests.post(url=url, data=data, headers=header)", "_____no_output_____" ], [ 
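"# Added sketch (not part of the original run): the handler above rebuilds the\n# DataFrame from a list of records when several rows are sent, so a\n# records-oriented payload is the safer choice.\ndata = df10.to_json(orient='records')\nr = requests.post(url=url, data=data, headers=header)", "_____no_output_____" ], [ 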
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
e7d0fe6080119e73b1f9e80306507d730fbada71
10,255
ipynb
Jupyter Notebook
Untitled2.ipynb
mohsinhussainpk/ssd
bd398170539539f8da4365744ec7711e5d7bad5f
[ "Apache-2.0" ]
1
2018-08-23T07:07:57.000Z
2018-08-23T07:07:57.000Z
Untitled2.ipynb
mohsinhussainpk/coreml
bd398170539539f8da4365744ec7711e5d7bad5f
[ "Apache-2.0" ]
null
null
null
Untitled2.ipynb
mohsinhussainpk/coreml
bd398170539539f8da4365744ec7711e5d7bad5f
[ "Apache-2.0" ]
null
null
null
90.752212
1,709
0.658216
[ [ [ "import keras\nfrom keras_layers.keras_layer_AnchorBoxes import AnchorBoxes\nfrom keras_loss_function.keras_ssd_loss import SSDLoss\n\n\n\nfrom keras.models import load_model\nmodel = load_model('my_smodel.h5', custom_objects={'AnchorBoxes':AnchorBoxes})\n", "_____no_output_____" ], [ "import keras\nfrom keras.models import load_model\nfrom keras_layers.keras_layer_AnchorBoxes import AnchorBoxes\nfrom keras_loss_function.keras_ssd_loss import SSDLoss\n\nssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)\n\n\n\nmodel = load_model('my_smodel.h5', custom_objects={'AnchorBoxes': AnchorBoxes,\n 'compute_loss': ssd_loss.compute_loss})", "_____no_output_____" ], [ "import coremltools\n\n\ncoreml_model = coremltools.converters.keras.convert(model)\nmodel = coreml_model.save(\"ssd.mlmodel\")\n\n \n ", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code" ] ]
e7d10e88b5de88453583795061cb5154544c2176
9,696
ipynb
Jupyter Notebook
Other/Behavior Design Pattern - Mediator.ipynb
deepaksood619/Python-Competitive-Programming
c8353d732a372c2bc62f5f12169acc421e802d0c
[ "MIT" ]
null
null
null
Other/Behavior Design Pattern - Mediator.ipynb
deepaksood619/Python-Competitive-Programming
c8353d732a372c2bc62f5f12169acc421e802d0c
[ "MIT" ]
null
null
null
Other/Behavior Design Pattern - Mediator.ipynb
deepaksood619/Python-Competitive-Programming
c8353d732a372c2bc62f5f12169acc421e802d0c
[ "MIT" ]
null
null
null
34.382979
1,096
0.532694
[ [ [ "https://py.checkio.org/blog/design-patterns-part-2/\n\nhttps://py.checkio.org/en/mission/dialogues/share/07bc869edadfc11858e1caeaa4415987/", "_____no_output_____" ] ], [ [ "windows = dict.fromkeys(['main', 'settings', 'help'])\nwindows", "_____no_output_____" ], [ "text = \"\"\"Karl said: Hi! What's new?R2D2 said: Hello, human. Could we speak later about it?\"\"\"\n\ntext = text.replace('a', '0').replace('e', '0').replace('i', '0').replace('o', '0').replace('u', '0').replace('A', '0').replace('E', '0').replace('I', '0').replace('O', '0').replace('U', '0')\ntext.replace()", "_____no_output_____" ], [ "import re\ns = \"\"\"Karl said: Hi! What's new?R2D2 said: Hello, human. Could we speak later about it?\"\"\"\nreplaced = re.sub('[aeiouAEIOU]', '0', s)\nreplaced = re.sub('[^0]', '1', replaced)\nprint (replaced )", "101111001111011110111110111111110011110110111010111100111101110011101011010011011\n" ], [ "import re\n\n\nclass Chat:\n def __init__(self):\n self.human = None\n self.robot = None\n self.human_dialogue = ''\n self.robot_dialogue = ''\n \n def connect_human(self, human):\n self.human = human\n \n def connect_robot(self, robot):\n self.robot = robot\n \n def send(self, text, name):\n self.human_dialogue += '{} said: {}'.format(name, text)\n self.robot_dialogue += '{} said: {}'.format(name, self.convert_to_robot_lang(text))\n \n def show_human_dialogue(self):\n return self.human_dialogue\n \n def show_robot_dialogue(self):\n return self.robot_dialogue\n\n def convert_to_robot_lang(self, text):\n text = re.sub('[aeiou]', '0', text)\n return re.sub('[^0]', '1', text)\n\n\nclass Human:\n def __init__(self, name):\n self.name = name\n \n def send(self, text):\n super().send(text, self.name)\n\n\nclass Robot:\n def __init__(self, serial_number):\n self.serial_number = serial_number\n \n def send(self, text):\n super().send(text, self.name)\n\n\nif __name__ == '__main__':\n #These \"asserts\" using only for self-checking and not necessary for auto-testing\n\n chat = Chat()\n karl = Human(\"Karl\")\n bot = Robot(\"R2D2\")\n chat.connect_human(karl)\n chat.connect_robot(bot)\n karl.send(\"Hi! What's new?\")\n bot.send(\"Hello, human. Could we speak later about it?\")\n assert chat.show_human_dialogue() == \"\"\"Karl said: Hi! What's new?\nR2D2 said: Hello, human. Could we speak later about it?\"\"\"\n assert chat.show_robot_dialogue() == \"\"\"Karl said: 101111011111011\nR2D2 said: 10110111010111100111101110011101011010011011\"\"\"\n\n print(\"Coding complete? 
Let's try tests!\")\n", "_____no_output_____" ], [ "class Parent:\n \n def __init__(self):\n self.parent_variable = 'Parent'\n \nclass Child(Parent):\n def __init__(self):\n super().__init__()\n self.child_variable = 'Child'\n \n def print_val(self):\n print(self.child_variable)\n print(self.parent_variable)\n \nchild = Child()\nchild.print_val()", "Child\nParent\n" ], [ "class Dog():\n \"\"\"Represent a dog.\"\"\"\n\n def __init__(self, name):\n \"\"\"Initialize dog object.\"\"\"\n self.name = name\n\n def sit(self):\n \"\"\"Simulate sitting.\"\"\"\n print(self.name + ' is sitting.')\n\nmy_dog = Dog('Tommy')\nprint(my_dog.name + ' is a great dog!')\nmy_dog.sit()", "Tommy is a great dog!\nTommy is sitting.\n" ], [ "class SDog(Dog):\n \"\"\"Represent a search dog.\"\"\"\n\n def __init__(self, name):\n \"\"\"Initialize the search dog.\"\"\"\n super().__init__(name)\n\n def search(self):\n \"\"\"Simulate searching.\"\"\"\n print(self.name + ' is searching.')\n\nmy_dog = SDog('Lucy')\n\nprint(my_dog.name + ' is a search dog.')\nmy_dog.sit()\nmy_dog.search()", "Lucy is a search dog.\nLucy is sitting.\nLucy is searching.\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
e7d1191af56c30694d687f74f0da9bc8a55e1765
939,180
ipynb
Jupyter Notebook
Portfolio/TS_Traditional_Methods.ipynb
anujakapre/E-commerce-Market-Analysis-
c324d62d64d45e5b5ba97090a4169e5ef685d061
[ "MIT" ]
1
2019-09-19T15:18:34.000Z
2019-09-19T15:18:34.000Z
Portfolio/TS_Traditional_Methods.ipynb
anujakapre/E-commerce-Market-Analysis-
c324d62d64d45e5b5ba97090a4169e5ef685d061
[ "MIT" ]
null
null
null
Portfolio/TS_Traditional_Methods.ipynb
anujakapre/E-commerce-Market-Analysis-
c324d62d64d45e5b5ba97090a4169e5ef685d061
[ "MIT" ]
null
null
null
856.134913
289,620
0.950751
[ [ [ "from statsmodels.tsa.holtwinters import ExponentialSmoothing,SimpleExpSmoothing, Holt\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nimport statsmodels.api as sm\nimport numpy as np\nfrom math import sqrt\nfrom sklearn.metrics import mean_squared_error, mean_absolute_error\nfrom scipy import stats\nfrom pylab import rcParams\nimport seaborn as sns", "_____no_output_____" ] ], [ [ "## Approach 2: Use Traditional statestical models\n\nIn this notebook we will discuss following models on daily sampled data.\n\n 1. MA\n 2. Simple Exponential Smoothing\n 3. Holt Linear\n 4. Holt-Winters\n These models are implemented using statsmodels library.\n \n**Objective: Implement above models and calculate RMSE to compare reults with Approach 1.**\n 1. Load previously created daily sampled data and decompose the time series\n 2. Fit each model and predict test data\n 3. Calculate RMSE and MAE\n 4. Compare results with Approach 1", "_____no_output_____" ] ], [ [ "# Load data\ndata = pd.read_csv(\"daily_data.csv\",parse_dates=[0], index_col=0)\ndata.head()\n", "_____no_output_____" ] ], [ [ "### Decompose time series\n\nA series is thought to be an aggregate or combination of these four components.All series have a level and noise. The trend and seasonality components are optional.\nThese components combine either additively or multiplicatively.\n\n#### Additive Model\nAn additive model suggests that the components are added together as follows:\n\t\ny(t) = Level + Trend + Seasonality + Noise\n\nAn additive model is linear where changes over time are consistently made by the same amount.<br>\nA linear trend is a straight line.<br>\nA linear seasonality has the same frequency (width of cycles) and amplitude (height of cycles).<br>\n\n#### Multiplicative Model\nA multiplicative model suggests that the components are multiplied together as follows:\n\ny(t) = Level * Trend * Seasonality * Noise\n\nA multiplicative model is nonlinear, such as quadratic or exponential. Changes increase or decrease over time.<br>\nA nonlinear trend is a curved line.<br>\nA non-linear seasonality has an increasing or decreasing frequency and/or amplitude over time.<br>\n\nReferance: https://machinelearningmastery.com/decompose-time-series-data-trend-seasonality/", "_____no_output_____" ] ], [ [ "#Decompose time series into trend, seasonality and noise\nrcParams['figure.figsize'] = 11, 9\nresult = sm.tsa.seasonal_decompose(data, model='additive')\nresult.plot()\nplt.show()\n", "_____no_output_____" ], [ "#Print trend, seasality, residual\nprint(result.trend)\nprint(result.seasonal)\nprint(result.resid)\n#print(result.observed)", " Total Price\ndate \n2016-09-04 NaN\n2016-09-05 NaN\n2016-09-06 NaN\n2016-09-07 30.184286\n2016-09-08 10.722857\n2016-09-09 0.000000\n2016-09-10 0.000000\n2016-09-11 0.000000\n2016-09-12 20.494286\n2016-09-13 20.494286\n2016-09-14 20.494286\n2016-09-15 20.494286\n2016-09-16 20.494286\n2016-09-17 20.494286\n2016-09-18 20.494286\n2016-09-19 0.000000\n2016-09-20 0.000000\n2016-09-21 0.000000\n2016-09-22 0.000000\n2016-09-23 0.000000\n2016-09-24 0.000000\n2016-09-25 0.000000\n2016-09-26 0.000000\n2016-09-27 0.000000\n2016-09-28 0.000000\n2016-09-29 15.620000\n2016-09-30 100.640000\n2016-10-01 1714.280000\n2016-10-02 3092.271429\n2016-10-03 4396.732857\n... 
...\n2018-08-05 50594.058571\n2018-08-06 50752.515714\n2018-08-07 49333.617143\n2018-08-08 47810.464286\n2018-08-09 47020.132857\n2018-08-10 44165.345714\n2018-08-11 43190.630000\n2018-08-12 43333.222857\n2018-08-13 42687.858571\n2018-08-14 42409.815714\n2018-08-15 42143.011429\n2018-08-16 41175.404286\n2018-08-17 40314.684286\n2018-08-18 37366.015714\n2018-08-19 33352.295714\n2018-08-20 30112.760000\n2018-08-21 26351.960000\n2018-08-22 23929.054286\n2018-08-23 20981.434286\n2018-08-24 16161.397143\n2018-08-25 12345.758571\n2018-08-26 9131.485714\n2018-08-27 6339.977143\n2018-08-28 4723.778571\n2018-08-29 3046.955714\n2018-08-30 1713.647143\n2018-08-31 864.340000\n2018-09-01 NaN\n2018-09-02 NaN\n2018-09-03 NaN\n\n[730 rows x 1 columns]\n Total Price\ndate \n2016-09-04 -3927.467742\n2016-09-05 3275.542549\n2016-09-06 2614.725171\n2016-09-07 1959.080824\n2016-09-08 852.557087\n2016-09-09 182.689423\n2016-09-10 -4957.127312\n2016-09-11 -3927.467742\n2016-09-12 3275.542549\n2016-09-13 2614.725171\n2016-09-14 1959.080824\n2016-09-15 852.557087\n2016-09-16 182.689423\n2016-09-17 -4957.127312\n2016-09-18 -3927.467742\n2016-09-19 3275.542549\n2016-09-20 2614.725171\n2016-09-21 1959.080824\n2016-09-22 852.557087\n2016-09-23 182.689423\n2016-09-24 -4957.127312\n2016-09-25 -3927.467742\n2016-09-26 3275.542549\n2016-09-27 2614.725171\n2016-09-28 1959.080824\n2016-09-29 852.557087\n2016-09-30 182.689423\n2016-10-01 -4957.127312\n2016-10-02 -3927.467742\n2016-10-03 3275.542549\n... ...\n2018-08-05 -3927.467742\n2018-08-06 3275.542549\n2018-08-07 2614.725171\n2018-08-08 1959.080824\n2018-08-09 852.557087\n2018-08-10 182.689423\n2018-08-11 -4957.127312\n2018-08-12 -3927.467742\n2018-08-13 3275.542549\n2018-08-14 2614.725171\n2018-08-15 1959.080824\n2018-08-16 852.557087\n2018-08-17 182.689423\n2018-08-18 -4957.127312\n2018-08-19 -3927.467742\n2018-08-20 3275.542549\n2018-08-21 2614.725171\n2018-08-22 1959.080824\n2018-08-23 852.557087\n2018-08-24 182.689423\n2018-08-25 -4957.127312\n2018-08-26 -3927.467742\n2018-08-27 3275.542549\n2018-08-28 2614.725171\n2018-08-29 1959.080824\n2018-08-30 852.557087\n2018-08-31 182.689423\n2018-09-01 -4957.127312\n2018-09-02 -3927.467742\n2018-09-03 3275.542549\n\n[730 rows x 1 columns]\n Total Price\ndate \n2016-09-04 NaN\n2016-09-05 NaN\n2016-09-06 NaN\n2016-09-07 -1989.265109\n2016-09-08 -863.279944\n2016-09-09 -182.689423\n2016-09-10 4957.127312\n2016-09-11 3927.467742\n2016-09-12 -3296.036835\n2016-09-13 -2635.219457\n2016-09-14 -1979.575109\n2016-09-15 -729.591373\n2016-09-16 -203.183708\n2016-09-17 4936.633026\n2016-09-18 3906.973456\n2016-09-19 -3275.542549\n2016-09-20 -2614.725171\n2016-09-21 -1959.080824\n2016-09-22 -852.557087\n2016-09-23 -182.689423\n2016-09-24 4957.127312\n2016-09-25 3927.467742\n2016-09-26 -3275.542549\n2016-09-27 -2614.725171\n2016-09-28 -1959.080824\n2016-09-29 -868.177087\n2016-09-30 -283.329423\n2016-10-01 3242.847312\n2016-10-02 944.536313\n2016-10-03 -7077.135407\n... 
...\n2018-08-05 -4394.520830\n2018-08-06 11832.361736\n2018-08-07 6346.037686\n2018-08-08 1590.954891\n2018-08-09 -1137.829944\n2018-08-10 -4762.745137\n2018-08-11 -7667.772688\n2018-08-12 -2666.005115\n2018-08-13 -86.491121\n2018-08-14 6446.829115\n2018-08-15 8256.557748\n2018-08-16 189.348627\n2018-08-17 -2858.383708\n2018-08-18 -3710.788402\n2018-08-19 541.672028\n2018-08-20 6463.567451\n2018-08-21 1864.004829\n2018-08-22 -1625.525109\n2018-08-23 -2293.431373\n2018-08-24 -5030.696565\n2018-08-25 4349.128740\n2018-08-26 4129.142028\n2018-08-27 -3503.909692\n2018-08-28 -3217.283742\n2018-08-29 -3243.336538\n2018-08-30 -2566.204230\n2018-08-31 -1047.029423\n2018-09-01 NaN\n2018-09-02 NaN\n2018-09-03 NaN\n\n[730 rows x 1 columns]\n" ],
[ "#Find out outliers\nsns.boxplot(x=data['Total Price'], orient='v')", "_____no_output_____" ] ], [ [ "**Z score denotes how many standard deviations away your sample is from the mean. Hence we remove all samples which are 3 std. deviations away from the mean**", "_____no_output_____" ] ], [ [ "#Calculate Z score for all samples\nz = np.abs(stats.zscore(data))\n", "_____no_output_____" ], [ "#Locate outliers\noutliers = data[(z > 3).all(axis=1)]\noutliers", "_____no_output_____" ], [ "#Replace outliers by the median value\nmedian = data[(z < 3).all(axis=1)].median()\ndata.loc[data['Total Price'] > 71858, 'Total Price'] = np.nan\ndata.fillna(median, inplace=True)", "_____no_output_____" ], [ "median", "_____no_output_____" ], [ "#Plot data again\nrcParams['figure.figsize'] = 20, 5\ndata.plot()", "_____no_output_____" ] ], [ [ "**Below we can see the time series clearly: there is exponential growth in the trend at the start but linear growth towards the end. \nSeasonality is not increasing exponentially; rather, it's constant. Hence we can say that our time series is additive.**", "_____no_output_____" ] ], [ [ "#Plot the data\nrcParams['figure.figsize'] = 20, 10\nresult = sm.tsa.seasonal_decompose(data, model='additive')\nresult.plot()\nplt.show()", "_____no_output_____" ], [ "#Train and test data\ntrain = data[0:-100]\ntest = data[-100:]", "_____no_output_____" ], [ "y_hat = test.copy()", "_____no_output_____" ] ], [ [ "### 1. Moving Average: \nIn this method, we use the mean of the previous data. Using the prices of the initial period would highly affect the forecast for the next period. Therefore, we will take the average of the prices for the last few time periods only. \nSuch a forecasting technique, which uses a window of time periods for calculating the average, is called the Moving Average technique. Calculation of the moving average involves what is sometimes called a “sliding window” of size n.", "_____no_output_____" ] ], [ [ "#Calculate MA: use last 50 data points\nrcParams['figure.figsize'] = 17, 5\ny_hat['moving_avg_forecast'] = train['Total Price'].rolling(50).mean().iloc[-1]\nplt.plot(train['Total Price'], label='Train')\nplt.plot(test['Total Price'], label='Test')\nplt.plot(y_hat['moving_avg_forecast'], label='Moving Average Forecast')\nplt.legend(loc='best')\nplt.show()", "_____no_output_____" ], [ "#Calculate rmse\nrmse = sqrt(mean_squared_error(test['Total Price'], y_hat['moving_avg_forecast']))\nprint(rmse)", "15457.439606224916\n" ], [ "#Calculate MAE\nmae = mean_absolute_error(test['Total Price'], y_hat['moving_avg_forecast'])\nprint(mae)", "11707.529420000003\n" ] ], [ [ "### Method 2 : Simple Exponential Smoothing\nThis method takes into account all the data while weighing the data points differently. For example, it may be sensible to attach larger weights to more recent observations than to observations from the distant past. The technique which works on this principle is called Simple Exponential Smoothing. \n\nForecasts are calculated using weighted averages where the weights decrease exponentially as observations come from further in the past; the smallest weights are associated with the oldest observations:", "_____no_output_____" ] ], [ [ "#Fit the model\nfit1 = SimpleExpSmoothing(train).fit()\ny_hat['SES'] = fit1.forecast(len(test)).rename(r'$\\alpha=%s$'%fit1.model.params['smoothing_level'])\nalpha = fit1.model.params['smoothing_level']", "C:\\Users\\Snigdha\\AppData\\Local\\conda\\conda\\envs\\neuralnets\\lib\\site-packages\\statsmodels\\tsa\\base\\tsa_model.py:171: ValueWarning: No frequency information was provided, so inferred frequency D will be used.\n % freq, ValueWarning)\n" ] ], [ [ "where 0≤ α ≤1 is the smoothing parameter.\n\nThe one-step-ahead forecast for time T+1 is a weighted average of all the observations in the series y1,…,yT. The rate at which the weights decrease is controlled by the parameter α.", "_____no_output_____" ] ], [ [ "alpha", "_____no_output_____" ], [ "#Plot the data\nrcParams['figure.figsize'] = 17, 5\nplt.plot(train['Total Price'], label='Train')\nplt.plot(test['Total Price'], label='Test')\nplt.plot(y_hat['SES'], label='SES')\nplt.legend(loc='best')\nplt.show()", "_____no_output_____" ], [ "#Calculate rmse\nrmse = sqrt(mean_squared_error(test['Total Price'], y_hat.SES))\nprint(rmse)", "17402.473134366002\n" ], [ "#Calculate mae\nmae = mean_absolute_error(test['Total Price'], y_hat.SES)\nprint(mae)", "14885.45052998217\n" ],
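[ "# A minimal sketch (added for illustration, not part of the original analysis):\n# reproduce the flat SES forecast by hand with the recursion\n#   level_t = alpha * y_t + (1 - alpha) * level_{t-1}\n# using the alpha fitted above. statsmodels also optimises the initial level,\n# so the result should be close to, but not exactly equal to, y_hat['SES'].\nlevel = train['Total Price'].iloc[0]\nfor y in train['Total Price'].iloc[1:]:\n    level = alpha * y + (1 - alpha) * level\nprint(level)", "_____no_output_____" ] ],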
[ [ "### Method 3 – Holt’s Linear Trend method\n\nIf we use any of the above methods, it won’t take into account this trend. Trend is the general pattern of prices that we observe over a period of time. In this case we can see that there is an increasing trend.\nHence we use Holt’s Linear Trend method, which can map the trend accurately without any assumptions.", "_____no_output_____" ] ], [ [ "#Holt-Linear model\nfit2 = Holt(np.asarray(train['Total Price'])).fit()\ny_hat['Holt_linear'] = fit2.forecast(len(test))\nprint(\"Smoothing level\", fit2.model.params['smoothing_level'])\nprint(\"Smoothing slope\", fit2.model.params['smoothing_slope'])\n#Plot the result\nrcParams['figure.figsize'] = 17, 5\nplt.plot(train['Total Price'], label='Train')\nplt.plot(test['Total Price'], label='Test')\nplt.plot(y_hat['Holt_linear'], label='Holt_linear')\nplt.legend(loc='best')\nplt.show()", "Smoothing level 0.31856815407501854\nSmoothing slope 0.0\n" ], [ "#Calculate rmse\nrmse = sqrt(mean_squared_error(test['Total Price'], y_hat.Holt_linear))\nprint(rmse)", "16556.50211081757\n" ], [ "#Calculate mae\nmae = mean_absolute_error(test['Total Price'], y_hat.Holt_linear)\nprint(mae)", "14058.411166558843\n" ] ], [ [ "If we observe closely, there are spikes in sales in the middle of the month.", "_____no_output_____" ] ], [ [ "data.tail(100).plot()", "_____no_output_____" ] ], [ [ "### Method 4 : Holt-Winters Method\n\nDatasets which show a similar pattern after fixed intervals of time exhibit seasonality.\nHence we need a method that takes into account both trend and seasonality to forecast future prices. \nOne such algorithm that we can use in such a scenario is the Holt-Winters method. The idea behind triple exponential smoothing (Holt-Winters) is to apply exponential smoothing to the seasonal components in addition to level and trend.", "_____no_output_____" ] ], [ [ "#Fit model\nfit3 = ExponentialSmoothing(np.asarray(train['Total Price']), seasonal_periods=30, trend='add', seasonal='add').fit()\ny_hat['Holt_Winter'] = fit3.forecast(len(test))\n#Plot the data\nrcParams['figure.figsize'] = 17, 5\nplt.plot(train['Total Price'], label='Train')\nplt.plot(test['Total Price'], label='Test')\nplt.plot(y_hat['Holt_Winter'], label='Holt_Winter')\nplt.legend(loc='best')\nplt.show()", "_____no_output_____" ], [ "#Calculate rmse\nrmse = sqrt(mean_squared_error(test['Total Price'], y_hat.Holt_Winter))\nprint(rmse)", "15457.439606224916\n" ], [ "#Calculate mae\nmae = mean_absolute_error(test['Total Price'], y_hat.Holt_Winter)\nprint(mae)", "14214.30484223988\n" ] ], [ [ "### Conclusion:\n\n | Method | RMSE | MAE |\n | --- | --- | --- |\n |Moving Average | 15457.43 | 11707.52 |\n |Simple Exponential Smoothing | 17402.47 | 14885.45 |\n |Holt Linear | 16556.50 | 14058.41 |\n |Holt Winters | 15457.43 | 14214.30 | \n \nBy comparing the above methods, we get good results from the Moving Average model, as both its RMSE and MAE are smaller compared to the others.\nThe next best model would be Holt-Winters. We can conclude that our data does not have strong seasonality or trend and is mostly dependent on previous values.\nHowever, we already implemented a method using LSTM to use previous values to predict sales.\nSo far these problems are noticed:\n 1. This dataset ranges from September 2016 to September 2018.\n 2. This is a small time range; furthermore, the data does not show any seasonality. E.g., in November 2017 sales spiked, but there is no data for November and December in 2016.\n 3. From 2016 to 2017, sales increased exponentially, but after 2017 growth is linear.\n 4. Data points are not consistent: some days/weeks/months don't have any sales\n \nSo we have 2 conclusions:\n1. Future values are highly dependent on previous values\n2. Sales are random or the time series is not stationary.\n\nA solution to this problem will be discussed in Approach 3: Use Differencing method with LSTM", "_____no_output_____" ], [ "**This notebook is MIT licensed (added in the GitHub repository) https://opensource.org/licenses/MIT <br>\nReference: https://machinelearningmastery.com/decompose-time-series-data-trend-seasonality/**", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ] ]
e7d11b0d163273e1a946e6ae21733a8e2205a5d3
17,430
ipynb
Jupyter Notebook
archive/4-cliques-triangles-structures-instructor.ipynb
ChrisKeefe/Network-Analysis-Made-Simple
98644f0d03aa3c1ece4aa2d4147835fa10a0fcf8
[ "MIT" ]
1
2017-08-19T15:03:49.000Z
2017-08-19T15:03:49.000Z
archive/4-cliques-triangles-structures-instructor.ipynb
a1ip/Network-Analysis-Made-Simple
7404c35cab8cdc9c119961ba33baef0398a20adc
[ "MIT" ]
null
null
null
archive/4-cliques-triangles-structures-instructor.ipynb
a1ip/Network-Analysis-Made-Simple
7404c35cab8cdc9c119961ba33baef0398a20adc
[ "MIT" ]
2
2022-02-09T15:41:33.000Z
2022-02-11T07:47:40.000Z
27.888
518
0.582215
[ [ [ "import networkx as nx\nimport matplotlib.pyplot as plt\nimport warnings\nfrom custom import load_data as cf\nfrom itertools import combinations\n\nwarnings.filterwarnings('ignore')\n\n%matplotlib inline\n%load_ext autoreload\n%autoreload 2\n%config InlineBackend.figure_format = 'retina'", "_____no_output_____" ] ], [ [ "# Load Data\n\nAs usual, let's start by loading some network data. This time round, we have a [physician trust](http://konect.uni-koblenz.de/networks/moreno_innovation) network, but slightly modified such that it is undirected rather than directed.\n\n> This directed network captures innovation spread among 246 physicians in for towns in Illinois, Peoria, Bloomington, Quincy and Galesburg. The data was collected in 1966. A node represents a physician and an edge between two physicians shows that the left physician told that the righ physician is his friend or that he turns to the right physician if he needs advice or is interested in a discussion. There always only exists one edge between two nodes even if more than one of the listed conditions are true.", "_____no_output_____" ] ], [ [ "# Load the network. This network, while in reality is a directed graph,\n# is intentionally converted to an undirected one for simplification.\nG = cf.load_physicians_network()", "_____no_output_____" ], [ "# Make a Circos plot of the graph\nfrom nxviz import CircosPlot\n\nc = CircosPlot(G)\nc.draw()", "_____no_output_____" ] ], [ [ "## Question\n\nWhat can you infer about the structure of the graph from the Circos plot?", "_____no_output_____" ], [ "My answer: The structure is interesting. The graph looks like the physician trust network is comprised of discrete subnetworks.", "_____no_output_____" ], [ "# Structures in a Graph\n\nWe can leverage what we have learned in the previous notebook to identify special structures in a graph. \n\nIn a network, cliques are one of these special structures.", "_____no_output_____" ], [ "# Cliques\n\nIn a social network, cliques are groups of people in which everybody knows everybody. \n\n**Questions:**\n1. What is the simplest clique?\n1. What is the simplest complex clique?\n\nLet's try implementing a simple algorithm that finds out whether a node is present in a simple complex clique.", "_____no_output_____" ] ], [ [ "# Example code.\ndef in_triangle(G, node):\n \"\"\"\n Returns whether a given node is present in a triangle relationship or not.\n \"\"\" \n # Then, iterate over every pair of the node's neighbors.\n for nbr1, nbr2 in combinations(G.neighbors(node), 2):\n # Check to see if there is an edge between the node's neighbors.\n # If there is an edge, then the given node is present in a triangle.\n if G.has_edge(nbr1, nbr2):\n # We return because any triangle that is present automatically \n # satisfies the problem requirements.\n return True\n return False\n\nin_triangle(G, 3)", "_____no_output_____" ] ], [ [ "In reality, NetworkX already has a function that *counts* the number of triangles that any given node is involved in. This is probably more useful than knowing whether a node is present in a triangle or not, but the above code was simply for practice.", "_____no_output_____" ] ], [ [ "nx.triangles(G, 3)", "_____no_output_____" ] ], [ [ "## Exercise\n\nCan you write a function that takes in one node and its associated graph as an input, and returns a list or set of itself + all other nodes that it is in a triangle relationship with? Do not return the triplets, but the `set`/`list` of nodes. 
(5 min.)\n\n**Possible Implementation:** If I check every pair of my neighbors, any pair that are also connected in the graph are in a triangle relationship with me.\n\nHint: Python's [`itertools`](https://docs.python.org/3/library/itertools.html) module has a `combinations` function that may be useful.\n\nHint: NetworkX graphs have a `.has_edge(node1, node2)` function that checks whether an edge exists between two nodes.\n\nVerify your answer by drawing out the subgraph composed of those nodes.", "_____no_output_____" ] ], [ [ "# Possible answer\ndef get_triangles(G, node):\n neighbors1 = set(G.neighbors(node))\n triangle_nodes = set()\n triangle_nodes.add(node)\n \"\"\"\n Fill in the rest of the code below.\n \"\"\"\n for nbr1, nbr2 in combinations(neighbors1, 2):\n if G.has_edge(nbr1, nbr2):\n triangle_nodes.add(nbr1)\n triangle_nodes.add(nbr2)\n return triangle_nodes\n\n# Verify your answer with the following funciton call. Should return something of the form:\n# {3, 9, 11, 41, 42, 67}\nget_triangles(G, 3)", "_____no_output_____" ], [ "# Then, draw out those nodes.\nnx.draw(G.subgraph(get_triangles(G, 3)), with_labels=True)", "_____no_output_____" ], [ "# Compare for yourself that those are the only triangles that node 3 is involved in.\nneighbors3 = list(G.neighbors(3))\nneighbors3.append(3)\nnx.draw(G.subgraph(neighbors3), with_labels=True)", "_____no_output_____" ] ], [ [ "# Friend Recommendation: Open Triangles\n\nNow that we have some code that identifies closed triangles, we might want to see if we can do some friend recommendations by looking for open triangles.\n\nOpen triangles are like those that we described earlier on - A knows B and B knows C, but C's relationship with A isn't captured in the graph. \n\nWhat are the two general scenarios for finding open triangles that a given node is involved in?\n\n1. The given node is the centre node.\n1. The given node is one of the termini nodes.", "_____no_output_____" ], [ "## Exercise\nCan you write a function that identifies, for a given node, the other two nodes that it is involved with in an open triangle, if there is one? (5 min.)\n\nNote: For this exercise, only consider the case when the node of interest is the centre node.\n\n**Possible Implementation:** Check every pair of my neighbors, and if they are not connected to one another, then we are in an open triangle relationship.", "_____no_output_____" ] ], [ [ "def get_open_triangles(G, node):\n \"\"\"\n There are many ways to represent this. One may choose to represent\n only the nodes involved in an open triangle; this is not the \n approach taken here.\n \n Rather, we have a code that explicitly enumrates every open triangle present.\n \"\"\"\n open_triangle_nodes = []\n neighbors = list(G.neighbors(node))\n \n for n1, n2 in combinations(neighbors, 2):\n if not G.has_edge(n1, n2):\n open_triangle_nodes.append([n1, node, n2])\n \n return open_triangle_nodes", "_____no_output_____" ], [ "# # Uncomment the following code if you want to draw out each of the triplets.\n# nodes = get_open_triangles(G, 2)\n# for i, triplet in enumerate(nodes):\n# fig = plt.figure(i)\n# nx.draw(G.subgraph(triplet), with_labels=True)\nprint(get_open_triangles(G, 3))\nlen(get_open_triangles(G, 3))", "_____no_output_____" ] ], [ [ "Triangle closure is also the core idea behind social networks' friend recommendation systems; of course, it's definitely more complicated than what we've implemented here.", "_____no_output_____" ], [ "# Cliques\n\nWe have figured out how to find triangles. 
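[ "# A minimal sketch of triangle-closing friend recommendation (an illustrative\n# addition, not part of the original exercise): every open triangle (n1, c, n2)\n# suggests introducing n1 and n2 to one another, so we rank candidate\n# introductions across the whole graph by their number of common neighbours.\nfrom collections import Counter\n\ndef recommend_introductions(G, top_n=5):\n    pairs = Counter()\n    for node in G.nodes():\n        for n1, centre, n2 in get_open_triangles(G, node):\n            # one count per common neighbour of the non-adjacent pair\n            pairs[tuple(sorted((n1, n2)))] += 1\n    return pairs.most_common(top_n)\n\nrecommend_introductions(G)", "_____no_output_____" ] ],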
[ [ "Triangle closure is also the core idea behind social networks' friend recommendation systems; of course, it's definitely more complicated than what we've implemented here.", "_____no_output_____" ], [ "# Cliques\n\nWe have figured out how to find triangles. Now, let's find out what **cliques** are present in the network. Recall: what is the definition of a clique?\n\n- NetworkX has a [clique-finding](https://networkx.github.io/documentation/networkx-1.10/reference/generated/networkx.algorithms.clique.find_cliques.html) algorithm implemented.\n- This algorithm finds all maximally-sized cliques for a given node.\n- Note that maximal cliques of size `n` include all cliques of `size < n`", "_____no_output_____" ] ], [ [ "list(nx.find_cliques(G))[0:20]", "_____no_output_____" ] ], [ [ "## Exercise\n\nTry writing a function `maximal_cliques_of_size(size, G)` that implements a search for all maximal cliques of a given size. (3 min.)", "_____no_output_____" ] ], [ [ "def maximal_cliques_of_size(size, G):\n    # Defensive programming check.\n    assert isinstance(size, int), \"size has to be an integer\"\n    assert size >= 2, \"cliques are of size 2 or greater.\"\n\n    return [i for i in list(nx.find_cliques(G)) if len(i) == size]\n\nmaximal_cliques_of_size(2, G)[0:20]", "_____no_output_____" ] ], [ [ "# Connected Components\n\nFrom [Wikipedia](https://en.wikipedia.org/wiki/Connected_component_%28graph_theory%29):\n\n> In graph theory, a connected component (or just component) of an undirected graph is a subgraph in which any two vertices are connected to each other by paths, and which is connected to no additional vertices in the supergraph.\n\nNetworkX also implements a [function](https://networkx.github.io/documentation/networkx-1.9.1/reference/generated/networkx.algorithms.components.connected.connected_component_subgraphs.html) that identifies connected component subgraphs.\n\nRemember how based on the Circos plot above, we had this hypothesis that the physician trust network may be divided into subgraphs. Let's check that, and see if we can redraw the Circos visualization.", "_____no_output_____" ] ], [ [ "ccsubgraph_nodes = list(nx.connected_components(G))\nccsubgraph_nodes", "_____no_output_____" ] ], [ [ "## Exercise\n\nDraw a circos plot of the graph, but now colour and order the nodes by their connected component subgraph. (5 min.)\n\nRecall Circos API:\n\n```python\nc = CircosPlot(G, node_order='...', node_color='...')\nc.draw()\nplt.show() # or plt.savefig(...)\n```", "_____no_output_____" ] ], [ [ "# Start by labelling each node in the master graph G by some number\n# that represents the subgraph that contains the node.\nfor i, nodeset in enumerate(ccsubgraph_nodes):\n    for n in nodeset:\n        G.nodes[n]['subgraph'] = i", "_____no_output_____" ], [ "c = CircosPlot(G, node_color='subgraph', node_order='subgraph')\nc.draw()\nplt.savefig('images/physicians.png', dpi=300)", "_____no_output_____" ] ], [ [ "And \"admire\" the division of the US congress over the years...", "_____no_output_____" ], [ "![Congress Voting Patterns](https://img.washingtonpost.com/wp-apps/imrs.php?src=https://img.washingtonpost.com/blogs/wonkblog/files/2015/04/journal.pone_.0123507.g002.png&w=1484)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ] ]
e7d1208c118891401d5fdb483eed78ddcdc428a7
507,171
ipynb
Jupyter Notebook
notebooks/atmos-land-ocean-handling.ipynb
lewisjared/netcdf-scm
2c433f12f126639513deeec91c338c0f2ebbcc70
[ "BSD-2-Clause" ]
null
null
null
notebooks/atmos-land-ocean-handling.ipynb
lewisjared/netcdf-scm
2c433f12f126639513deeec91c338c0f2ebbcc70
[ "BSD-2-Clause" ]
2
2019-05-31T01:07:36.000Z
2019-06-04T03:53:10.000Z
notebooks/atmos-land-ocean-handling.ipynb
lewisjared/netcdf-scm
2c433f12f126639513deeec91c338c0f2ebbcc70
[ "BSD-2-Clause" ]
null
null
null
341.989885
111,452
0.923105
[ [ [ "# Atmospheric, oceanic and land data handling\n\nIn this notebook we discuss the subtleties of how NetCDF-SCM handles different data 'realms' and why these choices are made. The realms of intereset to date are atmosphere, ocean and land and the distinction between the realms follows the [CMIP6 realm controlled vocabulary](https://github.com/WCRP-CMIP/CMIP6_CVs/blob/master/CMIP6_realm.json).", "_____no_output_____" ] ], [ [ "import traceback\nfrom os.path import join\n\nimport iris\nimport iris.quickplot as qplt\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom netcdf_scm.iris_cube_wrappers import CMIP6OutputCube\nfrom netcdf_scm.utils import broadcast_onto_lat_lon_grid", "_____no_output_____" ], [ "from pandas.plotting import register_matplotlib_converters\nregister_matplotlib_converters()\nplt.style.use(\"bmh\")", "_____no_output_____" ], [ "import logging\nroot_logger = logging.getLogger()\nroot_logger.setLevel(logging.WARNING)\nroot_logger.addHandler(logging.StreamHandler())", "_____no_output_____" ], [ "DATA_PATH_TEST = join(\"..\", \"tests\", \"test-data\")", "_____no_output_____" ] ], [ [ "Note that all of our data is on a regular grid, data on native model grids does not plot as nicely.", "_____no_output_____" ] ], [ [ "tas_file = join(\n DATA_PATH_TEST,\n \"cmip6output\",\n \"CMIP6\",\n \"CMIP\",\n \"IPSL\",\n \"IPSL-CM6A-LR\",\n \"historical\",\n \"r1i1p1f1\",\n \"Amon\",\n \"tas\",\n \"gr\",\n \"v20180803\",\n \"tas_Amon_IPSL-CM6A-LR_historical_r1i1p1f1_gr_191001-191003.nc\"\n)\n\ngpp_file = tas_file.replace(\n \"Amon\", \"Lmon\"\n).replace(\n \"tas\", \"gpp\"\n)\ncsoilfast_file = gpp_file.replace(\"gpp\", \"cSoilFast\")\n\nhfds_file = join(\n DATA_PATH_TEST,\n \"cmip6output\",\n \"CMIP6\",\n \"CMIP\",\n \"NOAA-GFDL\",\n \"GFDL-CM4\",\n \"piControl\",\n \"r1i1p1f1\",\n \"Omon\",\n \"hfds\",\n \"gr\",\n \"v20180701\",\n \"hfds_Omon_GFDL-CM4_piControl_r1i1p1f1_gr_015101-015103.nc\"\n)", "_____no_output_____" ] ], [ [ "## Oceans\n\nWe start by loading our data.", "_____no_output_____" ] ], [ [ "hfds = CMIP6OutputCube()\nhfds.load_data_from_path(hfds_file)", "Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working\nUsing or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working\n" ] ], [ [ "NetCDF-SCM will assume whether the data is \"ocean\", \"land\" or \"atmosphere\". The assumed realm can be checked by examining a `SCMCube`'s `netcdf_scm_realm` property. \n\nIn our case we have \"ocean\" data.", "_____no_output_____" ] ], [ [ "hfds.netcdf_scm_realm", "_____no_output_____" ] ], [ [ "If we have ocean data, then there is no data which will go in a \"land\" box. Hence, if we request e.g. 
`World|Land` data, an error will be raised.", "_____no_output_____" ] ], [ [ "try:\n    hfds.get_scm_timeseries(regions=[\"World\", \"World|Land\"])\nexcept ValueError as e:\n    traceback.print_exc(limit=0, chain=False)", "Traceback (most recent call last):\nValueError: All weights are zero for region: `World|Land`\n" ] ], [ [ "As there is no land data, the `World` mean is equal to the `World|Ocean` mean.", "_____no_output_____" ] ], [ [ "hfds_scm_ts = hfds.get_scm_timeseries(\n    regions=[\"World\", \"World|Ocean\"]\n)\nhfds_scm_ts.line_plot(linestyle=\"region\")\nnp.testing.assert_allclose(\n    hfds_scm_ts.filter(region=\"World\").values,\n    hfds_scm_ts.filter(region=\"World|Ocean\").values,\n);", "Not calculating land fractions as all required cubes are not available\nPerforming lazy conversion to datetime for calendar: 365_day. This may cause subtle errors in operations that depend on the length of time between dates\nUsing or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working\n" ] ], [ [ "When taking averages, there are 3 obvious options:\n\n- unweighted average\n- area weighted average\n- area and surface fraction weighted average\n\nIn NetCDF-SCM, we always go for the third type in order to make sure that our weights are both area weighted and take into account how much each cell represents the SCM box of interest.\n\nIn the cells below, we show the difference this choice makes.", "_____no_output_____" ] ], [ [ "def compare_weighting_options(input_scm_cube):\n    unweighted_mean = input_scm_cube.cube.collapsed(\n        [\"latitude\", \"longitude\"],\n        iris.analysis.MEAN\n    )\n\n    area_cell = input_scm_cube.get_metadata_cube(\n        input_scm_cube.areacell_var\n    ).cube\n\n    area_weights = broadcast_onto_lat_lon_grid(\n        input_scm_cube,\n        area_cell.data\n    )\n    area_weighted_mean = input_scm_cube.cube.collapsed(\n        [\"latitude\", \"longitude\"],\n        iris.analysis.MEAN,\n        weights=area_weights\n    )\n\n    surface_frac = input_scm_cube.get_metadata_cube(\n        input_scm_cube.surface_fraction_var\n    ).cube\n\n    area_sf = area_cell * surface_frac\n    area_sf_weights = broadcast_onto_lat_lon_grid(\n        input_scm_cube,\n        area_sf.data\n    )\n    area_sf_weighted_mean = input_scm_cube.cube.collapsed(\n        [\"latitude\", \"longitude\"],\n        iris.analysis.MEAN,\n        weights=area_sf_weights\n    )\n\n    plt.figure(figsize=(8, 4.5))\n    qplt.plot(unweighted_mean, label=\"unweighted\")\n    qplt.plot(area_weighted_mean, label=\"area weighted\")\n    qplt.plot(\n        area_sf_weighted_mean,\n        label=\"area-surface fraction weighted\",\n        linestyle=\"--\",\n        dashes=(10, 10),\n        linewidth=4\n    )\n\n    plt.legend();", "_____no_output_____" ], [ "compare_weighting_options(hfds)", "Collapsing spatial coordinate 'latitude' without weighting\n" ] ], [ [ "We go to the trouble of taking these area-surface fraction weightings because they matter. In particular, the area weight is required to not overweight the poles (on whatever grid we're working) whilst the surface fraction ensures that the cells' contribution to the averages reflects how much they belong in a given 'SCM box'.", "_____no_output_____" ], [ "### More detail\n\nWe can check which variable is being used for the cell areas by looking at `SCMCube.areacell_var`. 
For ocean data this is `areacello`.", "_____no_output_____" ] ], [ [ "hfds.areacell_var", "_____no_output_____" ], [ "hfds_area_cell = hfds.get_metadata_cube(hfds.areacell_var).cube\nqplt.contourf(\n    hfds_area_cell,\n);", "_____no_output_____" ] ], [ [ "We can check which variable is being used for the surface fraction by looking at `SCMCube.surface_fraction_var`. For ocean data this is `sftof`.", "_____no_output_____" ] ], [ [ "hfds.surface_fraction_var", "_____no_output_____" ], [ "hfds_surface_frac = hfds.get_metadata_cube(hfds.surface_fraction_var).cube\nqplt.contourf(\n    hfds_surface_frac,\n);", "_____no_output_____" ] ], [ [ "The product of the area of the cells and the surface fraction gives us the area-surface fraction weights.", "_____no_output_____" ] ], [ [ "hfds_area_sf = hfds_area_cell * hfds_surface_frac\n\nplt.figure(figsize=(16, 9))\nplt.subplot(121)\nqplt.contourf(\n    hfds_area_sf,\n)\n\nplt.subplot(122)\nlat_con = iris.Constraint(latitude=lambda cell: -50 < cell < -20)\nlon_con = iris.Constraint(longitude=lambda cell: 140 < cell < 160)\nqplt.contourf(\n    hfds_area_sf.extract(lat_con & lon_con),\n);", "_____no_output_____" ] ], [ [ "The timeseries calculated by NetCDF-SCM is the same as the timeseries calculated using the surface fraction and area weights.", "_____no_output_____" ] ], [ [ "hfds_area_sf_weights = broadcast_onto_lat_lon_grid(\n    hfds,\n    hfds_area_sf.data\n)\nhfds_area_sf_weighted_mean = hfds.cube.collapsed(\n    [\"latitude\", \"longitude\"],\n    iris.analysis.MEAN,\n    weights=hfds_area_sf_weights\n)\n\nnetcdf_scm_calculated = hfds.get_scm_timeseries(\n    regions=[\"World\"]\n).timeseries()\n\nnp.testing.assert_allclose(\n    hfds_area_sf_weighted_mean.data,\n    netcdf_scm_calculated.values.squeeze()\n)\n\nnetcdf_scm_calculated.T", "Not calculating land fractions as all required cubes are not available\nPerforming lazy conversion to datetime for calendar: 365_day. This may cause subtle errors in operations that depend on the length of time between dates\n" ] ], [ [ "## Land\n\nNext we look at land data.", "_____no_output_____" ] ], [ [ "gpp = CMIP6OutputCube()\ngpp.load_data_from_path(gpp_file)\n\ncsoilfast = CMIP6OutputCube()\ncsoilfast.load_data_from_path(csoilfast_file)", "_____no_output_____" ], [ "gpp.netcdf_scm_realm", "_____no_output_____" ], [ "csoilfast.netcdf_scm_realm", "_____no_output_____" ] ], [ [ "If we have land data, then there is no data which will go in an \"ocean\" box. Hence, if we request e.g. `World|Ocean` data, an error will be raised.", "_____no_output_____" ] ], [ [ "try:\n    gpp.get_scm_timeseries(regions=[\"World\", \"World|Ocean\"])\nexcept ValueError as e:\n    traceback.print_exc(limit=0, chain=False)", "Traceback (most recent call last):\nValueError: All weights are zero for region: `World|Ocean`\n" ] ], [ [ "As there is no ocean data, the `World` mean is equal to the `World|Land` mean.", "_____no_output_____" ] ], [ [ "gpp_scm_ts = gpp.get_scm_timeseries(\n    regions=[\"World\", \"World|Land\"]\n)\ngpp_scm_ts.line_plot(linestyle=\"region\")\nnp.testing.assert_allclose(\n    gpp_scm_ts.filter(region=\"World\").values,\n    gpp_scm_ts.filter(region=\"World|Land\").values,\n);", "Collapsing a non-contiguous coordinate. Metadata may not be fully descriptive for 'latitude'.\nCollapsing a non-contiguous coordinate. 
Metadata may not be fully descriptive for 'longitude'.\nNot calculating land fractions as all required cubes are not available\n" ], [ "compare_weighting_options(gpp)", "Collapsing spatial coordinate 'latitude' without weighting\n" ], [ "compare_weighting_options(csoilfast)", "Collapsing a non-contiguous coordinate. Metadata may not be fully descriptive for 'latitude'.\nCollapsing a non-contiguous coordinate. Metadata may not be fully descriptive for 'longitude'.\n" ] ], [ [ "## Atmosphere\n\nFinally we look at atmospheric data.", "_____no_output_____" ] ], [ [ "tas = CMIP6OutputCube()\ntas.load_data_from_path(tas_file)", "_____no_output_____" ], [ "tas.netcdf_scm_realm", "_____no_output_____" ] ], [ [ "If we have atmosphere data, then we have global coverage and so can split data into both the land and ocean boxes.", "_____no_output_____" ] ], [ [ "fig = plt.figure(figsize=(16, 14))\nax1 = fig.add_subplot(311)\ntas.get_scm_timeseries(\n regions=[\n \"World\",\n \"World|Land\",\n \"World|Ocean\",\n \"World|Northern Hemisphere\",\n \"World|Southern Hemisphere\",\n ]\n).line_plot(color=\"region\", ax=ax1)\n\nax2 = fig.add_subplot(312, sharey=ax1, sharex=ax1)\ntas.get_scm_timeseries(\n regions=[\n \"World\",\n \"World|Northern Hemisphere|Land\",\n \"World|Southern Hemisphere|Land\",\n \"World|Northern Hemisphere|Ocean\",\n \"World|Southern Hemisphere|Ocean\",\n ]\n).line_plot(color=\"region\", ax=ax2)\n\nax3 = fig.add_subplot(313, sharey=ax1, sharex=ax1)\ntas.get_scm_timeseries(\n regions=[\n \"World\",\n \"World|Ocean\",\n \"World|North Atlantic Ocean\",\n \"World|El Nino N3.4\",\n ]\n).line_plot(color=\"region\", ax=ax3);", "Collapsing a non-contiguous coordinate. Metadata may not be fully descriptive for 'latitude'.\nCollapsing a non-contiguous coordinate. 
Metadata may not be fully descriptive for 'longitude'.\nNot calculating land fractions as all required cubes are not available\nNot calculating land fractions as all required cubes are not available\nNot calculating land fractions as all required cubes are not available\n" ], [ "compare_weighting_options(tas)", "Collapsing spatial coordinate 'latitude' without weighting\n" ] ], [ [ "As our data is global, the \"World\" data is simply an area-weighted mean.", "_____no_output_____" ] ], [ [ "tas_area = tas.get_metadata_cube(\n tas.areacell_var\n).cube\n\ntas_area_weights = broadcast_onto_lat_lon_grid(\n tas, \n tas_area.data\n)\ntas_area_weighted_mean = tas.cube.collapsed(\n [\"latitude\", \"longitude\"],\n iris.analysis.MEAN,\n weights=tas_area_weights\n)\n\nnetcdf_scm_calculated = tas.get_scm_timeseries(\n regions=[\"World\"]\n).timeseries()\n\nnp.testing.assert_allclose(\n tas_area_weighted_mean.data,\n netcdf_scm_calculated.values.squeeze()\n)\n\nnetcdf_scm_calculated.T", "Not calculating land fractions as all required cubes are not available\n" ] ], [ [ "The \"World|Land\" data is surface fraction weighted.", "_____no_output_____" ] ], [ [ "tas_sf = tas.get_metadata_cube(\n tas.surface_fraction_var\n).cube\n\ntas_area_sf = tas_area * tas_sf\n\ntas_area_sf_weights = broadcast_onto_lat_lon_grid(\n tas, \n tas_area_sf.data\n)\ntas_area_sf_weighted_mean = tas.cube.collapsed(\n [\"latitude\", \"longitude\"],\n iris.analysis.MEAN,\n weights=tas_area_sf_weights\n)\n\nnetcdf_scm_calculated = tas.get_scm_timeseries(\n regions=[\"World|Land\"]\n).timeseries()\n\nnp.testing.assert_allclose(\n tas_area_sf_weighted_mean.data,\n netcdf_scm_calculated.values.squeeze()\n)\n\nnetcdf_scm_calculated.T", "Not calculating land fractions as all required cubes are not available\n" ] ], [ [ "The \"World|Ocean\" data is also surface fraction weighted (calculated as 100 minus land surface fraction).", "_____no_output_____" ] ], [ [ "tas_sf_ocean = tas.get_metadata_cube(\n tas.surface_fraction_var\n).cube\ntas_sf_ocean.data = 100 - tas_sf_ocean.data\n\ntas_area_sf_ocean = tas_area * tas_sf_ocean\n\ntas_area_sf_ocean_weights = broadcast_onto_lat_lon_grid(\n tas, \n tas_area_sf_ocean.data\n)\ntas_area_sf_ocean_weighted_mean = tas.cube.collapsed(\n [\"latitude\", \"longitude\"],\n iris.analysis.MEAN,\n weights=tas_area_sf_ocean_weights\n)\n\nnetcdf_scm_calculated = tas.get_scm_timeseries(\n regions=[\"World|Ocean\"]\n).timeseries()\n\nnp.testing.assert_allclose(\n tas_area_sf_ocean_weighted_mean.data,\n netcdf_scm_calculated.values.squeeze()\n)\n\nnetcdf_scm_calculated.T", "Not calculating land fractions as all required cubes are not available\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
e7d129715c3f4fb16e9366c6f5a9323f88407c21
122,770
ipynb
Jupyter Notebook
Stock_Algorithms/Decision_Trees_Classification_Part4.ipynb
NTForked-ML/Deep-Learning-Machine-Learning-Stock
8a137972d967423c7102a33ba639bd0d5d21a0e9
[ "MIT" ]
null
null
null
Stock_Algorithms/Decision_Trees_Classification_Part4.ipynb
NTForked-ML/Deep-Learning-Machine-Learning-Stock
8a137972d967423c7102a33ba639bd0d5d21a0e9
[ "MIT" ]
1
2022-02-10T23:30:36.000Z
2022-02-10T23:30:36.000Z
Stock_Algorithms/Decision_Trees_Classification_Part4.ipynb
ysdede/Deep-Learning-Machine-Learning-Stock
2e3794efab3276b6bc389c8b38615540d4e2b144
[ "MIT" ]
null
null
null
352.787356
75,893
0.706924
[ [ [ "# Decision Tree Classification Part 4", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n# yahoo finance is used to fetch data \nimport yfinance as yf\nyf.pdr_override()", "_____no_output_____" ], [ "# input\nsymbol = 'AMD'\nstart = '2014-01-01'\nend = '2019-01-01'\n\n# Read data \ndataset = yf.download(symbol,start,end)\n\n# View Columns\ndataset.head()", "[*********************100%***********************] 1 of 1 completed\n" ], [ "dataset['Open_Close'] = (dataset['Open'] - dataset['Adj Close'])/dataset['Open']\ndataset['High_Low'] = (dataset['High'] - dataset['Low'])/dataset['Low']\ndataset['Increase_Decrease'] = np.where(dataset['Volume'].shift(-1) > dataset['Volume'],1,0)\ndataset['Buy_Sell_on_Open'] = np.where(dataset['Open'].shift(-1) > dataset['Open'],1,0)\ndataset['Buy_Sell'] = np.where(dataset['Adj Close'].shift(-1) > dataset['Adj Close'],1,0)\ndataset['Returns'] = dataset['Adj Close'].pct_change()\ndataset = dataset.dropna()\ndataset.head()", "_____no_output_____" ], [ "X = dataset[['Open', 'High', 'Low', 'Volume', 'Adj Close','Returns']].values\ny = dataset['Buy_Sell'].values", "_____no_output_____" ], [ "#Spilitting the dataset\nremoved =[0,50,100]\nnew_target = np.delete(y,removed)\nnew_data = np.delete(X,removed, axis=0) ", "_____no_output_____" ], [ "from sklearn import tree\n\nclf = tree.DecisionTreeClassifier() \nclf=clf.fit(new_data,new_target) \nprediction = clf.predict(X[removed]) ", "_____no_output_____" ], [ "print(\"Original Labels\",y[removed])\nprint(\"Labels Predicted\",prediction)", "Original Labels [1 1 0]\nLabels Predicted [0 0 1]\n" ], [ "tree.plot_tree(clf) ", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7d1349e16614a0d02472e5bb44950b599366a79
25,426
ipynb
Jupyter Notebook
WHUT-MCM2020国奖/Problem_C/.ipynb_checkpoints/prediction-checkpoint.ipynb
775269512/WHUT_CUMCM20
a966c09e46c1789a86d4532f46d503a2226e0a47
[ "MIT" ]
73
2020-09-20T15:39:26.000Z
2022-03-10T23:37:17.000Z
WHUT-MCM2020国奖/Problem_C/.ipynb_checkpoints/prediction-checkpoint.ipynb
ooorange9211/WHUT_CUMCM20
a966c09e46c1789a86d4532f46d503a2226e0a47
[ "MIT" ]
3
2021-09-18T04:43:08.000Z
2021-12-02T08:10:53.000Z
WHUT-MCM2020国奖/Problem_C/.ipynb_checkpoints/prediction-checkpoint.ipynb
ooorange9211/WHUT_CUMCM20
a966c09e46c1789a86d4532f46d503a2226e0a47
[ "MIT" ]
27
2020-09-20T15:39:29.000Z
2022-02-28T12:15:06.000Z
37.446244
183
0.403642
[ [ [ "import pandas as pd\nimport numpy as np\nimport catboost as cbt\nfrom sklearn.metrics import accuracy_score, roc_auc_score,log_loss\nimport gc\nimport math\nimport time\nimport datetime\nfrom sklearn.model_selection import KFold,StratifiedKFold\nfrom sklearn.preprocessing import LabelEncoder\nimport matplotlib.pyplot as plt\nfrom datetime import datetime,timedelta\nimport warnings\nimport seaborn as sns\nimport os\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.dummy import DummyClassifier\nfrom string import punctuation\nfrom sklearn import svm\nfrom sklearn.feature_extraction import stop_words\n\nfrom nltk.corpus import stopwords\nfrom nltk.stem import WordNetLemmatizer\nimport nltk\nfrom nltk import ngrams\nfrom itertools import chain\nfrom wordcloud import WordCloud\nwarnings.filterwarnings('ignore')\npd.options.display.max_columns = None\npd.options.display.max_rows = None\nfrom matplotlib.font_manager import FontProperties\n\nfont = FontProperties(fname=r\"c:\\windows\\fonts\\simsun.ttc\", size=14)", "_____no_output_____" ], [ "#数据导入\nfile_path = './Problem_C_Data/hair_dryer.tsv'\nmessages = pd.read_csv(file_path, sep='\\t', header=0)\nmessages.head()", "_____no_output_____" ], [ "messages[\"Sentiment\"] = messages[\"star_rating\"].apply(lambda score: \"positive\" if score > 3 else \"negative\")\nmessages[\"Usefulness\"] = (messages[\"helpful_votes\"]/messages[\"total_votes\"]).apply(lambda n: \"useful\" if n > 0.5 else \"useless\")\n\nmessages.head(5)", "_____no_output_____" ], [ "messages['Summary'] = messages['review_body']\nmessages['temp'] = messages.review_date.apply(lambda x : pd.to_datetime(x))\nmessages = messages.drop(['review_headline','helpful_votes','review_body','review_date','marketplace','customer_id','product_id','product_parent','product_category'], axis=1)\nmessages = messages.set_index('review_id')\nmessages= messages.sort_values(['temp'])\nmessages.head()", "_____no_output_____" ], [ "from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\n\nimport re\nimport string\nimport nltk\n\ncleanup_re = re.compile('[^a-z]+')\ndef cleanup(sentence):\n sentence = sentence.lower()\n sentence = cleanup_re.sub(' ', sentence).strip()\n #sentence = \" \".join(nltk.word_tokenize(sentence))\n return sentence\n\n\nmessages[\"Summary_Clean\"] = messages[\"Summary\"].apply(cleanup)\n\ntrain, test = train_test_split(messages, test_size=0.2,shuffle=False)\nprint(\"%d items in training data, %d in test data\" % (len(train), len(test)))", "9176 items in training data, 2294 in test data\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
e7d13bd2b1464732ab23e79b15eb0cdc24b86b0b
221,024
ipynb
Jupyter Notebook
t81_558_class_11_04_hf_train.ipynb
igunduz/t81_558_deep_learning
03e3b4663be167e9840a819c111c0620447aaee8
[ "Apache-2.0" ]
null
null
null
t81_558_class_11_04_hf_train.ipynb
igunduz/t81_558_deep_learning
03e3b4663be167e9840a819c111c0620447aaee8
[ "Apache-2.0" ]
null
null
null
t81_558_class_11_04_hf_train.ipynb
igunduz/t81_558_deep_learning
03e3b4663be167e9840a819c111c0620447aaee8
[ "Apache-2.0" ]
null
null
null
32.158301
311
0.595116
[ [ [ "<a href=\"https://colab.research.google.com/github/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_11_04_hf_train.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>\n", "_____no_output_____" ], [ "# T81-558: Applications of Deep Neural Networks\n**Module 11: Natural Language Processing with Hugging Face**\n* Instructor: [Jeff Heaton](https://sites.wustl.edu/jeffheaton/), McKelvey School of Engineering, [Washington University in St. Louis](https://engineering.wustl.edu/Programs/Pages/default.aspx)\n* For more information visit the [class website](https://sites.wustl.edu/jeffheaton/t81-558/).", "_____no_output_____" ], [ "# Module 11 Material\n\n* Part 11.1: Introduction to Hugging Face [[Video]](https://www.youtube.com/watch?v=1IHXSbz02XM&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_11_01_huggingface.ipynb)\n* Part 11.2: Hugging Face Tokenizers [[Video]](https://www.youtube.com/watch?v=U-EGU1RyChg&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_11_02_tokenizers.ipynb)\n* Part 11.3: Hugging Face Datasets [[Video]](https://www.youtube.com/watch?v=Mq5ODegT17M&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_11_03_hf_datasets.ipynb)\n* **Part 11.4: Training Hugging Face Models** [[Video]](https://www.youtube.com/watch?v=https://www.youtube.com/watch?v=l69ov6b7DOM&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_11_04_hf_train.ipynb)\n* Part 11.5: What are Embedding Layers in Keras [[Video]](https://www.youtube.com/watch?v=OuNH5kT-aD0list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN&index=58) [[Notebook]](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_11_05_embedding.ipynb)", "_____no_output_____" ], [ "# Google CoLab Instructions\n\nThe following code ensures that Google CoLab is running the correct version of TensorFlow.", "_____no_output_____" ] ], [ [ "try:\n %tensorflow_version 2.x\n COLAB = True\n print(\"Note: using Google CoLab\")\nexcept:\n print(\"Note: not using Google CoLab\")\n COLAB = False", "Note: using Google CoLab\n" ] ], [ [ "# Part 11.4: Training Hugging Face Models\n\n\n\n\n", "_____no_output_____" ], [ "Up to this point, we've used data and models from the Hugging Face hub unmodified. In this section, we will transfer and train a Hugging Face model. To achieve this training, we will use Hugging Face data sets, tokenizers, and pretrained models.\n\nWe begin by installing Hugging Face if needed. 
It is also essential to install Hugging Face datasets.", "_____no_output_____" ] ], [ [ "# HIDE OUTPUT\n!pip install transformers\n!pip install transformers[sentencepiece]\n!pip install datasets", "Collecting transformers\n Downloading transformers-4.17.0-py3-none-any.whl (3.8 MB)\n\u001b[K |████████████████████████████████| 3.8 MB 15.1 MB/s \n\u001b[?25hRequirement already satisfied: requests in /usr/local/lib/python3.7/dist-packages (from transformers) (2.23.0)\nRequirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.7/dist-packages (from transformers) (2019.12.20)\nRequirement already satisfied: filelock in /usr/local/lib/python3.7/dist-packages (from transformers) (3.6.0)\nRequirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.7/dist-packages (from transformers) (1.21.5)\nCollecting tokenizers!=0.11.3,>=0.11.1\n Downloading tokenizers-0.11.6-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl (6.5 MB)\n\u001b[K |████████████████████████████████| 6.5 MB 56.8 MB/s \n\u001b[?25hCollecting pyyaml>=5.1\n Downloading PyYAML-6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl (596 kB)\n\u001b[K |████████████████████████████████| 596 kB 72.2 MB/s \n\u001b[?25hRequirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.7/dist-packages (from transformers) (21.3)\nCollecting sacremoses\n Downloading sacremoses-0.0.49-py3-none-any.whl (895 kB)\n\u001b[K |████████████████████████████████| 895 kB 65.0 MB/s \n\u001b[?25hRequirement already satisfied: tqdm>=4.27 in /usr/local/lib/python3.7/dist-packages (from transformers) (4.63.0)\nRequirement already satisfied: importlib-metadata in /usr/local/lib/python3.7/dist-packages (from transformers) (4.11.3)\nCollecting huggingface-hub<1.0,>=0.1.0\n Downloading huggingface_hub-0.4.0-py3-none-any.whl (67 kB)\n\u001b[K |████████████████████████████████| 67 kB 6.9 MB/s \n\u001b[?25hRequirement already satisfied: typing-extensions>=3.7.4.3 in /usr/local/lib/python3.7/dist-packages (from huggingface-hub<1.0,>=0.1.0->transformers) (3.10.0.2)\nRequirement already satisfied: pyparsing!=3.0.5,>=2.0.2 in /usr/local/lib/python3.7/dist-packages (from packaging>=20.0->transformers) (3.0.7)\nRequirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.7/dist-packages (from importlib-metadata->transformers) (3.7.0)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests->transformers) (2021.10.8)\nRequirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests->transformers) (3.0.4)\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests->transformers) (1.24.3)\nRequirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests->transformers) (2.10)\nRequirement already satisfied: click in /usr/local/lib/python3.7/dist-packages (from sacremoses->transformers) (7.1.2)\nRequirement already satisfied: joblib in /usr/local/lib/python3.7/dist-packages (from sacremoses->transformers) (1.1.0)\nRequirement already satisfied: six in /usr/local/lib/python3.7/dist-packages (from sacremoses->transformers) (1.15.0)\nInstalling collected packages: pyyaml, tokenizers, sacremoses, huggingface-hub, transformers\n Attempting uninstall: pyyaml\n Found existing installation: PyYAML 3.13\n Uninstalling PyYAML-3.13:\n Successfully uninstalled PyYAML-3.13\nSuccessfully installed 
huggingface-hub-0.4.0 pyyaml-6.0 sacremoses-0.0.49 tokenizers-0.11.6 transformers-4.17.0\nRequirement already satisfied: transformers[sentencepiece] in /usr/local/lib/python3.7/dist-packages (4.17.0)\nRequirement already satisfied: sacremoses in /usr/local/lib/python3.7/dist-packages (from transformers[sentencepiece]) (0.0.49)\nRequirement already satisfied: filelock in /usr/local/lib/python3.7/dist-packages (from transformers[sentencepiece]) (3.6.0)\nRequirement already satisfied: tqdm>=4.27 in /usr/local/lib/python3.7/dist-packages (from transformers[sentencepiece]) (4.63.0)\nRequirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.7/dist-packages (from transformers[sentencepiece]) (21.3)\nRequirement already satisfied: importlib-metadata in /usr/local/lib/python3.7/dist-packages (from transformers[sentencepiece]) (4.11.3)\nRequirement already satisfied: requests in /usr/local/lib/python3.7/dist-packages (from transformers[sentencepiece]) (2.23.0)\nRequirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.7/dist-packages (from transformers[sentencepiece]) (2019.12.20)\nRequirement already satisfied: pyyaml>=5.1 in /usr/local/lib/python3.7/dist-packages (from transformers[sentencepiece]) (6.0)\nRequirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.7/dist-packages (from transformers[sentencepiece]) (1.21.5)\nRequirement already satisfied: huggingface-hub<1.0,>=0.1.0 in /usr/local/lib/python3.7/dist-packages (from transformers[sentencepiece]) (0.4.0)\nRequirement already satisfied: tokenizers!=0.11.3,>=0.11.1 in /usr/local/lib/python3.7/dist-packages (from transformers[sentencepiece]) (0.11.6)\nCollecting sentencepiece!=0.1.92,>=0.1.91\n Downloading sentencepiece-0.1.96-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (1.2 MB)\n\u001b[K |████████████████████████████████| 1.2 MB 14.4 MB/s \n\u001b[?25hRequirement already satisfied: protobuf in /usr/local/lib/python3.7/dist-packages (from transformers[sentencepiece]) (3.17.3)\nRequirement already satisfied: typing-extensions>=3.7.4.3 in /usr/local/lib/python3.7/dist-packages (from huggingface-hub<1.0,>=0.1.0->transformers[sentencepiece]) (3.10.0.2)\nRequirement already satisfied: pyparsing!=3.0.5,>=2.0.2 in /usr/local/lib/python3.7/dist-packages (from packaging>=20.0->transformers[sentencepiece]) (3.0.7)\nRequirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.7/dist-packages (from importlib-metadata->transformers[sentencepiece]) (3.7.0)\nRequirement already satisfied: six>=1.9 in /usr/local/lib/python3.7/dist-packages (from protobuf->transformers[sentencepiece]) (1.15.0)\nRequirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests->transformers[sentencepiece]) (2.10)\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests->transformers[sentencepiece]) (1.24.3)\nRequirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests->transformers[sentencepiece]) (3.0.4)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests->transformers[sentencepiece]) (2021.10.8)\nRequirement already satisfied: click in /usr/local/lib/python3.7/dist-packages (from sacremoses->transformers[sentencepiece]) (7.1.2)\nRequirement already satisfied: joblib in /usr/local/lib/python3.7/dist-packages (from sacremoses->transformers[sentencepiece]) (1.1.0)\nInstalling collected packages: 
sentencepiece\nSuccessfully installed sentencepiece-0.1.96\nCollecting datasets\n Downloading datasets-2.0.0-py3-none-any.whl (325 kB)\n\u001b[K |████████████████████████████████| 325 kB 14.5 MB/s \n\u001b[?25hCollecting xxhash\n Downloading xxhash-3.0.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (212 kB)\n\u001b[K |████████████████████████████████| 212 kB 70.9 MB/s \n\u001b[?25hRequirement already satisfied: multiprocess in /usr/local/lib/python3.7/dist-packages (from datasets) (0.70.12.2)\nRequirement already satisfied: huggingface-hub<1.0.0,>=0.1.0 in /usr/local/lib/python3.7/dist-packages (from datasets) (0.4.0)\nRequirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.7/dist-packages (from datasets) (1.21.5)\nRequirement already satisfied: requests>=2.19.0 in /usr/local/lib/python3.7/dist-packages (from datasets) (2.23.0)\nRequirement already satisfied: pyarrow>=5.0.0 in /usr/local/lib/python3.7/dist-packages (from datasets) (6.0.1)\nRequirement already satisfied: tqdm>=4.62.1 in /usr/local/lib/python3.7/dist-packages (from datasets) (4.63.0)\nCollecting responses<0.19\n Downloading responses-0.18.0-py3-none-any.whl (38 kB)\nRequirement already satisfied: dill in /usr/local/lib/python3.7/dist-packages (from datasets) (0.3.4)\nRequirement already satisfied: importlib-metadata in /usr/local/lib/python3.7/dist-packages (from datasets) (4.11.3)\nRequirement already satisfied: packaging in /usr/local/lib/python3.7/dist-packages (from datasets) (21.3)\nRequirement already satisfied: pandas in /usr/local/lib/python3.7/dist-packages (from datasets) (1.3.5)\nCollecting aiohttp\n Downloading aiohttp-3.8.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl (1.1 MB)\n\u001b[K |████████████████████████████████| 1.1 MB 62.9 MB/s \n\u001b[?25hCollecting fsspec[http]>=2021.05.0\n Downloading fsspec-2022.2.0-py3-none-any.whl (134 kB)\n\u001b[K |████████████████████████████████| 134 kB 74.3 MB/s \n\u001b[?25hRequirement already satisfied: filelock in /usr/local/lib/python3.7/dist-packages (from huggingface-hub<1.0.0,>=0.1.0->datasets) (3.6.0)\nRequirement already satisfied: pyyaml in /usr/local/lib/python3.7/dist-packages (from huggingface-hub<1.0.0,>=0.1.0->datasets) (6.0)\nRequirement already satisfied: typing-extensions>=3.7.4.3 in /usr/local/lib/python3.7/dist-packages (from huggingface-hub<1.0.0,>=0.1.0->datasets) (3.10.0.2)\nRequirement already satisfied: pyparsing!=3.0.5,>=2.0.2 in /usr/local/lib/python3.7/dist-packages (from packaging->datasets) (3.0.7)\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests>=2.19.0->datasets) (1.24.3)\nRequirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests>=2.19.0->datasets) (3.0.4)\nRequirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests>=2.19.0->datasets) (2.10)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests>=2.19.0->datasets) (2021.10.8)\nCollecting urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1\n Downloading urllib3-1.25.11-py2.py3-none-any.whl (127 kB)\n\u001b[K |████████████████████████████████| 127 kB 76.1 MB/s \n\u001b[?25hCollecting asynctest==0.13.0\n Downloading asynctest-0.13.0-py3-none-any.whl (26 kB)\nRequirement already satisfied: attrs>=17.3.0 in /usr/local/lib/python3.7/dist-packages (from aiohttp->datasets) (21.4.0)\nCollecting multidict<7.0,>=4.5\n 
Downloading multidict-6.0.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (94 kB)\n\u001b[K |████████████████████████████████| 94 kB 4.6 MB/s \n\u001b[?25hCollecting yarl<2.0,>=1.0\n Downloading yarl-1.7.2-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl (271 kB)\n\u001b[K |████████████████████████████████| 271 kB 65.7 MB/s \n\u001b[?25hCollecting frozenlist>=1.1.1\n Downloading frozenlist-1.3.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl (144 kB)\n\u001b[K |████████████████████████████████| 144 kB 78.0 MB/s \n\u001b[?25hCollecting async-timeout<5.0,>=4.0.0a3\n Downloading async_timeout-4.0.2-py3-none-any.whl (5.8 kB)\nCollecting aiosignal>=1.1.2\n Downloading aiosignal-1.2.0-py3-none-any.whl (8.2 kB)\nRequirement already satisfied: charset-normalizer<3.0,>=2.0 in /usr/local/lib/python3.7/dist-packages (from aiohttp->datasets) (2.0.12)\nRequirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.7/dist-packages (from importlib-metadata->datasets) (3.7.0)\nRequirement already satisfied: pytz>=2017.3 in /usr/local/lib/python3.7/dist-packages (from pandas->datasets) (2018.9)\nRequirement already satisfied: python-dateutil>=2.7.3 in /usr/local/lib/python3.7/dist-packages (from pandas->datasets) (2.8.2)\nRequirement already satisfied: six>=1.5 in /usr/local/lib/python3.7/dist-packages (from python-dateutil>=2.7.3->pandas->datasets) (1.15.0)\nInstalling collected packages: multidict, frozenlist, yarl, urllib3, asynctest, async-timeout, aiosignal, fsspec, aiohttp, xxhash, responses, datasets\n Attempting uninstall: urllib3\n Found existing installation: urllib3 1.24.3\n Uninstalling urllib3-1.24.3:\n Successfully uninstalled urllib3-1.24.3\n\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\ndatascience 0.10.6 requires folium==0.2.1, but you have folium 0.8.3 which is incompatible.\u001b[0m\nSuccessfully installed aiohttp-3.8.1 aiosignal-1.2.0 async-timeout-4.0.2 asynctest-0.13.0 datasets-2.0.0 frozenlist-1.3.0 fsspec-2022.2.0 multidict-6.0.2 responses-0.18.0 urllib3-1.25.11 xxhash-3.0.0 yarl-1.7.2\n" ] ], [ [ "We begin by loading the emotion data set from the Hugging Face hub. Emotion is a dataset of English Twitter messages with six basic emotions: anger, fear, joy, love, sadness, and surprise. The following code loads the emotion data set from the Hugging Face hub.", "_____no_output_____" ] ], [ [ "# HIDE OUTPUT\nfrom datasets import load_dataset\n\nemotions = load_dataset(\"emotion\")", "_____no_output_____" ] ], [ [ "You can see a single observation from the training data set here. This observation includes both the text sample and the assigned emotion label. The label is a numeric index representing the assigned emotion.", "_____no_output_____" ] ], [ [ "emotions['train'][2]", "_____no_output_____" ] ], [ [ "We can display the labels in order of their index labels.", "_____no_output_____" ] ], [ [ "emotions['train'].features", "_____no_output_____" ] ], [ [ "Next, we utilize Hugging Face tokenizers and data sets together. The following code tokenizes the entire emotion data set. 
You can see below that the code has transformed the training set into subword tokens that are now ready to be used in conjunction with a transformer for either inference or training.", "_____no_output_____" ] ], [ [ "# HIDE OUTPUT\nfrom transformers import AutoTokenizer\n\ndef tokenize(rows):\n    return tokenizer(rows['text'], padding=\"max_length\", truncation=True)\n\nmodel_ckpt = \"distilbert-base-uncased\"\ntokenizer = AutoTokenizer.from_pretrained(model_ckpt)\n\nemotions.set_format(type=None)\n\ntokenized_datasets = emotions.map(tokenize, batched=True)", "_____no_output_____" ] ], [ [ "We will utilize the Hugging Face DefaultDataCollator to transform the emotion data set into TensorFlow type data that we can use to finetune a neural network.", "_____no_output_____" ] ], [ [ "from transformers import DefaultDataCollator\n\ndata_collator = DefaultDataCollator(return_tensors=\"tf\")", "_____no_output_____" ] ], [ [ "Now we generate a shuffled training and evaluation data set.", "_____no_output_____" ] ], [ [ "small_train_dataset = tokenized_datasets[\"train\"].shuffle(seed=42)\nsmall_eval_dataset = tokenized_datasets[\"test\"].shuffle(seed=42)", "_____no_output_____" ] ], [ [ "We can now generate the TensorFlow data sets. We specify which columns should map to the input features and labels. We already shuffled the rows above; the training pipeline also reshuffles its batches each epoch (`shuffle=True`), while the validation set keeps a fixed order (`shuffle=False`).", "_____no_output_____" ] ], [ [ "tf_train_dataset = small_train_dataset.to_tf_dataset(\n    columns=[\"attention_mask\", \"input_ids\", \"token_type_ids\"],\n    label_cols=[\"labels\"],\n    shuffle=True,\n    collate_fn=data_collator,\n    batch_size=8,\n)\n\ntf_validation_dataset = small_eval_dataset.to_tf_dataset(\n    columns=[\"attention_mask\", \"input_ids\", \"token_type_ids\"],\n    label_cols=[\"labels\"],\n    shuffle=False,\n    collate_fn=data_collator,\n    batch_size=8,\n)", "_____no_output_____" ] ], [ [ "We will now load the distilbert model for classification. We will adjust the pretrained weights to predict the emotions of text lines.", "_____no_output_____" ] ], [ [ "# HIDE OUTPUT\nimport tensorflow as tf\nfrom transformers import TFAutoModelForSequenceClassification\n\nmodel = TFAutoModelForSequenceClassification.from_pretrained(\\\n    \"distilbert-base-uncased\", num_labels=6) ", "_____no_output_____" ] ], [ [ "We now train the neural network. 
Because the network is already pretrained, we use a small learning rate.", "_____no_output_____" ] ], [ [ "model.compile(\n optimizer=tf.keras.optimizers.Adam(learning_rate=5e-5),\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=tf.metrics.SparseCategoricalAccuracy(),\n)\n\nmodel.fit(tf_train_dataset, validation_data=tf_validation_dataset, \\\n epochs=5)", "Epoch 1/5\n2000/2000 [==============================] - 360s 174ms/step - loss: 0.3720 - sparse_categorical_accuracy: 0.8669 - val_loss: 0.1728 - val_sparse_categorical_accuracy: 0.9180\nEpoch 2/5\n2000/2000 [==============================] - 347s 174ms/step - loss: 0.1488 - sparse_categorical_accuracy: 0.9338 - val_loss: 0.1496 - val_sparse_categorical_accuracy: 0.9295\nEpoch 3/5\n2000/2000 [==============================] - 347s 173ms/step - loss: 0.1253 - sparse_categorical_accuracy: 0.9420 - val_loss: 0.1617 - val_sparse_categorical_accuracy: 0.9245\nEpoch 4/5\n2000/2000 [==============================] - 346s 173ms/step - loss: 0.1092 - sparse_categorical_accuracy: 0.9486 - val_loss: 0.1654 - val_sparse_categorical_accuracy: 0.9295\nEpoch 5/5\n2000/2000 [==============================] - 347s 173ms/step - loss: 0.0960 - sparse_categorical_accuracy: 0.9585 - val_loss: 0.1830 - val_sparse_categorical_accuracy: 0.9220\n" ] ] ]
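Although the notebook stops after `fit`, a natural follow-up is to run the fine-tuned classifier on new text. The sketch below is an addition, not part of the original notebook: it assumes the `model` and `tokenizer` objects from the cells above are still in scope, the example sentence is made up, and the label order is the one defined by the Hugging Face `emotion` dataset.

```python
# Minimal inference sketch (an addition, not original notebook code).
# Assumes the fine-tuned `model` and `tokenizer` from above are in scope.
import tensorflow as tf

# Label order as defined by the Hugging Face "emotion" dataset.
labels = ["sadness", "joy", "love", "anger", "fear", "surprise"]

text = "i am so happy you came to visit"  # hypothetical example sentence
inputs = tokenizer(text, return_tensors="tf", truncation=True)

# The TF sequence-classification model returns raw logits;
# softmax converts them into class probabilities.
probs = tf.nn.softmax(model(inputs).logits, axis=-1).numpy()[0]
print(labels[int(probs.argmax())], float(probs.max()))
```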
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
e7d146f7d4bf694bc345d7f87de3ccd371d97ceb
7,834
ipynb
Jupyter Notebook
src/bayes/proportion/cross_table/paired_axb.ipynb
shigeodayo/ex_design_analysis
58eda66a2b77d17f8443a286af4a7090111b072c
[ "MIT" ]
10
2020-05-24T12:09:54.000Z
2021-03-03T10:14:52.000Z
src/bayes/proportion/cross_table/paired_axb.ipynb
shigeodayo/ex_design_analysis
58eda66a2b77d17f8443a286af4a7090111b072c
[ "MIT" ]
6
2020-05-24T13:14:09.000Z
2022-03-12T00:53:24.000Z
src/bayes/proportion/cross_table/paired_axb.ipynb
shigeodayo/ex_design_analysis
58eda66a2b77d17f8443a286af4a7090111b072c
[ "MIT" ]
1
2020-05-26T05:42:52.000Z
2020-05-26T05:42:52.000Z
27.978571
211
0.459791
[ [ [ "# Paired a x b cross table\nAlternative of z-test and chi-square test", "_____no_output_____" ] ], [ [ "# Enable the commands below when running this program on Google Colab.\n# !pip install arviz==0.7\n# !pip install pymc3==3.8\n# !pip install Theano==1.0.4\n\nimport numpy as np\nimport pandas as pd\nfrom scipy import stats\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nimport pymc3 as pm\n\nimport math\n\nplt.style.use('seaborn-darkgrid')\nnp.set_printoptions(precision=3)\npd.set_option('display.precision', 3)", "_____no_output_____" ] ], [ [ "## Q. A restaurant counted up what wines (red, rose, and white) customers chose for their main dishes (roast veal, pasta gorgonzola, and sole meuniere). Analyze the relationship between main dish and wine.", "_____no_output_____" ] ], [ [ "a = 3 # Kinds of man dishes\nb = 3 # Kinds of wines\ndata = pd.DataFrame([[19, 12, 6], [8, 8, 4], [15, 19, 18]], columns=['Veal', 'Pasta', 'Sole'], index=['Red', 'Rose', 'White'])\nobserved = [data['Veal']['Red'], \n data['Pasta']['Red'], \n data['Sole']['Red'],\n data['Veal']['Rose'], \n data['Pasta']['Rose'], \n data['Sole']['Rose'],\n data['Veal']['White'], \n data['Pasta']['White'], \n data['Sole']['White']]\ndisplay(data)\nN = data.sum().sum()", "_____no_output_____" ] ], [ [ "## Bayesian analysis", "_____no_output_____" ] ], [ [ "with pm.Model() as model:\n # Prior distribution\n p_ = pm.Uniform('p_', 0, 1, shape=(a * b))\n p = pm.Deterministic('p', p_ / pm.math.sum(p_))\n\n # Likelihood\n x = pm.Multinomial('x', n=N, p=p, observed=observed)\n\n # Marginal probability\n p1d = pm.Deterministic('p1d', p[0] + p[1] + p[2]) # p1. = p11 + p12 + p13\n p2d = pm.Deterministic('p2d', p[3] + p[4] + p[5]) # p2. = p21 + p22 + p23\n p3d = pm.Deterministic('p3d', p[6] + p[7] + p[8]) # p3. 
= p31 + p32 + p33\n\n    pd1 = pm.Deterministic('pd1', p[0] + p[3] + p[6]) # p.1 = p11 + p21 + p31\n    pd2 = pm.Deterministic('pd2', p[1] + p[4] + p[7]) # p.2 = p12 + p22 + p32\n    pd3 = pm.Deterministic('pd3', p[2] + p[5] + p[8]) # p.3 = p13 + p23 + p33\n\n    # Pearson's residual\n    pp = [p1d * pd1, p1d * pd2, p1d * pd3, \n          p2d * pd1, p2d * pd2, p2d * pd3, \n          p3d * pd1, p3d * pd2, p3d * pd3]\n    e = pm.Deterministic('e', (p - pp) / pm.math.sqrt(pp))\n\n    # Cramer's association coefficient\n    V = pm.Deterministic('V', pm.math.sqrt(pm.math.sum(e**2) / (min(a, b) - 1)))\n\n    trace = pm.sample(21000, chains=5)", "_____no_output_____" ], [ "chain = trace[1000:]\npm.traceplot(chain)\nplt.show()", "_____no_output_____" ], [ "pm.summary(chain, var_names=['p', 'V', 'p1d', 'p2d', 'p3d', 'pd1', 'pd2', 'pd3'])", "_____no_output_____" ] ], [ [ "### Independence and association", "_____no_output_____" ] ], [ [ "plt.boxplot(\n    [chain['e'][:,0],\n     chain['e'][:,1],\n     chain['e'][:,2],\n     chain['e'][:,3],\n     chain['e'][:,4],\n     chain['e'][:,5],\n     chain['e'][:,6],\n     chain['e'][:,7],\n     chain['e'][:,8],],\n    labels=['e11', 'e12', 'e13', 'e21', 'e22', 'e23', 'e31', 'e32', 'e33'])\nplt.show()", "_____no_output_____" ], [ "print(\"Cramer's association coefficient: {:.3f}\".format(chain['V'].mean()))\n# 1.0 - 0.5: strong association\n# 0.5 - 0.25: association\n# 0.25 - 0.1: weak association\n# 0.1 > : very weak association\n# 0: no association", "_____no_output_____" ], [ "egz = pd.DataFrame(\n    [[(chain['e'][:,0] > 0).mean(), (chain['e'][:,1] > 0).mean(), (chain['e'][:,2] > 0).mean()],\n     [(chain['e'][:,3] > 0).mean(), (chain['e'][:,4] > 0).mean(), (chain['e'][:,5] > 0).mean()],\n     [(chain['e'][:,6] > 0).mean(), (chain['e'][:,7] > 0).mean(), (chain['e'][:,8] > 0).mean()]\n    ],\n    columns=['Veal', 'Pasta', 'Sole'],\n    index=['Red', 'Rose', 'White']\n)\n\nelz = pd.DataFrame(\n    [[(chain['e'][:,0] < 0).mean(), (chain['e'][:,1] < 0).mean(), (chain['e'][:,2] < 0).mean()],\n     [(chain['e'][:,3] < 0).mean(), (chain['e'][:,4] < 0).mean(), (chain['e'][:,5] < 0).mean()],\n     [(chain['e'][:,6] < 0).mean(), (chain['e'][:,7] < 0).mean(), (chain['e'][:,8] < 0).mean()]\n    ],\n    columns=['Veal', 'Pasta', 'Sole'],\n    index=['Red', 'Rose', 'White']\n)\n\nprint('e > 0')\ndisplay(egz)\nprint('e < 0')\ndisplay(elz)", "_____no_output_____" ] ], [ [ "### RQ1: Customers who chose the veal dish pick red and avoid white, while customers who chose the sole dish pick white and avoid red", "_____no_output_____" ] ], [ [ "val_1 = (chain['e'][:,0] > 0).mean() * (chain['e'][:,8] > 0).mean() * (chain['e'][:,6] < 0).mean() * (chain['e'][:,2] < 0).mean()\nprint('Probability: {:.3f} %'.format(val_1 * 100))", "_____no_output_____" ] ], [ [ "### RQ2: Customers who chose the veal dish pick red and avoid white, while customers who chose the sole dish pick white", "_____no_output_____" ] ], [ [ "val_2 = (chain['e'][:,0] > 0).mean() * (chain['e'][:,8] > 0).mean() * (chain['e'][:,6] < 0).mean()\nprint('Probability: {:.3f} %'.format(val_2 * 100))", "_____no_output_____" ] ], [ [ "### RQ3: Customers who chose the veal dish pick red, and customers who chose the sole dish pick white", "_____no_output_____" ] ], [ [ "val_3 = (chain['e'][:,0] > 0).mean() * (chain['e'][:,8] > 0).mean()\nprint('Probability: {:.3f} %'.format(val_3 * 100))", "_____no_output_____" ] ] ]
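As a cross-check on the Bayesian estimate of Cramer's association coefficient above, the classical chi-square machinery gives a quick frequentist version of the same quantity. This sketch is an addition to the original notebook; it assumes the `data` DataFrame and the `a`, `b`, and `N` variables defined earlier.

```python
# Frequentist cross-check (an addition, not part of the original analysis):
# chi-square test of independence plus the classical Cramer's V.
import numpy as np
from scipy import stats

chi2, pval, dof, expected = stats.chi2_contingency(data.values)
cramers_v = np.sqrt(chi2 / (N * (min(a, b) - 1)))
print("chi2 = {:.2f}, p = {:.4f}, Cramer's V = {:.3f}".format(chi2, pval, cramers_v))
```

The posterior mean of `V` reported by `pm.summary` should land in the same neighborhood as this classical point estimate.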
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
e7d15f049aeb708eb5f5e03987fc10d8d1cec074
373,982
ipynb
Jupyter Notebook
3547_03_Code.ipynb
varunmuriyanat/Python-Machine-Learning
b5e3340195c90d43db8dadb9073b648462721025
[ "MIT" ]
47
2016-12-22T00:08:52.000Z
2022-02-28T15:39:31.000Z
3547_03_Code.ipynb
varunmuriyanat/Python-Machine-Learning
b5e3340195c90d43db8dadb9073b648462721025
[ "MIT" ]
null
null
null
3547_03_Code.ipynb
varunmuriyanat/Python-Machine-Learning
b5e3340195c90d43db8dadb9073b648462721025
[ "MIT" ]
43
2017-01-17T17:16:40.000Z
2022-03-07T14:46:49.000Z
326.336824
41,138
0.921611
[ [ [ "Sebastian Raschka, 2015", "_____no_output_____" ], [ "#Python Machine Learning Essentials", "_____no_output_____" ], [ "# Chapter 3 - A Tour of Machine Learning Classifiers Using Scikit-Learn", "_____no_output_____" ], [ "Note that the optional watermark extension is a small IPython notebook plugin that I developed to make the code reproducible. You can just skip the following line(s).", "_____no_output_____" ] ], [ [ "%load_ext watermark\n%watermark -a 'Sebastian Raschka' -u -d -v -p numpy,pandas,matplotlib,scikit-learn", "Sebastian Raschka \nLast updated: 08/21/2015 \n\nCPython 3.4.3\nIPython 3.2.1\n\nnumpy 1.9.2\npandas 0.16.2\nmatplotlib 1.4.3\nscikit-learn 0.16.1\n" ], [ "# to install watermark just uncomment the following line:\n#%install_ext https://raw.githubusercontent.com/rasbt/watermark/master/watermark.py", "_____no_output_____" ] ], [ [ "### Sections\n\n- [First steps with scikit-learn](#First-steps-with-scikit-learn)\n - [Loading and preprocessing the data](#Loading-and-preprocessing-the-data )\n - [Training a perceptron via scikit-learn](#Training-a-perceptron-via-scikit-learn)\n- [Modeling class probabilities via logistic regression](#Modeling-class-probabilities-via-logistic-regression)\n- [Maximum margin classification with support vector machines](#Maximum-margin-classification-with-support-vector-machines)\n- [Solving non-linear problems using a kernel SVM](#Solving-non-linear-problems-using-a-kernel-SVM)\n- [Decision trees learning](#Decision-trees-learning)\n- [Combining weak to strong learners via random forests](#Combining-weak-to-strong-learners-via-random-forests)\n- [K-nearest neighbors - a lazy learning algorithm](#K-nearest-neighbors---a-lazy-learning-algorithm)", "_____no_output_____" ], [ "<br>\n<br>", "_____no_output_____" ], [ "<br>\n<br>", "_____no_output_____" ], [ "# First steps with scikit-learn", "_____no_output_____" ], [ "[[back to top](#Sections)]", "_____no_output_____" ], [ "## Loading and preprocessing the data", "_____no_output_____" ], [ "[[back to top](#Sections)]", "_____no_output_____" ], [ "Loading the Iris dataset from scikit-learn. Here, the third column represents the petal length, and the fourth column the petal width of the flower samples. 
The classes are already converted to integer labels where 0=Iris-Setosa, 1=Iris-Versicolor, 2=Iris-Virginica.", "_____no_output_____" ] ], [ [ "from sklearn import datasets\nimport numpy as np\n\niris = datasets.load_iris()\nX = iris.data[:, [2, 3]]\ny = iris.target\n\nprint('Class labels:', np.unique(y))", "Class labels: [0 1 2]\n" ] ], [ [ "Splitting data into 70% training and 30% test data:", "_____no_output_____" ] ], [ [ "from sklearn.cross_validation import train_test_split\n\nX_train, X_test, y_train, y_test = train_test_split(\n    X, y, test_size=0.3, random_state=0)", "_____no_output_____" ] ], [ [ "Standardizing the features:", "_____no_output_____" ] ], [ [ "from sklearn.preprocessing import StandardScaler\n\nsc = StandardScaler()\nsc.fit(X_train)\nX_train_std = sc.transform(X_train)\nX_test_std = sc.transform(X_test)", "_____no_output_____" ] ], [ [ "<br>\n<br>", "_____no_output_____" ], [ "## Training a perceptron via scikit-learn", "_____no_output_____" ], [ "[[back to top](#Sections)]", "_____no_output_____" ], [ "Training the perceptron and redefining the `plot_decision_regions` function from chapter 2:", "_____no_output_____" ] ], [ [ "from sklearn.linear_model import Perceptron\n\nppn = Perceptron(n_iter=40, eta0=0.1, random_state=0)\nppn.fit(X_train_std, y_train)", "_____no_output_____" ], [ "y_test.shape", "_____no_output_____" ], [ "y_pred = ppn.predict(X_test_std)\nprint('Misclassified samples: %d' % (y_test != y_pred).sum())", "Misclassified samples: 4\n" ], [ "from sklearn.metrics import accuracy_score\n\nprint('Accuracy: %.2f' % accuracy_score(y_test, y_pred))", "Accuracy: 0.91\n" ], [ "from matplotlib.colors import ListedColormap\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\ndef plot_decision_regions(X, y, classifier, test_idx=None, resolution=0.02):\n\n    # setup marker generator and color map\n    markers = ('s', 'x', 'o', '^', 'v')\n    colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')\n    cmap = ListedColormap(colors[:len(np.unique(y))])\n\n    # plot the decision surface\n    x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1\n    x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),\n                           np.arange(x2_min, x2_max, resolution))\n    Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)\n    Z = Z.reshape(xx1.shape)\n    plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)\n    plt.xlim(xx1.min(), xx1.max())\n    plt.ylim(xx2.min(), xx2.max())\n\n    # plot all samples\n    for idx, cl in enumerate(np.unique(y)):\n        plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1],\n                    alpha=0.8, c=cmap(idx),\n                    marker=markers[idx], label=cl)\n    \n    # highlight test samples\n    if test_idx:\n        X_test, y_test = X[test_idx, :], y[test_idx]   \n        plt.scatter(X_test[:, 0], X_test[:, 1], c='', \n                alpha=1.0, linewidth=1, marker='o', \n                s=55, label='test set')", "_____no_output_____" ] ], [ [ "Plotting the decision regions of the perceptron trained on the standardized data:", "_____no_output_____" ] ], [ [ "%matplotlib inline\n\nX_combined_std = np.vstack((X_train_std, X_test_std))\ny_combined = np.hstack((y_train, y_test))\n\nplot_decision_regions(X=X_combined_std, y=y_combined, \n                      classifier=ppn, test_idx=range(105,150))\nplt.xlabel('petal length [standardized]')\nplt.ylabel('petal width [standardized]')\nplt.legend(loc='upper left')\n\nplt.tight_layout()\n# plt.savefig('./figures/iris_perceptron_scikit.png', dpi=300)\nplt.show()", "_____no_output_____" ] ], [ [ "<br>\n<br>", "_____no_output_____" ], [ "# Modeling class probabilities via logistic 
regression", "_____no_output_____" ], [ "[[back to top](#Sections)]", "_____no_output_____" ], [ "Plot sigmoid function:", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef sigmoid(z):\n return 1.0 / (1.0 + np.exp(-z))\n\nz = np.arange(-7, 7, 0.1)\nphi_z = sigmoid(z)\n\nplt.plot(z, phi_z)\nplt.axvline(0.0, color='k')\nplt.ylim(-0.1, 1.1)\nplt.xlabel('z')\nplt.ylabel('$\\phi (z)$')\n\n# y axis ticks and gridline\nplt.yticks([0.0, 0.5, 1.0])\nax = plt.gca()\nax.yaxis.grid(True)\n\nplt.tight_layout()\n# plt.savefig('./figures/sigmoid.png', dpi=300)\nplt.show()", "_____no_output_____" ] ], [ [ "Plot cost function:", "_____no_output_____" ] ], [ [ "def cost_1(z):\n return - np.log(sigmoid(z))\n \ndef cost_0(z):\n return - np.log(1 - sigmoid(z))\n\nz = np.arange(-10, 10, 0.1)\nphi_z = sigmoid(z)\n\nc1 = [cost_1(x) for x in z]\nplt.plot(phi_z, c1, label='J(w) if y=1')\n\nc0 = [cost_0(x) for x in z]\nplt.plot(phi_z, c0, linestyle='--', label='J(w) if y=0')\n\nplt.ylim(0.0, 5.1)\nplt.xlim([0, 1])\nplt.xlabel('$\\phi$(z)')\nplt.ylabel('J(w)')\nplt.legend(loc='best')\nplt.tight_layout()\n# plt.savefig('./figures/log_cost.png', dpi=300)\nplt.show()", "_____no_output_____" ], [ "from sklearn.linear_model import LogisticRegression\n\nlr = LogisticRegression(C=1000.0, random_state=0)\nlr.fit(X_train_std, y_train)\n\nplot_decision_regions(X_combined_std, y_combined, \n classifier=lr, test_idx=range(105,150))\nplt.xlabel('petal length [standardized]')\nplt.ylabel('petal width [standardized]')\nplt.legend(loc='upper left')\nplt.tight_layout()\n# plt.savefig('./figures/logistic_regression.png', dpi=300)\nplt.show()", "_____no_output_____" ], [ "lr.predict_proba(X_test_std[0,:])", "_____no_output_____" ] ], [ [ "Regularization path:", "_____no_output_____" ] ], [ [ "weights, params = [], []\nfor c in np.arange(-5, 5):\n lr = LogisticRegression(C=10**c, random_state=0)\n lr.fit(X_train_std, y_train)\n weights.append(lr.coef_[1])\n params.append(10**c)\n\nweights = np.array(weights)\nplt.plot(params, weights[:, 0], \n label='petal length')\nplt.plot(params, weights[:, 1], linestyle='--', \n label='petal width')\nplt.ylabel('weight coefficient')\nplt.xlabel('C')\nplt.legend(loc='upper left')\nplt.xscale('log')\n# plt.savefig('./figures/regression_path.png', dpi=300)\nplt.show()", "_____no_output_____" ] ], [ [ "<br>\n<br>", "_____no_output_____" ], [ "# Maximum margin classification with support vector machines", "_____no_output_____" ], [ "[[back to top](#Sections)]", "_____no_output_____" ] ], [ [ "from sklearn.svm import SVC\n\nsvm = SVC(kernel='linear', C=1.0, random_state=0)\nsvm.fit(X_train_std, y_train)\n\nplot_decision_regions(X_combined_std, y_combined, \n classifier=svm, test_idx=range(105,150))\nplt.xlabel('petal length [standardized]')\nplt.ylabel('petal width [standardized]')\nplt.legend(loc='upper left')\nplt.tight_layout()\n# plt.savefig('./figures/support_vector_machine_linear.png', dpi=300)\nplt.show()", "_____no_output_____" ] ], [ [ "<br>\n<br>", "_____no_output_____" ], [ "# Solving non-linear problems using a kernel SVM", "_____no_output_____" ], [ "[[back to top](#Sections)]", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nimport numpy as np\n%matplotlib inline\n\nnp.random.seed(0)\nX_xor = np.random.randn(200, 2)\ny_xor = np.logical_xor(X_xor[:, 0] > 0, X_xor[:, 1] > 0)\ny_xor = np.where(y_xor, 1, -1)\n\nplt.scatter(X_xor[y_xor==1, 0], X_xor[y_xor==1, 1], c='b', marker='x', 
label='1')\nplt.scatter(X_xor[y_xor==-1, 0], X_xor[y_xor==-1, 1], c='r', marker='s', label='-1')\n\nplt.xlim([-3, 3])\nplt.ylim([-3, 3])\nplt.legend(loc='best')\nplt.tight_layout()\n# plt.savefig('./figures/xor.png', dpi=300)\nplt.show()", "_____no_output_____" ], [ "svm = SVC(kernel='rbf', random_state=0, gamma=0.10, C=10.0)\nsvm.fit(X_xor, y_xor)\nplot_decision_regions(X_xor, y_xor, \n classifier=svm)\n\nplt.legend(loc='upper left')\nplt.tight_layout()\n# plt.savefig('./figures/support_vector_machine_rbf_xor.png', dpi=300)\nplt.show()", "_____no_output_____" ], [ "from sklearn.svm import SVC\n\nsvm = SVC(kernel='rbf', random_state=0, gamma=0.2, C=1.0)\nsvm.fit(X_train_std, y_train)\n\nplot_decision_regions(X_combined_std, y_combined, \n classifier=svm, test_idx=range(105,150))\nplt.xlabel('petal length [standardized]')\nplt.ylabel('petal width [standardized]')\nplt.legend(loc='upper left')\nplt.tight_layout()\n# plt.savefig('./figures/support_vector_machine_rbf_iris_1.png', dpi=300)\nplt.show()", "_____no_output_____" ], [ "svm = SVC(kernel='rbf', random_state=0, gamma=100.0, C=1.0)\nsvm.fit(X_train_std, y_train)\n\nplot_decision_regions(X_combined_std, y_combined, \n classifier=svm, test_idx=range(105,150))\nplt.xlabel('petal length [standardized]')\nplt.ylabel('petal width [standardized]')\nplt.legend(loc='upper left')\nplt.tight_layout()\n# plt.savefig('./figures/support_vector_machine_rbf_iris_2.png', dpi=300)\nplt.show()", "_____no_output_____" ] ], [ [ "<br>\n<br>", "_____no_output_____" ], [ "# Decision trees learning", "_____no_output_____" ] ], [ [ "from sklearn.tree import DecisionTreeClassifier\n\ntree = DecisionTreeClassifier(criterion='entropy', max_depth=3, random_state=0)\ntree.fit(X_train, y_train)\n\nX_combined = np.vstack((X_train, X_test))\ny_combined = np.hstack((y_train, y_test))\nplot_decision_regions(X_combined, y_combined, \n classifier=tree, test_idx=range(105,150))\n\nplt.xlabel('petal length [cm]')\nplt.ylabel('petal width [cm]')\nplt.legend(loc='upper left')\nplt.tight_layout()\n# plt.savefig('./figures/decision_tree_decision.png', dpi=300)\nplt.show()", "_____no_output_____" ] ], [ [ "[[back to top](#Sections)]", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nimport numpy as np\n%matplotlib inline\n\ndef gini(p):\n return (p)*(1 - (p)) + (1-p)*(1 - (1-p))\n\ndef entropy(p):\n return - p*np.log2(p) - (1 - p)*np.log2((1 - p))\n\ndef error(p):\n return 1 - np.max([p, 1 - p])\n\nx = np.arange(0.0, 1.0, 0.01)\n\nent = [entropy(p) if p != 0 else None for p in x]\nsc_ent = [e*0.5 if e else None for e in ent]\nerr = [error(i) for i in x]\n\n\nfig = plt.figure()\nax = plt.subplot(111)\nfor i, lab, ls, c, in zip([ent, sc_ent, gini(x), err], \n ['Entropy', 'Entropy (scaled)', \n 'Gini Impurity', 'Misclassification Error'],\n ['-', '-', '--', '-.'],\n ['black', 'lightgray', 'red', 'green', 'cyan']):\n line = ax.plot(x, i, label=lab, linestyle=ls, lw=2, color=c)\n\n\n\nax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15),\n ncol=3, fancybox=True, shadow=False)\n\nax.axhline(y=0.5, linewidth=1, color='k', linestyle='--')\nax.axhline(y=1.0, linewidth=1, color='k', linestyle='--')\nplt.ylim([0, 1.1])\nplt.xlabel('p(i=1)')\nplt.ylabel('Impurity Index')\nplt.tight_layout()\nplt.savefig('./figures/impurity.png', dpi=300, bbox_inches='tight')\nplt.show()", "_____no_output_____" ], [ "from sklearn.tree import export_graphviz\n\nexport_graphviz(tree, \n out_file='tree.dot', \n feature_names=['petal length', 'petal width'])", "_____no_output_____" ] ], [ [ 
"<br>\n<br>", "_____no_output_____" ], [ "# Combining weak to strong learners via random forests", "_____no_output_____" ], [ "[[back to top](#Sections)]", "_____no_output_____" ] ], [ [ "from sklearn.ensemble import RandomForestClassifier\n\nforest = RandomForestClassifier(criterion='entropy',\n n_estimators=10, \n random_state=1,\n n_jobs=2)\nforest.fit(X_train, y_train)\n\nplot_decision_regions(X_combined, y_combined, \n classifier=forest, test_idx=range(105,150))\n\nplt.xlabel('petal length [cm]')\nplt.ylabel('petal width [cm]')\nplt.legend(loc='upper left')\nplt.tight_layout()\n# plt.savefig('./figures/random_forest.png', dpi=300)\nplt.show()", "_____no_output_____" ] ], [ [ "<br>\n<br>", "_____no_output_____" ], [ "# K-nearest neighbors - a lazy learning algorithm", "_____no_output_____" ], [ "[[back to top](#Sections)]", "_____no_output_____" ] ], [ [ "from sklearn.neighbors import KNeighborsClassifier\n\nknn = KNeighborsClassifier(n_neighbors=5, p=2, metric='minkowski')\nknn.fit(X_train_std, y_train)\n\nplot_decision_regions(X_combined_std, y_combined, \n classifier=knn, test_idx=range(105,150))\n\nplt.xlabel('petal length [standardized]')\nplt.ylabel('petal width [standardized]')\nplt.legend(loc='upper left')\nplt.tight_layout()\n# plt.savefig('./figures/k_nearest_neighbors.png', dpi=300)\nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ] ]
e7d17cccdc22df1fea9ced243123c802e4e5e869
68,813
ipynb
Jupyter Notebook
12_OPTIONAL_Interactive_Mapping_with_Folium.ipynb
reeshav-netizen/Geospatial-Fundamentals-in-Python
637e2d93f4763c9fdeeffc36d317396bdc9e16bf
[ "MIT" ]
21
2019-12-01T03:22:51.000Z
2021-09-11T08:02:27.000Z
12_OPTIONAL_Interactive_Mapping_with_Folium.ipynb
dongyi1996/Geospatial-Fundamentals-in-Python
0d9b61622b0ba2b1c5ec1f03c851ba36ae3a1282
[ "MIT" ]
9
2020-11-17T20:58:27.000Z
2021-06-29T23:46:54.000Z
12_OPTIONAL_Interactive_Mapping_with_Folium.ipynb
dongyi1996/Geospatial-Fundamentals-in-Python
0d9b61622b0ba2b1c5ec1f03c851ba36ae3a1282
[ "MIT" ]
19
2019-06-29T22:16:28.000Z
2021-08-25T14:12:26.000Z
34.806778
353
0.518986
[ [ [ "# 12. Interactive Mapping with Folium\n\nIn previous lessons we used `Geopandas` and `matplotlib` to create choropleth and point maps of our data. In this notebook we will take it to the next level by creating `interactive maps` with the **folium** library. \n\n\n\n>### References\n>\n>This notebook provides an introduction to `folium`. To see what else you can do, check out the references listed below.\n>\n> - [Folium web site](https://github.com/python-visualization/folium)\n>\n> - [Folium notebook examples](https://nbviewer.jupyter.org/github/python-visualization/folium/tree/master/examples/)\n\n### Import Libraries", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport geopandas as gpd\nimport numpy as np\n\nimport matplotlib # base python plotting library\nimport matplotlib.pyplot as plt # submodule of matplotlib\n\n# To display plots, maps, charts etc in the notebook\n%matplotlib inline \n\nimport folium # popular python web mapping tool for creating Leaflet maps\nimport folium.plugins\n\n# Supress minor warnings about the syntax of CRS definitions, \n# ie \"init=epsg:4269\" vs \"epsg:4269\"\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)", "_____no_output_____" ] ], [ [ "#### Check your version of `folium` and `geopandas`.\n\nFolium is a new and evolving Python library so make sure you have version 0.10.1 or later installed.", "_____no_output_____" ] ], [ [ "print(folium.__version__) # Make sure you have version 0.10.1 or later of folium!", "_____no_output_____" ], [ "print(gpd.__version__) # Make sure you have version 0.7.0 or later of GeoPandas!", "_____no_output_____" ] ], [ [ "## 12.1 Introduction\n\nInteractive maps serve two very important purposes in geospatial analysis. First, they provde new tools for exploratory data analysis. With an interactive map you can:\n- `pan` over the mapped data, \n- `zoom` into a smaller arear that is not easily visible when the full extent of the map is displayed, and \n- `click` on or `hover` over a feature to see more information about it.\n\nSecond, when saved and shared, interactive maps provide a new tool for communicating the results of your analysis and for inviting your online audience to actively explore your work.\n\nFor those of you who work with tools like ArcGIS or QGIS, interactive maps also make working in the jupyter notebook environment a bit more like working in a desktop GIS.\n\nThe goal of this notebook is to show you how to create an interactive map with your geospatial data so that you can better analyze your data and save your output to share with others. \n\nAfter completing this lesson you will be able to create an interactive map like the one shown below.", "_____no_output_____" ] ], [ [ "%%html\n<iframe src=\"notebook_data/bartmap_example.html\" width=\"1000\" height=\"600\"></iframe>", "_____no_output_____" ] ], [ [ "<a id=\"section2\"></a>\n## 12.2 Interactive Mapping with Folium\n\nUnder the hood, `folium` is a Python package for creating interactive maps with [Leaflet](https://leafletjs.com), a popular javascript web mapping library. 
\n\nLet's start by creating an interactive map with the `folium.Map` function and display it in the notebook.", "_____no_output_____" ] ], [ [ "# Create a new folium map and save it to the variable name map1\nmap1 = folium.Map(location=[37.8721, -122.2578], # lat, lon around which to center the map\n                  width=\"100%\", # the width & height of the output map\n                  height=500, # in pixels (int) or in percent of available space (str)\n                  zoom_start=13) # the zoom level for the data to be displayed (3-20)\n\nmap1 # display the map in the notebook", "_____no_output_____" ] ], [ [ "Let's discuss the map above and the code we used to generate it.\n\nAt any time you can enter the following command to get help with `folium.Map`:\n", "_____no_output_____" ] ], [ [ "# uncomment to see help docs\n?folium.Map", "_____no_output_____" ] ], [ [ "Let's make another folium map using the code below:", "_____no_output_____" ] ], [ [ "# Create a new folium map and save it to the variable name map1\n#\nmap1 = folium.Map(location=[37.8721, -122.2578], # lat, lon around which to center the map\n                  tiles='CartoDB Positron',\n                  #width=800, # the width & height of the output map\n                  #height=600, # in pixels or in percent of available space\n                  zoom_start=13) # the zoom level for the data to be displayed", "_____no_output_____" ] ], [ [ "<div style=\"display:inline-block;vertical-align:top;\">\n    <img src=\"https://image.flaticon.com/icons/svg/87/87705.svg\" width=\"30\" align=left > \n</div>  \n<div style=\"display:inline-block;\">\n\n#### Questions\n</div>\n\n- What's new in the code?\n\n- How do you think that will change the map?\n\nLet's display the map and see what changes...", "_____no_output_____" ] ], [ [ "map1 # display map in notebook", "_____no_output_____" ] ], [ [ "Notice how the map changes when you change the underlying **tileset** from the default, which is `OpenStreetMap`, to `CartoDB Positron`. \n> [OpenStreetMap](https://www.openstreetmap.org/#map=5/38.007/-95.844) is the largest free and open source dataset of geographic information about the world. So it is the default basemap for a lot of mapping tools and libraries.\n\n- You can find a list of the available tilesets you can use in the help documentation (`folium.Map?`), a snippet of which is shown below:\n\n<pre>\nGenerate a base map of given width and height with either default\ntilesets or a custom tileset URL. The following tilesets are built-in\nto Folium. Pass any of the following to the \"tiles\" keyword:\n\n    - \"OpenStreetMap\"\n    - \"Mapbox Bright\" (Limited levels of zoom for free tiles)\n    - \"Mapbox Control Room\" (Limited levels of zoom for free tiles)\n    - \"Stamen\" (Terrain, Toner, and Watercolor)\n    - \"Cloudmade\" (Must pass API key)\n    - \"Mapbox\" (Must pass API key)\n    - \"CartoDB\" (positron and dark_matter)\n</pre>\n\n\n#### Exercise\n\nTake a few minutes to try some of the different tilesets in the code below and see how they change the output map. 
*Avoid the ones that require an API key*.", "_____no_output_____" ] ], [ [ "# Make changes to the code below to change the folium Map\n## Try changing the values for the zoom_start and tiles parameters.\nmap1 = folium.Map(location=[37.8721, -122.2578], # lat, lon around which to center the map\n                  tiles='Stamen Watercolor', # basemap aka baselayer or tileset\n                  width=800, # the width & height of the output map\n                  height=500, # in pixels or percent of available space\n                  zoom_start=13) # the zoom level for the data to be displayed\n\n#display the map\nmap1", "_____no_output_____" ] ], [ [ "<a id=\"section3\"></a>\n## 12.3 Adding a Map Layer", "_____no_output_____" ], [ "Now that we have created a folium map, let's add our California County data to the map. \n\nFirst, let's read that data into a Geopandas geodataframe.", "_____no_output_____" ] ], [ [ "# California county boundaries with associated demographic attributes.\nca_counties_gdf = gpd.read_file(\"notebook_data/california_counties/CaliforniaCounties.shp\")", "_____no_output_____" ] ], [ [ "Take another brief look at the geodataframe to recall the contents.", "_____no_output_____" ] ], [ [ "# take a look at first two rows\nca_counties_gdf.head(2)", "_____no_output_____" ], [ "# take a look at all column names\nca_counties_gdf.columns", "_____no_output_____" ] ], [ [ "### Adding a layer with folium.GeoJson\n\nFolium provides a number of ways to add vector data - points, lines, and polygons - to a map. \n\nThe data we are working with are in Geopandas geodataframes. The main folium function for adding these to the map is `folium.GeoJson`.\n\nLet's build on our last map and add the counties as a `folium.GeoJson` layer. ", "_____no_output_____" ] ], [ [ "map1 = folium.Map(location=[37.8721, -122.2578], # lat, lon around which to center the map\n                  tiles='CartoDB positron', # basemap aka baselayer or tileset\n                  width=800, # the width & height of the output map\n                  height=600, # in pixels or in percent of available space\n                  zoom_start=6) # the zoom level for the data to be displayed\n\n# Add the counties to the map\nfolium.GeoJson(ca_counties_gdf).add_to(map1)\n\n#display the map\nmap1", "_____no_output_____" ] ], [ [ "That was pretty straightforward, but `folium.GeoJson` provides a lot of arguments for customizing the display of the data in the map. We will review some of these soon. However, at any time you can get more information about `folium.GeoJson` by taking a look at the function documentation.", "_____no_output_____" ] ], [ [ "# Uncomment to view documentation\n# folium.GeoJson?", "_____no_output_____" ] ], [ [ "### Checking and Transforming the CRS\n\nIt's always a good idea to check the **CRS** of your geodata before doing anything with that data. This is true when we use `folium` to make an interactive map. 
\n\nHere is how folium deals with the CRS of a geodataframe before mapping it:\n- Folium checks to see if the gdf has a defined CRS\n    - If the CRS is not defined, it assumes the data to be in the WGS84 CRS (epsg=4326).\n    - If the CRS is defined, it will be transformed dynamically to WGS84 before mapping.\n\n\nSo, if your map data doesn't show up at all, or doesn't show up where you think it should, check the CRS of your data!\n- If it is not defined, define it.\n\n<div style=\"display:inline-block;vertical-align:top;\">\n    <img src=\"https://image.flaticon.com/icons/svg/87/87705.svg\" width=\"30\" align=left > \n</div>  \n<div style=\"display:inline-block;\">\n\n#### Questions\n</div>\n\n- What is the CRS of the county data?\n- How is folium dealing with the CRS of this gdf?", "_____no_output_____" ] ], [ [ "# Check the CRS of the data \nprint(...)", "_____no_output_____" ] ], [ [ "*Click here for answers*\n\n<!---\n# What is the CRS of the county data?\nca_counties_gdf.crs\n\n# How is folium dealing with the CRS of this gdf?\n# Dynamically transformed to WGS84 (but it already is in that projection so no change)\n--->", "_____no_output_____" ], [ "### Styling features with `folium.GeoJson`\n\nLet's dive deeper into the `folium.GeoJson` function. Below is an excerpt from the help documentation for the function that shows all the available function arguments that we can set.\n\n<div style=\"display:inline-block;vertical-align:top;\">\n    <img src=\"http://www.pngall.com/wp-content/uploads/2016/03/Light-Bulb-Free-PNG-Image.png\" width=\"20\" align=left > \n</div>  \n<div style=\"display:inline-block;\">\n\n#### Question\n</div>\nWhat argument do we use to style the color for our polygons?\n\n<pre>\nfolium.GeoJson(\n    data,\n    style_function=None,\n    highlight_function=None,\n    name=None,\n    overlay=True,\n    control=True,\n    show=True,\n    smooth_factor=None,\n    tooltip=None,\n    embed=True,\n)\n</pre>", "_____no_output_____" ], [ "Let's examine the options for the `style_function` in more detail since we will use these to change the style of our mapped data.\n\n\n`style_function = lambda x: {` apply to all features being mapped (ie, all rows in the geodataframe)  \n`'weight': line_weight,` set the thickness of a line or polyline where <1 is thin, >1 thick, 1 = default  \n`'opacity': line_opacity,` set opacity where 1 is solid, 0.5 is semi-opaque and 0 is transparent  \n`'color': line_color` set the color of the line, eg \"red\" or some hexadecimal color value\n`'fillOpacity': opacity,` set opacity of the fill of a polygon  \n`'fillColor': color` set color of the fill of a polygon  \n`'dashArray': '5, 5'` set line pattern to a dash of 5 pixels on, off  \n`}`\n\n\n\nOk! Let's try setting the style of our counties by defining a style function.", "_____no_output_____" ] ], [ [ "# Define the basemap\nmap1 = folium.Map(location=[37.8721, -122.2578], # lat, lon around which to center the map\n                  tiles='CartoDB Positron',\n                  width=1000, # the width & height of the output map\n                  height=600, # in pixels\n                  zoom_start=6) # the zoom level for the data to be displayed\n\n# Add the counties gdf layer\n# setting the style of the data\nfolium.GeoJson(ca_counties_gdf,\n               style_function = lambda x: {\n                   'weight':2,\n                   'color':\"white\",\n                   'opacity':1,\n                   'fillColor':\"red\",\n                   'fillOpacity':0.6\n               }\n              ).add_to(map1)\n\n\nmap1", "_____no_output_____" ] ], [ [ "#### Exercise\nCopy the code from our last map and paste it below. 
Take a few minutes to edit the code to change the style of the county polygons.\n", "_____no_output_____" ] ], [ [ "# Your code here\nmap1 = folium.Map(location=[37.8721, -122.2578], # lat, lon around which to center the map\n                  tiles='Stamen Watercolor',\n                  width=1000, # the width & height of the output map\n                  height=600, # in pixels\n                  zoom_start=10) # the zoom level for the data to be displayed\n\n# Add the counties gdf layer\n# setting the style of the data\nfolium.GeoJson(ca_counties_gdf,\n               style_function = lambda x: {\n                   'weight':3,\n                   'color':\"black\",\n                   'opacity':1,\n                   'fillColor':\"none\",\n                   'fillOpacity':0.6\n               }\n              ).add_to(map1)\n\n\nmap1", "_____no_output_____" ] ], [ [ "### Adding a Tooltip\n\nA `tooltip` can be added to a folium.GeoJson map layer to display data values when the mouse hovers over a feature.\n", "_____no_output_____" ] ], [ [ "# Double check what columns we have\nca_counties_gdf.columns", "_____no_output_____" ], [ "?folium.GeoJsonTooltip", "_____no_output_____" ], [ "# Define the basemap\nmap1 = folium.Map(location=[37.8721, -122.2578], # lat, lon around which to center the map\n                  tiles='CartoDB Positron',\n                  width=1000, # the width & height of the output map\n                  height=600, # in pixels\n                  zoom_start=6) # the zoom level for the data to be displayed\n\n# Add the counties gdf layer\nfolium.GeoJson(ca_counties_gdf,\n               style_function = lambda x: {\n                   'weight':2,\n                   'color':\"white\",\n                   'opacity':1,\n                   'fillColor':\"red\",\n                   'fillOpacity':0.6\n               },\n               \n               tooltip=folium.GeoJsonTooltip(\n                   fields=['NAME','POP2012','POP12_SQMI' ], \n                   aliases=['County', 'Population', 'Population Density (mi2)'],\n                   labels=True,\n                   localize=True\n               ),\n              ).add_to(map1)\n\n\nmap1", "_____no_output_____" ] ], [ [ "As always, you can get more help by reading the documentation.", "_____no_output_____" ] ], [ [ "# Uncomment to view help\n#folium.GeoJsonTooltip?", "_____no_output_____" ] ], [ [ "#### Exercise\n\nEdit the code in the cell below to `add` the median age (`MED_AGE`) to the tooltip.", "_____no_output_____" ] ], [ [ "# Define the basemap\nmap1 = folium.Map(location=[37.8721, -122.2578], # lat, lon around which to center the map\n                  tiles='CartoDB Positron',\n                  width=1000, # the width & height of the output map\n                  height=600, # in pixels\n                  zoom_start=6) # the zoom level for the data to be displayed\n\n# Add the counties gdf layer\nfolium.GeoJson(ca_counties_gdf,\n               style_function = lambda x: {\n                   'weight':2,\n                   'color':\"white\",\n                   'opacity':1,\n                   'fillColor':\"red\",\n                   'fillOpacity':0.6\n               },\n               \n               tooltip=folium.GeoJsonTooltip(\n                   fields=['NAME','POP2012','POP12_SQMI','MED_AGE' ], \n                   aliases=['County', 'Population', 'Population Density (mi2)', 'Median Age'],\n                   labels=True,\n                   localize=True\n               ),\n              ).add_to(map1)\n\n\nmap1", "_____no_output_____" ] ], [ [ "*Click here for answers*\n\n<!---\n# Define the basemap\nmap1 = folium.Map(location=[37.8721, -122.2578], # lat, lon around which to center the map\n                  tiles='CartoDB Positron',\n                  width=1000, # the width & height of the output map\n                  height=600, # in pixels\n                  zoom_start=6) # the zoom level for the data to be displayed\n\n# Add the counties gdf layer\nfolium.GeoJson(ca_counties_gdf,\n               style_function = lambda x: {\n                   'weight':2,\n                   'color':\"white\",\n                   'opacity':1,\n                   'fillColor':\"red\",\n                   'fillOpacity':0.6\n               },\n               \n               tooltip=folium.GeoJsonTooltip(\n                   fields=['NAME','POP2012','POP12_SQMI','MED_AGE' ], \n                   aliases=['County', 'Population', 'Population Density (mi2)', 'Median Age'],\n                   labels=True,\n                   localize=True\n               ),\n              ).add_to(map1)\n\n\nmap1\n--->", 
"_____no_output_____" ], [ "<a id=\"section4\"></a>\n## 12.4 Data Mapping\n\nAbove, we set the style for all of the census tracts to the same fill and outline colors and opacity values. \n\nLet's take a look at how we would use the `data values` to set the color values for the polygons. This is called a `choropleth` map or, more generally, a `thematic map`.\n\nThe `folium.Choropleth` function can be used for this.", "_____no_output_____" ] ], [ [ "# Uncomment to view help docs\n## folium.Choropleth?", "_____no_output_____" ] ], [ [ "With `folium.Choropleth`, we will use some of the same style parameters that we used with `folium.GeoJson`.\n\nWe will also use some new parameters, as shown below.\n\nFirst, let's take a look at the data we will map to refresh our knowledge.", "_____no_output_____" ] ], [ [ "print(ca_counties_gdf.columns)\nca_counties_gdf.head(2)", "_____no_output_____" ] ], [ [ "Now let's create a choropleth map of total population, which is in the `c_race` column.", "_____no_output_____" ] ], [ [ "ca_counties_gdf.head()", "_____no_output_____" ], [ "# Define the basemap\nmap2 = folium.Map(location=[37.8721, -122.2578], # lat, lon around which to center the map\n tiles='CartoDB Positron',\n width=1000, # the width & height of the output map\n height=600, # in pixels\n zoom_start=6) # the zoom level for the data to be displayed\n\n\n# Add the Choropleth layer\nfolium.Choropleth(geo_data=ca_counties_gdf.set_index('NAME'), # The object with the geospatial data\n data=ca_counties_gdf, # The object with the attribute data (can be same)\n columns=['NAME','POP2012'], # the ID and data columns in the data objects\n key_on=\"feature.id\", # the ID in the geo_data object (don't change)\n fill_color=\"Reds\", # The color palette (or color map) - see help\n fill_opacity=0.65,\n line_color=\"grey\",\n legend=True,\n legend_name=\"Population\",\n ).add_to(map2)\n\n# Display the map\nmap2 ", "_____no_output_____" ] ], [ [ "### Choropleth Mapping with Folium - discussion\n\nLet's discuss the following lines from the code above in more detail.\n\n<pre>\n# Add the Choropleth layer\nfolium.Choropleth(geo_data=ca_counties_gdf.set_index('NAME'),\n data=ca_counties_gdf, \n columns=['NAME','POP2012'],\n key_on=\"feature.id\",\n fill_color=\"Reds\", \n ...)\n\n\n</pre>\n\n`geo_data` and the `data`: we need to identify the objects that contains both because they could be different objects. In our example they are in the same object.\n\n`ca_counties_gdf.set_index('NAME')`: We need to **set_index('NAME')** in order to identify the column in `geo_data` that will be used to `join` the geometries in the `geo_data` to the data values in `data`.\n\n`columns=['NAME','POP2012']`: we identify in `data` (1) the column that will join these `data` to `geo_data` and (2) the second column is the column with the values that will determine the color.\n\n`fill_color=\"Reds\":` Here we identify the name of the color palette that we will use to style the polygons. These will be the same as the `matplotlib` colormaps.\n", "_____no_output_____" ], [ "#### Question\nRecall our discussion about best practices for choropleth maps. Is population count an appropriate variable to plot as a choropleth? 
", "_____no_output_____" ] ], [ [ "# Write your thoughts here", "_____no_output_____" ] ], [ [ "#### Exercise\n\nCopy and paste the code from above into the cell below to create a choropleth map of population density (`POP12_SQMI`).\n\nFeel free to experiment with any of the `folium.Choropleth` style parameters, especially the `fill_color` which needs to be one of the `color brewer palettes` listed below:\n\n<pre>\nfill_color: string, default 'blue'\n Area fill color. Can pass a hex code, color name, or if you are\n binding data, one of the following color brewer palettes:\n 'BuGn', 'BuPu', 'GnBu', 'OrRd', 'PuBu', 'PuBuGn', 'PuRd', 'RdPu',\n 'YlGn', 'YlGnBu', 'YlOrBr', and 'YlOrRd'.\n</pre>", "_____no_output_____" ] ], [ [ "# Your code here\n# Define the basemap\nmap2 = folium.Map(location=[37.7749, -122.4194], # lat, lon around which to center the map\n tiles='Stamen Toner',\n width=1000, # the width & height of the output map\n height=600, # in pixels\n zoom_start=10) # the zoom level for the data to be displayed\n\n\n# Add the Choropleth layer \nfolium.Choropleth(geo_data=ca_counties_gdf.set_index('NAME'), # The object with the geospatial data\n data=ca_counties_gdf, # The object with the attribute data (can be same)\n columns=['NAME','POP12_SQMI'], # the ID and data columns in the data objects\n key_on=\"feature.id\", # the ID in the geo_data object (don't change)\n fill_color=\"RdPu\", # The color palette (or color map) - see help\n fill_opacity=0.8).add_to(map2)\n\nmap2", "_____no_output_____" ] ], [ [ "*Click here for answers*\n\n<!---\n # SOLUTION\n # Get our map center\n ctrX = (tracts_gdf.total_bounds[0] + tracts_gdf.total_bounds[2])/2\n ctrY = (tracts_gdf.total_bounds[1] + tracts_gdf.total_bounds[3])/2\n\n # Create our base map\n map2 = folium.Map(location=[ctrY, ctrX], \n tiles='CartoDB Positron',\n width=800,height=600,\n zoom_start=10)\n\n # Add the Choropleth layer\n folium.Choropleth(geo_data=tracts_gdf.set_index('GEOID'), \n data=tracts_gdf,\n columns=['GEOID','pop_dens_km2'],\n key_on=\"feature.id\",\n fill_color=\"PuBu\",\n fill_opacity=0.65,\n line_color=\"grey\",\n legend=True,\n legend_name=\"Population Density per km2\",\n ).add_to(map2)\n\n # Display \n map2\n--->", "_____no_output_____" ], [ "### Choropleth Maps with Tooltips\n\nYou can add a `tooltip` to a folium.Choropleth map but the process is not straigthforward. 
The `folium.Choropleth` function does not have a tooltip argument the way `folium.GeoJson` does.\n\nThe workaround is to add the layer as both a `folium.Choropleth` layer and as a `folium.GeoJson` layer and bind the tooltip to the GeoJson layer.\n\nLet's check it out below.", "_____no_output_____" ] ], [ [ "# Define the basemap\nmap3 = folium.Map(location=[37.8721, -122.2578], # lat, lon around which to center the map\n tiles='CartoDB Positron',\n width=1000, # the width & height of the output map\n height=600, # in pixels\n zoom_start=6) # the zoom level for the data to be displayed\n\n\n# Add the Choropleth layer\nfolium.Choropleth(geo_data=ca_counties_gdf.set_index('NAME'), # The object with the geospatial data\n data=ca_counties_gdf, # The object with the attribute data (can be same)\n columns=['NAME','POP2012'], # the ID and data columns in the data objects\n key_on=\"feature.id\", # the ID in the geo_data object (don't change)\n fill_color=\"Reds\", # The color palette (or color map) - see help\n fill_opacity=0.65,\n line_color=\"grey\",\n legend=True,\n legend_name=\"Population\",\n ).add_to(map3)\n\n# ADD the same geodataframe to the map to display a tooltip\nlayer2 = folium.GeoJson(ca_counties_gdf,\n style_function=lambda x: {'color':'transparent','fillColor':'transparent'},\n tooltip=folium.GeoJsonTooltip(\n fields=['NAME','POP2012'], \n aliases=['County', 'Population'],\n labels=True,\n localize=True\n ),\n highlight_function=lambda x: {'weight':3,'color':'white'}\n).add_to(map3)\n\n\n\nmap3 # show map", "_____no_output_____" ] ], [ [ "#### Question \nDo you notice anything different about the `style_function` for layer2 above?", "_____no_output_____" ], [ "#### Exercise\nRedo the above choropleth map code to map population density. Add both population and population density to the tooltip. Don't forget to update the legend name.", "_____no_output_____" ] ], [ [ "# Your code here", "_____no_output_____" ] ], [ [ "<a id=\"section5\"></a>\n## 12.5 Overlays\n\nWe can overlay other geospatial data on our folium maps.\n\nLet's say we want to focus the previous choropleth map with tooltips (`map3`) on the City of Berkeley. We can fetch the border of the city from our census Places dataset. These data can be downloaded from the Census website. We use the cartographic boundary files not the TIGER line files as these look better on a map (clipped to shoreline). 
\n\nSpecifically, we will fetch the city boundaries from the following census cartographic boundary file:\n\n- https://www2.census.gov/geo/tiger/GENZ2018/shp/cb_2018_06_place_500k.zip\n\nThen we can overlay the border of the city on the map and set the initial zoom to the center of the Berkeley boundary.\n\nLet's try that.\n", "_____no_output_____" ], [ "First we need to read in the census places data and create a subset geodataframe for our city of interest, here Berkeley.", "_____no_output_____" ] ], [ [ "places = gpd.read_file(\"zip://notebook_data/census/Places/cb_2018_06_place_500k.zip\")", "_____no_output_____" ], [ "places.head(2)", "_____no_output_____" ], [ "berkeley = places[places.NAME=='Berkeley'].copy()\nberkeley.head(2)", "_____no_output_____" ] ], [ [ "Plot the Berkeley geodataframe to make sure it looks ok.", "_____no_output_____" ] ], [ [ "berkeley.plot()", "_____no_output_____" ], [ "# Create a new map centered on Berkeley\nberkeley_map = folium.Map(location=[berkeley.centroid.y.mean(), \n berkeley.centroid.x.mean()], \n tiles='CartoDB Positron',\n width=800,height=600,\n zoom_start=13)\n\n\n# Add the census tract polygons as a choropleth map\nlayer1=folium.Choropleth(geo_data=ca_counties_gdf.set_index('NAME'),\n data=ca_counties_gdf,\n columns=['NAME','POP2012'],\n fill_color=\"Reds\",\n fill_opacity=0.65,\n line_color=\"grey\", #\"white\",\n line_weight=1,\n line_opacity=1,\n key_on=\"feature.id\",\n legend=True,\n legend_name=\"Population\",\n highlight=True\n ).add_to(berkeley_map)\n\n# Add the berkeley boundary - note the fill color\nlayer2 = folium.GeoJson(data=berkeley,\n name='Berkeley',smooth_factor=2,\n style_function=lambda x: {'color':'black',\n 'opacity':1,\n 'fillColor':\n 'transparent',\n 'weight':3},\n ).add_to(berkeley_map)\n\n# Add the tooltip for the census tracts as its own layer\nlayer3 = folium.GeoJson(ca_counties_gdf,\n style_function=lambda x: {'color':'transparent','fillColor':'transparent'},\n tooltip=folium.features.GeoJsonTooltip(\n fields=['NAME','POP2012'], \n aliases=['County', 'Population'],\n labels=True,\n localize=True\n ),\n highlight_function=lambda x: {'weight':3,'color':'white'}\n).add_to(berkeley_map)\n\nberkeley_map # show map", "_____no_output_____" ] ], [ [ "<div style=\"display:inline-block;vertical-align:top;\">\n <img src=\"http://www.pngall.com/wp-content/uploads/2016/03/Light-Bulb-Free-PNG-Image.png\" width=\"30\" align=left > \n</div> \n<div style=\"display:inline-block;\">\n\n#### Questions\n</div>\n\nAny questions about the above map?\n\nDoes the code for the Berkeley map above differ from our previous choropleth map code?\n\nDoes the order of layer2 & layer3 matter (can they be switched?)", "_____no_output_____" ], [ "#### Exercise\n\nRedo the above map with population density. 
Create and display the Oakland city boundary on the map instead of Berkeley, and center the map on Oakland.", "_____no_output_____" ] ], [ [ "# Your code here", "_____no_output_____" ] ], [ [ "*Click here for solution*\n\n<!---\n    # SOLUTION\n    oakland = places[places.NAME=='Oakland'].copy()\n    oakland.plot()\n\n    # SOLUTION\n    oakland_map = folium.Map(location=[oakland.centroid.y.mean(), oakland.centroid.x.mean()], \n                      tiles='CartoDB Positron',\n                      width=800,height=600,\n                      zoom_start=12)\n\n    # Add the census tract polygons as a choropleth map\n    layer1=folium.Choropleth(geo_data=ca_counties_gdf.set_index('NAME'),\n                        data=ca_counties_gdf,\n                        columns=['NAME','POP2012'],\n                        fill_color=\"Reds\",\n                        fill_opacity=0.65,\n                        line_color=\"grey\", #\"white\",\n                        line_weight=1,\n                        line_opacity=1,\n                        key_on=\"feature.id\",\n                        legend=True,\n                        legend_name=\"Population\",\n                        highlight=True\n                       ).add_to(oakland_map)\n\n\n    # Add the oakland boundary\n    layer2 = folium.GeoJson(data=oakland,\n                   name='Oakland',smooth_factor=2,\n                   style_function=lambda x: {'color':'black','opacity':1,'fillColor':'transparent','weight':3},\n                  ).add_to(oakland_map)\n\n    # Add the tooltip\n    layer3 = folium.GeoJson(ca_counties_gdf,\n                   style_function=lambda x: {'color':'transparent','fillColor':'transparent'},\n                   tooltip=folium.features.GeoJsonTooltip(\n                       fields=['NAME','POP2012'], \n                       aliases=['County', 'Population'],\n                       labels=True,\n                       localize=True\n                   ),\n                   highlight_function=lambda x: {'weight':3,'color':'white'}\n                  ).add_to(oakland_map)\n\n\n    oakland_map # show map\n--->", "_____no_output_____" ], [ "<a id=\"section6\"></a>\n## 12.6 Mapping Points and Lines\n\nWe can also add points and lines to a folium map.\n\nLet's overlay BART stations as points and BART lines as lines on the interactive map. For the Bay Area, these data are available from the [Metropolitan Transportation Commission (MTC) Open Data portal](http://opendata.mtc.ca.gov/datasets).\n\nWe're going to pull in BART station data that we downloaded from the website and subsetted from the passenger-rail-stations layer. 
You can learn more about the dataset through here: http://opendata.mtc.ca.gov/datasets/passenger-rail-stations-2019\n\nAs usual, let's try pulling in the data and inspect the first couple of rows.", "_____no_output_____" ] ], [ [ "# Load light rail stop data\nrailstops = gpd.read_file(\"zip://notebook_data/transportation/Passenger_Rail_Stations_2019.zip\") \nrailstops.tail()", "_____no_output_____" ], [ "# Subset to keep just bart stations\nbart_stations = railstops[railstops['agencyname']=='BART'].sort_values(by=\"station_na\")\nbart_stations.head()", "_____no_output_____" ], [ "# Repeat for the rail lines\nrail_lines = gpd.read_file(\"zip://notebook_data/transportation/Passenger_Railways_2019.zip\") \nrail_lines.head()", "_____no_output_____" ], [ "rail_lines.operator.value_counts()", "_____no_output_____" ], [ "# subset by operator to get the bart lines\nbart_lines = rail_lines[rail_lines['operator']=='BART']", "_____no_output_____" ], [ "# Check the CRS of the geodataframes\nprint(bart_stations.crs)\nprint(bart_lines.crs)", "_____no_output_____" ], [ "# Quick plot\nbart_stations.plot()\nbart_lines.plot()", "_____no_output_____" ] ], [ [ "Now that we have fetched and checked the Bart data, let's do a quick folium map with it.\n\nWe will use `folium.GeoJson` to add these data to the map, just as we used it previously for the census tract polygons.", "_____no_output_____" ] ], [ [ "# Bart Map\nmap4 = folium.Map(location=[bart_stations.centroid.y.mean(), bart_stations.centroid.x.mean()], \n tiles='CartoDB Positron',\n width=800,height=600,\n zoom_start=10)\n\n\nfolium.GeoJson(bart_lines).add_to(map4)\n\nfolium.GeoJson(bart_stations).add_to(map4)\n\n\nmap4 # show map", "_____no_output_____" ] ], [ [ "We can also add tooltips, just as we did previously.", "_____no_output_____" ] ], [ [ "# Bart Map\nmap4 = folium.Map(location=[bart_stations.centroid.y.mean(), bart_stations.centroid.x.mean()], \n tiles='CartoDB Positron',\n #width=800,height=600,\n zoom_start=10)\n\n# Add Bart lines\nfolium.GeoJson(bart_lines,\n tooltip=folium.GeoJsonTooltip(\n fields=['operator' ],\n aliases=['Line operator'],\n labels=True,\n localize=True\n ),\n ).add_to(map4)\n\n# Add Bart stations\nfolium.GeoJson(bart_stations,\n tooltip=folium.GeoJsonTooltip(fields=['ts_locatio'], \n aliases=['Stop Name'],\n labels=True,\n localize=True\n ),\n ).add_to(map4)\n\n\nmap4 # show map", "_____no_output_____" ] ], [ [ "That's pretty cool, but don't you just want to click on those marker points to get a `popup` rather than hovering over for a `tooltip`?", "_____no_output_____" ], [ "### Mapping Points\n\nSo far we have used `folium.GeoJson` to map our BART points. By default this uses the push-pin marker symbology made popular by Google Maps. 
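\n\nFor reference, a single push-pin marker can also be placed by hand -- a minimal sketch (the coordinates below are arbitrary placeholders, not taken from our data):\n\n<pre>\nfolium.Marker(location=[37.87, -122.27], tooltip='A single point').add_to(map4)\n</pre>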
\n\nUnder the hood, folium.GeoJson uses the default object type `folium.Marker` when the input data are points.\n\nThis is helpful to know because `folium.Marker` has a few options that allow further customization of our points.", "_____no_output_____" ] ], [ [ "# Uncomment to view help docs\nfolium.Marker?", "_____no_output_____" ] ], [ [ "Let's explicitly add the Bart Stations as points so we can change the `tooltips` to `popups`.", "_____no_output_____" ] ], [ [ "# Bart Map\nmap4 = folium.Map(location=[bart_stations.centroid.y.mean(), bart_stations.centroid.x.mean()], \n tiles='CartoDB Positron',\n #width=800,height=800,\n zoom_start=10)\n\n# Add Bart lines\nfolium.GeoJson(bart_lines,\n tooltip=folium.GeoJsonTooltip(\n fields=['operator' ],\n aliases=['Line operator'],\n labels=True,\n localize=True\n ),\n ).add_to(map4)\n\n# Add Bart stations\nbart_stations.apply(lambda row:\n folium.Marker(\n location=[row['geometry'].y, row['geometry'].x],\n popup=row['ts_locatio'],\n ).add_to(map4), axis=1)\n\nmap4 # show map", "_____no_output_____" ] ], [ [ "That `folium.Marker` code is a bit more complex than `folium.GeoJson` and may not be worth it unless you really want that popup behavior.\n\nBut let's see what else we can do with a `folium.Marker` by viewing the next map.", "_____no_output_____" ] ], [ [ "# Bart Map\nmap4 = folium.Map(location=[bart_stations.centroid.y.mean(), bart_stations.centroid.x.mean()], \n tiles='CartoDB Positron',\n #width=800,height=600,\n zoom_start=10)\n\n# Add BART lines\nfolium.GeoJson(bart_lines,\n tooltip=folium.GeoJsonTooltip(\n fields=['operator' ],\n aliases=['Line operator'],\n labels=True,\n localize=True\n ),\n ).add_to(map4)\n\n# Add BART Stations\nicon_url = \"https://gomentumstation.net/wp-content/uploads/2018/08/Bay-area-rapid-transit-1000.png\"\nbart_stations.apply(lambda row:\n folium.Marker(\n location=[row['geometry'].y,row['geometry'].x],\n popup=row['ts_locatio'],\n icon=folium.features.CustomIcon(icon_url,icon_size=(20, 20)),\n ).add_to(map4), axis=1)\n\nmap4 # show map", "_____no_output_____" ] ], [ [ "#### Exercise\n\nCopy and paste the code for the previous cell into the next cell and \n1. change the bart icon to \"https://ya-webdesign.com/transparent450_/train-emoji-png-14.png\"\n2. change the popup back to a tooltip.", "_____no_output_____" ] ], [ [ "# Your code here", "_____no_output_____" ] ], [ [ "*Click here for solution*\n\n<!---\n# Bart Map\nmap4 = folium.Map(location=[bart_stations.centroid.y.mean(), bart_stations.centroid.x.mean()], \n\n tiles='CartoDB Positron',\n #width=800,height=600,\n zoom_start=10)\n\n# Add BART lines\nfolium.GeoJson(bart_lines,\n tooltip=folium.GeoJsonTooltip(\n fields=['operator' ],\n aliases=['Line operator'],\n labels=True,\n localize=True\n ),\n ).add_to(map4)\n\n# Add BART Stations\nicon_url = \"https://ya-webdesign.com/transparent450_/train-emoji-png-14.png\"\nbart_stations.apply(lambda row:\n folium.Marker(\n location=[row['geometry'].y,row['geometry'].x],\n tooltip=row['ts_locatio'],\n icon=folium.features.CustomIcon(icon_url,icon_size=(20, 20)),\n ).add_to(map4), axis=1)\n\nmap4 # show map\n--->", "_____no_output_____" ], [ "### folium.CircleMarkers\n\nYou may prefer to customize points as `CircleMarkers` instead of the icon or pushpin Marker style. 
This allows you to set size and color of a marker, either manually or as a function of a data variable.\n\nLet's look at some code for doing this.", "_____no_output_____" ] ], [ [ "# Define the basemap\nmap5 = folium.Map(location=[bart_stations.centroid.y.mean(), bart_stations.centroid.x.mean()], # lat, lon around which to center the map\n tiles='CartoDB Positron',\n #width=1000, # the width & height of the output map\n #height=600, # in pixels\n zoom_start=10) # the zoom level for the data to be displayed\n\n# Add BART Lines\nfolium.GeoJson(bart_lines).add_to(map5)\n\n\n# Add BART Stations\nbart_stations.apply(lambda row:\n folium.CircleMarker(\n location=[row['geometry'].y, row['geometry'].x],\n radius=10,\n color='purple',\n fill=True,\n fill_color='purple',\n popup=row['ts_locatio'],\n ).add_to(map5), \n axis=1)\n\n\nmap5\n", "_____no_output_____" ] ], [ [ "### folium.Circle \n\nYou can also set the size of your circles to a fixed radius, in meters, using `folium.Circle`. This is great for exploratory data analysis. For example, you can see what the census tract values are within 500 meters of a BART station.", "_____no_output_____" ] ], [ [ "# Uncomment to view\n#?folium.Circle", "_____no_output_____" ], [ "# Define the basemap\nmap5 = folium.Map(location=[bart_stations.centroid.y.mean(), bart_stations.centroid.x.mean()], # lat, lon around which to center the map\n tiles='CartoDB Positron',\n #width=1000, # the width & height of the output map\n #height=600, # in pixels\n zoom_start=10) # the zoom level for the data to be displayed\n\n# Add BART Lines\nfolium.GeoJson(bart_lines).add_to(map5)\n\n\n# Add BART Stations\nbart_stations.apply(lambda row:\n folium.Circle(\n location=[row['geometry'].y, row['geometry'].x],\n radius=500,\n color='purple',\n fill=True,\n fill_color='purple',\n popup=row['ts_locatio'],\n ).add_to(map5), \n axis=1)\n\n\nmap5\n", "_____no_output_____" ] ], [ [ "<div style=\"display:inline-block;vertical-align:top;\">\n <img src=\"http://www.pngall.com/wp-content/uploads/2016/03/Light-Bulb-Free-PNG-Image.png\" width=\"30\" align=left > \n</div> \n<div style=\"display:inline-block;\">\n\n#### Question\n</div>\n\nWhat do you notice about the size of the circles as you zoom in/out when you compare folium.Circles and folium.CircleMarkers?", "_____no_output_____" ], [ "### Proportional Symbol Maps\n\nOne of the advantages of the `folium.CircleMarker` is that we can set the size of the map to vary based on a data value.\n\nTo give this a try, let's add a fake column to the `bart_stations` gdf called millions_served and set it to a value between 1 and 10.", "_____no_output_____" ] ], [ [ "# add a column to the bart stations gdf\nbart_stations['millions_served'] = np.random.randint(1,10, size=len(bart_stations))\nbart_stations.head()", "_____no_output_____" ], [ "# Define the basemap\nmap5 = folium.Map(location=[bart_stations.centroid.y.mean(), bart_stations.centroid.x.mean()],\n tiles='CartoDB Positron',\n #width=1000, # the width & height of the output map\n #height=600, # in pixels\n zoom_start=10) # the zoom level for the data to be displayed\n\nfolium.GeoJson(bart_lines).add_to(map5)\n\n# Add BART Stations as CircleMarkers\n# Here, some knowlege of Python string formatting is useful\nbart_stations.apply(lambda row:\n folium.CircleMarker(\n location=[row['geometry'].y, row['geometry'].x],\n radius=row['millions_served'],\n color='purple',\n fill=True,\n fill_color='purple',\n tooltip = \"Bart Station: %s<br>Millions served: %s\" % (row['ts_locatio'], 
row['millions_served'])\n \n ).add_to(map5), axis=1)\nmap5\n", "_____no_output_____" ] ], [ [ "So if you hover over our BART stations, you see that we've formatted it nicely! Using some HTML and Python string formatting we can make our `tooltip` easier to read. \n\nIf you want to learn more about customizing these, you can [go check this out to learn HTML basics](https://www.w3schools.com/html/html_basic.asp). You can then [go here to learn about Python string formatting](https://python-reference.readthedocs.io/en/latest/docs/str/formatting.html).", "_____no_output_____" ], [ "<a id=\"section7\"></a>\n## 12.7 Creating and Saving a folium Interactive Map\n\nNow that you have seen most of the ways you can add a geodataframe to a folium map, let's create one big map that includes several of our geodataframes.\n\nTo control the display of the data layers, we will add a `folium.LayerControl`\n\n- A `folium.LayerControl` will allow you to toggle on/off a map's visible layers. \n\n- In order to add a layer to the LayerControl, the layer must have value set for its `name`.\n\nLet's take a look. ", "_____no_output_____" ] ], [ [ "# Create a new map centered on the census tract data\nmap6 = folium.Map(location=[bart_stations.centroid.y.mean(), bart_stations.centroid.x.mean()], \n tiles='CartoDB Positron',\n #width=800,height=600,\n zoom_start=10)\n\n# Add the counties polygons as a choropleth map\nlayer1=folium.Choropleth(geo_data=ca_counties_gdf.set_index('NAME'),\n data=ca_counties_gdf,\n columns=['NAME','POP2012'],\n fill_color=\"Reds\",\n fill_opacity=0.65,\n line_color=\"grey\", #\"white\",\n line_weight=1,\n line_opacity=1,\n key_on=\"feature.id\",\n legend=True,\n legend_name=\"Population\",\n highlight=True,\n name=\"Counties\"\n ).add_to(map6)\n\n# Add the tooltip for the counties as its own layer\n# Don't display in the Layer control!\nlayer2 = folium.GeoJson(ca_counties_gdf,\n style_function=lambda x: {'color':'transparent','fillColor':'transparent'},\n tooltip=folium.features.GeoJsonTooltip(\n fields=['NAME','POP2012'], \n aliases=['Name', 'Population'],\n labels=True,\n localize=True\n ),\n highlight_function=lambda x: {'weight':3,'color':'white'}\n).add_to(layer1.geojson)\n\n# Add Bart lines\nfolium.GeoJson(bart_lines,\n name=\"Bart Lines\",\n tooltip=folium.GeoJsonTooltip(\n fields=['operator' ],\n aliases=['Line operator'],\n labels=True,\n localize=True\n ),\n ).add_to(map6)\n\n\n# Add Bart stations\nfolium.GeoJson(bart_stations,\n name=\"Bart stations\",\n tooltip=folium.GeoJsonTooltip(fields=['ts_locatio' ], \n aliases=['Stop Name'],\n labels=True,\n localize=True\n ),\n ).add_to(map6)\n\n# ADD LAYER CONTROL\nfolium.LayerControl(collapsed=False).add_to(map6)\n\nmap6 # show map", "_____no_output_____" ] ], [ [ "<div style=\"display:inline-block;vertical-align:top;\">\n <img src=\"https://image.flaticon.com/icons/svg/87/87705.svg\" width=\"30\" align=left > \n</div> \n<div style=\"display:inline-block;\">\n\n#### Questions\n</div>\n\n1. Take a look at the help docs `folium.LayerControl?`. What parameter would move the location of the LayerControl? What parameter would allow it to be closed by default?\n\n2. Take a look at the way we added `layer2` above (this has the census tract tooltips). How has the code we use to add the layer to the map changed? 
Why do you think we made this change?", "_____no_output_____" ] ], [ [ "# Uncomment to view\n#folium.LayerControl?", "_____no_output_____" ] ], [ [ "### Saving to an html file\n\nBy saving our map to a html we can use it later as something to add to a website or email to a colleague.\n\nYou can save any of the maps you have in the notebook using this syntax:\n\n> map_name.save(\"file_name.html\")\n\nLet's try that.", "_____no_output_____" ] ], [ [ "map6.save('outdata/bartmap.html')", "_____no_output_____" ] ], [ [ "Find your html file on your computer and double-click on it to open it in a browser.", "_____no_output_____" ], [ "#### Extra Challenge\n\nCheck out the notebook examples and find one to try with the data we have used in this notebook. I recommend the following.\n\n- [Mini-maps](https://nbviewer.jupyter.org/github/python-visualization/folium/blob/master/examples/MiniMap.ipynb)\n- [Dual-map](https://nbviewer.jupyter.org/github/python-visualization/folium/blob/master/examples/plugin-DualMap.ipynb) (choropleth maps two census tract vars)\n- [Search](https://nbviewer.jupyter.org/github/python-visualization/folium/blob/master/examples/plugin-Search.ipynb) (e.g., for a Bart Station by name)", "_____no_output_____" ], [ "<a id=\"section6\"></a>\n## 12.8 Recap\nHere we learned about the wonderful world of `Folium`! We created interactive maps-- whether it be choropleth, points, lines, symbols... we mapped it all. \n\nBelow you'll find a list of key functionalities we learned:\n- Interactive mapping\n\t- `folium.Map()`\n- Adding a map layer\n\t- `.add_to()`\n\t- `folium.Choropleth()`\n\t\t- `geo_data`\n\t\t- `columns`\n\t\t- `fill_color`\n\t- `folium.GeoJson()`\n\t\t- `style_function`\n\t- `folium.Marker()`\n\t\t- `icon`\n\t- `folium.CircleMarker()`\n\t\t- `radius`\n- Adding a Tooltip\n\t- `folium.GeoJsonTooltip`\n\t- `folium.features.GeoJsonTooltip`\n- Adding layer control\n\t- `folium.LayerControl()`", "_____no_output_____" ], [ "## Important note\n\nThe folium library changes often so I recommend you update your package frequently. This will give you increased functionality and may make future code easier to write. However, it might cause your existing code to break.\n\n### References\n\nThis notebook provides an introduction to `folium`. To see what else you can do, check out the references listed below.\n\n- [Folium web site](https://github.com/python-visualization/folium)\n\n- [Folium notebook examples](https://nbviewer.jupyter.org/github/python-visualization/folium/tree/master/examples/)\n\n", "_____no_output_____" ], [ "---\n<div style=\"display:inline-block;vertical-align:middle;\">\n<a href=\"https://dlab.berkeley.edu/\" target=\"_blank\"><img src =\"assets/images/dlab_logo.png\" width=\"75\" align=\"left\">\n</a>\n</div>\n\n<div style=\"display:inline-block;vertical-align:middle;\">\n <div style=\"font-size:larger\">&nbsp;D-Lab @ University of California - Berkeley</div>\n <div>&nbsp;Team Geo<div>\n</div>\n \n\n\n\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
e7d180bb8ad116a31b534637707a2d576e001f4d
136,778
ipynb
Jupyter Notebook
sglx_pipe_rasters-z_w12m7_20-20201104.ipynb
zekearneodo/ceciestunepipe
7e771783769816f37de44077177152175aecc2b7
[ "MIT" ]
null
null
null
sglx_pipe_rasters-z_w12m7_20-20201104.ipynb
zekearneodo/ceciestunepipe
7e771783769816f37de44077177152175aecc2b7
[ "MIT" ]
null
null
null
sglx_pipe_rasters-z_w12m7_20-20201104.ipynb
zekearneodo/ceciestunepipe
7e771783769816f37de44077177152175aecc2b7
[ "MIT" ]
null
null
null
158.124855
48,356
0.879922
[ [ [ "### Rasters for a single spikeglx session\n- Load an exctractor for visualization of the data\n- Load the sorts as in notebook sglx_pipe-dev-sort-rasters--z_w12m7_20-20201104\n- load the mot_dict\n- plot rasters\n- export to npy for brad\n\n### SGL spikeextractor needs spikeextractors==0.9.3, spikeinterface==0.12.0. \nWill break with other versions.\nTODO: make sure my spikeglxrecordingextractor works with newer spikeextractors or get rid of it and adapt theirs.\n(the why i did my own is because theirs had an obscure way of reading the digital channels in the nidaqs).", "_____no_output_____" ] ], [ [ "%matplotlib inline\n\nimport os\nimport glob\nimport logging\nimport numpy as np\nimport pandas as pd\nfrom scipy.io import wavfile\nfrom scipy import signal\nimport pickle\n\nfrom matplotlib import pyplot as plt\nfrom importlib import reload\n\nlogger = logging.getLogger()\nhandler = logging.StreamHandler()\nformatter = logging.Formatter(\n '%(asctime)s %(name)-12s %(levelname)-8s %(message)s')\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\nlogger.setLevel(logging.INFO)", "_____no_output_____" ], [ "from ceciestunepipe.file import filestructure as et\nfrom ceciestunepipe.util import sglxutil as sglu\nfrom ceciestunepipe.util.spikeextractors.extractors.spikeglxrecordingextractor import readSGLX as rsgl\nfrom ceciestunepipe.util.spikeextractors.extractors.spikeglxrecordingextractor import spikeglxrecordingextractor as sglex", "h5py version > 2.10.0. Some extractors might not work properly. It is recommended to downgrade to version 2.10.0: \n>>> pip install h5py==2.10.0\n" ], [ "import spikeinterface as si\nimport spikeinterface.extractors as se\nimport spikeinterface.toolkit as st\nimport spikeinterface.sorters as ss\nimport spikeinterface.comparison as sc\nimport spikeinterface.widgets as sw\nlogger.info('all modules loaded')", "2021-08-27 13:52:03,131 root INFO all modules loaded\n" ], [ "reload(et)\n\nsess_par = {'bird': 'z_w12m7_20',\n 'sess': '20201104',\n 'probe': 'probe_0', # probe to sort ('probe_0', 'probe_1') (to lookup in the rig_par which port to extract)\n 'sort': 2}\n\nexp_struct = et.get_exp_struct(sess_par['bird'], sess_par['sess'], sess_par['sort'])\n\nksort_folder = exp_struct['folders']['ksort']\nraw_folder = exp_struct['folders']['raw']\n\nsess_epochs = sglu.list_sgl_epochs(sess_par)\nsess_epochs", "2021-08-27 13:52:36,670 ceciestunepipe.util.sglxutil INFO {'folders': {'bird': '/mnt/cube/earneodo/bci_zf/neuropix/birds/z_w12m7_20', 'raw': '/mnt/cube/earneodo/bci_zf/neuropix/birds/z_w12m7_20/Ephys/raw/20201104', 'kwik': '/mnt/sphere/earneodo/bci_zf/ss_data/z_w12m7_20/Ephys/kwik/20201104', 'processed': '/mnt/cube/earneodo/bci_zf/neuropix/birds/z_w12m7_20/Ephys/processed/20201104', 'derived': '/mnt/cube/earneodo/bci_zf/neuropix/birds/z_w12m7_20/Ephys/derived/20201104', 'tmp': '/scratch/earneodo/tmp/tmp', 'msort': '/scratch/earneodo/tmp/z_w12m7_20/Ephys/msort/20201104', 'ksort': '/scratch/earneodo/tmp/z_w12m7_20/Ephys/ksort/20201104'}, 'files': {'par': '/scratch/earneodo/tmp/z_w12m7_20/Ephys/ksort/20201104/params.json', 'set': '/mnt/cube/earneodo/bci_zf/neuropix/birds/z_w12m7_20/Ephys/raw/20201104/settings.isf', 'rig': '/mnt/cube/earneodo/bci_zf/neuropix/birds/z_w12m7_20/Ephys/raw/20201104/rig.json', 'kwd': '/mnt/sphere/earneodo/bci_zf/ss_data/z_w12m7_20/Ephys/kwik/20201104/stream.kwd', 'kwik': '/mnt/sphere/earneodo/bci_zf/ss_data/z_w12m7_20/Ephys/kwik/20201104/sort_2/spikes.kwik', 'kwe': 
'/mnt/sphere/earneodo/bci_zf/ss_data/z_w12m7_20/Ephys/kwik/20201104/events.kwe', 'dat_mic': '/mnt/cube/earneodo/bci_zf/neuropix/birds/z_w12m7_20/Ephys/processed/20201104/dat_mic.mat', 'dat_ap': '/mnt/cube/earneodo/bci_zf/neuropix/birds/z_w12m7_20/Ephys/processed/20201104/dat_ap.mat', 'allevents': '/mnt/cube/earneodo/bci_zf/neuropix/birds/z_w12m7_20/Ephys/processed/20201104/dat_all.pkl', 'wav_mic': '/mnt/cube/earneodo/bci_zf/neuropix/birds/z_w12m7_20/Ephys/derived/20201104/wav_mic.wav', 'mda_raw': '/scratch/earneodo/tmp/z_w12m7_20/Ephys/msort/20201104/raw.mda', 'bin_raw': '/scratch/earneodo/tmp/z_w12m7_20/Ephys/ksort/20201104/raw.bin'}}\n" ], [ "### pick a session\nreload(et)\nreload(sglu)\nepoch = sess_epochs[1] # g2 is the shortest\n\nexp_struct = sglu.sgl_struct(sess_par, epoch)\nsgl_folders, sgl_files = sglu.sgl_file_struct(exp_struct['folders']['raw'])\nfiles_pd = pd.DataFrame(sgl_files)", "_____no_output_____" ] ], [ [ "### get the recordings just in case", "_____no_output_____" ] ], [ [ "probe_id = int(sess_par['probe'].split('_')[-1])\ni_run = 0\n\nrun_meta_files = {k: v[i_run] for k, v in sgl_files.items()}\nrun_recordings = {k: sglex.SpikeGLXRecordingExtractor(sglu.get_data_meta_path(v)[0]) for k, v in run_meta_files.items()}\n", "_____no_output_____" ] ], [ [ "### load the sort and the motif dictionary", "_____no_output_____" ] ], [ [ "from ceciestunepipe.util.spike import kilosort as ks\nfrom ceciestunepipe.util.sound import spectral as sp\nfrom ceciestunepipe.util import plotutil as pu\nplt.rcParams['lines.linewidth'] = 0.1\n\naxes_pars = {'axes.labelpad': 5,\n 'axes.titlepad': 5,\n 'axes.titlesize': 'small',\n 'axes.grid': False,\n 'axes.xmargin': 0,\n 'axes.ymargin': 0}\n\nplt.rcParams.update(axes_pars)", "_____no_output_____" ] ], [ [ "###### load sort", "_____no_output_____" ] ], [ [ "spike_pickle_path = os.path.join(exp_struct['folders']['processed'], 'spk_df.pkl')\nclu_pickle_path = os.path.join(exp_struct['folders']['processed'], 'clu_df.pkl')\n\nspk_df = pd.read_pickle(spike_pickle_path)\nclu_df = pd.read_pickle(clu_pickle_path)", "_____no_output_____" ] ], [ [ "##### load motif dictionary", "_____no_output_____" ] ], [ [ "mot_dict_path = os.path.join(exp_struct['folders']['processed'], 'mot_dict.pkl')\nlogger.info('Loading mot_dict from {}'.format(mot_dict_path))\n\nwith open(mot_dict_path, 'rb') as handle:\n mot_dict = pickle.load(handle)\n\nmot_dict", "2021-08-27 14:23:39,729 root INFO Loading mot_dict from /mnt/cube/earneodo/bci_zf/neuropix/birds/z_w12m7_20/Ephys/processed/20201104/2500r250a_3500_dir_g0/mot_dict.pkl\n" ] ], [ [ "##### make a raster", "_____no_output_____" ] ], [ [ "## the start times synched to the spike time base (ap_0, comes from sglx_pipe-dev-sort-rasters notebook)\nmot_samples = mot_dict['start_sample_ap_0']\nmot_s_f = mot_dict['s_f']\nap_s_f = mot_dict['s_f_ap_0']", "_____no_output_____" ], [ "mot_samples", "_____no_output_____" ], [ "## get the actural raster for some clusters\ndef get_window_spikes(spk_df, clu_list, start_sample, end_sample):\n onset = start_sample\n offset = end_sample\n \n spk_t = spk_df.loc[spk_df['times'].between(onset, offset, inclusive=False)]\n \n spk_arr = np.zeros((clu_list.size, offset - onset))\n\n for i, clu_id in enumerate(clu_list):\n clu_spk_t = spk_t.loc[spk_t['clusters']==clu_id, 'times'].values\n spk_arr[i, clu_spk_t - onset] = 1\n return spk_arr\n \ndef get_rasters(spk_df, clu_list, start_samp_arr, span_samples):\n # returns np.array([n_clu, n_sample, n_trial])\n \n # get the window spikes for all of the 
clusters, for each of the start_samp_arr\n spk_arr_list = [get_window_spikes(spk_df, clu_list, x, x+span_samples) for x in start_samp_arr]\n return np.stack(spk_arr_list, axis=-1)", "_____no_output_____" ] ], [ [ "##### collect all good, ra units", "_____no_output_____" ] ], [ [ "t_pre = - 0.5\nt_post = 1.5\nt_pre_samp = int(t_pre * ap_s_f)\nt_post_samp = int(t_post * ap_s_f)", "_____no_output_____" ], [ "clu_list = np.unique(clu_df.loc[(clu_df['KSLabel']=='good') & (clu_df['nucleus'].isin(['ra'])), \n 'cluster_id'])\n\n", "_____no_output_____" ], [ "rast_arr = get_rasters(spk_df, clu_list, mot_dict['start_sample_ap_0'] + t_pre_samp, t_post_samp - t_pre_samp)", "2021-08-27 14:43:52,784 numexpr.utils INFO Note: NumExpr detected 32 cores but \"NUMEXPR_MAX_THREADS\" not set, so enforcing safe limit of 8.\n2021-08-27 14:43:52,784 numexpr.utils INFO Note: NumExpr detected 32 cores but \"NUMEXPR_MAX_THREADS\" not set, so enforcing safe limit of 8.\n2021-08-27 14:43:52,785 numexpr.utils INFO NumExpr defaulting to 8 threads.\n2021-08-27 14:43:52,785 numexpr.utils INFO NumExpr defaulting to 8 threads.\n" ], [ "def plot_as_raster(x, ax=None, t_0=None):\n #x is [n_events, n_timestamps] array\n n_y, n_t = x.shape\n \n row = np.ones(n_t) + 1\n t = np.arange(n_t)\n col = np.arange(n_y)\n \n frame = col[:, np.newaxis] + row[np.newaxis, :]\n x[x==0] = np.nan\n \n if ax is None:\n fig, ax = plt.subplots()\n \n raster = ax.scatter(t * x, frame * x, marker='.', facecolor='k', s=1, rasterized=False)\n if t_0 is not None:\n ax.axvline(x=t_0, color='red')\n return ax\n\nspk_arr = get_window_spikes(spk_df, clu_list, int(ap_start + pre_sec*ap_sf), int(ap_start + post_sec*ap_sf))\n\nfig, ax = plt.subplots(nrows=2, gridspec_kw={'height_ratios': [1, 10]}, figsize=(10, 22))\n\nf, t, sxx = sp.ms_spectrogram(mic_arr.flatten(), nidq_sf)\n\n#ax[0].plot(mic_arr.flatten())\n\nax[0].pcolormesh(t, f, np.log(sxx), cmap='inferno')\n\nplot_as_raster(spk_arr, t_0=int(-pre_sec*ap_sf), ax=ax[1])\nplt.tight_layout()", "_____no_output_____" ], [ "fig, ax_arr = plt.subplots(nrows=10, figsize=[10, 15], sharex=True)\n\nfor i_rast, clu_idx in enumerate(range(20, 30)): \n #one_raster_ms = coarse(rast_arr[clu_idx].T, samples_in_ms)\n #plt.imshow(one_raster_ms[::-1], aspect='auto', cmap='inferno')\n plot_as_raster(rast_arr[clu_idx].T, t_0=-t_pre_samp, ax=ax_arr[i_rast])", "_____no_output_____" ] ], [ [ "##### export to npy arrays", "_____no_output_____" ] ], [ [ "def export_spikes_array(spk_df, clu_list, start_samples, span_samples, file_path, bin_size=None):\n # get the raster for the clu_list\n # if necessary, bin it\n # save it as numpy\n rast_arr = get_rasters(spk_df, clu_list, start_samples, span_samples)\n \n if bin_size:\n logger.info('Getting binned spikes with {} sample bins'.format(bin_size))\n rate_arr = pu.coarse(np.transpose(rast_arr, axes=[0, 2, 1]), n_coarse=bin_size)\n # switch back axes to [clu, t, trial]\n export_arr = np.transpose(rate_arr, axes=[0, 2, 1])\n #export_arr = rate_arr\n else:\n export_arr = rast_arr\n \n logger.info('saving spikes as {}'.format(file_path))\n np.save(file_path, export_arr)\n return export_arr\n\n", "_____no_output_____" ], [ "rast_arr = get_rasters(spk_df, clu_list, mot_dict['start_sample_ap_0'] + t_pre_samp, t_post_samp - t_pre_samp)", "_____no_output_____" ], [ "mot_len = mot_dict['template'].size\nmot_len_s = mot_len / mot_s_f\nt_pre = - 0.5\nt_post = 0.5 + mot_len_s\nbin_ms = 0\n\nt_pre_samp = int(t_pre * ap_s_f)\nt_post_samp = int(t_post * ap_s_f)\nbin_samp = int(bin_ms * ap_s_f * 
0.001)\n\nspk_arr_list = []\nfor nucleus in ['hvc', 'ra']:\n # get the cluster list\n clu_list = np.unique(clu_df.loc[(clu_df['KSLabel']=='good') & (clu_df['nucleus'].isin([nucleus])), \n 'cluster_id'])\n # make the file path\n file_path = os.path.join(exp_struct['folders']['processed'], \n 'fr_arr-{}-{}ms.pkl'.format(nucleus, bin_ms))\n logger.info('saving spikes as {}'.format(file_path))\n \n # get the spikes to the file\n spk_arr = export_spikes_array(spk_df, \n clu_list, \n mot_dict['start_sample_ap_0'] + t_pre_samp, \n t_post_samp - t_pre_samp, \n file_path, \n bin_samp)\n spk_arr_list.append(spk_arr)", "2021-08-27 16:26:54,973 root INFO saving spikes as /mnt/cube/earneodo/bci_zf/neuropix/birds/z_w12m7_20/Ephys/processed/20201104/2500r250a_3500_dir_g0/fr_arr-hvc-0ms.pkl\n2021-08-27 16:26:54,973 root INFO saving spikes as /mnt/cube/earneodo/bci_zf/neuropix/birds/z_w12m7_20/Ephys/processed/20201104/2500r250a_3500_dir_g0/fr_arr-hvc-0ms.pkl\n2021-08-27 16:26:58,612 root INFO saving spikes as /mnt/cube/earneodo/bci_zf/neuropix/birds/z_w12m7_20/Ephys/processed/20201104/2500r250a_3500_dir_g0/fr_arr-hvc-0ms.pkl\n2021-08-27 16:26:58,612 root INFO saving spikes as /mnt/cube/earneodo/bci_zf/neuropix/birds/z_w12m7_20/Ephys/processed/20201104/2500r250a_3500_dir_g0/fr_arr-hvc-0ms.pkl\n2021-08-27 16:26:59,373 root INFO saving spikes as /mnt/cube/earneodo/bci_zf/neuropix/birds/z_w12m7_20/Ephys/processed/20201104/2500r250a_3500_dir_g0/fr_arr-ra-0ms.pkl\n2021-08-27 16:26:59,373 root INFO saving spikes as /mnt/cube/earneodo/bci_zf/neuropix/birds/z_w12m7_20/Ephys/processed/20201104/2500r250a_3500_dir_g0/fr_arr-ra-0ms.pkl\n2021-08-27 16:27:03,355 root INFO saving spikes as /mnt/cube/earneodo/bci_zf/neuropix/birds/z_w12m7_20/Ephys/processed/20201104/2500r250a_3500_dir_g0/fr_arr-ra-0ms.pkl\n2021-08-27 16:27:03,355 root INFO saving spikes as /mnt/cube/earneodo/bci_zf/neuropix/birds/z_w12m7_20/Ephys/processed/20201104/2500r250a_3500_dir_g0/fr_arr-ra-0ms.pkl\n" ], [ "spk_arr.shape", "_____no_output_____" ] ], [ [ "#### plot one spk_arr together with a motif", "_____no_output_____" ] ], [ [ "spk_arr = spk_arr_list[1]\nplt.imshow(spk_arr[32, :, :].T, aspect='auto', cmap='inferno')", "_____no_output_____" ], [ "np.transpose(spk_arr, axes=[0, 2, 1]).shape", "_____no_output_____" ], [ "plt.plot(spk_arr[0].sum(axis=1))", "_____no_output_____" ], [ "np.transpose(rast_arr, axes=[0, 2, 1]).shape", "_____no_output_____" ], [ "spk_arr.shape", "_____no_output_____" ], [ "mot_len = mot_dict['template'].size\nmot_len_s = mot_len / mot_s_f\nt_pre = - 0.5\nt_post = 0.5 + mot_len_s\nbin_ms = 2\n\nt_pre_samp = int(t_pre * ap_s_f)\nt_post_samp = int(t_post * ap_s_f)\nbin_samp = int(bin_ms * ap_s_f * 0.001)", "_____no_output_____" ], [ "mot_len_s", "_____no_output_____" ], [ "fr_arr = pu.coarse(np.transpose(rast_arr, axes=[0, 2, 1]), n_coarse=bin_samp)\nfr_arr.shape", "/mnt/cube/earneodo/repos/ceciestunepipe/ceciestunepipe/util/plotutil.py:66: RuntimeWarning: Mean of empty slice\n coarse_x = np.nanmean(exploded_x, axis=-1)\n" ], [ "fig, ax_arr = plt.subplots(nrows=10, figsize=[10, 15], sharex=True)\n\nfor i_rast, clu_idx in enumerate(range(50, 60)): \n #one_raster_ms = coarse(rast_arr[clu_idx].T, samples_in_ms)\n #plt.imshow(one_raster_ms[::-1], aspect='auto', cmap='inferno')\n plot_as_raster(spk_arr[clu_idx].T, t_0=-t_pre_samp, ax=ax_arr[i_rast])", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7d1839dc25a701d392abe597315c35927cabfcc
60,096
ipynb
Jupyter Notebook
swift/quant_activation.ipynb
zsc/kid-programming
3e8c8bfe379d5999e548a899a3fc6cd03ba6be7c
[ "MIT" ]
13
2017-07-02T12:44:24.000Z
2021-03-23T07:34:30.000Z
swift/quant_activation.ipynb
zsc/kid-programming
3e8c8bfe379d5999e548a899a3fc6cd03ba6be7c
[ "MIT" ]
1
2020-04-26T07:18:12.000Z
2021-08-15T15:42:01.000Z
swift/quant_activation.ipynb
zsc/kid-programming
3e8c8bfe379d5999e548a899a3fc6cd03ba6be7c
[ "MIT" ]
6
2017-07-14T02:02:13.000Z
2021-06-16T07:22:47.000Z
138.151724
31,988
0.866846
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
e7d186e65c0e2dafb94bf4968bd01773b2976c19
64,868
ipynb
Jupyter Notebook
labs/lab09.ipynb
Amandaa-S/mat281_portfolio
42bfe41878707c67334f931dbc19db4ee1a55bd4
[ "MIT" ]
null
null
null
labs/lab09.ipynb
Amandaa-S/mat281_portfolio
42bfe41878707c67334f931dbc19db4ee1a55bd4
[ "MIT" ]
null
null
null
labs/lab09.ipynb
Amandaa-S/mat281_portfolio
42bfe41878707c67334f931dbc19db4ee1a55bd4
[ "MIT" ]
null
null
null
102.801902
20,970
0.536983
[ [ [ "# Laboratorio 9", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport altair as alt\nimport matplotlib.pyplot as plt\n\nfrom vega_datasets import data\n\nalt.themes.enable('opaque')\n%matplotlib inline", "_____no_output_____" ] ], [ [ "En este laboratorio utilizaremos un conjunto de datos _famoso_, el GapMinder. Esta es una versión reducida que solo considera países, ingresos, salud y población. \n\n¿Hay alguna forma natural de agrupar a estos países?", "_____no_output_____" ] ], [ [ "gapminder = data.gapminder_health_income()\ngapminder.head()", "_____no_output_____" ] ], [ [ "## Ejercicio 1\n\n(1 pto.)\n\nRealiza un Análisis exploratorio, como mínimo un `describe` del dataframe y una visualización adecuada, por ejemplo un _scatter matrix_ con los valores numéricos.", "_____no_output_____" ] ], [ [ "gapminder.describe()", "_____no_output_____" ], [ "#scatter matrix con valores númerico\nalt.Chart(gapminder).mark_circle().encode(\n alt.X(alt.repeat(\"column\"), type='quantitative'),\n alt.Y(alt.repeat(\"row\"), type='quantitative'),\n).properties(\n width=150,\n height=150\n).repeat(\n row=['income', 'health', 'population'],\n column=['population', 'health', 'income']\n).interactive()", "_____no_output_____" ] ], [ [ "__Pregunta:__ ¿Hay alguna variable que te entregue indicios a simple vista donde se puedan separar países en grupos?\n\n__Respuesta:__ En la variable población se pueden ver 3 grupos, en la variable health se observan 2 grupos y en la variable income se pueden ver 3 grupos distintos. Luego, en los otros gráficos:\n\nEn el gráfico population vs income se puede apreciar 3 grupos, uno con gran cantidad de paises con una baja población y bajos ingresos, otro con dos países con mayor cantidad de habitantes y por último paises con mayor cantidad de ingresos.\n\nEn el grafico health vs population se pueden apreciar 3 grupos que se van diferenciando con la cantidad de habitantes\n\nPor último en el gráfico income vs health se observan 3 grupos, diferenciandose con los ingresos por pais", "_____no_output_____" ], [ "## Ejercicio 2\n\n(1 pto.)\n\nAplicar un escalamiento a los datos antes de aplicar nuestro algoritmo de clustering. Para ello, definir la variable `X_raw` que corresponde a un `numpy.array` con los valores del dataframe `gapminder` en las columnas _income_, _health_ y _population_. Luego, definir la variable `X` que deben ser los datos escalados de `X_raw`.", "_____no_output_____" ] ], [ [ "from sklearn.preprocessing import StandardScaler", "_____no_output_____" ], [ "X_raw = pd.DataFrame({\"income\": gapminder[\"income\"],\"health\": gapminder[\"health\"],\"population\":gapminder[\"population\"]}).to_numpy()\nX = StandardScaler().fit_transform(X_raw)", "_____no_output_____" ] ], [ [ "## Ejercicio 3\n\n(1 pto.)\n\nDefinir un _estimator_ `KMeans` con `k=3` y `random_state=42`, luego ajustar con `X` y finalmente, agregar los _labels_ obtenidos a una nueva columna del dataframe `gapminder` llamada `cluster`. 
\n", "_____no_output_____" ] ], [ [ "from sklearn.cluster import KMeans", "_____no_output_____" ], [ "k = 3\nkmeans = KMeans(n_clusters=k, random_state=42)\nkmeans.fit(X)\nclusters = kmeans.labels_\ngapminder[\"cluster\"] = clusters", "_____no_output_____" ], [ "alt.Chart(gapminder).mark_circle().encode(\n    alt.X(alt.repeat(\"column\"), type='quantitative'),\n    alt.Y(alt.repeat(\"row\"), type='quantitative'),\n    color='cluster:N'\n).properties(\n    width=150,\n    height=150\n).repeat(\n    row=['income', 'health', 'population'],\n    column=['population', 'health', 'income']\n).interactive()", "_____no_output_____" ] ], [ [ "## Exercise 4\n\n(1 pt.)\n\n__Elbow rule__\n\n__How do we choose the best number of _clusters_?__\n\nIn this exercise we have used a number of clusters equal to 3. The model fit will always improve as the number of clusters increases, but that does not mean the number of clusters is appropriate. In fact, if we have to fit $n$ points, taking $n$ clusters clearly yields a perfect fit, but it would not reveal whether real groupings exist in the data.\n\nWhen the number of clusters is not known a priori, the [elbow rule](https://jarroba.com/seleccion-del-numero-optimo-clusters/) is used. It states that the most appropriate number is the one where the decreasing sum of each point's distance to its cluster, as a function of the number of clusters, \"changes slope\".\n\nBelow we provide the code for clustering on the standardized data. In the line that declares `kmeans` inside the _for_ loop, you must define a K-Means estimator with `k` clusters and `random_state` 42. Remember to take the opportunity to fit the model in a single line.", "_____no_output_____" ] ], [ [ "elbow = pd.Series(name=\"inertia\", dtype=\"float64\").rename_axis(index=\"k\")\nfor k in range(1, 10):\n    kmeans = KMeans(n_clusters=k, random_state=42).fit(X)\n    elbow.loc[k] = kmeans.inertia_  # Inertia: sum of squared distances of samples to their closest cluster center\nelbow = elbow.reset_index()", "_____no_output_____" ], [ "alt.Chart(elbow).mark_line(point=True).encode(\n    x=\"k:O\",\n    y=\"inertia:Q\"\n).properties(\n    height=600,\n    width=800\n)", "_____no_output_____" ] ], [ [ "__Question:__ Considering the data (countries) and the plot above, how many clusters would you choose?\n\n__Answer:__ I would choose 4 clusters, since it shows an improvement over the smaller values of k and there is no significant improvement for larger ones.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
e7d18d884d92b5d027edbc5702c5d0195cd699c5
348,991
ipynb
Jupyter Notebook
Course 4 - Convolutional Neural Networks/NoteBooks/Residual_Networks_v2a.ipynb
HarshitRuwali/Coursera-Deep-Learning-Specialization
8038f2f2d746ad455e0e3c45c736c5d9b7348d8a
[ "MIT" ]
null
null
null
Course 4 - Convolutional Neural Networks/NoteBooks/Residual_Networks_v2a.ipynb
HarshitRuwali/Coursera-Deep-Learning-Specialization
8038f2f2d746ad455e0e3c45c736c5d9b7348d8a
[ "MIT" ]
null
null
null
Course 4 - Convolutional Neural Networks/NoteBooks/Residual_Networks_v2a.ipynb
HarshitRuwali/Coursera-Deep-Learning-Specialization
8038f2f2d746ad455e0e3c45c736c5d9b7348d8a
[ "MIT" ]
null
null
null
108.180719
110,302
0.705121
[ [ [ "# Residual Networks\n\nWelcome to the second assignment of this week! You will learn how to build very deep convolutional networks, using Residual Networks (ResNets). In theory, very deep networks can represent very complex functions; but in practice, they are hard to train. Residual Networks, introduced by [He et al.](https://arxiv.org/pdf/1512.03385.pdf), allow you to train much deeper networks than were previously practically feasible.\n\n**In this assignment, you will:**\n- Implement the basic building blocks of ResNets. \n- Put together these building blocks to implement and train a state-of-the-art neural network for image classification. ", "_____no_output_____" ], [ "## <font color='darkblue'>Updates</font>\n\n#### If you were working on the notebook before this update...\n* The current notebook is version \"2a\".\n* You can find your original work saved in the notebook with the previous version name (\"v2\") \n* To view the file directory, go to the menu \"File->Open\", and this will open a new tab that shows the file directory.\n\n#### List of updates\n* For testing on an image, replaced `preprocess_input(x)` with `x=x/255.0` to normalize the input image in the same way that the model's training data was normalized.\n* Refers to \"shallower\" layers as those layers closer to the input, and \"deeper\" layers as those closer to the output (Using \"shallower\" layers instead of \"lower\" or \"earlier\").\n* Added/updated instructions.\n", "_____no_output_____" ], [ "This assignment will be done in Keras. \n\nBefore jumping into the problem, let's run the cell below to load the required packages.", "_____no_output_____" ] ], [ [ "import numpy as np\nfrom keras import layers\nfrom keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D, GlobalMaxPooling2D\nfrom keras.models import Model, load_model\nfrom keras.preprocessing import image\nfrom keras.utils import layer_utils\nfrom keras.utils.data_utils import get_file\nfrom keras.applications.imagenet_utils import preprocess_input\nimport pydot\nfrom IPython.display import SVG\nfrom keras.utils.vis_utils import model_to_dot\nfrom keras.utils import plot_model\nfrom resnets_utils import *\nfrom keras.initializers import glorot_uniform\nimport scipy.misc\nfrom matplotlib.pyplot import imshow\n%matplotlib inline\n\nimport keras.backend as K\nK.set_image_data_format('channels_last')\nK.set_learning_phase(1)", "Using TensorFlow backend.\n" ] ], [ [ "## 1 - The problem of very deep neural networks\n\nLast week, you built your first convolutional neural network. In recent years, neural networks have become deeper, with state-of-the-art networks going from just a few layers (e.g., AlexNet) to over a hundred layers.\n\n* The main benefit of a very deep network is that it can represent very complex functions. It can also learn features at many different levels of abstraction, from edges (at the shallower layers, closer to the input) to very complex features (at the deeper layers, closer to the output). \n* However, using a deeper network doesn't always help. A huge barrier to training them is vanishing gradients: very deep networks often have a gradient signal that goes to zero quickly, thus making gradient descent prohibitively slow. 
\n* More specifically, during gradient descent, as you backprop from the final layer back to the first layer, you are multiplying by the weight matrix on each step, and thus the gradient can decrease exponentially quickly to zero (or, in rare cases, grow exponentially quickly and \"explode\" to take very large values). \n* During training, you might therefore see the magnitude (or norm) of the gradient for the shallower layers decrease to zero very rapidly as training proceeds: ", "_____no_output_____" ], [ "<img src=\"images/vanishing_grad_kiank.png\" style=\"width:450px;height:220px;\">\n<caption><center> <u> <font color='purple'> **Figure 1** </u><font color='purple'> : **Vanishing gradient** <br> The speed of learning decreases very rapidly for the shallower layers as the network trains </center></caption>\n\nYou are now going to solve this problem by building a Residual Network!", "_____no_output_____" ], [ "## 2 - Building a Residual Network\n\nIn ResNets, a \"shortcut\" or a \"skip connection\" allows the model to skip layers: \n\n<img src=\"images/skip_connection_kiank.png\" style=\"width:650px;height:200px;\">\n<caption><center> <u> <font color='purple'> **Figure 2** </u><font color='purple'> : A ResNet block showing a **skip-connection** <br> </center></caption>\n\nThe image on the left shows the \"main path\" through the network. The image on the right adds a shortcut to the main path. By stacking these ResNet blocks on top of each other, you can form a very deep network. \n\nWe also saw in lecture that having ResNet blocks with the shortcut also makes it very easy for one of the blocks to learn an identity function. This means that you can stack on additional ResNet blocks with little risk of harming training set performance. \n \n(There is also some evidence that the ease of learning an identity function accounts for ResNets' remarkable performance even more so than skip connections helping with vanishing gradients).\n\nTwo main types of blocks are used in a ResNet, depending mainly on whether the input/output dimensions are same or different. You are going to implement both of them: the \"identity block\" and the \"convolutional block.\"", "_____no_output_____" ], [ "### 2.1 - The identity block\n\nThe identity block is the standard block used in ResNets, and corresponds to the case where the input activation (say $a^{[l]}$) has the same dimension as the output activation (say $a^{[l+2]}$). To flesh out the different steps of what happens in a ResNet's identity block, here is an alternative diagram showing the individual steps:\n\n<img src=\"images/idblock2_kiank.png\" style=\"width:650px;height:150px;\">\n<caption><center> <u> <font color='purple'> **Figure 3** </u><font color='purple'> : **Identity block.** Skip connection \"skips over\" 2 layers. </center></caption>\n\nThe upper path is the \"shortcut path.\" The lower path is the \"main path.\" In this diagram, we have also made explicit the CONV2D and ReLU steps in each layer. To speed up training we have also added a BatchNorm step. Don't worry about this being complicated to implement--you'll see that BatchNorm is just one line of code in Keras! \n\nIn this exercise, you'll actually implement a slightly more powerful version of this identity block, in which the skip connection \"skips over\" 3 hidden layers rather than 2 layers. 
It looks like this: \n\n<img src=\"images/idblock3_kiank.png\" style=\"width:650px;height:150px;\">\n<caption><center> <u> <font color='purple'> **Figure 4** </u><font color='purple'> : **Identity block.** Skip connection \"skips over\" 3 layers.</center></caption>", "_____no_output_____" ], [ "Here are the individual steps.\n\nFirst component of main path: \n- The first CONV2D has $F_1$ filters of shape (1,1) and a stride of (1,1). Its padding is \"valid\" and its name should be `conv_name_base + '2a'`. Use 0 as the seed for the random initialization. \n- The first BatchNorm is normalizing the 'channels' axis. Its name should be `bn_name_base + '2a'`.\n- Then apply the ReLU activation function. This has no name and no hyperparameters. \n\nSecond component of main path:\n- The second CONV2D has $F_2$ filters of shape $(f,f)$ and a stride of (1,1). Its padding is \"same\" and its name should be `conv_name_base + '2b'`. Use 0 as the seed for the random initialization. \n- The second BatchNorm is normalizing the 'channels' axis. Its name should be `bn_name_base + '2b'`.\n- Then apply the ReLU activation function. This has no name and no hyperparameters. \n\nThird component of main path:\n- The third CONV2D has $F_3$ filters of shape (1,1) and a stride of (1,1). Its padding is \"valid\" and its name should be `conv_name_base + '2c'`. Use 0 as the seed for the random initialization. \n- The third BatchNorm is normalizing the 'channels' axis. Its name should be `bn_name_base + '2c'`. \n- Note that there is **no** ReLU activation function in this component. \n\nFinal step: \n- The `X_shortcut` and the output from the 3rd layer `X` are added together.\n- **Hint**: The syntax will look something like `Add()([var1,var2])`\n- Then apply the ReLU activation function. This has no name and no hyperparameters. \n\n**Exercise**: Implement the ResNet identity block. We have implemented the first component of the main path. Please read this carefully to make sure you understand what it is doing. You should implement the rest. \n- To implement the Conv2D step: [Conv2D](https://keras.io/layers/convolutional/#conv2d)\n- To implement BatchNorm: [BatchNormalization](https://faroit.github.io/keras-docs/1.2.2/layers/normalization/) (axis: Integer, the axis that should be normalized (typically the 'channels' axis))\n- For the activation, use: `Activation('relu')(X)`\n- To add the value passed forward by the shortcut: [Add](https://keras.io/layers/merge/#add)", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: identity_block\n\ndef identity_block(X, f, filters, stage, block):\n \"\"\"\n Implementation of the identity block as defined in Figure 3\n\n Arguments:\n X -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)\n f -- integer, specifying the shape of the middle CONV's window for the main path\n filters -- python list of integers, defining the number of filters in the CONV layers of the main path\n stage -- integer, used to name the layers, depending on their position in the network\n block -- string/character, used to name the layers, depending on their position in the network\n\n Returns:\n X -- output of the identity block, tensor of shape (n_H, n_W, n_C)\n \"\"\"\n\n # defining name basis\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n # Retrieve Filters\n F1, F2, F3 = filters\n\n # Save the input value. You'll need this later to add back to the main path. 
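\n    # (note: X_shortcut, assigned on the next line, is re-added to the main path\n    # via Add()([X, X_shortcut]) in the final step, just before the last ReLU)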
\n X_shortcut = X\n\n # First component of main path\n X = Conv2D(filters=F1, kernel_size=(1, 1), strides=(1, 1), padding='valid', name=conv_name_base + '2a', kernel_initializer=glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis=3, name=bn_name_base + '2a')(X)\n X = Activation('relu')(X)\n\n ### START CODE HERE ###\n\n # Second component of main path (≈3 lines)\n X = Conv2D(filters=F2, kernel_size=(f, f), strides=(1, 1), padding='same', name=conv_name_base + '2b', kernel_initializer=glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis=3, name=bn_name_base + '2b')(X)\n X = Activation('relu')(X)\n\n # Third component of main path (≈2 lines)\n X = Conv2D(filters=F3, kernel_size=(1, 1), strides=(1, 1), padding='valid', name=conv_name_base + '2c', kernel_initializer=glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis=3, name=bn_name_base + '2c')(X)\n\n # Final step: Add shortcut value to main path, and pass it through a RELU activation (≈2 lines)\n X = Add()([X, X_shortcut])\n X = Activation('relu')(X)\n\n ### END CODE HERE ###\n\n return X\n", "_____no_output_____" ], [ "tf.reset_default_graph()\n\nwith tf.Session() as test:\n np.random.seed(1)\n A_prev = tf.placeholder(\"float\", [3, 4, 4, 6])\n X = np.random.randn(3, 4, 4, 6)\n A = identity_block(A_prev, f = 2, filters = [2, 4, 6], stage = 1, block = 'a')\n test.run(tf.global_variables_initializer())\n out = test.run([A], feed_dict={A_prev: X, K.learning_phase(): 0})\n print(\"out = \" + str(out[0][1][1][0]))", "out = [ 0.94822985 0. 1.16101444 2.747859 0. 1.36677003]\n" ] ], [ [ "**Expected Output**:\n\n<table>\n <tr>\n <td>\n **out**\n </td>\n <td>\n [ 0.94822985 0. 1.16101444 2.747859 0. 1.36677003]\n </td>\n </tr>\n\n</table>", "_____no_output_____" ], [ "## 2.2 - The convolutional block\n\nThe ResNet \"convolutional block\" is the second block type. You can use this type of block when the input and output dimensions don't match up. The difference with the identity block is that there is a CONV2D layer in the shortcut path: \n\n<img src=\"images/convblock_kiank.png\" style=\"width:650px;height:150px;\">\n<caption><center> <u> <font color='purple'> **Figure 4** </u><font color='purple'> : **Convolutional block** </center></caption>\n\n* The CONV2D layer in the shortcut path is used to resize the input $x$ to a different dimension, so that the dimensions match up in the final addition needed to add the shortcut value back to the main path. (This plays a role similar to that of the matrix $W_s$ discussed in lecture.) \n* For example, to reduce the activation's height and width by a factor of 2, you can use a 1x1 convolution with a stride of 2. \n* The CONV2D layer on the shortcut path does not use any non-linear activation function. Its main role is to just apply a (learned) linear function that reduces the dimension of the input, so that the dimensions match up for the later addition step. \n\nThe details of the convolutional block are as follows. \n\nFirst component of main path:\n- The first CONV2D has $F_1$ filters of shape (1,1) and a stride of (s,s). Its padding is \"valid\" and its name should be `conv_name_base + '2a'`. Use 0 as the `glorot_uniform` seed.\n- The first BatchNorm is normalizing the 'channels' axis. Its name should be `bn_name_base + '2a'`.\n- Then apply the ReLU activation function. This has no name and no hyperparameters. \n\nSecond component of main path:\n- The second CONV2D has $F_2$ filters of shape (f,f) and a stride of (1,1). 
Its padding is \"same\" and its name should be `conv_name_base + '2b'`. Use 0 as the `glorot_uniform` seed.\n- The second BatchNorm is normalizing the 'channels' axis. Its name should be `bn_name_base + '2b'`.\n- Then apply the ReLU activation function. This has no name and no hyperparameters. \n\nThird component of main path:\n- The third CONV2D has $F_3$ filters of shape (1,1) and a stride of (1,1). Its padding is \"valid\" and its name should be `conv_name_base + '2c'`. Use 0 as the `glorot_uniform` seed.\n- The third BatchNorm is normalizing the 'channels' axis. Its name should be `bn_name_base + '2c'`. Note that there is no ReLU activation function in this component. \n\nShortcut path:\n- The CONV2D has $F_3$ filters of shape (1,1) and a stride of (s,s). Its padding is \"valid\" and its name should be `conv_name_base + '1'`. Use 0 as the `glorot_uniform` seed.\n- The BatchNorm is normalizing the 'channels' axis. Its name should be `bn_name_base + '1'`. \n\nFinal step: \n- The shortcut and the main path values are added together.\n- Then apply the ReLU activation function. This has no name and no hyperparameters. \n \n**Exercise**: Implement the convolutional block. We have implemented the first component of the main path; you should implement the rest. As before, always use 0 as the seed for the random initialization, to ensure consistency with our grader.\n- [Conv2D](https://keras.io/layers/convolutional/#conv2d)\n- [BatchNormalization](https://keras.io/layers/normalization/#batchnormalization) (axis: Integer, the axis that should be normalized (typically the features axis))\n- For the activation, use: `Activation('relu')(X)`\n- [Add](https://keras.io/layers/merge/#add)", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: convolutional_block\n\ndef convolutional_block(X, f, filters, stage, block, s=2):\n \"\"\"\n Implementation of the convolutional block as defined in Figure 4\n\n Arguments:\n X -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)\n f -- integer, specifying the shape of the middle CONV's window for the main path\n filters -- python list of integers, defining the number of filters in the CONV layers of the main path\n stage -- integer, used to name the layers, depending on their position in the network\n block -- string/character, used to name the layers, depending on their position in the network\n s -- Integer, specifying the stride to be used\n\n Returns:\n X -- output of the convolutional block, tensor of shape (n_H, n_W, n_C)\n \"\"\"\n\n # defining name basis\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n # Retrieve Filters\n F1, F2, F3 = filters\n\n # Save the input value\n X_shortcut = X\n\n ##### MAIN PATH #####\n # First component of main path \n X = Conv2D(filters=F1, kernel_size=(1, 1), strides=(s, s), padding='valid', name=conv_name_base + '2a', kernel_initializer=glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis=3, name=bn_name_base + '2a')(X)\n X = Activation('relu')(X)\n\n ### START CODE HERE ###\n\n # Second component of main path (≈3 lines)\n X = Conv2D(filters=F2, kernel_size=(f, f), strides=(1, 1), padding='same', name=conv_name_base + '2b', kernel_initializer=glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis=3, name=bn_name_base + '2b')(X)\n X = Activation('relu')(X)\n\n # Third component of main path (≈2 lines)\n X = Conv2D(filters=F3, kernel_size=(1, 1), strides=(1, 1), padding='valid', name=conv_name_base + '2c', 
kernel_initializer=glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis=3, name=bn_name_base + '2c')(X)\n\n ##### SHORTCUT PATH #### (≈2 lines)\n X_shortcut = Conv2D(filters=F3, kernel_size=(1, 1), strides=(s, s), padding='valid', name=conv_name_base + '1', kernel_initializer=glorot_uniform(seed=0))(X_shortcut)\n X_shortcut = BatchNormalization(axis=3, name=bn_name_base + '1')(X_shortcut)\n\n # Final step: Add shortcut value to main path, and pass it through a RELU activation (≈2 lines)\n X = Add()([X, X_shortcut])\n X = Activation('relu')(X)\n\n ### END CODE HERE ###\n return X", "_____no_output_____" ], [ "tf.reset_default_graph()\n\nwith tf.Session() as test:\n np.random.seed(1)\n A_prev = tf.placeholder(\"float\", [3, 4, 4, 6])\n X = np.random.randn(3, 4, 4, 6)\n A = convolutional_block(A_prev, f = 2, filters = [2, 4, 6], stage = 1, block = 'a')\n test.run(tf.global_variables_initializer())\n out = test.run([A], feed_dict={A_prev: X, K.learning_phase(): 0})\n print(\"out = \" + str(out[0][1][1][0]))", "out = [ 0.09018463 1.23489773 0.46822017 0.0367176 0. 0.65516603]\n" ] ], [ [ "**Expected Output**:\n\n<table>\n <tr>\n <td>\n **out**\n </td>\n <td>\n [ 0.09018463 1.23489773 0.46822017 0.0367176 0. 0.65516603]\n </td>\n </tr>\n\n</table>", "_____no_output_____" ], [ "## 3 - Building your first ResNet model (50 layers)\n\nYou now have the necessary blocks to build a very deep ResNet. The following figure describes in detail the architecture of this neural network. \"ID BLOCK\" in the diagram stands for \"Identity block,\" and \"ID BLOCK x3\" means you should stack 3 identity blocks together.\n\n<img src=\"images/resnet_kiank.png\" style=\"width:850px;height:150px;\">\n<caption><center> <u> <font color='purple'> **Figure 5** </u><font color='purple'> : **ResNet-50 model** </center></caption>\n\nThe details of this ResNet-50 model are:\n- Zero-padding pads the input with a pad of (3,3)\n- Stage 1:\n - The 2D Convolution has 64 filters of shape (7,7) and uses a stride of (2,2). Its name is \"conv1\".\n - BatchNorm is applied to the 'channels' axis of the input.\n - MaxPooling uses a (3,3) window and a (2,2) stride.\n- Stage 2:\n - The convolutional block uses three sets of filters of size [64,64,256], \"f\" is 3, \"s\" is 1 and the block is \"a\".\n - The 2 identity blocks use three sets of filters of size [64,64,256], \"f\" is 3 and the blocks are \"b\" and \"c\".\n- Stage 3:\n - The convolutional block uses three sets of filters of size [128,128,512], \"f\" is 3, \"s\" is 2 and the block is \"a\".\n - The 3 identity blocks use three sets of filters of size [128,128,512], \"f\" is 3 and the blocks are \"b\", \"c\" and \"d\".\n- Stage 4:\n - The convolutional block uses three sets of filters of size [256, 256, 1024], \"f\" is 3, \"s\" is 2 and the block is \"a\".\n - The 5 identity blocks use three sets of filters of size [256, 256, 1024], \"f\" is 3 and the blocks are \"b\", \"c\", \"d\", \"e\" and \"f\".\n- Stage 5:\n - The convolutional block uses three sets of filters of size [512, 512, 2048], \"f\" is 3, \"s\" is 2 and the block is \"a\".\n - The 2 identity blocks use three sets of filters of size [512, 512, 2048], \"f\" is 3 and the blocks are \"b\" and \"c\".\n- The 2D Average Pooling uses a window of shape (2,2) and its name is \"avg_pool\".\n- The 'flatten' layer doesn't have any hyperparameters or name.\n- The Fully Connected (Dense) layer reduces its input to the number of classes using a softmax activation. 
Its name should be `'fc' + str(classes)`.\n\n**Exercise**: Implement the ResNet with 50 layers described in the figure above. We have implemented Stages 1 and 2. Please implement the rest. (The syntax for implementing Stages 3-5 should be quite similar to that of Stage 2.) Make sure you follow the naming convention in the text above. \n\nYou'll need to use this function: \n- Average pooling [see reference](https://keras.io/layers/pooling/#averagepooling2d)\n\nHere are some other functions we used in the code below:\n- Conv2D: [See reference](https://keras.io/layers/convolutional/#conv2d)\n- BatchNorm: [See reference](https://keras.io/layers/normalization/#batchnormalization) (axis: Integer, the axis that should be normalized (typically the features axis))\n- Zero padding: [See reference](https://keras.io/layers/convolutional/#zeropadding2d)\n- Max pooling: [See reference](https://keras.io/layers/pooling/#maxpooling2d)\n- Fully connected layer: [See reference](https://keras.io/layers/core/#dense)\n- Addition: [See reference](https://keras.io/layers/merge/#add)", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: ResNet50\n\ndef ResNet50(input_shape=(64, 64, 3), classes=6):\n \"\"\"\n Implementation of the popular ResNet50 with the following architecture:\n CONV2D -> BATCHNORM -> RELU -> MAXPOOL -> CONVBLOCK -> IDBLOCK*2 -> CONVBLOCK -> IDBLOCK*3\n -> CONVBLOCK -> IDBLOCK*5 -> CONVBLOCK -> IDBLOCK*2 -> AVGPOOL -> TOPLAYER\n\n Arguments:\n input_shape -- shape of the images of the dataset\n classes -- integer, number of classes\n\n Returns:\n model -- a Model() instance in Keras\n \"\"\"\n\n # Define the input as a tensor with shape input_shape\n X_input = Input(input_shape)\n\n # Zero-Padding\n X = ZeroPadding2D((3, 3))(X_input)\n\n # Stage 1\n X = Conv2D(64, (7, 7), strides=(2, 2), name='conv1', kernel_initializer=glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis=3, name='bn_conv1')(X)\n X = Activation('relu')(X)\n X = MaxPooling2D((3, 3), strides=(2, 2))(X)\n\n # Stage 2\n X = convolutional_block(X, f=3, filters=[64, 64, 256], stage=2, block='a', s=1)\n X = identity_block(X, 3, [64, 64, 256], stage=2, block='b')\n X = identity_block(X, 3, [64, 64, 256], stage=2, block='c')\n\n ### START CODE HERE ###\n\n # Stage 3 (≈4 lines)\n X = convolutional_block(X, f=3, filters=[128, 128, 512], stage=3, block='a', s=2)\n X = identity_block(X, 3, [128, 128, 512], stage=3, block='b')\n X = identity_block(X, 3, [128, 128, 512], stage=3, block='c')\n X = identity_block(X, 3, [128, 128, 512], stage=3, block='d')\n\n # Stage 4 (≈6 lines)\n X = convolutional_block(X, f=3, filters=[256, 256, 1024], stage=4, block='a', s=2)\n X = identity_block(X, 3, [256, 256, 1024], stage=4, block='b')\n X = identity_block(X, 3, [256, 256, 1024], stage=4, block='c')\n X = identity_block(X, 3, [256, 256, 1024], stage=4, block='d')\n X = identity_block(X, 3, [256, 256, 1024], stage=4, block='e')\n X = identity_block(X, 3, [256, 256, 1024], stage=4, block='f')\n\n # Stage 5 (≈3 lines)\n X = convolutional_block(X, f=3, filters=[512, 512, 2048], stage=5, block='a', s=2)\n X = identity_block(X, 3, [512, 512, 2048], stage=5, block='b')\n X = identity_block(X, 3, [512, 512, 2048], stage=5, block='c')\n\n # AVGPOOL (≈1 line). 
Use \"X = AveragePooling2D(...)\"\n X = AveragePooling2D(pool_size=(2, 2), padding='same', name='avg_pool')(X)\n\n ### END CODE HERE ###\n\n # output layer\n X = Flatten()(X)\n X = Dense(classes, activation='softmax', name='fc' + str(classes), kernel_initializer=glorot_uniform(seed=0))(X)\n\n # Create model\n model = Model(inputs=X_input, outputs=X, name='ResNet50')\n\n return model", "_____no_output_____" ] ], [ [ "Run the following code to build the model's graph. If your implementation is not correct you will know it by checking your accuracy when running `model.fit(...)` below.", "_____no_output_____" ] ], [ [ "model = ResNet50(input_shape = (64, 64, 3), classes = 6)", "_____no_output_____" ] ], [ [ "As seen in the Keras Tutorial Notebook, prior to training a model, you need to configure the learning process by compiling the model.", "_____no_output_____" ] ], [ [ "model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])", "_____no_output_____" ] ], [ [ "The model is now ready to be trained. The only thing you need is a dataset.", "_____no_output_____" ], [ "Let's load the SIGNS Dataset.\n\n<img src=\"images/signs_data_kiank.png\" style=\"width:450px;height:250px;\">\n<caption><center> <u> <font color='purple'> **Figure 6** </u><font color='purple'> : **SIGNS dataset** </center></caption>\n", "_____no_output_____" ] ], [ [ "X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()\n\n# Normalize image vectors\nX_train = X_train_orig/255.\nX_test = X_test_orig/255.\n\n# Convert training and test labels to one hot matrices\nY_train = convert_to_one_hot(Y_train_orig, 6).T\nY_test = convert_to_one_hot(Y_test_orig, 6).T\n\nprint (\"number of training examples = \" + str(X_train.shape[0]))\nprint (\"number of test examples = \" + str(X_test.shape[0]))\nprint (\"X_train shape: \" + str(X_train.shape))\nprint (\"Y_train shape: \" + str(Y_train.shape))\nprint (\"X_test shape: \" + str(X_test.shape))\nprint (\"Y_test shape: \" + str(Y_test.shape))", "number of training examples = 1080\nnumber of test examples = 120\nX_train shape: (1080, 64, 64, 3)\nY_train shape: (1080, 6)\nX_test shape: (120, 64, 64, 3)\nY_test shape: (120, 6)\n" ] ], [ [ "Run the following cell to train your model on 2 epochs with a batch size of 32. On a CPU it should take you around 5min per epoch. If you first want a quick sanity check that the graph is wired correctly, you can optionally fit on a small slice, e.g. `model.fit(X_train[:64], Y_train[:64], epochs=1, batch_size=32)` (an optional smoke test, not part of the graded steps). 
", "_____no_output_____" ] ], [ [ "model.fit(X_train, Y_train, epochs = 2, batch_size = 32)", "Epoch 1/2\n1080/1080 [==============================] - 228s - loss: 2.8833 - acc: 0.2546 \nEpoch 2/2\n1080/1080 [==============================] - 226s - loss: 1.9659 - acc: 0.3852 \n" ] ], [ [ "**Expected Output**:\n\n<table>\n <tr>\n <td>\n ** Epoch 1/2**\n </td>\n <td>\n loss: between 1 and 5, acc: between 0.2 and 0.5, although your results can be different from ours.\n </td>\n </tr>\n <tr>\n <td>\n ** Epoch 2/2**\n </td>\n <td>\n loss: between 1 and 5, acc: between 0.2 and 0.5, you should see your loss decreasing and the accuracy increasing.\n </td>\n </tr>\n\n</table>", "_____no_output_____" ], [ "Let's see how this model (trained on only two epochs) performs on the test set.", "_____no_output_____" ] ], [ [ "preds = model.evaluate(X_test, Y_test)\nprint (\"Loss = \" + str(preds[0]))\nprint (\"Test Accuracy = \" + str(preds[1]))", "120/120 [==============================] - 8s \nLoss = 2.19054207802\nTest Accuracy = 0.166666666667\n" ] ], [ [ "**Expected Output**:\n\n<table>\n <tr>\n <td>\n **Test Accuracy**\n </td>\n <td>\n between 0.16 and 0.25\n </td>\n </tr>\n\n</table>", "_____no_output_____" ], [ "For the purpose of this assignment, we've asked you to train the model for just two epochs. You can see that it achieves poor performance. Please go ahead and submit your assignment; to check correctness, the online grader will run your code only for a small number of epochs as well.", "_____no_output_____" ], [ "After you have finished this official (graded) part of this assignment, you can also optionally train the ResNet for more iterations, if you want. We get a lot better performance when we train for ~20 epochs, but this will take more than an hour when training on a CPU. \n\nUsing a GPU, we've trained our own ResNet50 model's weights on the SIGNS dataset. You can load and run our trained model on the test set in the cells below. It may take ≈1min to load the model.", "_____no_output_____" ] ], [ [ "model = load_model('ResNet50.h5') ", "_____no_output_____" ], [ "preds = model.evaluate(X_test, Y_test)\nprint (\"Loss = \" + str(preds[0]))\nprint (\"Test Accuracy = \" + str(preds[1]))", "120/120 [==============================] - 8s \nLoss = 0.530178320408\nTest Accuracy = 0.866666662693\n" ] ], [ [ "ResNet50 is a powerful model for image classification when it is trained for an adequate number of iterations. We hope you can use what you've learnt and apply it to your own classification problem to achieve state-of-the-art accuracy.\n\nCongratulations on finishing this assignment! You've now implemented a state-of-the-art image classification system! ", "_____no_output_____" ], [ "## 4 - Test on your own image (Optional/Ungraded)", "_____no_output_____" ], [ "If you wish, you can also take a picture of your own hand and see the output of the model. To do this:\n 1. Click on \"File\" in the upper bar of this notebook, then click \"Open\" to go to your Coursera Hub.\n 2. Add your image to this Jupyter Notebook's directory, in the \"images\" folder\n 3. Write your image's name in the following code\n 4. Run the code and check if the algorithm is right! (a small sketch for reading off the predicted class follows below) 
", "_____no_output_____" ] ], [ [ "img_path = 'images/my_image.jpg'\nimg = image.load_img(img_path, target_size=(64, 64))\nx = image.img_to_array(img)\nx = np.expand_dims(x, axis=0)\nx = x/255.0\nprint('Input image shape:', x.shape)\nmy_image = scipy.misc.imread(img_path)\nimshow(my_image)\nprint(\"class prediction vector [p(0), p(1), p(2), p(3), p(4), p(5)] = \")\nprint(model.predict(x))", "Input image shape: (1, 64, 64, 3)\nclass prediction vector [p(0), p(1), p(2), p(3), p(4), p(5)] = \n[[ 3.41876671e-06 2.77412561e-04 9.99522924e-01 1.98842812e-07\n 1.95619068e-04 4.11686671e-07]]\n" ] ], [ [ "You can also print a summary of your model by running the following code.", "_____no_output_____" ] ], [ [ "model.summary()", "____________________________________________________________________________________________________\nLayer (type) Output Shape Param # Connected to \n====================================================================================================\ninput_1 (InputLayer) (None, 64, 64, 3) 0 \n____________________________________________________________________________________________________\nzero_padding2d_1 (ZeroPadding2D) (None, 70, 70, 3) 0 input_1[0][0] \n____________________________________________________________________________________________________\nconv1 (Conv2D) (None, 32, 32, 64) 9472 zero_padding2d_1[0][0] \n____________________________________________________________________________________________________\nbn_conv1 (BatchNormalization) (None, 32, 32, 64) 256 conv1[0][0] \n____________________________________________________________________________________________________\nactivation_4 (Activation) (None, 32, 32, 64) 0 bn_conv1[0][0] \n____________________________________________________________________________________________________\nmax_pooling2d_1 (MaxPooling2D) (None, 15, 15, 64) 0 activation_4[0][0] \n____________________________________________________________________________________________________\nres2a_branch2a (Conv2D) (None, 15, 15, 64) 4160 max_pooling2d_1[0][0] \n____________________________________________________________________________________________________\nbn2a_branch2a (BatchNormalizatio (None, 15, 15, 64) 256 res2a_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_5 (Activation) (None, 15, 15, 64) 0 bn2a_branch2a[0][0] \n____________________________________________________________________________________________________\nres2a_branch2b (Conv2D) (None, 15, 15, 64) 36928 activation_5[0][0] \n____________________________________________________________________________________________________\nbn2a_branch2b (BatchNormalizatio (None, 15, 15, 64) 256 res2a_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_6 (Activation) (None, 15, 15, 64) 0 bn2a_branch2b[0][0] \n____________________________________________________________________________________________________\nres2a_branch2c (Conv2D) (None, 15, 15, 256) 16640 activation_6[0][0] \n____________________________________________________________________________________________________\nres2a_branch1 (Conv2D) (None, 15, 15, 256) 16640 max_pooling2d_1[0][0] \n____________________________________________________________________________________________________\nbn2a_branch2c (BatchNormalizatio (None, 15, 15, 256) 1024 res2a_branch2c[0][0] 
\n____________________________________________________________________________________________________\nbn2a_branch1 (BatchNormalization (None, 15, 15, 256) 1024 res2a_branch1[0][0] \n____________________________________________________________________________________________________\nadd_2 (Add) (None, 15, 15, 256) 0 bn2a_branch2c[0][0] \n bn2a_branch1[0][0] \n____________________________________________________________________________________________________\nactivation_7 (Activation) (None, 15, 15, 256) 0 add_2[0][0] \n____________________________________________________________________________________________________\nres2b_branch2a (Conv2D) (None, 15, 15, 64) 16448 activation_7[0][0] \n____________________________________________________________________________________________________\nbn2b_branch2a (BatchNormalizatio (None, 15, 15, 64) 256 res2b_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_8 (Activation) (None, 15, 15, 64) 0 bn2b_branch2a[0][0] \n____________________________________________________________________________________________________\nres2b_branch2b (Conv2D) (None, 15, 15, 64) 36928 activation_8[0][0] \n____________________________________________________________________________________________________\nbn2b_branch2b (BatchNormalizatio (None, 15, 15, 64) 256 res2b_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_9 (Activation) (None, 15, 15, 64) 0 bn2b_branch2b[0][0] \n____________________________________________________________________________________________________\nres2b_branch2c (Conv2D) (None, 15, 15, 256) 16640 activation_9[0][0] \n____________________________________________________________________________________________________\nbn2b_branch2c (BatchNormalizatio (None, 15, 15, 256) 1024 res2b_branch2c[0][0] \n____________________________________________________________________________________________________\nadd_3 (Add) (None, 15, 15, 256) 0 bn2b_branch2c[0][0] \n activation_7[0][0] \n____________________________________________________________________________________________________\nactivation_10 (Activation) (None, 15, 15, 256) 0 add_3[0][0] \n____________________________________________________________________________________________________\nres2c_branch2a (Conv2D) (None, 15, 15, 64) 16448 activation_10[0][0] \n____________________________________________________________________________________________________\nbn2c_branch2a (BatchNormalizatio (None, 15, 15, 64) 256 res2c_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_11 (Activation) (None, 15, 15, 64) 0 bn2c_branch2a[0][0] \n____________________________________________________________________________________________________\nres2c_branch2b (Conv2D) (None, 15, 15, 64) 36928 activation_11[0][0] \n____________________________________________________________________________________________________\nbn2c_branch2b (BatchNormalizatio (None, 15, 15, 64) 256 res2c_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_12 (Activation) (None, 15, 15, 64) 0 bn2c_branch2b[0][0] \n____________________________________________________________________________________________________\nres2c_branch2c (Conv2D) (None, 15, 15, 256) 16640 activation_12[0][0] 
\n____________________________________________________________________________________________________\nbn2c_branch2c (BatchNormalizatio (None, 15, 15, 256) 1024 res2c_branch2c[0][0] \n____________________________________________________________________________________________________\nadd_4 (Add) (None, 15, 15, 256) 0 bn2c_branch2c[0][0] \n activation_10[0][0] \n____________________________________________________________________________________________________\nactivation_13 (Activation) (None, 15, 15, 256) 0 add_4[0][0] \n____________________________________________________________________________________________________\nres3a_branch2a (Conv2D) (None, 8, 8, 128) 32896 activation_13[0][0] \n____________________________________________________________________________________________________\nbn3a_branch2a (BatchNormalizatio (None, 8, 8, 128) 512 res3a_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_14 (Activation) (None, 8, 8, 128) 0 bn3a_branch2a[0][0] \n____________________________________________________________________________________________________\nres3a_branch2b (Conv2D) (None, 8, 8, 128) 147584 activation_14[0][0] \n____________________________________________________________________________________________________\nbn3a_branch2b (BatchNormalizatio (None, 8, 8, 128) 512 res3a_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_15 (Activation) (None, 8, 8, 128) 0 bn3a_branch2b[0][0] \n____________________________________________________________________________________________________\nres3a_branch2c (Conv2D) (None, 8, 8, 512) 66048 activation_15[0][0] \n____________________________________________________________________________________________________\nres3a_branch1 (Conv2D) (None, 8, 8, 512) 131584 activation_13[0][0] \n____________________________________________________________________________________________________\nbn3a_branch2c (BatchNormalizatio (None, 8, 8, 512) 2048 res3a_branch2c[0][0] \n____________________________________________________________________________________________________\nbn3a_branch1 (BatchNormalization (None, 8, 8, 512) 2048 res3a_branch1[0][0] \n____________________________________________________________________________________________________\nadd_5 (Add) (None, 8, 8, 512) 0 bn3a_branch2c[0][0] \n bn3a_branch1[0][0] \n____________________________________________________________________________________________________\nactivation_16 (Activation) (None, 8, 8, 512) 0 add_5[0][0] \n____________________________________________________________________________________________________\nres3b_branch2a (Conv2D) (None, 8, 8, 128) 65664 activation_16[0][0] \n____________________________________________________________________________________________________\nbn3b_branch2a (BatchNormalizatio (None, 8, 8, 128) 512 res3b_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_17 (Activation) (None, 8, 8, 128) 0 bn3b_branch2a[0][0] \n____________________________________________________________________________________________________\nres3b_branch2b (Conv2D) (None, 8, 8, 128) 147584 activation_17[0][0] \n____________________________________________________________________________________________________\nbn3b_branch2b (BatchNormalizatio (None, 8, 8, 128) 512 res3b_branch2b[0][0] 
\n____________________________________________________________________________________________________\nactivation_18 (Activation) (None, 8, 8, 128) 0 bn3b_branch2b[0][0] \n____________________________________________________________________________________________________\nres3b_branch2c (Conv2D) (None, 8, 8, 512) 66048 activation_18[0][0] \n____________________________________________________________________________________________________\nbn3b_branch2c (BatchNormalizatio (None, 8, 8, 512) 2048 res3b_branch2c[0][0] \n____________________________________________________________________________________________________\nadd_6 (Add) (None, 8, 8, 512) 0 bn3b_branch2c[0][0] \n activation_16[0][0] \n____________________________________________________________________________________________________\nactivation_19 (Activation) (None, 8, 8, 512) 0 add_6[0][0] \n____________________________________________________________________________________________________\nres3c_branch2a (Conv2D) (None, 8, 8, 128) 65664 activation_19[0][0] \n____________________________________________________________________________________________________\nbn3c_branch2a (BatchNormalizatio (None, 8, 8, 128) 512 res3c_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_20 (Activation) (None, 8, 8, 128) 0 bn3c_branch2a[0][0] \n____________________________________________________________________________________________________\nres3c_branch2b (Conv2D) (None, 8, 8, 128) 147584 activation_20[0][0] \n____________________________________________________________________________________________________\nbn3c_branch2b (BatchNormalizatio (None, 8, 8, 128) 512 res3c_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_21 (Activation) (None, 8, 8, 128) 0 bn3c_branch2b[0][0] \n____________________________________________________________________________________________________\nres3c_branch2c (Conv2D) (None, 8, 8, 512) 66048 activation_21[0][0] \n____________________________________________________________________________________________________\nbn3c_branch2c (BatchNormalizatio (None, 8, 8, 512) 2048 res3c_branch2c[0][0] \n____________________________________________________________________________________________________\nadd_7 (Add) (None, 8, 8, 512) 0 bn3c_branch2c[0][0] \n activation_19[0][0] \n____________________________________________________________________________________________________\nactivation_22 (Activation) (None, 8, 8, 512) 0 add_7[0][0] \n____________________________________________________________________________________________________\nres3d_branch2a (Conv2D) (None, 8, 8, 128) 65664 activation_22[0][0] \n____________________________________________________________________________________________________\nbn3d_branch2a (BatchNormalizatio (None, 8, 8, 128) 512 res3d_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_23 (Activation) (None, 8, 8, 128) 0 bn3d_branch2a[0][0] \n____________________________________________________________________________________________________\nres3d_branch2b (Conv2D) (None, 8, 8, 128) 147584 activation_23[0][0] \n____________________________________________________________________________________________________\nbn3d_branch2b (BatchNormalizatio (None, 8, 8, 128) 512 res3d_branch2b[0][0] 
\n____________________________________________________________________________________________________\nactivation_24 (Activation) (None, 8, 8, 128) 0 bn3d_branch2b[0][0] \n____________________________________________________________________________________________________\nres3d_branch2c (Conv2D) (None, 8, 8, 512) 66048 activation_24[0][0] \n____________________________________________________________________________________________________\nbn3d_branch2c (BatchNormalizatio (None, 8, 8, 512) 2048 res3d_branch2c[0][0] \n____________________________________________________________________________________________________\nadd_8 (Add) (None, 8, 8, 512) 0 bn3d_branch2c[0][0] \n activation_22[0][0] \n____________________________________________________________________________________________________\nactivation_25 (Activation) (None, 8, 8, 512) 0 add_8[0][0] \n____________________________________________________________________________________________________\nres4a_branch2a (Conv2D) (None, 4, 4, 256) 131328 activation_25[0][0] \n____________________________________________________________________________________________________\nbn4a_branch2a (BatchNormalizatio (None, 4, 4, 256) 1024 res4a_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_26 (Activation) (None, 4, 4, 256) 0 bn4a_branch2a[0][0] \n____________________________________________________________________________________________________\nres4a_branch2b (Conv2D) (None, 4, 4, 256) 590080 activation_26[0][0] \n____________________________________________________________________________________________________\nbn4a_branch2b (BatchNormalizatio (None, 4, 4, 256) 1024 res4a_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_27 (Activation) (None, 4, 4, 256) 0 bn4a_branch2b[0][0] \n____________________________________________________________________________________________________\nres4a_branch2c (Conv2D) (None, 4, 4, 1024) 263168 activation_27[0][0] \n____________________________________________________________________________________________________\nres4a_branch1 (Conv2D) (None, 4, 4, 1024) 525312 activation_25[0][0] \n____________________________________________________________________________________________________\nbn4a_branch2c (BatchNormalizatio (None, 4, 4, 1024) 4096 res4a_branch2c[0][0] \n____________________________________________________________________________________________________\nbn4a_branch1 (BatchNormalization (None, 4, 4, 1024) 4096 res4a_branch1[0][0] \n____________________________________________________________________________________________________\nadd_9 (Add) (None, 4, 4, 1024) 0 bn4a_branch2c[0][0] \n bn4a_branch1[0][0] \n____________________________________________________________________________________________________\nactivation_28 (Activation) (None, 4, 4, 1024) 0 add_9[0][0] \n____________________________________________________________________________________________________\nres4b_branch2a (Conv2D) (None, 4, 4, 256) 262400 activation_28[0][0] \n____________________________________________________________________________________________________\nbn4b_branch2a (BatchNormalizatio (None, 4, 4, 256) 1024 res4b_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_29 (Activation) (None, 4, 4, 256) 0 bn4b_branch2a[0][0] 
\n____________________________________________________________________________________________________\nres4b_branch2b (Conv2D) (None, 4, 4, 256) 590080 activation_29[0][0] \n____________________________________________________________________________________________________\nbn4b_branch2b (BatchNormalizatio (None, 4, 4, 256) 1024 res4b_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_30 (Activation) (None, 4, 4, 256) 0 bn4b_branch2b[0][0] \n____________________________________________________________________________________________________\nres4b_branch2c (Conv2D) (None, 4, 4, 1024) 263168 activation_30[0][0] \n____________________________________________________________________________________________________\nbn4b_branch2c (BatchNormalizatio (None, 4, 4, 1024) 4096 res4b_branch2c[0][0] \n____________________________________________________________________________________________________\nadd_10 (Add) (None, 4, 4, 1024) 0 bn4b_branch2c[0][0] \n activation_28[0][0] \n____________________________________________________________________________________________________\nactivation_31 (Activation) (None, 4, 4, 1024) 0 add_10[0][0] \n____________________________________________________________________________________________________\nres4c_branch2a (Conv2D) (None, 4, 4, 256) 262400 activation_31[0][0] \n____________________________________________________________________________________________________\nbn4c_branch2a (BatchNormalizatio (None, 4, 4, 256) 1024 res4c_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_32 (Activation) (None, 4, 4, 256) 0 bn4c_branch2a[0][0] \n____________________________________________________________________________________________________\nres4c_branch2b (Conv2D) (None, 4, 4, 256) 590080 activation_32[0][0] \n____________________________________________________________________________________________________\nbn4c_branch2b (BatchNormalizatio (None, 4, 4, 256) 1024 res4c_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_33 (Activation) (None, 4, 4, 256) 0 bn4c_branch2b[0][0] \n____________________________________________________________________________________________________\nres4c_branch2c (Conv2D) (None, 4, 4, 1024) 263168 activation_33[0][0] \n____________________________________________________________________________________________________\nbn4c_branch2c (BatchNormalizatio (None, 4, 4, 1024) 4096 res4c_branch2c[0][0] \n____________________________________________________________________________________________________\nadd_11 (Add) (None, 4, 4, 1024) 0 bn4c_branch2c[0][0] \n activation_31[0][0] \n____________________________________________________________________________________________________\nactivation_34 (Activation) (None, 4, 4, 1024) 0 add_11[0][0] \n____________________________________________________________________________________________________\nres4d_branch2a (Conv2D) (None, 4, 4, 256) 262400 activation_34[0][0] \n____________________________________________________________________________________________________\nbn4d_branch2a (BatchNormalizatio (None, 4, 4, 256) 1024 res4d_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_35 (Activation) (None, 4, 4, 256) 0 bn4d_branch2a[0][0] 
\n____________________________________________________________________________________________________\nres4d_branch2b (Conv2D) (None, 4, 4, 256) 590080 activation_35[0][0] \n____________________________________________________________________________________________________\nbn4d_branch2b (BatchNormalizatio (None, 4, 4, 256) 1024 res4d_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_36 (Activation) (None, 4, 4, 256) 0 bn4d_branch2b[0][0] \n____________________________________________________________________________________________________\nres4d_branch2c (Conv2D) (None, 4, 4, 1024) 263168 activation_36[0][0] \n____________________________________________________________________________________________________\nbn4d_branch2c (BatchNormalizatio (None, 4, 4, 1024) 4096 res4d_branch2c[0][0] \n____________________________________________________________________________________________________\nadd_12 (Add) (None, 4, 4, 1024) 0 bn4d_branch2c[0][0] \n activation_34[0][0] \n____________________________________________________________________________________________________\nactivation_37 (Activation) (None, 4, 4, 1024) 0 add_12[0][0] \n____________________________________________________________________________________________________\nres4e_branch2a (Conv2D) (None, 4, 4, 256) 262400 activation_37[0][0] \n____________________________________________________________________________________________________\nbn4e_branch2a (BatchNormalizatio (None, 4, 4, 256) 1024 res4e_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_38 (Activation) (None, 4, 4, 256) 0 bn4e_branch2a[0][0] \n____________________________________________________________________________________________________\nres4e_branch2b (Conv2D) (None, 4, 4, 256) 590080 activation_38[0][0] \n____________________________________________________________________________________________________\nbn4e_branch2b (BatchNormalizatio (None, 4, 4, 256) 1024 res4e_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_39 (Activation) (None, 4, 4, 256) 0 bn4e_branch2b[0][0] \n____________________________________________________________________________________________________\nres4e_branch2c (Conv2D) (None, 4, 4, 1024) 263168 activation_39[0][0] \n____________________________________________________________________________________________________\nbn4e_branch2c (BatchNormalizatio (None, 4, 4, 1024) 4096 res4e_branch2c[0][0] \n____________________________________________________________________________________________________\nadd_13 (Add) (None, 4, 4, 1024) 0 bn4e_branch2c[0][0] \n activation_37[0][0] \n____________________________________________________________________________________________________\nactivation_40 (Activation) (None, 4, 4, 1024) 0 add_13[0][0] \n____________________________________________________________________________________________________\nres4f_branch2a (Conv2D) (None, 4, 4, 256) 262400 activation_40[0][0] \n____________________________________________________________________________________________________\nbn4f_branch2a (BatchNormalizatio (None, 4, 4, 256) 1024 res4f_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_41 (Activation) (None, 4, 4, 256) 0 bn4f_branch2a[0][0] 
\n____________________________________________________________________________________________________\nres4f_branch2b (Conv2D) (None, 4, 4, 256) 590080 activation_41[0][0] \n____________________________________________________________________________________________________\nbn4f_branch2b (BatchNormalizatio (None, 4, 4, 256) 1024 res4f_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_42 (Activation) (None, 4, 4, 256) 0 bn4f_branch2b[0][0] \n____________________________________________________________________________________________________\nres4f_branch2c (Conv2D) (None, 4, 4, 1024) 263168 activation_42[0][0] \n____________________________________________________________________________________________________\nbn4f_branch2c (BatchNormalizatio (None, 4, 4, 1024) 4096 res4f_branch2c[0][0] \n____________________________________________________________________________________________________\nadd_14 (Add) (None, 4, 4, 1024) 0 bn4f_branch2c[0][0] \n activation_40[0][0] \n____________________________________________________________________________________________________\nactivation_43 (Activation) (None, 4, 4, 1024) 0 add_14[0][0] \n____________________________________________________________________________________________________\nres5a_branch2a (Conv2D) (None, 2, 2, 512) 524800 activation_43[0][0] \n____________________________________________________________________________________________________\nbn5a_branch2a (BatchNormalizatio (None, 2, 2, 512) 2048 res5a_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_44 (Activation) (None, 2, 2, 512) 0 bn5a_branch2a[0][0] \n____________________________________________________________________________________________________\nres5a_branch2b (Conv2D) (None, 2, 2, 512) 2359808 activation_44[0][0] \n____________________________________________________________________________________________________\nbn5a_branch2b (BatchNormalizatio (None, 2, 2, 512) 2048 res5a_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_45 (Activation) (None, 2, 2, 512) 0 bn5a_branch2b[0][0] \n____________________________________________________________________________________________________\nres5a_branch2c (Conv2D) (None, 2, 2, 2048) 1050624 activation_45[0][0] \n____________________________________________________________________________________________________\nres5a_branch1 (Conv2D) (None, 2, 2, 2048) 2099200 activation_43[0][0] \n____________________________________________________________________________________________________\nbn5a_branch2c (BatchNormalizatio (None, 2, 2, 2048) 8192 res5a_branch2c[0][0] \n____________________________________________________________________________________________________\nbn5a_branch1 (BatchNormalization (None, 2, 2, 2048) 8192 res5a_branch1[0][0] \n____________________________________________________________________________________________________\nadd_15 (Add) (None, 2, 2, 2048) 0 bn5a_branch2c[0][0] \n bn5a_branch1[0][0] \n____________________________________________________________________________________________________\nactivation_46 (Activation) (None, 2, 2, 2048) 0 add_15[0][0] \n____________________________________________________________________________________________________\nres5b_branch2a (Conv2D) (None, 2, 2, 512) 1049088 activation_46[0][0] 
\n____________________________________________________________________________________________________\nbn5b_branch2a (BatchNormalizatio (None, 2, 2, 512) 2048 res5b_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_47 (Activation) (None, 2, 2, 512) 0 bn5b_branch2a[0][0] \n____________________________________________________________________________________________________\nres5b_branch2b (Conv2D) (None, 2, 2, 512) 2359808 activation_47[0][0] \n____________________________________________________________________________________________________\nbn5b_branch2b (BatchNormalizatio (None, 2, 2, 512) 2048 res5b_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_48 (Activation) (None, 2, 2, 512) 0 bn5b_branch2b[0][0] \n____________________________________________________________________________________________________\nres5b_branch2c (Conv2D) (None, 2, 2, 2048) 1050624 activation_48[0][0] \n____________________________________________________________________________________________________\nbn5b_branch2c (BatchNormalizatio (None, 2, 2, 2048) 8192 res5b_branch2c[0][0] \n____________________________________________________________________________________________________\nadd_16 (Add) (None, 2, 2, 2048) 0 bn5b_branch2c[0][0] \n activation_46[0][0] \n____________________________________________________________________________________________________\nactivation_49 (Activation) (None, 2, 2, 2048) 0 add_16[0][0] \n____________________________________________________________________________________________________\nres5c_branch2a (Conv2D) (None, 2, 2, 512) 1049088 activation_49[0][0] \n____________________________________________________________________________________________________\nbn5c_branch2a (BatchNormalizatio (None, 2, 2, 512) 2048 res5c_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_50 (Activation) (None, 2, 2, 512) 0 bn5c_branch2a[0][0] \n____________________________________________________________________________________________________\nres5c_branch2b (Conv2D) (None, 2, 2, 512) 2359808 activation_50[0][0] \n____________________________________________________________________________________________________\nbn5c_branch2b (BatchNormalizatio (None, 2, 2, 512) 2048 res5c_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_51 (Activation) (None, 2, 2, 512) 0 bn5c_branch2b[0][0] \n____________________________________________________________________________________________________\nres5c_branch2c (Conv2D) (None, 2, 2, 2048) 1050624 activation_51[0][0] \n____________________________________________________________________________________________________\nbn5c_branch2c (BatchNormalizatio (None, 2, 2, 2048) 8192 res5c_branch2c[0][0] \n____________________________________________________________________________________________________\nadd_17 (Add) (None, 2, 2, 2048) 0 bn5c_branch2c[0][0] \n activation_49[0][0] \n____________________________________________________________________________________________________\nactivation_52 (Activation) (None, 2, 2, 2048) 0 add_17[0][0] \n____________________________________________________________________________________________________\navg_pool (AveragePooling2D) (None, 1, 1, 2048) 0 activation_52[0][0] 
\n____________________________________________________________________________________________________\nflatten_1 (Flatten) (None, 2048) 0 avg_pool[0][0] \n____________________________________________________________________________________________________\nfc6 (Dense) (None, 6) 12294 flatten_1[0][0] \n====================================================================================================\nTotal params: 23,600,006\nTrainable params: 23,546,886\nNon-trainable params: 53,120\n____________________________________________________________________________________________________\n" ] ], [ [ "Finally, run the code below to visualize your ResNet50. You can also download a .png picture of your model by going to \"File -> Open...-> model.png\".", "_____no_output_____" ] ], [ [ "plot_model(model, to_file='model.png')\nSVG(model_to_dot(model).create(prog='dot', format='svg'))", "_____no_output_____" ] ], [ [ "## What you should remember\n- Very deep \"plain\" networks don't work in practice because they are hard to train due to vanishing gradients. \n- The skip-connections help to address the Vanishing Gradient problem. They also make it easy for a ResNet block to learn an identity function. \n- There are two main types of blocks: The identity block and the convolutional block. \n- Very deep Residual Networks are built by stacking these blocks together.", "_____no_output_____" ], [ "### References \n\nThis notebook presents the ResNet algorithm due to He et al. (2015). The implementation here also took significant inspiration and follows the structure given in the GitHub repository of Francois Chollet: \n\n- Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun - [Deep Residual Learning for Image Recognition (2015)](https://arxiv.org/abs/1512.03385)\n- Francois Chollet's GitHub repository: https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
e7d1942548d36368c8e268177f5b2827194951e8
2,508
ipynb
Jupyter Notebook
Homework #2 P2 Jinrui Bai.ipynb
JinruiBai96/QC_fullAdder
3e824fd6d1dd261f49a4b585812e567efb5305bb
[ "Apache-2.0" ]
null
null
null
Homework #2 P2 Jinrui Bai.ipynb
JinruiBai96/QC_fullAdder
3e824fd6d1dd261f49a4b585812e567efb5305bb
[ "Apache-2.0" ]
null
null
null
Homework #2 P2 Jinrui Bai.ipynb
JinruiBai96/QC_fullAdder
3e824fd6d1dd261f49a4b585812e567efb5305bb
[ "Apache-2.0" ]
null
null
null
29.857143
115
0.589713
[ [ [ "# Homework documentation\n\n## Jinrui Bai\n\n#### This is the documentation for Computational Physics 1 in 2021, homework #2\n\n### Task -- what do I want to do?\n\n- learning __computational physics__\n- learning about __quantum computers__\n- practicing programming in **C++** and **Python**\n- learning how to write proper **numerical algorithms**\n\n### Reference -- how do other people do it?\n\n1. [They learn it from the Slack channel: https://app.slack.com](https://app.slack.com)\n2. [They learn it from UB Learns: https://ublearns.buffalo.edu](https://ublearns.buffalo.edu)\n3. [They learn it from Google: https://www.google.com/](https://www.google.com/)\n\n### Methods -- how did I implement it?\n\n> I will pursue these goals by attending class on time and finishing every assignment to the best of my ability.\n> I will also listen to other people's suggestions and explore areas I'm interested in while learning this course.\n\n### Results -- what is the outcome?\n\n- The grade for every assignment counts!\n- The grade for my midterm counts!\n- The grade for my final project counts!\n- Most importantly, my passion for physics counts!\n\n### Outlook -- what would be nice to have?\n\n1. Continuing my learning in Computational Physics 2;\n2. Learning more about quantum computers;\n3. Strengthening my programming fundamentals.\n", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown" ] ]
e7d1a6127238ffd1ee1108f2552ddede6bde1ae3
970,940
ipynb
Jupyter Notebook
docs/source/notebooks/Euler-Maruyama_and_SDEs.ipynb
satrio-hw/pymc3
0bd2d6541f10ad0174a2b20dfe9f34670c5e617d
[ "Apache-2.0" ]
1
2020-09-30T06:26:53.000Z
2020-09-30T06:26:53.000Z
docs/source/notebooks/Euler-Maruyama_and_SDEs.ipynb
satrio-hw/pymc3
0bd2d6541f10ad0174a2b20dfe9f34670c5e617d
[ "Apache-2.0" ]
null
null
null
docs/source/notebooks/Euler-Maruyama_and_SDEs.ipynb
satrio-hw/pymc3
0bd2d6541f10ad0174a2b20dfe9f34670c5e617d
[ "Apache-2.0" ]
null
null
null
545.778527
234,072
0.942539
[ [ [ "# Inferring parameters of SDEs using an Euler-Maruyama scheme\n\n_This notebook is derived from a presentation prepared for the Theoretical Neuroscience Group, Institute of Systems Neuroscience at Aix-Marseille University._", "_____no_output_____" ] ], [ [ "%pylab inline\nimport arviz as az\nimport pymc3 as pm\nimport scipy\nimport theano.tensor as tt\n\nfrom pymc3.distributions.timeseries import EulerMaruyama", "Populating the interactive namespace from numpy and matplotlib\n" ], [ "%config InlineBackend.figure_format = 'retina'\naz.style.use('arviz-darkgrid')", "_____no_output_____" ] ], [ [ "## Toy model 1\n\nHere's a scalar linear SDE in symbolic form\n\n$ dX_t = \\lambda X_t \\, dt + \\sigma^2 dW_t $\n\ndiscretized with the Euler-Maruyama scheme, i.e. the update $x_{t+\\Delta t} = x_t + \\lambda x_t \\Delta t + \\sigma^2 \\sqrt{\\Delta t} \\, \\epsilon_t$ with $\\epsilon_t \\sim N(0, 1)$, which is exactly the step used in the simulation below", "_____no_output_____" ] ], [ [ "# parameters\nλ = -0.78\nσ2 = 5e-3\nN = 200\ndt = 1e-1\n\n# time series\nx = 0.1\nx_t = []\n\n# simulate\nfor i in range(N):\n x += dt * λ * x + sqrt(dt) * σ2 * randn()\n x_t.append(x)\n \nx_t = array(x_t)\n\n# z_t noisy observation\nz_t = x_t + randn(x_t.size) * 5e-3", "_____no_output_____" ], [ "figure(figsize=(10, 3))\nsubplot(121)\nplot(x_t[:30], 'k', label='$x(t)$', alpha=0.5), plot(z_t[:30], 'r', label='$z(t)$', alpha=0.5)\ntitle('Transient'), legend()\nsubplot(122)\nplot(x_t[30:], 'k', label='$x(t)$', alpha=0.5), plot(z_t[30:], 'r', label='$z(t)$', alpha=0.5)\ntitle('All time');\ntight_layout()", "_____no_output_____" ] ], [ [ "What is the inference we want to make? Since we've made a noisy observation of the generated time series, we need to estimate both $x(t)$ and $\\lambda$.", "_____no_output_____" ], [ "First, we rewrite our SDE as a function returning a tuple of the drift and diffusion coefficients", "_____no_output_____" ] ], [ [ "def lin_sde(x, lam):\n return lam * x, σ2", "_____no_output_____" ] ], [ [ "Next, we describe the probability model as a set of three stochastic variables, `lam`, `xh`, and `zh`:", "_____no_output_____" ] ], [ [ "with pm.Model() as model:\n \n # uniform prior, but we know it must be negative\n lam = pm.Flat('lam')\n \n # \"hidden states\" following a linear SDE distribution\n # parametrized by time step (det. variable) and lam (random variable)\n xh = EulerMaruyama('xh', dt, lin_sde, (lam, ), shape=N, testval=x_t)\n \n # predicted observation\n zh = pm.Normal('zh', mu=xh, sigma=5e-3, observed=z_t)", "_____no_output_____" ] ], [ [ "Once the model is constructed, we perform inference, i.e. 
sample from the posterior distribution, in the following steps:", "_____no_output_____" ] ], [ [ "with model:\n    trace = pm.sample(2000, tune=1000)", "Auto-assigning NUTS sampler...\nInitializing NUTS using jitter+adapt_diag...\nMultiprocess sampling (4 chains in 4 jobs)\nNUTS: [xh, lam]\n" ] ], [ [ "Next, we plot some basic statistics on the samples from the posterior,", "_____no_output_____" ] ], [ [ "figure(figsize=(10, 3))\nsubplot(121)\nplot(percentile(trace[xh], [2.5, 97.5], axis=0).T, 'k', label=r'$\hat{x}_{95\%}(t)$')\nplot(x_t, 'r', label='$x(t)$')\nlegend()\n\nsubplot(122)\nhist(trace[lam], 30, label=r'$\hat{\lambda}$', alpha=0.5)\naxvline(λ, color='r', label=r'$\lambda$', alpha=0.5)\nlegend();", "_____no_output_____" ] ], [ [ "A model can fit the data precisely and still be wrong; we need to use _posterior predictive checks_ to assess if, under our fit model, the data are likely.\n\nIn other words, we \n- assume the model is correct\n- simulate new observations\n- check that the new observations fit with the original data", "_____no_output_____" ] ], [ [ "# generate trace from posterior\nppc_trace = pm.sample_posterior_predictive(trace, model=model)\n\n# plot with data\nfigure(figsize=(10, 3))\nplot(percentile(ppc_trace['zh'], [2.5, 97.5], axis=0).T, 'k', label=r'$z_{95\% PP}(t)$')\nplot(z_t, 'r', label='$z(t)$')\nlegend()", "_____no_output_____" ] ], [ [ "Note that \n\n- inference also estimates the initial conditions\n- the observed data $z(t)$ lies fully within the 95% interval of the PPC.\n- there are many other ways of evaluating fit", "_____no_output_____" ], [ "### Toy model 2\n\nAs the next model, let's use a 2D deterministic oscillator, \n\\begin{align}\n\\dot{x} &= \\tau (x - x^3/3 + y) \\\\\n\\dot{y} &= \\frac{1}{\\tau} (a - x)\n\\end{align}\n\nwith noisy observation $z(t) = m x + (1 - m) y + N(0, 0.05)$.", "_____no_output_____" ] ], [ [ "N, τ, a, m, σ2 = 200, 3.0, 1.05, 0.2, 1e-1\nxs, ys = [0.0], [1.0]\nfor i in range(N):\n    x, y = xs[-1], ys[-1]\n    dx = τ * (x - x**3.0/3.0 + y)\n    dy = (1.0 / τ) * (a - x)\n    xs.append(x + dt * dx + sqrt(dt) * σ2 * randn())\n    ys.append(y + dt * dy + sqrt(dt) * σ2 * randn())\nxs, ys = array(xs), array(ys)\nzs = m * xs + (1 - m) * ys + randn(xs.size) * 0.1\n\nfigure(figsize=(10, 2))\nplot(xs, label='$x(t)$')\nplot(ys, label='$y(t)$')\nplot(zs, label='$z(t)$')\nlegend()", "_____no_output_____" ] ], [ [ "Now, estimate the hidden states $x(t)$ and $y(t)$, as well as parameters $\\tau$, $a$ and $m$.\n\nAs before, we rewrite our SDE as a function returning the drift & diffusion coefficients:", "_____no_output_____" ] ], [ [ "def osc_sde(xy, τ, a):\n    x, y = xy[:, 0], xy[:, 1]\n    dx = τ * (x - x**3.0/3.0 + y)\n    dy = (1.0 / τ) * (a - x)\n    dxy = tt.stack([dx, dy], axis=0).T\n    return dxy, σ2", "_____no_output_____" ] ], [ [ "As before, the Euler-Maruyama discretization of the SDE is written as a prediction of the state at step $i+1$ based on the state at step $i$.", "_____no_output_____" ], [ "We can now write our statistical model as before, with uninformative priors on $\\tau$, $a$ and $m$:", "_____no_output_____" ] ], [ [ "xys = c_[xs, ys]\n\nwith pm.Model() as model:\n    τh = pm.Uniform('τh', lower=0.1, upper=5.0)\n    ah = pm.Uniform('ah', lower=0.5, upper=1.5)\n    mh = pm.Uniform('mh', lower=0.0, upper=1.0)\n    xyh = EulerMaruyama('xyh', dt, osc_sde, (τh, ah), shape=xys.shape, testval=xys)\n    zh = pm.Normal('zh', mu=mh * xyh[:, 0] + (1 - mh) * xyh[:, 1], sigma=0.1, observed=zs)", "_____no_output_____" ], [ "with model:\n    trace = pm.sample(2000, 
tune=1000)", "Auto-assigning NUTS sampler...\nInitializing NUTS using jitter+adapt_diag...\nMultiprocess sampling (4 chains in 4 jobs)\nNUTS: [xyh, mh, ah, τh]\n" ] ], [ [ "Again, the result is a set of samples from the posterior, including our parameters of interest but also the hidden states", "_____no_output_____" ] ], [ [ "figure(figsize=(10, 6))\nsubplot(211)\nplot(percentile(trace[xyh][..., 0], [2.5, 97.5], axis=0).T, 'k', label=r'$\\hat{x}_{95\\%}(t)$')\nplot(xs, 'r', label='$x(t)$')\nlegend(loc=0)\nsubplot(234), hist(trace['τh']), axvline(τ), xlim([1.0, 4.0]), title('τ')\nsubplot(235), hist(trace['ah']), axvline(a), xlim([0, 2.0]), title('a')\nsubplot(236), hist(trace['mh']), axvline(m), xlim([0, 1]), title('m')\ntight_layout()", "_____no_output_____" ] ], [ [ "Again, we can perform a posterior predictive check, that our data are likely given the fit model", "_____no_output_____" ] ], [ [ "# generate trace from posterior\nppc_trace = pm.sample_posterior_predictive(trace, model=model)\n\n# plot with data\nfigure(figsize=(10, 3))\nplot(percentile(ppc_trace['zh'], [2.5, 97.5], axis=0).T, 'k', label=r'$z_{95\\% PP}(t)$')\nplot(zs, 'r', label='$z(t)$')\nlegend()", "_____no_output_____" ], [ "%load_ext watermark\n%watermark -n -u -v -iv -w", "scipy 1.4.1\nlogging 0.5.1.2\nmatplotlib.pylab 1.18.5\nre 2.2.1\npymc3 3.9.0\nmatplotlib 3.2.1\nnumpy 1.18.5\narviz 0.8.3\nlast updated: Mon Jun 15 2020 \n\nCPython 3.7.7\nIPython 7.15.0\nwatermark 2.0.2\n" ] ] ]
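[ [ [ "The Euler-Maruyama step itself is easy to reproduce outside of PyMC3. The cell below is a minimal NumPy-only sketch of the update used throughout this notebook; the helper name `euler_maruyama` and its signature are illustrative (not part of the PyMC3 API), and it reuses the `(drift, diffusion)` return convention of `lin_sde`.", "_____no_output_____" ] ], [ [ "import numpy as np\n\ndef euler_maruyama(sde, x0, dt, n_steps, args=(), rng=None):\n    # sde(x, *args) must return a (drift, diffusion) tuple,\n    # matching the convention of lin_sde above\n    rng = np.random.default_rng() if rng is None else rng\n    x = np.empty(n_steps + 1)\n    x[0] = x0\n    for i in range(n_steps):\n        f, g = sde(x[i], *args)\n        x[i + 1] = x[i] + dt * f + np.sqrt(dt) * g * rng.standard_normal()\n    return x\n\n# the scalar linear SDE from Toy model 1: lam = -0.78, sigma^2 = 5e-3\nx_sim = euler_maruyama(lambda x, lam: (lam * x, 5e-3), x0=0.1, dt=1e-1, n_steps=200, args=(-0.78,))", "_____no_output_____" ] ] ]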
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
e7d1f4f85444ca577efb6c2fb20d3974c4bc9597
17,037
ipynb
Jupyter Notebook
aula-03-python/.ipynb_checkpoints/Introducao-Python-01-checkpoint.ipynb
Atzingen/curso-IoT-2017
3f4c6859a2ea34d749007f1b40be012e5dc29ae0
[ "MIT" ]
5
2017-04-15T01:21:14.000Z
2017-09-13T00:14:31.000Z
aula-03-python/Introducao-Python-01.ipynb
Atzingen/curso-IoT-2017
3f4c6859a2ea34d749007f1b40be012e5dc29ae0
[ "MIT" ]
null
null
null
aula-03-python/Introducao-Python-01.ipynb
Atzingen/curso-IoT-2017
3f4c6859a2ea34d749007f1b40be012e5dc29ae0
[ "MIT" ]
5
2017-04-10T22:20:00.000Z
2017-09-19T00:49:57.000Z
19.741599
184
0.48741
[ [ [ "# Introdução à linguagem Python (parte 1)\n\nNotebook para o curso de IoT - IFSP Piracicaba\n\nGustavo Voltani von Atzingen\n\nPython - versão 2.7\n\nEste notebook contém uma introdução aos comandos básicos em python.\nSerão cobertos os seguintes tópicos:\n\n* Print\n* Comentários\n* Atribuição de variáveis e tipos\n* Trabalhando com strings (parte 1)\n* Listas (parte 1)\n* Estruturas de controle (if-elif-else, for, while)\n* Funções\n* Utilizando módulos (import)", "_____no_output_____" ], [ "### <i>Print</i>", "_____no_output_____" ], [ "Para se \"imprimir\" algum texto na tela, o python (versão 2.7) possui uma palavra reservada, chamada <i>print</i>. ", "_____no_output_____" ] ], [ [ "print \"Hello Python 2.7 !\"", "Hello Python 2.7 !\n" ] ], [ [ "Também podemos imprimir várias strings ou números, separando-os por ','", "_____no_output_____" ] ], [ [ "print 'Parte 1 - ', ' A resposta é: ', 42", "Parte 1 -   A resposta é:  42\n" ] ], [ [ "Podemos inserir variáveis (numéricas) no meio do texto utilizando o método <i>.format</i>", "_____no_output_____" ] ], [ [ "print 'Os valores das leituras dos sensores são {}Volts e {}Volts'.format(4.2, 1.68)", "Os valores das leituras dos sensores são 4.2Volts e 1.68Volts\n" ] ], [ [ "### <i>Comentários</i>\n\nComentários são inseridos no programa utilizando o caracter '#' e com isso toda a linha é ignorada pelo interpretador", "_____no_output_____" ] ], [ [ "# isto é uma linha de comentário", "_____no_output_____" ] ], [ [ "Para fazer um bloco de comentário (várias linhas), utiliza-se ''' no início e ''' no final do bloco de comentário", "_____no_output_____" ] ], [ [ "''' Isto e um bloco de comentarios\n\nTodas as linhas neste bloco sao ignoradas\npelo interpretador\n'''", "_____no_output_____" ] ], [ [ "### <i> Atribuição de variáveis </i>\n\nEm python as variáveis não são explicitamente declaradas. O interpretador faz a atribuição em tempo de execução. \nOs tipos de estrutura utilizados pelo interpretador são:\n* Números (number) - Inteiro ou real\n* Strings (string)\n* Listas (list)\n* Tuplas (tuple)\n* Dicionários (dictionary)\n", "_____no_output_____" ] ], [ [ "a = 42       # A variável a recebe um número\nb = 1.68     # Variável real\nc = 'texto'  # Texto\nprint a, b, c", "42 1.68 texto\n" ] ], [ [ "As variáveis podem alterar o seu tipo durante a execução (runtime)", "_____no_output_____" ] ], [ [ "a = 1.3\nprint 'valor de a antes: ', a\na = 'texto'\nprint 'valor de a depois: ', a", "valor de a antes:  1.3\nvalor de a depois:  texto\n" ] ], [ [ "As variáveis podem ser atribuídas simultaneamente. 
Isto pode ser feito para simplificar o código e evitar a criação de variáveis temporárias", "_____no_output_____" ] ], [ [ "a, b = 1, 1\nprint a, b\na, b = b, a + b\nprint a, b", "1 1\n1 2\n" ] ], [ [ "### <i> Strings </i>\n\nStrings podem ser criadas utilizando ' ou \" (aspas simples ou dupla)", "_____no_output_____" ] ], [ [ "nome = 'Gustavo'  # Isto é uma string\nnome = \"Joao\"     # Isto também é uma string\nletra = 'a'       # Strings também podem ter um único caracter", "_____no_output_____" ] ], [ [ "Podemos utilizar a indexação para acessar elementos da string ou partes dela", "_____no_output_____" ] ], [ [ "nome = 'Gustavo Voltani von Atzingen'\nprint nome[0], nome[1], nome[8]  # A indexação começa em zero e segue até o último valor", "G u V\n" ], [ "nome = 'Gustavo Voltani von Atzingen'\nprint nome[-1], nome[-2]  # Também existe a indexação do fim para o início com \n                          # números negativos iniciando em 1", "n e\n" ], [ "nome = 'Gustavo Voltani von Atzingen'\nprint nome[8:15]  # Podemos pegar parte da string desta forma\nprint nome[20:]   # Da posição 20 até o final\nprint nome[:7]    # Do início até a posição 6 ", "Voltani\nAtzingen\nGustavo\n" ] ], [ [ "Existem vários métodos que podem ser aplicados na string. O método <i>split</i> divide a string no caracter especificado. Outros métodos serão abordados em aulas posteriores. ", "_____no_output_____" ] ], [ [ "nome = 'Gustavo Voltani von Atzingen'\nprint nome.split(' ') # separando o nome pelo espaço em branco", "['Gustavo', 'Voltani', 'von', 'Atzingen']\n" ] ], [ [ "### <i> Listas </i>\n\nListas são sequências ordenadas de objetos (que podem ser strings, números, listas ou outros)", "_____no_output_____" ] ], [ [ "lista = ['texto1', 'texto2', 'texto3', 'texto4']\nprint lista", "['texto1', 'texto2', 'texto3', 'texto4']\n" ], [ " # também podemos ter vários tipos na mesma lista\nlista = [42, 'texto2', 1.68, 'texto4']\nprint lista", "[42, 'texto2', 1.68, 'texto4']\n" ], [ "# também podemos ter uma lista dentro de outra\nlista = [ [42, 54, 1.7], 'texto2', 1.68, 'texto4'] \nprint lista ", "[[42, 54, 1.7], 'texto2', 1.68, 'texto4']\n" ], [ "# A lista também é indexada e pode ser buscada da mesma forma que\n# foi feito com as strings\nlista = [42, 34, 78, 1, 91, 1, 34]\nprint lista[0], lista[-1], lista[2:5]", "42 34 [78, 1, 91]\n" ] ], [ [ "### <i> Estruturas de controle: if </i>", "_____no_output_____" ] ], [ [ "a = 4\n\nif a < 1:\n    print 'a é menor que 1'\nelif a < 3:\n    print 'a é menor que 3 e maior ou igual a 1'\nelif a < 5:\n    print 'a é menor que 5 e maior ou igual a 3'\nelse:\n    print 'a é maior ou igual a 5'", "a é menor que 5 e maior ou igual a 3\n" ] ], [ [ "### <i> Estruturas de controle: for </i>\n\nfor é uma estrutura de controle que vai iterar sobre uma lista ou uma string", "_____no_output_____" ] ], [ [ "nome = 'gustavo'\nfor letra in nome:\n    print letra", "g\nu\ns\nt\na\nv\no\n" ], [ "lista = ['texto1', 'texto2', 'texto3', 'texto4']\nfor item in lista:\n    print item", "texto1\ntexto2\ntexto3\ntexto4\n" ], [ "# Se quisermos fazer uma repetição com contagem numérica, podemos \n# utilizar a função range() ou outras que serão mostradas futuramente", "_____no_output_____" ], [ "# Mostra os números de 0 a 9\nfor i in range(10):\n    print i", "0\n1\n2\n3\n4\n5\n6\n7\n8\n9\n" ], [ "# se quisermos contar os elementos de uma lista podemos usar a função enumerate\nlista = ['texto1', 'texto2', 'texto3', 'texto4']\nfor indice, item in enumerate(lista):\n    print indice, item", "0 texto1\n1 texto2\n2 texto3\n3 texto4\n" ] ], [ [ "### <i> Estruturas de controle: while 
</i>\n\nRepete até que a condição seja falsa", "_____no_output_____" ] ], [ [ "contador = 0\nwhile contador < 5:\n    print contador\n    contador += 1", "0\n1\n2\n3\n4\n" ] ], [ [ "### <i> Funções </i>\n\nFunções são escritas com a palavra <i> def </i> e o nome da função, juntamente com \nos argumentos.\nPode retornar (ou não) um ou mais objetos. ", "_____no_output_____" ] ], [ [ "def somador(a, b):\n    return a + b\n\nsomador(1, 2)", "_____no_output_____" ], [ "def separa_por_espao(texto):\n    if ' ' in texto:\n        return texto.split(' ')\n    else:\n        return None\n    \nnome1, nome2 = separa_por_espao('nome1 nome2')\nprint nome1, nome2", "nome1 nome2\n" ], [ "# funções podem ter argumentos chave\n\ndef soma(a, b=1):\n    return a + b\n\nprint soma(1,2)\nprint soma(1)", "3\n2\n" ] ], [ [ "### <i> Módulos e importação </i>", "_____no_output_____" ] ], [ [ "import datetime\n\ntempo_atual = datetime.datetime.now()\nprint tempo_atual.hour, tempo_atual.minute, tempo_atual.second", "16 26 42\n" ], [ "from datetime import datetime as d\n\ntempo_atual = d.now()\nprint tempo_atual.hour, tempo_atual.minute, tempo_atual.second", "16 26 42\n" ] ] ]
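[ [ [ "Para fechar, o trecho abaixo é um esboço que combina os conceitos vistos nesta aula (função, lista, `for` e `if`); os nomes `conta_pares` e `numeros` são apenas ilustrativos.", "_____no_output_____" ] ], [ [ "# combinando função, lista, for e if em um único exemplo\ndef conta_pares(numeros):\n    pares = []\n    for n in numeros:\n        if n % 2 == 0:\n            pares.append(n)\n    return pares\n\nprint conta_pares([1, 2, 3, 4, 5, 6])", "[2, 4, 6]\n" ] ] ]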
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
e7d1f6c6057e250984698be1ef60d2411bb08d75
1,346
ipynb
Jupyter Notebook
isu/ui/win/q.ipynb
pecusys/isutils
39fa92dc391cc430dcf1864f4c2f0212f0db58b6
[ "MIT" ]
null
null
null
isu/ui/win/q.ipynb
pecusys/isutils
39fa92dc391cc430dcf1864f4c2f0212f0db58b6
[ "MIT" ]
null
null
null
isu/ui/win/q.ipynb
pecusys/isutils
39fa92dc391cc430dcf1864f4c2f0212f0db58b6
[ "MIT" ]
null
null
null
21.709677
77
0.546805
[ [ [ "from PySide6.QtWidgets import *\nfrom PySide6.QtQuick import QQuickView\nfrom PySide6.QtCore import *\nfrom PySide6.QtUiTools import *\nfrom PySide6.QtGui import *\n\n# reuse the running QApplication if one exists (e.g. inside Jupyter)\nmatch QApplication.instance():\n    case None: a = QApplication()\n    case app: a = app\n\n# view = QQuickView()\n# view.setSource(url)\n# view.show()\nmw = QUiLoader(None).load(\"w.ui\", None)\nmw.show()\n\na.exec()", "_____no_output_____" ] ] ]
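[ [ [ "A minimal variant of the same pattern that does not depend on the external `w.ui` file (it assumes only that PySide6 is installed); the label text is arbitrary.", "_____no_output_____" ] ], [ [ "from PySide6.QtWidgets import QApplication, QLabel\n\n# reuse the running QApplication if one already exists\napp = QApplication.instance() or QApplication()\nlabel = QLabel(\"Hello from PySide6\")\nlabel.show()\napp.exec()", "_____no_output_____" ] ] ]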
[ "code" ]
[ [ "code" ] ]
e7d1fb9836061b674571385a28fd5218c13ca9a7
1,011,447
ipynb
Jupyter Notebook
junior_class/chapter-3-Computer_Vision/notebook/3-1-CV-CNN_Basis.ipynb
CS-Learnings/PaddlePaddle-awesome-DeepLearning
b4f802e9c24dd1308ad6a1cf8250dddd7804920d
[ "Apache-2.0" ]
1,150
2021-06-01T03:44:21.000Z
2022-03-31T13:43:42.000Z
junior_class/chapter-3-Computer_Vision/notebook/3-1-CV-CNN_Basis.ipynb
tensorfly-gpu/awesome-DeepLearning
b1cb77ed3da2569ad7b003545dd1c7141b7b3f72
[ "Apache-2.0" ]
358
2021-06-01T03:58:47.000Z
2022-03-28T02:55:00.000Z
junior_class/chapter-3-Computer_Vision/notebook/3-1-CV-CNN_Basis.ipynb
tensorfly-gpu/awesome-DeepLearning
b1cb77ed3da2569ad7b003545dd1c7141b7b3f72
[ "Apache-2.0" ]
502
2021-05-31T12:52:14.000Z
2022-03-31T02:51:41.000Z
836.598015
612,928
0.948337
[ [ [ "计算机视觉作为一门让机器学会如何去“看”的学科,具体的说,就是让机器去识别摄像机拍摄的图片或视频中的物体,检测出物体所在的位置,并对目标物体进行跟踪,从而理解并描述出图片或视频里的场景和故事,以此来模拟人脑视觉系统。因此,计算机视觉也通常被叫做机器视觉,其目的是建立能够从图像或者视频中“感知”信息的人工系统。\n\n计算机视觉技术经过几十年的发展,已经在交通(车牌识别、道路违章抓拍)、安防(人脸闸机、小区监控)、金融(刷脸支付、柜台的自动票据识别)、医疗(医疗影像诊断)、工业生产(产品缺陷自动检测)等多个领域应用,影响或正在改变人们的日常生活和工业生产方式。未来,随着技术的不断演进,必将涌现出更多的产品和应用,为我们的生活创造更大的便利和更广阔的机会。\n<br></br>\n<center><img src=\"https://ai-studio-static-online.cdn.bcebos.com/93476b373dd342d1aae22397aa24c58fc60ab68307fd448189f16c8284723e9d\" width = \"500\"></center>\n<center><br>图1:计算机视觉技术在各领域的应用</br></center>\n<br></br>\n\n飞桨为计算机视觉任务提供了丰富的API,并通过底层优化和加速保证了这些API的性能。同时,飞桨还提供了丰富的模型库,覆盖图像分类、检测、分割、文字识别和视频理解等多个领域。用户可以直接使用这些API组建模型,也可以在飞桨提供的模型库基础上进行二次研发。\n\n由于篇幅所限,本章将重点介绍计算机视觉的经典模型(卷积神经网络)和两个典型任务(图像分类和目标检测)。主要涵盖如下内容:\n\n**卷积神经网络**:卷积神经网络(Convolutional Neural Networks, CNN)是计算机视觉技术最经典的模型结构。本教程主要介绍卷积神经网络的常用模块,包括:卷积、池化、批归一化、丢弃法等。\n\n- **图像分类**:介绍图像分类算法的经典模型结构,包括:LeNet、AlexNet、VGG、GoogLeNet、ResNet,并通过眼疾筛查的案例展示算法的应用。\n\n- **目标检测**:介绍目标检测YOLOv3算法,并通过林业病虫害检测案例展示YOLOv3算法的应用。\n\n\n# 计算机视觉的发展历程\n\n计算机视觉的发展历程要从生物视觉讲起。对于生物视觉的起源,目前学术界尚没有形成定论。有研究者认为最早的生物视觉形成于距今约[7亿年前的水母之中](https://www.pnas.org/content/109/46/18868),也有研究者认为生物视觉产生于距今约5亿年前寒武纪【[1](https://doi.org/10.1038%2Fnature10097), [2](https://en.wikipedia.org/wiki/Evolution_of_the_eye)】。寒武纪生物大爆发的原因一直是个未解之谜,不过可以肯定的是在寒武纪动物具有了视觉能力,捕食者可以更容易地发现猎物,被捕食者也可以更早的发现天敌的位置。视觉能力加剧了猎手和猎物之间的博弈,也催生出更加激烈的生存演化规则。视觉系统的形成有力地推动了食物链的演化,加速了生物进化过程,是生物发展史上重要的里程碑。经过几亿年的演化,目前人类的视觉系统已经具备非常高的复杂度和强大的功能,人脑中神经元数目达到了1000亿个,这些神经元通过网络互相连接,这样庞大的视觉神经网络使得我们可以很轻松的观察周围的世界,如 **图2** 所示。\n<br></br>\n<center><img src=\"https://ai-studio-static-online.cdn.bcebos.com/70d8475ed908487680057bf1f2760f10e367e7176acf43ebb380207b748b2377\" width = \"600\"></center>\n<center><br>图2:人类视觉感知</br></center>\n<br></br>\n\n对人类来说,识别猫和狗是件非常容易的事。但对计算机来说,即使是一个精通编程的高手,也很难轻松写出具有通用性的程序(比如:假设程序认为体型大的是狗,体型小的是猫,但由于拍摄角度不同,可能一张图片上猫占据的像素比狗还多)。那么,如何让计算机也能像人一样看懂周围的世界呢?研究者尝试着从不同的角度去解决这个问题,由此也发展出一系列的子任务,如 **图3** 所示。\n<br></br>\n<center><img src=\"https://ai-studio-static-online.cdn.bcebos.com/d65f1ebcb0054dcb81a8eb50223adc529bb9b63265ab467d931a5df5b2864122\" width = \"500\"></center>\n<center><br>图3:计算机视觉子任务示意图</br></center>\n<br></br>\n\n- **(a) Image Classification:** 图像分类,用于识别图像中物体的类别(如:bottle、cup、cube)。\n\n- **(b) Object Localization:** 目标检测,用于检测图像中每个物体的类别,并准确标出它们的位置。\n\n- **(c) Semantic Segmentation:** 图像语义分割,用于标出图像中每个像素点所属的类别,属于同一类别的像素点用一个颜色标识。\n\n- **(d) Instance Segmentation:** 实例分割,值得注意的是,(b)中的目标检测任务只需要标注出物体位置,而(d)中的实例分割任务不仅要标注出物体位置,还需要标注出物体的外形轮廓。\n\n在早期的图像分类任务中,通常是先人工提取图像特征,再用机器学习算法对这些特征进行分类,分类的结果强依赖于特征提取方法,往往只有经验丰富的研究者才能完成,如 **图4** 所示。\n<br></br>\n<center><img src=\"https://ai-studio-static-online.cdn.bcebos.com/01179d17c9f74570b8a618d6123261ce6e10344f11c84dda8e47d44c1eb4fc81\" width = \"500\"></center>\n<center><br>图4:早期的图像分类任务</br></center>\n<br></br>\n\n在这种背景下,基于神经网络的特征提取方法应运而生。Yann LeCun是最早将卷积神经网络应用到图像识别领域的,其主要逻辑是使用卷积神经网络提取图像特征,并对图像所属类别进行预测,通过训练数据不断调整网络参数,最终形成一套能自动提取图像特征并对这些特征进行分类的网络,如 **图5** 所示。\n<br></br>\n<center><img src=\"https://ai-studio-static-online.cdn.bcebos.com/1ccd30567304415d98b0b373ec641a3d00f76d803f194ea4b14aa85ce85bf7bb\" width = \"500\"></center>\n<center><br>图5:早期的卷积神经网络处理图像任务示意</br></center>\n<br></br>\n\n这一方法在手写数字识别任务上取得了极大的成功,但在接下来的时间里,却没有得到很好的发展。其主要原因一方面是数据集不完善,只能处理简单任务,在大尺寸的数据上容易发生过拟合;另一方面是硬件瓶颈,网络模型复杂时,计算速度会特别慢。\n\n目前,随着互联网技术的不断进步,数据量呈现大规模的增长,越来越丰富的数据集不断涌现。另外,得益于硬件能力的提升,计算机的算力也越来越强大。不断有研究者将新的模型和算法应用到计算机视觉领域。由此催生了越来越丰富的模型结构和更加准确的精度,同时计算机视觉所处理的问题也越来越丰富,包括分类、检测、分割、场景描述、图像生成和风格变换等,甚至还不仅仅局限于2维图片,包括视频处理技术和3D视觉等。\n\n", "_____no_output_____" ], [ "# 
卷积神经网络\n\n卷积神经网络是目前计算机视觉中使用最普遍的模型结构。本章节主要向读者介绍卷积神经网络的一些基础模块,包括:\n  \n  - 卷积(Convolution)\n  - 池化(Pooling)\n  - 批归一化(Batch Normalization)\n  - 丢弃法(Dropout)\n  \n回顾一下,在上一章“一个案例带你吃透深度学习”中,我们介绍了手写数字识别任务,应用的是全连接网络进行特征提取,即将一张图片上的所有像素点展开成一个1维向量输入网络,存在如下两个问题:\n\n**1. 输入数据的空间信息被丢失。** 空间上相邻的像素点往往具有相似的RGB值,RGB的各个通道之间的数据通常密切相关,但是转化成1维向量时,这些信息被丢失。同时,图像数据的形状信息中,可能隐藏着某种本质的模式,但是转变成1维向量输入全连接神经网络时,这些模式也会被忽略。\n\n**2. 模型参数过多,容易发生过拟合。** 在手写数字识别案例中,每个像素点都要跟所有输出的神经元相连接。当图片尺寸变大时,输入神经元的个数会按图片尺寸的平方增大,导致模型参数过多,容易发生过拟合。\n\n为了解决上述问题,我们引入卷积神经网络进行特征提取,既能提取到相邻像素点之间的特征模式,又能保证参数的个数不随图片尺寸变化。**图6** 是一个典型的卷积神经网络结构,多层卷积和池化层组合作用在输入图片上,在网络的最后通常会加入一系列全连接层。网络中通常还会加入Dropout来防止过拟合。\n\n<br></br>\n<center><img src=\"https://ai-studio-static-online.cdn.bcebos.com/6d1440daa10944c899a7c98e1bed3931a09bae52730d4c20a65b322193d284e1\" width = \"1000\"></center>\n<center><br>图6:卷积神经网络经典结构</br></center>\n<br></br>\n\n------\n**说明:**\n\n在卷积神经网络中,计算范围是在像素点的空间邻域内进行的,卷积核参数的数目也远小于全连接层。卷积核本身与输入图片大小无关,它代表了对空间邻域内某种特征模式的提取。比如,有些卷积核提取物体边缘特征,有些卷积核提取物体拐角处的特征,图像上不同区域共享同一个卷积核。当输入图片大小不一样时,仍然可以使用同一个卷积核进行操作。\n\n------", "_____no_output_____" ], [ "## 卷积(Convolution)\n\n这一小节将为读者介绍卷积算法的原理和实现方案,并通过具体的案例展示如何使用卷积对图片进行操作,主要涵盖如下内容:\n\n- 卷积计算\n\n- 填充(padding)\n\n- 步幅(stride)\n\n- 感受野(Receptive Field)\n\n- 多输入通道、多输出通道和批量操作\n\n- 飞桨卷积API介绍\n\n- 卷积算子应用举例\n\n\n### 卷积计算\n\n卷积是数学分析中的一种积分变换的方法,在图像处理中采用的是卷积的离散形式。这里需要说明的是,在卷积神经网络中,卷积层的实现方式实际上是数学中定义的互相关 (cross-correlation)运算,与数学分析中的卷积定义有所不同,这里跟其他框架和卷积神经网络的教程保持一致,都使用互相关运算作为卷积的定义,具体的计算过程如 **图7** 所示。\n\n<br></br>\n<center><img src=\"https://ai-studio-static-online.cdn.bcebos.com/d5019afe174745efbf7a3d3c604b9c85eeddc947f7184446a9147d128863864d\" width = \"700\"></center>\n<center><br>图7:卷积计算过程</br></center>\n<br></br>\n\n------\n**说明:**\n\n卷积核(kernel)也被叫做滤波器(filter),假设卷积核的高和宽分别为$k_h$和$k_w$,则将称为$k_h\\times k_w$卷积,比如$3\\times5$卷积,就是指卷积核的高为3, 宽为5。\n\n-----\n\n- 如图7(a)所示:左边的图大小是$3\\times3$,表示输入数据是一个维度为$3\\times3$的二维数组;中间的图大小是$2\\times2$,表示一个维度为$2\\times2$的二维数组,我们将这个二维数组称为卷积核。先将卷积核的左上角与输入数据的左上角(即:输入数据的(0, 0)位置)对齐,把卷积核的每个元素跟其位置对应的输入数据中的元素相乘,再把所有乘积相加,得到卷积输出的第一个结果:\n\n$$0\\times1 + 1\\times2 + 2\\times4 + 3\\times5 = 25  \\ \\ \\ \\ \\ \\ \\ (a)$$\n\n- 如图7(b)所示:将卷积核向右滑动,让卷积核左上角与输入数据中的(0,1)位置对齐,同样将卷积核的每个元素跟其位置对应的输入数据中的元素相乘,再把这4个乘积相加,得到卷积输出的第二个结果:\n\n$$0\\times2 + 1\\times3 + 2\\times5 + 3\\times6 = 31  \\ \\ \\ \\ \\ \\ \\ (b)$$\n\n- 如图7(c)所示:将卷积核向下滑动,让卷积核左上角与输入数据中的(1, 0)位置对齐,可以计算得到卷积输出的第三个结果:\n\n$$0\\times4 + 1\\times5 + 2\\times7 + 3\\times8 = 43  \\ \\ \\ \\ \\ \\ \\ (c)$$\n\n- 如图7(d)所示:将卷积核向右滑动,让卷积核左上角与输入数据中的(1, 1)位置对齐,可以计算得到卷积输出的第四个结果:\n\n$$0\\times5 + 1\\times6 + 2\\times8 + 3\\times9 = 49  \\ \\ \\ \\ \\ \\ \\ (d)$$\n\n\n卷积核的计算过程可以用下面的数学公式表示,其中 $a$ 代表输入图片, $b$ 代表输出特征图,$w$ 是卷积核参数,它们都是二维数组,$\\sum_{u,v}$ 表示对卷积核参数进行遍历并求和。\n\n$$b[i, j] = \\sum_{u,v}{a[i+u, j+v]\\cdot w[u, v]}$$\n\n举例说明,假如上图中卷积核大小是$2\\times 2$,则$u$可以取0和1,$v$也可以取0和1,也就是说:\n$$b[i, j] = a[i+0, j+0]\\cdot w[0, 0] + a[i+0, j+1]\\cdot w[0, 1] + a[i+1, j+0]\\cdot w[1, 0] + a[i+1, j+1]\\cdot w[1, 1]$$\n\n读者可以自行验证,当$[i, j]$取不同值时,根据此公式计算的结果与上图中的例子是否一致。\n\n\n- **【思考】 当卷积核大小为$3 \\times 3$时,$b$和$a$之间的对应关系应该是怎样的?**\n\n\n------\n**其它说明:**\n\n在卷积神经网络中,一个卷积算子除了上面描述的卷积过程之外,还包括加上偏置项的操作。例如假设偏置为1,则上面卷积计算的结果为:\n  \n$$0\\times1 + 1\\times2 + 2\\times4 + 3\\times5 \\mathbf{\\ + 1} = 26$$\n$$0\\times2 + 1\\times3 + 2\\times5 + 3\\times6 \\mathbf{\\ + 1} = 32$$\n$$0\\times4 + 1\\times5 + 2\\times7 + 3\\times8 \\mathbf{\\ + 1} = 44$$\n$$0\\times5 + 1\\times6 + 2\\times8 + 3\\times9 \\mathbf{\\ + 1} = 50$$\n\n------\n\n", "_____no_output_____" ], [ "### 
填充(padding)\n\n在上面的例子中,输入图片尺寸为$3\\times3$,输出图片尺寸为$2\\times2$,经过一次卷积之后,图片尺寸变小。卷积输出特征图的尺寸计算方法如下(卷积核的高和宽分别为$k_h$和$k_w$):\n\n$$H_{out} = H - k_h + 1$$\n$$W_{out} = W - k_w + 1$$\n\n如果输入尺寸为4,卷积核大小为3时,输出尺寸为$4-3+1=2$。读者可以自行检查当输入图片和卷积核为其他尺寸时,上述计算式是否成立。当卷积核尺寸大于1时,输出特征图的尺寸会小于输入图片尺寸。如果经过多次卷积,输出图片尺寸会不断减小。为了避免卷积之后图片尺寸变小,通常会在图片的外围进行填充(padding),如 **图8** 所示。", "_____no_output_____" ], [ "<br></br>\n<center><img src=\"https://ai-studio-static-online.cdn.bcebos.com/01d311ec2c65435f85059953a84ec7ea8ef2fd236452450e912346a7da201c5f\" width = \"700\"></center>\n<center><br>图8:图形填充 </br></center>\n<br></br>\n\n- 如图8(a)所示:填充的大小为1,填充值为0。填充之后,输入图片尺寸从$4\\times4$变成了$6\\times6$,使用$3\\times3$的卷积核,输出图片尺寸为$4\\times4$。\n\n- 如图8(b)所示:填充的大小为2,填充值为0。填充之后,输入图片尺寸从$4\\times4$变成了$8\\times8$,使用$3\\times3$的卷积核,输出图片尺寸为$6\\times6$。\n\n如果在图片高度方向,在第一行之前填充$p_{h1}$行,在最后一行之后填充$p_{h2}$行;在图片的宽度方向,在第1列之前填充$p_{w1}$列,在最后1列之后填充$p_{w2}$列;则填充之后的图片尺寸为$(H + p_{h1} + p_{h2})\\times(W + p_{w1} + p_{w2})$。经过大小为$k_h\\times k_w$的卷积核操作之后,输出图片的尺寸为:\n$$H_{out} = H + p_{h1} + p_{h2} - k_h + 1$$\n$$W_{out} = W + p_{w1} + p_{w2} - k_w + 1$$\n\n在卷积计算过程中,通常会在高度或者宽度的两侧采取等量填充,即$p_{h1} = p_{h2} = p_h,\\ \\ p_{w1} = p_{w2} = p_w$,上面计算公式也就变为:\n$$H_{out} = H + 2p_h - k_h + 1$$\n$$W_{out} = W + 2p_w - k_w + 1$$\n卷积核大小通常使用1,3,5,7这样的奇数,如果使用的填充大小为$p_h=(k_h-1)/2 ,p_w=(k_w-1)/2$,则卷积之后图像尺寸不变。例如当卷积核大小为3时,padding大小为1,卷积之后图像尺寸不变;同理,如果卷积核大小为5,padding大小为2,也能保持图像尺寸不变。", "_____no_output_____" ], [ "### 步幅(stride)\n\n**图8** 中卷积核每次滑动一个像素点,这是步幅为1的特殊情况。**图9** 是步幅为2的卷积过程,卷积核在图片上移动时,每次移动大小为2个像素点。\n<br></br>\n<center><img src=\"https://ai-studio-static-online.cdn.bcebos.com/afdae9af02fc45eabdd9663ee6474e4da86675fa1f444c78aea0e21539b32cf0\" width = \"800\"></center>\n<center><br>图9:步幅为2的卷积过程 </br></center>\n<br></br>\n\n当宽和高方向的步幅分别为$s_h$和$s_w$时,输出特征图尺寸的计算公式是:\n\n$$H_{out} = \\frac{H + 2p_h - k_h}{s_h} + 1$$\n\n$$W_{out} = \\frac{W + 2p_w - k_w}{s_w} + 1$$\n\n假设输入图片尺寸是$H\\times W = 100 \\times 100$,卷积核大小$k_h \\times k_w = 3 \\times 3$,填充$p_h = p_w = 1$,步幅为$s_h = s_w = 2$,则输出特征图的尺寸为:\n\n$$H_{out} = \\frac{100 + 2 - 3}{2} + 1 = 50$$\n\n$$W_{out} = \\frac{100 + 2 - 3}{2} + 1 = 50$$", "_____no_output_____" ], [ "### 感受野(Receptive Field)\n\n输出特征图上每个点的数值,是由输入图片上大小为$k_h\\times k_w$的区域的元素与卷积核每个元素相乘再相加得到的,所以输入图像上$k_h\\times k_w$区域内每个元素数值的改变,都会影响输出点的像素值。我们将这个区域叫做输出特征图上对应点的感受野。感受野内每个元素数值的变动,都会影响输出点的数值变化。比如$3\\times3$卷积对应的感受野大小就是$3\\times3$,如 **图10** 所示。\n\n<br></br>\n<center><img src=\"https://ai-studio-static-online.cdn.bcebos.com/1021536721524f4d8f4c1aefa89693c4b0fd388f21a347b583d413b3ac41241b\" width = \"800\"></center>\n<center><br>图10:感受野为3×3的卷积 </br></center>\n<br></br>\n\n而当通过两层$3\\times3$的卷积之后,感受野的大小将会增加到$5\\times5$,如 **图11** 所示。\n\n<br></br>\n<center><img src=\"https://ai-studio-static-online.cdn.bcebos.com/ac14916db81e40a48a25ab894d7a95e33fa0eece71d44a55af7bffab462fb7a7\" width = \"800\"></center>\n<center><br>图11:感受野为5×5的卷积 </br></center>\n<br></br>\n\n因此,当增加卷积网络深度的同时,感受野将会增大,输出特征图中的一个像素点将会包含更多的图像语义信息。", "_____no_output_____" ], [ "### 多输入通道、多输出通道和批量操作\n\n前面介绍的卷积计算过程比较简单,实际应用时,处理的问题要复杂的多。例如:对于彩色图片有RGB三个通道,需要处理多输入通道的场景。输出特征图往往也会具有多个通道,而且在神经网络的计算中常常是把一个批次的样本放在一起计算,所以卷积算子需要具有批量处理多输入和多输出通道数据的功能,下面将分别介绍这几种场景的操作方式。\n\n- **多输入通道场景**\n\n上面的例子中,卷积层的数据是一个2维数组,但实际上一张图片往往含有RGB三个通道,要计算卷积的输出结果,卷积核的形式也会发生变化。假设输入图片的通道数为$C_{in}$,输入数据的形状是$C_{in}\\times{H_{in}}\\times{W_{in}}$,计算过程如 **图12** 所示。\n\n1. 对每个通道分别设计一个2维数组作为卷积核,卷积核数组的形状是$C_{in}\\times{k_h}\\times{k_w}$。\n\n1. 对任一通道$C_{in} \\in [0, C_{in})$,分别用大小为$k_h\\times{k_w}$的卷积核在大小为$H_{in}\\times{W_{in}}$的二维数组上做卷积。\n\n1. 
将这$C_{in}$个通道的计算结果相加,得到的是一个形状为$H_{out}\\times{W_{out}}$的二维数组。\n<br></br>\n<center><img src=\"https://ai-studio-static-online.cdn.bcebos.com/92186667b8424a7ca781b22de6766fa62e31512cf2e24e33a4b796541177c9dd\" width = \"800\"></center>\n<center><br>图12:多输入通道计算过程 </br></center>\n<br></br>", "_____no_output_____" ], [ "- **多输出通道场景**\n\n上边我们介绍了只有一个卷积核时的计算方式,那么如果我们希望检测多种类型的特征,实际上我们可以使用多个卷积核进行计算。所以一般来说,卷积操作的输出特征图也会具有多个通道$C_{out}$,这时我们需要设计$C_{out}$个维度为$C_{in}\\times{k_h}\\times{k_w}$的卷积核,卷积核数组的维度是$C_{out}\\times C_{in}\\times{k_h}\\times{k_w}$,如 **图13** 所示。\n\n1. 对任一输出通道$c_{out} \\in [0, C_{out})$,分别使用上面描述的形状为$C_{in}\\times{k_h}\\times{k_w}$的卷积核对输入图片做卷积。\n1. 将这$C_{out}$个形状为$H_{out}\\times{W_{out}}$的二维数组拼接在一起,形成维度为$C_{out}\\times{H_{out}}\\times{W_{out}}$的三维数组。\n\n------\n**说明:**\n\n通常将卷积核的输出通道数叫做卷积核的个数。\n\n------\n\n<center><img src=\"https://ai-studio-static-online.cdn.bcebos.com/cf1fbddc141349e4b7aaeade9a201b78a16d249e069c4f8aaeb77e0ea1a95c31\" width = \"800\"></center>\n<center><br>图13:多输出通道计算过程 </br></center>\n<br></br>\n", "_____no_output_____" ], [ "- **批量操作**\n\n在卷积神经网络的计算中,通常将多个样本放在一起形成一个mini-batch进行批量操作,即输入数据的维度是$N\\times{C_{in}}\\times{H_{in}}\\times{W_{in}}$。由于会对每张图片使用同样的卷积核进行卷积操作,卷积核的维度与上面多输出通道的情况一样,仍然是$C_{out}\\times C_{in}\\times{k_h}\\times{k_w}$,输出特征图的维度是$N\\times{C_{out}}\\times{H_{out}}\\times{W_{out}}$,如 **图14** 所示。\n\n<br></br>\n<center><img src=\"https://ai-studio-static-online.cdn.bcebos.com/60760d68001c40d6a6c500b17f57d8deae7b5921631b4b6b896b057b904d24b1\" width = \"800\"></center>\n<center><br>图14:批量操作 </br></center>\n<br></br>\n", "_____no_output_____" ], [ "### 飞桨卷积API介绍\n\n飞桨卷积算子对应的API是[paddle.nn.Conv2D](https://www.paddlepaddle.org.cn/documentation/docs/zh/2.0-rc/api/paddle/nn/layer/conv/Conv2D_cn.html),用户可以直接调用API进行计算,也可以在此基础上修改。Conv2D名称中的“2D”表明卷积核是二维的,多用于处理图像数据。类似的,也有Conv3D可以用于处理视频数据(图像的序列)。\n\n> *class* paddle.nn.Conv2D (*in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, padding_mode='zeros', weight_attr=None, bias_attr=None, data_format='NCHW'*)\n\n\n常用的参数如下:\n - in_channels(int) - 输入图像的通道数。\n - out_channels(int) - 卷积核的个数,和输出特征图通道数相同,相当于上文中的$C_{out}$。\n - kernel_size(int|list|tuple) - 卷积核大小,可以是整数,比如3,表示卷积核的高和宽均为3 ;或者是两个整数的list,例如[3,2],表示卷积核的高为3,宽为2。\n - stride(int|list|tuple,可选) - 步长大小,可以是整数,默认值为1,表示垂直和水平滑动步幅均为1;或者是两个整数的list,例如[3,2],表示垂直滑动步幅为3,水平滑动步幅为2。\n - padding(int|list|tuple|str,可选) - 填充大小,可以是整数,比如1,表示竖直和水平边界填充大小均为1;或者是两个整数的list,例如[2,1],表示竖直边界填充大小为2,水平边界填充大小为1。\n\n输入数据维度$[N, C_{in}, H_{in}, W_{in}]$,输出数据维度$[N, out\\_channels, H_{out}, W_{out}]$,权重参数$w$的维度$[out\\_channels, C_{in}, filter\\_size\\_h, filter\\_size\\_w]$,偏置参数$b$的维度是$[out\\_channels]$。注意,即使输入只有一张灰度图片$[H_{in}, W_{in}]$,也需要处理成四个维度的输入向量$[1, 1, H_{in}, W_{in}]$。", "_____no_output_____" ], [ "### 卷积算子应用举例\n\n下面介绍卷积算子在图片中应用的三个案例,并观察其计算结果。\n\n**案例1——简单的黑白边界检测**\n\n下面是使用Conv2D算子完成一个图像边界检测的任务。图像左边为光亮部分,右边为黑暗部分,需要检测出光亮跟黑暗的分界处。\n\n设置宽度方向的卷积核为$[1, 0, -1]$,此卷积核会将宽度方向间隔为1的两个像素点的数值相减。当卷积核在图片上滑动时,如果它所覆盖的像素点位于亮度相同的区域,则左右间隔为1的两个像素点数值的差为0。只有当卷积核覆盖的像素点有的处于光亮区域,有的处在黑暗区域时,左右间隔为1的两个点像素值的差才不为0。将此卷积核作用到图片上,输出特征图上只有对应黑白分界线的地方像素值才不为0。具体代码如下所示,结果输出在下方的图案中。", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nimport numpy as np\nimport paddle\nfrom paddle.nn import Conv2D\nfrom paddle.nn.initializer import Assign\n%matplotlib inline\n\n# 创建初始化权重参数w\nw = np.array([1, 0, -1], dtype='float32')\n# 将权重参数调整成维度为[cout, cin, kh, kw]的四维张量\nw = w.reshape([1, 1, 1, 3])\n# 创建卷积算子,设置输出通道数,卷积核大小,和初始化权重参数\n# kernel_size = [1, 3]表示kh = 1, kw=3\n# 创建卷积算子的时候,通过参数属性weight_attr指定参数初始化方式\n# 
这里的初始化方式时,从numpy.ndarray初始化卷积参数\nconv = Conv2D(in_channels=1, out_channels=1, kernel_size=[1, 3],\n weight_attr=paddle.ParamAttr(\n initializer=Assign(value=w)))\n\n# 创建输入图片,图片左边的像素点取值为1,右边的像素点取值为0\nimg = np.ones([50,50], dtype='float32')\nimg[:, 30:] = 0.\n# 将图片形状调整为[N, C, H, W]的形式\nx = img.reshape([1,1,50,50])\n# 将numpy.ndarray转化成paddle中的tensor\nx = paddle.to_tensor(x)\n# 使用卷积算子作用在输入图片上\ny = conv(x)\n# 将输出tensor转化为numpy.ndarray\nout = y.numpy()\nf = plt.subplot(121)\nf.set_title('input image', fontsize=15)\nplt.imshow(img, cmap='gray')\nf = plt.subplot(122)\nf.set_title('output featuremap', fontsize=15)\n# 卷积算子Conv2D输出数据形状为[N, C, H, W]形式\n# 此处N, C=1,输出数据形状为[1, 1, H, W],是4维数组\n# 但是画图函数plt.imshow画灰度图时,只接受2维数组\n# 通过numpy.squeeze函数将大小为1的维度消除\nplt.imshow(out.squeeze(), cmap='gray')\nplt.show()", "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/matplotlib/cbook/__init__.py:2349: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working\n if isinstance(obj, collections.Iterator):\n/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/matplotlib/cbook/__init__.py:2366: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working\n return list(data) if isinstance(data, collections.MappingView) else data\n/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/numpy/lib/type_check.py:546: DeprecationWarning: np.asscalar(a) is deprecated since NumPy v1.16, use a.item() instead\n 'a.item() instead', DeprecationWarning, stacklevel=1)\n" ], [ "# 查看卷积层的权重参数名字和数值\nprint(conv.weight)\n# 参看卷积层的偏置参数名字和数值\nprint(conv.bias)", "Parameter containing:\nTensor(shape=[1, 1, 1, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=False,\n [[[[ 1., 0., -1.]]]])\nParameter containing:\nTensor(shape=[1], dtype=float32, place=CUDAPlace(0), stop_gradient=False,\n [0.])\n" ] ], [ [ "<br></br>\n**案例2——图像中物体边缘检测**\n\n上面展示的是一个人为构造出来的简单图片,使用卷积网络检测图片明暗分界处的示例。对于真实的图片,也可以使用合适的卷积核(3\\*3卷积核的中间值是8,周围一圈的值是8个-1)对其进行操作,用来检测物体的外形轮廓,观察输出特征图跟原图之间的对应关系,如下代码所示:", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nfrom PIL import Image\nimport numpy as np\nimport paddle\nfrom paddle.nn import Conv2D\nfrom paddle.nn.initializer import Assign\nimg = Image.open('./work/images/section1/000000098520.jpg')\n\n# 设置卷积核参数\nw = np.array([[-1,-1,-1], [-1,8,-1], [-1,-1,-1]], dtype='float32')/8\nw = w.reshape([1, 1, 3, 3])\n# 由于输入通道数是3,将卷积核的形状从[1,1,3,3]调整为[1,3,3,3]\nw = np.repeat(w, 3, axis=1)\n# 创建卷积算子,输出通道数为1,卷积核大小为3x3,\n# 并使用上面的设置好的数值作为卷积核权重的初始化参数\nconv = Conv2D(in_channels=3, out_channels=1, kernel_size=[3, 3], \n weight_attr=paddle.ParamAttr(\n initializer=Assign(value=w)))\n \n# 将读入的图片转化为float32类型的numpy.ndarray\nx = np.array(img).astype('float32')\n# 图片读入成ndarry时,形状是[H, W, 3],\n# 将通道这一维度调整到最前面\nx = np.transpose(x, (2,0,1))\n# 将数据形状调整为[N, C, H, W]格式\nx = x.reshape(1, 3, img.height, img.width)\nx = paddle.to_tensor(x)\ny = conv(x)\nout = y.numpy()\nplt.figure(figsize=(20, 10))\nf = plt.subplot(121)\nf.set_title('input image', fontsize=15)\nplt.imshow(img)\nf = plt.subplot(122)\nf.set_title('output feature map', fontsize=15)\nplt.imshow(out.squeeze(), cmap='gray')\nplt.show()\n", "_____no_output_____" ] ], [ [ "<br></br>\n**案例3——图像均值模糊**\n\n另外一种比较常见的卷积核(5\\*5的卷积核中每个值均为1)是用当前像素跟它邻域内的像素取平均,这样可以使图像上噪声比较大的点变得更平滑,如下代码所示:", "_____no_output_____" ] ], [ [ "import paddle\nimport matplotlib.pyplot as plt\nfrom PIL import 
Image\nimport numpy as np\nfrom paddle.nn import Conv2D\nfrom paddle.nn.initializer import Assign\n# 读入图片并转成numpy.ndarray\n# 换成灰度图\nimg = Image.open('./work/images/section1/000000355610.jpg').convert('L')\nimg = np.array(img)\n\n# 创建初始化参数\nw = np.ones([1, 1, 5, 5], dtype = 'float32')/25\nconv = Conv2D(in_channels=1, out_channels=1, kernel_size=[5, 5], \n weight_attr=paddle.ParamAttr(\n initializer=Assign(value=w)))\nx = img.astype('float32')\nx = x.reshape(1,1,img.shape[0], img.shape[1])\nx = paddle.to_tensor(x)\ny = conv(x)\nout = y.numpy()\n\nplt.figure(figsize=(20, 12))\nf = plt.subplot(121)\nf.set_title('input image')\nplt.imshow(img, cmap='gray')\n\nf = plt.subplot(122)\nf.set_title('output feature map')\nout = out.squeeze()\nplt.imshow(out, cmap='gray')\n\nplt.show()", "_____no_output_____" ] ], [ [ "## 池化(Pooling)\n\n\n池化是使用某一位置的相邻输出的总体统计特征代替网络在该位置的输出,其好处是当输入数据做出少量平移时,经过池化函数后的大多数输出还能保持不变。比如:当识别一张图像是否是人脸时,我们需要知道人脸左边有一只眼睛,右边也有一只眼睛,而不需要知道眼睛的精确位置,这时候通过池化某一片区域的像素点来得到总体统计特征会显得很有用。由于池化之后特征图会变得更小,如果后面连接的是全连接层,能有效的减小神经元的个数,节省存储空间并提高计算效率。\n如 **图15** 所示,将一个$2\\times 2$的区域池化成一个像素点。通常有两种方法,平均池化和最大池化。\n<br></br>\n<center><img src=\"https://ai-studio-static-online.cdn.bcebos.com/5479daa3734d424bb710615d3c4f7e017ba2558808a8421ca7c914f3fced0a48\" width = \"600\"></center>\n<center><br>图15:池化 </br></center>\n<br></br>\n\n- 如图15(a):平均池化。这里使用大小为$2\\times2$的池化窗口,每次移动的步幅为2,对池化窗口覆盖区域内的像素取平均值,得到相应的输出特征图的像素值。\n- 如图15(b):最大池化。对池化窗口覆盖区域内的像素取最大值,得到输出特征图的像素值。当池化窗口在图片上滑动时,会得到整张输出特征图。池化窗口的大小称为池化大小,用$k_h \\times k_w$表示。在卷积神经网络中用的比较多的是窗口大小为$2 \\times 2$,步幅为2的池化。\n\n与卷积核类似,池化窗口在图片上滑动时,每次移动的步长称为步幅,当宽和高方向的移动大小不一样时,分别用$s_w$和$s_h$表示。也可以对需要进行池化的图片进行填充,填充方式与卷积类似,假设在第一行之前填充$p_{h1}$行,在最后一行后面填充$p_{h2}$行。在第一列之前填充$p_{w1}$列,在最后一列之后填充$p_{w2}$列,则池化层的输出特征图大小为:\n\n$$H_{out} = \\frac{H + p_{h1} + p_{h2} - k_h}{s_h} + 1$$\n\n$$W_{out} = \\frac{W + p_{w1} + p_{w2} - k_w}{s_w} + 1$$\n\n在卷积神经网络中,通常使用$2\\times2$大小的池化窗口,步幅也使用2,填充为0,则输出特征图的尺寸为:\n\n$$H_{out} = \\frac{H}{2}$$\n\n$$W_{out} = \\frac{W}{2}$$\n\n通过这种方式的池化,输出特征图的高和宽都减半,但通道数不会改变。\n", "_____no_output_____" ], [ "## 批归一化(Batch Normalization)\n\n[批归一化方法](https://arxiv.org/abs/1502.03167)(Batch Normalization,BatchNorm)是由Ioffe和Szegedy于2015年提出的,已被广泛应用在深度学习中,其目的是对神经网络中间层的输出进行标准化处理,使得中间层的输出更加稳定。\n\n通常我们会对神经网络的数据进行标准化处理,处理后的样本数据集满足均值为0,方差为1的统计分布,这是因为当输入数据的分布比较固定时,有利于算法的稳定和收敛。对于深度神经网络来说,由于参数是不断更新的,即使输入数据已经做过标准化处理,但是对于比较靠后的那些层,其接收到的输入仍然是剧烈变化的,通常会导致数值不稳定,模型很难收敛。BatchNorm能够使神经网络中间层的输出变得更加稳定,并有如下三个优点:\n\n- 使学习快速进行(能够使用较大的学习率)\n \n- 降低模型对初始值的敏感性\n \n- 从一定程度上抑制过拟合\n\nBatchNorm主要思路是在训练时以mini-batch为单位,对神经元的数值进行归一化,使数据的分布满足均值为0,方差为1。具体计算过程如下:\n\n**1. 计算mini-batch内样本的均值**\n\n$$\\mu_B \\leftarrow \\frac{1}{m}\\sum_{i=1}^mx^{(i)}$$\n\n其中$x^{(i)}$表示mini-batch中的第$i$个样本。\n\n例如输入mini-batch包含3个样本,每个样本有2个特征,分别是:\n\n$$x^{(1)} = (1,2), \\ \\ x^{(2)} = (3,6), \\ \\ x^{(3)} = (5,10)$$\n\n对每个特征分别计算mini-batch内样本的均值:\n\n$$\\mu_{B0} = \\frac{1+3+5}{3} = 3, \\ \\ \\ \\mu_{B1} = \\frac{2+6+10}{3} = 6$$\n\n则样本均值是:\n\n$$\\mu_{B} = (\\mu_{B0}, \\mu_{B1}) = (3, 6)$$\n\n**2. 计算mini-batch内样本的方差**\n\n$$\\sigma_B^2 \\leftarrow \\frac{1}{m}\\sum_{i=1}^m(x^{(i)} - \\mu_B)^2$$\n\n上面的计算公式先计算一个批次内样本的均值$\\mu_B$和方差$\\sigma_B^2$,然后再对输入数据做归一化,将其调整成均值为0,方差为1的分布。\n\n对于上述给定的输入数据$x^{(1)}, x^{(2)}, x^{(3)}$,可以计算出每个特征对应的方差:\n\n$$\\sigma_{B0}^2 = \\frac{1}{3} \\cdot ((1-3)^2 + (3-3)^2 + (5-3)^2) = \\frac{8}{3}$$\n\n$$\\sigma_{B1}^2 = \\frac{1}{3} \\cdot ((2-6)^2 + (6-6)^2 + (10-6)^2) = \\frac{32}{3}$$\n\n则样本方差是:\n\n$$\\sigma_{B}^2 = (\\sigma_{B0}^2, \\sigma_{B1}^2) = (\\frac{8}{3}, \\frac{32}{3})$$\n\n**3. 
计算标准化之后的输出**\n\n$$\\hat{x}^{(i)} \\leftarrow \\frac{x^{(i)} - \\mu_B}{\\sqrt{(\\sigma_B^2 + \\epsilon)}}$$\n\n其中$\\epsilon$是一个微小值(例如$1e-7$),其主要作用是为了防止分母为0。\n\n对于上述给定的输入数据$x^{(1)}, x^{(2)}, x^{(3)}$,可以计算出标准化之后的输出:\n\n$$\\hat{x}^{(1)} = (\\frac{1 - 3}{\\sqrt{\\frac{8}{3}}}, \\ \\ \\frac{2 - 6}{\\sqrt{\\frac{32}{3}}}) = (-\\sqrt{\\frac{3}{2}}, \\ \\ -\\sqrt{\\frac{3}{2}})$$\n\n$$\\hat{x}^{(2)} = (\\frac{3 - 3}{\\sqrt{\\frac{8}{3}}}, \\ \\ \\frac{6 - 6}{\\sqrt{\\frac{32}{3}}}) = (0, \\ \\ 0) \\ \\ \\ \\ \\ \\ \\ \\ \\ \\ \\ \\ \\ \\ \\ \\ \\ $$\n\n$$\\hat{x}^{(3)} = (\\frac{5 - 3}{\\sqrt{\\frac{8}{3}}}, \\ \\ \\frac{10 - 6}{\\sqrt{\\frac{32}{3}}}) = (\\sqrt{\\frac{3}{2}}, \\ \\ \\sqrt{\\frac{3}{2}}) \\ \\ \\ \\ $$\n\n- 读者可以自行验证由$\\hat{x}^{(1)}, \\hat{x}^{(2)}, \\hat{x}^{(3)}$构成的mini-batch,是否满足均值为0,方差为1的分布。\n\n\n如果强行限制输出层的分布是标准化的,可能会导致某些特征模式的丢失,所以在标准化之后,BatchNorm会紧接着对数据做缩放和平移。\n\n$$y_i \\leftarrow \\gamma \\hat{x_i} + \\beta$$\n\n其中$\\gamma$和$\\beta$是可学习的参数,可以赋初始值$\\gamma = 1, \\beta = 0$,在训练过程中不断学习调整。\n\n上面列出的是BatchNorm方法的计算逻辑,下面针对两种类型的输入数据格式分别进行举例。飞桨支持输入数据的维度大小为2、3、4、5四种情况,这里给出的是维度大小为2和4的示例。\n\n* **示例一:** 当输入数据形状是$[N, K]$时,一般对应全连接层的输出,示例代码如下所示。 \n\n这种情况下会分别对K的每一个分量计算N个样本的均值和方差,数据和参数对应如下:\n- 输入 x, [N, K]\n- 输出 y, [N, K]\n- 均值 $\\mu_B$,[K, ]\n- 方差 $\\sigma_B^2$, [K, ]\n- 缩放参数$\\gamma$, [K, ]\n- 平移参数$\\beta$, [K, ]", "_____no_output_____" ] ], [ [ "# 输入数据形状是 [N, K]时的示例\nimport numpy as np\nimport paddle\nfrom paddle.nn import BatchNorm1D\n# 创建数据\ndata = np.array([[1,2,3], [4,5,6], [7,8,9]]).astype('float32')\n# 使用BatchNorm1D计算归一化的输出\n# 输入数据维度[N, K],num_features等于K\nbn = BatchNorm1D(num_features=3) \nx = paddle.to_tensor(data)\ny = bn(x)\nprint('output of BatchNorm1D Layer: \\n {}'.format(y.numpy()))\n\n# 使用Numpy计算均值、方差和归一化的输出\n# 这里对第0个特征进行验证\na = np.array([1,4,7])\na_mean = a.mean()\na_std = a.std()\nb = (a - a_mean) / a_std\nprint('std {}, mean {}, \\n output {}'.format(a_mean, a_std, b))\n\n# 建议读者对第1和第2个特征进行验证,观察numpy计算结果与paddle计算结果是否一致", "output of BatchNorm1D Layer: \n [[-1.2247438 -1.2247438 -1.2247438]\n [ 0. 0. 0. ]\n [ 1.2247438 1.2247438 1.2247438]]\nstd 4.0, mean 2.449489742783178, \n output [-1.22474487 0. 
1.22474487]\n" ] ], [ [ "* **示例二:** 当输入数据形状是$[N, C, H, W]$时, 一般对应卷积层的输出,示例代码如下所示。\n\n这种情况下会沿着C这一维度进行展开,分别对每一个通道计算N个样本中总共$N\\times H \\times W$个像素点的均值和方差,数据和参数对应如下:\n- 输入 x, [N, C, H, W]\n- 输出 y, [N, C, H, W]\n- 均值 $\\mu_B$,[C, ]\n- 方差 $\\sigma_B^2$, [C, ]\n- 缩放参数$\\gamma$, [C, ]\n- 平移参数$\\beta$, [C, ]\n\n------\n**小窍门:**\n\n可能有读者会问:“BatchNorm里面不是还要对标准化之后的结果做仿射变换吗,怎么使用Numpy计算的结果与BatchNorm算子一致?” 这是因为BatchNorm算子里面自动设置初始值$\\gamma = 1, \\beta = 0$,这时候仿射变换相当于是恒等变换。在训练过程中这两个参数会不断的学习,这时仿射变换就会起作用。\n\n------", "_____no_output_____" ] ], [ [ "# 输入数据形状是[N, C, H, W]时的batchnorm示例\nimport numpy as np\nimport paddle\nfrom paddle.nn import BatchNorm2D\n\n# 设置随机数种子,这样可以保证每次运行结果一致\nnp.random.seed(100)\n# 创建数据\ndata = np.random.rand(2,3,3,3).astype('float32')\n# 使用BatchNorm2D计算归一化的输出\n# 输入数据维度[N, C, H, W],num_features等于C\nbn = BatchNorm2D(num_features=3)\nx = paddle.to_tensor(data)\ny = bn(x)\nprint('input of BatchNorm2D Layer: \\n {}'.format(x.numpy()))\nprint('output of BatchNorm2D Layer: \\n {}'.format(y.numpy()))\n\n# 取出data中第0通道的数据,\n# 使用numpy计算均值、方差及归一化的输出\na = data[:, 0, :, :]\na_mean = a.mean()\na_std = a.std()\nb = (a - a_mean) / a_std\nprint('channel 0 of input data: \\n {}'.format(a))\nprint('std {}, mean {}, \\n output: \\n {}'.format(a_mean, a_std, b))\n\n# 提示:这里通过numpy计算出来的输出\n# 与BatchNorm2D算子的结果略有差别,\n# 因为在BatchNorm2D算子为了保证数值的稳定性,\n# 在分母里面加上了一个比较小的浮点数epsilon=1e-05", "input of BatchNorm2D Layer: \n [[[[0.54340494 0.2783694 0.4245176 ]\n [0.84477615 0.00471886 0.12156912]\n [0.67074907 0.82585275 0.13670659]]\n\n [[0.5750933 0.89132196 0.20920213]\n [0.18532822 0.10837689 0.21969749]\n [0.9786238 0.8116832 0.17194101]]\n\n [[0.81622475 0.27407375 0.4317042 ]\n [0.9400298 0.81764936 0.33611196]\n [0.17541045 0.37283206 0.00568851]]]\n\n\n [[[0.25242636 0.7956625 0.01525497]\n [0.5988434 0.6038045 0.10514768]\n [0.38194343 0.03647606 0.89041156]]\n\n [[0.98092085 0.05994199 0.89054596]\n [0.5769015 0.7424797 0.63018394]\n [0.5818422 0.02043913 0.21002658]]\n\n [[0.5446849 0.76911515 0.25069523]\n [0.2858957 0.8523951 0.9750065 ]\n [0.8848533 0.35950786 0.59885895]]]]\noutput of BatchNorm2D Layer: \n [[[[ 0.41260773 -0.46198368 0.02029113]\n [ 1.4071033 -1.3650038 -0.9794093 ]\n [ 0.83283097 1.344658 -0.9294571 ]]\n\n [[ 0.25201762 1.2038352 -0.8492796 ]\n [-0.92113775 -1.1527538 -0.81768954]\n [ 1.4666054 0.9641302 -0.9614319 ]]\n\n [[ 0.9541145 -0.9075854 -0.366296 ]\n [ 1.3792504 0.9590065 -0.69455147]\n [-1.2463866 -0.56845784 -1.8291972 ]]]\n\n\n [[[-0.5475932 1.2450331 -1.3302356 ]\n [ 0.5955492 0.6119205 -1.0335984 ]\n [-0.12019946 -1.2602081 1.5576957 ]]\n\n [[ 1.4735192 -1.2985382 1.2014996 ]\n [ 0.25746003 0.75583434 0.41783503]\n [ 0.272331 -1.4174379 -0.84679806]]\n\n [[ 0.02166999 0.7923442 -0.9878652 ]\n [-0.8669898 1.0783204 1.4993575 ]\n [ 1.189779 -0.614212 0.20769906]]]]\nchannel 0 of input data: \n [[[0.54340494 0.2783694 0.4245176 ]\n [0.84477615 0.00471886 0.12156912]\n [0.67074907 0.82585275 0.13670659]]\n\n [[0.25242636 0.7956625 0.01525497]\n [0.5988434 0.6038045 0.10514768]\n [0.38194343 0.03647606 0.89041156]]]\nstd 0.4183686077594757, mean 0.3030227720737457, \n output: \n [[[ 0.41263014 -0.46200886 0.02029219]\n [ 1.4071798 -1.3650781 -0.9794626 ]\n [ 0.8328762 1.3447311 -0.92950773]]\n\n [[-0.54762304 1.2451009 -1.3303081 ]\n [ 0.5955816 0.61195374 -1.0336547 ]\n [-0.12020606 -1.2602768 1.5577804 ]]]\n" ] ], [ [ "\n**- 
预测时使用BatchNorm**\n\n上面介绍了在训练过程中使用BatchNorm对一批样本进行归一化的方法,但如果使用同样的方法对需要预测的一批样本进行归一化,则预测结果会出现不确定性。\n\n例如样本A、样本B作为一批样本计算均值和方差,与样本A、样本C和样本D作为一批样本计算均值和方差,得到的结果一般来说是不同的。那么样本A的预测结果就会变得不确定,这对预测过程来说是不合理的。解决方法是在训练过程中将大量样本的均值和方差保存下来,预测时直接使用保存好的值而不再重新计算。实际上,在BatchNorm的具体实现中,训练时会计算均值和方差的移动平均值。在飞桨中,默认是采用如下方式计算:\n\n$$saved\\_\\mu_B \\leftarrow \\ saved\\_\\mu_B \\times 0.9 + \\mu_B \\times (1 - 0.9)$$\n\n$$saved\\_\\sigma_B^2 \\leftarrow \\ saved\\_\\sigma_B^2 \\times 0.9 + \\sigma_B^2 \\times (1 - 0.9)$$\n\n在训练过程的最开始将$saved\\_\\mu_B$和$saved\\_\\sigma_B^2$设置为0,每次输入一批新的样本,计算出$\\mu_B$和$\\sigma_B^2$,然后通过上面的公式更新$saved\\_\\mu_B$和$saved\\_\\sigma_B^2$,在训练的过程中不断的更新它们的值,并作为BatchNorm层的参数保存下来。预测的时候将会加载参数$saved\\_\\mu_B$和$saved\\_\\sigma_B^2$,用他们来代替$\\mu_B$和$\\sigma_B^2$。", "_____no_output_____" ], [ "## 丢弃法(Dropout)\n\n丢弃法(Dropout)是深度学习中一种常用的抑制过拟合的方法,其做法是在神经网络学习过程中,随机删除一部分神经元。训练时,随机选出一部分神经元,将其输出设置为0,这些神经元将不对外传递信号。\n\n**图16** 是Dropout示意图,左边是完整的神经网络,右边是应用了Dropout之后的网络结构。应用Dropout之后,会将标了$\\times$的神经元从网络中删除,让它们不向后面的层传递信号。在学习过程中,丢弃哪些神经元是随机决定,因此模型不会过度依赖某些神经元,能一定程度上抑制过拟合。\n<br></br>\n<center><img src=\"https://ai-studio-static-online.cdn.bcebos.com/2afb5379d93c46c6be802a1257236b5450d3d3a4a2454b36a5ffb4e005e468ec\" width = \"700\"></center>\n<center><br>图16 Dropout示意图 </br></center>\n<br></br>\n\n在预测场景时,会向前传递所有神经元的信号,可能会引出一个新的问题:训练时由于部分神经元被随机丢弃了,输出数据的总大小会变小。比如:计算其$L1$范数会比不使用Dropout时变小,但是预测时却没有丢弃神经元,这将导致训练和预测时数据的分布不一样。为了解决这个问题,飞桨支持如下两种方法:\n\n- **downscale_in_infer**\n\n训练时以比例$r$随机丢弃一部分神经元,不向后传递它们的信号;预测时向后传递所有神经元的信号,但是将每个神经元上的数值乘以 $(1 - r)$。\n\n- **upscale_in_train**\n\n训练时以比例$r$随机丢弃一部分神经元,不向后传递它们的信号,但是将那些被保留的神经元上的数值除以 $(1 - r)$;预测时向后传递所有神经元的信号,不做任何处理。\n\n在飞桨[Dropout API](https://www.paddlepaddle.org.cn/documentation/docs/en/2.0-rc/api/paddle/nn/layer/common/Dropout_en.html#dropout)中,通过mode参数来指定用哪种方式对神经元进行操作,\n\n> paddle.nn.Dropout(p=0.5, axis=None, mode=\"upscale_in_train”, name=None)\n\n主要参数如下:\n\n- p (float) :将输入节点置为0的概率,即丢弃概率,默认值:0.5。该参数对元素的丢弃概率是针对于每一个元素而言,而不是对所有的元素而言。举例说,假设矩阵内有12个数字,经过概率为0.5的dropout未必一定有6个零。\n\n- mode(str) :丢弃法的实现方式,有'downscale_in_infer'和'upscale_in_train'两种,默认是'upscale_in_train'。\n\n------\n**说明:**\n\n不同框架对于Dropout的默认处理方式可能不同,读者可以查看API详细了解。\n\n------\n\n下面这段程序展示了经过Dropout之后输出数据的形式。", "_____no_output_____" ] ], [ [ "# dropout操作\nimport paddle\nimport numpy as np\n\n# 设置随机数种子,这样可以保证每次运行结果一致\nnp.random.seed(100)\n# 创建数据[N, C, H, W],一般对应卷积层的输出\ndata1 = np.random.rand(2,3,3,3).astype('float32')\n# 创建数据[N, K],一般对应全连接层的输出\ndata2 = np.arange(1,13).reshape([-1, 3]).astype('float32')\n# 使用dropout作用在输入数据上\nx1 = paddle.to_tensor(data1)\n# downgrade_in_infer模式下\ndrop11 = paddle.nn.Dropout(p = 0.5, mode = 'downscale_in_infer')\ndroped_train11 = drop11(x1)\n# 切换到eval模式。在动态图模式下,使用eval()切换到求值模式,该模式禁用了dropout。\ndrop11.eval()\ndroped_eval11 = drop11(x1)\n# upscale_in_train模式下\ndrop12 = paddle.nn.Dropout(p = 0.5, mode = 'upscale_in_train')\ndroped_train12 = drop12(x1)\n# 切换到eval模式\ndrop12.eval()\ndroped_eval12 = drop12(x1)\n\nx2 = paddle.to_tensor(data2)\ndrop21 = paddle.nn.Dropout(p = 0.5, mode = 'downscale_in_infer')\ndroped_train21 = drop21(x2)\n# 切换到eval模式\ndrop21.eval()\ndroped_eval21 = drop21(x2)\ndrop22 = paddle.nn.Dropout(p = 0.5, mode = 'upscale_in_train')\ndroped_train22 = drop22(x2)\n# 切换到eval模式\ndrop22.eval()\ndroped_eval22 = drop22(x2)\n \nprint('x1 {}, \\n droped_train11 \\n {}, \\n droped_eval11 \\n {}'.format(data1, droped_train11.numpy(), droped_eval11.numpy()))\nprint('x1 {}, \\n droped_train12 \\n {}, \\n droped_eval12 \\n {}'.format(data1, droped_train12.numpy(), droped_eval12.numpy()))\nprint('x2 {}, 
\\n droped_train21 \\n {}, \\n droped_eval21 \\n {}'.format(data2, droped_train21.numpy(), droped_eval21.numpy()))\nprint('x2 {}, \\n droped_train22 \\n {}, \\n droped_eval22 \\n {}'.format(data2, droped_train22.numpy(), droped_eval22.numpy()))", "_____no_output_____" ] ], [ [ "从上述代码的输出可以发现,经过dropout之后,tensor中的某些元素变为了0,这个就是dropout实现的功能,通过随机将输入数据的元素置0,减弱了神经元节点间的联合适应性,增强模型的泛化能力。", "_____no_output_____" ], [ "# 小结\n\n学习完这些概念,您就具备了搭建卷积神经网络的基础。下一节,我们将应用这些基础模块,一起完成图像分类中的典型应用 — 医疗图像中的眼疾筛查任务的模型搭建。", "_____no_output_____" ], [ "# 作业\n\n## 1 计算卷积中一共有多少次乘法和加法操作\n\n输入数据形状是$[10, 3, 224, 224]$,卷积核$k_h = k_w = 3$,输出通道数为$64$,步幅$stride=1$,填充$p_h = p_w = 1$。\n\n则完成这样一个卷积,一共需要做多少次乘法和加法操作?\n\n- 提示\n\n先看输出一个像素点需要做多少次乘法和加法操作,然后再计算总共需要的操作次数。\n\n- 提交方式\n\n请回复乘法和加法操作的次数,例如:乘法1000,加法1000", "_____no_output_____" ], [ "## 2 计算网络层的输出数据和参数的形状\n\n网络结构定义如下面的代码所示,输入数据形状是$[10, 3, 224, 224]$,\n\n请分别计算每一层的输出数据形状,以及各层包含的参数形状", "_____no_output_____" ] ], [ [ "# 定义 SimpleNet 网络结构\nimport paddle\nfrom paddle.nn import Conv2D, MaxPool2D, Linear\nimport paddle.nn.functional as F\n\nclass SimpleNet(paddle.nn.Layer):\n    def __init__(self, num_classes=1):\n        super(SimpleNet, self).__init__()\n        self.conv1 = Conv2D(in_channels=3, out_channels=6, kernel_size=5, stride=1, padding=2)\n        self.max_pool1 = MaxPool2D(kernel_size=2, stride=2)\n        self.conv2 = Conv2D(in_channels=6, out_channels=16, kernel_size=5, stride=1, padding=2)\n        self.max_pool2 = MaxPool2D(kernel_size=2, stride=2)\n        self.fc1 = Linear(in_features=50176, out_features=64)\n        self.fc2 = Linear(in_features=64, out_features=num_classes)    \n\n    def forward(self, x):\n        x = self.conv1(x)\n        x = F.relu(x)\n        x = self.max_pool1(x)\n        x = self.conv2(x)\n        x = F.relu(x)\n        x = self.max_pool2(x)\n        x = paddle.reshape(x, [x.shape[0], -1])\n        x = self.fc1(x)\n        x = F.sigmoid(x)\n        x = self.fc2(x)\n        return x\n", "_____no_output_____" ] ], [ [ "- 提示,第一层卷积$conv1$,各项参数如下:\n\n$$C_{in} = 3, C_{out} = 6, k_h = k_w = 5, p_h = p_w = 2, stride = 1$$\n\n则卷积核权重参数$w$的形状是:$[C_{out}, C_{in}, k_h, k_w] = [6, 3, 5, 5]$,个数$6\\times3\\times5\\times5 = 450$\n\n偏置参数$b$的形状是:$[C_{out}]$,偏置参数的个数是6\n\n输出特征图的大小是:\n\n$$H_{out} = 224 + 2\\times2 - 5 + 1 = 224, \\ \\ \\ \\ \\ W_{out} = 224 + 2\\times2 - 5 + 1 = 224$$\n\n输出特征图的形状是$[N, C_{out}, H_{out}, W_{out}] = [10, 6, 224, 224]$\n\n请将下面的表格补充完整:\n\n| 名称 | w形状 | w参数个数 | b形状 | b参数个数 | 输出形状 |\n| :------: | :------: | :-----: | :-----: | :----: | :----: |\n| conv1 | [6,3,5,5] | 450 | [6] | 6 | [10, 6, 224, 224] |\n| pool1 | 无 | 无 | 无 | 无 | [10, 6, 112, 112] |\n| conv2 |  |  |  |  |  |\n| pool2 |  |  |  |  |  |\n| fc1 |  |  |  |  |  |\n| fc2 |  |  |  |  |  |\n\n- 提交方式:将表格截图发到讨论区\n\n\n", "_____no_output_____" ] ] ]
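[ [ [ "下面给出一个辅助函数的示例,按正文中的公式计算卷积输出特征图的尺寸;函数名 `conv_out_size` 仅为示意,读者可以用它检验作业中各层的输出形状。", "_____no_output_____" ] ], [ [ "# 按正文公式 H_out = (H + 2*p_h - k_h)/s_h + 1 计算输出特征图尺寸\ndef conv_out_size(h, w, kh, kw, ph=0, pw=0, sh=1, sw=1):\n    h_out = (h + 2 * ph - kh) // sh + 1\n    w_out = (w + 2 * pw - kw) // sw + 1\n    return h_out, w_out\n\n# 正文示例:H = W = 100, k = 3, p = 1, s = 2,输出应为 (50, 50)\nprint(conv_out_size(100, 100, 3, 3, ph=1, pw=1, sh=2, sw=2))", "(50, 50)\n" ] ] ]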
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
e7d1ff030ec4976c5a9ddb5e1d7e606a5eb8c856
331,021
ipynb
Jupyter Notebook
experiments/tl_1v2/wisig-oracle.run1.limited/trials/16/trial.ipynb
stevester94/csc500-notebooks
4c1b04c537fe233a75bed82913d9d84985a89177
[ "MIT" ]
null
null
null
experiments/tl_1v2/wisig-oracle.run1.limited/trials/16/trial.ipynb
stevester94/csc500-notebooks
4c1b04c537fe233a75bed82913d9d84985a89177
[ "MIT" ]
null
null
null
experiments/tl_1v2/wisig-oracle.run1.limited/trials/16/trial.ipynb
stevester94/csc500-notebooks
4c1b04c537fe233a75bed82913d9d84985a89177
[ "MIT" ]
null
null
null
88.43735
76,821
0.659076
[ [ [ "# Transfer Learning Template", "_____no_output_____" ] ], [ [ "%load_ext autoreload\n%autoreload 2\n%matplotlib inline\n\n \nimport os, json, sys, time, random\nimport numpy as np\nimport torch\nfrom torch.optim import Adam\nfrom easydict import EasyDict\nimport matplotlib.pyplot as plt\n\nfrom steves_models.steves_ptn import Steves_Prototypical_Network\n\nfrom steves_utils.lazy_iterable_wrapper import Lazy_Iterable_Wrapper\nfrom steves_utils.iterable_aggregator import Iterable_Aggregator\nfrom steves_utils.ptn_train_eval_test_jig import PTN_Train_Eval_Test_Jig\nfrom steves_utils.torch_sequential_builder import build_sequential\nfrom steves_utils.torch_utils import get_dataset_metrics, ptn_confusion_by_domain_over_dataloader\nfrom steves_utils.utils_v2 import (per_domain_accuracy_from_confusion, get_datasets_base_path)\nfrom steves_utils.PTN.utils import independent_accuracy_assesment\n\nfrom torch.utils.data import DataLoader\n\nfrom steves_utils.stratified_dataset.episodic_accessor import Episodic_Accessor_Factory\n\nfrom steves_utils.ptn_do_report import (\n get_loss_curve,\n get_results_table,\n get_parameters_table,\n get_domain_accuracies,\n)\n\nfrom steves_utils.transforms import get_chained_transform", "_____no_output_____" ] ], [ [ "# Allowed Parameters\nThese are allowed parameters, not defaults\nEach of these values need to be present in the injected parameters (the notebook will raise an exception if they are not present)\n\nPapermill uses the cell tag \"parameters\" to inject the real parameters below this cell.\nEnable tags to see what I mean", "_____no_output_____" ] ], [ [ "required_parameters = {\n \"experiment_name\",\n \"lr\",\n \"device\",\n \"seed\",\n \"dataset_seed\",\n \"n_shot\",\n \"n_query\",\n \"n_way\",\n \"train_k_factor\",\n \"val_k_factor\",\n \"test_k_factor\",\n \"n_epoch\",\n \"patience\",\n \"criteria_for_best\",\n \"x_net\",\n \"datasets\",\n \"torch_default_dtype\",\n \"NUM_LOGS_PER_EPOCH\",\n \"BEST_MODEL_PATH\",\n \"x_shape\",\n}", "_____no_output_____" ], [ "from steves_utils.CORES.utils import (\n ALL_NODES,\n ALL_NODES_MINIMUM_1000_EXAMPLES,\n ALL_DAYS\n)\n\nfrom steves_utils.ORACLE.utils_v2 import (\n ALL_DISTANCES_FEET_NARROWED,\n ALL_RUNS,\n ALL_SERIAL_NUMBERS,\n)\n\nstandalone_parameters = {}\nstandalone_parameters[\"experiment_name\"] = \"STANDALONE PTN\"\nstandalone_parameters[\"lr\"] = 0.001\nstandalone_parameters[\"device\"] = \"cuda\"\n\nstandalone_parameters[\"seed\"] = 1337\nstandalone_parameters[\"dataset_seed\"] = 1337\n\nstandalone_parameters[\"n_way\"] = 8\nstandalone_parameters[\"n_shot\"] = 3\nstandalone_parameters[\"n_query\"] = 2\nstandalone_parameters[\"train_k_factor\"] = 1\nstandalone_parameters[\"val_k_factor\"] = 2\nstandalone_parameters[\"test_k_factor\"] = 2\n\n\nstandalone_parameters[\"n_epoch\"] = 50\n\nstandalone_parameters[\"patience\"] = 10\nstandalone_parameters[\"criteria_for_best\"] = \"source_loss\"\n\nstandalone_parameters[\"datasets\"] = [\n {\n \"labels\": ALL_SERIAL_NUMBERS,\n \"domains\": ALL_DISTANCES_FEET_NARROWED,\n \"num_examples_per_domain_per_label\": 100,\n \"pickle_path\": os.path.join(get_datasets_base_path(), \"oracle.Run1_framed_2000Examples_stratified_ds.2022A.pkl\"),\n \"source_or_target_dataset\": \"source\",\n \"x_transforms\": [\"unit_mag\", \"minus_two\"],\n \"episode_transforms\": [],\n \"domain_prefix\": \"ORACLE_\"\n },\n {\n \"labels\": ALL_NODES,\n \"domains\": ALL_DAYS,\n \"num_examples_per_domain_per_label\": 100,\n \"pickle_path\": os.path.join(get_datasets_base_path(), 
\"cores.stratified_ds.2022A.pkl\"),\n \"source_or_target_dataset\": \"target\",\n \"x_transforms\": [\"unit_power\", \"times_zero\"],\n \"episode_transforms\": [],\n \"domain_prefix\": \"CORES_\"\n } \n]\n\nstandalone_parameters[\"torch_default_dtype\"] = \"torch.float32\" \n\n\n\nstandalone_parameters[\"x_net\"] = [\n {\"class\": \"nnReshape\", \"kargs\": {\"shape\":[-1, 1, 2, 256]}},\n {\"class\": \"Conv2d\", \"kargs\": { \"in_channels\":1, \"out_channels\":256, \"kernel_size\":(1,7), \"bias\":False, \"padding\":(0,3), },},\n {\"class\": \"ReLU\", \"kargs\": {\"inplace\": True}},\n {\"class\": \"BatchNorm2d\", \"kargs\": {\"num_features\":256}},\n\n {\"class\": \"Conv2d\", \"kargs\": { \"in_channels\":256, \"out_channels\":80, \"kernel_size\":(2,7), \"bias\":True, \"padding\":(0,3), },},\n {\"class\": \"ReLU\", \"kargs\": {\"inplace\": True}},\n {\"class\": \"BatchNorm2d\", \"kargs\": {\"num_features\":80}},\n {\"class\": \"Flatten\", \"kargs\": {}},\n\n {\"class\": \"Linear\", \"kargs\": {\"in_features\": 80*256, \"out_features\": 256}}, # 80 units per IQ pair\n {\"class\": \"ReLU\", \"kargs\": {\"inplace\": True}},\n {\"class\": \"BatchNorm1d\", \"kargs\": {\"num_features\":256}},\n\n {\"class\": \"Linear\", \"kargs\": {\"in_features\": 256, \"out_features\": 256}},\n]\n\n# Parameters relevant to results\n# These parameters will basically never need to change\nstandalone_parameters[\"NUM_LOGS_PER_EPOCH\"] = 10\nstandalone_parameters[\"BEST_MODEL_PATH\"] = \"./best_model.pth\"\n\n\n\n\n", "_____no_output_____" ], [ "# Parameters\nparameters = {\n \"experiment_name\": \"tl_1v2:wisig-oracle.run1.limited\",\n \"device\": \"cuda\",\n \"lr\": 0.0001,\n \"n_shot\": 3,\n \"n_query\": 2,\n \"train_k_factor\": 3,\n \"val_k_factor\": 2,\n \"test_k_factor\": 2,\n \"torch_default_dtype\": \"torch.float32\",\n \"n_epoch\": 50,\n \"patience\": 3,\n \"criteria_for_best\": \"target_accuracy\",\n \"x_net\": [\n {\"class\": \"nnReshape\", \"kargs\": {\"shape\": [-1, 1, 2, 256]}},\n {\n \"class\": \"Conv2d\",\n \"kargs\": {\n \"in_channels\": 1,\n \"out_channels\": 256,\n \"kernel_size\": [1, 7],\n \"bias\": False,\n \"padding\": [0, 3],\n },\n },\n {\"class\": \"ReLU\", \"kargs\": {\"inplace\": True}},\n {\"class\": \"BatchNorm2d\", \"kargs\": {\"num_features\": 256}},\n {\n \"class\": \"Conv2d\",\n \"kargs\": {\n \"in_channels\": 256,\n \"out_channels\": 80,\n \"kernel_size\": [2, 7],\n \"bias\": True,\n \"padding\": [0, 3],\n },\n },\n {\"class\": \"ReLU\", \"kargs\": {\"inplace\": True}},\n {\"class\": \"BatchNorm2d\", \"kargs\": {\"num_features\": 80}},\n {\"class\": \"Flatten\", \"kargs\": {}},\n {\"class\": \"Linear\", \"kargs\": {\"in_features\": 20480, \"out_features\": 256}},\n {\"class\": \"ReLU\", \"kargs\": {\"inplace\": True}},\n {\"class\": \"BatchNorm1d\", \"kargs\": {\"num_features\": 256}},\n {\"class\": \"Linear\", \"kargs\": {\"in_features\": 256, \"out_features\": 256}},\n ],\n \"NUM_LOGS_PER_EPOCH\": 10,\n \"BEST_MODEL_PATH\": \"./best_model.pth\",\n \"n_way\": 16,\n \"datasets\": [\n {\n \"labels\": [\n \"1-10\",\n \"1-12\",\n \"1-14\",\n \"1-16\",\n \"1-18\",\n \"1-19\",\n \"1-8\",\n \"10-11\",\n \"10-17\",\n \"10-4\",\n \"10-7\",\n \"11-1\",\n \"11-10\",\n \"11-19\",\n \"11-20\",\n \"11-4\",\n \"11-7\",\n \"12-19\",\n \"12-20\",\n \"12-7\",\n \"13-14\",\n \"13-18\",\n \"13-19\",\n \"13-20\",\n \"13-3\",\n \"13-7\",\n \"14-10\",\n \"14-11\",\n \"14-12\",\n \"14-13\",\n \"14-14\",\n \"14-19\",\n \"14-20\",\n \"14-7\",\n \"14-8\",\n \"14-9\",\n \"15-1\",\n \"15-19\",\n \"15-6\",\n 
\"16-1\",\n \"16-16\",\n \"16-19\",\n \"16-20\",\n \"17-10\",\n \"17-11\",\n \"18-1\",\n \"18-10\",\n \"18-11\",\n \"18-12\",\n \"18-13\",\n \"18-14\",\n \"18-15\",\n \"18-16\",\n \"18-17\",\n \"18-19\",\n \"18-2\",\n \"18-20\",\n \"18-4\",\n \"18-5\",\n \"18-7\",\n \"18-8\",\n \"18-9\",\n \"19-1\",\n \"19-10\",\n \"19-11\",\n \"19-12\",\n \"19-13\",\n \"19-14\",\n \"19-15\",\n \"19-19\",\n \"19-2\",\n \"19-20\",\n \"19-3\",\n \"19-4\",\n \"19-6\",\n \"19-7\",\n \"19-8\",\n \"19-9\",\n \"2-1\",\n \"2-13\",\n \"2-15\",\n \"2-3\",\n \"2-4\",\n \"2-5\",\n \"2-6\",\n \"2-7\",\n \"2-8\",\n \"20-1\",\n \"20-12\",\n \"20-14\",\n \"20-15\",\n \"20-16\",\n \"20-18\",\n \"20-19\",\n \"20-20\",\n \"20-3\",\n \"20-4\",\n \"20-5\",\n \"20-7\",\n \"20-8\",\n \"3-1\",\n \"3-13\",\n \"3-18\",\n \"3-2\",\n \"3-8\",\n \"4-1\",\n \"4-10\",\n \"4-11\",\n \"5-1\",\n \"5-5\",\n \"6-1\",\n \"6-15\",\n \"6-6\",\n \"7-10\",\n \"7-11\",\n \"7-12\",\n \"7-13\",\n \"7-14\",\n \"7-7\",\n \"7-8\",\n \"7-9\",\n \"8-1\",\n \"8-13\",\n \"8-14\",\n \"8-18\",\n \"8-20\",\n \"8-3\",\n \"8-8\",\n \"9-1\",\n \"9-7\",\n ],\n \"domains\": [1, 2, 3, 4],\n \"num_examples_per_domain_per_label\": -1,\n \"pickle_path\": \"/root/csc500-main/datasets/wisig.node3-19.stratified_ds.2022A.pkl\",\n \"source_or_target_dataset\": \"target\",\n \"x_transforms\": [\"unit_mag\"],\n \"episode_transforms\": [],\n \"domain_prefix\": \"Wisig_\",\n },\n {\n \"labels\": [\n \"3123D52\",\n \"3123D65\",\n \"3123D79\",\n \"3123D80\",\n \"3123D54\",\n \"3123D70\",\n \"3123D7B\",\n \"3123D89\",\n \"3123D58\",\n \"3123D76\",\n \"3123D7D\",\n \"3123EFE\",\n \"3123D64\",\n \"3123D78\",\n \"3123D7E\",\n \"3124E4A\",\n ],\n \"domains\": [32, 38, 8, 44, 14, 50, 20, 26],\n \"num_examples_per_domain_per_label\": 2000,\n \"pickle_path\": \"/root/csc500-main/datasets/oracle.Run1_10kExamples_stratified_ds.2022A.pkl\",\n \"source_or_target_dataset\": \"source\",\n \"x_transforms\": [\"unit_mag\"],\n \"episode_transforms\": [],\n \"domain_prefix\": \"ORACLE.run1\",\n },\n ],\n \"dataset_seed\": 154325,\n \"seed\": 154325,\n}\n", "_____no_output_____" ], [ "# Set this to True if you want to run this template directly\nSTANDALONE = False\nif STANDALONE:\n print(\"parameters not injected, running with standalone_parameters\")\n parameters = standalone_parameters\n\nif not 'parameters' in locals() and not 'parameters' in globals():\n raise Exception(\"Parameter injection failed\")\n\n#Use an easy dict for all the parameters\np = EasyDict(parameters)\n\nif \"x_shape\" not in p:\n p.x_shape = [2,256] # Default to this if we dont supply x_shape\n\n\nsupplied_keys = set(p.keys())\n\nif supplied_keys != required_parameters:\n print(\"Parameters are incorrect\")\n if len(supplied_keys - required_parameters)>0: print(\"Shouldn't have:\", str(supplied_keys - required_parameters))\n if len(required_parameters - supplied_keys)>0: print(\"Need to have:\", str(required_parameters - supplied_keys))\n raise RuntimeError(\"Parameters are incorrect\")", "_____no_output_____" ], [ "###################################\n# Set the RNGs and make it all deterministic\n###################################\nnp.random.seed(p.seed)\nrandom.seed(p.seed)\ntorch.manual_seed(p.seed)\n\ntorch.use_deterministic_algorithms(True) ", "_____no_output_____" ], [ "###########################################\n# The stratified datasets honor this\n###########################################\ntorch.set_default_dtype(eval(p.torch_default_dtype))", "_____no_output_____" ], [ "###################################\n# 
Build the network(s)\n# Note: It's critical to do this AFTER setting the RNG\n###################################\nx_net = build_sequential(p.x_net)", "_____no_output_____" ], [ "start_time_secs = time.time()", "_____no_output_____" ], [ "p.domains_source = []\np.domains_target = []\n\n\ntrain_original_source = []\nval_original_source = []\ntest_original_source = []\n\ntrain_original_target = []\nval_original_target = []\ntest_original_target = []", "_____no_output_____" ], [ "# global_x_transform_func = lambda x: normalize(x.to(torch.get_default_dtype()), \"unit_power\") # unit_power, unit_mag\n# global_x_transform_func = lambda x: normalize(x, \"unit_power\") # unit_power, unit_mag", "_____no_output_____" ], [ "def add_dataset(\n labels,\n domains,\n pickle_path,\n x_transforms,\n episode_transforms,\n domain_prefix,\n num_examples_per_domain_per_label,\n source_or_target_dataset:str,\n iterator_seed=p.seed,\n dataset_seed=p.dataset_seed,\n n_shot=p.n_shot,\n n_way=p.n_way,\n n_query=p.n_query,\n train_val_test_k_factors=(p.train_k_factor,p.val_k_factor,p.test_k_factor),\n):\n \n if x_transforms == []: x_transform = None\n else: x_transform = get_chained_transform(x_transforms)\n \n if episode_transforms == []: episode_transform = None\n else: raise Exception(\"episode_transforms not implemented\")\n \n episode_transform = lambda tup, _prefix=domain_prefix: (_prefix + str(tup[0]), tup[1])\n\n\n eaf = Episodic_Accessor_Factory(\n labels=labels,\n domains=domains,\n num_examples_per_domain_per_label=num_examples_per_domain_per_label,\n iterator_seed=iterator_seed,\n dataset_seed=dataset_seed,\n n_shot=n_shot,\n n_way=n_way,\n n_query=n_query,\n train_val_test_k_factors=train_val_test_k_factors,\n pickle_path=pickle_path,\n x_transform_func=x_transform,\n )\n\n train, val, test = eaf.get_train(), eaf.get_val(), eaf.get_test()\n train = Lazy_Iterable_Wrapper(train, episode_transform)\n val = Lazy_Iterable_Wrapper(val, episode_transform)\n test = Lazy_Iterable_Wrapper(test, episode_transform)\n\n if source_or_target_dataset==\"source\":\n train_original_source.append(train)\n val_original_source.append(val)\n test_original_source.append(test)\n\n p.domains_source.extend(\n [domain_prefix + str(u) for u in domains]\n )\n elif source_or_target_dataset==\"target\":\n train_original_target.append(train)\n val_original_target.append(val)\n test_original_target.append(test)\n p.domains_target.extend(\n [domain_prefix + str(u) for u in domains]\n )\n else:\n raise Exception(f\"invalid source_or_target_dataset: {source_or_target_dataset}\")\n ", "_____no_output_____" ], [ "for ds in p.datasets:\n add_dataset(**ds)", "_____no_output_____" ], [ "# from steves_utils.CORES.utils import (\n# ALL_NODES,\n# ALL_NODES_MINIMUM_1000_EXAMPLES,\n# ALL_DAYS\n# )\n\n# add_dataset(\n# labels=ALL_NODES,\n# domains = ALL_DAYS,\n# num_examples_per_domain_per_label=100,\n# pickle_path=os.path.join(get_datasets_base_path(), \"cores.stratified_ds.2022A.pkl\"),\n# source_or_target_dataset=\"target\",\n# x_transform_func=global_x_transform_func,\n# domain_modifier=lambda u: f\"cores_{u}\"\n# )", "_____no_output_____" ], [ "# from steves_utils.ORACLE.utils_v2 import (\n# ALL_DISTANCES_FEET,\n# ALL_RUNS,\n# ALL_SERIAL_NUMBERS,\n# )\n\n\n# add_dataset(\n# labels=ALL_SERIAL_NUMBERS,\n# domains = list(set(ALL_DISTANCES_FEET) - {2,62}),\n# num_examples_per_domain_per_label=100,\n# pickle_path=os.path.join(get_datasets_base_path(), \"oracle.Run2_framed_2000Examples_stratified_ds.2022A.pkl\"),\n# 
source_or_target_dataset=\"source\",\n# x_transform_func=global_x_transform_func,\n# domain_modifier=lambda u: f\"oracle1_{u}\"\n# )\n", "_____no_output_____" ], [ "# from steves_utils.ORACLE.utils_v2 import (\n# ALL_DISTANCES_FEET,\n# ALL_RUNS,\n# ALL_SERIAL_NUMBERS,\n# )\n\n\n# add_dataset(\n# labels=ALL_SERIAL_NUMBERS,\n# domains = list(set(ALL_DISTANCES_FEET) - {2,62,56}),\n# num_examples_per_domain_per_label=100,\n# pickle_path=os.path.join(get_datasets_base_path(), \"oracle.Run2_framed_2000Examples_stratified_ds.2022A.pkl\"),\n# source_or_target_dataset=\"source\",\n# x_transform_func=global_x_transform_func,\n# domain_modifier=lambda u: f\"oracle2_{u}\"\n# )", "_____no_output_____" ], [ "# add_dataset(\n# labels=list(range(19)),\n# domains = [0,1,2],\n# num_examples_per_domain_per_label=100,\n# pickle_path=os.path.join(get_datasets_base_path(), \"metehan.stratified_ds.2022A.pkl\"),\n# source_or_target_dataset=\"target\",\n# x_transform_func=global_x_transform_func,\n# domain_modifier=lambda u: f\"met_{u}\"\n# )", "_____no_output_____" ], [ "# # from steves_utils.wisig.utils import (\n# # ALL_NODES_MINIMUM_100_EXAMPLES,\n# # ALL_NODES_MINIMUM_500_EXAMPLES,\n# # ALL_NODES_MINIMUM_1000_EXAMPLES,\n# # ALL_DAYS\n# # )\n\n# import steves_utils.wisig.utils as wisig\n\n\n# add_dataset(\n# labels=wisig.ALL_NODES_MINIMUM_100_EXAMPLES,\n# domains = wisig.ALL_DAYS,\n# num_examples_per_domain_per_label=100,\n# pickle_path=os.path.join(get_datasets_base_path(), \"wisig.node3-19.stratified_ds.2022A.pkl\"),\n# source_or_target_dataset=\"target\",\n# x_transform_func=global_x_transform_func,\n# domain_modifier=lambda u: f\"wisig_{u}\"\n# )", "_____no_output_____" ], [ "###################################\n# Build the dataset\n###################################\ntrain_original_source = Iterable_Aggregator(train_original_source, p.seed)\nval_original_source = Iterable_Aggregator(val_original_source, p.seed)\ntest_original_source = Iterable_Aggregator(test_original_source, p.seed)\n\n\ntrain_original_target = Iterable_Aggregator(train_original_target, p.seed)\nval_original_target = Iterable_Aggregator(val_original_target, p.seed)\ntest_original_target = Iterable_Aggregator(test_original_target, p.seed)\n\n# For CNN We only use X and Y. And we only train on the source.\n# Properly form the data using a transform lambda and Lazy_Iterable_Wrapper. 
Finally wrap them in a dataloader\n\ntransform_lambda = lambda ex: ex[1] # Original is (<domain>, <episode>) so we strip down to episode only\n\ntrain_processed_source = Lazy_Iterable_Wrapper(train_original_source, transform_lambda)\nval_processed_source = Lazy_Iterable_Wrapper(val_original_source, transform_lambda)\ntest_processed_source = Lazy_Iterable_Wrapper(test_original_source, transform_lambda)\n\ntrain_processed_target = Lazy_Iterable_Wrapper(train_original_target, transform_lambda)\nval_processed_target = Lazy_Iterable_Wrapper(val_original_target, transform_lambda)\ntest_processed_target = Lazy_Iterable_Wrapper(test_original_target, transform_lambda)\n\ndatasets = EasyDict({\n \"source\": {\n \"original\": {\"train\":train_original_source, \"val\":val_original_source, \"test\":test_original_source},\n \"processed\": {\"train\":train_processed_source, \"val\":val_processed_source, \"test\":test_processed_source}\n },\n \"target\": {\n \"original\": {\"train\":train_original_target, \"val\":val_original_target, \"test\":test_original_target},\n \"processed\": {\"train\":train_processed_target, \"val\":val_processed_target, \"test\":test_processed_target}\n },\n})", "_____no_output_____" ], [ "from steves_utils.transforms import get_average_magnitude, get_average_power\n\nprint(set([u for u,_ in val_original_source]))\nprint(set([u for u,_ in val_original_target]))\n\ns_x, s_y, q_x, q_y, _ = next(iter(train_processed_source))\nprint(s_x)\n\n# for ds in [\n# train_processed_source,\n# val_processed_source,\n# test_processed_source,\n# train_processed_target,\n# val_processed_target,\n# test_processed_target\n# ]:\n# for s_x, s_y, q_x, q_y, _ in ds:\n# for X in (s_x, q_x):\n# for x in X:\n# assert np.isclose(get_average_magnitude(x.numpy()), 1.0)\n# assert np.isclose(get_average_power(x.numpy()), 1.0)\n ", "{'ORACLE.run150', 'ORACLE.run114', 'ORACLE.run138', 'ORACLE.run126', 'ORACLE.run132', 'ORACLE.run18', 'ORACLE.run144', 'ORACLE.run120'}\n" ], [ "###################################\n# Build the model\n###################################\n# easfsl only wants a tuple for the shape\nmodel = Steves_Prototypical_Network(x_net, device=p.device, x_shape=tuple(p.x_shape))\noptimizer = Adam(params=model.parameters(), lr=p.lr)", "(2, 256)\n" ], [ "###################################\n# train\n###################################\njig = PTN_Train_Eval_Test_Jig(model, p.BEST_MODEL_PATH, p.device)\n\njig.train(\n train_iterable=datasets.source.processed.train,\n source_val_iterable=datasets.source.processed.val,\n target_val_iterable=datasets.target.processed.val,\n num_epochs=p.n_epoch,\n num_logs_per_epoch=p.NUM_LOGS_PER_EPOCH,\n patience=p.patience,\n optimizer=optimizer,\n criteria_for_best=p.criteria_for_best,\n)", "epoch: 1, [batch: 1 / 6720], examples_per_second: 32.3076, train_label_loss: 2.7896, \n" ], [ "total_experiment_time_secs = time.time() - start_time_secs", "_____no_output_____" ], [ "###################################\n# Evaluate the model\n###################################\nsource_test_label_accuracy, source_test_label_loss = jig.test(datasets.source.processed.test)\ntarget_test_label_accuracy, target_test_label_loss = jig.test(datasets.target.processed.test)\n\nsource_val_label_accuracy, source_val_label_loss = jig.test(datasets.source.processed.val)\ntarget_val_label_accuracy, target_val_label_loss = jig.test(datasets.target.processed.val)\n\nhistory = jig.get_history()\n\ntotal_epochs_trained = len(history[\"epoch_indices\"])\n\nval_dl = 
Iterable_Aggregator((datasets.source.original.val,datasets.target.original.val))\n\nconfusion = ptn_confusion_by_domain_over_dataloader(model, p.device, val_dl)\nper_domain_accuracy = per_domain_accuracy_from_confusion(confusion)\n\n# Add a key to per_domain_accuracy for if it was a source domain\nfor domain, accuracy in per_domain_accuracy.items():\n per_domain_accuracy[domain] = {\n \"accuracy\": accuracy,\n \"source?\": domain in p.domains_source\n }\n\n# Do an independent accuracy assesment JUST TO BE SURE!\n# _source_test_label_accuracy = independent_accuracy_assesment(model, datasets.source.processed.test, p.device)\n# _target_test_label_accuracy = independent_accuracy_assesment(model, datasets.target.processed.test, p.device)\n# _source_val_label_accuracy = independent_accuracy_assesment(model, datasets.source.processed.val, p.device)\n# _target_val_label_accuracy = independent_accuracy_assesment(model, datasets.target.processed.val, p.device)\n\n# assert(_source_test_label_accuracy == source_test_label_accuracy)\n# assert(_target_test_label_accuracy == target_test_label_accuracy)\n# assert(_source_val_label_accuracy == source_val_label_accuracy)\n# assert(_target_val_label_accuracy == target_val_label_accuracy)\n\nexperiment = {\n \"experiment_name\": p.experiment_name,\n \"parameters\": dict(p),\n \"results\": {\n \"source_test_label_accuracy\": source_test_label_accuracy,\n \"source_test_label_loss\": source_test_label_loss,\n \"target_test_label_accuracy\": target_test_label_accuracy,\n \"target_test_label_loss\": target_test_label_loss,\n \"source_val_label_accuracy\": source_val_label_accuracy,\n \"source_val_label_loss\": source_val_label_loss,\n \"target_val_label_accuracy\": target_val_label_accuracy,\n \"target_val_label_loss\": target_val_label_loss,\n \"total_epochs_trained\": total_epochs_trained,\n \"total_experiment_time_secs\": total_experiment_time_secs,\n \"confusion\": confusion,\n \"per_domain_accuracy\": per_domain_accuracy,\n },\n \"history\": history,\n \"dataset_metrics\": get_dataset_metrics(datasets, \"ptn\"),\n}", "_____no_output_____" ], [ "ax = get_loss_curve(experiment)\nplt.show()", "_____no_output_____" ], [ "get_results_table(experiment)", "_____no_output_____" ], [ "get_domain_accuracies(experiment)", "_____no_output_____" ], [ "print(\"Source Test Label Accuracy:\", experiment[\"results\"][\"source_test_label_accuracy\"], \"Target Test Label Accuracy:\", experiment[\"results\"][\"target_test_label_accuracy\"])\nprint(\"Source Val Label Accuracy:\", experiment[\"results\"][\"source_val_label_accuracy\"], \"Target Val Label Accuracy:\", experiment[\"results\"][\"target_val_label_accuracy\"])", "Source Test Label Accuracy: 0.5931640625 Target Test Label Accuracy: 0.68916015625\nSource Val Label Accuracy: 0.5961588541666667 Target Val Label Accuracy: 0.6913441145281018\n" ], [ "json.dumps(experiment)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7d209f152fcbc94b0396b55920b88e3353453ae
49,790
ipynb
Jupyter Notebook
data_cleaning/03-parsing-dates.ipynb
drakearch/kaggle-courses
206244caf38c98e9b6d2f37cbeb8b045460723c9
[ "MIT" ]
27
2021-08-15T01:06:40.000Z
2022-03-18T02:26:29.000Z
data_cleaning/03-parsing-dates.ipynb
drakessn/Kaggle-Courses
db9fb7a73743a22f35d65ec435d9b5dfd89fc5a7
[ "MIT" ]
17
2019-12-29T23:33:28.000Z
2020-05-04T00:07:59.000Z
data_cleaning/03-parsing-dates.ipynb
drakessn/Kaggle-Courses
db9fb7a73743a22f35d65ec435d9b5dfd89fc5a7
[ "MIT" ]
11
2021-08-16T16:07:53.000Z
2022-03-27T02:55:40.000Z
36.369613
7,352
0.555011
[ [ [ "**This notebook is an exercise in the [Data Cleaning](https://www.kaggle.com/learn/data-cleaning) course. You can reference the tutorial at [this link](https://www.kaggle.com/alexisbcook/parsing-dates).**\n\n---\n", "_____no_output_____" ], [ "In this exercise, you'll apply what you learned in the **Parsing dates** tutorial.\n\n# Setup\n\nThe questions below will give you feedback on your work. Run the following cell to set up the feedback system.", "_____no_output_____" ] ], [ [ "from learntools.core import binder\nbinder.bind(globals())\nfrom learntools.data_cleaning.ex3 import *\nprint(\"Setup Complete\")", "Setup Complete\n" ] ], [ [ "# Get our environment set up\n\nThe first thing we'll need to do is load in the libraries and dataset we'll be using. We'll be working with a dataset containing information on earthquakes that occured between 1965 and 2016.", "_____no_output_____" ] ], [ [ "# modules we'll use\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport datetime\n\n# read in our data\nearthquakes = pd.read_csv(\"../input/earthquake-database/database.csv\")\n\n# set seed for reproducibility\nnp.random.seed(0)", "_____no_output_____" ] ], [ [ "# 1) Check the data type of our date column\n\nYou'll be working with the \"Date\" column from the `earthquakes` dataframe. Investigate this column now: does it look like it contains dates? What is the dtype of the column?", "_____no_output_____" ] ], [ [ "# TODO: Your code here!\nearthquakes['Date'].dtype", "_____no_output_____" ] ], [ [ "Once you have answered the question above, run the code cell below to get credit for your work.", "_____no_output_____" ] ], [ [ "# Check your answer (Run this code cell to receive credit!)\nq1.check()", "_____no_output_____" ], [ "# Line below will give you a hint\n#q1.hint()", "_____no_output_____" ] ], [ [ "# 2) Convert our date columns to datetime\n\nMost of the entries in the \"Date\" column follow the same format: \"month/day/four-digit year\". However, the entry at index 3378 follows a completely different pattern. Run the code cell below to see this.", "_____no_output_____" ] ], [ [ "earthquakes[3378:3383]", "_____no_output_____" ] ], [ [ "This does appear to be an issue with data entry: ideally, all entries in the column have the same format. We can get an idea of how widespread this issue is by checking the length of each entry in the \"Date\" column.", "_____no_output_____" ] ], [ [ "date_lengths = earthquakes.Date.str.len()\ndate_lengths.value_counts()", "_____no_output_____" ] ], [ [ "Looks like there are two more rows that has a date in a different format. Run the code cell below to obtain the indices corresponding to those rows and print the data.", "_____no_output_____" ] ], [ [ "indices = np.where([date_lengths == 24])[1]\nprint('Indices with corrupted data:', indices)\nearthquakes.loc[indices]", "Indices with corrupted data: [ 3378 7512 20650]\n" ] ], [ [ "Given all of this information, it's your turn to create a new column \"date_parsed\" in the `earthquakes` dataset that has correctly parsed dates in it. \n\n**Note**: When completing this problem, you are allowed to (but are not required to) amend the entries in the \"Date\" and \"Time\" columns. 
Do not remove any rows from the dataset.", "_____no_output_____" ] ], [ [ "# TODO: Your code here\ndate_format = '%m/%d/%Y'\nearthquakes.loc[indices,'Date'] = pd.to_datetime(earthquakes.loc[indices,'Date']) \\\n .dt.strftime(date_format)\nearthquakes['date_parsed'] = pd.to_datetime(earthquakes['Date'])\n# Check your answer\nq2.check()", "_____no_output_____" ], [ "# Lines below will give you a hint or solution code\n#q2.hint()\n#q2.solution()", "_____no_output_____" ] ], [ [ "# 3) Select the day of the month\n\nCreate a Pandas Series `day_of_month_earthquakes` containing the day of the month from the \"date_parsed\" column.", "_____no_output_____" ] ], [ [ "# try to get the day of the month from the date column\nday_of_month_earthquakes = earthquakes['date_parsed'].dt.day\n\n# Check your answer\nq3.check()", "_____no_output_____" ], [ "# Lines below will give you a hint or solution code\n#q3.hint()\n#q3.solution()", "_____no_output_____" ] ], [ [ "# 4) Plot the day of the month to check the date parsing\n\nPlot the days of the month from your earthquake dataset.", "_____no_output_____" ] ], [ [ "# TODO: Your code here!\nsns.displot(day_of_month_earthquakes, kde=False, bins=31);", "_____no_output_____" ] ], [ [ "Does the graph make sense to you?", "_____no_output_____" ] ], [ [ "# Check your answer (Run this code cell to receive credit!)\nq4.check()", "_____no_output_____" ], [ "# Line below will give you a hint\n#q4.hint()", "_____no_output_____" ] ], [ [ "# (Optional) Bonus Challenge\n\nFor an extra challenge, you'll work with a [Smithsonian dataset](https://www.kaggle.com/smithsonian/volcanic-eruptions) that documents Earth's volcanoes and their eruptive history over the past 10,000 years \n\nRun the next code cell to load the data.", "_____no_output_____" ] ], [ [ "volcanos = pd.read_csv(\"../input/volcanic-eruptions/database.csv\")", "_____no_output_____" ] ], [ [ "Try parsing the column \"Last Known Eruption\" from the `volcanos` dataframe. This column contains a mixture of text (\"Unknown\") and years both before the common era (BCE, also known as BC) and in the common era (CE, also known as AD).", "_____no_output_____" ] ], [ [ "volcanos['Last Known Eruption'].sample(5)", "_____no_output_____" ] ], [ [ "# (Optional) More practice\n\nIf you're interested in graphing time series, [check out this tutorial](https://www.kaggle.com/residentmario/time-series-plotting-optional).\n\nYou can also look into passing columns that you know have dates in them the `parse_dates` argument in `read_csv`. (The documention [is here](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_csv.html).) Do note that this method can be very slow, but depending on your needs it may sometimes be handy to use.\n\n# Keep going\n\nIn the next lesson, learn how to [**work with character encodings**](https://www.kaggle.com/alexisbcook/character-encodings).", "_____no_output_____" ], [ "---\n\n\n\n\n*Have questions or comments? Visit the [Learn Discussion forum](https://www.kaggle.com/learn-forum/172650) to chat with other Learners.*", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
e7d2111709cbbb0add6deb90056f2d780ec0f66d
634,068
ipynb
Jupyter Notebook
rllib_industry_webinar_20211201/demo_dec1.ipynb
sven1977/rllib_tutorials
a9e872cbe3ca3a13a30090a96ac2acb242fe0afc
[ "Apache-2.0" ]
36
2021-06-11T10:01:07.000Z
2022-03-31T05:44:02.000Z
rllib_industry_webinar_20211201/demo_dec1.ipynb
sven1977/rllib_tutorials
a9e872cbe3ca3a13a30090a96ac2acb242fe0afc
[ "Apache-2.0" ]
1
2021-10-08T14:19:58.000Z
2021-10-08T14:19:58.000Z
rllib_industry_webinar_20211201/demo_dec1.ipynb
sven1977/rllib_tutorials
a9e872cbe3ca3a13a30090a96ac2acb242fe0afc
[ "Apache-2.0" ]
15
2021-06-24T19:41:28.000Z
2022-03-31T00:52:45.000Z
164.821419
4,508
0.64582
[ [ [ "# Third party imports.\nimport gym\nfrom gym.spaces import Discrete, MultiDiscrete\nfrom ipywidgets import Output\nfrom IPython import display\nimport numpy as np\nimport os\nfrom starlette.requests import Request\nimport time\n\n# Ray imports.\nimport ray\nfrom ray.rllib.agents.ppo import PPOTrainer\nfrom ray.rllib.env.multi_agent_env import MultiAgentEnv\nfrom ray.rllib.policy.policy import PolicySpec\nfrom ray import serve\nfrom ray import tune", "_____no_output_____" ] ], [ [ "### Running on Anyscale\n\nLet's connect to an existing 1GPU/16CPUs cluster via `ray.init(address=...)`.", "_____no_output_____" ] ], [ [ "ray.init(\n # Connecting to an existing (and running) cluster (\"cluster-12\" in my account).\n address=\"anyscale://cluster-12\",\n\n # This will upload this directory to Anyscale so that the code can be run on cluster.\n project_dir=\".\",\n \n #cloud=\"anyscale_default_cloud\",\n \n # Our Python dependencies, e.g. tensorflow\n # (make sure everything is available on the cluster).\n runtime_env={\"pip\": \"./requirements.txt\"}\n)", "\u001b[1m\u001b[36m(anyscale +0.1s)\u001b[0m Loaded Anyscale authentication token from ~/.anyscale/credentials.json\n\u001b[1m\u001b[36m(anyscale +0.1s)\u001b[0m Loaded Anyscale authentication token from ~/.anyscale/credentials.json\n\u001b[1m\u001b[36m(anyscale +0.9s)\u001b[0m .anyscale.yaml found in project_dir. Directory is attached to a project.\n\u001b[1m\u001b[36m(anyscale +1.7s)\u001b[0m Using project (name: cuj_rllib, project_dir: /Users/sven/Dropbox/Projects/anyscale_projects/cuj-rl-in-production, id: prj_84JWkW5F1TqLJwhSqLDadyML).\n\u001b[1m\u001b[36m(anyscale +3.1s)\u001b[0m cluster cluster-12 is currently running, the cluster will not be restarted.\n" ] ], [ [ "### Coding/defining our \"problem\" via an RL environment.\n\nWe will use the following (adversarial) multi-agent environment throughout this demo.", "_____no_output_____" ], [ "<img src=\"img/environment.png\" width=800>", "_____no_output_____" ] ], [ [ "# Let's code our multi-agent environment.\n\nclass MultiAgentArena(MultiAgentEnv):\n def __init__(self, config=None):\n config = config or {}\n # Dimensions of the grid.\n self.width = config.get(\"width\", 10)\n self.height = config.get(\"height\", 10)\n\n # End an episode after this many timesteps.\n self.timestep_limit = config.get(\"ts\", 100)\n\n self.observation_space = MultiDiscrete([self.width * self.height,\n self.width * self.height])\n # 0=up, 1=right, 2=down, 3=left.\n self.action_space = Discrete(4)\n\n # Reset env.\n self.reset()\n\n # For rendering.\n self.out = None\n if config.get(\"render\"):\n self.out = Output()\n display.display(self.out)\n\n def reset(self):\n \"\"\"Returns initial observation of next(!) 
episode.\"\"\"\n # Row-major coords.\n self.agent1_pos = [0, 0] # upper left corner\n self.agent2_pos = [self.height - 1, self.width - 1] # lower bottom corner\n\n # Accumulated rewards in this episode.\n self.agent1_R = 0.0\n self.agent2_R = 0.0\n\n # Reset agent1's visited fields.\n self.agent1_visited_fields = set([tuple(self.agent1_pos)])\n\n # How many timesteps have we done in this episode.\n self.timesteps = 0\n\n # Did we have a collision in recent step?\n self.collision = False\n # How many collisions in total have we had in this episode?\n self.num_collisions = 0\n\n # Return the initial observation in the new episode.\n return self._get_obs()\n\n def step(self, action: dict):\n \"\"\"\n Returns (next observation, rewards, dones, infos) after having taken the given actions.\n \n e.g.\n `action={\"agent1\": action_for_agent1, \"agent2\": action_for_agent2}`\n \"\"\"\n \n # increase our time steps counter by 1.\n self.timesteps += 1\n # An episode is \"done\" when we reach the time step limit.\n is_done = self.timesteps >= self.timestep_limit\n\n # Agent2 always moves first.\n # events = [collision|agent1_new_field]\n events = self._move(self.agent2_pos, action[\"agent2\"], is_agent1=False)\n events |= self._move(self.agent1_pos, action[\"agent1\"], is_agent1=True)\n\n # Useful for rendering.\n self.collision = \"collision\" in events\n if self.collision is True:\n self.num_collisions += 1\n \n # Get observations (based on new agent positions).\n obs = self._get_obs()\n\n # Determine rewards based on the collected events:\n r1 = -1.0 if \"collision\" in events else 1.0 if \"agent1_new_field\" in events else -0.5\n r2 = 1.0 if \"collision\" in events else -0.1\n\n self.agent1_R += r1\n self.agent2_R += r2\n \n rewards = {\n \"agent1\": r1,\n \"agent2\": r2,\n }\n\n # Generate a `done` dict (per-agent and total).\n dones = {\n \"agent1\": is_done,\n \"agent2\": is_done,\n # special `__all__` key indicates that the episode is done for all agents.\n \"__all__\": is_done,\n }\n\n return obs, rewards, dones, {} # <- info dict (not needed here).\n\n def _get_obs(self):\n \"\"\"\n Returns obs dict (agent name to discrete-pos tuple) using each\n agent's current x/y-positions.\n \"\"\"\n ag1_discrete_pos = self.agent1_pos[0] * self.width + \\\n (self.agent1_pos[1] % self.width)\n ag2_discrete_pos = self.agent2_pos[0] * self.width + \\\n (self.agent2_pos[1] % self.width)\n return {\n \"agent1\": np.array([ag1_discrete_pos, ag2_discrete_pos]),\n \"agent2\": np.array([ag2_discrete_pos, ag1_discrete_pos]),\n }\n\n def _move(self, coords, action, is_agent1):\n \"\"\"\n Moves an agent (agent1 iff is_agent1=True, else agent2) from `coords` (x/y) using the\n given action (0=up, 1=right, etc..) and returns a resulting events dict:\n Agent1: \"new\" when entering a new field. 
\"bumped\" when having been bumped into by agent2.\n Agent2: \"bumped\" when bumping into agent1 (agent1 then gets -1.0).\n \"\"\"\n orig_coords = coords[:]\n # Change the row: 0=up (-1), 2=down (+1)\n coords[0] += -1 if action == 0 else 1 if action == 2 else 0\n # Change the column: 1=right (+1), 3=left (-1)\n coords[1] += 1 if action == 1 else -1 if action == 3 else 0\n\n # Solve collisions.\n # Make sure, we don't end up on the other agent's position.\n # If yes, don't move (we are blocked).\n if (is_agent1 and coords == self.agent2_pos) or (not is_agent1 and coords == self.agent1_pos):\n coords[0], coords[1] = orig_coords\n # Agent2 blocked agent1 (agent1 tried to run into agent2)\n # OR Agent2 bumped into agent1 (agent2 tried to run into agent1)\n return {\"collision\"}\n\n # No agent blocking -> check walls.\n if coords[0] < 0:\n coords[0] = 0\n elif coords[0] >= self.height:\n coords[0] = self.height - 1\n if coords[1] < 0:\n coords[1] = 0\n elif coords[1] >= self.width:\n coords[1] = self.width - 1\n\n # If agent1 -> \"new\" if new tile covered.\n if is_agent1 and not tuple(coords) in self.agent1_visited_fields:\n self.agent1_visited_fields.add(tuple(coords))\n return {\"agent1_new_field\"}\n # No new tile for agent1.\n return set()\n\n def render(self, mode=None):\n\n if self.out is not None:\n self.out.clear_output(wait=True)\n\n print(\"_\" * (self.width + 2))\n for r in range(self.height):\n print(\"|\", end=\"\")\n for c in range(self.width):\n field = r * self.width + c % self.width\n if self.agent1_pos == [r, c]:\n print(\"1\", end=\"\")\n elif self.agent2_pos == [r, c]:\n print(\"2\", end=\"\")\n elif (r, c) in self.agent1_visited_fields:\n print(\".\", end=\"\")\n else:\n print(\" \", end=\"\")\n print(\"|\")\n print(\"‾\" * (self.width + 2))\n print(f\"{'!!Collision!!' if self.collision else ''}\")\n print(\"R1={: .1f}\".format(self.agent1_R))\n print(\"R2={: .1f} ({} collisions)\".format(self.agent2_R, self.num_collisions))\n print()\n time.sleep(0.25)\n\n\nenv = MultiAgentArena(config={\"render\": True})\nobs = env.reset()\n\nwith env.out:\n # Agent1 moves down, Agent2 moves up.\n obs, rewards, dones, infos = env.step(action={\"agent1\": 2, \"agent2\": 0})\n env.render()\n\n # Agent1 moves right, Agent2 moves left.\n obs, rewards, dones, infos = env.step(action={\"agent1\": 1, \"agent2\": 3})\n env.render()\n\n # Agent1 moves right, Agent2 moves left.\n obs, rewards, dones, infos = env.step(action={\"agent1\": 1, \"agent2\": 3})\n env.render()\n\n # Agent1 moves down, Agent2 moves up.\n obs, rewards, dones, infos = env.step(action={\"agent1\": 2, \"agent2\": 0})\n env.render()\n\n\nprint(\"Agent1's x/y position={}\".format(env.agent1_pos))\nprint(\"Agent2's x/y position={}\".format(env.agent2_pos))\nprint(\"Env timesteps={}\".format(env.timesteps))\n", "_____no_output_____" ] ], [ [ "### Configuring our Trainer", "_____no_output_____" ] ], [ [ "TRAINER_CFG = {\n # Using our environment class defined above.\n \"env\": MultiAgentArena,\n # Use `framework=torch` here for PyTorch.\n \"framework\": \"tf\",\n\n # Run on 1 GPU on the \"learner\".\n \"num_gpus\": 1,\n # Use 15 ray-parallelized environment workers,\n # which collect samples to learn from. 
Each worker gets assigned\n # 1 CPU.\n \"num_workers\": 15,\n # Each of the 15 workers has 10 environment copies (\"vectorization\")\n # for faster (batched) forward passes.\n \"num_envs_per_worker\": 10,\n\n # Multi-agent setup: 2 policies.\n \"multiagent\": {\n \"policies\": {\"policy1\", \"policy2\"},\n \"policy_mapping_fn\": lambda agent_id: \"policy1\" if agent_id == \"agent1\" else \"policy2\"\n },\n}", "_____no_output_____" ] ], [ [ "### Training our 2 Policies (agent1 and agent2)", "_____no_output_____" ] ], [ [ "results = tune.run(\n # RLlib Trainer class (we use the \"PPO\" algorithm today).\n PPOTrainer,\n # Give our experiment a name (we will find results/checkpoints\n # under this name on the server's `~ray_results/` dir).\n name=f\"CUJ-RL\",\n # The RLlib config (defined in a cell above).\n config=TRAINER_CFG,\n # Take a snapshot every 2 iterations.\n checkpoint_freq=2,\n # Plus one at the very end of training.\n checkpoint_at_end=True,\n # Run for exactly 30 training iterations.\n stop={\"training_iteration\": 20},\n # Define what we are comparing for, when we search for the\n # \"best\" checkpoint at the end.\n metric=\"episode_reward_mean\",\n mode=\"max\")\n\nprint(\"Best checkpoint: \", results.best_checkpoint)\n", "\u001b[2m\u001b[36m(run pid=None)\u001b[0m == Status ==\n\u001b[2m\u001b[36m(run pid=None)\u001b[0m Current time: 2021-12-01 09:24:41 (running for 00:00:00.14)\n\u001b[2m\u001b[36m(run pid=None)\u001b[0m Memory usage on this node: 4.7/119.9 GiB\n\u001b[2m\u001b[36m(run pid=None)\u001b[0m Using FIFO scheduling algorithm.\n\u001b[2m\u001b[36m(run pid=None)\u001b[0m Resources requested: 0/16 CPUs, 0/1 GPUs, 0.0/74.36 GiB heap, 0.0/35.86 GiB objects (0.0/1.0 accelerator_type:M60)\n\u001b[2m\u001b[36m(run pid=None)\u001b[0m Result logdir: /home/ray/ray_results/CUJ-RL\n\u001b[2m\u001b[36m(run pid=None)\u001b[0m Number of trials: 1/1 (1 PENDING)\n\u001b[2m\u001b[36m(run pid=None)\u001b[0m +---------------------------------+----------+-------+\n\u001b[2m\u001b[36m(run pid=None)\u001b[0m | Trial name | status | loc |\n\u001b[2m\u001b[36m(run pid=None)\u001b[0m |---------------------------------+----------+-------|\n\u001b[2m\u001b[36m(run pid=None)\u001b[0m | PPO_MultiAgentArena_9155f_00000 | PENDING | |\n\u001b[2m\u001b[36m(run pid=None)\u001b[0m +---------------------------------+----------+-------+\n\u001b[2m\u001b[36m(run pid=None)\u001b[0m \n\u001b[2m\u001b[36m(run pid=None)\u001b[0m \n" ] ], [ [ "### Restoring from a checkpoint", "_____no_output_____" ] ], [ [ "local_checkpoint = \"/Users/sven/Downloads/checkpoint-20-2\"\n\nif os.path.isfile(local_checkpoint):\n print(\"yes, checkpoint files are on local machine ('Downloads' folder)\")", "yes, checkpoint files are on local machine ('Downloads' folder)\n" ], [ "# We'll restore the trained PPOTrainer locally on this laptop here and have it run\n# through a new environment to demonstrate it has learnt useful policies for our agents:\n\ncpu_config = TRAINER_CFG.copy()\ncpu_config[\"num_gpus\"] = 0\ncpu_config[\"num_workers\"] = 0\n\nnew_trainer = PPOTrainer(config=cpu_config)\n# Restore weights of the learnt policies via `restore()`.\nnew_trainer.restore(local_checkpoint)", "2021-12-01 18:34:51,615\tWARNING deprecation.py:38 -- DeprecationWarning: `SampleBatch['is_training']` has been deprecated. Use `SampleBatch.is_training` instead. 
This will raise an error in the future!\n2021-12-01 18:34:54,804\tWARNING trainer_template.py:185 -- `execution_plan` functions should accept `trainer`, `workers`, and `config` as args!\nInstall gputil for GPU system monitoring.\n" ] ], [ [ "### Running inference locally", "_____no_output_____" ] ], [ [ "env = MultiAgentArena(config={\"render\": True})\n\nwith env.out:\n\n obs = env.reset()\n env.render()\n\n while True:\n a1 = new_trainer.compute_single_action(obs[\"agent1\"], policy_id=\"policy1\", explore=True)\n a2 = new_trainer.compute_single_action(obs[\"agent2\"], policy_id=\"policy2\", explore=False)\n\n obs, rewards, dones, _ = env.step({\"agent1\": a1, \"agent2\": a2})\n\n env.render()\n\n if dones[\"agent1\"] is True:\n break\n", "_____no_output_____" ] ], [ [ "### Inference using Ray Serve", "_____no_output_____" ] ], [ [ "@serve.deployment(route_prefix=\"/multi-agent-arena\")\nclass ServeRLlibTrainer:\n\n def __init__(self, config, checkpoint_path):\n # Link to our trainer.\n self.trainer = PPOTrainer(cpu_config)\n self.trainer.restore(checkpoint_path)\n\n async def __call__(self, request: Request):\n json_input = await request.json()\n\n # Compute and return the action for the given observation.\n obs1 = json_input[\"observation_agent1\"]\n obs2 = json_input[\"observation_agent2\"]\n a1 = self.trainer.compute_single_action(obs1, policy_id=\"policy1\")\n a2 = self.trainer.compute_single_action(obs2, policy_id=\"policy2\")\n\n return {\"action\": {\"agent1\": int(a1), \"agent2\": int(a2)}}\n", "_____no_output_____" ], [ "client = serve.start()\nServeRLlibTrainer.deploy(cpu_config, results.best_checkpoint)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
e7d2210ef5f76152e5071a6bf706e6bb6b7977b8
23,664
ipynb
Jupyter Notebook
examples/ch13/snippets_ipynb/13_07-11withSelfChecks.ipynb
edson-gomes/Intro-to-Python
00a2f549916616b0f2036401573e35d66317f998
[ "MIT" ]
null
null
null
examples/ch13/snippets_ipynb/13_07-11withSelfChecks.ipynb
edson-gomes/Intro-to-Python
00a2f549916616b0f2036401573e35d66317f998
[ "MIT" ]
null
null
null
examples/ch13/snippets_ipynb/13_07-11withSelfChecks.ipynb
edson-gomes/Intro-to-Python
00a2f549916616b0f2036401573e35d66317f998
[ "MIT" ]
null
null
null
22.451613
212
0.530215
[ [ [ "**_Note: This notebook contains ALL the code for Sections 13.7 through 13.11, including the Self Check snippets because all the snippets in these sections are consecutively numbered in the text._**", "_____no_output_____" ], [ "# 13.7 Authenticating with Twitter Via Tweepy ", "_____no_output_____" ] ], [ [ "import tweepy", "_____no_output_____" ], [ "import keys", "_____no_output_____" ] ], [ [ "### Creating and Configuring an `OAuthHandler` to Authenticate with Twitter", "_____no_output_____" ] ], [ [ "auth = tweepy.OAuthHandler(keys.consumer_key,\n keys.consumer_secret)", "_____no_output_____" ], [ "auth.set_access_token(keys.access_token,\n keys.access_token_secret)", "_____no_output_____" ] ], [ [ "### Creating an API Object", "_____no_output_____" ] ], [ [ "api = tweepy.API(auth, wait_on_rate_limit=True, \n wait_on_rate_limit_notify=True)\n ", "_____no_output_____" ] ], [ [ "![Self Check Exercises check mark image](files/art/check.png)\n# 13.7 Self Check\n\n**1. _(Fill-In)_** Authenticating with Twitter via Tweepy involves two steps. First, create an object of the Tweepy module’s `________` class, passing your API key and API secret key to its constructor. \n\n**Answer:** `OAuthHandler`.\n\n**2. _(True/False)_** The keyword argument `wait_on_rate_limit_notify=True` to the `tweepy.API` call tells Tweepy to terminate the user because of a rate-limit violation.\n\n**Answer:** False. The call tells Tweepy that if it needs to wait to avoid rate-limit violations it should display a message at the command line indicating that it’s waiting for the rate limit to replenish.", "_____no_output_____" ], [ "# 13.8 Getting Information About a Twitter Account", "_____no_output_____" ] ], [ [ "nasa = api.get_user('nasa')", "_____no_output_____" ] ], [ [ "### Getting Basic Account Information", "_____no_output_____" ] ], [ [ "nasa.id", "_____no_output_____" ], [ "nasa.name", "_____no_output_____" ], [ "nasa.screen_name", "_____no_output_____" ], [ "nasa.description", "_____no_output_____" ] ], [ [ "### Getting the Most Recent Status Update", "_____no_output_____" ] ], [ [ "nasa.status.text", "_____no_output_____" ] ], [ [ "### Getting the Number of Followers", "_____no_output_____" ] ], [ [ "nasa.followers_count", "_____no_output_____" ] ], [ [ "### Getting the Number of Friends ", "_____no_output_____" ] ], [ [ "nasa.friends_count", "_____no_output_____" ] ], [ [ "### Getting Your Own Account’s Information", "_____no_output_____" ], [ "![Self Check Exercises check mark image](files/art/check.png)\n# 13.8 Self Check\n\n**1. _(Fill-In)_** After authenticating with Twitter, you can use the Tweepy `API` object’s `________` method to get a tweepy.models.User object containing information about a user’s Twitter account.\n\n**Answer:** get_user\n\n**2. _(True/False)_** Retweeting often results in truncation because a retweet adds characters that could exceed the character limit.\n\n**Answer:** True.\n\n**3. 
_(IPython Session)_** Use the `api` object to get a `User` object for the `NASAKepler` account, then display its number of followers and most recent tweet.\n\n**Answer:** ", "_____no_output_____" ] ], [ [ "nasa_kepler = api.get_user('NASAKepler')", "_____no_output_____" ], [ "nasa_kepler.followers_count", "_____no_output_____" ], [ "nasa_kepler.status.text", "_____no_output_____" ] ], [ [ "# 13.9 Introduction to Tweepy `Cursor`s: Getting an Account’s Followers and Friends\n# 13.9.1 Determining an Account’s Followers ", "_____no_output_____" ] ], [ [ "followers = []", "_____no_output_____" ] ], [ [ "### Creating a Cursor", "_____no_output_____" ] ], [ [ "cursor = tweepy.Cursor(api.followers, screen_name='nasa')", "_____no_output_____" ] ], [ [ "### Getting Results", "_____no_output_____" ] ], [ [ "for account in cursor.items(10):\n followers.append(account.screen_name)\n", "_____no_output_____" ], [ "print('Followers:', \n ' '.join(sorted(followers, key=lambda s: s.lower())))", "_____no_output_____" ] ], [ [ "### Automatic Paging\n### Getting Follower IDs Rather Than Followers", "_____no_output_____" ], [ "![Self Check Exercises check mark image](files/art/check.png)\n# 13.9.1 Self Check\n\n**1. _(Fill-In)_** Each Twitter API method’s documentation discusses the maximum number of items the method can return in one call—this is known as a `________` of results. \n\n**Answer:** page.\n\n**2. _(True/False)_** Though you can get complete `User` objects for a maximum of 200 followers at a time, you can get many more Twitter ID numbers by calling the `API` object’s `followers_ids` method.\n\n**Answer:** True.\n\n**3. _(IPython Session)_** Use a Cursor to get and display 10 followers of the `NASAKepler` account.\n\n**Answer:** ", "_____no_output_____" ] ], [ [ "kepler_followers = []", "_____no_output_____" ], [ "cursor = tweepy.Cursor(api.followers, screen_name='NASAKepler')", "_____no_output_____" ], [ "for account in cursor.items(10):\n kepler_followers.append(account.screen_name)\n ", "_____no_output_____" ], [ "print(' '.join(kepler_followers))", "_____no_output_____" ] ], [ [ "# 13.9.2 Determining Whom an Account Follows ", "_____no_output_____" ] ], [ [ "friends = []", "_____no_output_____" ], [ "cursor = tweepy.Cursor(api.friends, screen_name='nasa')", "_____no_output_____" ], [ "for friend in cursor.items(10):\n friends.append(friend.screen_name)\n ", "_____no_output_____" ], [ "print('Friends:', \n ' '.join(sorted(friends, key=lambda s: s.lower())))", "_____no_output_____" ] ], [ [ "![Self Check Exercises check mark image](files/art/check.png)\n# 13.9.2 Self Check\n\n**1. _(Fill-In)_** The `API` object’s `friends` method calls the Twitter API’s `________` method to get a list of User objects representing an account’s friends. \n\n**Answer:** `friends/list`.", "_____no_output_____" ], [ "# 13.9.3 Getting a User’s Recent Tweets", "_____no_output_____" ] ], [ [ "nasa_tweets = api.user_timeline(screen_name='nasa', count=3)", "_____no_output_____" ], [ "for tweet in nasa_tweets:\n print(f'{tweet.user.screen_name}: {tweet.text}\\n')\n ", "_____no_output_____" ] ], [ [ "### Grabbing Recent Tweets from Your Own Timeline", "_____no_output_____" ], [ "![Self Check Exercises check mark image](files/art/check.png)\n# 13.9.3 Self Check\n\n**1. _(Fill-In)_** You can call the `API` method `home_timeline` to get tweets from your home timeline, that is, your tweets and tweets from `________`. \n\n**Answer:** the people you follow.\n\n**2. 
_(IPython Session)_** Get and display two tweets from the `NASAKepler` account.\n\n**Answer:** ", "_____no_output_____" ] ], [ [ "kepler_tweets = api.user_timeline(\n screen_name='NASAKepler', count=2) ", "_____no_output_____" ], [ "for tweet in kepler_tweets:\n print(f'{tweet.user.screen_name}: {tweet.text}\\n') ", "_____no_output_____" ] ], [ [ "# 13.10 Searching Recent Tweets\n### Tweet Printer", "_____no_output_____" ] ], [ [ "from tweetutilities import print_tweets", "_____no_output_____" ] ], [ [ "### Searching for Specific Words", "_____no_output_____" ] ], [ [ "tweets = api.search(q='Mars Opportunity Rover', count=3)", "_____no_output_____" ], [ "print_tweets(tweets)", "_____no_output_____" ] ], [ [ "### Searching with Twitter Search Operators", "_____no_output_____" ] ], [ [ "tweets = api.search(q='from:nasa since:2018-09-01', count=3)", "_____no_output_____" ], [ "print_tweets(tweets)", "_____no_output_____" ] ], [ [ "### Searching for a Hashtag", "_____no_output_____" ] ], [ [ "tweets = api.search(q='#collegefootball', count=20)", "_____no_output_____" ], [ "print_tweets(tweets)", "_____no_output_____" ] ], [ [ "![Self Check Exercises check mark image](files/art/check.png)\n# 13.10 Self Check\n\n**1. _(Fill-In)_** The Tweepy `API` method `________` returns tweets that match a query string.\n\n**Answer:** search.\n\n**2. _(True/False)_** If you plan to request more results than can be returned by one call to search, you should use an `API` object.\n\n**Answer:** False. If you plan to request more results than can be returned by one call to `search`, you should use a `Cursor` object.\n\n**3. _(IPython Session)_** Search for one tweet from the `nasa` account containing `'astronaut'`.\n\n**Answer:** ", "_____no_output_____" ] ], [ [ "tweets = api.search(q='astronaut from:nasa', count=1)", "_____no_output_____" ], [ "print_tweets(tweets)", "_____no_output_____" ] ], [ [ "# 13.11 Spotting Trends with the Twitter Trends API\n# 13.11.1 Places with Trending Topics", "_____no_output_____" ] ], [ [ "trends_available = api.trends_available()", "_____no_output_____" ], [ "len(trends_available)", "_____no_output_____" ], [ "trends_available[0]", "_____no_output_____" ], [ "trends_available[1]", "_____no_output_____" ] ], [ [ "![Self Check Exercises check mark image](files/art/check.png)\n# 13.11.1 Self Check\n\n**1. _(Fill-In)_** If a topic “goes viral,” you could have thousands or even millions of people tweeting about that topic at once. Twitter refers to these as `________` topics.\n\n**Answer:** trending.\n\n**2. _(True/False)_** The Twitter Trends API’s `trends/place` method uses Yahoo! Where on Earth IDs (WOEIDs) to look up trending topics. The WOEID `1` represents worldwide. 
\n\n**Answer:** True.", "_____no_output_____" ], [ "# 13.11.2 Getting a List of Trending Topics\n### Worldwide Trending Topics", "_____no_output_____" ] ], [ [ "world_trends = api.trends_place(id=1)", "_____no_output_____" ], [ "trends_list = world_trends[0]['trends']", "_____no_output_____" ], [ "trends_list[0]", "_____no_output_____" ], [ "trends_list = [t for t in trends_list if t['tweet_volume']]", "_____no_output_____" ], [ "from operator import itemgetter ", "_____no_output_____" ], [ "trends_list.sort(key=itemgetter('tweet_volume'), reverse=True) ", "_____no_output_____" ], [ "for trend in trends_list[:5]:\n print(trend['name'])", "_____no_output_____" ] ], [ [ "### New York City Trending Topics", "_____no_output_____" ] ], [ [ "nyc_trends = api.trends_place(id=2459115) # New York City WOEID", "_____no_output_____" ], [ "nyc_list = nyc_trends[0]['trends']", "_____no_output_____" ], [ "nyc_list = [t for t in nyc_list if t['tweet_volume']]", "_____no_output_____" ], [ "nyc_list.sort(key=itemgetter('tweet_volume'), reverse=True) ", "_____no_output_____" ], [ "for trend in nyc_list[:5]:\n print(trend['name'])\n ", "_____no_output_____" ] ], [ [ "![Self Check Exercises check mark image](files/art/check.png)\n# 13.11.2 Self Check\n\n**1. _(Fill-In)_** You also can look up WOEIDs programmatically using Yahoo!’s web services via Python libraries like `________`.\n\n**Answer:** `woeid`.\n\n**2. _(True/False)_** The statement `todays_trends = api.trends_place(id=1)` gets today’s U. S. trending topics.\n\n**Answer:** False. Actually, it gets today’s worldwide trending topics.\n\n**3. _(IPython Session)_** Display the top 3 trending topics today in the United States.\n\n**Answer:** ", "_____no_output_____" ] ], [ [ "us_trends = api.trends_place(id='23424977')", "_____no_output_____" ], [ "us_list = us_trends[0]['trends']", "_____no_output_____" ], [ "us_list = [t for t in us_list if t['tweet_volume']]", "_____no_output_____" ], [ "us_list.sort(key=itemgetter('tweet_volume'), reverse=True)", "_____no_output_____" ], [ "for trend in us_list[:3]:\n print(trend['name'])", "_____no_output_____" ] ], [ [ "# 13.11.3 Create a Word Cloud from Trending Topics", "_____no_output_____" ] ], [ [ "topics = {}", "_____no_output_____" ], [ "for trend in nyc_list:\n topics[trend['name']] = trend['tweet_volume']\n ", "_____no_output_____" ], [ "from wordcloud import WordCloud", "_____no_output_____" ], [ "wordcloud = WordCloud(width=1600, height=900,\n prefer_horizontal=0.5, min_font_size=10, colormap='prism', \n background_color='white')\n ", "_____no_output_____" ], [ "wordcloud = wordcloud.fit_words(topics)", "_____no_output_____" ], [ "wordcloud = wordcloud.to_file('TrendingTwitter.png')", "_____no_output_____" ] ], [ [ "![Self Check Exercises check mark image](files/art/check.png)\n# 13.11.3 Self Check\n\n**1. _(IPython Session)_** Create a word cloud using the `us_list` list from the previous section’s Self Check.\n\n**Answer:** ", "_____no_output_____" ] ], [ [ "topics = {}", "_____no_output_____" ], [ "for trend in us_list:\n topics[trend['name']] = trend['tweet_volume']\n ", "_____no_output_____" ], [ "wordcloud = wordcloud.fit_words(topics)", "_____no_output_____" ], [ "wordcloud = wordcloud.to_file('USTrendingTwitter.png')", "_____no_output_____" ], [ "##########################################################################\n# (C) Copyright 2019 by Deitel & Associates, Inc. and #\n# Pearson Education, Inc. All Rights Reserved. 
#\n# #\n# DISCLAIMER: The authors and publisher of this book have used their #\n# best efforts in preparing the book. These efforts include the #\n# development, research, and testing of the theories and programs #\n# to determine their effectiveness. The authors and publisher make #\n# no warranty of any kind, expressed or implied, with regard to these #\n# programs or to the documentation contained in these books. The authors #\n# and publisher shall not be liable in any event for incidental or #\n# consequential damages in connection with, or arising out of, the #\n# furnishing, performance, or use of these programs. #\n##########################################################################\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
e7d23075c46392bf1746a43007e227af1ce4a5de
4,010
ipynb
Jupyter Notebook
classical-systems/Exercises_Probabilistic_Systems.ipynb
jaorduz/QWorld2021_QMexico
34667130ade29511f3e8b09612726cf1431ec989
[ "Apache-2.0", "CC-BY-4.0" ]
1
2021-07-27T13:39:00.000Z
2021-07-27T13:39:00.000Z
classical-systems/Exercises_Probabilistic_Systems.ipynb
dev-aditya/QWorld_Summer_School_2021
1b8711327845617ca8dc32ff2a20f461d0ee01c7
[ "Apache-2.0", "CC-BY-4.0" ]
null
null
null
classical-systems/Exercises_Probabilistic_Systems.ipynb
dev-aditya/QWorld_Summer_School_2021
1b8711327845617ca8dc32ff2a20f461d0ee01c7
[ "Apache-2.0", "CC-BY-4.0" ]
3
2021-08-11T11:12:38.000Z
2021-09-14T09:15:08.000Z
42.210526
310
0.488279
[ [ [ "<a href=\"https://qworld.net\" target=\"_blank\" align=\"left\"><img src=\"../qworld/images/header.jpg\" align=\"left\"></a>\n$ \\newcommand{\\bra}[1]{\\langle #1|} $\n$ \\newcommand{\\ket}[1]{|#1\\rangle} $\n$ \\newcommand{\\braket}[2]{\\langle #1|#2\\rangle} $\n$ \\newcommand{\\dot}[2]{ #1 \\cdot #2} $\n$ \\newcommand{\\biginner}[2]{\\left\\langle #1,#2\\right\\rangle} $\n$ \\newcommand{\\mymatrix}[2]{\\left( \\begin{array}{#1} #2\\end{array} \\right)} $\n$ \\newcommand{\\myvector}[1]{\\mymatrix{c}{#1}} $\n$ \\newcommand{\\myrvector}[1]{\\mymatrix{r}{#1}} $\n$ \\newcommand{\\mypar}[1]{\\left( #1 \\right)} $\n$ \\newcommand{\\mybigpar}[1]{ \\Big( #1 \\Big)} $\n$ \\newcommand{\\sqrttwo}{\\frac{1}{\\sqrt{2}}} $\n$ \\newcommand{\\dsqrttwo}{\\dfrac{1}{\\sqrt{2}}} $\n$ \\newcommand{\\onehalf}{\\frac{1}{2}} $\n$ \\newcommand{\\donehalf}{\\dfrac{1}{2}} $\n$ \\newcommand{\\hadamard}{ \\mymatrix{rr}{ \\sqrttwo & \\sqrttwo \\\\ \\sqrttwo & -\\sqrttwo }} $\n$ \\newcommand{\\vzero}{\\myvector{1\\\\0}} $\n$ \\newcommand{\\vone}{\\myvector{0\\\\1}} $\n$ \\newcommand{\\stateplus}{\\myvector{ \\sqrttwo \\\\ \\sqrttwo } } $\n$ \\newcommand{\\stateminus}{ \\myrvector{ \\sqrttwo \\\\ -\\sqrttwo } } $\n$ \\newcommand{\\myarray}[2]{ \\begin{array}{#1}#2\\end{array}} $\n$ \\newcommand{\\X}{ \\mymatrix{cc}{0 & 1 \\\\ 1 & 0} } $\n$ \\newcommand{\\I}{ \\mymatrix{rr}{1 & 0 \\\\ 0 & 1} } $\n$ \\newcommand{\\Z}{ \\mymatrix{rr}{1 & 0 \\\\ 0 & -1} } $\n$ \\newcommand{\\Htwo}{ \\mymatrix{rrrr}{ \\frac{1}{2} & \\frac{1}{2} & \\frac{1}{2} & \\frac{1}{2} \\\\ \\frac{1}{2} & -\\frac{1}{2} & \\frac{1}{2} & -\\frac{1}{2} \\\\ \\frac{1}{2} & \\frac{1}{2} & -\\frac{1}{2} & -\\frac{1}{2} \\\\ \\frac{1}{2} & -\\frac{1}{2} & -\\frac{1}{2} & \\frac{1}{2} } } $\n$ \\newcommand{\\CNOT}{ \\mymatrix{cccc}{1 & 0 & 0 & 0 \\\\ 0 & 1 & 0 & 0 \\\\ 0 & 0 & 0 & 1 \\\\ 0 & 0 & 1 & 0} } $\n$ \\newcommand{\\norm}[1]{ \\left\\lVert #1 \\right\\rVert } $\n$ \\newcommand{\\pstate}[1]{ \\lceil \\mspace{-1mu} #1 \\mspace{-1.5mu} \\rfloor } $\n$ \\newcommand{\\greenbit}[1] {\\mathbf{{\\color{green}#1}}} $\n$ \\newcommand{\\bluebit}[1] {\\mathbf{{\\color{blue}#1}}} $\n$ \\newcommand{\\redbit}[1] {\\mathbf{{\\color{red}#1}}} $\n$ \\newcommand{\\brownbit}[1] {\\mathbf{{\\color{brown}#1}}} $\n$ \\newcommand{\\blackbit}[1] {\\mathbf{{\\color{black}#1}}} $", "_____no_output_____" ], [ "<font style=\"font-size:28px;\" align=\"left\"><b> Exercises for Probabilistic Systems </b></font>\n<br>\n_prepared by Abuzer Yakaryilmaz_\n<br><br>", "_____no_output_____" ], [ "Run the following cell to open the exercises.\n\n<i><a href=\"https://www.mathjax.org\" target=\"_blank\">MathJax</a> is used to express mathematical expressions and it requires internet connection.</i>\n<hr>", "_____no_output_____" ] ], [ [ "import os, webbrowser\nwebbrowser.open(os.path.abspath(\"Exercises_Probabilistic_Systems.html\"))", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ] ]
e7d238df3d02ea046119c2785196b2865e196b99
448,182
ipynb
Jupyter Notebook
notebook/onion/7-Insight.ipynb
narnaik/art-data-science
2627012bf279fba5a621b1d10c91171723a142e4
[ "MIT" ]
34
2017-01-16T11:58:24.000Z
2022-03-26T02:06:33.000Z
notebook/onion/7-Insight.ipynb
narnaik/art-data-science
2627012bf279fba5a621b1d10c91171723a142e4
[ "MIT" ]
null
null
null
notebook/onion/7-Insight.ipynb
narnaik/art-data-science
2627012bf279fba5a621b1d10c91171723a142e4
[ "MIT" ]
32
2017-02-09T17:03:35.000Z
2020-07-08T03:20:49.000Z
460.145791
230,806
0.95627
[ [ [ "# 7. Share the Insight", "_____no_output_____" ], [ "\n> “The goal is to turn data into insight”\n \n- Why do we need to communicate insight?\n- Types of communication - Exploration vs. Explanation\n- Explanation: Telling a story with data\n- Exploration: Building an interface for people to find stories\n\nThere are two main insights we want to communicate. \n- Bangalore is the largest market for Onion Arrivals. \n- Onion Price variation has increased in the recent years.\n\nLet us explore how we can communicate these insight visually.", "_____no_output_____" ], [ "## Preprocessing to get the data", "_____no_output_____" ] ], [ [ "# Import the library we need, which is Pandas and Matplotlib\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\n# import seaborn as sns", "_____no_output_____" ], [ "# Set some parameters to get good visuals - style to ggplot and size to 15,10\nplt.style.use('ggplot')\nplt.rcParams['figure.figsize'] = (15, 10)", "_____no_output_____" ], [ "# Read the csv file of Monthwise Quantity and Price csv file we have.\ndf = pd.read_csv('MonthWiseMarketArrivals_clean.csv')", "_____no_output_____" ], [ "# Change the index to the date column\ndf.index = pd.PeriodIndex(df.date, freq='M')", "_____no_output_____" ], [ "# Sort the data frame by date\ndf = df.sort_values(by = \"date\")", "_____no_output_____" ], [ "# Get the data for year 2016\ndf2016 = df[df.year == 2016]", "_____no_output_____" ], [ "# Groupby on City to get the sum of quantity\ndf2016City = df2016.groupby(['city'], as_index=False)['quantity'].sum()", "_____no_output_____" ], [ "df2016City = df2016City.sort_values(by = \"quantity\", ascending = False)", "_____no_output_____" ], [ "df2016City.head()", "_____no_output_____" ] ], [ [ "## Let us plot the Cities in a Geographic Map", "_____no_output_____" ] ], [ [ "# Load the geocode file\ndfGeo = pd.read_csv('city_geocode.csv')", "_____no_output_____" ], [ "dfGeo.head()", "_____no_output_____" ] ], [ [ "### PRINCIPLE: Joining two data frames\n\nThere will be many cases in which your data is in two different dataframe and you would like to merge them in to one dataframe. 
Let us look at one example of this - which is called left join\n\n![](../img/left_merge.png)", "_____no_output_____" ] ], [ [ "dfCityGeo = pd.merge(df2016City, dfGeo, how='left', on=['city', 'city'])", "_____no_output_____" ], [ "dfCityGeo.head()", "_____no_output_____" ], [ "dfCityGeo.isnull().describe()", "_____no_output_____" ], [ "dfCityGeo.plot(kind = 'scatter', x = 'lon', y = 'lat', s = 100)", "_____no_output_____" ] ], [ [ "We can do a crude aspect ratio adjustment to make the cartesian coordinate systesm appear like a mercator map", "_____no_output_____" ] ], [ [ "dfCityGeo.plot(kind = 'scatter', x = 'lon', y = 'lat', s = 100, figsize = [10,11])", "_____no_output_____" ], [ "# Let us at quanitity as the size of the bubble\ndfCityGeo.plot(kind = 'scatter', x = 'lon', y = 'lat',\n s = dfCityGeo.quantity, figsize = [10,11])", "_____no_output_____" ], [ "# Let us scale down the quantity variable\ndfCityGeo.plot(kind = 'scatter', x = 'lon', y = 'lat', \n s = dfCityGeo.quantity/1000, figsize = [10,11])", "_____no_output_____" ], [ "# Reduce the opacity of the color, so that we can see overlapping values\ndfCityGeo.plot(kind = 'scatter', x = 'lon', y = 'lat', s = dfCityGeo.quantity/1000,\n alpha = 0.5, figsize = [10,11])", "_____no_output_____" ] ], [ [ "### Exercise", "_____no_output_____" ], [ "Can you plot all the States by quantity in (pseudo) geographic map?", "_____no_output_____" ], [ "## Plotting on a Map", "_____no_output_____" ] ], [ [ "import folium", "_____no_output_____" ], [ "# Getting an India Map\nmap_osm = folium.Map(location=[20.5937, 78.9629])", "_____no_output_____" ], [ "map_osm", "_____no_output_____" ], [ "# Using any map provider\nmap_stamen = folium.Map(location=[20.5937, 78.9629],\n tiles='Stamen Toner', zoom_start=5)\nmap_stamen", "_____no_output_____" ] ], [ [ "Adding markers on the map", "_____no_output_____" ] ], [ [ "folium.CircleMarker(location=[20.5937, 78.9629],\n radius=50000,\n popup='Central India',\n color='#3186cc',\n fill_color='#3186cc',\n ).add_to(map_stamen)\nmap_stamen", "_____no_output_____" ] ], [ [ "Add markers from a dataframe", "_____no_output_____" ] ], [ [ "length = dfCityGeo.shape[0]", "_____no_output_____" ], [ "length", "_____no_output_____" ], [ "map_india = folium.Map(location=[20.5937, 78.9629], tiles='Stamen Toner', zoom_start=5)", "_____no_output_____" ], [ "for i in range(length):\n lon = dfCityGeo.iloc[i, 2]\n lat = dfCityGeo.iloc[i, 3]\n location = [lat, lon]\n radius = dfCityGeo.iloc[i, 1]/25\n name = dfCityGeo.iloc[i,0]\n \n folium.CircleMarker(location=location, radius=radius,\n popup=name, color='#3186cc', fill_color='#3186cc',\n ).add_to(map_india) ", "_____no_output_____" ], [ "map_india", "_____no_output_____" ] ], [ [ "### Exercise", "_____no_output_____" ], [ "Can you plot all the States by quantity on an actual geographic map?", "_____no_output_____" ] ] ]
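A minimal, self-contained sketch of the left join described in the PRINCIPLE cell above; the toy city names, quantities and coordinates are invented for illustration:

```python
import pandas as pd

# Toy stand-ins for df2016City and dfGeo (values are illustrative only)
sales = pd.DataFrame({"city": ["BANGALORE", "DELHI", "SOMEWHERE"],
                      "quantity": [9000, 7000, 100]})
geo = pd.DataFrame({"city": ["BANGALORE", "DELHI"],
                    "lat": [12.97, 28.61], "lon": [77.59, 77.21]})

# how='left' keeps every row of `sales`; cities missing from `geo` get
# NaN coordinates, which is why the notebook checks isnull() right after
merged = pd.merge(sales, geo, how="left", on="city")
print(merged)
```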
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ] ]
e7d23964ea15cf7af5c43e0f2be2bb1d7f179f81
352,958
ipynb
Jupyter Notebook
notebooks/20201103-moa-lgbm-v38.ipynb
KFurudate/kaggle_MoA
8eb3f6cc970189170e601753ee71169fd59ef593
[ "MIT" ]
null
null
null
notebooks/20201103-moa-lgbm-v38.ipynb
KFurudate/kaggle_MoA
8eb3f6cc970189170e601753ee71169fd59ef593
[ "MIT" ]
1
2020-12-01T02:24:13.000Z
2020-12-01T02:24:13.000Z
notebooks/20201103-moa-lgbm-v38.ipynb
KFurudate/kaggle_MoA
8eb3f6cc970189170e601753ee71169fd59ef593
[ "MIT" ]
null
null
null
352,958
352,958
0.673477
[ [ [ "Thanks for:\n* https://www.kaggle.com/sishihara/moa-lgbm-benchmark#Preprocessing\n\n* https://www.kaggle.com/ttahara/osic-baseline-lgbm-with-custom-metric\n\n* https://zenn.dev/fkubota/articles/2b8d46b11c178ac2fa2d\n\n* https://qiita.com/ryouta0506/items/619d9ac0d80f8c0aed92\n\n* https://github.com/nejumi/tools_for_kaggle/blob/master/semi_supervised_learner.py\n\n* https://upura.hatenablog.com/entry/2019/03/03/233534\n\n* https://pompom168.hatenablog.com/entry/2019/07/22/113433\n\n* https://www.kaggle.com/c/lish-moa/discussion/193878\n\n* https://tsumit.hatenablog.com/entry/2020/06/20/044835\n\n* https://www.kaggle.com/kushal1506/moa-pytorch-feature-engineering-0-01846\n\n* https://www.kaggle.com/c/lish-moa/discussion/195195\n", "_____no_output_____" ] ], [ [ "# Version = \"v1\" # starter model\n# Version = \"v2\" # Compare treat Vs. ctrl and minor modifications, StratifiedKFold\n# Version = \"v3\" # Add debug mode and minor modifications\n# Version = \"v4\" # Clipping a control with an outlier(25-75)\n# Version = \"v5\" # Clipping a control with an outlier(20-80)\n# Version = \"v6\" # under sampling 500 → oversamplling 500, lipping a control with an outlier(10-90)\n# Version = \"v7\" # Use anotated data, under sampling 500 → oversamplling 500, clipping a control with an outlier(10-90)\n# Version = \"v8\" # pseudo labeling (thresholds:0.5), timeout\n# Version = \"v9\" # pseudo labeling (thresholds:0.6), timeout\n# Version = \"v10\" # pseudo labeling (thresholds:0.6), ReduceCol: Kolmogorov-Smirnov, PCA(whiten)&UMAP\n# Version = \"v11\" # pseudo labeling (thresholds:0.6), ReduceCol: Kolmogorov-Smirnov, PCA(whiten)&UMAP, lgbm parames adjust\n# Version = \"v12\" # Feature engineering based on feature importance\n# Version = \"v13\" # Calibration, SMOTE(k_neighbors=5→1)\n# Version = \"v14\" # Removed the Calibration, SMOTE(k_neighbors=1), pseudo labeling (thresholds:0.7)\n# Version = \"v15\" # Updata anotated data\n# Version = \"v16\" # Remove noisy label(confidence: 0.5)\n# Version = \"v17\" # Modifications with remove noisy label func, Calibration, confidence = y_prob.probability.max()*0.3\n# Version = \"v18\" # SMOTE(k_neighbors=1→2), confidence = y_prob.probability.max()*0.2\n# Version = \"v19\" # SMOTE(k_neighbors=2→3),\n# Version = \"v20\" # Modifications with confidence, Removed the Calibration, SMOTE(k_neighbors=2), \n# Version = \"v21\" # DEBUG = False\n# Version = \"v22\" # minor modifications\n# Version = \"v23\" # TOP100→PCA→UMAP(n_components=3)\n# Version = \"v24\" # TOP100→PCA→UMAP(n_components=10), UMAP(n_components=2→3)\n# Version = \"v25\" # Feature engineering based on Feature importance\n# Version = \"v26\" # Modify pseudo labeling func to exclude low confidence pseudo labels in the TEST data.\n# Version = \"v27\" # LGBMClassifie:clf.predict→clf.predict_proba\n# Version = \"v28\" # Calibration (No calbration:CV:0.06542)\n# Version = \"v29\" # Remove Calibration, is_unbalance': True, SMOTE(k_neighbors=2→3), Modify pseudo labeling func to include low confidence pseudo labels in the TEST data, target_rate *= 1.2\n# Version = \"v30\" # drop_duplicates(keep=\"last\")\n# Version = \"v31\" # target_rate *= 1.1, if Threshold <= 0.2: break, if sum(p_label)*1.5 >= check: break, if sum(p_label) <= check*1.5: break\n# Version = \"v32\" # y_prob.probability.quantile(0.3), if Threshold >= 0.95: break\n# Version = \"v33\" # RankGauss, Scaled by category, SMOTE(k_neighbors=2),\n# Version = \"v34\" # RankGauss apply c-columns, remove TOP100, Add f_diff = lambda x: x - med, Create 
features\n# Version = \"v35\" # f_div = lambda x: ((x+d)*10 / (abs(med)+d))**2, f_diff = lambda x: ((x-med)*10)**2, select features\n# Version = \"v36\" # Add feature importance func\n# Version = \"v37\" # Remove RankGauss for gene expression, fix feature importance func\n\nVersion = \"v38\" # Add MultiLabel Stratification func, fix index of data before split with \"data = data.sort_index(axis='index')\"\"\n\n# Feature engineering based on Feature importance with v36 notebook", "_____no_output_____" ], [ "DEBUG = True", "_____no_output_____" ] ], [ [ "# Library", "_____no_output_____" ] ], [ [ "import lightgbm as lgb\nfrom lightgbm import LGBMClassifier\n\nimport imblearn\nfrom imblearn.over_sampling import SMOTE\nfrom logging import getLogger, INFO, StreamHandler, FileHandler, Formatter\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nimport os\nimport random\n\nfrom sklearn import preprocessing\nfrom sklearn.metrics import log_loss, roc_auc_score\nfrom sklearn.model_selection import StratifiedKFold\n\nfrom tqdm.notebook import tqdm\nimport torch\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nprint(\"lightgbm Version: \", lgb.__version__)\nprint(\"imblearn Version: \", imblearn.__version__)\nprint(\"numpy Version: \", np.__version__)\nprint(\"pandas Version: \", pd.__version__)", "lightgbm Version: 2.3.1\nimblearn Version: 0.7.0\nnumpy Version: 1.18.5\npandas Version: 1.1.3\n" ] ], [ [ "# Utils", "_____no_output_____" ] ], [ [ "def get_logger(filename='log'):\n logger = getLogger(__name__)\n logger.setLevel(INFO)\n handler1 = StreamHandler()\n handler1.setFormatter(Formatter(\"%(message)s\"))\n handler2 = FileHandler(filename=f\"{filename}.{Version}.log\")\n handler2.setFormatter(Formatter(\"%(message)s\"))\n logger.addHandler(handler1)\n logger.addHandler(handler2)\n return logger\n\nlogger = get_logger()\n\ndef seed_everything(seed=777):\n random.seed(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.backends.cudnn.deterministic = True", "_____no_output_____" ] ], [ [ "# Config", "_____no_output_____" ] ], [ [ "if DEBUG:\n N_FOLD = 2\n Num_boost_round=1000\n Early_stopping_rounds=10\n Learning_rate = 0.03\nelse:\n N_FOLD = 4\n Num_boost_round=10000\n Early_stopping_rounds=30\n Learning_rate = 0.01\n\nSEED = 42\nseed_everything(seed=SEED)\n\nMax_depth = 7", "_____no_output_____" ] ], [ [ "# Data Loading", "_____no_output_____" ] ], [ [ "train = pd.read_csv(\"../input/lish-moa/train_features.csv\")\ntest = pd.read_csv(\"../input/lish-moa/test_features.csv\")\ntrain_targets_scored = pd.read_csv(\"../input/lish-moa/train_targets_scored.csv\")\ntrain_targets_nonscored = pd.read_csv(\"../input/lish-moa/train_targets_nonscored.csv\")\nsub = pd.read_csv(\"../input/lish-moa/sample_submission.csv\")\n\n# New data file available from 3th November\ndrug = pd.read_csv('../input/lish-moa/train_drug.csv')", "_____no_output_____" ], [ "Targets = train_targets_scored.columns[1:]\nScored = train_targets_scored.merge(drug, on='sig_id', how='left')\nScored", "_____no_output_____" ], [ "def label_encoding(train: pd.DataFrame, test: pd.DataFrame, encode_cols):\n n_train = len(train)\n train = pd.concat([train, test], sort=False).reset_index(drop=True)\n \n for f in encode_cols:\n try:\n lbl = preprocessing.LabelEncoder()\n train[f] = lbl.fit_transform(list(train[f].values))\n except:\n print(f)\n test = train[n_train:].reset_index(drop=True)\n train = 
train[:n_train]\n \n return train, test", "_____no_output_____" ], [ "# Manual annotation by myself\nannot = pd.read_csv(\"../input/moa-annot-data/20201024_moa_sig_list.v2.csv\")\nannot", "_____no_output_____" ], [ "annot_sig = []\nannot_sig = annot.sig_id.tolist()\nprint(annot_sig)", "['adenylyl_cyclase_activator', 'aldehyde_dehydrogenase_inhibitor', 'antiarrhythmic', 'anticonvulsant', 'antifungal', 'antihistamine', 'atp-sensitive_potassium_channel_antagonist', 'bacterial_membrane_integrity_inhibitor', 'calcineurin_inhibitor', 'catechol_o_methyltransferase_inhibitor', 'cdk_inhibitor', 'coagulation_factor_inhibitor', 'elastase_inhibitor', 'erbb2_inhibitor', 'nicotinic_receptor_agonist', 'nitric_oxide_production_inhibitor', 'protein_phosphatase_inhibitor', 'sphingosine_receptor_agonist', 'steroid', 'ubiquitin_specific_protease_inhibitor']\n" ], [ "train_target = pd.concat([train_targets_scored, train_targets_nonscored], axis=1)\ntrain_target.head() ", "_____no_output_____" ] ], [ [ "# Training Utils", "_____no_output_____" ] ], [ [ "def get_target(target_col, annot_sig):\n if target_col in annot_sig:\n t_cols = []\n for t_col in list(annot[annot.sig_id == target_col].iloc[0]):\n if t_col is not np.nan:\n t_cols.append(t_col)\n target = train_target[t_cols]\n target = target.sum(axis=1)\n #1 or more, replace it with 1.\n target = target.where(target < 1, 1)\n else:\n target = train_targets_scored[target_col]\n \n return target", "_____no_output_____" ], [ "def Multi_Stratification(df, target_col, target):\n \n _df = df.copy() \n sig_id_lst = [list(Scored.sig_id[Scored.drug_id == id_].sample())[0] for id_ in Scored.drug_id.unique()]\n \n # Remove sig_id wih target \n del_idx = train[target==1].sig_id.unique()\n select_idx = [i for i in sig_id_lst if i not in del_idx]\n print(f\"neg labels: {len(sig_id_lst)}→ selected neg labels: {len(select_idx)}\")\n \n # Select negative target\n _df = _df.set_index('sig_id')\n _df = _df.loc[select_idx, :]\n _df = _df.reset_index(drop=True)\n \n _df[\"target\"] = 0\n \n return _df", "_____no_output_____" ], [ "#===========================================================\n# model\n#===========================================================\ndef run_lgbm(target_col: str):\n target = get_target(target_col, annot_sig)\n target_rate = target.sum() / len(target)\n \n # Estimate test target rate\n #target_rate *= (-0.001*target.sum()+1.1)\n Adj_target_rate = (2 * target_rate) / (target.sum()**0.15)\n \n trt = train[target==1].copy().reset_index(drop=True)\n trt[\"target\"] = 1\n trt = trt.drop(\"sig_id\", axis=1)\n \n logger.info(f\"{target_col}, len(trt):{len(trt)}, target_rate:{target_rate:.7f} → Adj_target_rate:{Adj_target_rate:.7f}\")\n \n othr = Multi_Stratification(train, target_col, target)\n \n X_train = pd.concat([trt, othr], axis=0, sort=False, ignore_index=True) \n y_train = X_train[\"target\"]\n X_train = X_train.drop(\"target\", axis=1)\n\n sm = SMOTE(0.1, k_neighbors=3, n_jobs=2, random_state=SEED)\n X_train, y_train = sm.fit_sample(X_train, y_train)\n \n X_test = test.drop(\"sig_id\", axis=1)\n \n train_X, train_y, feature_importance_df_ = pseudo_labeling(X_train, y_train, X_test, target_rate, target_col)\n \n y_preds = []\n models = []\n oof_train = np.zeros((len(train_X),))\n score = 0\n \n for fold_, (train_index, valid_index) in enumerate(cv.split(train_X, train_y)):\n logger.info(f'len(train_index) : {len(train_index)}')\n logger.info(f'len(valid_index) : {len(valid_index)}')\n \n X_tr = train_X.loc[train_index, :]\n X_val = 
train_X.loc[valid_index, :]\n y_tr = train_y[train_index]\n y_val = train_y[valid_index]\n\n lgb_train = lgb.Dataset(X_tr,\n y_tr,\n categorical_feature=categorical_cols)\n\n lgb_eval = lgb.Dataset(X_val,\n y_val,\n reference=lgb_train,\n categorical_feature=categorical_cols)\n \n logger.info(f\"================================= fold {fold_+1}/{cv.get_n_splits()} {target_col}=================================\")\n \n \n model = lgb.train(params,\n lgb_train,\n valid_sets=[lgb_train, lgb_eval],\n verbose_eval=100,\n num_boost_round=Num_boost_round,\n early_stopping_rounds=Early_stopping_rounds)\n \n oof_train[valid_index] = model.predict(X_val, num_iteration=model.best_iteration)\n\n y_pred = model.predict(X_test, num_iteration=model.best_iteration)\n y_preds.append(y_pred)\n models.append(model)\n \n score = log_loss(train_y, oof_train)\n \n logger.info(f\"{target_col} logloss: {score}\")\n logger.info(f\"=========================================================================================\")\n \n return sum(y_preds) / len(y_preds), score, models, feature_importance_df_", "_____no_output_____" ], [ "def convert_label(df, conf_0, conf_1, threshold=0.5):\n df = df.copy()\n Probability = df.iloc[:,0]\n # Remove low confidence labels\n conf_index = df[(Probability <= conf_0) & (conf_1 <= Probability)].index.values\n \n Probability = Probability.where(Probability < threshold, 1).copy()\n p_label = Probability.where(Probability >= threshold, 0).copy()\n \n return p_label, conf_index", "_____no_output_____" ], [ "classifier_params = {\n 'max_depth': Max_depth,\n 'num_leaves': int((Max_depth**2)*0.7),\n 'n_estimators': Num_boost_round,\n 'learning_rate': 0.03,\n 'objective': \"binary\",\n 'colsample_bytree':0.4,\n 'subsample':0.8,\n 'subsample_freq':5,\n 'reg_alpha':0.1,\n 'reg_lambda':0.1,\n 'random_state':SEED,\n 'n_jobs':2,\n}", "_____no_output_____" ], [ "#===========================================================\n# pseudo_labeling\n#===========================================================\n\ndef pseudo_labeling(X_train, y_train, X_test, target_rate, target_col, max_iter=3):\n \n X = X_train.copy()\n y = y_train.copy()\n feature_importance_df = pd.DataFrame()\n \n for iter_ in range(1, max_iter+1):\n \n logger.info(f\"================= Pseudo labeling {iter_} / {max_iter} =================\")\n \n y_preds = np.zeros((X.shape[0], 2))\n y_preds[:, 0] = y.copy()\n \n y_prob = np.zeros((X_test.shape[0]))\n \n X_conf = pd.DataFrame()\n y_conf = pd.DataFrame()\n _importance_df = pd.DataFrame()\n _importance_df[\"Feature\"] = X.columns\n \n for fold_, (train_idx, valid_idx) in enumerate(cv.split(X, y)): \n X_tr, X_val = X.loc[train_idx, :], X.loc[valid_idx, :]\n y_tr, y_val = y[train_idx], y[valid_idx]\n \n clf = LGBMClassifier(**classifier_params)\n \n clf.fit(X_tr, y_tr,\n eval_set=[(X_tr, y_tr), (X_val, y_val)],\n eval_metric='logloss',\n verbose=100,\n early_stopping_rounds=Early_stopping_rounds)\n \n y_preds[valid_idx, 1] = clf.predict_proba(X_val, num_iteration=clf.best_iteration_)[:, 1]\n y_prob += clf.predict_proba(X_test, num_iteration=clf.best_iteration_)[:, 1] / N_FOLD\n \n # feature importance with target col\n _importance_df[\"importance\"] = clf.feature_importances_\n feature_importance_df = pd.concat([feature_importance_df, _importance_df], axis=0)\n \n auc_score = roc_auc_score(y_preds[:, 0], y_preds[:, 1])\n logger.info(f\"{iter_} / {max_iter} AUC score:{auc_score:.3f}\") \n y_preds = pd.DataFrame(y_preds, index=X.index, columns=[[\"Labels\", \"Preds\"]])\n \n if iter_ == 1: 
Threshold = y_preds.iloc[:, 1].quantile(0.89)\n \n logger.info(f\"Threshold: {Threshold}\")\n \n y_preds.iloc[:,1] = y_preds.iloc[:,1].where(y_preds.iloc[:,1] < Threshold, 1).copy()\n y_preds.iloc[:,1] = y_preds.iloc[:,1].where(y_preds.iloc[:,1] >= Threshold, 0).copy()\n y_preds = y_preds.sum(axis=1)\n \n corect_idx = y_preds[y_preds != 1].index.values\n X_corect, y_corect = X[X.index.isin(corect_idx)], y[y.index.isin(corect_idx)]\n \n logger.info(f\"Remove_noisy_labels: {len(y)-len(y_corect)} → positive_corect_labels: {sum(y_corect)}/{len(y_corect)}\")\n \n # Remove low confidence labels\n y_prob = pd.DataFrame(y_prob, index=X_test.index, columns=[\"probability\"])\n \n percentile = y_prob.probability.quantile(0.3)\n high_conf_0 = min(y_prob.probability.min()*30, percentile)\n high_conf_1 = max(y_prob.probability.max()*0.6,Threshold)\n logger.info(f\"30th percentile: {percentile:.7f}\")\n \n p_label, conf_idx = convert_label(y_prob, high_conf_0, high_conf_1, Threshold)\n \n p_label_rate = sum(p_label)/len(p_label) \n logger.info(f\"p_label_rate: {p_label_rate:.7f} Vs.target_rate: {target_rate:.5f}, Num_p_label: {sum(p_label)}, conf_0:{high_conf_0:.5f}, conf_1:{high_conf_1:.5f}\")\n \n # Set the params of threshold based on train labels rate (target_rate).\n # target_rate = target.sum() / len(target)\n \n if p_label_rate*3 < target_rate:\n check = len(y_prob)*target_rate\n for i in range(10):\n logger.info(f\"Num_p_label: {sum(p_label)}, Expected: {check:.1f}, Adj_threshold_{i+1}: {Threshold:.7f}\")\n if sum(p_label)*1.5 >= check: break \n if (Threshold-0.005) < 0: break\n Threshold -= 0.005\n high_conf_1 = max(y_prob.probability.max()*0.6,Threshold)\n p_label, conf_idx = convert_label(y_prob, high_conf_0, high_conf_1, Threshold)\n \n \n if p_label_rate > target_rate*3:\n check = len(y_prob)*target_rate\n for i in range(10):\n logger.info(f\"Num_p_label: {sum(p_label)}, Expected: {check:.1f}, Adj_threshold_{i+1}: {Threshold:.7f}\")\n if sum(p_label) <= check*1.5: break\n if (Threshold+0.005) > 0.99: break\n Threshold += 0.005\n high_conf_1 = max(y_prob.probability.max()*0.6,Threshold)\n p_label, conf_idx = convert_label(y_prob, high_conf_0, high_conf_1, Threshold)\n \n if iter_ == max_iter:\n X_conf = X_test.copy()\n else:\n X_conf = X_test[X_test.index.isin(conf_idx)].copy()\n \n logger.info(f\"threshold:{Threshold:.7f}, positive p_label:{sum(p_label)}/{len(p_label)}, p_label_rate: {sum(p_label)/len(p_label):.7f}\")\n \n X = pd.concat([X_corect, X_conf], axis=0, ignore_index=True)\n y = pd.concat([y_corect, p_label], axis=0, ignore_index=True)\n \n X = X.drop_duplicates(keep=\"last\").reset_index(drop=True)\n y = y[X.index.values].reset_index(drop=True)\n \n logger.info(f\"positive y_label:{sum(y)}/{len(y)}, y_label_rate: {sum(y)/len(y):.7f}\")\n \n if DEBUG:\n show_feature_importance(feature_importance_df, target_col, num=10)\n \n return X, y, feature_importance_df", "_____no_output_____" ], [ "categorical_cols = []\nfeature_importance_df = pd.DataFrame()\nimportance_cols_df = pd.DataFrame()\nscores = []\nmodels = []\n\nfor target_col in tqdm(train_targets_scored.columns[1:]):\n _preds, _score, models, _feature_importance_df = run_lgbm(target_col)\n\n sub[target_col] = _preds\n scores.append(_score)\n \n if DEBUG:\n if _score > 0.02:\n importance_cols_df[target_col] = select_importance_cols(_feature_importance_df)\n print(importance_cols_df)\n \n feature_importance_df = create_featureimprotance(models, feature_importance_df)", "_____no_output_____" ], [ "def 
show_feature_importance(feature_importance_df, title=\"all\", num=100):\n cols = (feature_importance_df[[\"Feature\", \"importance\"]]\n .groupby(\"Feature\")\n .mean()\n .sort_values(by=\"importance\", ascending=False)[:num].index)\n \n best_features = feature_importance_df.loc[feature_importance_df.Feature.isin(cols)]\n \n hight = int(num//3.3)\n plt.figure(figsize=(8, hight))\n sns.barplot(x=\"importance\", \n y=\"Feature\", \n data=best_features.sort_values(by=\"importance\", ascending=False))\n plt.title(f'{title}_Features importance (averaged)')\n plt.tight_layout()\n plt.savefig(f\"./{title}_feature_importance_{Version}.png\")\n plt.show()", "_____no_output_____" ] ], [ [ "# Preprocessing\n\nWe have to convert some categorical features into numbers in train and test. We can identify categorical features by `pd.DataFrame.select_dtypes`.", "_____no_output_____" ] ], [ [ "train.head()", "_____no_output_____" ], [ "train.select_dtypes(include=['object']).columns", "_____no_output_____" ], [ "train, test = label_encoding(train, test, ['cp_type', 'cp_time', 'cp_dose'])", "_____no_output_____" ], [ "train['WHERE'] = 'train'\ntest['WHERE'] = 'test'\n\ndata = train.append(test)\ndata = data.reset_index(drop=True)\ndata", "_____no_output_____" ], [ "# Select control data\nctl = train[(train.cp_type==0)].copy()\nctl = ctl.reset_index(drop=True)\nctl", "_____no_output_____" ], [ "# clipping\n\ndef outlaier_clip(df):\n df = df.copy()\n clipping = df.columns[4:6]\n for col in clipping:\n lower, upper= np.percentile(df[col], [10, 90])\n df[col] = np.clip(df[col], lower, upper)\n \n return df\n\nctl_df = pd.DataFrame(columns=train.columns)\nfor i in ctl.cp_time.unique():\n for j in ctl.cp_dose.unique():\n print(len(ctl[(ctl.cp_time==i) & (ctl.cp_dose==j)]))\n tmp_ctl = ctl[(ctl.cp_time==i) & (ctl.cp_dose==j)]\n tmp_ctl = outlaier_clip(tmp_ctl)\n ctl_df = pd.concat([ctl_df, tmp_ctl], axis=0).reset_index(drop=True)\nctl_df", "343\n305\n301\n305\n307\n305\n" ], [ "col_list = list(data.columns)[:-1]\ndata_df = pd.DataFrame(columns=col_list)\nSplitdata = []\nd = 1e-6\n\nfor i in tqdm(data.cp_time.unique()):\n for j in data.cp_dose.unique():\n select = data[(data.cp_time==i) & (data.cp_dose==j)]\n print(len(select))\n \n for k in list(select['WHERE']): Splitdata.append(k)\n \n select = select.drop(columns='WHERE')\n med = ctl[(ctl.cp_time==i) & (ctl.cp_dose==j)].iloc[:, 4:].median()\n \n f_div = lambda x: ((x+d)*10 / (abs(med)+d))**3\n select_div = select.iloc[:,4:].apply(f_div, axis=1).add_prefix('d_')\n tmp_data = pd.concat([select, select_div], axis=1, sort=False)\n \n \n f_diff = lambda x: ((x-med)*10)**2\n select_diff = select.iloc[:,4:].apply(f_diff, axis=1).add_prefix('df_')\n tmp_data = pd.concat([tmp_data, select_diff], axis=1, sort=False)\n \n data_df = pd.concat([data_df, tmp_data], axis=0)\n \ndata_df", "_____no_output_____" ], [ "# clipping\nclipping = data_df.columns[4:]\nfor col in tqdm(clipping):\n lower, upper = np.percentile(data_df[col], [1, 99])\n data_df[col] = np.clip(data_df[col], lower, upper)\ndata_df", "_____no_output_____" ], [ "data_df = data_df.replace([np.inf, -np.inf], np.nan)\ndata_df = data_df.dropna(how='any', axis=1)\ndata = data_df.copy()", "_____no_output_____" ], [ "g_list = [col for col in data.columns[4:] if col.startswith(\"g-\")]\nc_list = [col for col in data.columns[4:] if col.startswith(\"c-\")]\nd_g_list = [col for col in data.columns[4:] if col.startswith(\"d_g-\")]\nd_c_list = [col for col in data.columns[4:] if col.startswith(\"d_c-\")]\ndf_g_list = [col 
for col in data.columns[4:] if col.startswith(\"df_g-\")]\ndf_c_list = [col for col in data.columns[4:] if col.startswith(\"df_c-\")]\ng_all_list = g_list + d_g_list + df_g_list\nc_all_list = c_list + d_c_list + df_c_list", "_____no_output_____" ], [ "from sklearn.preprocessing import StandardScaler, QuantileTransformer\n \n# Z-score\n#scaler = StandardScaler(with_mean=True, with_std=True)\n\n# RankGauss\nscaler = QuantileTransformer(output_distribution='normal', random_state=SEED)\nsize = len(data[col].values)\n\n# Without Z-scored gene expression data\nfor col in tqdm(data.columns[4+len(g_list):]):\n \n raw = data[col].values.reshape(size, 1)\n scaler.fit(raw)\n\n data[col] = scaler.transform(raw).reshape(1, size)[0]\n \ndata", "_____no_output_____" ], [ "std_df = data.iloc[:, 4:].copy()", "_____no_output_____" ], [ "data_df.cp_type = data_df.cp_type.astype('int16')\ndata_df.cp_time = data_df.cp_time.astype('int16')\ndata_df.cp_dose = data_df.cp_dose.astype('int16')", "_____no_output_____" ], [ "from sklearn.cluster import KMeans\n\nn_clusters = 7\n\ndef create_cluster(data, features, kind, n_clusters):\n \n data_ = data[features].copy()\n kmeans = KMeans(n_clusters = n_clusters, random_state = SEED).fit(data_)\n data[f'clusters_{kind}'] = kmeans.labels_[:data.shape[0]]\n \n return data\n\n\ndef detect_cluster(data, feature_list, kind_list, n_clusters):\n \n for idx, feature in enumerate(tqdm(feature_list)):\n data = create_cluster(data, feature, kind=kind_list[idx], n_clusters=n_clusters)\n \n clusters = data.iloc[:, -len(feature_list):].copy()\n \n return clusters", "_____no_output_____" ], [ "feature_list = (g_list, c_list, d_g_list, d_c_list, df_g_list, df_c_list, g_all_list, c_all_list)\nkind_list = ('g', 'c', 'd_g', 'd_c', 'df_g', 'df_c', 'g_all', 'c_all')\n\n\nclusters = detect_cluster(data, feature_list, kind_list, n_clusters)\nclusters", "_____no_output_____" ], [ "# Count cluster types\nfor i in tqdm(range(n_clusters-1, -1, -1)):\n clusters[f\"cnt_{i}\"] = clusters.apply(lambda x: (x == i).sum(), axis=1)\nclusters", "_____no_output_____" ], [ "def fe_stats(df, features, kind):\n df_ = df.copy()\n MAX, MIN = df_[features].max(axis = 1), df_[features].min(axis = 1)\n Kurt = df_[features].kurtosis(axis = 1)\n Skew = df_[features].skew(axis = 1)\n \n df_[f'{kind}_max'] = MAX\n df_[f'{kind}_min'] = MIN\n df_[f'{kind}_max_min'] = (MAX * MIN)**2\n \n df_[f'{kind}_kurt'] = Kurt**3\n df_[f'{kind}_skew'] = Skew**3\n df_[f'{kind}_max_kurt'] = MAX * Kurt\n df_[f'{kind}_max_skew'] = MAX * Skew\n df_[f'{kind}_kurt_skew'] = Kurt * Skew\n \n df_[f'{kind}_sum'] = (df_[features].sum(axis = 1))**3\n df_[f'{kind}_mean'] = (df_[features].mean(axis = 1))**3\n df_[f'{kind}_median'] = (df_[features].median(axis = 1))**3\n df_[f'{kind}_mad'] = (df_[features].mad(axis = 1))**3\n df_[f'{kind}_std'] = (df_[features].std(axis = 1))**3\n\n return df_\n\ndef detect_stats(data, feature_list, kind_list):\n \n for idx, feature in enumerate(tqdm(feature_list)):\n data = fe_stats(data, feature, kind=kind_list[idx])\n\n stats = data.iloc[:, -9*len(feature_list):].copy()\n \n return stats", "_____no_output_____" ], [ "stats = detect_stats(data, feature_list, kind_list)\nstats", "_____no_output_____" ], [ "# Add data with sig_id, cp_type, cp_time, and cp_dose\ndata = pd.concat([data.iloc[:, :4], clusters], axis=1)\ndata = pd.concat([data, stats], axis=1)\ndata = pd.concat([data, std_df], axis=1)\ndata", "_____no_output_____" ], [ "# Create feature\nimport itertools\ndef CreateFeat(df):\n def func_product(row):\n 
return (row[col1]) * (row[col2])\n    def func_division(row):\n        delta = 1e-6\n        return (row[col1]+delta) / (row[col2]+delta) \n    \n    Columns = df.columns \n    for col1, col2 in tqdm(tuple(itertools.permutations(Columns, 2))):\n        df[f\"{col1}_{col2}_prd\"] = df[[col1, col2]].apply(func_product, axis=1)\n        df[f\"{col1}_{col2}_div\"] = round(df[[col1, col2]].apply(func_division, axis=1), 0)\n\n    print(f\"Created {len(df.columns) - len(Columns)} columns\")\n    return df\n\n# Create feature2\ndef CreateFeat2(df):\n    func_list = (\"max\", \"min\", \"mean\", \"median\", \"mad\", \"var\", \"std\")\n    Columns = df.columns \n    for idx, func in enumerate(func_list):\n        print(f\"{idx}/{len(func_list)}: Calculating... {func}\")\n        for col1, col2 in tqdm(tuple(itertools.permutations(Columns, 2))):\n            df[f\"{col1}_{col2}_{func}\"] = df[[col1, col2]].apply(func, axis=1)\n    print(f\"Created {len(df.columns) - len(Columns)} columns\")\n    return df\n\n\n#Reduce columns\ndef ReduceCol(df):\n    remove_cols = []\n    Columns = df.columns\n    \n    for col1, col2 in tqdm(tuple(itertools.permutations(Columns, 2))):\n        # constant columns\n        if df[col1].std() == 0: remove_cols.append(col1)\n        \n        # duplicated columns\n        if (col1 not in remove_cols) and (col2 not in remove_cols):\n            x, y = df[col1].values, df[col2].values\n            if np.array_equal(x, y): remove_cols.append(col1)\n\n    df.drop(remove_cols, inplace=True, axis=1)\n    print(f\"Removed {len(remove_cols)} constant & duplicated columns\")\n\n    return df", "_____no_output_____" ], [ "# Create feature based on feature importance with v24 notebook\n\n#important_col = []\n#tmp = CreateFeat(data[important_col])\n#data = pd.concat([data, tmp], axis=1)\n\n\n# Create feature based on feature importance with v24 notebook\n#tmp = CreateFeat2(data[important_col])\n#data = pd.concat([data, tmp], axis=1)\n\n#remove dup columns\n#data = data.loc[:,~data.columns.duplicated()]\n#tmp = ReduceCol(data.iloc[:,4:])\n#data = pd.concat([data.iloc[:,:4], tmp], axis=1)\n#data", "_____no_output_____" ], [ "# clipping\nclipping = data.columns[4:]\nfor col in clipping:\n    lower, upper = np.percentile(data[col], [1, 99])\n    data[col] = np.clip(data[col], lower, upper)\ndata", "_____no_output_____" ], [ "data['WHERE'] = Splitdata\ndata = data.sort_index(axis='index')\nSplitdata = data['WHERE'] \ndata ", "_____no_output_____" ], [ "from sklearn.feature_selection import VarianceThreshold\n\nvar_thresh = VarianceThreshold(0.99) \ndata_var_thresh = var_thresh.fit_transform(data.iloc[:, 4:-1])\n\nRemove_columns = np.array(data.columns[4:-1])[var_thresh.get_support()==False]\n\ntmp = pd.DataFrame(data_var_thresh, columns=np.array(data.columns[4:-1])[var_thresh.get_support()==True])\ndata = pd.concat([data.iloc[:,:4], tmp], axis=1)\n\nprint(f\"Remove {len(Remove_columns)} columns: {Remove_columns}\")", "Remove 1331 columns: ['cnt_6' 'cnt_5' 'cnt_4' ... 
'df_c-95' 'df_c-98' 'df_c-99']\n" ], [ "data['WHERE'] = Splitdata\ntrain = data[data['WHERE']==\"train\"].drop('WHERE', axis=1).reset_index(drop=True)\ntest = data[data['WHERE']==\"test\"].drop('WHERE', axis=1).reset_index(drop=True)", "_____no_output_____" ], [ "# Kolmogorov-Smirnov test applied for train data and test data.\n\nfrom scipy.stats import ks_2samp\n\ntr, ts = train.iloc[:, 4:], test.iloc[:, 4:]\nlist_p_value =[ks_2samp(ts[i], tr[i])[1] for i in tqdm(tr.columns)]\nSe = pd.Series(list_p_value, index=tr.columns).sort_values() \nlist_discarded = list(Se[Se < .1].index)\n\ntrain, test = train.drop(list_discarded, axis=1), test.drop(list_discarded, axis=1)\nprint(f\"Removed {len(list_discarded)} columns\")", "_____no_output_____" ] ], [ [ "# Modeling", "_____no_output_____" ] ], [ [ "cv = StratifiedKFold(n_splits=N_FOLD, shuffle=True, random_state=SEED)\n\nparams = {\n 'objective': 'binary',\n 'metric': 'binary_logloss',\n 'learning_rate': Learning_rate,\n 'num_threads': 2,\n 'verbose': -1,\n 'max_depth': Max_depth,\n 'num_leaves': int((Max_depth**2)*0.7),\n 'feature_fraction':0.4, # randomly select part of features on each iteration\n 'lambda_l1':0.1,\n 'lambda_l2':0.1,\n 'bagging_fraction': 0.8,\n 'bagging_freq': 5,\n}\n", "_____no_output_____" ], [ "def select_importance_cols(feature_importance_df, num=10):\n best_cols = (feature_importance_df[[\"Feature\", \"importance\"]]\n .groupby(\"Feature\")\n .mean()\n .sort_values(by=\"importance\", ascending=False)[:num].index)\n return best_cols", "_____no_output_____" ], [ "def create_featureimprotance(models, feature_importance_df):\n for model in models:\n _importance_df = pd.DataFrame()\n _importance_df[\"Feature\"] = train.columns[1:]\n _importance_df[\"importance\"] = model.feature_importance(importance_type='gain')\n feature_importance_df = pd.concat([feature_importance_df, _importance_df], axis=0)\n \n return feature_importance_df", "_____no_output_____" ], [ "categorical_cols = []\nfeature_importance_df = pd.DataFrame()\nimportance_cols_df = pd.DataFrame()\nscores = []\nmodels = []\n\nfor target_col in tqdm(train_targets_scored.columns[1:]):\n _preds, _score, models, _feature_importance_df = run_lgbm(target_col)\n\n sub[target_col] = _preds\n scores.append(_score)\n \n if DEBUG:\n if _score > 0.02:\n importance_cols_df[target_col] = select_importance_cols(_feature_importance_df)\n print(importance_cols_df)\n \n feature_importance_df = create_featureimprotance(models, feature_importance_df)", "_____no_output_____" ], [ "sub.to_csv('submission.csv', index=False)", "_____no_output_____" ], [ "print(f\"CV:{np.mean(scores)}\")", "_____no_output_____" ], [ "if DEBUG:\n show_feature_importance(feature_importance_df)\n feature_importance_df.to_csv(f'feature_importance_df.{Version}.csv', index=False)\n importance_cols_df.to_csv(f'importance_cols_df.{Version}.csv', index=False)", "_____no_output_____" ] ] ]
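Stripped of the cross-validation folds, label-noise removal and adaptive threshold, the pseudo-labeling loop in the record above reduces to the sketch below. A stock sklearn classifier stands in for `LGBMClassifier`, the threshold is fixed rather than adapted, and the record additionally deduplicates the grown training set each round:

```python
import numpy as np
from sklearn.linear_model import LogisticRegression

def pseudo_label(X_train, y_train, X_unlabeled, threshold=0.9, rounds=3):
    """Iteratively grow the training set with confident unlabeled predictions."""
    X, y = np.asarray(X_train), np.asarray(y_train)
    Xu = np.asarray(X_unlabeled)
    for _ in range(rounds):
        clf = LogisticRegression(max_iter=1000).fit(X, y)
        proba = clf.predict_proba(Xu)[:, 1]
        # keep only predictions that are confident on either side of 0.5
        confident = (proba >= threshold) | (proba <= 1 - threshold)
        if not confident.any():
            break
        X = np.vstack([X, Xu[confident]])
        y = np.concatenate([y, (proba[confident] >= threshold).astype(int)])
    return X, y
```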
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
e7d241651caf56cce509076f9f8da03d76a3b6ee
8,143
ipynb
Jupyter Notebook
OPM_Descriptors.ipynb
learningmatter-mit/Deep-Drug-Coder
1edc899a63332038afc3dd0f866af328527e82eb
[ "MIT" ]
5
2021-01-12T14:07:25.000Z
2021-09-08T00:51:01.000Z
OPM_Descriptors.ipynb
learningmatter-mit/Deep-Drug-Coder
1edc899a63332038afc3dd0f866af328527e82eb
[ "MIT" ]
null
null
null
OPM_Descriptors.ipynb
learningmatter-mit/Deep-Drug-Coder
1edc899a63332038afc3dd0f866af328527e82eb
[ "MIT" ]
null
null
null
38.961722
142
0.512465
[ [ [ "%load_ext autoreload\n%autoreload 2\n# Occupy a GPU for the model to be loaded \n%env CUDA_DEVICE_ORDER=PCI_BUS_ID\n# GPU ID, if occupied change to an available GPU ID listed under !nvidia-smi\n%env CUDA_VISIBLE_DEVICES=2\n\nimport numpy as np\nimport pandas as pd\nimport rdkit\nfrom rdkit import Chem\nimport h5py, ast, pickle\n\nfrom ddc_pub.vectorizers import SmilesVectorizer\nfrom ddc_pub import ddc_v3 as ddc", "The autoreload extension is already loaded. To reload it, use:\n %reload_ext autoreload\nenv: CUDA_DEVICE_ORDER=PCI_BUS_ID\nenv: CUDA_VISIBLE_DEVICES=2\n" ], [ "df = pd.read_csv('./datasets/OPD_Data/Desc_Training_Validation.csv')", "_____no_output_____" ], [ "mol_data = df['smiles'].tolist()\n\nbinmols = np.array([Chem.MolFromSmiles(x) for x in mol_data])\nsv = SmilesVectorizer()\nsv.fit(binmols)\nmaxlen = sv.maxlength + 35\ncharset = sv.charset", "_____no_output_____" ], [ "descr = df[['homo', 'gap', 'lumo']].values", "_____no_output_____" ], [ "# Name of the dataset\nname = \"OPD_Descr\"\n\ndataset_info = {\"charset\": charset, \"maxlen\": maxlen, \"name\": name}", "_____no_output_____" ], [ "# Initialize a model\nmodel = ddc.DDC(x = descr, # input\n y = binmols, # output\n dataset_info = dataset_info, # dataset information\n scaling = True, # scale the descriptors\n noise_std = 0.1, # std of the noise layer\n lstm_dim = 512, # breadth of LSTM layers\n dec_layers = 3, # number of decoding layers\n batch_size = 128) # batch size for training", "Initializing model in train mode.\nInput type is 'molecular descriptors'.\nApplying scaling on input.\nModel received 12251 train samples and 1362 validation samples.\nModel: \"model_2\"\n__________________________________________________________________________________________________\nLayer (type) Output Shape Param # Connected to \n==================================================================================================\nLatent_Input (InputLayer) [(None, 3)] 0 \n__________________________________________________________________________________________________\nDecoder_Inputs (InputLayer) [(None, 244, 47)] 0 \n__________________________________________________________________________________________________\nlatent_to_states_model (Model) [(None, 512), (None, 24576 Latent_Input[0][0] \n__________________________________________________________________________________________________\nbatch_model (Model) (None, 244, 47) 5381679 Decoder_Inputs[0][0] \n latent_to_states_model[1][0] \n latent_to_states_model[1][1] \n latent_to_states_model[1][2] \n latent_to_states_model[1][3] \n latent_to_states_model[1][4] \n latent_to_states_model[1][5] \n==================================================================================================\nTotal params: 5,406,255\nTrainable params: 5,397,039\nNon-trainable params: 9,216\n__________________________________________________________________________________________________\nNone\n" ], [ "model.fit(epochs = 100, # number of epochs\n lr = 1e-3, # initial learning rate for Adam, recommended\n model_name = \"opd_descr\", # base name to append the checkpoints with\n checkpoint_dir = \"./models/\", # save checkpoints in the notebook's directory\n mini_epochs = 10, # number of sub-epochs within an epoch to trigger lr decay\n save_period = 50, # checkpoint frequency (in mini_epochs)\n lr_decay = True, # whether to use exponential lr decay or not\n sch_epoch_to_start = 500, # mini-epoch to start lr decay (bypassed if lr_decay=False)\n sch_lr_init = 1e-3, # initial lr, should be equal to 
lr (bypassed if lr_decay=False)\n sch_lr_final = 1e-6, # final lr before finishing training (bypassed if lr_decay=False)\n patience = 25) # patience for Keras' ReduceLROnPlateau (bypassed if lr_decay=True)", "WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of samples seen.\n\nModel trained with dataset OPD_Descr that has maxlen=240 and charset=]Z=g@N78Ma/+(PlC9I2s3)S6%rc0Hp5oFeO#1-\\[Bin4^$? for 2 epochs.\nnoise_std: 0.100000, lstm_dim: 512, dec_layers: 3, td_dense_dim: 0, batch_size: 128, codelayer_dim: 3, lr: 0.001000.\n\nEpoch 00001: LearningRateScheduler reducing learning rate to 0.0010000000474974513.\nEpoch 1/2\nModel saved in ./models/opd_descr--01--0.4807--0.0010000.\n95/95 - 31s - loss: 0.5956 - val_loss: 0.4807\n\nEpoch 00002: LearningRateScheduler reducing learning rate to 0.0010000000474974513.\nEpoch 2/2\nModel saved in ./models/opd_descr--02--0.3681--0.0010000.\n95/95 - 32s - loss: 0.3277 - val_loss: 0.3681\n" ] ] ]
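For intuition about what `SmilesVectorizer` supplies above (a charset and a maximum length used to size the model inputs), here is a hand-rolled one-hot sketch of the same idea. The `^`/`$` start/end tokens and the space used as padding are assumptions for illustration, not ddc_pub's actual internals:

```python
import numpy as np

def fit_charset(smiles_list):
    """Collect the characters seen in training SMILES, plus special tokens."""
    charset = sorted(set("".join(smiles_list)) | {"^", "$", " "})
    return charset, max(len(s) for s in smiles_list)

def one_hot(smiles, charset, maxlen):
    """Pad a SMILES string to maxlen and one-hot encode it."""
    idx = {c: i for i, c in enumerate(charset)}
    out = np.zeros((maxlen, len(charset)), dtype=np.int8)
    for t, ch in enumerate(("^" + smiles + "$").ljust(maxlen)):
        out[t, idx[ch]] = 1
    return out

charset, maxlen = fit_charset(["CCO", "c1ccccc1"])
x = one_hot("CCO", charset, maxlen + 2)  # +2 leaves room for ^ and $
```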
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code" ] ]
e7d24de059bece959a446b28b8281f26b092b264
120,192
ipynb
Jupyter Notebook
notebooks/02-20_9_16-generating_features.ipynb
sfblake/Record_Price_Predictor
a428863116a4940fe3c77cba0b31dea9de265f50
[ "MIT" ]
null
null
null
notebooks/02-20_9_16-generating_features.ipynb
sfblake/Record_Price_Predictor
a428863116a4940fe3c77cba0b31dea9de265f50
[ "MIT" ]
null
null
null
notebooks/02-20_9_16-generating_features.ipynb
sfblake/Record_Price_Predictor
a428863116a4940fe3c77cba0b31dea9de265f50
[ "MIT" ]
1
2019-07-15T22:33:09.000Z
2019-07-15T22:33:09.000Z
70.370023
31,868
0.759868
[ [ [ "%matplotlib inline\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "albums = pd.read_csv('../data/raw/albums.csv',encoding='utf-8',index_col=0)\n#Pick out only the albums that have been sold\nalbums = albums[pd.notnull(albums['median price sold'])]", "_____no_output_____" ], [ "#Define our cost measures\ndef get_mean_diff(y_test,y_pred):\n \"\"\"Calculate the mean difference between test and prediction\"\"\"\n mean_diff = np.mean(abs(y_test - y_pred))\n return mean_diff\n\ndef get_score(y_test, y_pred):\n \"\"\"Calculate the R^2 score from two numpy array inputs, using a guess of the median as a benchmark\"\"\"\n u = sum((y_test - y_pred)**2)\n v = sum((y_test - np.mean(y_test))**2)\n R_sq = 1 - u/v\n return R_sq", "_____no_output_____" ], [ "from sklearn.ensemble import RandomForestRegressor\nfrom sklearn.cross_validation import KFold\nfrom sklearn.preprocessing import StandardScaler\n\n#Define our predicting algorithm\ndef get_random_forest_predictions(new_albums, predictors):\n \"\"\"Predict the price of each recors in albums from the features in predictors, using a random forest regressor\"\"\"\n\n #Do a random forest regression\n alg = RandomForestRegressor(min_samples_leaf = 3, n_estimators = 100)\n\n #Split the data into 10 subsets\n kf = KFold(new_albums.shape[0], n_folds = 10, random_state = 1)\n predictions = []\n scores = []\n\n #Train and predict\n for train, test in kf:\n X_train = (new_albums[predictors].iloc[train,:])\n y_train = new_albums['median price sold'].iloc[train]\n #Scale all the features\n scaler = StandardScaler().fit(X_train)\n X_train_transformed = scaler.transform(X_train)\n alg.fit(X_train_transformed, y_train)\n X_test = (new_albums[predictors].iloc[test,:])\n y_test = new_albums['median price sold'].iloc[test]\n test_predictions = alg.predict(scaler.transform(X_test))\n predictions.append(test_predictions)\n\n predictions = np.concatenate(predictions,axis = 0)\n #If anything is predicted a negative price, set this to zero\n predictions[predictions<0] = 0\n \n return predictions", "_____no_output_____" ] ], [ [ "Before adding any more features, let's check the performance with the initial set.", "_____no_output_____" ] ], [ [ "#Use the following predictors\npredictors = ['compilation','number for sale', 'number have', 'number of ratings', 'number of tracks',\n 'number of versions', 'number on label', 'number on label for sale', 'number want']\n\n#Randomly shuffle the row order\nnew_albums = albums.sample(frac=1)\n\n#Generate predictions\npredictions = get_random_forest_predictions(new_albums, predictors)\n\n#Calculate\nmean_diff = get_mean_diff(np.array(new_albums['median price sold']), predictions)\nscore = get_score(np.array(new_albums['median price sold']), predictions)\n\nprint('Random forest regression on initial features'\n ' gives £{0:.2f} mean difference and {1:.4f} score'.format(mean_diff,score))", "Random forest regression on initial features gives £19.64 mean difference and 0.3423 score\n" ] ], [ [ "Let's monitor how these (hopefully) improve as we add more features.", "_____no_output_____" ] ], [ [ "mean_diff_each_step = [mean_diff]\nscore_each_step = [score]\neach_step = ['Initial features']", "_____no_output_____" ] ], [ [ "## Year of release\n\nThis should have a large bearing on the price, but the years of some releases are unknown. 
Let's make some assumptions about these to generate the feature.", "_____no_output_____" ] ], [ [ "#Convert the known years to integers\nalbums['integer year'] = [int(year[-4:]) if not pd.isnull(year) else year for year in albums['year']]\n\n#For the unknown years\nfor index in albums[albums['year'].isnull()].index:\n ave_artist_year = np.round(albums.ix[albums['artist'] == albums.ix[index,'artist'],'integer year'].mean())\n ave_label_year = np.round(albums.ix[albums['label'] == albums.ix[index,'label'],'integer year'].mean())\n #If the artist has released other albums, take the year to be the artist's mean\n if not pd.isnull(ave_artist_year):\n albums.ix[index,'integer year'] = ave_artist_year\n #Otherwise if the label has released other albums, take the year to be the label's mean\n elif not pd.isnull(ave_label_year):\n albums.ix[index,'integer year'] = ave_label_year\n #Otherwise set the year to be the mean\n else:\n albums.ix[index,'integer year'] = albums['integer year'].mean()", "_____no_output_____" ], [ "predictors = ['compilation','number for sale', 'number have', 'number of ratings', 'number of tracks',\n 'number of versions', 'number on label', 'number on label for sale', 'number want', \n 'integer year']\n\n#Randomly shuffle the row order\nnew_albums = albums.sample(frac=1)\n\n#Generate predictions\npredictions = get_random_forest_predictions(new_albums, predictors)\n\n#Calculate\nmean_diff = get_mean_diff(np.array(new_albums['median price sold']), predictions)\nscore = get_score(np.array(new_albums['median price sold']), predictions)\n\nprint('Random forest regression with year added'\n ' gives £{0:.2f} mean difference and {1:.4f} score'.format(mean_diff,score))\n\nmean_diff_each_step.append(mean_diff)\nscore_each_step.append(score)\neach_step.append('Year of release')", "Random forest regression with year added gives £18.29 mean difference and 0.4012 score\n" ] ], [ [ "So knowing the year helps a lot.", "_____no_output_____" ], [ "## Average rating\n\nA lot of releases also have a rating given by users.", "_____no_output_____" ] ], [ [ "albums[~albums['average rating'].isnull()]['average rating'].hist(bins=100)\nplt.xlabel('Average Rating')\nplt.ylabel('Count')\nplt.title('Distribution of average ratings');", "_____no_output_____" ], [ "for i in range(1,6):\n print(str(i) + '* rated albums have mean price £{0:.2f}'.format(albums.ix[(albums['average rating'] <= i)\n & (albums['average rating'] > i-1),\n 'median price sold'].mean()))\nprint('Unrated albums have mean price £{0:.2f}'.format(albums.ix[albums['average rating'].isnull(),\n 'median price sold'].mean()))", "1* rated albums have mean price £38.79\n2* rated albums have mean price £32.74\n3* rated albums have mean price £36.40\n4* rated albums have mean price £24.29\n5* rated albums have mean price £37.11\nUnrated albums have mean price £41.45\n" ] ], [ [ "These are distributed towards the high end (as expected: we're looking at the most wanted records), but a higher rating does not necessarily correspond to a more expensive record.\n\nLet's first try giving the unrated records the mean rating.", "_____no_output_____" ] ], [ [ "#If no rating is given, set it to be the average\nalbums['new rating'] = albums['average rating']\nalbums.ix[albums['new rating'].isnull(),'new rating'] = albums['new rating'].mean()\n\npredictors = ['compilation','number for sale', 'number have', 'number of ratings', 'number of tracks',\n 'number of versions', 'number on label', 'number on label for sale', 'number want', \n 'integer year', 'new 
rating']\n\n#Randomly shuffle the row order\nnew_albums = albums.sample(frac=1)\n\n#Generate predictions\npredictions = get_random_forest_predictions(new_albums, predictors)\n\n#Calculate\nmean_diff = get_mean_diff(np.array(new_albums['median price sold']), predictions)\nscore = get_score(np.array(new_albums['median price sold']), predictions)\n\nprint('Assuming the mean rating for those releases unrated'\n ' gives £{0:.2f} mean difference and {1:.4f} score'.format(mean_diff,score))", "Assuming the mean rating for those releases unrated gives £18.33 mean difference and 0.4007 score\n" ] ], [ [ "What about setting the unrated records to 0, or 6?", "_____no_output_____" ] ], [ [ "#If no rating is given, set it to be the average\nalbums['new rating'] = albums['average rating']\nalbums.ix[albums['new rating'].isnull(),'new rating'] = 0\n\npredictors = ['compilation','number for sale', 'number have', 'number of ratings', 'number of tracks',\n 'number of versions', 'number on label', 'number on label for sale', 'number want', \n 'integer year', 'new rating']\n\n#Randomly shuffle the row order\nnew_albums = albums.sample(frac=1)\n\n#Generate predictions\npredictions = get_random_forest_predictions(new_albums, predictors)\n\n#Calculate\nmean_diff = get_mean_diff(np.array(new_albums['median price sold']), predictions)\nscore = get_score(np.array(new_albums['median price sold']), predictions)\n\nprint('Assuming a rating of 0 for those releases unrated'\n ' gives £{0:.2f} mean difference and {1:.4f} score'.format(mean_diff,score))", "Assuming a rating of 0 for those releases unrated gives £18.40 mean difference and 0.4018 score\n" ], [ "#If no rating is given, set it to be the average\nalbums['new rating'] = albums['average rating']\nalbums.ix[albums['new rating'].isnull(),'new rating'] = 6\n\npredictors = ['compilation','number for sale', 'number have', 'number of ratings', 'number of tracks',\n 'number of versions', 'number on label', 'number on label for sale', 'number want', \n 'integer year', 'new rating']\n\n#Randomly shuffle the row order\nnew_albums = albums.sample(frac=1)\n\n#Generate predictions\npredictions = get_random_forest_predictions(new_albums, predictors)\n\n#Calculate\nmean_diff = get_mean_diff(np.array(new_albums['median price sold']), predictions)\nscore = get_score(np.array(new_albums['median price sold']), predictions)\n\nprint('Assuming a rating of 6 for those releases unrated'\n ' gives £{0:.2f} mean difference and {1:.4f} score'.format(mean_diff,score))", "Assuming a rating of 6 for those releases unrated gives £18.33 mean difference and 0.4021 score\n" ] ], [ [ "So assigning a 6 for the unrated items gives the best score.", "_____no_output_____" ] ], [ [ "albums.ix[albums['average rating'].isnull(),'average rating'] = 6\n\npredictors = ['compilation','number for sale', 'number have', 'number of ratings', 'number of tracks',\n 'number of versions', 'number on label', 'number on label for sale', 'number want', \n 'integer year', 'average rating']\n\n#Randomly shuffle the row order\nnew_albums = albums.sample(frac=1)\n\n#Generate predictions\npredictions = get_random_forest_predictions(new_albums, predictors)\n\n#Calculate\nmean_diff = get_mean_diff(np.array(new_albums['median price sold']), predictions)\nscore = get_score(np.array(new_albums['median price sold']), predictions)\n\nmean_diff_each_step.append(mean_diff)\nscore_each_step.append(score)\neach_step.append('Average rating')", "_____no_output_____" ] ], [ [ "## Number of records \nObviously all the records are LPs, 
but some may be double or even triple, let's add a feature of the number of records.", "_____no_output_____" ] ], [ [ "albums['format'].unique()", "_____no_output_____" ], [ "mean_diff = get_mean_diff(np.array(new_albums['median price sold'][new_albums['format']!='Vinyl']),\n predictions[np.array(new_albums['format']!='Vinyl')])\nscore = get_score(np.array(new_albums['median price sold'][new_albums['format']!='Vinyl']),\n predictions[np.array(new_albums['format']!='Vinyl')])\nprint('For boxsets we have £{0:.2f} mean difference and {1:.4f} score'.format(mean_diff, score))", "For boxsets we have £15.45 mean difference and 0.3669 score\n" ] ], [ [ "So these boxsets are not being served well by the current predictor.", "_____no_output_____" ] ], [ [ "#If the format is 'Vinyl', assume one record\n#Otherwise extract the number\nalbums['number of records'] = [1 if format_str == 'Vinyl' \n else [s for s in format_str.split() if s.isdigit()][0] if '×' in format_str\n else None\n for format_str in albums['format']]\n#For the formats labelled just 'box set' look for the number of records in format details\nfor index in albums[albums['number of records'].isnull()].index:\n split_formats = albums.ix[index, 'format details'].split(';')\n num_records = [[s for s in format_str.split() if s.isdigit()][0] \n for format_str in split_formats if '×' in format_str]\n #Assume the maximum number is correct\n if len(num_records) != 0:\n albums.ix[index,'number of records'] = max(num_records)\n #If no number specified, assume 1\n else:\n albums.ix[index,'number of records'] = 1", "_____no_output_____" ], [ "predictors = ['compilation','number for sale', 'number have', 'number of ratings', 'number of tracks',\n 'number of versions', 'number on label', 'number on label for sale', 'number want', \n 'integer year', 'average rating', 'number of records']\n\n#Randomly shuffle the row order\nnew_albums = albums.sample(frac=1)\n\n#Generate predictions\npredictions = get_random_forest_predictions(new_albums, predictors)\n\n#Calculate\nmean_diff = get_mean_diff(np.array(new_albums['median price sold']), predictions)\nscore = get_score(np.array(new_albums['median price sold']), predictions)\n\nprint('Random forest with number of records added'\n ' gives £{0:.2f} mean difference and {1:.4f} score'.format(mean_diff,score))\n\nmean_diff_each_step.append(mean_diff)\nscore_each_step.append(score)\neach_step.append('Number of records')", "Random forest with number of records added gives £18.00 mean difference and 0.4124 score\n" ], [ "mean_diff = get_mean_diff(np.array(new_albums['median price sold'][new_albums['format']!='Vinyl']),\n predictions[np.array(new_albums['format']!='Vinyl')])\nscore = get_score(np.array(new_albums['median price sold'][new_albums['format']!='Vinyl']),\n predictions[np.array(new_albums['format']!='Vinyl')])\nprint('For boxsets we now have £{0:.2f} mean difference and {1:.4f} score'.format(mean_diff, score))", "For boxsets we now have £14.33 mean difference and 0.4546 score\n" ] ], [ [ "So adding the number of records doesn't affect the score much, but offers a great improvement for boxsets themselves.", "_____no_output_____" ], [ "## Limited editions\n\nSome records are described as 'limited edition' in either the format details or release notes. 
This could correlate with higher prices so let's add a binary variable for it.", "_____no_output_____" ] ], [ [ "#Pick out if limited edition is specified in format details or notes\nalbums.ix[albums['notes'].isnull(),'notes'] = ''\nalbums['limited edition'] = [(('Limited Edition' in albums.ix[index,'format details']) or \n ('Limited Edition' in albums.ix[index,'notes']))\n for index in albums.index]", "_____no_output_____" ], [ "print('Limited edition albums have mean price £{0:.2f}'.format(albums.ix[albums['limited edition'] == 1,\n 'median price sold'].mean()))\nprint('Non-limited edition albums have mean price £{0:.2f}'.format(albums.ix[albums['limited edition'] == 0,\n 'median price sold'].mean()))", "Limited edition albums have mean price £38.76\nNon-limited edition albums have mean price £35.06\n" ], [ "predictors = ['compilation','number for sale', 'number have', 'number of ratings', 'number of tracks',\n 'number of versions', 'number on label', 'number on label for sale', 'number want', \n 'integer year', 'average rating', 'number of records', 'limited edition']\n\n#Randomly shuffle the row order\nnew_albums = albums.sample(frac=1)\n\n#Generate predictions\npredictions = get_random_forest_predictions(new_albums, predictors)\n\n#Calculate\nmean_diff = get_mean_diff(np.array(new_albums['median price sold']), predictions)\nscore = get_score(np.array(new_albums['median price sold']), predictions)\n\nprint('Random forest with limited edition added'\n ' gives £{0:.2f} mean difference and {1:.4f} score'.format(mean_diff,score))\n\nmean_diff_each_step.append(mean_diff)\nscore_each_step.append(score)\neach_step.append('Limited edition')", "Random forest with limited edition added gives £18.08 mean difference and 0.4018 score\n" ] ], [ [ "Seems to make it worse.", "_____no_output_____" ], [ "## Reissues\n\nReissues are likely to be cheaper, while the original records that have been reissued will be more in demand, and so more expensive. 
\nSome reissues are specified in the format details and notes.", "_____no_output_____" ] ], [ [ "#Pick out if reissue is specified in format details or notes\nalbums.ix[albums['notes'].isnull(),'notes'] = ''\nalbums['reissue'] = [(('Reissue' in albums.ix[index,'format details']) or ('Reissue' in albums.ix[index,'notes'])) \n for index in albums.index]", "_____no_output_____" ], [ "print('Reissue albums have mean price £{0:.2f}'.format(albums.ix[albums['reissue'] == 1,\n 'median price sold'].mean()))\nprint('Non-reissue albums have mean price £{0:.2f}'.format(albums.ix[albums['reissue'] == 0,\n 'median price sold'].mean()))", "Reissue albums have mean price £20.38\nNon-reissue albums have mean price £39.12\n" ], [ "predictors = ['compilation','number for sale', 'number have', 'number of ratings', 'number of tracks',\n 'number of versions', 'number on label', 'number on label for sale', 'number want', \n 'integer year', 'average rating', 'number of records', 'limited edition', 'reissue']\n\n#Randomly shuffle the row order\nnew_albums = albums.sample(frac=1)\n\n#Generate predictions\npredictions = get_random_forest_predictions(new_albums, predictors)\n\n#Calculate\nmean_diff = get_mean_diff(np.array(new_albums['median price sold']), predictions)\nscore = get_score(np.array(new_albums['median price sold']), predictions)\n\nprint('Random forest with reissue added'\n ' gives £{0:.2f} mean difference and {1:.4f} score'.format(mean_diff,score))", "Random forest with reissue added gives £18.03 mean difference and 0.4012 score\n" ] ], [ [ "Not a great difference, and makes no distinction whether a record has itself been reissued. \nWe could perhaps get a better grasp of reissues if we look at the difference between year of release between versions.", "_____no_output_____" ] ], [ [ "#If other versions have been released, find the difference to the earliest and latest versions\nalbums.ix[albums['years of versions'].isnull(),'years of versions'] = ''\n\nalbums['list version years'] = [[int(s) for s in albums.ix[index,'years of versions'].split('; ') if s.isdigit()] \n + [int(albums.ix[index,'integer year'])] for index in albums.index]\n\nalbums['difference to earliest version'] = [min(albums.ix[index,'list version years']) - int(albums.ix[index,'integer year'])\n for index in albums.index]\nalbums['difference to latest version'] = [max(albums.ix[index,'list version years']) - int(albums.ix[index,'integer year'])\n for index in albums.index]", "_____no_output_____" ], [ "predictors = ['compilation','number for sale', 'number have', 'number of ratings', 'number of tracks',\n 'number of versions', 'number on label', 'number on label for sale', 'number want', \n 'integer year', 'average rating', 'number of records', 'limited edition', \n 'reissue', 'difference to earliest version', 'difference to latest version']\n\n#Randomly shuffle the row order\nnew_albums = albums.sample(frac=1)\n\n#Generate predictions\npredictions = get_random_forest_predictions(new_albums, predictors)\n\n#Calculate\nmean_diff = get_mean_diff(np.array(new_albums['median price sold']), predictions)\nscore = get_score(np.array(new_albums['median price sold']), predictions)\n\nprint('Random forest with years to reissues added'\n ' gives £{0:.2f} mean difference and {1:.4f} score'.format(mean_diff,score))\n\nmean_diff_each_step.append(mean_diff)\nscore_each_step.append(score)\neach_step.append('Reissues')", "Random forest with years to reissues added gives £17.86 mean difference and 0.4075 score\n" ] ], [ [ "Together these improve both 
the mean difference and score.", "_____no_output_____" ], [ "## Countries\n\nThere are lots of different countries, many with few entries. First let's try binary variables for countries with more (>10) entries than the likely min leaf size in the random forest regressor (as otherwise they won't be used in the regressor).", "_____no_output_____" ] ], [ [ "albums.ix[albums['country'].isnull(),'country'] = ''\n\n#Find all the countries\ncountry_list = albums['country'].unique()\n \n#Create a binary variable for each country\nfor country in country_list:\n albums['c_'+country] = 0\n#Populate the binary variable\n albums.ix[albums['country'] == country, 'c_'+country] = 1\n \n#Ignore the styles with few (<10) entries\nc_country_list = []\nfor country in country_list:\n if sum(albums['c_'+country])<10:\n albums.drop('c_'+country, axis=1, inplace=True)\n else:\n c_country_list.append('c_'+country)", "_____no_output_____" ], [ "predictors = ['compilation','number for sale', 'number have', 'number of ratings', 'number of tracks',\n 'number of versions', 'number on label', 'number on label for sale', 'number want', \n 'integer year', 'average rating', 'number of records', 'limited edition', \n 'reissue', 'difference to earliest version', 'difference to latest version'] + c_country_list\n\n#Randomly shuffle the row order\nnew_albums = albums.sample(frac=1)\n\n#Generate predictions\npredictions = get_random_forest_predictions(new_albums, predictors)\n\n#Calculate\nmean_diff = get_mean_diff(np.array(new_albums['median price sold']), predictions)\nscore = get_score(np.array(new_albums['median price sold']), predictions)\n\nprint('Random forest with countries added'\n ' gives £{0:.2f} mean difference and {1:.4f} score'.format(mean_diff,score))\n\nmean_diff_each_step.append(mean_diff)\nscore_each_step.append(score)\neach_step.append('Countries')", "Random forest with countries added gives £17.44 mean difference and 0.4231 score\n" ] ], [ [ "Now let's try using k-means to cluster together the countries into a few groups, and using these as features.", "_____no_output_____" ] ], [ [ "from sklearn.cluster import KMeans\n\n#Make a dataframe with the price statistics for each country\ncountry_stats = pd.DataFrame(albums.groupby(by = 'country')\n .describe()['median price sold']).reset_index().pivot(index = 'country',\n columns = 'level_1',\n values = 'median price sold')\n#Replace the nan stds with 0\ncountry_stats.ix[country_stats['std'].isnull(),'std'] = 0\n\n#scale each column by the median value\ncountry_stats_scaled = pd.DataFrame()\nfor column in country_stats.columns:\n #Take the square root of each value to reduce the effect of outlying, really expensive records\n country_stats_scaled[column] = [np.sqrt(country_stats.ix[index,column] / country_stats[column].median()) \n for index in country_stats.index]\n\n#Cluster with kmeans\nkm = KMeans(n_clusters=6)\n#Cluster on the limits, mean and percentiles\nkm.fit(country_stats_scaled[['max', 'mean', 'min', '25%', '50%', '75%']].as_matrix())\n\ncountry_stats['label'] = km.labels_", "C:\\Anaconda3\\lib\\site-packages\\numpy\\core\\_methods.py:82: RuntimeWarning: Degrees of freedom <= 0 for slice\n warnings.warn(\"Degrees of freedom <= 0 for slice\", RuntimeWarning)\n" ], [ "fig = plt.figure(figsize=(15,3))\nc_list = 'rgbmkc'\n\nax1 = fig.add_subplot(121)\nax2 = fig.add_subplot(122)\ncount = 0\nfor label in country_stats['label'].unique():\n x = list(country_stats.ix[country_stats['label'] == label,'min'])\n y = list(country_stats.ix[country_stats['label'] == 
label,'max'])\n z = list(country_stats.ix[country_stats['label'] == label,'mean'])\n ax1.scatter(z, y, c=c_list[count], marker='o')\n ax2.scatter(z, x, c=c_list[count], marker='o')\n count = count+1\n\n\nax1.set_xlabel('mean')\nax1.set_ylabel('max')\nax1.set_xlim([0,500])\nax1.set_ylim([0,2500])\nax2.set_xlabel('mean')\nax2.set_ylabel('min')\nax2.set_xlim([0,500])\nax2.set_ylim([0,500]);", "_____no_output_____" ], [ "for i in range(0,6):\n print(c_list[i] + ' corresponds to:')\n print(list(country_stats[country_stats['label'] == country_stats['label'].unique()[i]].index[:5]))", "r corresponds to:\n['', 'Argentina', 'Australia', 'Austria', 'Belgium']\ng corresponds to:\n['Algeria', 'Germany, Austria, & Switzerland', 'Haiti', 'Lebanon', 'Pakistan']\nb corresponds to:\n['Andorra', 'Australia & New Zealand', 'Bahamas, The', 'Barbados', 'Benelux']\nm corresponds to:\n['Brazil', 'France', 'Germany', 'Italy', 'Japan']\nk corresponds to:\n['Denmark', 'Egypt', 'Ethiopia', 'Finland', 'India']\nc corresponds to:\n['Mongolia']\n" ], [ "albums.ix[albums['country'] == 'Mongolia', ['artist','title','label','year','median price sold']]", "_____no_output_____" ] ], [ [ "The six groups are:\n\n1) Red: low mean/min, middling max; uniformly cheap countries \nplaces where major labels release records with fewer small pressings \ne.g. Australia, Canada, Cuba, Poland, plus hybrid 'countries' like Europe\n\n2) Green: high mean/min, low max; uniformly expensive countries \nplaces with only a few, sought after releases \ne.g. Haiti, Lebanon, Pakistan\n\n3) Blue: middling mean/min, low max; uniformly mid-range countries \nplaces where records are slightly rarer, with local scenes of interest \ne.g. Colombia, Ghana, Jamaica, Mexico\n\n4) Magenta: low mean, low min, high max; diversely cheaper countries, \nmajor labels releasing records in bulk but also smaller (private?) pressings \ne.g. US, UK, Brazil, France, Japan \n\n5) Black: high mean, low min, high max; diversely expensive countries \nplaces with both major label operations, but also local scenes of interest \ne.g. Belgium, India, South Africa, Turkey\n\n6) Cyan: Mongolia, or specifically one record of a Soviet variety show by the Bayan Mongol Group \nthis fits in best with the records in 1) so reassign Mongolia there\n\nThese are pretty neat divisions (except for the last one), so we should add these as binary variables. \nI don't agree with where some of the countries end up, but these have few data points - more data will place them in the 'correct' category.\n\nN.B. This is a bit dodgy as we're using the test data to build the classifier, so perhaps in future we should only cluster on those in the training data. 
\nWe also should create a more rigorous way of weeding out 'Mongolias': if there's less than a certain number of countries in a cluster, assign them to the next nearest cluster.", "_____no_output_____" ] ], [ [ "#Reassign Mongolia\ncountry_stats.ix['Mongolia', 'label'] = country_stats.ix['Lebanon', 'label']\n\n#Create binary variables for each label\ncount = 1\nfor label in country_stats['label'].unique():\n albums['country label ' + str(count)] = 0\n for country in country_stats[country_stats['label'] == label].index:\n albums.ix[albums['country'] == country, 'country label ' + str(count)] = 1\n count = count + 1", "_____no_output_____" ], [ "predictors = ['compilation','number for sale', 'number have', 'number of ratings', 'number of tracks',\n 'number of versions', 'number on label', 'number on label for sale', 'number want', \n 'integer year', 'average rating', 'number of records', 'limited edition', \n 'reissue', 'difference to earliest version', 'difference to latest version',\n 'country label 1', 'country label 2', 'country label 3', 'country label 4', 'country label 5']\n\n#Randomly shuffle the row order\nnew_albums = albums.sample(frac=1)\n\n#Generate predictions\npredictions = get_random_forest_predictions(new_albums, predictors)\n\n#Calculate\nmean_diff = get_mean_diff(np.array(new_albums['median price sold']), predictions)\nscore = get_score(np.array(new_albums['median price sold']), predictions)\n\nprint('Random forest with countries added'\n ' gives £{0:.2f} mean difference and {1:.4f} score'.format(mean_diff,score))", "Random forest with countries added gives £17.84 mean difference and 0.4017 score\n" ] ], [ [ "This isn't as good as the binary variables.", "_____no_output_____" ], [ "## Genres\n\nGenre may affect price: bebop collectors will probably pay more for originals than smooth jazz collectors, for example.", "_____no_output_____" ] ], [ [ "#Extract all the genres\ngenre_list = []\nfor genres in albums['genre'].unique():\n for genre in genres.split('; '):\n if genre not in genre_list:\n genre_list.append(genre)\n \n#Create a binary variable for each genre\nfor genre in genre_list:\n albums['g_'+genre] = 0\n\n#Populate the binary variable\nfor genres in albums['genre'].unique():\n for genre in genres.split('; '):\n albums.ix[albums['genre'] == genres, 'g_'+genre] = 1\n \n#Ignore the genres with few (<10) entries\ng_genre_list = []\nfor genre in genre_list:\n if sum(albums['g_'+genre])<10:\n albums.drop('g_'+genre, axis=1, inplace=True)\n else:\n print(genre + ': {0}, £{1:.2f}'.format(sum(albums['g_'+genre]),albums.ix[albums['g_'+genre] == 1,\n 'median price sold'].mean()))\n g_genre_list.append('g_'+genre)", "Electronic: 2474, £32.80\nJazz: 23124, £35.27\nHip Hop: 365, £24.21\nStage & Screen: 1865, £51.57\nFunk / Soul: 6067, £34.71\nRock: 3636, £35.45\nLatin: 1763, £37.27\nFolk, World, & Country: 1262, £46.08\nPop: 1131, £33.75\nClassical: 388, £49.13\nNon-Music: 284, £43.94\nReggae: 153, £30.66\nBlues: 521, £30.02\nChildren's: 19, £22.49\nBrass & Military: 38, £38.46\n" ] ], [ [ "Hip hop records are invariably newer, so less expensive. \nStage & Screen is more expensive (perhaps less popular upon initial release, at least in the realms of jazz). \nFolk, World, & Country will be pressings from more 'exotic' locations, so more expensive. 
\nClassical/non-music is more expensive (perhaps less popular upon initial release, at least in the realms of jazz).\n\nLet's use some of these as features (with >100 entries)", "_____no_output_____" ] ], [ [ "predictors = ['compilation','number for sale', 'number have', 'number of ratings', 'number of tracks',\n 'number of versions', 'number on label', 'number on label for sale', 'number want', \n 'integer year', 'average rating', 'number of records', 'limited edition', \n 'reissue', 'difference to earliest version', 'difference to latest version'] + c_country_list + g_genre_list\n\n#Randomly shuffle the row order\nnew_albums = albums.sample(frac=1)\n\n#Generate predictions\npredictions = get_random_forest_predictions(new_albums, predictors)\n\n#Calculate\nmean_diff = get_mean_diff(np.array(new_albums['median price sold']), predictions)\nscore = get_score(np.array(new_albums['median price sold']), predictions)\n\nprint('Random forest with genres added'\n ' gives £{0:.2f} mean difference and {1:.4f} score'.format(mean_diff,score))\n\nmean_diff_each_step.append(mean_diff)\nscore_each_step.append(score)\neach_step.append('Genres')", "Random forest with genres added gives £17.45 mean difference and 0.4155 score\n" ] ], [ [ "Little difference.", "_____no_output_____" ], [ "## Styles\n\nSlightly more specific categorisations, but may have a similar effect to genres.", "_____no_output_____" ] ], [ [ "albums.ix[albums['style'].isnull(),'style'] = ''\n\n#Extract all the styles\nstyle_list = []\nfor styles in albums['style'].unique():\n for style in styles.split('; '):\n if style not in style_list:\n style_list.append(style)\n \n#Create a binary variable for each style\nfor style in style_list:\n albums['s_'+style] = 0\n\n#Populate the binary variable\nfor styles in albums['style'].unique():\n for style in styles.split('; '):\n albums.ix[albums['style'] == styles, 's_'+style] = 1\n \n#Ignore the styles with few (<10) entries\ns_style_list = []\nfor style in style_list:\n if sum(albums['s_'+style])<10:\n albums.drop('s_'+style, axis=1, inplace=True)\n else:\n print(style + ': {0}, £{1:.2f}'.format(sum(albums['s_'+style]),albums.ix[albums['s_'+style] == 1, \n 'median price sold'].mean()))\n s_style_list.append('s_'+style)", "Jazz-Funk: 4587, £29.90\nInstrumental: 103, £31.48\nDowntempo: 216, £23.30\nJazzy Hip-Hop: 46, £32.09\nFusion: 3136, £27.98\nSoundtrack: 1029, £46.82\nScore: 287, £50.94\nAmbient: 249, £33.81\nModal: 1343, £40.89\nFunk: 1755, £32.85\nDisco: 884, £23.29\nSoul-Jazz: 2786, £28.06\nJazz-Rock: 1846, £35.35\nContemporary Jazz: 1371, £34.15\nPsychedelic: 330, £48.71\nSoul: 1016, £22.10\nAfro-Cuban: 129, £29.44\nAfrobeat: 490, £34.54\nAbstract: 285, £45.04\nAvantgarde: 546, £31.73\nProg Rock: 852, £43.80\nBoogie: 21, £73.78\nRhythm & Blues: 261, £25.87\nSmooth Jazz: 520, £25.19\nAvant-garde Jazz: 424, £51.67\nPost Bop: 1222, £33.23\n: 1893, £45.13\nBossa Nova: 596, £39.55\nAlternative Rock: 70, £27.53\nPunk: 26, £23.47\nLatin Jazz: 1329, £27.85\nMPB: 227, £58.44\nSpace-Age: 603, £33.65\nCool Jazz: 492, £30.99\nBig Band: 584, £32.31\nContemporary: 102, £49.56\nAfrican: 147, £49.20\nFree Jazz: 2594, £39.91\nFuture Jazz: 249, £23.86\nDeep House: 40, £17.25\nExperimental: 1123, £41.37\nFree Improvisation: 1074, £48.59\nEasy Listening: 1835, £38.64\nHard Bop: 1689, £37.80\nLounge: 227, £32.60\nPsychedelic Rock: 517, £43.13\nEducation: 13, £56.05\nPost-Modern: 11, £44.83\nReggae: 44, £29.72\nFolk Rock: 193, £33.99\nClassic Rock: 105, £17.39\nAcoustic: 79, £20.87\nFolk: 153, £54.57\nBop: 690, 
£38.13\nSymphonic Rock: 10, £18.54\nNeo Soul: 36, £22.65\nModern Classical: 138, £39.50\nNew Age: 54, £33.75\nKrautrock: 138, £54.16\nBlues Rock: 220, £26.43\nPoetry: 68, £34.18\nPiano Blues: 60, £17.83\nBallad: 133, £23.63\nArt Rock: 167, £32.94\nCountry Rock: 36, £18.09\nDialogue: 31, £61.11\nHip Hop: 41, £19.38\nHighlife: 31, £32.91\nTrip Hop: 45, £22.31\nAfro-Cuban Jazz: 418, £35.06\nLeftfield: 162, £44.60\nSoft Rock: 107, £13.25\nPop Rock: 272, £22.57\nBossanova: 214, £34.36\nHardcore: 15, £24.92\nCut-up/DJ: 23, £21.11\nSamba: 196, £32.37\nSynth-pop: 187, £27.70\nAcid Jazz: 115, £18.47\nDub: 63, £24.35\nFree Funk: 108, £35.05\nJazzdance: 98, £20.41\nSpoken Word: 150, £35.63\nHard Rock: 33, £21.56\nGospel: 63, £33.90\nLouisiana Blues: 15, £19.98\nBayou Funk: 26, £17.41\nPop Rap: 12, £62.37\nCubano: 14, £31.18\nSon: 21, £22.87\nSalsa: 91, £27.15\nRumba: 15, £23.99\nComedy: 15, £36.16\nVocal: 239, £26.91\nBroken Beat: 32, £18.77\nModern: 24, £46.20\nConscious: 41, £24.18\nTribal: 22, £31.37\nBoogaloo: 72, £27.92\nChanson: 85, £36.16\nNoise: 89, £32.95\nField Recording: 21, £68.15\nMusique Concrète: 60, £66.74\nIDM: 12, £19.70\nHouse: 61, £22.34\nBreaks: 80, £36.40\nGypsy Jazz: 53, £26.55\nRock & Roll: 44, £28.72\nIndie Rock: 28, £34.70\nEthereal: 10, £31.13\nSwing: 265, £24.06\nPolitical: 19, £35.75\nEast Coast Blues: 18, £20.16\nCape Jazz: 32, £104.23\nCumbia: 14, £33.68\nElectro: 113, £20.92\nTheme: 196, £58.79\nFlamenco: 26, £32.93\nBatucada: 33, £29.65\nClassical: 25, £64.99\nBeat: 47, £55.28\nSpace Rock: 52, £36.87\nTechno: 24, £18.38\nDrum n Bass: 19, £18.68\nMod: 30, £35.96\nRagtime: 23, £50.96\nNew Wave: 59, £22.07\nSchlager: 20, £22.03\nGarage Rock: 31, £27.22\nMambo: 51, £32.69\nCalypso: 27, £30.67\nZouk: 26, £50.05\nDoo Wop: 12, £44.32\nParody: 23, £33.40\nRnB/Swing: 21, £29.84\nMinimal: 52, £39.02\nReligious: 10, £82.93\nNo Wave: 26, £21.52\nItalo-Disco: 11, £35.21\nDixieland: 25, £20.23\nCountry: 12, £42.92\nNovelty: 34, £22.02\nP.Funk: 15, £33.23\nBreakbeat: 30, £43.50\nDescarga: 38, £54.02\nGuaguancó: 18, £32.72\nBolero: 22, £34.81\nDrone: 43, £38.64\nNeo-Classical: 27, £38.94\nIndustrial: 29, £25.71\nLatin: 37, £16.93\nSurf: 28, £27.83\nIndian Classical: 15, £53.69\nHindustani: 11, £99.68\nCha-Cha: 24, £25.85\nSpecial Effects: 11, £56.59\nPost Rock: 19, £20.02\nNeo-Romantic: 14, £31.95\nPacific: 27, £13.21\nPost-Punk: 10, £20.55\nMath Rock: 18, £23.46\nBeguine: 11, £59.23\nMerengue: 10, £29.90\nCountry Blues: 11, £56.83\nModern Electric Blues: 13, £10.57\nRocksteady: 10, £16.96\nTango: 11, £26.93\nHeavy Metal: 17, £22.81\nMovie Effects: 10, £32.53\nBrass Band: 11, £24.47\nCompas: 10, £37.38\nRomantic: 10, £21.26\nSka: 13, £22.74\nElectric Blues: 21, £19.80\n" ], [ "predictors = ['compilation','number for sale', 'number have', 'number of ratings', 'number of tracks',\n 'number of versions', 'number on label', 'number on label for sale', 'number want', \n 'integer year', 'average rating', 'number of records', 'limited edition', \n 'reissue', 'difference to earliest version', 'difference to latest version'] + c_country_list + g_genre_list + s_style_list\n\n#Randomly shuffle the row order\nnew_albums = albums.sample(frac=1)\n\n#Generate predictions\npredictions = get_random_forest_predictions(new_albums, predictors)\n\n#Calculate\nmean_diff = get_mean_diff(np.array(new_albums['median price sold']), predictions)\nscore = get_score(np.array(new_albums['median price sold']), predictions)\n\nprint('Random forest with styles added'\n ' gives £{0:.2f} mean difference and {1:.4f} 
score'.format(mean_diff,score))\n\nmean_diff_each_step.append(mean_diff)\nscore_each_step.append(score)\neach_step.append('Styles')", "Random forest with styles added gives £17.27 mean difference and 0.4179 score\n" ] ], [ [ "Improves mean difference a little at least.", "_____no_output_____" ] ], [ [ "xticks = list(range(0,len(score_each_step)))\nfig = plt.figure(figsize=(12,3))\nax1 = fig.add_subplot(121)\nax1.plot(xticks,mean_diff_each_step,'-or')\nax1.set_title('mean diff')\nax1.set_xticks(xticks)\nax1.set_xticklabels(each_step, rotation=90)\nax1.set_ylabel('mean diff (£)')\nax2 = fig.add_subplot(122)\nax2.plot(xticks,score_each_step,'-ob')\nax2.set_title('score')\nax2.set_xticks(xticks)\nax2.set_xticklabels(each_step, rotation=90)\nax2.set_ylabel('score');", "_____no_output_____" ] ], [ [ "Although there's obviously some noise in both the mean difference and score associated with the random partitioning of the data into train and test sets, there's still a clear improvement in both measures as features have been added.", "_____no_output_____" ] ], [ [ "albums.to_csv('../data/interim/albums.csv',encoding='utf-8')", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
e7d25de871369cf4fc69aec2688dd38736bf4556
1,398
ipynb
Jupyter Notebook
DAY 101 ~ 200/DAY139_[BaekJoon] 카드 구매하기 (Python).ipynb
SOMJANG/CODINGTEST_PRACTICE
1a7304e9063579441b8a67765175c82b0ad93ac9
[ "MIT" ]
15
2020-03-17T01:18:33.000Z
2021-12-24T06:31:06.000Z
DAY 101 ~ 200/DAY139_[BaekJoon] 카드 구매하기 (Python).ipynb
SOMJANG/CODINGTEST_PRACTICE
1a7304e9063579441b8a67765175c82b0ad93ac9
[ "MIT" ]
null
null
null
DAY 101 ~ 200/DAY139_[BaekJoon] 카드 구매하기 (Python).ipynb
SOMJANG/CODINGTEST_PRACTICE
1a7304e9063579441b8a67765175c82b0ad93ac9
[ "MIT" ]
10
2020-03-17T01:18:34.000Z
2022-03-30T10:53:07.000Z
22.190476
136
0.495708
[ [ [ "## 2020년 6월 24일 수요일\n### BaekJoon - 11052 : 카드 구매하기 (Python)\n### 문제 : https://www.acmicpc.net/problem/11052\n### 블로그 : https://somjang.tistory.com/entry/BaekJoon-11052%EB%B2%88-%EC%B9%B4%EB%93%9C-%EA%B5%AC%EB%A7%A4%ED%95%98%EA%B8%B0-Python", "_____no_output_____" ], [ "### 첫번째 시도", "_____no_output_____" ] ], [ [ "cardNum = int(input())\nNC = [0]*(cardNum+1)\ncardPrice = [0]+list(map(int, input().split()))\n\ndef answer():\n NC[0], NC[1] = 0, cardPrice[1]\n for i in range(2, cardNum+1):\n for j in range(1, i+1):\n NC[i] = max(NC[i], NC[i-j]+cardPrice[j])\n print(NC[cardNum])\n\nanswer()", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ] ]
e7d2609baff512fcb774c1ec3f1039581a31d2e6
14,168
ipynb
Jupyter Notebook
test.ipynb
csyvenky/jupyter_for_all_ntsb
2728228c74e6c3ae4edabbca66b622417d5764af
[ "MIT" ]
null
null
null
test.ipynb
csyvenky/jupyter_for_all_ntsb
2728228c74e6c3ae4edabbca66b622417d5764af
[ "MIT" ]
null
null
null
test.ipynb
csyvenky/jupyter_for_all_ntsb
2728228c74e6c3ae4edabbca66b622417d5764af
[ "MIT" ]
null
null
null
51.708029
2,361
0.471132
[ [ [ "import os\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom urllib.request import urlretrieve", "_____no_output_____" ], [ "filename = 'data/test.csv' \ndata = pd.read_csv(filename)", "_____no_output_____" ], [ "# row and column count\ndata.shape", "_____no_output_____" ], [ "# show the column names (note all the extraneous spaces in the column names)\ndata.columns", "_____no_output_____" ], [ "data.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 3 entries, 0 to 2\nData columns (total 6 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 id 3 non-null int64 \n 1 first 3 non-null object\n 2 second 3 non-null object\n 3 third 3 non-null object\n 4 fourth 3 non-null object\n 5 fifth 3 non-null object\ndtypes: int64(1), object(5)\nmemory usage: 272.0+ bytes\n" ], [ "# summary statistics (with strings too)\ndata.describe(include=\"all\")", "_____no_output_____" ], [ "# show the first 10 rows\ndata.head()", "_____no_output_____" ], [ "data.index", "_____no_output_____" ], [ "# from the docs\n# pandas.to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False, utc=None, format=None, exact=True, unit=None, infer_datetime_format=False, origin='unix', cache=True)\n\ndata[\"first\"] = pd.to_datetime(data[\"first\"])\ndata[\"second\"] = pd.to_datetime(data[\"second\"])\ndata[\"third\"] = pd.to_datetime(data[\"third\"])\ndata[\"fourth\"] = pd.to_datetime(data[\"fourth\"])\ndata[\"fifth\"] = pd.to_datetime(data[\"fifth\"])", "_____no_output_____" ], [ "data.dtypes", "_____no_output_____" ], [ "data.sort_values(by=['second'], inplace=True, ascending=True)", "_____no_output_____" ], [ "data", "_____no_output_____" ], [ "# Set index as DateTimeIndex\ndatetime_index = pd.DatetimeIndex(data.second)\ndata.set_index(datetime_index, inplace=True)\ndata.head()", "_____no_output_____" ], [ "data.index", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7d280b302d62562c001c6caa6d1a25fe59f2d13
8,981
ipynb
Jupyter Notebook
k-means clustering/CLUSTERING.ipynb
GeorgeOduor/unsupervised-learning
7178abfea0656dddef3efe09a71a16982ddeb026
[ "MIT" ]
null
null
null
k-means clustering/CLUSTERING.ipynb
GeorgeOduor/unsupervised-learning
7178abfea0656dddef3efe09a71a16982ddeb026
[ "MIT" ]
null
null
null
k-means clustering/CLUSTERING.ipynb
GeorgeOduor/unsupervised-learning
7178abfea0656dddef3efe09a71a16982ddeb026
[ "MIT" ]
4
2019-05-27T07:30:49.000Z
2021-07-03T12:01:55.000Z
36.958848
320
0.578889
[ [ [ "\n______\n\n# Introduction to Clustering with K-Means.\n______\n\n### Contents.\n\n1. [What is Clustering](#1)\n1. [What is a Cluster?](#2)\n1. [Difference between clustering and classification.](#3)\n 1. [Uses of Clustering in different industries.](#3.1)\n 1. [Why clustering?](#3.2)\n1. [Clustering algorithms.](#4)\n1. [K- Means Clustering.](#5)\n 1. [A little bit of the mathematical side.](#5.1)\n 1. [The K-Mean Algorithm](#5.2)\n 1. [K-Means Accuracy.](#5.3)\n 1. [K-Means Accuracy.](#5.4)\n \n\n<div id =\"1\"></div>\n\n## What is Clustering?\n\nClustering means finding clusters in a dataset unsupervised.Unsupervised in this case means that there are no predictor and response variables.\n\n<div id =\"2\"></div>\n\n## What is a Cluster?\n\nA cluster is a group of objects that are __similar to other objects__ in the cluster and __dissimilar to data points__ in other clusters.\n\nWhen observations are clustered in a particular dataset,they are partitioned into distinct groups in such a way that the observations within each group are very similar to each other.\n\n<div id =\"3\"></div>\n\n## Difference between clustering and classification.\n\nClassification predict categorical class labels which means assigning instances to predefined clases in a supervised model while clusteringsimply groups similar observations together in an unsupervised fasion.\n\n\n<div id =\"3.1\"></div>\n\n### Uses of Clustering in different industries.\n\n- **Retail/Marketing**\n\n - Identifying buying paterns of customers.\n \n - Recommending new books or movies to new customers.\n \n- **Banking**:\n \n - Fraud detection in credit card use\n \n - Identifying clusters of customers \n \n- **Insurance**:\n \n - Fraud detection in claims analysis.\n \n - Insurance risk of cudtomers.\n \n- **Publication**:\n \n - Auto-categorizing news based on their content\n \n - Recomending similar news articles\n \n- **Medicine**:\n\n - Characterising patient behaviour \n \n- **Biology**:\n \n - Clustering genetic markers to identify family ties.\n \n <div id =\"3.2\"></div>\n \n### Why clustering?\n\nIn data analysis we can make use of clustering to achieve the following:\n \n- Exploratory data analysis.\n \n- Summary Generation.\n \n- Outlier detection.\n \n- Finding duplicates\n \n- Pre-procesing step.\n\n<div id =\"4\"></div>\n \n## Clustering algorithms.\n\nClustering is so popular in many fields and below are some of the clustering algorithms.\n\n1. Partition Based Algorithms:\n\n1. Hierarchical clustering:\n\n1. Density Based algorithm\n\n1. K- Means Clustering.\n\nIn this article i am talking about Kmeans algorithm.\n\n<div id =\"5\"></div>\n\n## K- Means Clustering.\n\n<div id =\"5.1\"></div>\n\n### A little bit of the mathematical side.\n\nLets say we have sets, $C_1,C_2,C_3,...,C_K$ that denote indices of the observations in each cluster and must satisfy the following conditions,\n\n1. $C_1 \\cup C_2 \\cup C_3 \\cup ...\\cup C_K=\\{1,...,n\\}$\n\nmeaning that each observation must belong to each group.\n\n2. 
$C_k\\cap C_{k'}=\\varnothing \\ \\forall \\ k \\neq k'$\n\nmeaning that there is no overlaping among the groups.\n\nThe idea is that clustering a good cluster is one for which the within luster variation is as small as possible.\n\nHaving a measure $W(C_k)$ representing the amount of variation within the cluster,below is the problem problem that it gives and requires solution.\n\n$$minimize\\{\\sum_{k=1}^KW(C_k)\\};C_1,C_2,C_3,...,C_K$$\n\nA first thought of solving this is to calculate the distance between two observations using the eulcledian distance.The euclidian distace can be calculated as follows:\n\n$$Dis(x_1,x_2) = \\sqrt{\\sum_{i=0}^n(x_{1i}-x_{2i})^2}$$\n\nIn case of many variables the same equation is used in a hire dimension.\nBy the use of Eulidien distance ,we can then define the within cluster variation as:\n\n$$W(C_k)=\\frac{1}{|C_k|}\\sum _{i,i'\\in C_k }\\sum_{j=1}^p(x_{ij}-x_{i'j})^2$$\n\nIn the above function $|C_k|$ stands for the number of observationsin the k$^{th}$ cluster.\n\nNormalization in this case is a requirent to get the correct disimilarity measure but is higly depends on on the data type and also the domain that clustering is done for it.\n\nThe two equations above combines to give;\n\n$$minimize\\{\\frac{1}{|C_k|}\\sum _{i,i'\\in C_k }\\sum_{j=1}^p(x_{ij}-x_{i'j})^2\\};C_1,C_2,C_3,...,C_K$$\n\nFrom here the task is to find an algorithm that will partition the observations into K clusters such that the above objective is met.\n\nIn simple terms we can say that K-Means tries to minimize the _intra-cluster(within)_$Dis(x_1,x_2)$ class distances while trying to maximize the _inter-cluster(between)_$Dis(C_1,C_2)$ class distances.\n\nError per cluster can be calculted as:\n$$SSE = \\sum_i^n(x_i-C_j)^2$$\n\n<div id =\"5.2\"></div>\n\n### The K-Mean Algorithm\n\nBellow is an algorithm that can be used to solve the k means problem:\n\n1. Choose a number of clusters \"K\"\n\n1. Randomly assign each point to a cluster\n\n1. Until clusters stop changing,repeat the following:\n\n - For each cluster,compute the cluster centroid by taking the mean vector of points in the cluster.\n \n - Assign each data point to the cluster for which the centroid is closest.\n\n<div id =\"5.3\"></div> \n\n### K-Means Accuracy.\n\nHow do we evaluate the K means model?We can do this by following two approches:\n\n1. **External Aproach**: In this case we compare the clusters with the ground truthif available.Unfortunately this is absent in the real world.\n\n1. 
**Internal Approach:** In this the average distance of data points within a cluster.Also average of the data points distance from the cluster centroids can be used as a metric.\n\n<div id =\"5.4\"></div>\n\n### Choosing the Value of K.\n\nThere is no straightforward way of choosing the best value .On of the methods that can be used to run the clustering across the different values of K and looking at a metric of accuracy for clustering eg the average distance of datapoints from their cluster centroid.This shows how dense or spece a cluster was.\n\nThe problem with this is only that the distance of the datapoints will always reduce as the value of K increases.\n\nTherefor inorder to get the best value of K,the metric is ploted against the different values of K and the point where the metric sharply shifts is determined.This is called the __elbow method.__\n\n\n[Back to top](#top)\n\n____\n## Python Implementation\n___\n\nAfter uderstanding K-Means Clustering,its time to [implement this in python](#)\n.\nSee my other [project in R](#).", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown" ] ]
e7d2817aa3839f4a49dd0a3025bd2188db079d04
11,371
ipynb
Jupyter Notebook
bwhom/Julia_Notes.ipynb
KorfLab/julie
9a87778621d94fe2be6a578a59f3900007b5b186
[ "MIT" ]
1
2021-07-08T22:18:24.000Z
2021-07-08T22:18:24.000Z
bwhom/Julia_Notes.ipynb
KorfLab/julie
9a87778621d94fe2be6a578a59f3900007b5b186
[ "MIT" ]
null
null
null
bwhom/Julia_Notes.ipynb
KorfLab/julie
9a87778621d94fe2be6a578a59f3900007b5b186
[ "MIT" ]
1
2021-07-08T23:31:44.000Z
2021-07-08T23:31:44.000Z
20.525271
199
0.470847
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
e7d28bd2216dd367cf94eb7a5a7dc66266ac84f2
203,410
ipynb
Jupyter Notebook
LabelEncoder.ipynb
AlejandroDaneri/Machine-Learning-Properati
5d0f77eaff5ea7ef5a8f0e446c9220592d9b2e1a
[ "MIT" ]
1
2020-12-17T19:21:16.000Z
2020-12-17T19:21:16.000Z
LabelEncoder.ipynb
AlejandroDaneri/Machine-Learning-Properati
5d0f77eaff5ea7ef5a8f0e446c9220592d9b2e1a
[ "MIT" ]
null
null
null
LabelEncoder.ipynb
AlejandroDaneri/Machine-Learning-Properati
5d0f77eaff5ea7ef5a8f0e446c9220592d9b2e1a
[ "MIT" ]
1
2019-04-03T22:25:13.000Z
2019-04-03T22:25:13.000Z
46.847075
302
0.378295
[ [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nproperati = pd.read_csv('datos/caba_para_mapa.csv',error_bad_lines=False)\nfrom IPython.core.display import display, HTML\ndisplay(HTML(\"<style>.container { width:97% !important; }</style>\"))", "_____no_output_____" ], [ "properati.info()", "<class 'pandas.core.frame.DataFrame'>\nIndex: 49955 entries, 2015-07-01 to 2017-08-01\nData columns (total 25 columns):\nproperty_type 49955 non-null object\nplace_name 49955 non-null object\nstate_name 49955 non-null object\nlat-lon 49955 non-null object\nlat 49955 non-null float64\nlon 49955 non-null float64\nprice 49955 non-null float64\ncurrency 49955 non-null object\nprice_aprox_local_currency 49955 non-null float64\nprice_aprox_usd 49955 non-null float64\nsurface_total_in_m2 49955 non-null float64\nsurface_covered_in_m2 48166 non-null float64\nprice_usd_per_m2 49112 non-null float64\nprice_per_m2 49955 non-null float64\nfloor 6576 non-null float64\nrooms 34521 non-null float64\nexpenses 14967 non-null float64\nproperati_url 49955 non-null object\ndescription 49955 non-null object\ntitle 49955 non-null object\ndist_a_subte 49955 non-null float64\ndist_a_tren 49955 non-null float64\ndist_a_univ 49955 non-null float64\ndist_a_villa 49955 non-null float64\ndist_a_zona_anegada 49955 non-null float64\ndtypes: float64(17), object(8)\nmemory usage: 9.9+ MB\n" ], [ "from sklearn import preprocessing\n\nle_tipo = preprocessing.LabelEncoder()\ntipos_prop=properati['property_type']\nle_tipo.fit(tipos_prop)\nproperati['property_type'] = le_tipo.transform(tipos_prop)\nprint list(le_tipo.inverse_transform([0,1,2,3]))\n\nle_region = preprocessing.LabelEncoder() \nregiones=properati['state_name']\nle_region.fit(regiones)\nproperati['state_name'] = le_region.transform(regiones)\n\nle_barrio = preprocessing.LabelEncoder()\nbarrios=properati['place_name']\nle_barrio.fit(barrios)\nproperati['place_name'] = le_barrio.transform(barrios)", "['PH', 'apartment', 'house', 'store']\n" ], [ "properati", "_____no_output_____" ], [ "print list(le_tipo.inverse_transform([0,1,2,3]))", "['PH', 'apartment', 'house', 'store']\n" ], [ "print list(le_region.inverse_transform([0])) #porque es solo CABA", "['Capital Federal']\n" ], [ "print list(le_barrio.inverse_transform(range(0,20)))", "['Abasto', 'Agronom\\xc3\\xada', 'Almagro', 'Balvanera', 'Barracas', 'Barrio Norte', 'Belgrano', 'Boca', 'Boedo', 'Caballito', 'Capital Federal', 'Catalinas', 'Centro / Microcentro', 'Chacarita', 'Coghlan', 'Colegiales', 'Congreso', 'Constituci\\xc3\\xb3n', 'Distrito de las Artes', 'Flores']\n" ], [ "#quiero buscar las propiedades que esten en Flores\nproperati= properati.loc[properati['place_name'] == le_barrio.transform(['Flores'])[0]] #porque devuelve un array", "_____no_output_____" ], [ "properati", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7d28c7369e0ea752c7706b4adb6c8d9d5b4fd8a
237,444
ipynb
Jupyter Notebook
Simplified-Role-Filler-net1.ipynb
vsapy/DTIN07
cb9ada042d7a6f9569b9952bea7a097008ef1c84
[ "MIT" ]
null
null
null
Simplified-Role-Filler-net1.ipynb
vsapy/DTIN07
cb9ada042d7a6f9569b9952bea7a097008ef1c84
[ "MIT" ]
null
null
null
Simplified-Role-Filler-net1.ipynb
vsapy/DTIN07
cb9ada042d7a6f9569b9952bea7a097008ef1c84
[ "MIT" ]
null
null
null
194.945813
119,636
0.867421
[ [ [ "from brian2 import *\nfrom brian2tools import *\n%matplotlib inline\nnp.set_printoptions() \n#np.set_printoptions(threshold=12)\n", "_____no_output_____" ], [ "def fancy_print_vector(v):\n if v.shape[1] > 24:\n # Fancy formating for slots_per_vector that are longer than 24 bits_per_slot \n # shows start and end of slot values.\n for kk in range(v.shape[0]):\n print(f\"slot[{kk:2d}]:\"\n f\"{((v[kk]).astype(int))[:12]} ... {((v[kk]).astype(int))[-12:]}, \"\n f\"Argmax bit pos=[{np.argmax(v[kk])}]\")\n else:\n for kk in range(v.shape[0]):\n print(f\"slot[{kk:2d}]:\"\n f\"{((v[kk]).astype(int))}, \"\n f\"Argmax bit pos=[{np.argmax(v[kk])}]\")", "_____no_output_____" ] ], [ [ "This code uses the Brian2 neuromorphic simulator code to implement\n a version of role/filler binding and unbinding based on the \npaper :High-Dimensional Computing with Sparse Vectors\" by Laiho et al 2016. \nThe vector representation is a block structure comprising slots_per_vector \nwhere the number of slots_per_vector is the vector dimension. In each slot there are a\nnumber of possible bit positions with one bit set per slot. \nIn this implementation we implement the role/filler binding and unbinding \noperations in Brian2 by representing each slot as a neuron and the time delay\n of the neuron's spike as the bit position. \n\nTo ensure that the Brian2 network is performing correctly the first section of the code \ncomputes the expected sparse bound vector. \nThe neuromorphic equivalent is implemented as two Brian2 networks. The first network (net1) implements\nthe role/filler binding and the second netwok (net2) implements the role/filler unbinding and the clean-up memory\noperation which compares the unbound vector with all the memory vectors to find the best match.\nThe sparse bound vector resulting from net1 is passed to net2 to initiate the unbinding.", "_____no_output_____" ], [ "## Init base vars", "_____no_output_____" ] ], [ [ "# Init base vars\nshow_bound_vecs_slot_detail = False\n\nslots_per_vector = 100 # This is the number of neurons used to represent a vector\nbits_per_slot = 100 # This is the number of bit positions\nmem_size = 500 # The number of vectors against which the resulting unbound vector is compared\nNum_bound = 5 # The number of vectors that are to be bound\ninput_delay = bits_per_slot # Time delay between adding cyclically shifted vectors to construct the bound vector is set to 'bits_per_slot' milliseconds.\n\n#NB all timings use milliseconds and we can use a random seed if required.\nnp.random.seed(54321)\n\ntarget_neuron = 1\ny_low=target_neuron-1 # This is used to select the lowest index of the range of neurons that are to be displayed\ny_high=target_neuron+1 # This is used to select the highest index of the range of neurons that are to be displayed\n\ndelta = (2*Num_bound) * bits_per_slot #This determins the time period over which the Brian2 simulation is to be run.\n\n", "_____no_output_____" ] ], [ [ "## Create a set of sparse VSA vectors\nGenerate a random matrix (P_matrix) which represents all of the sparse vectors that are to be used.\nThis matrix has columns equal to the number of slots_per_vector in each vector with the number of rows equal to the memory size (mem_size)\n", "_____no_output_____" ] ], [ [ "P_matrix = np.random.randint(0, bits_per_slot, size=(mem_size,slots_per_vector))\nRole_matrix = P_matrix[::2]\nVal_matrix = P_matrix[1::2]", "_____no_output_____" ] ], [ [ "## Demonstration of modulo addition binding", "_____no_output_____" ] ], [ [ "test_target_neuron = 
target_neuron # Change this to try different columns e.g. 0, 1, 2....\nprint(f\"Showing modulo {bits_per_slot} addition role+filler bind/unbind for target column {test_target_neuron}\\n\") \nfor n in range(0,2*Num_bound,2):\n bind_val = (P_matrix[n][test_target_neuron]+P_matrix[n+1][test_target_neuron])%bits_per_slot\n print(f\"\\t{P_matrix[n][test_target_neuron]:2d}+{P_matrix[n+1][test_target_neuron]:2d}=\"\n f\"{P_matrix[n][test_target_neuron]+P_matrix[n+1][test_target_neuron]:2d} %\"\n f\" {bits_per_slot} = {bind_val:2d} Bind\") \n\n unbind_val = (bind_val-P_matrix[n+1][test_target_neuron]) % bits_per_slot \n print(f\"\\t{bind_val:2d}-{P_matrix[n+1][test_target_neuron]:2d}=\"\n f\"{bind_val-P_matrix[n+1][test_target_neuron]:2d} %\"\n f\" {bits_per_slot} = {unbind_val:2d} Unbind\\n\") \n", "Showing modulo 100 addition role+filler bind/unbind for target column 1\n\n\t10+51=61 % 100 = 61 Bind\n\t61-51=10 % 100 = 10 Unbind\n\n\t 0+69=69 % 100 = 69 Bind\n\t69-69= 0 % 100 = 0 Unbind\n\n\t22+77=99 % 100 = 99 Bind\n\t99-77=22 % 100 = 22 Unbind\n\n\t30+31=61 % 100 = 61 Bind\n\t61-31=30 % 100 = 30 Unbind\n\n\t27+63=90 % 100 = 90 Bind\n\t90-63=27 % 100 = 27 Unbind\n\n" ] ], [ [ "# Empirical calc \nThis section of the code computes the theoretical values for the sparse vector (which can then be compared with\nthe output of the net1 neuromorphic circuit. It then computes the expected number of bits_per_slot that will align in the clean-up memory operation (which can then be compared with the net2 neuromorphic circuit output).\n", "_____no_output_____" ], [ "## Create sparse representation of the bound vector", "_____no_output_____" ] ], [ [ "# print the cyclically shifted version of the vectors that are to be bound\nnp.set_printoptions(threshold=24)\nnp.set_printoptions(edgeitems=11)\n\nfor n in range(0, Num_bound):\n print(np.roll(P_matrix[n], n))\n\nnp.set_printoptions()\n\n# Init sparse bound vector (s_bound) with zeros\ns_bound = np.zeros((slots_per_vector, bits_per_slot)) # Create a slotted vector with\n\n#Create sparse representation of the bound vector\n#We take pairs of vector and bind them together and in each slot\n# we cyclically shift the bit position of the filler vector by the \n# bit position of the role vector then we add them together to get the vector s_bound\n\n\n# Do the binding\nfor n in range(0, Num_bound):\n for s in range(0, slots_per_vector): # For each slot\n role_pos = Role_matrix[n][s] # Position of the set bit in this role vector for this slot\n filler_pos = Val_matrix[n][s] # Position of the set bit in this value vector for this slot\n b = (filler_pos+role_pos) % bits_per_slot # Get new 'phase' (bit position) to set in the bound vector's slot\n s_bound[s][b] += 1\n", "[81 10 26 96 22 78 18 72 71 10 98 ... 18 37 76 16 21 9 18 4 25 46 28]\n[81 64 51 34 27 17 11 94 75 98 76 ... 4 63 38 90 33 86 44 21 38 99 97]\n[34 63 41 0 34 93 97 95 30 30 90 ... 19 84 6 82 93 9 98 25 93 19 75]\n[56 95 6 33 69 31 82 11 27 56 42 ... 61 80 26 72 98 22 91 51 33 85 99]\n[74 60 76 13 55 22 87 59 65 89 8 ... 
80 18 27 77 22 54 75 63 71 35 76]\n" ], [ "if show_bound_vecs_slot_detail:\n np.set_printoptions(formatter={'int':lambda x: f\"{x:2d}\"})\n print()\n fancy_print_vector(s_bound)\n\n np.set_printoptions()", "_____no_output_____" ] ], [ [ "## Make s_bound sparse using the argmax function which finds the bit position with the highest random value.", "_____no_output_____" ] ], [ [ "# Make s_bound sparse using the argmax function which finds the bit position with the highest random value.\nnp.set_printoptions(threshold=24)\nnp.set_printoptions(edgeitems=11)\nprint(\"\\nResultant Sparse vector, value indicates 'SET' bit position in each slot. \"\n \"\\n(Note, a value of '0' means bit zero is set).\\n\")\n\nsparse_bound = np.array([np.argmax(s_bound[s]) for s in range(0,slots_per_vector)])\nprint(sparse_bound)\nprint()\nnp.set_printoptions()", "\nResultant Sparse vector, value indicates 'SET' bit position in each slot. \n(Note, a value of '0' means bit zero is set).\n\n[81 61 19 21 68 3 12 9 8 62 17 ... 6 15 15 64 1 53 1 1 24 16 9]\n\n" ] ], [ [ "# Perform the unbinding", "_____no_output_____" ], [ "Unbind the vector sparse_bound vector and compare with each of the vectors in the P_matrix couting the\nnumber of slots_per_vector that have matching bit positions. This gives the number of spikes that should line up \nin the clean up memory operation.", "_____no_output_____" ] ], [ [ "np.set_printoptions(threshold=24)\nnp.set_printoptions(edgeitems=11)\n\nhd_threshold = 0.1\nresults_set = []\nresults = None\nmiss_matches = []\nmin_match = slots_per_vector\nfor nn in range(0, len(Role_matrix)):\n # for pvec in P_matrix:\n pvec = Role_matrix[nn]\n results = []\n for test_vec in Val_matrix:\n unbound_vals = np.array([(sparse_bound[ss] - pvec[ss]) % bits_per_slot for ss in range(0, slots_per_vector)])\n match = np.count_nonzero(unbound_vals == test_vec)\n results.append(match)\n\n win_indx = np.argmax(results)\n max_count = np.max(results)\n hd = max(results) / slots_per_vector\n # print(nn, end=' ')\n if hd > hd_threshold:\n print(f\"Role_vec_idx[{nn:02d}], Val_match_idx[{win_indx:02d}]:\\t{np.array(results)}\")\n if max_count <= min_match:\n min_match = max_count\n else:\n # store the a failed match\n miss_matches.append((nn, win_indx, np.array(results)))\n \n results_set.append(results)\nnp.set_printoptions()", "Role_vec_idx[00], Val_match_idx[00]:\t[20 0 1 3 3 1 0 2 1 2 1 ... 0 2 2 0 1 0 1 0 4 0 1]\nRole_vec_idx[01], Val_match_idx[01]:\t[ 1 29 3 1 1 2 0 1 1 0 1 ... 0 0 1 2 1 0 0 1 2 1 1]\nRole_vec_idx[02], Val_match_idx[02]:\t[ 0 0 22 1 0 0 2 0 2 1 1 ... 0 0 2 0 0 0 1 2 1 2 1]\nRole_vec_idx[03], Val_match_idx[03]:\t[ 1 0 2 19 0 0 1 1 0 2 0 ... 0 0 1 0 1 0 1 0 1 1 3]\nRole_vec_idx[04], Val_match_idx[04]:\t[ 0 1 0 0 18 1 0 3 2 2 2 ... 0 3 1 1 1 1 0 3 1 1 2]\n" ], [ "print(\"\\nShowing failed matches:\\n\")\nfor fm in miss_matches:\n print(f\"Role_vec_idx[{fm[0]:02d}], Val_match_idx[{fm[1]:02d}]:\\t{fm[2]}\")", "\nShowing failed matches:\n\nRole_vec_idx[05], Val_match_idx[94]:\t[3 2 1 0 1 0 0 0 2 2 0 ... 0 2 0 1 0 0 1 3 1 1 1]\nRole_vec_idx[06], Val_match_idx[116]:\t[0 0 0 2 0 1 1 0 1 0 0 ... 1 1 1 0 1 2 1 0 0 2 2]\nRole_vec_idx[07], Val_match_idx[27]:\t[0 1 0 1 1 1 3 1 1 2 2 ... 0 0 2 0 1 2 0 3 0 1 3]\nRole_vec_idx[08], Val_match_idx[44]:\t[2 0 1 0 0 0 0 1 0 0 2 ... 2 2 1 1 0 0 3 2 2 0 1]\nRole_vec_idx[09], Val_match_idx[159]:\t[1 2 0 1 0 0 1 1 2 0 1 ... 1 2 0 2 0 0 0 1 0 1 1]\nRole_vec_idx[10], Val_match_idx[49]:\t[0 2 2 0 3 3 1 1 2 1 0 ... 
0 1 2 3 2 0 0 2 0 3 0]\nRole_vec_idx[11], Val_match_idx[15]:\t[0 2 2 0 2 1 1 0 2 1 0 ... 2 0 1 0 2 1 2 0 1 0 3]\nRole_vec_idx[12], Val_match_idx[42]:\t[2 2 1 0 0 1 3 1 0 0 1 ... 2 2 1 0 1 1 0 3 0 2 1]\nRole_vec_idx[13], Val_match_idx[23]:\t[0 1 0 1 3 0 0 0 1 1 1 ... 0 0 0 3 0 0 0 1 1 0 0]\nRole_vec_idx[14], Val_match_idx[174]:\t[2 0 0 1 1 0 3 3 1 0 1 ... 1 0 0 1 0 1 2 0 0 0 0]\nRole_vec_idx[15], Val_match_idx[40]:\t[2 2 0 0 1 0 1 2 1 0 2 ... 0 3 2 1 2 1 1 0 2 3 0]\nRole_vec_idx[16], Val_match_idx[35]:\t[0 2 1 1 2 2 1 0 0 1 2 ... 0 0 0 1 2 1 3 2 1 1 0]\nRole_vec_idx[17], Val_match_idx[195]:\t[0 1 0 1 0 3 0 0 4 1 3 ... 1 2 0 1 1 1 0 1 0 2 1]\nRole_vec_idx[18], Val_match_idx[81]:\t[0 1 1 1 1 0 1 1 1 1 0 ... 1 1 1 0 2 1 2 1 1 0 1]\nRole_vec_idx[19], Val_match_idx[36]:\t[0 3 1 0 0 1 2 2 1 1 0 ... 0 1 2 1 1 3 0 1 1 1 2]\nRole_vec_idx[20], Val_match_idx[53]:\t[0 0 1 2 2 1 2 3 2 1 0 ... 2 0 2 2 1 0 1 1 0 1 1]\nRole_vec_idx[21], Val_match_idx[184]:\t[1 0 0 1 0 0 3 1 1 0 1 ... 0 1 2 0 1 0 1 0 0 1 1]\nRole_vec_idx[22], Val_match_idx[130]:\t[2 2 0 1 1 2 2 0 1 3 0 ... 1 0 0 3 1 0 1 2 0 1 1]\nRole_vec_idx[23], Val_match_idx[23]:\t[0 2 2 1 0 2 0 1 0 2 1 ... 1 0 1 0 1 2 1 0 1 0 1]\nRole_vec_idx[24], Val_match_idx[201]:\t[0 1 1 2 1 0 0 1 1 1 1 ... 0 1 2 1 2 1 1 0 1 2 1]\nRole_vec_idx[25], Val_match_idx[107]:\t[1 0 2 1 0 0 0 0 0 3 1 ... 1 0 2 1 0 1 0 2 2 1 1]\nRole_vec_idx[26], Val_match_idx[32]:\t[1 2 2 1 0 1 0 0 2 1 1 ... 0 0 0 0 1 1 3 2 0 3 1]\nRole_vec_idx[27], Val_match_idx[11]:\t[2 0 0 0 0 1 1 1 1 2 3 ... 2 0 1 0 0 0 0 2 2 0 2]\nRole_vec_idx[28], Val_match_idx[181]:\t[1 0 1 1 1 1 1 1 0 2 2 ... 1 2 0 1 2 0 0 1 0 0 0]\nRole_vec_idx[29], Val_match_idx[50]:\t[2 0 0 1 2 0 0 2 1 1 1 ... 1 1 0 1 1 2 2 0 0 0 1]\nRole_vec_idx[30], Val_match_idx[29]:\t[0 0 0 3 0 0 1 1 0 1 0 ... 3 0 1 1 2 0 0 1 2 0 1]\nRole_vec_idx[31], Val_match_idx[49]:\t[0 1 2 0 0 0 0 1 1 0 2 ... 1 1 3 0 0 1 0 0 1 1 2]\nRole_vec_idx[32], Val_match_idx[04]:\t[0 2 2 2 4 0 1 0 2 1 2 ... 1 0 2 0 1 1 1 0 0 1 0]\nRole_vec_idx[33], Val_match_idx[175]:\t[0 1 2 1 0 0 0 0 1 2 1 ... 2 0 1 1 0 0 0 0 0 0 1]\nRole_vec_idx[34], Val_match_idx[124]:\t[3 3 0 0 1 2 2 2 0 1 1 ... 3 1 2 0 1 0 2 1 0 0 1]\nRole_vec_idx[35], Val_match_idx[194]:\t[0 2 1 2 3 1 0 0 1 2 1 ... 1 0 0 0 0 4 1 0 2 0 0]\nRole_vec_idx[36], Val_match_idx[162]:\t[0 0 3 1 2 0 1 0 0 1 1 ... 1 3 0 1 1 0 1 2 0 1 1]\nRole_vec_idx[37], Val_match_idx[236]:\t[3 1 0 3 3 2 1 2 1 0 0 ... 1 1 3 2 2 3 5 0 2 2 0]\nRole_vec_idx[38], Val_match_idx[26]:\t[1 2 0 1 1 1 2 1 2 1 1 ... 1 0 0 1 0 1 0 1 1 1 0]\nRole_vec_idx[39], Val_match_idx[70]:\t[2 3 3 0 0 1 0 0 3 1 3 ... 0 2 1 3 0 1 0 1 0 0 1]\nRole_vec_idx[40], Val_match_idx[51]:\t[1 1 0 1 2 0 1 2 1 1 1 ... 1 0 1 2 0 1 2 0 0 0 1]\nRole_vec_idx[41], Val_match_idx[189]:\t[0 1 1 0 1 4 2 3 2 0 1 ... 1 1 0 1 1 1 1 1 0 1 1]\nRole_vec_idx[42], Val_match_idx[54]:\t[2 0 0 1 1 1 0 1 3 1 0 ... 1 0 0 0 0 2 1 1 0 3 0]\nRole_vec_idx[43], Val_match_idx[79]:\t[1 2 2 2 0 3 2 1 1 1 0 ... 1 2 1 1 2 2 0 2 0 0 1]\nRole_vec_idx[44], Val_match_idx[61]:\t[2 2 4 1 2 0 3 0 2 1 1 ... 0 0 2 1 1 1 4 2 0 1 2]\nRole_vec_idx[45], Val_match_idx[25]:\t[1 0 0 1 1 2 1 0 0 0 2 ... 0 1 0 0 3 0 2 1 3 2 1]\nRole_vec_idx[46], Val_match_idx[46]:\t[1 1 1 1 1 0 0 3 1 1 0 ... 2 0 1 3 2 2 1 0 1 2 0]\nRole_vec_idx[47], Val_match_idx[51]:\t[1 1 1 2 0 1 1 0 2 1 1 ... 1 1 2 3 2 0 1 1 1 0 2]\nRole_vec_idx[48], Val_match_idx[88]:\t[1 1 1 1 1 1 0 4 0 1 1 ... 0 1 2 0 0 0 4 2 1 0 0]\nRole_vec_idx[49], Val_match_idx[116]:\t[1 0 1 1 1 1 0 0 0 0 2 ... 
2 0 0 0 1 2 0 0 0 2 1]\nRole_vec_idx[50], Val_match_idx[45]:\t[1 0 1 0 0 1 2 0 2 2 2 ... 1 0 0 1 1 1 1 2 1 0 0]\nRole_vec_idx[51], Val_match_idx[14]:\t[2 1 1 2 1 3 0 3 1 1 1 ... 1 0 1 2 1 0 0 0 1 1 3]\nRole_vec_idx[52], Val_match_idx[15]:\t[0 0 0 1 0 3 0 0 0 2 1 ... 1 2 2 1 0 0 1 1 1 1 1]\nRole_vec_idx[53], Val_match_idx[64]:\t[0 1 1 2 0 1 0 0 0 3 2 ... 0 1 0 1 1 0 2 1 3 0 1]\nRole_vec_idx[54], Val_match_idx[138]:\t[1 1 1 2 1 2 1 1 0 2 1 ... 0 1 0 2 0 0 0 3 1 2 0]\nRole_vec_idx[55], Val_match_idx[227]:\t[1 1 2 0 0 0 2 2 1 1 0 ... 0 1 1 0 0 0 2 1 0 1 1]\nRole_vec_idx[56], Val_match_idx[192]:\t[1 1 2 0 1 1 1 0 0 1 0 ... 0 2 1 2 0 2 0 2 4 1 1]\nRole_vec_idx[57], Val_match_idx[142]:\t[1 2 0 3 0 0 1 1 0 0 1 ... 1 1 1 1 3 1 1 1 0 2 2]\nRole_vec_idx[58], Val_match_idx[189]:\t[2 0 1 2 1 1 1 1 1 2 0 ... 3 0 3 0 0 0 1 0 2 1 1]\nRole_vec_idx[59], Val_match_idx[06]:\t[2 2 0 0 0 0 4 1 1 2 1 ... 2 0 2 1 1 1 1 1 0 4 1]\nRole_vec_idx[60], Val_match_idx[37]:\t[0 0 0 1 0 1 0 1 3 0 1 ... 2 2 1 1 0 0 1 1 0 1 0]\nRole_vec_idx[61], Val_match_idx[26]:\t[2 1 0 0 2 1 1 1 0 3 0 ... 2 2 2 2 1 0 1 3 0 1 1]\nRole_vec_idx[62], Val_match_idx[06]:\t[0 0 1 1 0 1 4 1 0 0 0 ... 0 2 1 2 0 1 2 1 0 2 0]\nRole_vec_idx[63], Val_match_idx[18]:\t[2 0 2 1 1 2 2 0 1 1 3 ... 2 1 1 1 0 0 1 1 2 1 1]\nRole_vec_idx[64], Val_match_idx[118]:\t[1 0 1 2 1 1 0 0 1 0 1 ... 0 1 0 1 2 1 2 1 2 2 3]\nRole_vec_idx[65], Val_match_idx[65]:\t[1 2 2 0 1 2 1 2 1 1 2 ... 4 1 0 2 1 0 0 0 0 1 3]\nRole_vec_idx[66], Val_match_idx[25]:\t[3 1 2 1 1 1 0 0 1 1 0 ... 0 3 0 0 1 1 1 0 2 0 0]\nRole_vec_idx[67], Val_match_idx[48]:\t[3 1 1 0 1 0 1 1 1 1 0 ... 1 1 0 1 2 3 1 1 0 1 1]\nRole_vec_idx[68], Val_match_idx[34]:\t[1 2 3 0 1 2 2 1 0 2 2 ... 0 0 1 2 0 0 1 0 0 3 2]\nRole_vec_idx[69], Val_match_idx[67]:\t[0 1 0 3 1 0 0 0 0 1 0 ... 0 1 1 0 2 1 2 1 1 1 0]\nRole_vec_idx[70], Val_match_idx[16]:\t[2 1 1 1 0 0 1 0 1 2 2 ... 1 3 1 1 1 1 1 3 1 3 2]\nRole_vec_idx[71], Val_match_idx[24]:\t[1 2 1 1 1 1 2 2 1 1 1 ... 0 0 0 1 0 0 0 1 1 0 1]\nRole_vec_idx[72], Val_match_idx[32]:\t[2 3 0 0 0 2 0 0 2 3 0 ... 2 0 1 1 1 1 0 1 1 1 0]\nRole_vec_idx[73], Val_match_idx[01]:\t[0 4 2 3 1 1 0 3 0 2 1 ... 0 1 0 1 1 1 4 2 1 0 0]\nRole_vec_idx[74], Val_match_idx[16]:\t[1 0 1 2 0 0 0 2 1 1 0 ... 0 3 0 1 0 1 0 0 1 1 1]\nRole_vec_idx[75], Val_match_idx[54]:\t[1 2 0 3 2 1 3 0 0 0 2 ... 0 0 2 1 2 1 1 4 0 0 0]\nRole_vec_idx[76], Val_match_idx[41]:\t[1 2 0 0 0 2 1 1 0 1 1 ... 0 0 2 0 4 2 1 2 1 1 1]\nRole_vec_idx[77], Val_match_idx[36]:\t[0 1 0 1 2 2 2 0 1 0 0 ... 1 3 2 0 0 1 0 1 1 1 0]\nRole_vec_idx[78], Val_match_idx[82]:\t[2 2 1 1 1 0 2 0 1 2 3 ... 0 3 0 3 0 1 0 2 1 1 1]\nRole_vec_idx[79], Val_match_idx[160]:\t[0 0 3 1 0 1 0 0 1 3 0 ... 1 2 2 0 0 0 1 1 1 2 0]\nRole_vec_idx[80], Val_match_idx[11]:\t[1 2 1 2 0 3 2 0 2 1 1 ... 3 0 1 1 0 1 1 0 0 2 0]\nRole_vec_idx[81], Val_match_idx[18]:\t[2 1 0 0 0 0 1 2 1 1 1 ... 2 3 1 1 0 2 3 1 1 1 2]\nRole_vec_idx[82], Val_match_idx[42]:\t[0 0 1 1 0 0 0 1 1 4 0 ... 1 1 1 0 1 1 0 2 0 3 1]\nRole_vec_idx[83], Val_match_idx[235]:\t[0 1 1 0 1 2 2 0 1 3 1 ... 0 0 0 0 0 3 0 1 2 3 2]\nRole_vec_idx[84], Val_match_idx[43]:\t[3 1 0 1 1 0 0 2 2 2 0 ... 2 1 1 2 2 3 0 0 0 2 1]\nRole_vec_idx[85], Val_match_idx[12]:\t[1 1 1 1 1 1 0 1 2 0 0 ... 3 1 1 1 3 0 1 0 2 0 1]\nRole_vec_idx[86], Val_match_idx[107]:\t[0 1 0 0 0 1 3 1 2 1 1 ... 1 0 0 1 0 1 0 1 0 1 1]\nRole_vec_idx[87], Val_match_idx[155]:\t[1 1 0 4 4 1 2 2 0 1 1 ... 0 0 2 1 1 2 3 1 1 3 2]\nRole_vec_idx[88], Val_match_idx[110]:\t[1 1 2 1 2 1 1 2 0 0 2 ... 
1 2 1 0 1 3 2 1 1 2 1]\nRole_vec_idx[89], Val_match_idx[194]:\t[0 1 1 0 1 0 2 1 2 1 1 ... 2 2 0 1 4 1 1 0 2 0 0]\nRole_vec_idx[90], Val_match_idx[246]:\t[0 1 0 0 1 0 0 0 1 0 2 ... 1 3 2 1 0 1 1 5 1 0 2]\nRole_vec_idx[91], Val_match_idx[14]:\t[0 2 0 2 0 1 1 3 1 2 0 ... 2 2 1 0 0 2 0 1 2 0 0]\nRole_vec_idx[92], Val_match_idx[109]:\t[1 0 0 1 0 2 2 1 1 0 1 ... 0 0 1 1 2 1 1 0 1 0 2]\nRole_vec_idx[93], Val_match_idx[222]:\t[2 0 1 1 0 1 1 1 0 0 1 ... 2 0 1 0 0 1 0 0 1 1 1]\nRole_vec_idx[94], Val_match_idx[88]:\t[1 0 1 1 1 1 1 1 1 0 0 ... 2 0 0 1 0 0 0 0 2 0 0]\nRole_vec_idx[95], Val_match_idx[172]:\t[3 2 0 3 1 3 1 0 1 0 0 ... 0 3 0 2 0 2 3 1 2 1 0]\nRole_vec_idx[96], Val_match_idx[37]:\t[1 3 2 0 1 0 2 2 1 1 0 ... 0 1 2 0 1 0 0 1 1 1 0]\nRole_vec_idx[97], Val_match_idx[24]:\t[0 0 1 0 0 0 0 3 0 2 2 ... 2 1 0 0 3 0 1 2 3 3 1]\nRole_vec_idx[98], Val_match_idx[149]:\t[0 0 2 0 1 0 0 2 2 2 3 ... 1 2 3 0 0 1 2 0 1 2 0]\nRole_vec_idx[99], Val_match_idx[86]:\t[1 1 1 2 1 0 1 1 0 0 2 ... 2 0 0 1 0 2 0 1 1 1 3]\nRole_vec_idx[100], Val_match_idx[211]:\t[0 1 0 1 0 1 0 1 0 0 1 ... 1 5 0 1 0 3 0 3 1 1 2]\nRole_vec_idx[101], Val_match_idx[86]:\t[2 0 1 0 0 0 2 1 0 1 0 ... 0 1 2 0 4 0 2 1 2 1 3]\nRole_vec_idx[102], Val_match_idx[33]:\t[1 2 2 1 2 0 2 0 1 2 0 ... 2 2 0 0 1 0 0 3 0 3 2]\nRole_vec_idx[103], Val_match_idx[181]:\t[4 2 1 0 1 1 0 0 2 0 0 ... 3 2 0 0 1 1 0 1 2 0 0]\nRole_vec_idx[104], Val_match_idx[59]:\t[1 1 2 2 1 0 0 2 2 0 1 ... 1 0 1 2 1 1 1 1 0 0 1]\nRole_vec_idx[105], Val_match_idx[34]:\t[0 3 0 1 2 1 1 2 1 2 2 ... 0 0 1 0 0 1 0 0 2 1 0]\nRole_vec_idx[106], Val_match_idx[39]:\t[1 0 0 0 1 1 1 0 1 0 0 ... 0 0 2 1 1 0 0 0 1 1 0]\nRole_vec_idx[107], Val_match_idx[89]:\t[1 0 0 0 2 1 1 1 1 2 1 ... 1 0 1 1 1 0 1 1 0 0 2]\nRole_vec_idx[108], Val_match_idx[122]:\t[0 1 1 0 1 1 0 2 2 0 1 ... 1 0 0 1 0 2 1 0 1 0 2]\nRole_vec_idx[109], Val_match_idx[119]:\t[0 1 0 1 0 1 0 1 1 1 2 ... 2 1 0 3 0 0 0 2 1 1 0]\nRole_vec_idx[110], Val_match_idx[65]:\t[0 2 1 2 3 2 3 2 2 0 1 ... 1 3 1 2 1 1 2 0 0 0 0]\nRole_vec_idx[111], Val_match_idx[100]:\t[1 2 0 1 4 1 0 3 0 1 3 ... 2 1 1 1 0 1 0 2 1 3 2]\nRole_vec_idx[112], Val_match_idx[102]:\t[1 1 0 0 0 0 0 0 0 0 2 ... 1 1 0 0 1 0 2 0 2 0 0]\nRole_vec_idx[113], Val_match_idx[04]:\t[1 0 0 1 4 1 1 1 1 1 0 ... 0 1 0 0 2 0 3 1 2 0 1]\nRole_vec_idx[114], Val_match_idx[235]:\t[2 1 2 0 0 0 2 3 1 0 2 ... 1 3 1 3 0 1 2 0 1 0 1]\nRole_vec_idx[115], Val_match_idx[02]:\t[0 0 5 2 0 1 1 3 0 0 0 ... 0 0 2 1 1 1 1 2 1 0 3]\nRole_vec_idx[116], Val_match_idx[15]:\t[0 0 1 1 1 0 2 0 2 2 3 ... 1 1 1 0 0 1 0 1 2 1 1]\nRole_vec_idx[117], Val_match_idx[37]:\t[0 0 3 3 0 1 2 3 2 1 2 ... 0 1 0 0 1 0 0 3 0 1 2]\nRole_vec_idx[118], Val_match_idx[194]:\t[1 2 1 0 1 2 0 1 2 1 1 ... 2 0 1 2 0 0 0 2 0 1 1]\nRole_vec_idx[119], Val_match_idx[58]:\t[1 1 2 1 1 0 2 2 1 0 3 ... 2 1 1 0 4 0 0 1 1 3 1]\nRole_vec_idx[120], Val_match_idx[39]:\t[1 0 1 0 0 2 1 1 0 1 1 ... 0 1 0 4 0 0 2 3 1 0 0]\nRole_vec_idx[121], Val_match_idx[00]:\t[4 0 1 4 1 1 0 1 2 2 1 ... 4 1 1 1 0 1 1 0 2 0 4]\nRole_vec_idx[122], Val_match_idx[01]:\t[1 5 1 0 2 1 3 2 1 1 1 ... 1 2 4 0 1 3 0 0 0 1 0]\nRole_vec_idx[123], Val_match_idx[67]:\t[0 2 1 1 2 1 0 0 0 1 1 ... 1 0 0 1 0 3 0 1 1 0 2]\nRole_vec_idx[124], Val_match_idx[128]:\t[0 0 2 0 0 2 0 3 0 2 1 ... 0 1 0 0 3 1 0 1 1 1 2]\nRole_vec_idx[125], Val_match_idx[21]:\t[1 0 0 0 1 1 0 0 1 0 0 ... 1 0 1 0 0 2 1 1 1 2 1]\nRole_vec_idx[126], Val_match_idx[57]:\t[0 1 0 1 0 3 1 1 0 2 0 ... 1 1 1 1 1 4 2 1 0 1 0]\nRole_vec_idx[127], Val_match_idx[73]:\t[2 2 0 0 0 0 2 3 0 0 1 ... 
0 1 3 1 1 1 0 0 0 2 2]\nRole_vec_idx[128], Val_match_idx[06]:\t[1 0 0 0 1 0 4 0 0 1 2 ... 1 0 2 2 2 2 3 1 1 1 0]\nRole_vec_idx[129], Val_match_idx[168]:\t[1 1 3 0 1 0 1 1 2 1 2 ... 0 0 0 0 0 1 2 1 1 1 0]\nRole_vec_idx[130], Val_match_idx[55]:\t[0 3 1 1 0 2 1 0 0 3 1 ... 0 1 0 2 0 0 0 2 0 1 0]\nRole_vec_idx[131], Val_match_idx[40]:\t[0 0 0 0 1 2 1 0 0 2 0 ... 1 1 2 1 2 3 0 2 0 2 0]\nRole_vec_idx[132], Val_match_idx[23]:\t[1 0 0 1 0 1 2 1 2 1 0 ... 0 1 4 0 0 1 1 1 1 1 0]\nRole_vec_idx[133], Val_match_idx[108]:\t[1 2 1 1 2 1 2 2 0 1 2 ... 1 2 1 0 0 1 0 1 0 1 2]\nRole_vec_idx[134], Val_match_idx[04]:\t[0 2 1 2 5 0 2 0 1 2 1 ... 1 1 1 1 1 0 0 2 0 0 1]\nRole_vec_idx[135], Val_match_idx[181]:\t[1 3 1 1 1 1 0 2 1 2 1 ... 2 0 1 0 0 1 0 0 0 2 0]\nRole_vec_idx[136], Val_match_idx[111]:\t[0 0 0 2 0 2 2 1 2 0 2 ... 0 2 2 1 1 2 0 0 0 0 1]\nRole_vec_idx[137], Val_match_idx[246]:\t[1 3 0 0 0 2 0 1 0 1 2 ... 1 0 1 1 3 0 1 7 2 0 1]\nRole_vec_idx[138], Val_match_idx[57]:\t[1 0 1 2 2 0 0 1 1 3 1 ... 0 2 0 1 2 0 2 1 0 1 1]\nRole_vec_idx[139], Val_match_idx[79]:\t[2 0 2 3 1 1 0 3 1 1 0 ... 2 1 0 0 1 1 0 0 1 0 1]\nRole_vec_idx[140], Val_match_idx[79]:\t[0 0 0 1 0 2 2 2 4 0 1 ... 1 1 0 2 0 0 1 2 1 1 0]\nRole_vec_idx[141], Val_match_idx[185]:\t[1 1 1 0 2 0 0 1 0 1 2 ... 0 1 0 0 0 3 1 0 0 1 0]\nRole_vec_idx[142], Val_match_idx[74]:\t[1 1 1 1 1 0 1 2 0 2 1 ... 1 1 1 0 0 0 0 0 1 1 2]\nRole_vec_idx[143], Val_match_idx[72]:\t[1 1 2 4 0 1 4 1 2 1 1 ... 1 3 1 3 1 1 0 1 2 1 1]\nRole_vec_idx[144], Val_match_idx[25]:\t[0 2 2 1 0 0 2 0 0 1 1 ... 0 1 1 0 1 1 2 2 1 0 1]\nRole_vec_idx[145], Val_match_idx[199]:\t[1 1 0 1 1 2 1 0 0 1 0 ... 3 1 0 0 0 0 0 1 0 1 0]\nRole_vec_idx[146], Val_match_idx[248]:\t[1 2 0 1 0 0 3 1 0 1 2 ... 1 2 1 0 2 2 1 2 0 5 4]\nRole_vec_idx[147], Val_match_idx[59]:\t[0 2 1 0 1 0 2 1 0 1 0 ... 1 1 1 2 3 2 2 1 2 0 0]\nRole_vec_idx[148], Val_match_idx[104]:\t[3 3 2 1 2 0 0 2 1 0 0 ... 0 2 3 2 2 0 0 0 3 2 1]\nRole_vec_idx[149], Val_match_idx[204]:\t[3 1 0 1 0 2 1 0 0 0 0 ... 1 0 1 1 0 1 0 0 0 0 2]\nRole_vec_idx[150], Val_match_idx[156]:\t[0 1 3 0 0 2 0 2 2 3 0 ... 2 1 0 1 3 2 0 0 0 0 0]\nRole_vec_idx[151], Val_match_idx[85]:\t[1 0 1 0 1 0 1 1 1 0 1 ... 0 0 0 2 0 1 1 1 3 0 3]\nRole_vec_idx[152], Val_match_idx[110]:\t[4 1 1 1 0 0 0 2 0 0 2 ... 3 1 0 1 0 0 0 0 2 0 0]\nRole_vec_idx[153], Val_match_idx[84]:\t[0 0 1 3 1 1 1 0 1 0 0 ... 1 1 2 1 2 0 0 1 1 1 2]\nRole_vec_idx[154], Val_match_idx[108]:\t[1 0 2 0 1 0 1 0 2 3 0 ... 1 3 3 3 1 2 0 1 2 3 1]\nRole_vec_idx[155], Val_match_idx[11]:\t[3 2 0 0 0 1 0 1 0 0 0 ... 2 0 0 1 1 1 2 1 1 1 0]\nRole_vec_idx[156], Val_match_idx[232]:\t[1 0 1 1 0 1 1 0 4 1 1 ... 1 2 1 1 0 1 1 3 1 0 0]\nRole_vec_idx[157], Val_match_idx[245]:\t[2 1 2 1 2 1 2 1 0 0 1 ... 0 3 3 0 1 2 5 2 1 0 1]\nRole_vec_idx[158], Val_match_idx[15]:\t[2 0 0 0 1 2 0 3 1 0 1 ... 0 1 0 1 0 1 0 1 1 4 2]\nRole_vec_idx[159], Val_match_idx[63]:\t[1 1 0 2 1 1 0 1 0 0 2 ... 1 0 1 2 0 1 1 1 1 1 0]\nRole_vec_idx[160], Val_match_idx[109]:\t[2 3 0 0 0 3 2 0 1 1 1 ... 1 2 0 1 1 1 1 3 0 0 0]\nRole_vec_idx[161], Val_match_idx[02]:\t[0 1 3 0 0 1 0 0 2 3 1 ... 1 1 0 1 1 0 2 1 2 1 1]\nRole_vec_idx[162], Val_match_idx[221]:\t[0 3 2 1 2 1 1 1 0 2 1 ... 0 1 3 0 1 2 2 1 0 2 1]\nRole_vec_idx[163], Val_match_idx[66]:\t[1 1 3 1 1 1 1 1 0 1 3 ... 1 0 2 0 1 0 0 0 1 1 3]\nRole_vec_idx[164], Val_match_idx[183]:\t[2 0 1 1 0 0 2 2 0 3 1 ... 0 2 2 2 0 0 2 2 2 1 0]\nRole_vec_idx[165], Val_match_idx[132]:\t[0 1 2 2 0 0 1 1 1 0 1 ... 2 2 2 3 2 1 1 2 0 1 1]\nRole_vec_idx[166], Val_match_idx[18]:\t[2 1 0 2 0 3 0 2 2 1 2 ... 
1 4 0 1 0 0 1 1 1 2 3]\nRole_vec_idx[167], Val_match_idx[47]:\t[0 0 2 1 0 1 2 1 0 3 1 ... 0 0 0 0 3 1 1 1 0 0 0]\nRole_vec_idx[168], Val_match_idx[25]:\t[2 1 2 0 0 3 1 4 0 3 1 ... 2 2 0 2 1 1 1 2 1 3 0]\nRole_vec_idx[169], Val_match_idx[11]:\t[0 2 2 1 1 0 0 1 0 1 2 ... 0 0 0 0 0 0 0 2 3 1 1]\nRole_vec_idx[170], Val_match_idx[29]:\t[0 0 1 1 0 3 0 0 0 0 1 ... 1 1 0 1 2 0 1 0 0 0 2]\nRole_vec_idx[171], Val_match_idx[30]:\t[1 2 2 0 1 1 1 0 1 0 1 ... 1 1 0 1 1 1 1 0 1 2 0]\nRole_vec_idx[172], Val_match_idx[14]:\t[1 1 0 0 1 1 0 2 3 2 2 ... 0 1 1 0 1 1 4 1 1 0 2]\nRole_vec_idx[173], Val_match_idx[42]:\t[0 1 0 2 0 0 1 0 2 1 2 ... 0 3 0 0 2 2 2 1 1 3 1]\nRole_vec_idx[174], Val_match_idx[47]:\t[1 0 4 0 2 1 0 2 0 0 1 ... 0 1 1 1 0 0 1 1 1 1 2]\nRole_vec_idx[175], Val_match_idx[86]:\t[3 2 2 1 1 0 0 1 1 1 2 ... 0 0 1 1 0 1 2 1 0 2 2]\nRole_vec_idx[176], Val_match_idx[129]:\t[0 0 0 1 1 0 0 0 1 1 2 ... 0 0 1 0 0 2 0 1 1 1 1]\nRole_vec_idx[177], Val_match_idx[193]:\t[3 1 2 0 1 2 1 0 0 0 1 ... 2 0 1 2 1 0 0 0 1 0 1]\nRole_vec_idx[178], Val_match_idx[16]:\t[0 1 0 1 0 2 0 1 0 1 2 ... 0 0 1 1 0 0 1 0 1 3 1]\nRole_vec_idx[179], Val_match_idx[51]:\t[3 0 0 2 1 2 2 1 1 2 2 ... 2 1 2 1 2 0 0 0 2 0 1]\nRole_vec_idx[180], Val_match_idx[138]:\t[1 2 0 1 2 3 0 3 2 1 1 ... 2 1 1 1 1 0 0 1 1 0 1]\nRole_vec_idx[181], Val_match_idx[28]:\t[0 2 0 0 0 0 0 1 2 1 0 ... 2 1 0 0 0 1 0 0 1 1 1]\nRole_vec_idx[182], Val_match_idx[248]:\t[1 0 0 0 2 2 1 2 2 0 0 ... 0 2 0 2 0 1 1 0 2 5 1]\nRole_vec_idx[183], Val_match_idx[88]:\t[1 1 0 2 0 1 0 0 1 2 0 ... 0 1 0 1 0 2 0 0 0 1 1]\nRole_vec_idx[184], Val_match_idx[35]:\t[0 2 2 0 0 0 3 1 2 0 2 ... 1 0 2 2 2 2 1 0 0 0 3]\nRole_vec_idx[185], Val_match_idx[17]:\t[1 1 2 2 1 0 1 1 1 2 0 ... 0 0 1 2 1 1 0 0 0 1 0]\nRole_vec_idx[186], Val_match_idx[244]:\t[2 1 1 0 2 1 0 0 0 1 0 ... 0 1 1 0 3 5 2 1 1 1 3]\nRole_vec_idx[187], Val_match_idx[152]:\t[2 1 0 1 0 1 0 0 2 2 1 ... 0 1 2 1 3 1 0 1 1 1 3]\nRole_vec_idx[188], Val_match_idx[33]:\t[3 0 2 3 2 0 2 1 1 0 0 ... 1 2 0 2 2 1 3 2 1 2 0]\nRole_vec_idx[189], Val_match_idx[202]:\t[0 0 1 2 0 2 1 0 2 1 1 ... 1 0 2 1 1 0 0 0 1 1 0]\nRole_vec_idx[190], Val_match_idx[50]:\t[4 0 0 2 0 0 1 2 3 0 2 ... 1 0 0 1 2 0 0 1 1 0 2]\nRole_vec_idx[191], Val_match_idx[111]:\t[3 2 4 0 1 1 2 1 1 2 2 ... 1 1 0 1 0 0 0 0 2 1 0]\nRole_vec_idx[192], Val_match_idx[150]:\t[2 0 2 1 0 3 1 0 0 1 2 ... 1 1 0 0 0 0 1 1 1 0 4]\nRole_vec_idx[193], Val_match_idx[223]:\t[0 2 2 1 2 0 1 0 1 0 0 ... 1 1 0 1 0 2 3 3 2 1 4]\nRole_vec_idx[194], Val_match_idx[85]:\t[0 0 0 0 2 0 0 0 1 1 1 ... 1 1 1 2 0 1 4 2 1 0 2]\nRole_vec_idx[195], Val_match_idx[127]:\t[2 2 0 1 0 0 2 0 2 1 2 ... 1 1 1 2 0 0 0 0 0 2 1]\nRole_vec_idx[196], Val_match_idx[47]:\t[1 1 0 0 2 0 0 1 0 0 1 ... 0 0 2 0 2 2 0 2 1 1 2]\nRole_vec_idx[197], Val_match_idx[124]:\t[0 1 4 0 3 0 0 2 1 2 1 ... 0 0 0 0 1 2 1 1 0 0 1]\nRole_vec_idx[198], Val_match_idx[95]:\t[1 2 1 1 2 2 2 2 0 3 1 ... 0 1 2 3 0 1 1 1 1 0 1]\nRole_vec_idx[199], Val_match_idx[22]:\t[0 2 0 1 0 1 0 0 0 0 1 ... 1 1 2 3 1 0 2 0 1 2 1]\nRole_vec_idx[200], Val_match_idx[59]:\t[2 0 1 0 3 2 1 0 0 1 0 ... 1 0 4 0 0 1 1 1 2 1 1]\nRole_vec_idx[201], Val_match_idx[14]:\t[1 2 1 1 2 1 0 0 1 1 0 ... 4 0 0 1 0 0 1 0 1 0 0]\nRole_vec_idx[202], Val_match_idx[138]:\t[0 0 2 1 0 2 2 1 0 2 4 ... 1 1 2 2 1 1 0 2 0 2 1]\nRole_vec_idx[203], Val_match_idx[71]:\t[1 1 1 0 0 1 0 1 0 1 1 ... 0 0 1 0 1 0 0 0 0 0 0]\nRole_vec_idx[204], Val_match_idx[66]:\t[0 0 2 3 0 2 1 0 0 2 2 ... 0 2 2 1 0 2 0 0 2 0 2]\nRole_vec_idx[205], Val_match_idx[121]:\t[2 0 2 0 1 1 3 1 0 0 1 ... 
3 1 3 1 0 0 2 1 1 2 3]\nRole_vec_idx[206], Val_match_idx[04]:\t[2 0 1 1 4 2 0 1 1 0 0 ... 1 2 0 0 0 1 1 1 1 0 0]\nRole_vec_idx[207], Val_match_idx[128]:\t[0 4 1 2 1 0 1 1 0 1 0 ... 3 0 0 1 1 3 1 1 0 1 1]\nRole_vec_idx[208], Val_match_idx[128]:\t[2 2 0 2 1 1 2 2 0 1 2 ... 1 1 1 1 0 1 1 2 0 1 1]\nRole_vec_idx[209], Val_match_idx[110]:\t[1 3 1 1 0 0 0 2 0 1 1 ... 0 2 0 1 0 1 1 0 2 3 1]\nRole_vec_idx[210], Val_match_idx[51]:\t[0 0 0 0 1 0 2 0 1 0 2 ... 1 1 2 2 1 2 1 1 0 0 1]\nRole_vec_idx[211], Val_match_idx[61]:\t[0 1 3 0 0 2 1 0 1 0 2 ... 0 1 0 1 3 0 0 4 2 0 0]\nRole_vec_idx[212], Val_match_idx[146]:\t[1 2 0 4 0 0 0 1 1 1 1 ... 0 1 0 0 0 3 1 1 0 0 2]\nRole_vec_idx[213], Val_match_idx[104]:\t[3 1 0 1 2 0 1 2 1 2 0 ... 0 3 1 1 0 0 0 1 0 0 1]\nRole_vec_idx[214], Val_match_idx[04]:\t[0 0 1 0 4 0 0 0 0 1 3 ... 0 0 0 0 1 1 0 1 0 1 0]\nRole_vec_idx[215], Val_match_idx[98]:\t[2 3 0 1 1 2 2 1 2 0 1 ... 1 0 1 2 1 1 1 3 2 0 0]\nRole_vec_idx[216], Val_match_idx[18]:\t[0 1 1 3 0 1 0 1 1 2 2 ... 2 1 2 0 2 0 1 0 1 1 2]\nRole_vec_idx[217], Val_match_idx[152]:\t[0 3 1 1 1 1 1 2 2 2 2 ... 2 0 2 3 2 1 0 0 0 0 1]\nRole_vec_idx[218], Val_match_idx[08]:\t[0 0 0 2 1 3 0 0 5 1 1 ... 1 2 1 1 0 0 2 3 0 3 2]\nRole_vec_idx[219], Val_match_idx[27]:\t[2 0 0 1 1 2 1 1 1 2 1 ... 2 1 1 2 3 3 1 1 0 1 0]\nRole_vec_idx[220], Val_match_idx[14]:\t[1 1 0 2 0 1 1 0 2 2 1 ... 1 1 1 3 0 4 1 0 0 1 1]\nRole_vec_idx[221], Val_match_idx[190]:\t[1 1 1 0 1 1 0 2 0 2 4 ... 1 0 1 1 0 2 0 0 2 1 1]\nRole_vec_idx[222], Val_match_idx[92]:\t[1 0 1 1 1 0 1 2 3 3 2 ... 3 0 0 1 0 0 0 1 0 1 1]\nRole_vec_idx[223], Val_match_idx[07]:\t[0 2 1 1 2 1 0 4 1 0 1 ... 0 1 1 1 0 1 1 3 1 1 2]\nRole_vec_idx[224], Val_match_idx[153]:\t[1 3 0 0 3 0 2 0 0 1 1 ... 1 1 2 1 3 2 2 2 2 4 0]\nRole_vec_idx[225], Val_match_idx[120]:\t[1 1 2 1 2 0 0 0 1 2 1 ... 2 1 3 3 2 1 1 0 1 0 1]\nRole_vec_idx[226], Val_match_idx[223]:\t[2 1 0 0 0 1 1 3 1 1 1 ... 1 1 0 1 0 0 1 2 2 1 2]\nRole_vec_idx[227], Val_match_idx[116]:\t[1 2 2 3 1 1 1 0 0 2 0 ... 0 0 1 1 1 0 2 0 1 2 1]\nRole_vec_idx[228], Val_match_idx[00]:\t[4 1 3 0 0 1 0 0 1 1 0 ... 2 2 0 2 1 0 0 1 0 1 1]\nRole_vec_idx[229], Val_match_idx[67]:\t[2 1 1 0 3 2 1 0 0 0 0 ... 2 1 4 2 3 1 0 0 1 1 1]\nRole_vec_idx[230], Val_match_idx[24]:\t[1 1 2 0 1 0 0 0 2 1 1 ... 1 2 3 1 1 3 0 1 1 1 0]\nRole_vec_idx[231], Val_match_idx[108]:\t[2 0 1 3 2 1 0 0 2 0 1 ... 0 0 0 1 3 0 0 1 0 0 0]\nRole_vec_idx[232], Val_match_idx[246]:\t[0 1 0 0 1 1 1 1 2 1 1 ... 0 3 1 1 0 1 0 4 2 1 0]\nRole_vec_idx[233], Val_match_idx[07]:\t[0 1 2 4 2 1 1 5 0 1 2 ... 0 0 0 1 1 0 1 1 0 0 0]\nRole_vec_idx[234], Val_match_idx[15]:\t[0 0 1 0 1 1 0 1 1 1 0 ... 1 1 2 0 0 1 0 1 0 0 1]\nRole_vec_idx[235], Val_match_idx[08]:\t[0 1 2 0 0 3 3 1 4 1 1 ... 0 2 3 2 2 2 2 3 2 3 1]\nRole_vec_idx[236], Val_match_idx[231]:\t[2 0 2 1 0 2 1 2 0 2 1 ... 0 0 1 2 0 0 2 1 1 1 0]\nRole_vec_idx[237], Val_match_idx[140]:\t[0 2 2 2 1 2 0 1 3 0 0 ... 0 0 1 0 2 0 0 4 2 0 1]\nRole_vec_idx[238], Val_match_idx[08]:\t[0 3 2 0 2 2 1 3 4 3 0 ... 1 0 0 2 0 1 2 1 1 1 2]\nRole_vec_idx[239], Val_match_idx[180]:\t[3 0 1 5 2 1 0 1 0 1 0 ... 1 0 1 1 3 1 4 1 0 2 1]\nRole_vec_idx[240], Val_match_idx[224]:\t[2 0 0 3 1 2 2 1 0 1 1 ... 0 0 0 0 2 2 0 0 2 1 1]\nRole_vec_idx[241], Val_match_idx[121]:\t[1 1 1 1 1 1 1 1 0 2 1 ... 1 0 2 0 5 1 1 1 0 1 3]\nRole_vec_idx[242], Val_match_idx[01]:\t[1 5 3 2 0 1 3 0 1 0 2 ... 0 1 1 0 0 2 0 2 2 0 0]\nRole_vec_idx[243], Val_match_idx[20]:\t[1 0 0 0 0 1 0 1 0 0 0 ... 0 0 0 2 2 0 0 1 0 2 2]\nRole_vec_idx[244], Val_match_idx[68]:\t[2 1 3 0 1 2 1 0 1 0 0 ... 
1 1 0 1 0 3 2 3 2 1 1]\nRole_vec_idx[245], Val_match_idx[21]:\t[1 1 2 1 1 3 0 1 0 2 1 ... 0 4 1 1 2 2 1 0 0 1 0]\nRole_vec_idx[246], Val_match_idx[52]:\t[0 1 0 1 0 0 2 1 1 1 2 ... 0 3 1 0 1 1 0 1 0 4 0]\nRole_vec_idx[247], Val_match_idx[03]:\t[1 3 1 4 0 2 0 2 1 2 1 ... 0 0 2 2 0 0 2 2 2 0 1]\nRole_vec_idx[248], Val_match_idx[227]:\t[0 1 1 0 0 1 1 0 2 2 0 ... 4 0 0 1 2 2 1 1 2 1 1]\nRole_vec_idx[249], Val_match_idx[136]:\t[0 0 0 0 2 0 0 2 1 1 1 ... 1 2 2 1 2 1 0 1 2 0 0]\n" ] ], [ [ "# Binding: Brian 2 Code - SIMPLIFIED CIRCUIT ==================", "_____no_output_____" ], [ "![title](img/Fig10.png)", "_____no_output_____" ], [ "Generate the time delay data_matrix from the P_matrix so that the input vector time delay in each slot plus the delay matrix entry add up to bits_per_slot in each slot (e.g. a time delay in slot 0 of the input vector of, say, 10 will have a corresponding delay of 90 in the data_matrix, so that if this vector is received then the match condition is an input potential to the neuron at 100).\n\n\n---------------------------------------------------------------------------------------------------------------\n\nThis section of the code implements the role/filler binding in the Brian2 network (net1).", "_____no_output_____" ] ], [ [ "net1 = Network()\n\n# We first create an array of time delays which will be used to select the first Num_bound vectors from \n# the P_matrix with a time delay (input_delay) between each vector.\n\n\n# Calculate the array for the input spike generator\narray1 = np.ones(mem_size) * slots_per_vector * bits_per_slot\n\n# The input spike generator creates pairs of spikes corresponding to contiguous pairs of vectors from the memory that are\n# going to be bound together (i.e., vector_0 & vector_1, then vector_2 and vector_3, etc.)\n\nfor b in range(0,2*Num_bound,2):\n    array1[b] = (b)*input_delay\n    array1[b+1] = (b)*input_delay\n\n# Create the corresponding spike generator group.\nP = SpikeGeneratorGroup(mem_size,np.arange(mem_size), (array1)*ms)\n\nnet1.add(P)\n\n# We now define the set of equations and reset definitions that will be used to generate the neuron action\n# potentials and spike reset operations. Note that we make use of the Brian2 refractory operation.\n\nequ1 = '''\ndv/dt = (I)/tau : 1 \nI : 1\ntau : second\n'''\n\nequ2 = '''\ndv/dt = -v/tau : 1 (unless refractory)\ntau : second\nts : second\n'''\nreset1 = '''\nI=0.0\nv=0.0\n'''\n\nreset2 = '''\nts=t\nv=0.0\n'''\n\n\n# The G1 neurons perform the addition operation on the two selected vectors. Equ1 is a linearly increasing function \n# with a time constant of 2*bits_per_slot*ms (I=1.0). The G1 neuron group is stimulated from the P spike generator group with\n# spikes that simultaneously select a role and filler vector using the time delay on the G1 dendrites obtained from the P_matrix (S0.delay).\n# On receiving the first spike from either role or filler vector the value of I\n# is changed to 0.0, which holds the neuron potential constant until the second spike is received, when I = -1.0 and the neuron\n# potential decays until the threshold value v<0.0, when it fires to give the required modulus addition. The value of I is \n# reset to 1.0 using the spike from the P spike generator group (via SP1) and the next two vectors are added.\n\nG1 = NeuronGroup(slots_per_vector, equ1,\n                 threshold='v < 0.0 or v>=1.0', reset=reset1, method='euler')\n\nG1.v = 0.0\nG1.I = 1.0\nG1.tau = 2 * bits_per_slot * ms\n\nnet1.add(G1)\n\n\nS0 = Synapses(P, G1, 'w : 1', on_pre='I = (I-1)')\n\nrange_array1 = range(0, slots_per_vector)\nfor n in range(0,mem_size):\n    S0.connect(i=n,j=range_array1)\nS0.delay = np.reshape(P_matrix, mem_size * slots_per_vector) * ms\n\nnet1.add(S0)\n\n\nSP1 = Synapses(P, G1, 'w : 1', on_pre='I=1.0')\n\nfor n in range(0,mem_size):\n    SP1.connect(i=n,j=range_array1)\n\nnet1.add(SP1)\n\n# The G2 neurons select the earliest spike across all of the Num_bound cycles by remembering the time of the previous spike, \n# such that if there is no earlier spike from the G1 neurons this spike is regenerated. The refractory property of the \n# neuron is used to suppress any later spike in the current cycle.\n\nG2 = NeuronGroup(slots_per_vector, equ2, threshold='v>=0.5 or (t>=ts-0.01*ms+2*bits_per_slot*ms and t<=ts+0.01*ms+2*bits_per_slot*ms) ', reset=reset2, method='euler', refractory='t%(2*bits_per_slot*ms)')\n\nG2.v = 0.0\nG2.tau = 1*ms\nG2.ts = 0.0*ms\n\nnet1.add(G2)\n\nS12 = Synapses(G1, G2, 'w : 1', on_pre='v=1.0')\nS12.connect(j='i')\nnet1.add(S12)\n\n\n# Create the required monitors\n\nSMP = SpikeMonitor(P)\nnet1.add(SMP)\nM1 = StateMonitor(G1, 'v', record=True)\nnet1.add(M1)\nSM1 = SpikeMonitor(G1)\nnet1.add(SM1)\nM2 = StateMonitor(G2, 'v', record=True)\nnet1.add(M2)\nM2ts = StateMonitor(G2, 'ts', record=True)\nnet1.add(M2ts)\nSM2 = SpikeMonitor(G2)\nnet1.add(SM2)\n\n\n# Run Net1 for delta milliseconds\n\nnet1.run(delta*ms)\n\n# Obtain the sparse vector timings from the SM2 monitor and print the timings so that they can be compared with the theoretical values.\narray2 = np.array([SM2.i,SM2.t/ms])\nsub_array2 = array2[0:2, slots_per_vector:]\nprint()\nsorted_sub_array2 = sub_array2[:,sub_array2[0].argsort()].astype(int)\nP1_timing = sorted_sub_array2[1]\nP1_timing = P1_timing[P1_timing >= 2 * (Num_bound-1) * bits_per_slot] - 2 * (Num_bound - 1) * bits_per_slot\nprint(len(P1_timing))\nprint(P1_timing)", "\n100\n[74 61 60 85 39 82 47 72 90 79 17 ... 37 75 33 49 65 53 39 42 69 46 29]\n" ], [ "# The following plots the output from the different monitors\nfig, axs = plt.subplots(6,1,figsize=(16,14), gridspec_kw={'height_ratios': [2, 2, 2, 2, 2, 2]})\nsubplot(6,1,1)\nplot(SMP.t/ms, SMP.i,'|')\nxlabel('Time (ms)')\nylabel('P Neuron id')\n\nsubplot(6,1,2)\nplot(M1.t/ms, M1.v[target_neuron].T)\nxlabel('Time (ms)')\nylabel('G1 Neuron Voltage')\n\nsubplot(6,1,3)\nplot(SM1.t/ms, SM1.i,'|')\nxlabel('Time (ms)')\nylabel('G1 Neuron id')\nplt.ylim(y_low,y_high)\n\nsubplot(6,1,4)\nplot(M2.t/ms, M2.v[target_neuron].T)\nxlabel('Time (ms)')\nylabel('G2 Neuron Voltage')\n#plt.xlim(12000,14000)\n\nsubplot(6,1,5)\nplot(M2ts.t/ms, M2ts.ts[target_neuron].T)\nxlabel('Time (ms)')\nylabel('Previous Spike Time')\n#plt.xlim(12000,14000)\n\nsubplot(6,1,6)\nplot(SM2.t/ms, SM2.i,'|')\nxlabel('Time (ms)')\nylabel('G2 Neuron id')\n#plt.ylim(y_low,y_high)\n", "_____no_output_____" ] ], [ [ "# UnBinding: Brian 2 Code =====================================", "_____no_output_____" ], [ "![title](img/Fig9.png)", "_____no_output_____" ] ], [ [ "#--------------------------------------------------------------------------------------------------------\n# This section of the code implements the Brian2 neuromorphic circuit which unbinds the vector.\n# The unbound vector and a selected role vector are processed to give the corresponding 'noisy' filler vector,\n# which is then compared to the memory vectors to find the best match (i.e., the clean-up memory operation).\n\n\n# We first generate the time delay data_matrix which will be used in the 'clean-up memory', so that the input vector \n# time delay in each slot plus the delay matrix entry add up to the number of bits per slot \n# (e.g. a time delay in slot 0 of the input vector of, say, 10 will have a corresponding delay of 90 in the\n# data_matrix, so that if this vector is received then the match condition is an input potential to the neuron at 100)\n\ndata_matrix = bits_per_slot - P_matrix\n\nnet2 = Network()\n\nprint()\n\n# To pass the sparse vector from Net1 into Net2 we create a SpikeGeneratorGroup that uses the P1_timing from Net1 to generate\n# the sparse bound vector, which is fed on to NeuronGroup G6 via G7 (see SP17 and S7 below).\nP1 = SpikeGeneratorGroup(slots_per_vector, np.arange(slots_per_vector), P1_timing * ms)\n\nnet2.add(P1)\n\n# We now define the neuron potential equations and resets plus a preset\nequ2 = '''\ndv/dt = -v/tau : 1 \nI : 1\ntau : second\n'''\n\nequ3 = '''\ndv/dt = (I)/tau : 1 \nI : 1\ntau : second\n'''\n\nreset3 = '''\nI=1.0\nv=0.0\n'''\npreset1 = '''\nI = 1.0\nv = 0.0\n'''\n\n# NeuronGroup G7 is a recurrent circuit which simply repeats the sparse bound vector from P1 every 2*bits_per_slot milliseconds \n# and feeds the output vector into the G6 neuron group (see S7 below)\n\nG7 = NeuronGroup(slots_per_vector, equ2, threshold='v>=1.0', reset='v=0.0', method='euler')\nG7.v = 0.0\nG7.tau = 0.5*ms\n\nSP17 = Synapses(P1, G7, 'w : 1', on_pre='v=1.25')\nSP17.connect(j='i')\n#SP17.delay = bits_per_slot*ms\n\n\nS77 = Synapses(G7, G7, 'w : 1', on_pre='v=1.25')\nS77.connect(j='i')\n#S77.delay = 3*bits_per_slot*ms\nS77.delay = 2 * bits_per_slot * ms\n\nnet2.add(G7)\nnet2.add(SP17)\nnet2.add(S77)\n\n\n\n# Calculate the array for the input spike generator which cycles through the role vectors 0, 2, 4, etc.\narray2 = np.ones(mem_size) * slots_per_vector * bits_per_slot\nfor b in range(0,Num_bound):\n    #array2[b*2] = (b*3)*input_delay\n    array2[b*2] = (b*2)*input_delay\n\nP2 = SpikeGeneratorGroup(mem_size,np.arange(mem_size), (array2)*ms)\nnet2.add(P2)", "\n" ] ], [ [ "The G6 neuron group is stimulated from the P2 spike generator group and from the G7 neuron group. The P2 spike generator generates a role vector using the time delays on the G6 dendrites obtained from the P_matrix (S5.delay), and the G7 neuron group supplies the sparse bound vector.\n\nThe G6 neurons perform the subtraction operation on the selected vectors. In this case Equ3 is a linearly increasing function with a time constant of bits_per_slot*ms (I=1.0). On receiving the first spike from either the role or the bound vector the value of I becomes 0.0, which holds the neuron potential constant until the second spike is received, when I again becomes 1.0 and the neuron potential continues to increase until the threshold value v>1.0, when it fires. To give the required modulus subtraction the value of I is maintained at 1.0 to ensure a second vector is generated. One of these two vectors will have the correct modulus timings, and so we compare both vectors in the final neuron group stage (G8) to get the best match.", "_____no_output_____" ] ], [ [ "G6 = NeuronGroup(slots_per_vector, equ3, threshold='v>=1.0', reset=reset3, method='euler', refractory='2*Num_bound*ms')\n\nG6.v = 0.0\nG6.I = 1.0\nG6.tau = bits_per_slot * ms\n\nnet2.add(G6)\n\n\n\nS5 = Synapses(P2, G6, 'w : 1', on_pre='I = (I-1)%2')\n\nrange_array2 = range(0, slots_per_vector)\nfor n in range(0,mem_size):\n    S5.connect(i=n,j=range_array2)\nS5.delay = np.reshape(P_matrix, mem_size * slots_per_vector) * ms\n\nnet2.add(S5)\n\nS6 = Synapses(P2, G6, 'w : 1', on_pre=preset1)\n\nfor n in range(0,mem_size):\n    S6.connect(i=n,j=range_array2)\n\nnet2.add(S6)\n\n\nS7 = Synapses(G7, G6, 'w : 1', on_pre='I = (I-1)%2')\nS7.connect(j='i')\n\n\n\nnet2.add(S7)\n", "_____no_output_____" ] ], [ [ "This final NeuronGroup, G8, stage is the clean-up memory operation, using the transpose of the data_matrix to set the synaptic delays on the G8 dendrites. We only produce one output spike per match by using the refractory operator to suppress any further spikes. This could be improved to choose the largest matching spike.", "_____no_output_____" ] ], [ [ "G8 = NeuronGroup(mem_size, equ2, threshold='v >= 13.0', reset='v=0.0', method='euler')\n\nG8.v = 1.0\nG8.tau = 2.0*ms\n\nnet2.add(G8)\n\nrange_array3 = range(0,mem_size)\n\nS8 = Synapses(G6, G8, on_pre='v += 1.0')\n\nfor n in range(0, slots_per_vector):\n    S8.connect(i=n,j=range_array3)\n\ndata_matrix2 = np.transpose(data_matrix)\nS8.delay = np.reshape(data_matrix2, mem_size * slots_per_vector) * ms\nnet2.add(S8)\n\n# Create the required monitors\n\nSMP1 = SpikeMonitor(P1)\n\nnet2.add(SMP1)\n\nSM7 = SpikeMonitor(G7)\nnet2.add(SM7)\n\nSMP2 = SpikeMonitor(P2)\n\nnet2.add(SMP2)\n\nM6 = StateMonitor(G6, 'v', record=True)\n\nnet2.add(M6)\n\nSM6 = SpikeMonitor(G6)\n\nnet2.add(SM6)\n\n\nM8 = StateMonitor(G8, 'v', record=True)\n\nnet2.add(M8)\n\nSM8 = SpikeMonitor(G8)\n\nnet2.add(SM8)\n\nnet2.run(((2*Num_bound) * bits_per_slot + 3) * ms)", "_____no_output_____" ], [ "# Plot the sparse bound vector\n\nplot(SMP1.t/ms, SMP1.i,'|')\nxlabel('Time (ms)')\nylabel('P Neuron id')\nshow()\n\nfig, axs = plt.subplots(6,1,figsize=(16,14), gridspec_kw={'height_ratios': [2, 2, 2, 2, 2, 2]})\n\n# Plot the other monitors\nsubplot(6,1,1)\nplot(SM7.t/ms, SM7.i,'|')\nxlabel('Time (ms)')\nylabel('P1 Neuron id')\nplt.ylim(0, slots_per_vector)\n\n\nsubplot(6,1,2)\nplot(SMP2.t/ms, SMP2.i,'|')\nxlabel('Time (ms)')\nylabel('P2 Neuron id')\n#plt.xlim(0,2*bits_per_slot*Num_bound)\n#plt.xlim(bits_per_slot*Num_bound-100,2*bits_per_slot*(Num_bound+1))\n#plt.ylim(y_low,y_high)\n\nsubplot(6,1,3)\nplot(M6.t/ms, M6.v[0].T)\nxlabel('Time (ms)')\nylabel('G6 Neuron Voltage')\n#plt.xlim(0,2*bits_per_slot*Num_bound)\n#plt.xlim(bits_per_slot*Num_bound-100,2*bits_per_slot*(Num_bound+1))\n\nsubplot(6,1,4)\nplot(SM6.t/ms, SM6.i,'|')\nxlabel('Time (ms)')\nylabel('G6 Neuron id')\n\nsubplot(6,1,5)\nplot(M8.t/ms, M8.v.T)\nxlabel('Time (ms)')\nylabel('G8 Neuron Voltage')\n\nsubplot(6,1,6)\nplot(SM8.t/ms, SM8.i,'|')\nxlabel('Time (ms)')\nylabel('G8 Neuron id')", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
e7d298145bf4db3d4fa8c7dc57185c2969b55998
71,825
ipynb
Jupyter Notebook
ipython-notebook/Latest point detection with the Anomaly Detector API.ipynb
Tapasgt/AnomalyDetector
53367091dd239047ce4ee6b20accd93dc476f105
[ "MIT" ]
null
null
null
ipython-notebook/Latest point detection with the Anomaly Detector API.ipynb
Tapasgt/AnomalyDetector
53367091dd239047ce4ee6b20accd93dc476f105
[ "MIT" ]
null
null
null
ipython-notebook/Latest point detection with the Anomaly Detector API.ipynb
Tapasgt/AnomalyDetector
53367091dd239047ce4ee6b20accd93dc476f105
[ "MIT" ]
null
null
null
117.745902
20,455
0.629641
[ [ [ "# Latest point anomaly detection with the Anomaly Detector API", "_____no_output_____" ], [ "### Use this Jupyter notebook to start visualizing anomalies as a batch with the Anomaly Detector API in Python.\n\nWhile you can detect anomalies as a batch, you can also detect the anomaly status of the last data point in the time series. This notebook iteratively sends latest-point anomaly detection requests to the Anomaly Detector API and visualizes the response. The graph created at the end of this notebook will display the following:\n* Anomalies found while in the data set, highlighted.\n* Anomaly detection boundaries \n* Anomalies seen in the data, highlighted.\n\nBy calling the API on your data's latest points, you can monitor your data as it's created. \n\nThe following example simulates using the Anomaly Detector API on streaming data. Sections of the example time series are sent to the API over multiple iterations, and the anomaly status of each section's last data point is saved. The data set used in this example has a pattern that repeats roughly every 7 data points (the `period` in the request's JSON file), so for best results, the data set is sent in groups of 29 points (`4 * <period> + an extra data point`. See [Best practices for using the Anomaly Detector API](https://docs.microsoft.com/azure/cognitive-services/anomaly-detector/concepts/anomaly-detection-best-practices) for more information). ", "_____no_output_____" ] ], [ [ "# To start sending requests to the Anomaly Detector API, paste your subscription key below,\n# and replace the endpoint variable with the endpoint for your region.\nsubscription_key = '' \nendpoint_latest = 'https://westus2.api.cognitive.microsoft.com/anomalydetector/v1.0/timeseries/last/detect'", "_____no_output_____" ], [ "import requests\nimport json\nimport pandas as pd\nimport numpy as np\nfrom __future__ import print_function\nimport warnings\nwarnings.filterwarnings('ignore')\n\n# Import library to display results\nimport matplotlib.pyplot as plt\n%matplotlib inline ", "_____no_output_____" ], [ "from bokeh.plotting import figure,output_notebook, show\nfrom bokeh.palettes import Blues4\nfrom bokeh.models import ColumnDataSource,Slider\nimport datetime\nfrom bokeh.io import push_notebook\nfrom dateutil import parser\nfrom ipywidgets import interact, widgets, fixed\noutput_notebook()", "_____no_output_____" ], [ "def detect(endpoint, subscription_key, request_data):\n headers = {'Content-Type': 'application/json', 'Ocp-Apim-Subscription-Key': subscription_key}\n response = requests.post(endpoint, data=json.dumps(request_data), headers=headers)\n if response.status_code == 200:\n return json.loads(response.content.decode(\"utf-8\"))\n else:\n print(response.status_code)\n raise Exception(response.text)", "_____no_output_____" ], [ "def build_figure(result, sample_data, sensitivity):\n columns = {'expectedValues': result['expectedValues'], 'isAnomaly': result['isAnomaly'], 'isNegativeAnomaly': result['isNegativeAnomaly'],\n 'isPositiveAnomaly': result['isPositiveAnomaly'], 'upperMargins': result['upperMargins'], 'lowerMargins': result['lowerMargins']\n , 'value': [x['value'] for x in sample_data['series']], 'timestamp': [parser.parse(x['timestamp']) for x in sample_data['series']]}\n response = pd.DataFrame(data=columns)\n values = response['value']\n label = response['timestamp']\n anomalies = []\n anomaly_labels = []\n index = 0\n anomaly_indexes = []\n p = figure(x_axis_type='datetime', title=\"Anomaly Detection Result ({0} 
Sensitivity)\".format(sensitivity), width=800, height=600)\n for anom in response['isAnomaly']:\n if anom == True and (values[index] > response.iloc[index]['expectedValues'] + response.iloc[index]['upperMargins'] or \n values[index] < response.iloc[index]['expectedValues'] - response.iloc[index]['lowerMargins']):\n anomalies.append(values[index])\n anomaly_labels.append(label[index])\n anomaly_indexes.append(index)\n index = index+1\n upperband = response['expectedValues'] + response['upperMargins']\n lowerband = response['expectedValues'] -response['lowerMargins']\n band_x = np.append(label, label[::-1])\n band_y = np.append(lowerband, upperband[::-1])\n boundary = p.patch(band_x, band_y, color=Blues4[2], fill_alpha=0.5, line_width=1, legend='Boundary')\n p.line(label, values, legend='value', color=\"#2222aa\", line_width=1)\n p.line(label, response['expectedValues'], legend='expectedValue', line_width=1, line_dash=\"dotdash\", line_color='olivedrab')\n anom_source = ColumnDataSource(dict(x=anomaly_labels, y=anomalies))\n anoms = p.circle('x', 'y', size=5, color='tomato', source=anom_source)\n p.legend.border_line_width = 1\n p.legend.background_fill_alpha = 0.1\n show(p, notebook_handle=True)", "_____no_output_____" ] ], [ [ "### Detect latest anomaly of sample timeseries", "_____no_output_____" ], [ "The following cells call the Anomaly Detector API with an example time series data set and different sensitivities for anomaly detection. Varying the sensitivity of the Anomaly Detector API can improve how well the response fits your data. ", "_____no_output_____" ] ], [ [ "def detect_anomaly(sensitivity):\n sample_data = json.load(open('sample.json'))\n points = sample_data['series']\n skip_point = 29\n result = {'expectedValues': [None]*len(points), 'upperMargins': [None]*len(points), \n 'lowerMargins': [None]*len(points), 'isNegativeAnomaly': [False]*len(points), \n 'isPositiveAnomaly':[False]*len(points), 'isAnomaly': [False]*len(points)}\n anom_count = 0\n for i in range(skip_point, len(points)+1):\n single_sample_data = {}\n single_sample_data['series'] = points[i-29:i]\n single_sample_data['granularity'] = 'daily'\n single_sample_data['maxAnomalyRatio'] = 0.25\n single_sample_data['sensitivity'] = sensitivity\n single_point = detect(endpoint_latest, subscription_key, single_sample_data)\n if single_point['isAnomaly'] == True:\n anom_count = anom_count + 1\n\n result['expectedValues'][i-1] = single_point['expectedValue']\n result['upperMargins'][i-1] = single_point['upperMargin']\n result['lowerMargins'][i-1] = single_point['lowerMargin']\n result['isNegativeAnomaly'][i-1] = single_point['isNegativeAnomaly']\n result['isPositiveAnomaly'][i-1] = single_point['isPositiveAnomaly']\n result['isAnomaly'][i-1] = single_point['isAnomaly']\n \n build_figure(result, sample_data, sensitivity)", "_____no_output_____" ], [ "# 95 sensitvity\ndetect_anomaly(95)", "_____no_output_____" ], [ "# 85 sensitvity\ndetect_anomaly(85)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ] ]
e7d29ee2bd987816103274f53e87eaf77abd3438
2,944
ipynb
Jupyter Notebook
notebooks/01_intro/01-Intro-FTW2019.ipynb
PyDataMallorca/FTW2019_Introduccion_a_data_science_en_Python
3872c1a1174ebb1c8d764f1cc123e370bb8d93f4
[ "MIT" ]
6
2019-06-30T20:22:34.000Z
2019-10-05T13:50:50.000Z
notebooks/01_intro/01-Intro-FTW2019.ipynb
PyDataMallorca/FTW2019_Introduccion_a_data_science_en_Python
3872c1a1174ebb1c8d764f1cc123e370bb8d93f4
[ "MIT" ]
null
null
null
notebooks/01_intro/01-Intro-FTW2019.ipynb
PyDataMallorca/FTW2019_Introduccion_a_data_science_en_Python
3872c1a1174ebb1c8d764f1cc123e370bb8d93f4
[ "MIT" ]
2
2019-06-29T09:52:11.000Z
2021-08-03T09:16:04.000Z
27.773585
184
0.61481
[ [ [ "# Introducción a Data Science en Python\n\n**https://github.com/PyDataMallorca/FTW2019_Introduccion_a_data_science_en_Python**\n\n**¿Dudas?, ¿problemas?** Una persona diferente estará dando la formación de cada uno de los apartados y los demás os ayudaremos de forma personalizada.\n", "_____no_output_____" ], [ "# ¿Qué es la Ciencia de Datos o Data Science?\n\n* Área interdisciplinar relacionada con métodos, procesos y sistemas científicos con los objetivos de comprender y extraer conocimiento de datos.\n\n* Principales áreas: \n * Matemáticas, Estadística.\n * Programación.", "_____no_output_____" ], [ "# Conceptos relacionados\n\n* Machine Learning: se utiliza como sinónimo de Data Science. Conjunto de algoritmos y técnicas que permiten a un programa comprender y extraer conocimiento a partir de datos.\n* Deep Learning: cuando utilizamos redes neuronales muy amplias.\n* Big Data: únicamente cuando tengamos conjuntos de datos muy grandes. Del orden de miles de millones de registros. \n", "_____no_output_____" ], [ "# ¿Y Python qué lugar ocupa en Data Science?\n\n* Conjuntamente con `R` es el lenguaje más utilizado en Data Science actualmente:\n![Python vs R](images/01_01_python_vs_R.png)\n* Mediante Python tenemos cubiertas todas las necesidades: desde la exploración de datos en local hasta la puesta en producción de proyectos de Data Science.\n* Muchísimas herramientas maduras y de código abierto, con una gran comunidad de personas que utilizan y mejoran estas herramientas a diario.\n\nFuente: https://www.kdnuggets.com/2017/08/python-overtakes-r-leader-analytics-data-science.html", "_____no_output_____" ], [ "# ¡Empezamos!\n\n\n![PyData Mallorca logo](images/01_02_PyM_logo.png)\n", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
e7d2bfd656727bfa7e05e40bdabb30ae60535b5f
219,567
ipynb
Jupyter Notebook
marketlearn/network_spanning_trees/mst_example.ipynb
mrajancsr/QuantEquityManagement
026aa038268295a7d5e32c3d24c491e3c559e22e
[ "Apache-2.0" ]
2
2020-07-22T09:28:31.000Z
2020-08-17T01:19:42.000Z
marketlearn/network_spanning_trees/mst_example.ipynb
mrajancsr/QuantEquityManagement
026aa038268295a7d5e32c3d24c491e3c559e22e
[ "Apache-2.0" ]
null
null
null
marketlearn/network_spanning_trees/mst_example.ipynb
mrajancsr/QuantEquityManagement
026aa038268295a7d5e32c3d24c491e3c559e22e
[ "Apache-2.0" ]
3
2020-08-04T02:48:32.000Z
2020-08-17T01:20:09.000Z
157.508608
85,047
0.756411
[ [ [ "from learning.clustering.mst import MinimumSpanningTrees, Graph, Cluster, PriorityQueue\nimport matplotlib.pyplot as plt\n%matplotlib inline", "_____no_output_____" ], [ "import yfinance as yf\nticker_names = \"\"\"\\\nTSLA SPY MSFT MMM ABBV ABMD ACN ATVI ADBE AMD \nAES AFL APD AKAM ALB ARE ALLE GOOG AAL AMT\nABC AME AMGN APH ADI AMAT APTV BKR BAX BDX\nBIO BA BKNG BWA BXP BSX BMY COG COF KMX\nCBOE CE CNC CF DLTR EFX FIS GD GE GS\n\"\"\"\nstart = \"2019-01-01\"\nend = \"2019-12-31\"\ndf = yf.download(ticker_names, start=start, end=end, progress=False)[\"Adj Close\"]", "_____no_output_____" ] ], [ [ "# A review of 2 decades of correlation, hierarchies, netowrks and clustering\n\n### Aim\nThe aim of this project is to review state of the art clustering algorithms for financial time series and to study their correlation in complicated networks. This will form the basis of an \nopen toolbox to study correlations, hierarchies, networks and clustering in financial markets\n\n### Methodology adopted in Financial markets:\n\n- we compute log returns using the formula: $r_t = log(1 + R_t)$\n\nwhere $1 + R_t = \\frac{P_i(t)}{P_i(t-1)}$\n\n- The sample correlation matrix is given by: $\\Omega = \\frac{1}{n} R^T R $\n \n where $R$ is given by returns matrix minus sample mean return for that stock\n\n- Once we get the correlation matrix, we convert it to distances given by the formula: \n \n $d = \\sqrt{2 (1 - \\Omega)}$\n\n where d is a nxn distance matrix which is symmetric and since $\\Omega$ is symmetric positive definite, $d$ is symmetric positive definite as well. \n\n### Methodology described in Paper:\n\n- Once we compute the distance matrix, we compute the minimum spanning tree using Kruskal's algorithm\n\n### Implementation\n\n- Python 3.6 is used as the primary language \n- libraries used: numpy, yfinance to download prices, heapq, itertools & networkx library for plotting\n- DataStructures and Classes used: \n - A graph class is implemented using adjacency map. \n - A priority queue class is implemented using heapq module to keep track of minimum weight in Kruska's algorithm. This ensures finding the minimum is done in O(1) operation. Addition and removal are worse case O(floor(mlogn)) where m is the number of edges and n is the number of nodes\n - A cluster class is implemented using union find algorithm to merge clusters in same group\n - A MinimumSpanningTree class is implemented with a nested Price class that downloads prices and computes the distances. Calling this library automatically downloads prices from yahoo and computes their distances. A start and end parameter is all is needed to get historical prices of 50 shares\n - Share names were obtained from sp500 index as of Tuesday July 16. \n\n- Redundancy\n - Networkx library was used to create another graph inside mst.draw_graph() function. This is mainly used for plotting and visualization of the network structure of the correlation matrix. \n\n- Analysis\n - When distance is 0, correlation is 1 or -1 i.e perfectly correlation. 
Higher the distance, less correlation between the share prices \n\n \n", "_____no_output_____" ] ], [ [ "# set parameters here\nstart = '2019-01-01'\nend = '2019-12-31'\nmst = MinimumSpanningTrees(start=start, end=end)\n# create a graph from distance computed from mst\n# share prices are the vertices and edges are the distance from two share prices\ng = mst.create_graph()\n# get the minimum spanning tree from the graph\nmst_tree = mst.mst_kruskal(g)", "_____no_output_____" ], [ "# plot the minimum spanning tree\nplt.figure(figsize=(10,6))\nmst.draw_graph(mst_tree)\nplt.title(\"MST Using Kruskal's Algorithm of 50 shares of SP500\");", "_____no_output_____" ] ], [ [ "# Observation\n- All the shares are strongly correlated with SPY which makes sense since SPY is an index that contains all the other shares\n\n### Note: \n- Following is the edge weights associated with the spanning tree", "_____no_output_____" ] ], [ [ "mst_tree", "_____no_output_____" ], [ "def create_graph():\n from itertools import combinations\n g = Graph()\n input_vertices = range(1, 8)\n vertices = [g.insert_vertex(v) for v in input_vertices]\n g.insert_edge(vertices[0], vertices[1], 28)\n g.insert_edge(vertices[1], vertices[2], 16)\n g.insert_edge(vertices[2], vertices[3], 12)\n g.insert_edge(vertices[3], vertices[4], 22)\n g.insert_edge(vertices[4], vertices[5], 25)\n g.insert_edge(vertices[5], vertices[0], 10)\n g.insert_edge(vertices[3], vertices[6], 18)\n g.insert_edge(vertices[4], vertices[6], 24)\n g.insert_edge(vertices[1], vertices[6], 14)\n return g ", "_____no_output_____" ], [ "g = create_graph()", "_____no_output_____" ], [ "cluster = Cluster()\npq = PriorityQueue()\nposition = {}\nfor v in g.get_vertices():\n position[v] = cluster.make_cluster(v)\nfor e in g.get_edges():\n pq.push(e, e.get_value())\ntree = []\nsize = g.count_vertices()\nwhile len(tree) != size - 1 and not pq.is_empty():\n weight, _, edge = pq.pop()\n u, v = edge.endpoint()\n a = cluster.find(position[u])\n b = cluster.find(position[v])\n if a != b: \n tree.append(edge)\n cluster.union(a, b)", "_____no_output_____" ], [ "position[v]._container ", "_____no_output_____" ], [ "# container points to cluster object", "_____no_output_____" ], [ "position[v]._parent", "_____no_output_____" ], [ "chk = list(g.get_vertices())", "_____no_output_____" ], [ "position[chk[0]]._parent, position[chk[1]]._parent # each parent is a different position", "_____no_output_____" ], [ "position[v]", "_____no_output_____" ], [ "position[v]._parent == position[v]", "_____no_output_____" ], [ "w, _, edge = pq.pop()", "_____no_output_____" ], [ "u,v = edge.endpoint()", "_____no_output_____" ], [ "a = cluster.find(position[u])\nb = cluster.find(position[v])", "_____no_output_____" ], [ "a", "_____no_output_____" ], [ "edge", "_____no_output_____" ], [ "a.get_value(), b.get_value()", "_____no_output_____" ], [ "a._parent", "_____no_output_____" ], [ "position[u]", "_____no_output_____" ], [ "a != b", "_____no_output_____" ], [ "cluster.union(a,b)", "_____no_output_____" ], [ "start = '2019-01-01'\nend = '2019-12-31'\nmst = MinimumSpanningTrees(start=start, end=end)", "_____no_output_____" ], [ "mst.draw_graph(tree)", "_____no_output_____" ], [ "type(cluster)", "_____no_output_____" ], [ "id(v)", "_____no_output_____" ], [ "cols = df.columns[:5]", "_____no_output_____" ], [ "data = df.to_dict()", "_____no_output_____" ], [ "data.keys()", "_____no_output_____" ], [ "np.array(((3, 4), (5, 6)))", "_____no_output_____" ], [ "np.vstack([np.fromiter(v[0], dtype=float) for v in 
chk])", "_____no_output_____" ], [ "chk = iter([(3, 4), (5, 6)])\n", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7d2c362adf1017f299d564a5110085fd50b0d0f
12,835
ipynb
Jupyter Notebook
Session 0 - Installation and Setup.ipynb
justwinata/Py-R
0c027837b97daa737678abebfa9d95d4bedee129
[ "MIT" ]
null
null
null
Session 0 - Installation and Setup.ipynb
justwinata/Py-R
0c027837b97daa737678abebfa9d95d4bedee129
[ "MIT" ]
null
null
null
Session 0 - Installation and Setup.ipynb
justwinata/Py-R
0c027837b97daa737678abebfa9d95d4bedee129
[ "MIT" ]
null
null
null
25.82495
467
0.593222
[ [ [ "# Jupyter Notebook\nThis link has taken you to what is called a Jupyter Notebook. This is an interactive environment capable of integrating plain text, Python code, and R code. The Jupyter notebook will be used to present the material, as it is a powerful tool for demonstration, and having access to the notebooks is also a good source of reference in the future, and using the given link will keep things updated without having to re-download any files.\n\nThe development, on the other hand, will be done in an integrated development environment, or IDE.", "_____no_output_____" ], [ "## Anaconda\n\n![Anaconda](img/00_anaconda.png)\n> [Download Link](https://www.anaconda.com/distribution/#windows)", "_____no_output_____" ], [ "### Installing Anaconda\n\nPlease download the Python 3.7 version of Anaconda for Windows Installer (this link tends to default to the macOS installer -- make sure you click on the Windows icon above the installer links)\n![Windows Installer for 3.7](img/00_download.png)", "_____no_output_____" ], [ "Run the installer using the default settings:", "_____no_output_____" ], [ "Select \"Just Me\" for Users:\n\n![Users](img/00_install1.PNG)", "_____no_output_____" ], [ "Select the default destination folder in AppData/Local:\n![Destination](img/00_install2.PNG)", "_____no_output_____" ], [ "Run the installer using the default settings:", "_____no_output_____" ], [ "Register Anaconda as your default Python 3.7 in Advanced Options:\n![Path](img/00_install3.PNG) ", "_____no_output_____" ], [ "Let the install complete.", "_____no_output_____" ], [ "### Configuring Anaconda\n\nWe will run an isolated environment for Python and R. ", "_____no_output_____" ], [ "Open Anaconda Navigator:\n![Start Menu](img/00_navigator_start.PNG) \nIt might take a while to load. This is normal. \nDismiss the popup, if applicable.", "_____no_output_____" ], [ "Navigate to the Environments tab:\n![Environments](img/00_anaconda_env.PNG)", "_____no_output_____" ], [ "Click \"Create\" in the lower left:\n![Create](img/00_anaconda_create.PNG)", "_____no_output_____" ], [ "Check R and select \"r\" in the drop-down. \nCheck Python and select \"3.7\" in the drop-down.\nName your environment as desired.\n![Packages](img/00_anaconda_env_packages.PNG)\n\nClick \"Create\".\n\nAllow the environment to be created.", "_____no_output_____" ], [ "## Other Programs", "_____no_output_____" ], [ "### Spyder\n\nWe will be using an IDE called Spyder in this course. \nThere are many Python IDEs out there, including PyCharm, Jupyter Notebook/Jupyter Lab, Visual Studio Code.\nHowever, we will be using Spyder because it is quick to install via Anaconda, and because it has a strong resemblance to RStudio.\n![Spyder](img/00_spyder.png)", "_____no_output_____" ], [ "### RStudio\n\n![RStudio](img/00_rstudio.png)\nRStudio is the IDE we will be using for R development. Again, there are many R IDEs out there, including Jupyter Notebook/Jupyter Lab, Visual Studio Code.\nHowever, RStudio is an extremely popular R IDE that is specifically geared towards R, and similarly to Spyder, it is quick to install via Anaconda and is very similar to Spyder, so this is what we will be using in this course.", "_____no_output_____" ], [ "### Jupyter\n\n![Jupyter](img/00_jupyter.png)\n\nJupyter is optional.", "_____no_output_____" ], [ "As mentioned before, the program currently being used to view this material is called Jupyter Notebook. You'll notice that Notebook is by default installed with Anaconda when looking at your programs. 
The view you are currently looking at is through a third party called Binder, which renders an interactive copy of a Notebook. However, if you would like to have your own local copy and interact with these files, you will have to do so using a Jupyter program.", "_____no_output_____" ], [ "You'll also see a program called JupyterLab in the applications available in Anaconda. Our team recently attended SciPy, a conference held here in Austin, where Jupyter developers informed us that Jupyter Notebook is being deprecated and is evolving into the new JupyterLab interface.\n![JupyterLab](img/00_jupyterlab.png)", "_____no_output_____" ], [ "We will not go into much detail about how to use Jupyter to its full potential aside from navigating it and running code inside of it, as for actual development in our use cases, there is not much use for Jupyter. However, if you are interested, there are plenty of resources online, and feel free to ask me any questions regarding it as well.", "_____no_output_____" ], [ "## Installation\nIn order to install Spyder, RStudio, and Jupyter, navigate to the home tab of Anaconda Navigator.\n![Home](img/00_anaconda_home.PNG)", "_____no_output_____" ], [ "Here you will see the applications mentioned above. \n> Click the \"Install\" buttons on each of these.\n\nNote: Anaconda will only allow one install at a time, so you will not be able to click install on another application before the previous one finishes. \nWhen the install finishes, the button that previously said \"Install\" should now read \"Launch\".\n![Install](img/00_anaconda_spyder.PNG)", "_____no_output_____" ], [ "When everything is installed and your environment is set up, your \"Home\" screen should now look like this:\n![Complete](img/00_anaconda_complete.PNG)", "_____no_output_____" ], [ "## Configuration\nFinally, let's do a little bit of configuration for Spyder and RStudio.", "_____no_output_____" ], [ "### Spyder\nLaunch the Spyder application from Anaconda Navigator. \nNote: you will see that Spyder is now accessible from your Start Menu, but this requires some configuration to get running in the right environment. Please open Spyder via the Anaconda Navigator.\n\nOur end goal is to have Spyder look a little something like this:", "_____no_output_____" ], [ "![Spyder](img/00_spyder_overview.PNG)", "_____no_output_____" ], [ "First, to get the window layout as pictured above, and to develop in an environment that feels close to RStudio, go to the toolbar and navigate to the following: ", "_____no_output_____" ], [ "`View > Window Layouts > RStudio Layout`\n![Layout](img/00_spyder_layout.png)", "_____no_output_____" ], [ "I also prefer coding in light text over a dark background. It helps with eye strain when staring at a screen all day, but feel free to look at the different color themes and pick the one you prefer. \nChange the color preferences by doing the following:", "_____no_output_____" ], [ "`Tools > Preferences > Syntax Coloring > Monokai (or your preferred theme)`\n![Preferences](img/00_spyder_pref.png)", "_____no_output_____" ], [ "`Tools > Preferences > Syntax Coloring > Monokai (or your preferred theme)`\n![Coloring](img/00_spyder_color.png)", "_____no_output_____" ], [ "### RStudio\nLaunch the RStudio application from Anaconda Navigator.\nNote: you will see that RStudio is now accessible from your Start Menu, but this requires some configuration to get running in the right environment. Please open RStudio via the Anaconda Navigator.\n\nOur end goal is to have RStudio look a little something like this:\n![Overview](img/00_rstudio_overview.PNG)", "_____no_output_____" ], [ "The layout is already the RStudio layout, so we will just be changing the colors in RStudio.\nChange the color preferences by navigating to the toolbar and doing the following:", "_____no_output_____" ], [ "`Tools > Global Options > Appearance > Editor Theme: > Monokai (or your preferred theme)`\n![Preferences](img/00_rstudio_pref.PNG)", "_____no_output_____" ], [ "`Tools > Global Options > Appearance > Editor Theme: > Monokai (or your preferred theme)`\n![Coloring](img/00_rstudio_color.png)", "_____no_output_____" ], [ "# Wrap-up\n\nAnd that should be it! Hopefully no complications occurred, but if any did, we can address them during a break now.", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
e7d2c75d392667bbf69cb4f6b395da3ff53f37e5
194,128
ipynb
Jupyter Notebook
Day_7/Lesson 4 - Real Estate Model Training.ipynb
SoftStackFactory/DSML_Primer
a05aa978a580981d011430dfffa0cf834ceeb456
[ "MIT" ]
null
null
null
Day_7/Lesson 4 - Real Estate Model Training.ipynb
SoftStackFactory/DSML_Primer
a05aa978a580981d011430dfffa0cf834ceeb456
[ "MIT" ]
null
null
null
Day_7/Lesson 4 - Real Estate Model Training.ipynb
SoftStackFactory/DSML_Primer
a05aa978a580981d011430dfffa0cf834ceeb456
[ "MIT" ]
1
2020-01-16T23:22:06.000Z
2020-01-16T23:22:06.000Z
63.192708
22,032
0.640964
[ [ [ "<h1 style=\"font-size:42px; text-align:center; margin-bottom:30px;\"><span style=\"color:SteelBlue\">Lesson 4:</span> Model Training</h1>\n<hr>\n\nAt last, it's time to build our models! \n\nIt might seem like it took us a while to get here, but professional data scientists actually spend the bulk of their time on the 3 steps leading up to this one: \n* Exploratory Analysis\n* Data Cleaning\n* Feature Engineering\n\nThat's because the biggest jumps in model performance are from **better data**, not from fancier algorithms.\n\nThis is lengthy and action-packed lesson, so buckle up and let's dive right in!\n\n<br><hr id=\"toc\">\n\n### In this lesson...\n\nFirst, we'll load our analytical base table from lesson 3. \n\nThen, we'll go through the essential modeling steps:\n\n1. [Split your dataset](#split)\n2. [Build model pipelines](#pipelines)\n3. [Declare hyperparameters to tune](#hyperparameters)\n4. [Fit and tune models with cross-validation](#fit-tune)\n5. [Evaluate metrics and select winner](#evaluate)\n\nFinally, we'll save the best model as a project deliverable!\n\n<br><hr>", "_____no_output_____" ], [ "### First, let's import libraries, recruit models, and load the analytical base table.\n\nLet's import our libraries and load the dataset. It's good practice to keep all of your library imports at the top of your notebook or program.", "_____no_output_____" ] ], [ [ "# NumPy for numerical computing\nimport numpy as np\n\n# Pandas for DataFrames\nimport pandas as pd\npd.set_option('display.max_columns', 100)\npd.set_option('display.float_format', lambda x: '%.3f' % x)\n\n# Matplotlib for visualization\nfrom matplotlib import pyplot as plt\n# display plots in the notebook\n%matplotlib inline \n\n# Seaborn for easier visualization\nimport seaborn as sns\n\n# Scikit-Learn for Modeling\nimport sklearn", "_____no_output_____" ] ], [ [ "Next, let's import 5 algorithms we introduced in the previous lesson.", "_____no_output_____" ] ], [ [ "# Import Elastic Net, Ridge Regression, and Lasso Regression\nfrom sklearn.linear_model import ElasticNet, Ridge, Lasso\n\n# Import Random Forest and Gradient Boosted Trees\nfrom sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor", "_____no_output_____" ] ], [ [ "<strong>Quick note about this lesson.</strong><br> In this lesson, we'll be relying heavily on Scikit-Learn, which has many helpful functions we can take advantage of. However, we won't import everything right away. Instead, we'll be importing each function from Scikit-Learn as we need it. That way, we can point out where you can find each function.\n\n\nNext, let's load the analytical base table from lesson 3.", "_____no_output_____" ] ], [ [ "# Load cleaned dataset from lesson 3\ndf = pd.read_csv('project_files/analytical_base_table.csv')\n\nprint(df.shape)", "(1863, 41)\n" ] ], [ [ "<br id=\"split\">\n# 1. 
\n\n<hr style=\"border-color:royalblue;background-color:royalblue;height:1px;\">\n<div style=\"text-align:center; margin: 40px 0 40px 0;\">\n[**Back to Contents**](#toc)\n</div>", "_____no_output_____" ], [ "<br id=\"pipelines\">\n# 2. Build model pipelines\n\nIn lessons 1, 2, and 3, you explored the dataset, cleaned it, and engineered new features. However, sometimes we'll want to preprocess the training data even more before feeding it into our algorithms. 
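\n\nA common example is **standardization**: rescaling each feature to have mean 0 and standard deviation 1. Here is the idea as a quick sketch (illustration only; note that the test set is transformed with statistics computed on the *training* set, to avoid leaking test information):\n\n```python\n# Sketch: z-score standardization using training-set statistics (illustration only)\nmu, sigma = X_train.mean(), X_train.std()  # per-feature statistics of the training set\nX_train_scaled = (X_train - mu) / sigma\nX_test_scaled = (X_test - mu) / sigma      # reuse the SAME statistics on the test set\n```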
\n\n<br>\nFirst, let's show the summary statistics from our training data.", "_____no_output_____" ] ], [ [ "# Summary statistics of X_train\nX_train.describe()", "_____no_output_____" ] ], [ [ "Next, standardize the training data manually, creating a new <code style=\"color:steelblue\">X_train_new</code> object.", "_____no_output_____" ] ], [ [ "# Standardize X_train\nX_train_new = (X_train - X_train.mean()) / X_train.std()", "_____no_output_____" ] ], [ [ "Let's look at the summary statistics for <code style=\"color:steelblue\">X_train_new</code> to confirm standarization worked correctly.\n* How can you tell?", "_____no_output_____" ] ], [ [ "# Summary statistics of X_train_new\nX_train_new.describe()", "_____no_output_____" ] ], [ [ "For the most part, we'll almost never perform manual standardization because we'll include preprocessing steps in **model pipelines**.\n\n<br>\nSo let's import the <code style=\"color:steelblue\">make_pipeline()</code> function from Scikit-Learn.", "_____no_output_____" ] ], [ [ "# Function for creating model pipelines\nfrom sklearn.pipeline import make_pipeline", "_____no_output_____" ] ], [ [ "Now let's import the <code style=\"color:steelblue\">StandardScaler</code>, which is used for standardization.", "_____no_output_____" ] ], [ [ "# For standardization\nfrom sklearn.preprocessing import StandardScaler", "_____no_output_____" ] ], [ [ "Next, create a <code style=\"color:steelblue\">pipelines</code> dictionary.\n* It should include 3 keys: <code style=\"color:crimson\">'lasso'</code>, <code style=\"color:crimson\">'ridge'</code>, and <code style=\"color:crimson\">'enet'</code>\n* The corresponding values should be pipelines that first standardize the data.\n* For the algorithm in each pipeline, set <code style=\"color:steelblue\">random_state=<span style=\"color:crimson\">123</span></code> to ensure replicable results.", "_____no_output_____" ] ], [ [ "# Create pipelines dictionary\npipeline_dict = { 'lasso' : make_pipeline(StandardScaler(), Lasso(random_state=123)),\n 'ridge' : make_pipeline(StandardScaler(), Ridge(random_state=123)),\n 'enet' : make_pipeline(StandardScaler(), ElasticNet(random_state=123)) }", "_____no_output_____" ] ], [ [ "In the next exercise, you'll add pipelines for tree ensembles.", "_____no_output_____" ], [ "<hr style=\"border-color:royalblue;background-color:royalblue;height:1px;\">\n## <span style=\"color:RoyalBlue\">Exercise 5.2</span>\n\n**Add pipelines for <code style=\"color:SteelBlue\">RandomForestRegressor</code> and <code style=\"color:SteelBlue\">GradientBoostingRegressor</code> to your pipeline dictionary.**\n* Name them <code style=\"color:crimson\">'rf'</code> for random forest and <code style=\"color:crimson\">'gb'</code> for gradient boosted tree.\n* Both pipelines should standardize the data first.\n* For both, set <code style=\"color:steelblue\">random_state=<span style=\"color:crimson\">123</span></code> to ensure replicable results.", "_____no_output_____" ] ], [ [ "# Add a pipeline for 'rf'\npipeline_dict['rf'] = make_pipeline(StandardScaler(), RandomForestRegressor(random_state=123))\n# Add a pipeline for 'gb'\npipeline_dict['gb'] = make_pipeline(StandardScaler(), GradientBoostingRegressor(random_state=123))", "_____no_output_____" ] ], [ [ "Let's make sure our dictionary has pipelines for each of our algorithms.\n\n<br>\n**Run this code to confirm that you have all 5 algorithms, each part of a pipeline.**", "_____no_output_____" ] ], [ [ "# Check that we have all 5 algorithms, and that they are all 
", "_____no_output_____" ], [ "<br><hr style=\"border-color:royalblue;background-color:royalblue;height:1px;\">\n## <span style=\"color:RoyalBlue\">Exercise 5.2</span>\n\n**Add pipelines for <code style=\"color:SteelBlue\">RandomForestRegressor</code> and <code style=\"color:SteelBlue\">GradientBoostingRegressor</code> to your pipeline dictionary.**\n* Name them <code style=\"color:crimson\">'rf'</code> for random forest and <code style=\"color:crimson\">'gb'</code> for gradient boosted tree.\n* Both pipelines should standardize the data first.\n* For both, set <code style=\"color:steelblue\">random_state=<span style=\"color:crimson\">123</span></code> to ensure replicable results.", "_____no_output_____" ] ], [ [ "# Add a pipeline for 'rf'\npipeline_dict['rf'] = make_pipeline(StandardScaler(), RandomForestRegressor(random_state=123))\n# Add a pipeline for 'gb'\npipeline_dict['gb'] = make_pipeline(StandardScaler(), GradientBoostingRegressor(random_state=123))", "_____no_output_____" ] ], [ [ "Let's make sure our dictionary has pipelines for each of our algorithms.\n\n<br>\n**Run this code to confirm that you have all 5 algorithms, each part of a pipeline.**", "_____no_output_____" ] ], [ [ "# Check that we have all 5 algorithms, and that they are all pipelines\nfor key, value in pipeline_dict.items():\n    print( key, type(value) )", "lasso <class 'sklearn.pipeline.Pipeline'>\nridge <class 'sklearn.pipeline.Pipeline'>\nenet <class 'sklearn.pipeline.Pipeline'>\nrf <class 'sklearn.pipeline.Pipeline'>\ngb <class 'sklearn.pipeline.Pipeline'>\n" ] ], [ [ "Now that we have our pipelines, we're ready to move on to declaring hyperparameters to tune.\n\n<hr style=\"border-color:royalblue;background-color:royalblue;height:1px;\">\n\n<div style=\"text-align:center; margin: 40px 0 40px 0;\">\n[**Back to Contents**](#toc)\n</div>\n", "_____no_output_____" ], [ "<br id=\"hyperparameters\">\n# 3. Declare hyperparameters to tune\n\nUp to now, we've been casually talking about \"tuning\" models, but now it's time to treat the topic more formally.\n\n<br>\nFirst, list all the tunable hyperparameters for your Lasso regression pipeline.", "_____no_output_____" ] ], [ [ "# List tunable hyperparameters of our Lasso pipeline\npipeline_dict['lasso'].get_params()", "_____no_output_____" ] ], [ [ "Next, declare hyperparameters to tune for Lasso and Ridge regression.\n* Try values between 0.001 and 10 for <code style=\"color:steelblue\">alpha</code>.\n\nNote the key format: each key is the pipeline step's name, then two underscores, then the parameter name (e.g. <code style=\"color:steelblue\">'lasso__alpha'</code>).", "_____no_output_____" ] ], [ [ "# Lasso hyperparameters\nlasso_hyperparameters = { 'lasso__alpha' : [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 5, 10] }\n\n# Ridge hyperparameters \nridge_hyperparameters = { 'ridge__alpha': [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 5, 10] }", "_____no_output_____" ] ], [ [ "Now declare a hyperparameter grid for Elastic-Net.\n* You should tune the <code style=\"color:steelblue\">l1_ratio</code> in addition to <code style=\"color:steelblue\">alpha</code>.", "_____no_output_____" ] ], [ [ "# Elastic Net hyperparameters\nenet_hyperparameters = { 'elasticnet__alpha': [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 5, 10],                        \n                        'elasticnet__l1_ratio': [0.1, 0.3, 0.5, 0.7, 0.9]}", "_____no_output_____" ] ], [ [ "<br><hr style=\"border-color:royalblue;background-color:royalblue;height:1px;\">\n## <span style=\"color:RoyalBlue\">Exercise 5.3</span>\n\nLet's start by declaring the hyperparameter grid for our random forest.\n\n<br>\n**Declare a hyperparameter grid for <code style=\"color:SteelBlue\">RandomForestRegressor</code>.**\n* Name it <code style=\"color:steelblue\">rf_hyperparameters</code>\n\n* Set <code style=\"color:steelblue\"><span style=\"color:crimson\">'randomforestregressor\\__n_estimators'</span>: [100, 200]</code>\n* Set <code style=\"color:steelblue\"><span style=\"color:crimson\">'randomforestregressor\\__max_features'</span>: ['auto', 'sqrt', 0.33]</code>", "_____no_output_____" ] ], [ [ "# Random forest hyperparameters\nrf_hyperparameters = { \n    'randomforestregressor__n_estimators' : [100, 200],\n    'randomforestregressor__max_features': ['auto', 'sqrt', 0.33],\n}", "_____no_output_____" ] ], [ [ "Next, let's declare settings to try for our boosted tree.\n\n<br>\n**Declare a hyperparameter grid for <code style=\"color:SteelBlue\">GradientBoostingRegressor</code>.**\n* Name it <code style=\"color:steelblue\">gb_hyperparameters</code>.\n* Set <code style=\"color:steelblue\"><span style=\"color:crimson\">'gradientboostingregressor\\__n_estimators'</span>: [100, 200]</code>\n* Set <code style=\"color:steelblue\"><span style=\"color:crimson\">'gradientboostingregressor\\__learning_rate'</span>: [0.05, 0.1, 0.2]</code>\n* Set <code style=\"color:steelblue\"><span style=\"color:crimson\">'gradientboostingregressor\\__max_depth'</span>: [1, 3, 5]</code>", "_____no_output_____" ] ], [ [ "# Boosted tree hyperparameters\ngb_hyperparameters = { 'gradientboostingregressor__n_estimators': [100, 200],\n                      'gradientboostingregressor__learning_rate': [0.05, 0.1, 0.2],\n                      'gradientboostingregressor__max_depth': [1, 3, 5]}", "_____no_output_____" ] ], [ [ "Now that we have all of our hyperparameters declared, let's store them in a dictionary for ease of access.\n\n<br>\n**Create a <code style=\"color:steelblue\">hyperparameters</code> dictionary**.\n* Use the same keys as in the <code style=\"color:steelblue\">pipeline_dict</code> dictionary.\n    * If you forgot what those keys were, you can insert a new code cell and call <code style=\"color:steelblue\">pipeline_dict.keys()</code> for a reminder.\n* Set the values to the corresponding **hyperparameter grids** we've been declaring throughout this module.\n    * e.g. <code style=\"color:steelblue\"><span style=\"color:crimson\">'rf'</span> : rf_hyperparameters</code>\n    * e.g. <code style=\"color:steelblue\"><span style=\"color:crimson\">'lasso'</span> : lasso_hyperparameters</code>", "_____no_output_____" ] ], [ [ "# Create hyperparameters dictionary\nhyperparameters = {\n    'rf' : rf_hyperparameters,\n    'gb' : gb_hyperparameters,\n    'lasso' : lasso_hyperparameters,\n    'ridge' : ridge_hyperparameters,\n    'enet' : enet_hyperparameters\n}", "_____no_output_____" ] ], [ [ "**Finally, run this code to check that <code style=\"color:steelblue\">hyperparameters</code> is set up correctly.**", "_____no_output_____" ] ], [ [ "for key in ['enet', 'gb', 'ridge', 'rf', 'lasso']:\n    if key in hyperparameters:\n        if type(hyperparameters[key]) is dict:\n            print( key, 'was found in hyperparameters, and it is a grid.' )\n        else:\n            print( key, 'was found in hyperparameters, but it is not a grid.' )\n    else:\n        print( key, 'was not found in hyperparameters')", "enet was found in hyperparameters, and it is a grid.\ngb was found in hyperparameters, and it is a grid.\nridge was found in hyperparameters, and it is a grid.\nrf was found in hyperparameters, and it is a grid.\nlasso was found in hyperparameters, and it is a grid.\n" ] ], [ [ "<hr style=\"border-color:royalblue;background-color:royalblue;height:1px;\">\n<div style=\"text-align:center; margin: 40px 0 40px 0;\">\n[**Back to Contents**](#toc)\n</div>", "_____no_output_____" ], [ "<br id=\"fit-tune\">\n# 4. Fit and tune models with cross-validation\n\nNow that we have our <code style=\"color:steelblue\">pipeline_dict</code> and <code style=\"color:steelblue\">hyperparameters</code> dictionaries declared, we're ready to tune our models with cross-validation.\n\n<br>\nFirst, let's import a helper for cross-validation called <code style=\"color:steelblue\">GridSearchCV</code>.", "_____no_output_____" ] ], [ [ "# Helper for cross-validation\nfrom sklearn.model_selection import GridSearchCV", "_____no_output_____" ] ], [ [ "Next, to see an example, set up cross-validation for Lasso regression.", "_____no_output_____" ] ], [ [ "# Create cross-validation object from Lasso pipeline and Lasso hyperparameters\nmodel = GridSearchCV(pipeline_dict['lasso'], hyperparameters['lasso'], cv=10, n_jobs=-1)", "_____no_output_____" ] ], [ [ "Pass <code style=\"color:steelblue\">X_train</code> and <code style=\"color:steelblue\">y_train</code> into the <code style=\"color:steelblue\">.fit()</code> function to tune hyperparameters.
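\n\n(Under the hood, a sketch of what the search automates, given <code style=\"color:steelblue\">cv=10</code>: each candidate <code style=\"color:steelblue\">alpha</code> is cross-validated on 10 folds of the training set, and the candidate with the best mean holdout score is then refit on all of <code style=\"color:steelblue\">X_train</code>. Roughly:)\n\n```python\n# Sketch of the idea behind GridSearchCV (illustration only)\nfrom sklearn.model_selection import cross_val_score\nscores = {a: cross_val_score(make_pipeline(StandardScaler(), Lasso(alpha=a, random_state=123)),\n                             X_train, y_train, cv=10).mean()\n          for a in lasso_hyperparameters['lasso__alpha']}\nbest_alpha = max(scores, key=scores.get)  # GridSearchCV then refits with this value\n```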
", "_____no_output_____" ] ], [ [ "# Fit and tune model\nmodel.fit(X_train, y_train)", "/opt/conda/lib/python3.6/site-packages/sklearn/linear_model/coordinate_descent.py:491: ConvergenceWarning: Objective did not converge. 
You might want to increase the number of iterations. Fitting data with very small alpha may cause precision problems.\n  ConvergenceWarning)\n" ], [ [ "By the way, don't worry if you get the message:\n\n<pre style=\"color:crimson\">ConvergenceWarning: Objective did not converge. You might want to increase the number of iterations</pre>\n\nWe'll dive into some of the under-the-hood nuances later.\n\n<br>\nIn the next exercise, we'll write a loop that tunes all of our models.", "_____no_output_____" ], [ "<br><hr style=\"border-color:royalblue;background-color:royalblue;height:1px;\">\n## <span style=\"color:RoyalBlue\">Exercise 5.4</span>\n\n**Create a dictionary of models named <code style=\"color:SteelBlue\">fitted_models</code> that have been tuned using cross-validation.**\n* The keys should be the same as those in the <code style=\"color:SteelBlue\">pipeline_dict</code> and <code style=\"color:SteelBlue\">hyperparameters</code> dictionaries. \n* The values should be <code style=\"color:steelblue\">GridSearchCV</code> objects that have been fitted to <code style=\"color:steelblue\">X_train</code> and <code style=\"color:steelblue\">y_train</code>.\n* After fitting each model, print <code style=\"color:crimson\">'{name} has been fitted.'</code> just to track the progress.\n\nThis step can take a few minutes, so please be patient.
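\n\n(Why it can take a while: a back-of-the-envelope sketch, assuming the grids above. Each model is fit once per fold per candidate setting, plus one final refit:)\n\n```python\n# Sketch: fits performed for the 'rf' search alone (assumed arithmetic)\ncv = 10\nn_candidates = 2 * 3          # n_estimators options x max_features options\nprint(cv * n_candidates + 1)  # 61 fits, including the final refit on all of X_train\n```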
", "_____no_output_____" ] ], [ [ "# Create empty dictionary called fitted_models\nfitted_models = {}\n\n# Loop through model pipelines, tuning each one and saving it to fitted_models\nfor name, pipeline in pipeline_dict.items():\n    # Create cross-validation object from pipeline and hyperparameters\n    model = GridSearchCV(pipeline, hyperparameters[name], cv=10, n_jobs=-1)\n    \n    # Fit model on X_train, y_train\n    model.fit(X_train, y_train)\n    \n    # Store model in fitted_models[name] \n    fitted_models[name] = model\n    \n    # Print '{name} has been fitted'\n    print(name, 'has been fitted.')", "/opt/conda/lib/python3.6/site-packages/sklearn/linear_model/coordinate_descent.py:491: ConvergenceWarning: Objective did not converge. You might want to increase the number of iterations. Fitting data with very small alpha may cause precision problems.\n  ConvergenceWarning)\n" ] ], [ [ "<br>\n**Run this code to check that the models are of the correct type.**", "_____no_output_____" ] ], [ [ "# Check that we have 5 cross-validation objects\nfor key, value in fitted_models.items():\n    print( key, type(value) )", "lasso <class 'sklearn.model_selection._search.GridSearchCV'>\nridge <class 'sklearn.model_selection._search.GridSearchCV'>\nenet <class 'sklearn.model_selection._search.GridSearchCV'>\nrf <class 'sklearn.model_selection._search.GridSearchCV'>\ngb <class 'sklearn.model_selection._search.GridSearchCV'>\n" ] ], [ [ "<br>\n**Finally, run this code to check that the models have been fitted correctly.**", "_____no_output_____" ] ], [ [ "from sklearn.exceptions import NotFittedError\n\nfor name, model in fitted_models.items():\n    try:\n        pred = model.predict(X_test)\n        print(name, 'has been fitted.')\n    except NotFittedError as e:\n        print(repr(e))", "lasso has been fitted.\nridge has been fitted.\nenet has been fitted.\nrf has been fitted.\ngb has been fitted.\n" ] ], [ [ "Nice. Now we're ready to evaluate how our models performed!
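\n\n(Handy aside: each fitted <code style=\"color:steelblue\">GridSearchCV</code> object also records the winning settings it found. A quick sketch of how you might peek at them:)\n\n```python\n# Sketch (optional): inspect the best hyperparameter combination per model\nfor name, model in fitted_models.items():\n    print(name, model.best_params_)\n```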
\n\n<hr style=\"border-color:royalblue;background-color:royalblue;height:1px;\">\n\n<div style=\"text-align:center; margin: 40px 0 40px 0;\">\n[**Back to Contents**](#toc)\n</div>", "_____no_output_____" ], [ "<br id=\"evaluate\">\n# 5. Evaluate models and select winner\n\nFinally, it's time to evaluate our models and pick the best one.\n\n<br>\nLet's display the holdout $R^2$ score for each fitted model.", "_____no_output_____" ] ], [ [ "# Display best_score_ for each fitted model\nfor name, model in fitted_models.items():\n    print(name, model.best_score_)", "lasso 0.3074411588306972\nridge 0.3155067536069877\nenet 0.3422914802767902\nrf 0.4833888008561866\ngb 0.48722517575886765\n" ] ], [ [ "You should see something similar to the scores below:\n\n    enet 0.342759786956\n    lasso 0.309321321129\n    ridge 0.316805719351\n    gb 0.48873808731\n    rf 0.480576134721\n\n\nIf your numbers are way off, check to see if you've set the <code style=\"color:steelblue\">random_state=</code> correctly for each of the models.", "_____no_output_____" ], [ "Next, import the <code style=\"color:steelblue\">r2_score()</code> and <code style=\"color:steelblue\">mean_absolute_error()</code> functions.", "_____no_output_____" ] ], [ [ "# Import r2_score and mean_absolute_error functions\nfrom sklearn.metrics import r2_score \nfrom sklearn.metrics import mean_absolute_error", "_____no_output_____" ] ], [ [ "Finally, let's see how the fitted models perform on our test set!\n\n<br>\nFirst, access your fitted random forest and display the object.", "_____no_output_____" ] ], [ [ "# Display fitted random forest object\nfitted_models['rf']", "_____no_output_____" ] ], [ [ "Predict the test set using the fitted random forest.", "_____no_output_____" ] ], [ [ "# Predict test set using fitted random forest\npred = fitted_models['rf'].predict(X_test)", "_____no_output_____" ] ], [ [ "Finally, we use the scoring functions we imported to calculate and print $R^2$ and MAE.", "_____no_output_____" ] ], [ [ "# Calculate and print R^2 and MAE\nprint('R^2: ', r2_score(y_test, pred))\nprint('MAE: ', mean_absolute_error(y_test, pred))", "R^2: 0.566278620200386\nMAE: 68497.58\n" ] ], [ [ "In the next exercise, we'll evaluate all of our fitted models on the test set and pick the winner.
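\n\n(Quick refresher with made-up numbers, not lesson data: MAE is simply the average absolute difference between predicted and actual prices, in dollars.)\n\n```python\n# Sketch: MAE by hand on toy values (illustration only)\nimport numpy as np\ny_true = np.array([300000, 450000, 500000])  # hypothetical actual prices\ny_hat = np.array([320000, 430000, 540000])   # hypothetical predictions\nprint(np.mean(np.abs(y_true - y_hat)))       # ~26666.67, same idea as mean_absolute_error\n```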
", "_____no_output_____" ], [ "<br><hr style=\"border-color:royalblue;background-color:royalblue;height:1px;\">\n## <span style=\"color:RoyalBlue\">Exercise 5.5</span>\n\n**Use a <code style=\"color:SteelBlue\">for</code> loop to print the performance of each model in <code style=\"color:SteelBlue\">fitted_models</code> on the test set.**\n* Print both <code style=\"color:SteelBlue\">r2_score</code> and <code style=\"color:SteelBlue\">mean_absolute_error</code>.\n* Those functions each take two arguments:\n    * The actual values for your target variable (<code style=\"color:SteelBlue\">y_test</code>)\n    * Predicted values for your target variable\n* Label the output with the name of the algorithm. For example:\n\n<pre style=\"color:crimson\">\nlasso\n--------\nR^2: 0.409313458932\nMAE: 84963.5598922\n</pre>", "_____no_output_____" ] ], [ [ "# Evaluate each fitted model on the test set\nfor name, model in fitted_models.items(): \n    pred_var = model.predict(X_test)\n    print(name)\n    print('R^2: ', r2_score(y_test, pred_var))\n    print('MAE: ', mean_absolute_error(y_test, pred_var))\n    print('===================================')\n    ", "lasso\nR^2: 0.4093410739690313\nMAE: 84957.9784492079\n===================================\nridge\nR^2: 0.40978386776640285\nMAE: 84899.82281275438\n===================================\nenet\nR^2: 0.40415614629545416\nMAE: 86465.82558534491\n===================================\nrf\nR^2: 0.566278620200386\nMAE: 68497.58\n===================================\ngb\nR^2: 0.5416475698153993\nMAE: 70505.20969788785\n===================================\n" ] ], [ [ "**Next, ask yourself these questions to pick the winning model:**\n* Which model had the highest $R^2$ on the test set?\n\n> Random forest\n\n* Which model had the lowest mean absolute error?\n\n> Random forest\n\n* Are these two models the same one?\n\n> Yes\n\n* Did it also have the best holdout $R^2$ score from cross-validation?\n\n> Yes\n\n* **Does it satisfy our win condition?**\n\n> Yes, its mean absolute error is less than \\$70,000!", "_____no_output_____" ], [ "**Finally, let's plot the performance of the winning model on the test set. Run the code below.**\n* It creates a scatter plot.\n* Predicted transaction price goes on the x-axis.\n* Actual transaction price goes on the y-axis.", "_____no_output_____" ] ], [ [ "rf_pred = fitted_models['rf'].predict(X_test)\nplt.scatter(rf_pred, y_test)\nplt.xlabel('predicted')\nplt.ylabel('actual')\nplt.show()", "_____no_output_____" ] ], [ [ "This last visual check is a nice way to confirm our model's performance.\n* Are the points scattered around the 45 degree diagonal?\n\n<br>\n<hr style=\"border-color:royalblue;background-color:royalblue;height:1px;\">\n\n<div style=\"text-align:center; margin: 40px 0 40px 0;\">\n[**Back to Contents**](#toc)\n</div>", "_____no_output_____" ], [ "<br>\n### Finally, let's save the winning model.\n\nGreat job! You've created a pretty kick-ass model for real-estate valuation. Now it's time to save your hard work.\n\nFirst, let's take a look at the data type of your winning model.\n\n***Run each code cell below after completing the exercises above.***", "_____no_output_____" ] ], [ [ "type(fitted_models['rf'])", "_____no_output_____" ] ], [ [ "It looks like this is still the <code style=\"color:steelblue\">GridSearchCV</code> data type. \n* You can actually directly save this object if you want, because it will use the winning model pipeline by default. \n* However, what we really care about is the actual winning model <code style=\"color:steelblue\">Pipeline</code>, right?\n\nIn that case, we can use the <code style=\"color:steelblue\">best\\_estimator_</code> attribute to access it:", "_____no_output_____" ] ], [ [ "type(fitted_models['rf'].best_estimator_)", "_____no_output_____" ] ], [ [ "If we output that object directly, we can also see the winning values for our hyperparameters.", "_____no_output_____" ] ], [ [ "fitted_models['rf'].best_estimator_", "_____no_output_____" ] ], [ [ "See? 
The winning values for our hyperparameters are:\n* <code style=\"color:steelblue\">n_estimators: <span style=\"color:crimson\">200</span></code>\n* <code style=\"color:steelblue\">max_features : <span style=\"color:crimson\">'auto'</span></code>\n\nGreat, now let's import a helpful package called <code style=\"color:steelblue\">pickle</code>, which saves Python objects to disk.", "_____no_output_____" ] ], [ [ "import pickle", "_____no_output_____" ] ], [ [ "Let's save the winning <code style=\"color:steelblue\">Pipeline</code> object into a pickle file.", "_____no_output_____" ] ], [ [ "with open('saved_models/final_model_employee.pkl', 'wb') as f:\n pickle.dump(fitted_models['rf'].best_estimator_, f)", "_____no_output_____" ] ], [ [ "Congratulations... you've built and saved a successful model trained using machine learning!\n\nAs a reminder, here are a few things you did in this module:\n* You split your dataset into separate training and test sets.\n* You set up preprocessing pipelines.\n* You tuned your models using cross-validation.\n* And you evaluated your models, selecting and saving the winner.\n\n<br>\n<hr>\n\n<div style=\"text-align:center; margin: 40px 0 40px 0;\">\n[**Back to Contents**](#toc)\n</div>\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
e7d2cbdd23623333376349bae6d63be602546e2b
4,966
ipynb
Jupyter Notebook
Machine Learning/Basic Implementation/Classification/Random Forest.ipynb
zementalist/Professional-Experience
04fc2db56ea3dd2389577ae90e479028009724f5
[ "Apache-2.0" ]
null
null
null
Machine Learning/Basic Implementation/Classification/Random Forest.ipynb
zementalist/Professional-Experience
04fc2db56ea3dd2389577ae90e479028009724f5
[ "Apache-2.0" ]
null
null
null
Machine Learning/Basic Implementation/Classification/Random Forest.ipynb
zementalist/Professional-Experience
04fc2db56ea3dd2389577ae90e479028009724f5
[ "Apache-2.0" ]
null
null
null
31.833333
114
0.514901
[ [ [ "# Import pre-processing libs\nimport numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import OneHotEncoder, LabelEncoder\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\n\n# Import model\nfrom sklearn.ensemble import RandomForestClassifier\n\n# Import post-processing libs\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import ListedColormap\nfrom sklearn.metrics import confusion_matrix\nimport pickle", "_____no_output_____" ], [ "###################### 1- Import Data ######################\nfilename = \"\"\ndataset = pd.read_csv(filename) # Check file extension before using this function\nX = dataset.iloc[:,:-1].values\ny = dataset.iloc[:, 1:].values", "_____no_output_____" ], [ "###################### 2- Preprocessing ######################\n\n# Split data\ntest_train_ratio = 0.2\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = test_train_ratio)", "_____no_output_____" ], [ "###################### 3- Training ######################\nmodel = RandomForestClassifier(n_estimators = 10, criterion = \"gini\")\nmodel.fit(X_train, y_train)", "_____no_output_____" ], [ "###################### 4- Testing ######################\n\n#model_score = model.score(X_test, y_test)\ny_pred = model.predict(X_test)\ncm = confusion_matrix(y_test, y_pred)", "_____no_output_____" ], [ "###################### 5- Visualization ######################\n# Visualising the Training set results\nX_set, y_set = X_train, y_train\nX1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),\n np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))\nplt.contourf(X1, X2, model.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),\n alpha = 0.75, cmap = ListedColormap(('red', 'blue')))\nplt.xlim(X1.min(), X1.max())\nplt.ylim(X2.min(), X2.max())\nfor i, j in enumerate(np.unique(y_set)):\n plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],\n c = ListedColormap(('yellow', 'black'))(i), label = j)\nplt.title('Model fitting (Training set)')\nplt.xlabel('X')\nplt.ylabel('y')\nplt.legend()\nplt.show()\n\n# Visualising the Test set results\nfrom matplotlib.colors import ListedColormap\nX_set, y_set = X_test, y_test\nX1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),\n np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))\nplt.contourf(X1, X2, model.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),\n alpha = 0.75, cmap = ListedColormap(('red', 'blue')))\nplt.xlim(X1.min(), X1.max())\nplt.ylim(X2.min(), X2.max())\nfor i, j in enumerate(np.unique(y_set)):\n plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],\n c = ListedColormap(('yellow', 'black'))(i), label = j)\nplt.title('Model fitting (Test set)')\nplt.xlabel('X')\nplt.ylabel('y')\nplt.legend()\nplt.show()", "_____no_output_____" ], [ "###################### 6- Save & Use ######################\nvalues_to_predict = X_test\nprediction_result = model.predict([ values_to_predict ])\n\nwith open('classifier.pkl', 'wb') as f:\n pickle.dump(model, f)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code" ] ]
e7d2db70cf77d6da84e71be02e54a89e928c8237
5,421
ipynb
Jupyter Notebook
assignments/Final Project Report Template.ipynb
mikev6/UMBC_Data601
b10396d6e3e9fdcf46dfe2e97811c36b2c233ad5
[ "MIT" ]
10
2020-09-03T20:35:50.000Z
2020-11-05T00:57:25.000Z
assignments/Final Project Report Template.ipynb
mguner/UMBC_Data601
32398d15fde083e4541a16bea144818ec997ed6d
[ "MIT" ]
2
2020-10-15T20:36:36.000Z
2020-10-22T02:31:45.000Z
assignments/Final Project Report Template.ipynb
mikev6/UMBC_Data601
b10396d6e3e9fdcf46dfe2e97811c36b2c233ad5
[ "MIT" ]
21
2020-09-03T20:34:39.000Z
2021-01-28T16:05:56.000Z
23.776316
184
0.551743
[ [ [ "# Title of the Project\n\n__Enter Subtitle here if any__", "_____no_output_____" ], [ "# Overview\n\n__What?__\n\n- Tell us about the problem you are about to solve.\n\n__When?__\n\n- Tell us when and how you will determine that this project is successful (metrics)\n\n__Why?__\n- Tell us why this problem is interesting.\n\n__Who?__\n\n- Tell us who might be interested in your project.\n\n__Background and Research__\n\n- What has already been done on the problem you are working on?", "_____no_output_____" ], [ "\n# Get the data\n\n__Who?__\n\nWho collected the original data.\n\n__When?__\n\nWhen is the data collected?\n\n__What?__\n\n- What is data look like? \n- Number of columns, rows, missing values\n- Size of the data\n\n__Links__\n\n- Link to data if available\n\n- Link to data dictionary if available.\n\n__Connect__\n\n- Connect this part to the overview.\n\n- If this is supervised learning problem, what is the target column? Which columns will be important in your discussion.", "_____no_output_____" ], [ "# Explore the Data", "_____no_output_____" ], [ "- Show us the head of the data, shape of the data.\n\n- Missing values, data types, distributions, interesting statistics, etc. \n\n- You don't have to show us all the code here but make sure that the work you show us here is connected to the problem you are trying to solve. \n\n- Don't share a scrape book with me.\n\n- If you are showing a plot make sure that there is a title, axes are labeled and you explained why you are showing me this plot (it's connection to the problem and solution).\n\n- If you are working with a supervised learning problem, talk about the target variable. It's distribution, class imbalance etc.\n", "_____no_output_____" ], [ "# Prepare Data\n\n- Don't change the original dataset\n\n- Don't necessarily show me the functions you wrote. \n\n- Use utils.py script and call the utility functions if necessary.\n\n- Explain don't show and only mention a work if it is relevant for the later parts of the project.", "_____no_output_____" ], [ "# Modeling\n\n- What models do you use and why?\n\n- What is a good baseline?\n\n- Which metric you will be focusing on, why?", "_____no_output_____" ] ], [ [ "## Decision tree model", "_____no_output_____" ], [ "## Logistic regression model", "_____no_output_____" ], [ "## confusion matrices for both models", "_____no_output_____" ] ], [ [ "# Fine Tune \n\n- Make sure that you played with the hyper-parameters and fine-tuned the parameters. \n\n- Use grid search, cross validation to compare different models. \n\n- Don't show all of your work here only mention if it is necessary to understand you work's results.", "_____no_output_____" ] ], [ [ "## here your code if necessary", "_____no_output_____" ] ], [ [ "# Present Your Solution\n\n\n- What are your results? Why? \n\n- You must connect this part to the original business problem.\n\n- How certain are you of your results?\n\n- What are some limitations of your work?\n\n- Compared to others' work in this field, how is your result? Above expectations? Why? Below expectations? Why?\n\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
e7d2e2a770f0e511eaa22f1714d6de10eecb3e9e
343,764
ipynb
Jupyter Notebook
FINAL-TF2-FILES/TF_2_Notebooks_and_Data/03-ANNs/00-Keras-Syntax-Basics.ipynb
tanuja333/Tensorflow_Keras
e29464da56666c675667b491b12d625ffaefddd9
[ "Apache-2.0" ]
null
null
null
FINAL-TF2-FILES/TF_2_Notebooks_and_Data/03-ANNs/00-Keras-Syntax-Basics.ipynb
tanuja333/Tensorflow_Keras
e29464da56666c675667b491b12d625ffaefddd9
[ "Apache-2.0" ]
9
2020-09-25T21:54:00.000Z
2022-02-10T01:39:05.000Z
FINAL-TF2-FILES/TF_2_Notebooks_and_Data/03-ANNs/00-Keras-Syntax-Basics.ipynb
tanuja333/Tensorflow_Keras
e29464da56666c675667b491b12d625ffaefddd9
[ "Apache-2.0" ]
null
null
null
79.117146
116,028
0.700379
[ [ [ "<a href=\"https://www.pieriandata.com\"><img src=\"../Pierian_Data_Logo.PNG\"></a>\n<strong><center>Copyright by Pierian Data Inc.</center></strong> \n<strong><center>Created by Jose Marcial Portilla.</center></strong>", "_____no_output_____" ], [ "# Keras Syntax Basics\n\nWith TensorFlow 2.0 , Keras is now the main API choice. Let's work through a simple regression project to understand the basics of the Keras syntax and adding layers.", "_____no_output_____" ], [ "## The Data\n\nTo learn the basic syntax of Keras, we will use a very simple fake data set, in the subsequent lectures we will focus on real datasets, along with feature engineering! For now, let's focus on the syntax of TensorFlow 2.0.\n\nLet's pretend this data are measurements of some rare gem stones, with 2 measurement features and a sale price. Our final goal would be to try to predict the sale price of a new gem stone we just mined from the ground, in order to try to set a fair price in the market.\n\n### Load the Data", "_____no_output_____" ] ], [ [ "import pandas as pd", "_____no_output_____" ], [ "df = pd.read_csv('../DATA/fake_reg.csv')", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ] ], [ [ "### Explore the data\n\nLet's take a quick look, we should see strong correlation between the features and the \"price\" of this made up product.", "_____no_output_____" ] ], [ [ "import seaborn as sns\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "sns.pairplot(df)", "_____no_output_____" ] ], [ [ "Feel free to visualize more, but this data is fake, so we will focus on feature engineering and exploratory data analysis later on in the course in much more detail!", "_____no_output_____" ], [ "### Test/Train Split", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import train_test_split", "_____no_output_____" ], [ "# Convert Pandas to Numpy for Keras\n\n# Features\nX = df[['feature1','feature2']].values\n\n# Label\ny = df['price'].values\n\n# Split\nX_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.3,random_state=42)", "_____no_output_____" ], [ "X_train.shape", "_____no_output_____" ], [ "X_test.shape", "_____no_output_____" ], [ "y_train.shape", "_____no_output_____" ], [ "y_test.shape", "_____no_output_____" ] ], [ [ "## Normalizing/Scaling the Data\n\nWe scale the feature data.\n\n[Why we don't need to scale the label](https://stats.stackexchange.com/questions/111467/is-it-necessary-to-scale-the-target-value-in-addition-to-scaling-features-for-re)", "_____no_output_____" ] ], [ [ "from sklearn.preprocessing import MinMaxScaler", "_____no_output_____" ], [ "help(MinMaxScaler)", "Help on class MinMaxScaler in module sklearn.preprocessing._data:\n\nclass MinMaxScaler(sklearn.base.TransformerMixin, sklearn.base.BaseEstimator)\n | MinMaxScaler(feature_range=(0, 1), copy=True)\n | \n | Transform features by scaling each feature to a given range.\n | \n | This estimator scales and translates each feature individually such\n | that it is in the given range on the training set, e.g. 
between\n | zero and one.\n | \n | The transformation is given by::\n | \n | X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))\n | X_scaled = X_std * (max - min) + min\n | \n | where min, max = feature_range.\n | \n | The transformation is calculated as::\n | \n | X_scaled = scale * X + min - X.min(axis=0) * scale\n | where scale = (max - min) / (X.max(axis=0) - X.min(axis=0))\n | \n | This transformation is often used as an alternative to zero mean,\n | unit variance scaling.\n | \n | Read more in the :ref:`User Guide <preprocessing_scaler>`.\n | \n | Parameters\n | ----------\n | feature_range : tuple (min, max), default=(0, 1)\n | Desired range of transformed data.\n | \n | copy : bool, default=True\n | Set to False to perform inplace row normalization and avoid a\n | copy (if the input is already a numpy array).\n | \n | Attributes\n | ----------\n | min_ : ndarray of shape (n_features,)\n | Per feature adjustment for minimum. Equivalent to\n | ``min - X.min(axis=0) * self.scale_``\n | \n | scale_ : ndarray of shape (n_features,)\n | Per feature relative scaling of the data. Equivalent to\n | ``(max - min) / (X.max(axis=0) - X.min(axis=0))``\n | \n | .. versionadded:: 0.17\n | *scale_* attribute.\n | \n | data_min_ : ndarray of shape (n_features,)\n | Per feature minimum seen in the data\n | \n | .. versionadded:: 0.17\n | *data_min_*\n | \n | data_max_ : ndarray of shape (n_features,)\n | Per feature maximum seen in the data\n | \n | .. versionadded:: 0.17\n | *data_max_*\n | \n | data_range_ : ndarray of shape (n_features,)\n | Per feature range ``(data_max_ - data_min_)`` seen in the data\n | \n | .. versionadded:: 0.17\n | *data_range_*\n | \n | n_samples_seen_ : int\n | The number of samples processed by the estimator.\n | It will be reset on new calls to fit, but increments across\n | ``partial_fit`` calls.\n | \n | Examples\n | --------\n | >>> from sklearn.preprocessing import MinMaxScaler\n | >>> data = [[-1, 2], [-0.5, 6], [0, 10], [1, 18]]\n | >>> scaler = MinMaxScaler()\n | >>> print(scaler.fit(data))\n | MinMaxScaler()\n | >>> print(scaler.data_max_)\n | [ 1. 18.]\n | >>> print(scaler.transform(data))\n | [[0. 0. ]\n | [0.25 0.25]\n | [0.5 0.5 ]\n | [1. 1. ]]\n | >>> print(scaler.transform([[2, 2]]))\n | [[1.5 0. ]]\n | \n | See also\n | --------\n | minmax_scale: Equivalent function without the estimator API.\n | \n | Notes\n | -----\n | NaNs are treated as missing values: disregarded in fit, and maintained in\n | transform.\n | \n | For a comparison of the different scalers, transformers, and normalizers,\n | see :ref:`examples/preprocessing/plot_all_scaling.py\n | <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.\n | \n | Method resolution order:\n | MinMaxScaler\n | sklearn.base.TransformerMixin\n | sklearn.base.BaseEstimator\n | builtins.object\n | \n | Methods defined here:\n | \n | __init__(self, feature_range=(0, 1), copy=True)\n | Initialize self. 
See help(type(self)) for accurate signature.\n | \n | fit(self, X, y=None)\n | Compute the minimum and maximum to be used for later scaling.\n | \n | Parameters\n | ----------\n | X : array-like of shape (n_samples, n_features)\n | The data used to compute the per-feature minimum and maximum\n | used for later scaling along the features axis.\n | \n | y : None\n | Ignored.\n | \n | Returns\n | -------\n | self : object\n | Fitted scaler.\n | \n | inverse_transform(self, X)\n | Undo the scaling of X according to feature_range.\n | \n | Parameters\n | ----------\n | X : array-like of shape (n_samples, n_features)\n | Input data that will be transformed. It cannot be sparse.\n | \n | Returns\n | -------\n | Xt : array-like of shape (n_samples, n_features)\n | Transformed data.\n | \n | partial_fit(self, X, y=None)\n | Online computation of min and max on X for later scaling.\n | \n | All of X is processed as a single batch. This is intended for cases\n | when :meth:`fit` is not feasible due to very large number of\n | `n_samples` or because X is read from a continuous stream.\n | \n | Parameters\n | ----------\n | X : array-like of shape (n_samples, n_features)\n | The data used to compute the mean and standard deviation\n | used for later scaling along the features axis.\n | \n | y : None\n | Ignored.\n | \n | Returns\n | -------\n | self : object\n | Transformer instance.\n | \n | transform(self, X)\n | Scale features of X according to feature_range.\n | \n | Parameters\n | ----------\n | X : array-like of shape (n_samples, n_features)\n | Input data that will be transformed.\n | \n | Returns\n | -------\n | Xt : array-like of shape (n_samples, n_features)\n | Transformed data.\n | \n | ----------------------------------------------------------------------\n | Methods inherited from sklearn.base.TransformerMixin:\n | \n | fit_transform(self, X, y=None, **fit_params)\n | Fit to data, then transform it.\n | \n | Fits transformer to X and y with optional parameters fit_params\n | and returns a transformed version of X.\n | \n | Parameters\n | ----------\n | X : numpy array of shape [n_samples, n_features]\n | Training set.\n | \n | y : numpy array of shape [n_samples]\n | Target values.\n | \n | **fit_params : dict\n | Additional fit parameters.\n | \n | Returns\n | -------\n | X_new : numpy array of shape [n_samples, n_features_new]\n | Transformed array.\n | \n | ----------------------------------------------------------------------\n | Data descriptors inherited from sklearn.base.TransformerMixin:\n | \n | __dict__\n | dictionary for instance variables (if defined)\n | \n | __weakref__\n | list of weak references to the object (if defined)\n | \n | ----------------------------------------------------------------------\n | Methods inherited from sklearn.base.BaseEstimator:\n | \n | __getstate__(self)\n | \n | __repr__(self, N_CHAR_MAX=700)\n | Return repr(self).\n | \n | __setstate__(self, state)\n | \n | get_params(self, deep=True)\n | Get parameters for this estimator.\n | \n | Parameters\n | ----------\n | deep : bool, default=True\n | If True, will return the parameters for this estimator and\n | contained subobjects that are estimators.\n | \n | Returns\n | -------\n | params : mapping of string to any\n | Parameter names mapped to their values.\n | \n | set_params(self, **params)\n | Set the parameters of this estimator.\n | \n | The method works on simple estimators as well as on nested objects\n | (such as pipelines). 
The latter have parameters of the form\n | ``<component>__<parameter>`` so that it's possible to update each\n | component of a nested object.\n | \n | Parameters\n | ----------\n | **params : dict\n | Estimator parameters.\n | \n | Returns\n | -------\n | self : object\n | Estimator instance.\n\n" ], [ "scaler = MinMaxScaler()", "_____no_output_____" ], [ "# Notice: to prevent data leakage from the test set, we only fit our scaler to the training set", "_____no_output_____" ], [ "scaler.fit(X_train)", "_____no_output_____" ], [ "X_train = scaler.transform(X_train)\nX_test = scaler.transform(X_test)", "_____no_output_____" ] ], [ [ "# TensorFlow 2.0 Syntax\n\n## Import Options\n\nThere are several ways you can import Keras from TensorFlow (this is largely a personal style choice, so use whichever import style you prefer). We will use the method shown in the **official TF documentation**.", "_____no_output_____" ] ],
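[ [ "Just to illustrate that these import styles all reach the same objects, here is a small sanity-check sketch (assuming a standard TensorFlow 2.0 install); it is not part of the project itself.", "_____no_output_____" ] ], [ [ "import tensorflow as tf\nfrom tensorflow import keras\n\n# These import styles expose the same underlying objects:\nprint(tf.__version__)\nprint(keras.models.Sequential is tf.keras.models.Sequential)", "_____no_output_____" ], [ "import tensorflow as tf", "_____no_output_____" ], [ "from tensorflow.keras.models import Sequential", "_____no_output_____" ], [ "help(Sequential)", "Help on class Sequential in module tensorflow.python.keras.engine.sequential:\n\nclass Sequential(tensorflow.python.keras.engine.training.Model)\n | Sequential(layers=None, name=None)\n | \n | Linear stack of layers.\n | \n | Arguments:\n | layers: list of layers to add to the model.\n | \n | Example:\n | \n | ```python\n | # Optionally, the first layer can receive an `input_shape` argument:\n | model = Sequential()\n | model.add(Dense(32, input_shape=(500,)))\n | # Afterwards, we do automatic shape inference:\n | model.add(Dense(32))\n | \n | # This is identical to the following:\n | model = Sequential()\n | model.add(Dense(32, input_dim=500))\n | \n | # And to the following:\n | model = Sequential()\n | model.add(Dense(32, batch_input_shape=(None, 500)))\n | \n | # Note that you can also omit the `input_shape` argument:\n | # In that case the model gets built the first time you call `fit` (or other\n | # training and evaluation methods).\n | model = Sequential()\n | model.add(Dense(32))\n | model.add(Dense(32))\n | model.compile(optimizer=optimizer, loss=loss)\n | # This builds the model for the first time:\n | model.fit(x, y, batch_size=32, epochs=10)\n | \n | # Note that when using this delayed-build pattern (no input shape specified),\n | # the model doesn't have any weights until the first call\n | # to a training/evaluation method (since it isn't yet built):\n | model = Sequential()\n | model.add(Dense(32))\n | model.add(Dense(32))\n | model.weights # returns []\n | \n | # Whereas if you specify the input shape, the model gets built continuously\n | # as you are adding layers:\n | model = Sequential()\n | model.add(Dense(32, input_shape=(500,)))\n | model.add(Dense(32))\n | model.weights # returns list of length 4\n | \n | # When using the delayed-build pattern (no input shape specified), you can\n | # choose to manually build your model by calling `build(batch_input_shape)`:\n | model = Sequential()\n | model.add(Dense(32))\n | model.add(Dense(32))\n | model.build((None, 500))\n | model.weights # returns list of length 4\n | ```\n | \n | Method resolution order:\n | Sequential\n | tensorflow.python.keras.engine.training.Model\n | tensorflow.python.keras.engine.network.Network\n | tensorflow.python.keras.engine.base_layer.Layer\n | tensorflow.python.module.module.Module\n | tensorflow.python.training.tracking.tracking.AutoTrackable\n | tensorflow.python.training.tracking.base.Trackable\n | builtins.object\n | \n | 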
Methods defined here:\n | \n | __init__(self, layers=None, name=None)\n | \n | add(self, layer)\n | Adds a layer instance on top of the layer stack.\n | \n | Arguments:\n | layer: layer instance.\n | \n | Raises:\n | TypeError: If `layer` is not a layer instance.\n | ValueError: In case the `layer` argument does not\n | know its input shape.\n | ValueError: In case the `layer` argument has\n | multiple output tensors, or is already connected\n | somewhere else (forbidden in `Sequential` models).\n | \n | build(self, input_shape=None)\n | Builds the model based on input shapes received.\n | \n | This is to be used for subclassed models, which do not know at instantiation\n | time what their inputs look like.\n | \n | This method only exists for users who want to call `model.build()` in a\n | standalone way (as a substitute for calling the model on real data to\n | build it). It will never be called by the framework (and thus it will\n | never throw unexpected errors in an unrelated workflow).\n | \n | Args:\n | input_shape: Single tuple, TensorShape, or list of shapes, where shapes\n | are tuples, integers, or TensorShapes.\n | \n | Raises:\n | ValueError:\n | 1. In case of invalid user-provided data (not of type tuple,\n | list, or TensorShape).\n | 2. If the model requires call arguments that are agnostic\n | to the input shapes (positional or kwarg in call signature).\n | 3. If not all layers were properly built.\n | 4. If float type inputs are not supported within the layers.\n | \n | In each of these cases, the user should build their model by calling it\n | on real tensor data.\n | \n | call(self, inputs, training=None, mask=None)\n | Calls the model on new inputs.\n | \n | In this case `call` just reapplies\n | all ops in the graph to the new inputs\n | (e.g. build a new computational graph from the provided inputs).\n | \n | Arguments:\n | inputs: A tensor or list of tensors.\n | training: Boolean or boolean scalar tensor, indicating whether to run\n | the `Network` in training mode or inference mode.\n | mask: A mask or list of masks. A mask can be\n | either a tensor or None (no mask).\n | \n | Returns:\n | A tensor if there is a single output, or\n | a list of tensors if there are more than one outputs.\n | \n | compute_mask(self, inputs, mask)\n | Computes an output mask tensor.\n | \n | Arguments:\n | inputs: Tensor or list of tensors.\n | mask: Tensor or list of tensors.\n | \n | Returns:\n | None or a tensor (or list of tensors,\n | one per output tensor of the layer).\n | \n | compute_output_shape(self, input_shape)\n | Computes the output shape of the layer.\n | \n | If the layer has not been built, this method will call `build` on the\n | layer. This assumes that the layer will later be used with inputs that\n | match the input shape provided here.\n | \n | Arguments:\n | input_shape: Shape tuple (tuple of integers)\n | or list of shape tuples (one per output tensor of the layer).\n | Shape tuples can include None for free dimensions,\n | instead of an integer.\n | \n | Returns:\n | An input shape tuple.\n | \n | get_config(self)\n | Returns the config of the layer.\n | \n | A layer config is a Python dictionary (serializable)\n | containing the configuration of a layer.\n | The same layer can be reinstantiated later\n | (without its trained weights) from this configuration.\n | \n | The config of a layer does not include connectivity\n | information, nor the layer class name. 
These are handled\n | by `Network` (one layer of abstraction above).\n | \n | Returns:\n | Python dictionary.\n | \n | pop(self)\n | Removes the last layer in the model.\n | \n | Raises:\n | TypeError: if there are no layers in the model.\n | \n | predict_classes(self, x, batch_size=32, verbose=0)\n | Generate class predictions for the input samples.\n | \n | The input samples are processed batch by batch.\n | \n | Arguments:\n | x: input data, as a Numpy array or list of Numpy arrays\n | (if the model has multiple inputs).\n | batch_size: integer.\n | verbose: verbosity mode, 0 or 1.\n | \n | Returns:\n | A numpy array of class predictions.\n | \n | predict_proba(self, x, batch_size=32, verbose=0)\n | Generates class probability predictions for the input samples.\n | \n | The input samples are processed batch by batch.\n | \n | Arguments:\n | x: input data, as a Numpy array or list of Numpy arrays\n | (if the model has multiple inputs).\n | batch_size: integer.\n | verbose: verbosity mode, 0 or 1.\n | \n | Returns:\n | A Numpy array of probability predictions.\n | \n | ----------------------------------------------------------------------\n | Class methods defined here:\n | \n | from_config(config, custom_objects=None) from builtins.type\n | Instantiates a Model from its config (output of `get_config()`).\n | \n | Arguments:\n | config: Model config dictionary.\n | custom_objects: Optional dictionary mapping names\n | (strings) to custom classes or functions to be\n | considered during deserialization.\n | \n | Returns:\n | A model instance.\n | \n | Raises:\n | ValueError: In case of improperly formatted config dict.\n | \n | ----------------------------------------------------------------------\n | Data descriptors defined here:\n | \n | dynamic\n | \n | input_spec\n | Gets the network's input specs.\n | \n | Returns:\n | A list of `InputSpec` instances (one per input to the model)\n | or a single instance if the model has only one input.\n | \n | layers\n | \n | ----------------------------------------------------------------------\n | Methods inherited from tensorflow.python.keras.engine.training.Model:\n | \n | compile(self, optimizer='rmsprop', loss=None, metrics=None, loss_weights=None, sample_weight_mode=None, weighted_metrics=None, target_tensors=None, distribute=None, **kwargs)\n | Configures the model for training.\n | \n | Arguments:\n | optimizer: String (name of optimizer) or optimizer instance.\n | See `tf.keras.optimizers`.\n | loss: String (name of objective function), objective function or\n | `tf.losses.Loss` instance. See `tf.losses`. If the model has\n | multiple outputs, you can use a different loss on each output by\n | passing a dictionary or a list of losses. The loss value that will\n | be minimized by the model will then be the sum of all individual\n | losses.\n | metrics: List of metrics to be evaluated by the model during training\n | and testing. 
Typically you will use `metrics=['accuracy']`.\n | To specify different metrics for different outputs of a\n | multi-output model, you could also pass a dictionary, such as\n | `metrics={'output_a': 'accuracy', 'output_b': ['accuracy', 'mse']}`.\n | You can also pass a list (len = len(outputs)) of lists of metrics\n | such as `metrics=[['accuracy'], ['accuracy', 'mse']]` or\n | `metrics=['accuracy', ['accuracy', 'mse']]`.\n | loss_weights: Optional list or dictionary specifying scalar\n | coefficients (Python floats) to weight the loss contributions\n | of different model outputs.\n | The loss value that will be minimized by the model\n | will then be the *weighted sum* of all individual losses,\n | weighted by the `loss_weights` coefficients.\n | If a list, it is expected to have a 1:1 mapping\n | to the model's outputs. If a tensor, it is expected to map\n | output names (strings) to scalar coefficients.\n | sample_weight_mode: If you need to do timestep-wise\n | sample weighting (2D weights), set this to `\"temporal\"`.\n | `None` defaults to sample-wise weights (1D).\n | If the model has multiple outputs, you can use a different\n | `sample_weight_mode` on each output by passing a\n | dictionary or a list of modes.\n | weighted_metrics: List of metrics to be evaluated and weighted\n | by sample_weight or class_weight during training and testing.\n | target_tensors: By default, Keras will create placeholders for the\n | model's target, which will be fed with the target data during\n | training. If instead you would like to use your own\n | target tensors (in turn, Keras will not expect external\n | Numpy data for these targets at training time), you\n | can specify them via the `target_tensors` argument. It can be\n | a single tensor (for a single-output model), a list of tensors,\n | or a dict mapping output names to target tensors.\n | distribute: NOT SUPPORTED IN TF 2.0, please create and compile the\n | model under distribution strategy scope instead of passing it to\n | compile.\n | **kwargs: Any additional arguments.\n | \n | Raises:\n | ValueError: In case of invalid arguments for\n | `optimizer`, `loss`, `metrics` or `sample_weight_mode`.\n | \n | evaluate(self, x=None, y=None, batch_size=None, verbose=1, sample_weight=None, steps=None, callbacks=None, max_queue_size=10, workers=1, use_multiprocessing=False)\n | Returns the loss value & metrics values for the model in test mode.\n | \n | Computation is done in batches.\n | \n | Arguments:\n | x: Input data. It could be:\n | - A Numpy array (or array-like), or a list of arrays\n | (in case the model has multiple inputs).\n | - A TensorFlow tensor, or a list of tensors\n | (in case the model has multiple inputs).\n | - A dict mapping input names to the corresponding array/tensors,\n | if the model has named inputs.\n | - A `tf.data` dataset.\n | - A generator or `keras.utils.Sequence` instance.\n | y: Target data. 
Like the input data `x`,\n | it could be either Numpy array(s) or TensorFlow tensor(s).\n | It should be consistent with `x` (you cannot have Numpy inputs and\n | tensor targets, or inversely).\n | If `x` is a dataset, generator or\n | `keras.utils.Sequence` instance, `y` should not be specified (since\n | targets will be obtained from the iterator/dataset).\n | batch_size: Integer or `None`.\n | Number of samples per gradient update.\n | If unspecified, `batch_size` will default to 32.\n | Do not specify the `batch_size` is your data is in the\n | form of symbolic tensors, dataset,\n | generators, or `keras.utils.Sequence` instances (since they generate\n | batches).\n | verbose: 0 or 1. Verbosity mode.\n | 0 = silent, 1 = progress bar.\n | sample_weight: Optional Numpy array of weights for\n | the test samples, used for weighting the loss function.\n | You can either pass a flat (1D)\n | Numpy array with the same length as the input samples\n | (1:1 mapping between weights and samples),\n | or in the case of temporal data,\n | you can pass a 2D array with shape\n | `(samples, sequence_length)`,\n | to apply a different weight to every timestep of every sample.\n | In this case you should make sure to specify\n | `sample_weight_mode=\"temporal\"` in `compile()`. This argument is not\n | supported when `x` is a dataset, instead pass\n | sample weights as the third element of `x`.\n | steps: Integer or `None`.\n | Total number of steps (batches of samples)\n | before declaring the evaluation round finished.\n | Ignored with the default value of `None`.\n | If x is a `tf.data` dataset and `steps` is\n | None, 'evaluate' will run until the dataset is exhausted.\n | This argument is not supported with array inputs.\n | callbacks: List of `keras.callbacks.Callback` instances.\n | List of callbacks to apply during evaluation.\n | See [callbacks](/api_docs/python/tf/keras/callbacks).\n | max_queue_size: Integer. Used for generator or `keras.utils.Sequence`\n | input only. Maximum size for the generator queue.\n | If unspecified, `max_queue_size` will default to 10.\n | workers: Integer. Used for generator or `keras.utils.Sequence` input\n | only. Maximum number of processes to spin up when using\n | process-based threading. If unspecified, `workers` will default\n | to 1. If 0, will execute the generator on the main thread.\n | use_multiprocessing: Boolean. Used for generator or\n | `keras.utils.Sequence` input only. If `True`, use process-based\n | threading. If unspecified, `use_multiprocessing` will default to\n | `False`. Note that because this implementation relies on\n | multiprocessing, you should not pass non-picklable arguments to\n | the generator as they can't be passed easily to children processes.\n | \n | Returns:\n | Scalar test loss (if the model has a single output and no metrics)\n | or list of scalars (if the model has multiple outputs\n | and/or metrics). 
The attribute `model.metrics_names` will give you\n | the display labels for the scalar outputs.\n | \n | Raises:\n | ValueError: in case of invalid arguments.\n | \n | evaluate_generator(self, generator, steps=None, callbacks=None, max_queue_size=10, workers=1, use_multiprocessing=False, verbose=0)\n | Evaluates the model on a data generator.\n | \n | The generator should return the same kind of data\n | as accepted by `test_on_batch`.\n | \n | Arguments:\n | generator: Generator yielding tuples (inputs, targets)\n | or (inputs, targets, sample_weights)\n | or an instance of `keras.utils.Sequence`\n | object in order to avoid duplicate data\n | when using multiprocessing.\n | steps: Total number of steps (batches of samples)\n | to yield from `generator` before stopping.\n | Optional for `Sequence`: if unspecified, will use\n | the `len(generator)` as a number of steps.\n | callbacks: List of `keras.callbacks.Callback` instances.\n | List of callbacks to apply during evaluation.\n | See [callbacks](/api_docs/python/tf/keras/callbacks).\n | max_queue_size: maximum size for the generator queue\n | workers: Integer. Maximum number of processes to spin up\n | when using process-based threading.\n | If unspecified, `workers` will default to 1. If 0, will\n | execute the generator on the main thread.\n | use_multiprocessing: Boolean.\n | If `True`, use process-based threading.\n | If unspecified, `use_multiprocessing` will default to `False`.\n | Note that because this implementation relies on multiprocessing,\n | you should not pass non-picklable arguments to the generator\n | as they can't be passed easily to children processes.\n | verbose: Verbosity mode, 0 or 1.\n | \n | Returns:\n | Scalar test loss (if the model has a single output and no metrics)\n | or list of scalars (if the model has multiple outputs\n | and/or metrics). The attribute `model.metrics_names` will give you\n | the display labels for the scalar outputs.\n | \n | Raises:\n | ValueError: in case of invalid arguments.\n | \n | Raises:\n | ValueError: In case the generator yields data in an invalid format.\n | \n | fit(self, x=None, y=None, batch_size=None, epochs=1, verbose=1, callbacks=None, validation_split=0.0, validation_data=None, shuffle=True, class_weight=None, sample_weight=None, initial_epoch=0, steps_per_epoch=None, validation_steps=None, validation_freq=1, max_queue_size=10, workers=1, use_multiprocessing=False, **kwargs)\n | Trains the model for a fixed number of epochs (iterations on a dataset).\n | \n | Arguments:\n | x: Input data. It could be:\n | - A Numpy array (or array-like), or a list of arrays\n | (in case the model has multiple inputs).\n | - A TensorFlow tensor, or a list of tensors\n | (in case the model has multiple inputs).\n | - A dict mapping input names to the corresponding array/tensors,\n | if the model has named inputs.\n | - A `tf.data` dataset. Should return a tuple\n | of either `(inputs, targets)` or\n | `(inputs, targets, sample_weights)`.\n | - A generator or `keras.utils.Sequence` returning `(inputs, targets)`\n | or `(inputs, targets, sample weights)`.\n | y: Target data. Like the input data `x`,\n | it could be either Numpy array(s) or TensorFlow tensor(s).\n | It should be consistent with `x` (you cannot have Numpy inputs and\n | tensor targets, or inversely). 
If `x` is a dataset, generator,\n | or `keras.utils.Sequence` instance, `y` should\n | not be specified (since targets will be obtained from `x`).\n | batch_size: Integer or `None`.\n | Number of samples per gradient update.\n | If unspecified, `batch_size` will default to 32.\n | Do not specify the `batch_size` if your data is in the\n | form of symbolic tensors, datasets,\n | generators, or `keras.utils.Sequence` instances (since they generate\n | batches).\n | epochs: Integer. Number of epochs to train the model.\n | An epoch is an iteration over the entire `x` and `y`\n | data provided.\n | Note that in conjunction with `initial_epoch`,\n | `epochs` is to be understood as \"final epoch\".\n | The model is not trained for a number of iterations\n | given by `epochs`, but merely until the epoch\n | of index `epochs` is reached.\n | verbose: 0, 1, or 2. Verbosity mode.\n | 0 = silent, 1 = progress bar, 2 = one line per epoch.\n | Note that the progress bar is not particularly useful when\n | logged to a file, so verbose=2 is recommended when not running\n | interactively (eg, in a production environment).\n | callbacks: List of `keras.callbacks.Callback` instances.\n | List of callbacks to apply during training.\n | See `tf.keras.callbacks`.\n | validation_split: Float between 0 and 1.\n | Fraction of the training data to be used as validation data.\n | The model will set apart this fraction of the training data,\n | will not train on it, and will evaluate\n | the loss and any model metrics\n | on this data at the end of each epoch.\n | The validation data is selected from the last samples\n | in the `x` and `y` data provided, before shuffling. This argument is\n | not supported when `x` is a dataset, generator or\n | `keras.utils.Sequence` instance.\n | validation_data: Data on which to evaluate\n | the loss and any model metrics at the end of each epoch.\n | The model will not be trained on this data.\n | `validation_data` will override `validation_split`.\n | `validation_data` could be:\n | - tuple `(x_val, y_val)` of Numpy arrays or tensors\n | - tuple `(x_val, y_val, val_sample_weights)` of Numpy arrays\n | - dataset\n | For the first two cases, `batch_size` must be provided.\n | For the last case, `validation_steps` must be provided.\n | shuffle: Boolean (whether to shuffle the training data\n | before each epoch) or str (for 'batch').\n | 'batch' is a special option for dealing with the\n | limitations of HDF5 data; it shuffles in batch-sized chunks.\n | Has no effect when `steps_per_epoch` is not `None`.\n | class_weight: Optional dictionary mapping class indices (integers)\n | to a weight (float) value, used for weighting the loss function\n | (during training only).\n | This can be useful to tell the model to\n | \"pay more attention\" to samples from\n | an under-represented class.\n | sample_weight: Optional Numpy array of weights for\n | the training samples, used for weighting the loss function\n | (during training only). You can either pass a flat (1D)\n | Numpy array with the same length as the input samples\n | (1:1 mapping between weights and samples),\n | or in the case of temporal data,\n | you can pass a 2D array with shape\n | `(samples, sequence_length)`,\n | to apply a different weight to every timestep of every sample.\n | In this case you should make sure to specify\n | `sample_weight_mode=\"temporal\"` in `compile()`. 
This argument is not\n | supported when `x` is a dataset, generator, or\n | `keras.utils.Sequence` instance, instead provide the sample_weights\n | as the third element of `x`.\n | initial_epoch: Integer.\n | Epoch at which to start training\n | (useful for resuming a previous training run).\n | steps_per_epoch: Integer or `None`.\n | Total number of steps (batches of samples)\n | before declaring one epoch finished and starting the\n | next epoch. When training with input tensors such as\n | TensorFlow data tensors, the default `None` is equal to\n | the number of samples in your dataset divided by\n | the batch size, or 1 if that cannot be determined. If x is a\n | `tf.data` dataset, and 'steps_per_epoch'\n | is None, the epoch will run until the input dataset is exhausted.\n | This argument is not supported with array inputs.\n | validation_steps: Only relevant if `validation_data` is provided and\n | is a `tf.data` dataset. Total number of steps (batches of\n | samples) to draw before stopping when performing validation\n | at the end of every epoch. If validation_data is a `tf.data` dataset\n | and 'validation_steps' is None, validation\n | will run until the `validation_data` dataset is exhausted.\n | validation_freq: Only relevant if validation data is provided. Integer\n | or `collections_abc.Container` instance (e.g. list, tuple, etc.).\n | If an integer, specifies how many training epochs to run before a\n | new validation run is performed, e.g. `validation_freq=2` runs\n | validation every 2 epochs. If a Container, specifies the epochs on\n | which to run validation, e.g. `validation_freq=[1, 2, 10]` runs\n | validation at the end of the 1st, 2nd, and 10th epochs.\n | max_queue_size: Integer. Used for generator or `keras.utils.Sequence`\n | input only. Maximum size for the generator queue.\n | If unspecified, `max_queue_size` will default to 10.\n | workers: Integer. Used for generator or `keras.utils.Sequence` input\n | only. Maximum number of processes to spin up\n | when using process-based threading. If unspecified, `workers`\n | will default to 1. If 0, will execute the generator on the main\n | thread.\n | use_multiprocessing: Boolean. Used for generator or\n | `keras.utils.Sequence` input only. If `True`, use process-based\n | threading. If unspecified, `use_multiprocessing` will default to\n | `False`. Note that because this implementation relies on\n | multiprocessing, you should not pass non-picklable arguments to\n | the generator as they can't be passed easily to children processes.\n | **kwargs: Used for backwards compatibility.\n | \n | Returns:\n | A `History` object. 
Its `History.history` attribute is\n | a record of training loss values and metrics values\n | at successive epochs, as well as validation loss values\n | and validation metrics values (if applicable).\n | \n | Raises:\n | RuntimeError: If the model was never compiled.\n | ValueError: In case of mismatch between the provided input data\n | and what the model expects.\n | \n | fit_generator(self, generator, steps_per_epoch=None, epochs=1, verbose=1, callbacks=None, validation_data=None, validation_steps=None, validation_freq=1, class_weight=None, max_queue_size=10, workers=1, use_multiprocessing=False, shuffle=True, initial_epoch=0)\n | Fits the model on data yielded batch-by-batch by a Python generator.\n | \n | The generator is run in parallel to the model, for efficiency.\n | For instance, this allows you to do real-time data augmentation\n | on images on CPU in parallel to training your model on GPU.\n | \n | The use of `keras.utils.Sequence` guarantees the ordering\n | and guarantees the single use of every input per epoch when\n | using `use_multiprocessing=True`.\n | \n | Arguments:\n | generator: A generator or an instance of `Sequence`\n | (`keras.utils.Sequence`)\n | object in order to avoid duplicate data\n | when using multiprocessing.\n | The output of the generator must be either\n | - a tuple `(inputs, targets)`\n | - a tuple `(inputs, targets, sample_weights)`.\n | This tuple (a single output of the generator) makes a single batch.\n | Therefore, all arrays in this tuple must have the same length (equal\n | to the size of this batch). Different batches may have different\n | sizes.\n | For example, the last batch of the epoch is commonly smaller than\n | the\n | others, if the size of the dataset is not divisible by the batch\n | size.\n | The generator is expected to loop over its data\n | indefinitely. An epoch finishes when `steps_per_epoch`\n | batches have been seen by the model.\n | steps_per_epoch: Total number of steps (batches of samples)\n | to yield from `generator` before declaring one epoch\n | finished and starting the next epoch. It should typically\n | be equal to the number of samples of your dataset\n | divided by the batch size.\n | Optional for `Sequence`: if unspecified, will use\n | the `len(generator)` as a number of steps.\n | epochs: Integer, total number of iterations on the data.\n | verbose: Verbosity mode, 0, 1, or 2.\n | callbacks: List of callbacks to be called during training.\n | validation_data: This can be either\n | - a generator for the validation data\n | - a tuple (inputs, targets)\n | - a tuple (inputs, targets, sample_weights).\n | validation_steps: Only relevant if `validation_data`\n | is a generator. Total number of steps (batches of samples)\n | to yield from `generator` before stopping.\n | Optional for `Sequence`: if unspecified, will use\n | the `len(validation_data)` as a number of steps.\n | validation_freq: Only relevant if validation data is provided. Integer\n | or `collections_abc.Container` instance (e.g. list, tuple, etc.).\n | If an integer, specifies how many training epochs to run before a\n | new validation run is performed, e.g. `validation_freq=2` runs\n | validation every 2 epochs. If a Container, specifies the epochs on\n | which to run validation, e.g. `validation_freq=[1, 2, 10]` runs\n | validation at the end of the 1st, 2nd, and 10th epochs.\n | class_weight: Dictionary mapping class indices to a weight\n | for the class.\n | max_queue_size: Integer. 
Maximum size for the generator queue.\n | If unspecified, `max_queue_size` will default to 10.\n | workers: Integer. Maximum number of processes to spin up\n | when using process-based threading.\n | If unspecified, `workers` will default to 1. If 0, will\n | execute the generator on the main thread.\n | use_multiprocessing: Boolean.\n | If `True`, use process-based threading.\n | If unspecified, `use_multiprocessing` will default to `False`.\n | Note that because this implementation relies on multiprocessing,\n | you should not pass non-picklable arguments to the generator\n | as they can't be passed easily to children processes.\n | shuffle: Boolean. Whether to shuffle the order of the batches at\n | the beginning of each epoch. Only used with instances\n | of `Sequence` (`keras.utils.Sequence`).\n | Has no effect when `steps_per_epoch` is not `None`.\n | initial_epoch: Epoch at which to start training\n | (useful for resuming a previous training run)\n | \n | Returns:\n | A `History` object.\n | \n | Example:\n | \n | ```python\n | def generate_arrays_from_file(path):\n | while 1:\n | f = open(path)\n | for line in f:\n | # create numpy arrays of input data\n | # and labels, from each line in the file\n | x1, x2, y = process_line(line)\n | yield ({'input_1': x1, 'input_2': x2}, {'output': y})\n | f.close()\n | \n | model.fit_generator(generate_arrays_from_file('/my_file.txt'),\n | steps_per_epoch=10000, epochs=10)\n | ```\n | Raises:\n | ValueError: In case the generator yields data in an invalid format.\n | \n | get_weights(self)\n | Retrieves the weights of the model.\n | \n | Returns:\n | A flat list of Numpy arrays.\n | \n | load_weights(self, filepath, by_name=False)\n | Loads all layer weights, either from a TensorFlow or an HDF5 file.\n | \n | predict(self, x, batch_size=None, verbose=0, steps=None, callbacks=None, max_queue_size=10, workers=1, use_multiprocessing=False)\n | Generates output predictions for the input samples.\n | \n | Computation is done in batches.\n | \n | Arguments:\n | x: Input samples. It could be:\n | - A Numpy array (or array-like), or a list of arrays\n | (in case the model has multiple inputs).\n | - A TensorFlow tensor, or a list of tensors\n | (in case the model has multiple inputs).\n | - A `tf.data` dataset.\n | - A generator or `keras.utils.Sequence` instance.\n | batch_size: Integer or `None`.\n | Number of samples per gradient update.\n | If unspecified, `batch_size` will default to 32.\n | Do not specify the `batch_size` is your data is in the\n | form of symbolic tensors, dataset,\n | generators, or `keras.utils.Sequence` instances (since they generate\n | batches).\n | verbose: Verbosity mode, 0 or 1.\n | steps: Total number of steps (batches of samples)\n | before declaring the prediction round finished.\n | Ignored with the default value of `None`. If x is a `tf.data`\n | dataset and `steps` is None, `predict` will\n | run until the input dataset is exhausted.\n | callbacks: List of `keras.callbacks.Callback` instances.\n | List of callbacks to apply during prediction.\n | See [callbacks](/api_docs/python/tf/keras/callbacks).\n | max_queue_size: Integer. Used for generator or `keras.utils.Sequence`\n | input only. Maximum size for the generator queue.\n | If unspecified, `max_queue_size` will default to 10.\n | workers: Integer. Used for generator or `keras.utils.Sequence` input\n | only. Maximum number of processes to spin up when using\n | process-based threading. If unspecified, `workers` will default\n | to 1. 
If 0, will execute the generator on the main thread.\n | use_multiprocessing: Boolean. Used for generator or\n | `keras.utils.Sequence` input only. If `True`, use process-based\n | threading. If unspecified, `use_multiprocessing` will default to\n | `False`. Note that because this implementation relies on\n | multiprocessing, you should not pass non-picklable arguments to\n | the generator as they can't be passed easily to children processes.\n | \n | \n | Returns:\n | Numpy array(s) of predictions.\n | \n | Raises:\n | ValueError: In case of mismatch between the provided\n | input data and the model's expectations,\n | or in case a stateful model receives a number of samples\n | that is not a multiple of the batch size.\n | \n | predict_generator(self, generator, steps=None, callbacks=None, max_queue_size=10, workers=1, use_multiprocessing=False, verbose=0)\n | Generates predictions for the input samples from a data generator.\n | \n | The generator should return the same kind of data as accepted by\n | `predict_on_batch`.\n | \n | Arguments:\n | generator: Generator yielding batches of input samples\n | or an instance of `keras.utils.Sequence` object in order to\n | avoid duplicate data when using multiprocessing.\n | steps: Total number of steps (batches of samples)\n | to yield from `generator` before stopping.\n | Optional for `Sequence`: if unspecified, will use\n | the `len(generator)` as a number of steps.\n | callbacks: List of `keras.callbacks.Callback` instances.\n | List of callbacks to apply during prediction.\n | See [callbacks](/api_docs/python/tf/keras/callbacks).\n | max_queue_size: Maximum size for the generator queue.\n | workers: Integer. Maximum number of processes to spin up\n | when using process-based threading.\n | If unspecified, `workers` will default to 1. If 0, will\n | execute the generator on the main thread.\n | use_multiprocessing: Boolean.\n | If `True`, use process-based threading.\n | If unspecified, `use_multiprocessing` will default to `False`.\n | Note that because this implementation relies on multiprocessing,\n | you should not pass non-picklable arguments to the generator\n | as they can't be passed easily to children processes.\n | verbose: verbosity mode, 0 or 1.\n | \n | Returns:\n | Numpy array(s) of predictions.\n | \n | Raises:\n | ValueError: In case the generator yields data in an invalid format.\n | \n | predict_on_batch(self, x)\n | Returns predictions for a single batch of samples.\n | \n | Arguments:\n | x: Input data. It could be:\n | - A Numpy array (or array-like), or a list of arrays\n | (in case the model has multiple inputs).\n | - A TensorFlow tensor, or a list of tensors\n | (in case the model has multiple inputs).\n | - A `tf.data` dataset.\n | \n | Returns:\n | Numpy array(s) of predictions.\n | \n | Raises:\n | ValueError: In case of mismatch between given number of inputs and\n | expectations of the model.\n | \n | reset_metrics(self)\n | Resets the state of metrics.\n | \n | test_on_batch(self, x, y=None, sample_weight=None, reset_metrics=True)\n | Test the model on a single batch of samples.\n | \n | Arguments:\n | x: Input data. It could be:\n | - A Numpy array (or array-like), or a list of arrays\n | (in case the model has multiple inputs).\n | - A TensorFlow tensor, or a list of tensors\n | (in case the model has multiple inputs).\n | - A dict mapping input names to the corresponding array/tensors,\n | if the model has named inputs.\n | - A `tf.data` dataset.\n | y: Target data. 
Like the input data `x`,\n | it could be either Numpy array(s) or TensorFlow tensor(s).\n | It should be consistent with `x` (you cannot have Numpy inputs and\n | tensor targets, or inversely). If `x` is a dataset `y` should\n | not be specified (since targets will be obtained from the iterator).\n | sample_weight: Optional array of the same length as x, containing\n | weights to apply to the model's loss for each sample.\n | In the case of temporal data, you can pass a 2D array\n | with shape (samples, sequence_length),\n | to apply a different weight to every timestep of every sample.\n | In this case you should make sure to specify\n | sample_weight_mode=\"temporal\" in compile(). This argument is not\n | supported when `x` is a dataset.\n | reset_metrics: If `True`, the metrics returned will be only for this\n | batch. If `False`, the metrics will be statefully accumulated across\n | batches.\n | \n | Returns:\n | Scalar test loss (if the model has a single output and no metrics)\n | or list of scalars (if the model has multiple outputs\n | and/or metrics). The attribute `model.metrics_names` will give you\n | the display labels for the scalar outputs.\n | \n | Raises:\n | ValueError: In case of invalid user-provided arguments.\n | \n | train_on_batch(self, x, y=None, sample_weight=None, class_weight=None, reset_metrics=True)\n | Runs a single gradient update on a single batch of data.\n | \n | Arguments:\n | x: Input data. It could be:\n | - A Numpy array (or array-like), or a list of arrays\n | (in case the model has multiple inputs).\n | - A TensorFlow tensor, or a list of tensors\n | (in case the model has multiple inputs).\n | - A dict mapping input names to the corresponding array/tensors,\n | if the model has named inputs.\n | - A `tf.data` dataset.\n | y: Target data. Like the input data `x`, it could be either Numpy\n | array(s) or TensorFlow tensor(s). It should be consistent with `x`\n | (you cannot have Numpy inputs and tensor targets, or inversely). If\n | `x` is a dataset, `y` should not be specified\n | (since targets will be obtained from the iterator).\n | sample_weight: Optional array of the same length as x, containing\n | weights to apply to the model's loss for each sample. In the case of\n | temporal data, you can pass a 2D array with shape (samples,\n | sequence_length), to apply a different weight to every timestep of\n | every sample. In this case you should make sure to specify\n | sample_weight_mode=\"temporal\" in compile(). This argument is not\n | supported when `x` is a dataset.\n | class_weight: Optional dictionary mapping class indices (integers) to a\n | weight (float) to apply to the model's loss for the samples from this\n | class during training. This can be useful to tell the model to \"pay\n | more attention\" to samples from an under-represented class.\n | reset_metrics: If `True`, the metrics returned will be only for this\n | batch. If `False`, the metrics will be statefully accumulated across\n | batches.\n | \n | Returns:\n | Scalar training loss\n | (if the model has a single output and no metrics)\n | or list of scalars (if the model has multiple outputs\n | and/or metrics). 
The attribute `model.metrics_names` will give you\n | the display labels for the scalar outputs.\n | \n | Raises:\n | ValueError: In case of invalid user-provided arguments.\n | \n | ----------------------------------------------------------------------\n | Data descriptors inherited from tensorflow.python.keras.engine.training.Model:\n | \n | metrics\n | Returns the model's metrics added using `compile`, `add_metric` APIs.\n | \n | metrics_names\n | Returns the model's display labels for all outputs.\n | \n | run_eagerly\n | Settable attribute indicating whether the model should run eagerly.\n | \n | Running eagerly means that your model will be run step by step,\n | like Python code. Your model might run slower, but it should become easier\n | for you to debug it by stepping into individual layer calls.\n | \n | By default, we will attempt to compile your model to a static graph to\n | deliver the best execution performance.\n | \n | Returns:\n | Boolean, whether the model should run eagerly.\n | \n | sample_weights\n | \n | ----------------------------------------------------------------------\n | Methods inherited from tensorflow.python.keras.engine.network.Network:\n | \n | __setattr__(self, name, value)\n | Support self.foo = trackable syntax.\n | \n | get_layer(self, name=None, index=None)\n | Retrieves a layer based on either its name (unique) or index.\n | \n | If `name` and `index` are both provided, `index` will take precedence.\n | Indices are based on order of horizontal graph traversal (bottom-up).\n | \n | Arguments:\n | name: String, name of layer.\n | index: Integer, index of layer.\n | \n | Returns:\n | A layer instance.\n | \n | Raises:\n | ValueError: In case of invalid layer name or index.\n | \n | reset_states(self)\n | \n | save(self, filepath, overwrite=True, include_optimizer=True, save_format=None, signatures=None, options=None)\n | Saves the model to Tensorflow SavedModel or a single HDF5 file.\n | \n | The savefile includes:\n | - The model architecture, allowing to re-instantiate the model.\n | - The model weights.\n | - The state of the optimizer, allowing to resume training\n | exactly where you left off.\n | \n | This allows you to save the entirety of the state of a model\n | in a single file.\n | \n | Saved models can be reinstantiated via `keras.models.load_model`.\n | The model returned by `load_model`\n | is a compiled model ready to be used (unless the saved model\n | was never compiled in the first place).\n | \n | Arguments:\n | filepath: String, path to SavedModel or H5 file to save the model.\n | overwrite: Whether to silently overwrite any existing file at the\n | target location, or provide the user with a manual prompt.\n | include_optimizer: If True, save optimizer's state together.\n | save_format: Either 'tf' or 'h5', indicating whether to save the model\n | to Tensorflow SavedModel or HDF5. The default is currently 'h5', but\n | will switch to 'tf' in TensorFlow 2.0. The 'tf' option is currently\n | disabled (use `tf.keras.experimental.export_saved_model` instead).\n | signatures: Signatures to save with the SavedModel. Applicable to the 'tf'\n | format only. 
Please see the `signatures` argument in\n | `tf.saved_model.save` for details.\n | options: Optional `tf.saved_model.SaveOptions` object that specifies\n | options for saving to SavedModel.\n | \n | Example:\n | \n | ```python\n | from keras.models import load_model\n | \n | model.save('my_model.h5') # creates a HDF5 file 'my_model.h5'\n | del model # deletes the existing model\n | \n | # returns a compiled model\n | # identical to the previous one\n | model = load_model('my_model.h5')\n | ```\n | \n | save_weights(self, filepath, overwrite=True, save_format=None)\n | Saves all layer weights.\n | \n | Either saves in HDF5 or in TensorFlow format based on the `save_format`\n | argument.\n | \n | When saving in HDF5 format, the weight file has:\n | - `layer_names` (attribute), a list of strings\n | (ordered names of model layers).\n | - For every layer, a `group` named `layer.name`\n | - For every such layer group, a group attribute `weight_names`,\n | a list of strings\n | (ordered names of weights tensor of the layer).\n | - For every weight in the layer, a dataset\n | storing the weight value, named after the weight tensor.\n | \n | When saving in TensorFlow format, all objects referenced by the network are\n | saved in the same format as `tf.train.Checkpoint`, including any `Layer`\n | instances or `Optimizer` instances assigned to object attributes. For\n | networks constructed from inputs and outputs using `tf.keras.Model(inputs,\n | outputs)`, `Layer` instances used by the network are tracked/saved\n | automatically. For user-defined classes which inherit from `tf.keras.Model`,\n | `Layer` instances must be assigned to object attributes, typically in the\n | constructor. See the documentation of `tf.train.Checkpoint` and\n | `tf.keras.Model` for details.\n | \n | While the formats are the same, do not mix `save_weights` and\n | `tf.train.Checkpoint`. Checkpoints saved by `Model.save_weights` should be\n | loaded using `Model.load_weights`. Checkpoints saved using\n | `tf.train.Checkpoint.save` should be restored using the corresponding\n | `tf.train.Checkpoint.restore`. Prefer `tf.train.Checkpoint` over\n | `save_weights` for training checkpoints.\n | \n | The TensorFlow format matches objects and variables by starting at a root\n | object, `self` for `save_weights`, and greedily matching attribute\n | names. For `Model.save` this is the `Model`, and for `Checkpoint.save` this\n | is the `Checkpoint` even if the `Checkpoint` has a model attached. This\n | means saving a `tf.keras.Model` using `save_weights` and loading into a\n | `tf.train.Checkpoint` with a `Model` attached (or vice versa) will not match\n | the `Model`'s variables. See the [guide to training\n | checkpoints](https://www.tensorflow.org/alpha/guide/checkpoints) for details\n | on the TensorFlow format.\n | \n | Arguments:\n | filepath: String, path to the file to save the weights to. When saving\n | in TensorFlow format, this is the prefix used for checkpoint files\n | (multiple files are generated). Note that the '.h5' suffix causes\n | weights to be saved in HDF5 format.\n | overwrite: Whether to silently overwrite any existing file at the\n | target location, or provide the user with a manual prompt.\n | save_format: Either 'tf' or 'h5'. A `filepath` ending in '.h5' or\n | '.keras' will default to HDF5 if `save_format` is `None`. 
Otherwise\n | `None` defaults to 'tf'.\n | \n | Raises:\n | ImportError: If h5py is not available when attempting to save in HDF5\n | format.\n | ValueError: For invalid/unknown format arguments.\n | \n | summary(self, line_length=None, positions=None, print_fn=None)\n | Prints a string summary of the network.\n | \n | Arguments:\n | line_length: Total length of printed lines\n | (e.g. set this to adapt the display to different\n | terminal window sizes).\n | positions: Relative or absolute positions of log elements\n | in each line. If not provided,\n | defaults to `[.33, .55, .67, 1.]`.\n | print_fn: Print function to use. Defaults to `print`.\n | It will be called on each line of the summary.\n | You can set it to a custom function\n | in order to capture the string summary.\n | \n | Raises:\n | ValueError: if `summary()` is called before the model is built.\n | \n | to_json(self, **kwargs)\n | Returns a JSON string containing the network configuration.\n | \n | To load a network from a JSON save file, use\n | `keras.models.model_from_json(json_string, custom_objects={})`.\n | \n | Arguments:\n | **kwargs: Additional keyword arguments\n | to be passed to `json.dumps()`.\n | \n | Returns:\n | A JSON string.\n | \n | to_yaml(self, **kwargs)\n | Returns a yaml string containing the network configuration.\n | \n | To load a network from a yaml save file, use\n | `keras.models.model_from_yaml(yaml_string, custom_objects={})`.\n | \n | `custom_objects` should be a dictionary mapping\n | the names of custom losses / layers / etc to the corresponding\n | functions / classes.\n | \n | Arguments:\n | **kwargs: Additional keyword arguments\n | to be passed to `yaml.dump()`.\n | \n | Returns:\n | A YAML string.\n | \n | Raises:\n | ImportError: if yaml module is not found.\n | \n | ----------------------------------------------------------------------\n | Data descriptors inherited from tensorflow.python.keras.engine.network.Network:\n | \n | non_trainable_weights\n | \n | state_updates\n | Returns the `updates` from all layers that are stateful.\n | \n | This is useful for separating training updates and\n | state updates, e.g. when we need to update a layer's internal state\n | during prediction.\n | \n | Returns:\n | A list of update ops.\n | \n | stateful\n | \n | trainable_weights\n | \n | weights\n | Returns the list of all layer variables/weights.\n | \n | Returns:\n | A list of variables.\n | \n | ----------------------------------------------------------------------\n | Methods inherited from tensorflow.python.keras.engine.base_layer.Layer:\n | \n | __call__(self, inputs, *args, **kwargs)\n | Wraps `call`, applying pre- and post-processing steps.\n | \n | Arguments:\n | inputs: input tensor(s).\n | *args: additional positional arguments to be passed to `self.call`.\n | **kwargs: additional keyword arguments to be passed to `self.call`.\n | \n | Returns:\n | Output tensor(s).\n | \n | Note:\n | - The following optional keyword arguments are reserved for specific uses:\n | * `training`: Boolean scalar tensor of Python boolean indicating\n | whether the `call` is meant for training or inference.\n | * `mask`: Boolean input mask.\n | - If the layer's `call` method takes a `mask` argument (as some Keras\n | layers do), its default value will be set to the mask generated\n | for `inputs` by the previous layer (if `input` did come from\n | a layer that generated a corresponding mask, i.e. 
if it came from\n | a Keras layer with masking support.\n | \n | Raises:\n | ValueError: if the layer's `call` method returns None (an invalid value).\n | \n | __delattr__(self, name)\n | Implement delattr(self, name).\n | \n | add_loss(self, losses, inputs=None)\n | Add loss tensor(s), potentially dependent on layer inputs.\n | \n | Some losses (for instance, activity regularization losses) may be dependent\n | on the inputs passed when calling a layer. Hence, when reusing the same\n | layer on different inputs `a` and `b`, some entries in `layer.losses` may\n | be dependent on `a` and some on `b`. This method automatically keeps track\n | of dependencies.\n | \n | This method can be used inside a subclassed layer or model's `call`\n | function, in which case `losses` should be a Tensor or list of Tensors.\n | \n | Example:\n | \n | ```python\n | class MyLayer(tf.keras.layers.Layer):\n | def call(inputs, self):\n | self.add_loss(tf.abs(tf.reduce_mean(inputs)), inputs=True)\n | return inputs\n | ```\n | \n | This method can also be called directly on a Functional Model during\n | construction. In this case, any loss Tensors passed to this Model must\n | be symbolic and be able to be traced back to the model's `Input`s. These\n | losses become part of the model's topology and are tracked in `get_config`.\n | \n | Example:\n | \n | ```python\n | inputs = tf.keras.Input(shape=(10,))\n | x = tf.keras.layers.Dense(10)(inputs)\n | outputs = tf.keras.layers.Dense(1)(x)\n | model = tf.keras.Model(inputs, outputs)\n | # Actvity regularization.\n | model.add_loss(tf.abs(tf.reduce_mean(x)))\n | ```\n | \n | If this is not the case for your loss (if, for example, your loss references\n | a `Variable` of one of the model's layers), you can wrap your loss in a\n | zero-argument lambda. These losses are not tracked as part of the model's\n | topology since they can't be serialized.\n | \n | Example:\n | \n | ```python\n | inputs = tf.keras.Input(shape=(10,))\n | x = tf.keras.layers.Dense(10)(inputs)\n | outputs = tf.keras.layers.Dense(1)(x)\n | model = tf.keras.Model(inputs, outputs)\n | # Weight regularization.\n | model.add_loss(lambda: tf.reduce_mean(x.kernel))\n | ```\n | \n | The `get_losses_for` method allows to retrieve the losses relevant to a\n | specific set of inputs.\n | \n | Arguments:\n | losses: Loss tensor, or list/tuple of tensors. Rather than tensors, losses\n | may also be zero-argument callables which create a loss tensor.\n | inputs: Ignored when executing eagerly. If anything other than None is\n | passed, it signals the losses are conditional on some of the layer's\n | inputs, and thus they should only be run where these inputs are\n | available. This is the case for activity regularization losses, for\n | instance. If `None` is passed, the losses are assumed\n | to be unconditional, and will apply across all dataflows of the layer\n | (e.g. weight regularization losses).\n | \n | add_metric(self, value, aggregation=None, name=None)\n | Adds metric tensor to the layer.\n | \n | Args:\n | value: Metric tensor.\n | aggregation: Sample-wise metric reduction function. If `aggregation=None`,\n | it indicates that the metric tensor provided has been aggregated\n | already. eg, `bin_acc = BinaryAccuracy(name='acc')` followed by\n | `model.add_metric(bin_acc(y_true, y_pred))`. 
If aggregation='mean', the\n | given metric tensor will be sample-wise reduced using `mean` function.\n | eg, `model.add_metric(tf.reduce_sum(outputs), name='output_mean',\n | aggregation='mean')`.\n | name: String metric name.\n | \n | Raises:\n | ValueError: If `aggregation` is anything other than None or `mean`.\n | \n | add_update(self, updates, inputs=None)\n | Add update op(s), potentially dependent on layer inputs. (deprecated arguments)\n | \n | Warning: SOME ARGUMENTS ARE DEPRECATED: `(inputs)`. They will be removed in a future version.\n | Instructions for updating:\n | `inputs` is now automatically inferred\n | \n | Weight updates (for instance, the updates of the moving mean and variance\n | in a BatchNormalization layer) may be dependent on the inputs passed\n | when calling a layer. Hence, when reusing the same layer on\n | different inputs `a` and `b`, some entries in `layer.updates` may be\n | dependent on `a` and some on `b`. This method automatically keeps track\n | of dependencies.\n | \n | The `get_updates_for` method allows to retrieve the updates relevant to a\n | specific set of inputs.\n | \n | This call is ignored when eager execution is enabled (in that case, variable\n | updates are run on the fly and thus do not need to be tracked for later\n | execution).\n | \n | Arguments:\n | updates: Update op, or list/tuple of update ops, or zero-arg callable\n | that returns an update op. A zero-arg callable should be passed in\n | order to disable running the updates by setting `trainable=False`\n | on this Layer, when executing in Eager mode.\n | inputs: Deprecated, will be automatically inferred.\n | \n | add_variable(self, *args, **kwargs)\n | Deprecated, do NOT use! Alias for `add_weight`. (deprecated)\n | \n | Warning: THIS FUNCTION IS DEPRECATED. It will be removed in a future version.\n | Instructions for updating:\n | Please use `layer.add_weight` method instead.\n | \n | add_weight(self, name=None, shape=None, dtype=None, initializer=None, regularizer=None, trainable=None, constraint=None, partitioner=None, use_resource=None, synchronization=<VariableSynchronization.AUTO: 0>, aggregation=<VariableAggregation.NONE: 0>, **kwargs)\n | Adds a new variable to the layer.\n | \n | Arguments:\n | name: Variable name.\n | shape: Variable shape. Defaults to scalar if unspecified.\n | dtype: The type of the variable. Defaults to `self.dtype` or `float32`.\n | initializer: Initializer instance (callable).\n | regularizer: Regularizer instance (callable).\n | trainable: Boolean, whether the variable should be part of the layer's\n | \"trainable_variables\" (e.g. variables, biases)\n | or \"non_trainable_variables\" (e.g. BatchNorm mean and variance).\n | Note that `trainable` cannot be `True` if `synchronization`\n | is set to `ON_READ`.\n | constraint: Constraint instance (callable).\n | partitioner: Partitioner to be passed to the `Trackable` API.\n | use_resource: Whether to use `ResourceVariable`.\n | synchronization: Indicates when a distributed a variable will be\n | aggregated. Accepted values are constants defined in the class\n | `tf.VariableSynchronization`. By default the synchronization is set to\n | `AUTO` and the current `DistributionStrategy` chooses\n | when to synchronize. If `synchronization` is set to `ON_READ`,\n | `trainable` must not be set to `True`.\n | aggregation: Indicates how a distributed variable will be aggregated.\n | Accepted values are constants defined in the class\n | `tf.VariableAggregation`.\n | **kwargs: Additional keyword arguments. 
Accepted values are `getter` and\n | `collections`.\n | \n | Returns:\n | The created variable. Usually either a `Variable` or `ResourceVariable`\n | instance. If `partitioner` is not `None`, a `PartitionedVariable`\n | instance is returned.\n | \n | Raises:\n | RuntimeError: If called with partitioned variable regularization and\n | eager execution is enabled.\n | ValueError: When giving unsupported dtype and no initializer or when\n | trainable has been set to True with synchronization set as `ON_READ`.\n | \n | apply(self, inputs, *args, **kwargs)\n | Deprecated, do NOT use! (deprecated)\n | \n | Warning: THIS FUNCTION IS DEPRECATED. It will be removed in a future version.\n | Instructions for updating:\n | Please use `layer.__call__` method instead.\n | \n | This is an alias of `self.__call__`.\n | \n | Arguments:\n | inputs: Input tensor(s).\n | *args: additional positional arguments to be passed to `self.call`.\n | **kwargs: additional keyword arguments to be passed to `self.call`.\n | \n | Returns:\n | Output tensor(s).\n | \n | compute_output_signature(self, input_signature)\n | Compute the output tensor signature of the layer based on the inputs.\n | \n | Unlike a TensorShape object, a TensorSpec object contains both shape\n | and dtype information for a tensor. This method allows layers to provide\n | output dtype information if it is different from the input dtype.\n | For any layer that doesn't implement this function,\n | the framework will fall back to use `compute_output_shape`, and will\n | assume that the output dtype matches the input dtype.\n | \n | Args:\n | input_signature: Single TensorSpec or nested structure of TensorSpec\n | objects, describing a candidate input for the layer.\n | \n | Returns:\n | Single TensorSpec or nested structure of TensorSpec objects, describing\n | how the layer would transform the provided input.\n | \n | Raises:\n | TypeError: If input_signature contains a non-TensorSpec object.\n | \n | count_params(self)\n | Count the total number of scalars composing the weights.\n | \n | Returns:\n | An integer count.\n | \n | Raises:\n | ValueError: if the layer isn't yet built\n | (in which case its weights aren't yet defined).\n | \n | get_input_at(self, node_index)\n | Retrieves the input tensor(s) of a layer at a given node.\n | \n | Arguments:\n | node_index: Integer, index of the node\n | from which to retrieve the attribute.\n | E.g. `node_index=0` will correspond to the\n | first time the layer was called.\n | \n | Returns:\n | A tensor (or list of tensors if the layer has multiple inputs).\n | \n | Raises:\n | RuntimeError: If called in Eager mode.\n | \n | get_input_mask_at(self, node_index)\n | Retrieves the input mask tensor(s) of a layer at a given node.\n | \n | Arguments:\n | node_index: Integer, index of the node\n | from which to retrieve the attribute.\n | E.g. `node_index=0` will correspond to the\n | first time the layer was called.\n | \n | Returns:\n | A mask tensor\n | (or list of tensors if the layer has multiple inputs).\n | \n | get_input_shape_at(self, node_index)\n | Retrieves the input shape(s) of a layer at a given node.\n | \n | Arguments:\n | node_index: Integer, index of the node\n | from which to retrieve the attribute.\n | E.g. 
`node_index=0` will correspond to the\n | first time the layer was called.\n | \n | Returns:\n | A shape tuple\n | (or list of shape tuples if the layer has multiple inputs).\n | \n | Raises:\n | RuntimeError: If called in Eager mode.\n | \n | get_losses_for(self, inputs)\n | Retrieves losses relevant to a specific set of inputs.\n | \n | Arguments:\n | inputs: Input tensor or list/tuple of input tensors.\n | \n | Returns:\n | List of loss tensors of the layer that depend on `inputs`.\n | \n | get_output_at(self, node_index)\n | Retrieves the output tensor(s) of a layer at a given node.\n | \n | Arguments:\n | node_index: Integer, index of the node\n | from which to retrieve the attribute.\n | E.g. `node_index=0` will correspond to the\n | first time the layer was called.\n | \n | Returns:\n | A tensor (or list of tensors if the layer has multiple outputs).\n | \n | Raises:\n | RuntimeError: If called in Eager mode.\n | \n | get_output_mask_at(self, node_index)\n | Retrieves the output mask tensor(s) of a layer at a given node.\n | \n | Arguments:\n | node_index: Integer, index of the node\n | from which to retrieve the attribute.\n | E.g. `node_index=0` will correspond to the\n | first time the layer was called.\n | \n | Returns:\n | A mask tensor\n | (or list of tensors if the layer has multiple outputs).\n | \n | get_output_shape_at(self, node_index)\n | Retrieves the output shape(s) of a layer at a given node.\n | \n | Arguments:\n | node_index: Integer, index of the node\n | from which to retrieve the attribute.\n | E.g. `node_index=0` will correspond to the\n | first time the layer was called.\n | \n | Returns:\n | A shape tuple\n | (or list of shape tuples if the layer has multiple outputs).\n | \n | Raises:\n | RuntimeError: If called in Eager mode.\n | \n | get_updates_for(self, inputs)\n | Retrieves updates relevant to a specific set of inputs.\n | \n | Arguments:\n | inputs: Input tensor or list/tuple of input tensors.\n | \n | Returns:\n | List of update ops of the layer that depend on `inputs`.\n | \n | set_weights(self, weights)\n | Sets the weights of the layer, from Numpy arrays.\n | \n | Arguments:\n | weights: a list of Numpy arrays. The number\n | of arrays and their shape must match\n | number of the dimensions of the weights\n | of the layer (i.e. it should match the\n | output of `get_weights`).\n | \n | Raises:\n | ValueError: If the provided weights list does not match the\n | layer's specifications.\n | \n | ----------------------------------------------------------------------\n | Data descriptors inherited from tensorflow.python.keras.engine.base_layer.Layer:\n | \n | activity_regularizer\n | Optional regularizer function for the output of this layer.\n | \n | dtype\n | \n | inbound_nodes\n | Deprecated, do NOT use! Only for compatibility with external Keras.\n | \n | input\n | Retrieves the input tensor(s) of a layer.\n | \n | Only applicable if the layer has exactly one input,\n | i.e. if it is connected to one incoming layer.\n | \n | Returns:\n | Input tensor or list of input tensors.\n | \n | Raises:\n | RuntimeError: If called in Eager mode.\n | AttributeError: If no inbound nodes are found.\n | \n | input_mask\n | Retrieves the input mask tensor(s) of a layer.\n | \n | Only applicable if the layer has exactly one inbound node,\n | i.e. 
if it is connected to one incoming layer.\n | \n | Returns:\n | Input mask tensor (potentially None) or list of input\n | mask tensors.\n | \n | Raises:\n | AttributeError: if the layer is connected to\n | more than one incoming layers.\n | \n | input_shape\n | Retrieves the input shape(s) of a layer.\n | \n | Only applicable if the layer has exactly one input,\n | i.e. if it is connected to one incoming layer, or if all inputs\n | have the same shape.\n | \n | Returns:\n | Input shape, as an integer shape tuple\n | (or list of shape tuples, one tuple per input tensor).\n | \n | Raises:\n | AttributeError: if the layer has no defined input_shape.\n | RuntimeError: if called in Eager mode.\n | \n | losses\n | Losses which are associated with this `Layer`.\n | \n | Variable regularization tensors are created when this property is accessed,\n | so it is eager safe: accessing `losses` under a `tf.GradientTape` will\n | propagate gradients back to the corresponding variables.\n | \n | Returns:\n | A list of tensors.\n | \n | name\n | Returns the name of this module as passed or determined in the ctor.\n | \n | NOTE: This is not the same as the `self.name_scope.name` which includes\n | parent module names.\n | \n | non_trainable_variables\n | \n | outbound_nodes\n | Deprecated, do NOT use! Only for compatibility with external Keras.\n | \n | output\n | Retrieves the output tensor(s) of a layer.\n | \n | Only applicable if the layer has exactly one output,\n | i.e. if it is connected to one incoming layer.\n | \n | Returns:\n | Output tensor or list of output tensors.\n | \n | Raises:\n | AttributeError: if the layer is connected to more than one incoming\n | layers.\n | RuntimeError: if called in Eager mode.\n | \n | output_mask\n | Retrieves the output mask tensor(s) of a layer.\n | \n | Only applicable if the layer has exactly one inbound node,\n | i.e. if it is connected to one incoming layer.\n | \n | Returns:\n | Output mask tensor (potentially None) or list of output\n | mask tensors.\n | \n | Raises:\n | AttributeError: if the layer is connected to\n | more than one incoming layers.\n | \n | output_shape\n | Retrieves the output shape(s) of a layer.\n | \n | Only applicable if the layer has one output,\n | or if all outputs have the same shape.\n | \n | Returns:\n | Output shape, as an integer shape tuple\n | (or list of shape tuples, one tuple per output tensor).\n | \n | Raises:\n | AttributeError: if the layer has no defined output shape.\n | RuntimeError: if called in Eager mode.\n | \n | trainable\n | \n | trainable_variables\n | Sequence of variables owned by this module and it's submodules.\n | \n | Note: this method uses reflection to find variables on the current instance\n | and submodules. 
For performance reasons you may wish to cache the result\n | of calling this method if you don't expect the return value to change.\n | \n | Returns:\n | A sequence of variables for the current module (sorted by attribute\n | name) followed by variables from all submodules recursively (breadth\n | first).\n | \n | updates\n | \n | variables\n | Returns the list of all layer variables/weights.\n | \n | Alias of `self.weights`.\n | \n | Returns:\n | A list of variables.\n | \n | ----------------------------------------------------------------------\n | Class methods inherited from tensorflow.python.module.module.Module:\n | \n | with_name_scope(method) from builtins.type\n | Decorator to automatically enter the module name scope.\n | \n | ```\n | class MyModule(tf.Module):\n | @tf.Module.with_name_scope\n | def __call__(self, x):\n | if not hasattr(self, 'w'):\n | self.w = tf.Variable(tf.random.normal([x.shape[1], 64]))\n | return tf.matmul(x, self.w)\n | ```\n | \n | Using the above module would produce `tf.Variable`s and `tf.Tensor`s whose\n | names included the module name:\n | \n | ```\n | mod = MyModule()\n | mod(tf.ones([8, 32]))\n | # ==> <tf.Tensor: ...>\n | mod.w\n | # ==> <tf.Variable ...'my_module/w:0'>\n | ```\n | \n | Args:\n | method: The method to wrap.\n | \n | Returns:\n | The original method wrapped such that it enters the module's name scope.\n | \n | ----------------------------------------------------------------------\n | Data descriptors inherited from tensorflow.python.module.module.Module:\n | \n | name_scope\n | Returns a `tf.name_scope` instance for this class.\n | \n | submodules\n | Sequence of all sub-modules.\n | \n | Submodules are modules which are properties of this module, or found as\n | properties of modules which are properties of this module (and so on).\n | \n | ```\n | a = tf.Module()\n | b = tf.Module()\n | c = tf.Module()\n | a.b = b\n | b.c = c\n | assert list(a.submodules) == [b, c]\n | assert list(b.submodules) == [c]\n | assert list(c.submodules) == []\n | ```\n | \n | Returns:\n | A sequence of all submodules.\n | \n | ----------------------------------------------------------------------\n | Data descriptors inherited from tensorflow.python.training.tracking.base.Trackable:\n | \n | __dict__\n | dictionary for instance variables (if defined)\n | \n | __weakref__\n | list of weak references to the object (if defined)\n\n" ] ], [ [ "## Creating a Model\n\nThere are two ways to create models through the TF 2 Keras API: either pass in a list of layers all at once, or add them one by one.\n\nLet's show both methods (it's up to you to choose which method you prefer).", "_____no_output_____" ] ], [ [ "from tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Activation", "_____no_output_____" ] ], [ [ "### Model - as a list of layers", "_____no_output_____" ] ], [ [ "model = Sequential([\n    Dense(units=2),\n    Dense(units=2),\n    Dense(units=2)\n])", "_____no_output_____" ] ], [ [ "### Model - adding in layers one by one", "_____no_output_____" ] ], [ [ "model = Sequential()\n\nmodel.add(Dense(2))\nmodel.add(Dense(2))\nmodel.add(Dense(2))", "_____no_output_____" ] ], [ [ "Let's go ahead and build a simple model and then compile it by defining our optimizer and loss.", "_____no_output_____" ] ], [ [ "model = Sequential()\n\nmodel.add(Dense(4,activation='relu'))\nmodel.add(Dense(4,activation='relu'))\nmodel.add(Dense(4,activation='relu'))\n\n# Final output node for
prediction\nmodel.add(Dense(1))\n\nmodel.compile(optimizer='rmsprop',loss='mse')", "_____no_output_____" ] ], [ [ "### Choosing an optimizer and loss\n\nKeep in mind what kind of problem you are trying to solve:\n\n    # For a multi-class classification problem\n    model.compile(optimizer='rmsprop',\n                  loss='categorical_crossentropy',\n                  metrics=['accuracy'])\n\n    # For a binary classification problem\n    model.compile(optimizer='rmsprop',\n                  loss='binary_crossentropy',\n                  metrics=['accuracy'])\n\n    # For a mean squared error regression problem\n    model.compile(optimizer='rmsprop',\n                  loss='mse')", "_____no_output_____" ], [ "# Training\n\nBelow are some common definitions that you need to know to use Keras correctly:\n\n* Sample: one element of a dataset.\n    * Example: one image is a sample in a convolutional network\n    * Example: one audio file is a sample for a speech recognition model\n* Batch: a set of N samples. The samples in a batch are processed independently, in parallel. If training, a batch results in only one update to the model. A batch generally approximates the distribution of the input data better than a single input. The larger the batch, the better the approximation; however, it is also true that the batch will take longer to process and will still result in only one update. For inference (evaluate/predict), it is recommended to pick a batch size that is as large as you can afford without going out of memory (since larger batches will usually result in faster evaluation/prediction).\n* Epoch: an arbitrary cutoff, generally defined as \"one pass over the entire dataset\", used to separate training into distinct phases, which is useful for logging and periodic evaluation.\n* When using validation_data or validation_split with the fit method of Keras models, evaluation will be run at the end of every epoch.\n* Within Keras, there is the ability to add callbacks specifically designed to be run at the end of an epoch. 
Examples of these are learning rate changes and model checkpointing (saving).", "_____no_output_____" ] ], [ [ "model.fit(X_train,y_train,epochs=250)", "Train on 700 samples\nEpoch 1/250\n700/700 [==============================] - 1s 1ms/sample - loss: 256678.6899\nEpoch 2/250\n700/700 [==============================] - 0s 67us/sample - loss: 256557.3328\nEpoch 3/250\n700/700 [==============================] - 0s 67us/sample - loss: 256435.2685\nEpoch 4/250\n700/700 [==============================] - 0s 69us/sample - loss: 256297.5242\nEpoch 5/250\n700/700 [==============================] - 0s 67us/sample - loss: 256139.6521\nEpoch 6/250\n700/700 [==============================] - 0s 89us/sample - loss: 255959.0959\nEpoch 7/250\n700/700 [==============================] - 0s 56us/sample - loss: 255751.4558\nEpoch 8/250\n700/700 [==============================] - 0s 89us/sample - loss: 255515.1171\nEpoch 9/250\n700/700 [==============================] - 0s 67us/sample - loss: 255240.5993\nEpoch 10/250\n700/700 [==============================] - 0s 89us/sample - loss: 254925.4916\nEpoch 11/250\n700/700 [==============================] - 0s 69us/sample - loss: 254567.7298\nEpoch 12/250\n700/700 [==============================] - 0s 67us/sample - loss: 254163.5860\nEpoch 13/250\n700/700 [==============================] - 0s 67us/sample - loss: 253711.2249\nEpoch 14/250\n700/700 [==============================] - 0s 57us/sample - loss: 253207.9388\nEpoch 15/250\n700/700 [==============================] - 0s 89us/sample - loss: 252649.8949\nEpoch 16/250\n700/700 [==============================] - 0s 67us/sample - loss: 252035.8005\nEpoch 17/250\n700/700 [==============================] - 0s 89us/sample - loss: 251361.9668\nEpoch 18/250\n700/700 [==============================] - 0s 69us/sample - loss: 250630.4323\nEpoch 19/250\n700/700 [==============================] - 0s 89us/sample - loss: 249834.5367\nEpoch 20/250\n700/700 [==============================] - 0s 67us/sample - loss: 248964.4419\nEpoch 21/250\n700/700 [==============================] - 0s 89us/sample - loss: 248029.2328\nEpoch 22/250\n700/700 [==============================] - 0s 67us/sample - loss: 247016.8577\nEpoch 23/250\n700/700 [==============================] - 0s 89us/sample - loss: 245919.6555\nEpoch 24/250\n700/700 [==============================] - 0s 67us/sample - loss: 244745.7887\nEpoch 25/250\n700/700 [==============================] - 0s 89us/sample - loss: 243485.6529\nEpoch 26/250\n700/700 [==============================] - 0s 67us/sample - loss: 242129.3484\nEpoch 27/250\n700/700 [==============================] - 0s 89us/sample - loss: 240689.1388\nEpoch 28/250\n700/700 [==============================] - 0s 67us/sample - loss: 239153.4667\nEpoch 29/250\n700/700 [==============================] - 0s 89us/sample - loss: 237520.4308\nEpoch 30/250\n700/700 [==============================] - 0s 67us/sample - loss: 235783.5987\nEpoch 31/250\n700/700 [==============================] - 0s 89us/sample - loss: 233942.2699\nEpoch 32/250\n700/700 [==============================] - 0s 69us/sample - loss: 231982.6838\nEpoch 33/250\n700/700 [==============================] - 0s 67us/sample - loss: 229905.5206\nEpoch 34/250\n700/700 [==============================] - 0s 89us/sample - loss: 227726.2409\nEpoch 35/250\n700/700 [==============================] - 0s 67us/sample - loss: 225433.7657\nEpoch 36/250\n700/700 [==============================] - 0s 91us/sample - loss: 223007.5024\nEpoch 37/250\n700/700 
[==============================] - 0s 67us/sample - loss: 220470.3121\nEpoch 38/250\n700/700 [==============================] - 0s 67us/sample - loss: 217800.4992\nEpoch 39/250\n700/700 [==============================] - 0s 89us/sample - loss: 215000.5040\nEpoch 40/250\n700/700 [==============================] - 0s 67us/sample - loss: 212070.4630\nEpoch 41/250\n700/700 [==============================] - 0s 89us/sample - loss: 209021.6112\nEpoch 42/250\n700/700 [==============================] - 0s 67us/sample - loss: 205820.6153\nEpoch 43/250\n700/700 [==============================] - 0s 67us/sample - loss: 202485.9254\nEpoch 44/250\n700/700 [==============================] - 0s 89us/sample - loss: 199032.7301\nEpoch 45/250\n700/700 [==============================] - 0s 67us/sample - loss: 195436.0692\nEpoch 46/250\n700/700 [==============================] - 0s 89us/sample - loss: 191699.3609\nEpoch 47/250\n700/700 [==============================] - 0s 67us/sample - loss: 187801.8943\nEpoch 48/250\n700/700 [==============================] - 0s 67us/sample - loss: 183781.5669\nEpoch 49/250\n700/700 [==============================] - 0s 89us/sample - loss: 179660.2206\nEpoch 50/250\n700/700 [==============================] - 0s 67us/sample - loss: 175374.3602\nEpoch 51/250\n700/700 [==============================] - 0s 89us/sample - loss: 170959.2488\nEpoch 52/250\n700/700 [==============================] - 0s 67us/sample - loss: 166390.8793\nEpoch 53/250\n700/700 [==============================] - 0s 89us/sample - loss: 161693.8322\nEpoch 54/250\n700/700 [==============================] - 0s 67us/sample - loss: 156896.2863\nEpoch 55/250\n700/700 [==============================] - 0s 67us/sample - loss: 151958.7138\nEpoch 56/250\n700/700 [==============================] - 0s 67us/sample - loss: 146943.3821\nEpoch 57/250\n700/700 [==============================] - 0s 67us/sample - loss: 141799.3351\nEpoch 58/250\n700/700 [==============================] - 0s 67us/sample - loss: 136534.7192\nEpoch 59/250\n700/700 [==============================] - 0s 67us/sample - loss: 131191.1925\nEpoch 60/250\n700/700 [==============================] - 0s 67us/sample - loss: 125746.5604\nEpoch 61/250\n700/700 [==============================] - 0s 89us/sample - loss: 120214.6602\nEpoch 62/250\n700/700 [==============================] - 0s 67us/sample - loss: 114611.5430\nEpoch 63/250\n700/700 [==============================] - 0s 67us/sample - loss: 108961.6057\nEpoch 64/250\n700/700 [==============================] - 0s 89us/sample - loss: 103238.3104\nEpoch 65/250\n700/700 [==============================] - 0s 67us/sample - loss: 97488.6292\nEpoch 66/250\n700/700 [==============================] - 0s 67us/sample - loss: 91736.5993\nEpoch 67/250\n700/700 [==============================] - 0s 78us/sample - loss: 85975.4235\nEpoch 68/250\n700/700 [==============================] - 0s 67us/sample - loss: 80189.9361\nEpoch 69/250\n700/700 [==============================] - 0s 89us/sample - loss: 74465.9286\nEpoch 70/250\n700/700 [==============================] - 0s 67us/sample - loss: 68733.6601\nEpoch 71/250\n700/700 [==============================] - 0s 69us/sample - loss: 63123.0146\nEpoch 72/250\n700/700 [==============================] - 0s 89us/sample - loss: 57568.7673\nEpoch 73/250\n700/700 [==============================] - 0s 67us/sample - loss: 52143.8000\nEpoch 74/250\n700/700 [==============================] - 0s 67us/sample - loss: 46841.6530\nEpoch 75/250\n700/700 
[==============================] - 0s 67us/sample - loss: 41664.3811\nEpoch 76/250\n700/700 [==============================] - 0s 89us/sample - loss: 36710.3025\nEpoch 77/250\n700/700 [==============================] - 0s 67us/sample - loss: 31980.2638\nEpoch 78/250\n700/700 [==============================] - 0s 67us/sample - loss: 27490.0044\nEpoch 79/250\n700/700 [==============================] - 0s 67us/sample - loss: 23295.2193\nEpoch 80/250\n700/700 [==============================] - 0s 89us/sample - loss: 19399.2424\nEpoch 81/250\n700/700 [==============================] - 0s 67us/sample - loss: 15821.4121\nEpoch 82/250\n700/700 [==============================] - 0s 67us/sample - loss: 12634.9319\nEpoch 83/250\n700/700 [==============================] - 0s 67us/sample - loss: 9866.9726\nEpoch 84/250\n700/700 [==============================] - 0s 67us/sample - loss: 7541.5573\nEpoch 85/250\n700/700 [==============================] - 0s 67us/sample - loss: 5719.8526\nEpoch 86/250\n700/700 [==============================] - 0s 67us/sample - loss: 4370.8675\nEpoch 87/250\n700/700 [==============================] - 0s 67us/sample - loss: 3482.8717\nEpoch 88/250\n700/700 [==============================] - 0s 67us/sample - loss: 3081.3459\nEpoch 89/250\n700/700 [==============================] - 0s 89us/sample - loss: 2955.0584\nEpoch 90/250\n700/700 [==============================] - 0s 67us/sample - loss: 2919.0084\nEpoch 91/250\n700/700 [==============================] - 0s 67us/sample - loss: 2878.1071\nEpoch 92/250\n700/700 [==============================] - 0s 85us/sample - loss: 2835.3627\nEpoch 93/250\n700/700 [==============================] - 0s 67us/sample - loss: 2793.9308\nEpoch 94/250\n700/700 [==============================] - 0s 89us/sample - loss: 2754.3078\nEpoch 95/250\n700/700 [==============================] - 0s 69us/sample - loss: 2718.6959\nEpoch 96/250\n700/700 [==============================] - 0s 67us/sample - loss: 2676.1233\nEpoch 97/250\n700/700 [==============================] - 0s 67us/sample - loss: 2641.5044\nEpoch 98/250\n700/700 [==============================] - 0s 67us/sample - loss: 2600.8627\nEpoch 99/250\n700/700 [==============================] - 0s 67us/sample - loss: 2567.6836\nEpoch 100/250\n700/700 [==============================] - 0s 67us/sample - loss: 2527.4432\nEpoch 101/250\n700/700 [==============================] - 0s 67us/sample - loss: 2494.8283\nEpoch 102/250\n700/700 [==============================] - 0s 89us/sample - loss: 2459.9839\nEpoch 103/250\n700/700 [==============================] - 0s 80us/sample - loss: 2422.0237\nEpoch 104/250\n700/700 [==============================] - 0s 67us/sample - loss: 2385.5557\nEpoch 105/250\n700/700 [==============================] - 0s 67us/sample - loss: 2352.6271\nEpoch 106/250\n700/700 [==============================] - 0s 67us/sample - loss: 2315.9826\nEpoch 107/250\n700/700 [==============================] - 0s 69us/sample - loss: 2275.5747\nEpoch 108/250\n700/700 [==============================] - 0s 67us/sample - loss: 2240.5681\nEpoch 109/250\n700/700 [==============================] - 0s 67us/sample - loss: 2202.7267\nEpoch 110/250\n700/700 [==============================] - 0s 78us/sample - loss: 2164.8818\nEpoch 111/250\n700/700 [==============================] - 0s 67us/sample - loss: 2128.8680\nEpoch 112/250\n700/700 [==============================] - 0s 67us/sample - loss: 2093.5601\nEpoch 113/250\n700/700 [==============================] - 0s 89us/sample - loss: 2059.8525\nEpoch 
114/250\n700/700 [==============================] - 0s 67us/sample - loss: 2027.5212\nEpoch 115/250\n700/700 [==============================] - 0s 69us/sample - loss: 1993.6040\nEpoch 116/250\n700/700 [==============================] - 0s 67us/sample - loss: 1956.8016\nEpoch 117/250\n700/700 [==============================] - 0s 89us/sample - loss: 1925.7439\nEpoch 118/250\n700/700 [==============================] - 0s 67us/sample - loss: 1893.9992\nEpoch 119/250\n700/700 [==============================] - 0s 67us/sample - loss: 1859.5495\nEpoch 120/250\n700/700 [==============================] - 0s 67us/sample - loss: 1829.7004\nEpoch 121/250\n700/700 [==============================] - 0s 67us/sample - loss: 1794.5159\nEpoch 122/250\n700/700 [==============================] - 0s 89us/sample - loss: 1762.4011\nEpoch 123/250\n700/700 [==============================] - 0s 67us/sample - loss: 1731.3614\nEpoch 124/250\n700/700 [==============================] - 0s 67us/sample - loss: 1694.8818\nEpoch 125/250\n700/700 [==============================] - 0s 67us/sample - loss: 1660.6659\nEpoch 126/250\n700/700 [==============================] - 0s 69us/sample - loss: 1628.8121\nEpoch 127/250\n700/700 [==============================] - 0s 67us/sample - loss: 1596.7363\nEpoch 128/250\n700/700 [==============================] - 0s 89us/sample - loss: 1561.3069\nEpoch 129/250\n700/700 [==============================] - 0s 67us/sample - loss: 1525.3697\nEpoch 130/250\n700/700 [==============================] - 0s 67us/sample - loss: 1501.4490\nEpoch 131/250\n700/700 [==============================] - 0s 67us/sample - loss: 1471.8032\nEpoch 132/250\n700/700 [==============================] - 0s 69us/sample - loss: 1441.8526\nEpoch 133/250\n700/700 [==============================] - 0s 67us/sample - loss: 1411.3840\nEpoch 134/250\n700/700 [==============================] - 0s 67us/sample - loss: 1375.3392\nEpoch 135/250\n700/700 [==============================] - 0s 67us/sample - loss: 1344.4005\nEpoch 136/250\n700/700 [==============================] - 0s 67us/sample - loss: 1316.0051\nEpoch 137/250\n700/700 [==============================] - 0s 67us/sample - loss: 1286.1575\nEpoch 138/250\n700/700 [==============================] - 0s 67us/sample - loss: 1258.5466\nEpoch 139/250\n700/700 [==============================] - 0s 89us/sample - loss: 1231.0350\nEpoch 140/250\n700/700 [==============================] - 0s 67us/sample - loss: 1202.8353\nEpoch 141/250\n700/700 [==============================] - 0s 67us/sample - loss: 1171.3123\nEpoch 142/250\n700/700 [==============================] - 0s 67us/sample - loss: 1145.8823\nEpoch 143/250\n700/700 [==============================] - 0s 67us/sample - loss: 1117.1228\nEpoch 144/250\n700/700 [==============================] - 0s 67us/sample - loss: 1091.9406\nEpoch 145/250\n700/700 [==============================] - 0s 67us/sample - loss: 1066.3266\nEpoch 146/250\n700/700 [==============================] - 0s 67us/sample - loss: 1034.5236\nEpoch 147/250\n700/700 [==============================] - 0s 67us/sample - loss: 1009.6341\nEpoch 148/250\n700/700 [==============================] - 0s 89us/sample - loss: 982.0937\nEpoch 149/250\n700/700 [==============================] - 0s 67us/sample - loss: 954.0501\nEpoch 150/250\n700/700 [==============================] - 0s 67us/sample - loss: 926.7213\nEpoch 151/250\n700/700 [==============================] - 0s 67us/sample - loss: 903.3459\nEpoch 152/250\n700/700 [==============================] - 0s 
67us/sample - loss: 873.8258\nEpoch 153/250\n700/700 [==============================] - 0s 89us/sample - loss: 846.7390\nEpoch 154/250\n700/700 [==============================] - 0s 67us/sample - loss: 822.1480\nEpoch 155/250\n700/700 [==============================] - 0s 67us/sample - loss: 795.3657\nEpoch 156/250\n700/700 [==============================] - 0s 88us/sample - loss: 770.9504\nEpoch 157/250\n700/700 [==============================] - 0s 89us/sample - loss: 744.3620\nEpoch 158/250\n700/700 [==============================] - 0s 67us/sample - loss: 719.1004\nEpoch 159/250\n700/700 [==============================] - 0s 113us/sample - loss: 696.3267\nEpoch 160/250\n700/700 [==============================] - 0s 89us/sample - loss: 671.8435\nEpoch 161/250\n700/700 [==============================] - 0s 100us/sample - loss: 649.7230\nEpoch 162/250\n700/700 [==============================] - 0s 97us/sample - loss: 627.0320\nEpoch 163/250\n700/700 [==============================] - 0s 89us/sample - loss: 605.2505\nEpoch 164/250\n700/700 [==============================] - 0s 89us/sample - loss: 582.2282\nEpoch 165/250\n700/700 [==============================] - 0s 134us/sample - loss: 561.1635\nEpoch 166/250\n700/700 [==============================] - 0s 89us/sample - loss: 541.3536\nEpoch 167/250\n700/700 [==============================] - 0s 89us/sample - loss: 522.3132\nEpoch 168/250\n700/700 [==============================] - 0s 69us/sample - loss: 503.2385\nEpoch 169/250\n700/700 [==============================] - 0s 89us/sample - loss: 481.9888\nEpoch 170/250\n700/700 [==============================] - 0s 89us/sample - loss: 461.5032\nEpoch 171/250\n700/700 [==============================] - 0s 89us/sample - loss: 442.1222\nEpoch 172/250\n700/700 [==============================] - 0s 67us/sample - loss: 423.0606\nEpoch 173/250\n700/700 [==============================] - 0s 89us/sample - loss: 403.8695\nEpoch 174/250\n700/700 [==============================] - 0s 84us/sample - loss: 386.0664\nEpoch 175/250\n700/700 [==============================] - 0s 70us/sample - loss: 370.9212\nEpoch 176/250\n700/700 [==============================] - 0s 89us/sample - loss: 352.6306\nEpoch 177/250\n700/700 [==============================] - 0s 67us/sample - loss: 333.7979\nEpoch 178/250\n700/700 [==============================] - 0s 67us/sample - loss: 316.0235\nEpoch 179/250\n700/700 [==============================] - 0s 67us/sample - loss: 296.4844\nEpoch 180/250\n700/700 [==============================] - 0s 69us/sample - loss: 280.1557\nEpoch 181/250\n700/700 [==============================] - 0s 67us/sample - loss: 263.3886\nEpoch 182/250\n" ] ], [ [ "## Evaluation\n\nLet's evaluate our performance on our training set and our test set. 
We can compare these two performances to check for overfitting.", "_____no_output_____" ] ], [ [ "model.history.history", "_____no_output_____" ], [ "loss = model.history.history['loss']", "_____no_output_____" ], [ "sns.lineplot(x=range(len(loss)),y=loss)\nplt.title(\"Training Loss per Epoch\");", "_____no_output_____" ] ], [ [ "### Compare final evaluation (MSE) on training set and test set.\n\nThese should hopefully be fairly close to each other.", "_____no_output_____" ] ], [ [ "model.metrics_names", "_____no_output_____" ], [ "training_score = model.evaluate(X_train,y_train,verbose=0)\ntest_score = model.evaluate(X_test,y_test,verbose=0)", "_____no_output_____" ], [ "training_score", "_____no_output_____" ], [ "test_score", "_____no_output_____" ] ], [ [ "### Further Evaluations", "_____no_output_____" ] ], [ [ "test_predictions = model.predict(X_test)", "_____no_output_____" ], [ "test_predictions", "_____no_output_____" ], [ "pred_df = pd.DataFrame(y_test,columns=['Test Y'])", "_____no_output_____" ], [ "pred_df", "_____no_output_____" ], [ "test_predictions = pd.Series(test_predictions.reshape(300,))", "_____no_output_____" ], [ "test_predictions", "_____no_output_____" ], [ "pred_df = pd.concat([pred_df,test_predictions],axis=1)", "_____no_output_____" ], [ "pred_df.columns = ['Test Y','Model Predictions']", "_____no_output_____" ], [ "pred_df", "_____no_output_____" ] ], [ [ "Let's compare to the real test labels!", "_____no_output_____" ] ], [ [ "sns.scatterplot(x='Test Y',y='Model Predictions',data=pred_df)", "_____no_output_____" ], [ "pred_df['Error'] = pred_df['Test Y'] - pred_df['Model Predictions']", "_____no_output_____" ], [ "sns.distplot(pred_df['Error'],bins=50)", "_____no_output_____" ], [ "from sklearn.metrics import mean_absolute_error,mean_squared_error", "_____no_output_____" ], [ "mean_absolute_error(pred_df['Test Y'],pred_df['Model Predictions'])", "_____no_output_____" ], [ "mean_squared_error(pred_df['Test Y'],pred_df['Model Predictions'])", "_____no_output_____" ], [ "# Essentially the same thing, difference just due to precision\ntest_score", "_____no_output_____" ], [ "#RMSE\ntest_score**0.5", "_____no_output_____" ] ], [ [ "# Predicting on brand new data\n\nWhat if we just saw a brand new gemstone from the ground? What should we price it at? This is the **exact** same procedure as predicting on new test data!", "_____no_output_____" ] ], [ [ "# [[Feature1, Feature2]]\nnew_gem = [[998,1000]]", "_____no_output_____" ], [ "# Don't forget to scale!\nscaler.transform(new_gem)", "_____no_output_____" ], [ "new_gem = scaler.transform(new_gem)", "_____no_output_____" ], [ "model.predict(new_gem)", "_____no_output_____" ] ], [ [ "## Saving and Loading a Model", "_____no_output_____" ] ], [ [ "from tensorflow.keras.models import load_model", "_____no_output_____" ], [ "model.save('my_model.h5') # creates an HDF5 file 'my_model.h5'", "_____no_output_____" ], [ "later_model = load_model('my_model.h5')", "WARNING:tensorflow:Sequential models without an `input_shape` passed to the first layer cannot reload their optimizer state. As a result, your model isstarting with a freshly initialized optimizer.\n" ], [ "later_model.predict(new_gem)", "_____no_output_____" ] ] ]
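The training notes in this notebook say that validation runs at the end of every epoch and that callbacks hook into that same point. Below is a minimal sketch (not part of the original notebook) of how those pieces combine, reusing the notebook's `model`, `X_train` and `y_train`; the `patience`, `batch_size` and `validation_split` values are illustrative assumptions.

```python
# End-of-epoch machinery sketched with assumed hyperparameter values.
from tensorflow.keras.callbacks import EarlyStopping

early_stop = EarlyStopping(
    monitor='val_loss',         # quantity checked once at the end of each epoch
    patience=25,                # assumed: stop after 25 epochs without improvement
    restore_best_weights=True,  # roll back to the best epoch's weights
)

model.fit(
    X_train, y_train,
    validation_split=0.1,    # validation is evaluated at the end of every epoch
    batch_size=32,           # each batch of 32 samples yields exactly one weight update
    epochs=250,
    callbacks=[early_stop],  # callbacks fire at the end of each epoch
)
```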
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
e7d2fcf062f2b187434ff500fc5c0dce53f68fa7
4,262
ipynb
Jupyter Notebook
docs/source/auto_examples/plot_basic_example.ipynb
wendazhou/c-lasso
62cf2f1136a0f233f943c8788a93ddbc1b830f58
[ "MIT" ]
20
2020-10-01T08:18:08.000Z
2021-07-30T09:21:23.000Z
docs/source/auto_examples/plot_basic_example.ipynb
wendazhou/c-lasso
62cf2f1136a0f233f943c8788a93ddbc1b830f58
[ "MIT" ]
14
2020-11-12T14:39:20.000Z
2021-01-06T15:59:14.000Z
docs/source/auto_examples/plot_basic_example.ipynb
wendazhou/c-lasso
62cf2f1136a0f233f943c8788a93ddbc1b830f58
[ "MIT" ]
5
2020-09-27T20:22:01.000Z
2021-01-17T18:41:50.000Z
26.308642
537
0.522055
[ [ [ "%matplotlib inline", "_____no_output_____" ] ], [ [ "\n# Basic example\n\nLet's present what classo does when using its default parameters on synthetic data.\n", "_____no_output_____" ] ], [ [ "from classo import classo_problem, random_data\nimport numpy as np", "_____no_output_____" ] ], [ [ "## Generate the data\n\nThis code snippet generates a problem instance with sparse ß in dimension\nd=100 (sparsity d_nonzero=5). The design matrix X comprises n=100 samples generated from an i.i.d standard normal\ndistribution. The dimension of the constraint matrix C is d x k matrix. The noise level is σ=0.5. \nThe input `zerosum=True` implies that C is the all-ones vector and Cß=0. The n-dimensional outcome vector y\nand the regression vector ß is then generated to satisfy the given constraints. \n\n", "_____no_output_____" ] ], [ [ "m, d, d_nonzero, k, sigma = 100, 200, 5, 1, 0.5\n(X, C, y), sol = random_data(m, d, d_nonzero, k, sigma, zerosum=True, seed=1)", "_____no_output_____" ] ], [ [ "Remark : one can see the parameters that should be selected :\n\n", "_____no_output_____" ] ], [ [ "print(np.nonzero(sol))", "_____no_output_____" ] ], [ [ "## Define the classo instance\n\nNext we can define a default c-lasso problem instance with the generated data:\n\n", "_____no_output_____" ] ], [ [ "problem = classo_problem(X, y, C)", "_____no_output_____" ] ], [ [ "## Check parameters\n\nYou can look at the generated problem instance by typing:\n\n", "_____no_output_____" ] ], [ [ "print(problem)", "_____no_output_____" ] ], [ [ "## Solve optimization problems\n\nWe only use stability selection as default model selection strategy. \nThe command also allows you to inspect the computed stability profile for all variables \nat the theoretical λ\n\n", "_____no_output_____" ] ], [ [ "problem.solve()", "_____no_output_____" ] ], [ [ "## Visualisation\n\nAfter completion, the results of the optimization and model selection routines \ncan be visualized using\n\n", "_____no_output_____" ] ], [ [ "print(problem.solution)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
e7d30d30ab318ac728ba7cfb4b475393bf667797
137,714
ipynb
Jupyter Notebook
colab-analysis-training.ipynb
NISH1001/earth-science-text-classification
0231361fee5fd5c44ee7b60c9c62ad811023d538
[ "MIT" ]
1
2021-07-29T16:44:19.000Z
2021-07-29T16:44:19.000Z
colab-analysis-training.ipynb
NISH1001/earth-science-text-classification
0231361fee5fd5c44ee7b60c9c62ad811023d538
[ "MIT" ]
1
2021-09-26T03:48:46.000Z
2021-09-26T03:48:46.000Z
colab-analysis-training.ipynb
NISH1001/earth-science-text-classification
0231361fee5fd5c44ee7b60c9c62ad811023d538
[ "MIT" ]
null
null
null
28.295459
149
0.569426
[ [ [ "from google.colab import drive\ndrive.mount('/content/drive')", "_____no_output_____" ], [ "import os", "_____no_output_____" ], [ "DRIVE_BASE = \"/content/drive/MyDrive/Colab Notebooks/uah-ra/\"", "_____no_output_____" ], [ "KW_PATH = os.path.join(DRIVE_BASE, \"data/keywords.txt\")\nDATA_PATH = os.path.join(DRIVE_BASE, \"data/data.csv\")", "_____no_output_____" ], [ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n%matplotlib inline", "_____no_output_____" ], [ "def load_keywords(path):\n res = []\n with open(path) as f:\n text = f.read().strip()\n tags_str = text.split(\",\")\n res = map(lambda t: [_.strip().lower() for _ in t.split(\">\")], tags_str)\n res = filter(lambda x: len(x) > 0, res)\n res = list(res)\n return res", "_____no_output_____" ], [ "KEYWORDS = load_keywords(KW_PATH)\nlen(KEYWORDS), len(set([kw for kws in KEYWORDS for kw in kws]))", "_____no_output_____" ], [ "KEYWORDS", "_____no_output_____" ] ], [ [ "# Tag Analysis", "_____no_output_____" ] ], [ [ "!pip install loguru", "_____no_output_____" ], [ "from loguru import logger", "_____no_output_____" ], [ "from collections import Counter, defaultdict", "_____no_output_____" ], [ "def get_counts(keywords, level=0):\n kws = map(lambda x: x[level if level<len(x) else len(x)-1], keywords)\n kws = list(kws)\n # kws = list(map(str.lower, kws))\n counter = Counter(kws)\n return counter", "_____no_output_____" ], [ "def analyze_kws(keywords, topn=10):\n plt.figure(figsize=(15, 8))\n for level in [0, 1, 2, 3, -1]:\n _ = get_counts(KEYWORDS, level=level)\n logger.debug(f\"[Level={level}, NKWs={len(_)}] : {_.most_common(10)}\")\n df = pd.DataFrame(_.most_common(topn), columns=[\"kw\", \"frequency\"])\n ax = sns.barplot(\n x=\"frequency\", y=\"kw\",\n data=df,\n linewidth=2.5,\n facecolor=(1, 1, 1, 0),\n errcolor=\".2\",\n edgecolor=\".2\"\n )\n plt.title(f\"Level={level}, topn={topn}\")\n plt.figure(figsize=(15, 8))", "_____no_output_____" ], [ "\", \".join(list(get_counts(KEYWORDS, level=1).keys()))", "_____no_output_____" ], [ "analyze_kws(KEYWORDS, topn=20)", "_____no_output_____" ] ], [ [ "# Data Analysis", "_____no_output_____" ] ], [ [ "def parse_kws(kw_str, level=2):\n res = kw_str.split(\",\")\n res = map(lambda kw: [_.strip().lower() for _ in kw.split(\">\")], res)\n res = map(lambda x: x[level if level<len(x) else len(x)-1], res)\n return list(set(res))\n\ndef load_data(path, level=0):\n logger.info(f\"Loading data from {path}. 
[KW Level={level}]\")\n df = pd.read_csv(path)\n df[\"desc\"] = df[\"desc\"].apply(str.strip)\n df[\"labels\"] = df[\"keywords\"].apply(lambda x: parse_kws(x, level))\n df[\"textlen\"] = df[\"desc\"].apply(len)\n return df", "_____no_output_____" ], [ "DATA = load_data(DATA_PATH, level=1)", "_____no_output_____" ], [ "DATA.shape", "_____no_output_____" ], [ "DATA.head(10)", "_____no_output_____" ], [ "def analyze_labels(df):\n df = df.copy()\n labels = [l for ls in df[\"labels\"] for l in ls]\n uniques = set(labels)\n logger.info(f\"{len(uniques)} unique labels\")", "_____no_output_____" ], [ "analyze_labels(DATA)", "_____no_output_____" ], [ "# idx = 2\n# _data.iloc[2].keywords_processed", "_____no_output_____" ], [ "_data = DATA.copy()\n_data = _data[_data[\"textlen\"]>0]", "_____no_output_____" ], [ "_data.shape", "_____no_output_____" ], [ "# BERT can only process 512 tokens at once\nlen(_data[_data[\"textlen\"] <= 512]) / len(_data), len(_data[_data[\"textlen\"] <= 1024]) / len(_data)", "_____no_output_____" ], [ "plt.figure(figsize=(20, 15))\nsns.histplot(data=_data, x=\"textlen\", bins=100).set(xlim=(0, 3000))", "_____no_output_____" ] ], [ [ "# Baseline Model", "_____no_output_____" ], [ "# Encode Labels", "_____no_output_____" ] ], [ [ "from sklearn.preprocessing import MultiLabelBinarizer", "_____no_output_____" ], [ "DATA_TO_USE = DATA.copy()\nDATA_TO_USE = DATA_TO_USE[DATA_TO_USE[\"textlen\"]<=500]", "_____no_output_____" ], [ "DATA_TO_USE.shape", "_____no_output_____" ], [ "DATA_TO_USE.head()", "_____no_output_____" ], [ "analyze_labels(DATA_TO_USE)", "_____no_output_____" ], [ "LE = MultiLabelBinarizer()\nLABELS_ENCODED = LE.fit_transform(DATA_TO_USE[\"labels\"])", "_____no_output_____" ], [ "LABELS_ENCODED.shape", "_____no_output_____" ], [ "LE.classes_", "_____no_output_____" ], [ "LE.inverse_transform(LABELS_ENCODED[0].reshape(1,-1))", "_____no_output_____" ], [ "DATA_TO_USE[\"labels_encoded\"] = list(LABELS_ENCODED)", "_____no_output_____" ], [ "DATA_TO_USE.head()", "_____no_output_____" ] ], [ [ "# Split Dataset", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import train_test_split", "_____no_output_____" ], [ " X_train, X_test, Y_train, Y_test = train_test_split(DATA_TO_USE[\"desc\"].to_numpy(), LABELS_ENCODED, test_size=0.1, random_state=42)\n\n X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size=0.1, random_state=42)", "_____no_output_____" ], [ "X_train.shape, X_val.shape, X_test.shape", "_____no_output_____" ], [ "Y_train.shape, Y_val.shape, Y_test.shape", "_____no_output_____" ], [ "X_test", "_____no_output_____" ] ], [ [ "# CreateDataset", "_____no_output_____" ] ], [ [ "! 
pip install pytorch_lightning", "_____no_output_____" ], [ "import torch\nfrom torch.utils.data import DataLoader, Dataset", "_____no_output_____" ], [ "import pytorch_lightning as pl", "_____no_output_____" ], [ "class TagDataset (Dataset):\n def __init__(self,texts, tags, tokenizer, max_len=512):\n self.tokenizer = tokenizer\n self.texts = texts\n self.labels = tags\n self.max_len = max_len\n \n def __len__(self):\n return len(self.texts)\n \n def __getitem__(self, item_idx):\n text = self.texts[item_idx]\n inputs = self.tokenizer.encode_plus(\n text,\n None,\n add_special_tokens=True,\n max_length= self.max_len,\n padding = 'max_length',\n return_token_type_ids= False,\n return_attention_mask= True,\n truncation=True,\n return_tensors = 'pt'\n )\n \n input_ids = inputs['input_ids'].flatten()\n attn_mask = inputs['attention_mask'].flatten()\n \n return {\n 'input_ids': input_ids ,\n 'attention_mask': attn_mask,\n 'label': torch.tensor(self.labels[item_idx], dtype=torch.float)\n \n }", "_____no_output_____" ], [ "class TagDataModule (pl.LightningDataModule):\n \n def __init__(self, x_train, y_train, x_val, y_val, x_test, y_test,tokenizer, batch_size=16, max_token_len=512):\n super().__init__()\n self.train_text = x_train\n self.train_label = y_train\n self.val_text = x_val\n self.val_label = y_val\n self.test_text = x_test\n self.test_label = y_test\n self.tokenizer = tokenizer\n self.batch_size = batch_size\n self.max_token_len = max_token_len\n\n def setup(self):\n self.train_dataset = TagDataset(texts=self.train_text, tags=self.train_label, tokenizer=self.tokenizer,max_len = self.max_token_len)\n self.val_dataset = TagDataset(texts=self.val_text,tags=self.val_label,tokenizer=self.tokenizer,max_len = self.max_token_len)\n self.test_dataset = TagDataset(texts=self.test_text,tags=self.test_label,tokenizer=self.tokenizer,max_len = self.max_token_len)\n \n \n def train_dataloader(self):\n return DataLoader (self.train_dataset, batch_size = self.batch_size,shuffle = True , num_workers=2)\n\n def val_dataloader(self):\n return DataLoader (self.val_dataset, batch_size= 16)\n\n def test_dataloader(self):\n return DataLoader (self.test_dataset, batch_size= 16)", "_____no_output_____" ] ], [ [ "# Transformers", "_____no_output_____" ] ], [ [ "!pip install transformers", "_____no_output_____" ], [ "from transformers import AutoTokenizer, AutoModel", "_____no_output_____" ], [ "TOKENIZER = AutoTokenizer.from_pretrained(\"bert-base-uncased\")\n# BASE_MODEL = AutoModel.from_pretrained(\"bert-base-uncased\")\nBASE_MODEL = None", "_____no_output_____" ], [ "# Initialize the parameters that will be use for training\nEPOCHS = 10\nBATCH_SIZE = 4\nMAX_LEN = 512\nLR = 1e-03", "_____no_output_____" ], [ "TAG_DATA_MODULE = TagDataModule(\n X_train, Y_train,\n X_val, Y_val,\n X_test, Y_test,\n TOKENIZER,\n BATCH_SIZE,\n MAX_LEN\n)\nTAG_DATA_MODULE.setup()", "_____no_output_____" ] ], [ [ "# Model", "_____no_output_____" ] ], [ [ "from pytorch_lightning.callbacks import ModelCheckpoint", "_____no_output_____" ], [ "from transformers import AdamW, get_linear_schedule_with_warmup", "_____no_output_____" ], [ "class TagClassifier(pl.LightningModule):\n # Set up the classifier\n def __init__(self, base_model=None, n_classes=10, steps_per_epoch=None, n_epochs=5, lr=1e-5 ):\n super().__init__()\n\n self.model = base_model or AutoModel.from_pretrained(\"bert-base-uncased\", return_dict=True)\n self.classifier = torch.nn.Linear(self.model.config.hidden_size,n_classes)\n self.steps_per_epoch = steps_per_epoch\n 
self.n_epochs = n_epochs\n self.lr = lr\n self.criterion = torch.nn.BCEWithLogitsLoss()\n \n def forward(self,input_ids, attn_mask):\n output = self.model(input_ids = input_ids ,attention_mask = attn_mask)\n output = self.classifier(output.pooler_output)\n return output\n \n \n def training_step(self,batch,batch_idx):\n input_ids = batch['input_ids']\n attention_mask = batch['attention_mask']\n labels = batch['label']\n \n outputs = self(input_ids,attention_mask)\n loss = self.criterion(outputs,labels)\n self.log('train_loss',loss , prog_bar=True,logger=True)\n \n return {\"loss\" :loss, \"predictions\":outputs, \"labels\": labels }\n\n\n def validation_step(self,batch,batch_idx):\n input_ids = batch['input_ids']\n attention_mask = batch['attention_mask']\n labels = batch['label']\n \n outputs = self(input_ids,attention_mask)\n loss = self.criterion(outputs,labels)\n self.log('val_loss',loss , prog_bar=True,logger=True)\n \n return loss\n\n def test_step(self,batch,batch_idx):\n input_ids = batch['input_ids']\n attention_mask = batch['attention_mask']\n labels = batch['label']\n \n outputs = self(input_ids,attention_mask)\n loss = self.criterion(outputs,labels)\n self.log('test_loss',loss , prog_bar=True,logger=True)\n \n return loss\n \n \n def configure_optimizers(self):\n optimizer = AdamW(self.parameters() , lr=self.lr)\n warmup_steps = self.steps_per_epoch//3\n total_steps = self.steps_per_epoch * self.n_epochs - warmup_steps\n\n scheduler = get_linear_schedule_with_warmup(optimizer,warmup_steps,total_steps)\n\n return [optimizer], [scheduler]", "_____no_output_____" ], [ "steps_per_epoch = len(X_train)//BATCH_SIZE\nMODEL = TagClassifier(BASE_MODEL, n_classes=22, steps_per_epoch=steps_per_epoch,n_epochs=EPOCHS,lr=LR)", "_____no_output_____" ], [ "# # saves a file like: input/QTag-epoch=02-val_loss=0.32.ckpt\n# checkpoint_callback = ModelCheckpoint(\n# monitor='val_loss',# monitored quantity\n# filename='QTag-{epoch:02d}-{val_loss:.2f}',\n# save_top_k=3, # save the top 3 models\n# mode='min', # mode of the monitored quantity for optimization\n# )", "_____no_output_____" ], [ "trainer = pl.Trainer(max_epochs = EPOCHS , gpus = 1, callbacks=[], progress_bar_refresh_rate = 30)", "_____no_output_____" ], [ "trainer.fit(MODEL, TAG_DATA_MODULE)", "_____no_output_____" ], [ "!nvidia-smi", "_____no_output_____" ], [ "trainer.save_checkpoint(\"model-10.ckpt\")", "_____no_output_____" ], [ "!mkdir \"$DRIVE_BASE/checkpoints/\"", "_____no_output_____" ], [ "! 
cp \"/content/model-10.ckpt\" \"$DRIVE_BASE/checkpoints\"", "_____no_output_____" ], [ "!ls \"$DRIVE_BASE/checkpoints\"", "_____no_output_____" ] ], [ [ "# Test", "_____no_output_____" ] ], [ [ "trainer.test(MODEL,datamodule=TAG_DATA_MODULE)", "_____no_output_____" ] ], [ [ "# Inference", "_____no_output_____" ] ], [ [ "MODEL.eval()", "_____no_output_____" ], [ "import pickle", "_____no_output_____" ], [ "with open(\"le.pkl\", \"wb\") as f:\n pickle.dump(LE, f)", "_____no_output_____" ], [ "from torch.utils.data import TensorDataset, SequentialSampler", "_____no_output_____" ], [ "DEVICE = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")", "_____no_output_____" ], [ "MODEL.to(DEVICE)", "_____no_output_____" ], [ "def inference(model, texts, tokenizer, batch_size=2):\n # model.eval()\n if isinstance(texts, str):\n texts = [texts]\n input_ids, attention_masks = [], []\n for text in texts:\n text_encoded = tokenizer.encode_plus(\n text,\n None,\n add_special_tokens=True,\n max_length= MAX_LEN,\n padding = 'max_length',\n return_token_type_ids= False,\n return_attention_mask= True,\n truncation=True,\n return_tensors = 'pt' \n )\n input_ids.append(text_encoded[\"input_ids\"])\n attention_masks.append(text_encoded[\"attention_mask\"])\n input_ids = torch.cat(input_ids, dim=0)\n attention_masks = torch.cat(attention_masks, dim=0)\n pred_data = TensorDataset(input_ids, attention_masks)\n pred_sampler = SequentialSampler(pred_data)\n pred_dataloader = DataLoader(pred_data, sampler=pred_sampler, batch_size=batch_size)\n pred_outs = []\n for batch in pred_dataloader:\n # Add batch to GPU\n batch = tuple(t.to(DEVICE) for t in batch)\n \n # Unpack the inputs from our dataloader\n b_input_ids, b_attn_mask = batch\n \n with torch.no_grad():\n # Forward pass, calculate logit predictions\n pred_out = model(b_input_ids,b_attn_mask)\n pred_out = torch.sigmoid(pred_out)\n # Move predicted output and labels to CPU\n pred_out = pred_out.detach().cpu().numpy()\n pred_outs.append(pred_out)\n return pred_outs", "_____no_output_____" ], [ "_texts = X_test[:10]\n_pred_outs = inference(MODEL, _texts, TOKENIZER)", "_____no_output_____" ], [ "_pred_outs", "_____no_output_____" ], [ "_texts", "_____no_output_____" ], [ "thresh = 0.3\nfor _txt, _yt, _p in zip(_texts, Y_test, _pred_outs.copy()):\n _p = _p.flatten()\n confs = _p[_p>thresh]\n _p[_p<thresh] = 0\n _p[_p>=thresh] = 1\n \n print(confs)\n pred_tag = LE.inverse_transform(np.array([_p]))[0]\n gt_tag = LE.inverse_transform(np.array([_yt]))[0]\n print(_txt[:50], gt_tag, pred_tag)", "_____no_output_____" ] ], [ [ "# Custom Evaluation", "_____no_output_____" ] ], [ [ "def inference2(model, tokenizer, texts, gts, threshold=0.3):\n _pred_outs = inference(model, texts, tokenizer, batch_size=1)\n res = []\n for txt, gt, pred in zip(texts, gts, _pred_outs):\n p = pred.flatten().copy()\n confs = p[p>threshold]\n p[p<threshold] = 0\n p[p>=threshold] = 1\n p = np.array([p])\n gt = np.array([gt])\n pred_tags = LE.inverse_transform(p)[0]\n gt_tags = LE.inverse_transform(gt)[0]\n res.append({\"gts\": gt_tags, \"preds\": pred_tags, \"text\": txt})\n return res", "_____no_output_____" ], [ "def compute_jaccard(tokens1, tokens2):\n if not tokens1 or not tokens2:\n return 0\n intersection = set(tokens1).intersection(tokens2)\n union = set(tokens1).union(tokens2)\n return len(intersection)/len(union)", "_____no_output_____" ], [ "compute_jaccard([1, 2], [1, 2, 3])", "_____no_output_____" ], [ "import json", "_____no_output_____" ], [ "!mkdir 
\"$DRIVE_BASE/outputs/\"", "_____no_output_____" ], [ "def evaluate_jaccard(model, tokenizer, texts, gts, threshold=0.3):\n \"\"\"\n Jaccard Evaluation. SIimlar to IoU\n \"\"\"\n predictions = inference2(model, tokenizer, texts, gts, threshold)\n with open(\"inference.json\", \"w\") as f:\n json.dump(predictions, f)\n metrics = []\n for pmap in predictions:\n metrics.append(compute_jaccard(pmap[\"gts\"], pmap[\"preds\"]))\n return metrics", "_____no_output_____" ], [ "_ = evaluate_jaccard(MODEL, TOKENIZER, X_test[:50], Y_test[:50], threshold=0.3)", "_____no_output_____" ], [ "_", "_____no_output_____" ], [ "!cp \"inference.json\" \"$DRIVE_BASE/outputs/\"", "_____no_output_____" ] ], [ [ "# Reference\n\n- https://discuss.pytorch.org/t/using-bcewithlogisloss-for-multi-label-classification/67011/2\n- https://medium.com/analytics-vidhya/finetune-distilbert-for-multi-label-text-classsification-task-994eb448f94c", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ] ]
e7d30f110fe0d13b23bafcdd3e6152efb7f30278
54,305
ipynb
Jupyter Notebook
homeworks/D076/Day76-Optimizer_HW.ipynb
peteryuX/100Day-ML-Marathon
cd61add6fa91ef117429eb1300cbdd96682d2d43
[ "MIT" ]
6
2019-05-19T05:53:07.000Z
2020-04-18T05:02:13.000Z
homeworks/D076/Day76-Optimizer_HW.ipynb
peteryuX/100Day-ML-Marathon
cd61add6fa91ef117429eb1300cbdd96682d2d43
[ "MIT" ]
null
null
null
homeworks/D076/Day76-Optimizer_HW.ipynb
peteryuX/100Day-ML-Marathon
cd61add6fa91ef117429eb1300cbdd96682d2d43
[ "MIT" ]
1
2019-11-20T14:33:12.000Z
2019-11-20T14:33:12.000Z
145.589812
22,148
0.872516
[ [ [ "\n# 作業: \n \n (1)以, Adam, 為例, 調整 batch_size, epoch , 觀察accurancy, loss 的變化\n \n (2)以同一模型, 分別驗證 SGD, Adam, Rmsprop 的 accurancy", "_____no_output_____" ] ], [ [ "import keras\n#from keras.datasets import cifar10\nfrom keras.datasets import mnist \nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential, load_model\nfrom keras.layers import Dense, Dropout, Activation, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras import optimizers\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\nimport numpy \n", "_____no_output_____" ], [ "# 第一步:選擇模型, 順序模型是多個網絡層的線性堆疊\n \nmodel = Sequential()\n\n# 第二步:構建網絡層\n \nmodel.add(Dense( 500,input_shape=(784,),kernel_initializer='uniform')) # 輸入層,28*28=784 \nmodel.add(Activation('relu')) # 激活函數是relu \n\nmodel.add(Dense( 500, kernel_initializer='uniform')) # 隱藏層節點500個 \nmodel.add(Activation('relu')) \n\nmodel.add(Dense( 500, kernel_initializer='uniform')) # 隱藏層節點500個 \nmodel.add(Activation('relu')) \n\nmodel.add(Dense( 500, kernel_initializer='uniform')) # 隱藏層節點500個 \nmodel.add(Activation('relu')) \n\nmodel.add(Dense( 10, kernel_initializer='uniform')) # 輸出結果是10個類別,所以維度是10 \nmodel.add(Activation('softmax')) # 最後一層用softmax作為激活函數", "_____no_output_____" ], [ "# 模型建立完成後,統計參數總量\nprint(\"Total Parameters:%d\" % model.count_params())", "Total Parameters:1149010\n" ], [ "# 輸出模型摘要資訊\nmodel.summary()", "_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense_21 (Dense) (None, 500) 392500 \n_________________________________________________________________\nactivation_21 (Activation) (None, 500) 0 \n_________________________________________________________________\ndense_22 (Dense) (None, 500) 250500 \n_________________________________________________________________\nactivation_22 (Activation) (None, 500) 0 \n_________________________________________________________________\ndense_23 (Dense) (None, 500) 250500 \n_________________________________________________________________\nactivation_23 (Activation) (None, 500) 0 \n_________________________________________________________________\ndense_24 (Dense) (None, 500) 250500 \n_________________________________________________________________\nactivation_24 (Activation) (None, 500) 0 \n_________________________________________________________________\ndense_25 (Dense) (None, 10) 5010 \n_________________________________________________________________\nactivation_25 (Activation) (None, 10) 0 \n=================================================================\nTotal params: 1,149,010\nTrainable params: 1,149,010\nNon-trainable params: 0\n_________________________________________________________________\n" ], [ " '''\n SGD(隨機梯度下降) - Arguments\nlr: float >= 0. Learning rate.\nmomentum: float >= 0. Parameter that accelerates SGD in the relevant direction and dampens oscillations.\ndecay: float >= 0. Learning rate decay over each update.\nnesterov: boolean. Whether to apply Nesterov momentum.\n'''\nopt = keras.optimizers.SGD(lr=0.1, momentum=0.9, decay=0.95, nesterov=True)\n'''\nRMSprop- Arguments\nlr: float >= 0. Learning rate.\nrho: float >= 0.\nepsilon: float >= 0. Fuzz factor. If None, defaults to K.epsilon().\ndecay: float >= 0. 
Learning rate decay over each update.\n'''\n# note: this reassignment overwrites the SGD optimizer above, so only RMSprop is actually used below\nopt = keras.optimizers.RMSprop(lr=0.001)", "_____no_output_____" ], [ "# Step 3: compile\n# (with one-hot 10-class targets, categorical_crossentropy is the conventional loss;\n# binary_crossentropy makes Keras report the more lenient binary accuracy)\nmodel.compile(optimizer=opt, loss = 'binary_crossentropy', metrics = ['accuracy'])", "_____no_output_____" ], [ "# Step 4: split the data\n# load the data with the mnist helper bundled with Keras (needs network access the first time)\n(X_train, y_train), (X_test, y_test) = mnist.load_data() \n\n# the mnist inputs have shape (num, 28, 28), so flatten the trailing dimensions into 784 \nX_train = (X_train.reshape(X_train.shape[0], X_train.shape[1] * X_train.shape[2])).astype('float32') / 255.\nX_test = (X_test.reshape(X_test.shape[0], X_test.shape[1] * X_test.shape[2])).astype('float32') / 255.\nY_train = (numpy.arange(10) == y_train[:, None]).astype(int)\nY_test = (numpy.arange(10) == y_test[:, None]).astype(int)", "_____no_output_____" ], [ "\n'''\n    Declare and set\n    batch_size: the number of samples in each group when the training set is split into batches\n    epochs: the number of training passes\n    \n'''\nbatch_size = 256\nepochs = 20\n", "_____no_output_____" ], [ "# Step 5: train and update the model parameters\n# avoid \"Blas GEMM launch failed\" problems caused by dynamic GPU/CPU memory allocation\nimport tensorflow as tf\ngpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.999)\nsess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))\n\n\nhistory = model.fit(X_train,Y_train,batch_size=batch_size, epochs=epochs,\n                    shuffle=True,verbose=2,validation_split=0.3)", "Train on 42000 samples, validate on 18000 samples\nEpoch 1/20\n - 2s - loss: 0.0823 - acc: 0.9707 - val_loss: 0.0296 - val_acc: 0.9901\nEpoch 2/20\n - 2s - loss: 0.0260 - acc: 0.9915 - val_loss: 0.0363 - val_acc: 0.9888\nEpoch 3/20\n - 2s - loss: 0.0164 - acc: 0.9948 - val_loss: 0.0203 - val_acc: 0.9940\nEpoch 4/20\n - 2s - loss: 0.0120 - acc: 0.9963 - val_loss: 0.0194 - val_acc: 0.9944\nEpoch 5/20\n - 2s - loss: 0.0094 - acc: 0.9970 - val_loss: 0.0197 - val_acc: 0.9949\nEpoch 6/20\n - 2s - loss: 0.0075 - acc: 0.9977 - val_loss: 0.0210 - val_acc: 0.9947\nEpoch 7/20\n - 2s - loss: 0.0066 - acc: 0.9980 - val_loss: 0.0200 - val_acc: 0.9948\nEpoch 8/20\n - 2s - loss: 0.0053 - acc: 0.9984 - val_loss: 0.0259 - val_acc: 0.9945\nEpoch 9/20\n - 2s - loss: 0.0044 - acc: 0.9987 - val_loss: 0.0568 - val_acc: 0.9911\nEpoch 10/20\n - 2s - loss: 0.0041 - acc: 0.9988 - val_loss: 0.0266 - val_acc: 0.9954\nEpoch 11/20\n - 2s - loss: 0.0039 - acc: 0.9989 - val_loss: 0.0271 - val_acc: 0.9951\nEpoch 12/20\n - 2s - loss: 0.0035 - acc: 0.9990 - val_loss: 0.0447 - val_acc: 0.9937\nEpoch 13/20\n - 2s - loss: 0.0034 - acc: 0.9991 - val_loss: 0.0292 - val_acc: 0.9952\nEpoch 14/20\n - 2s - loss: 0.0035 - acc: 0.9991 - val_loss: 0.0246 - val_acc: 0.9956\nEpoch 15/20\n - 2s - loss: 0.0026 - acc: 0.9993 - val_loss: 0.0321 - val_acc: 0.9944\nEpoch 16/20\n - 2s - loss: 0.0025 - acc: 0.9994 - val_loss: 0.0307 - val_acc: 0.9953\nEpoch 17/20\n - 2s - loss: 0.0028 - acc: 0.9992 - val_loss: 0.0317 - val_acc: 0.9956\nEpoch 18/20\n - 2s - loss: 0.0024 - acc: 0.9994 - val_loss: 0.0297 - val_acc: 0.9958\nEpoch 19/20\n - 2s - loss: 0.0020 - acc: 0.9995 - val_loss: 0.0353 - val_acc: 0.9952\nEpoch 20/20\n - 2s - loss: 0.0027 - acc: 0.9995 - val_loss: 0.0348 - val_acc: 0.9950\n" ], [ "# Step 6: output\nprint ( \" test set \" )\nscores = model.evaluate(X_test,Y_test,batch_size=200,verbose= 0)\nprint ( \"\" )\n#print ( \" The test loss is %f \" % scores)\nprint ( \" The test loss is %f \", scores)\nresult = model.predict(X_test,batch_size=200,verbose= 0)\n\nresult_max = numpy.argmax(result, axis = 1 )\ntest_max = numpy.argmax(Y_test, axis = 1 )\n\nresult_bool = numpy.equal(result_max, test_max)\ntrue_num = numpy.sum(result_bool)\nprint ( \"\" )\nprint ( \" The accuracy of the model is %f \" % 
(true_num/len(result_bool)))", " test set \n\n The test loss is %f [0.02919830844754415, 0.9956800174713135]\n\n The accuracy of the model is 0.978000 \n" ], [ "import matplotlib.pyplot as plt\n\n%matplotlib inline\n\n# history = model.fit(x, y, validation_split=0.25, epochs=50, batch_size=16, verbose=1)\n\n# Plot training & validation accuracy values\nplt.plot(history.history['acc'])\nplt.plot(history.history['val_acc'])\nplt.title('Model accuracy')\nplt.ylabel('Accuracy')\nplt.xlabel('Epoch')\nplt.legend(['Train', 'Test'], loc='upper left')\nplt.show()\n\n# Plot training & validation loss values\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('Model loss')\nplt.ylabel('Loss')\nplt.xlabel('Epoch')\nplt.legend(['Train', 'Test'], loc='upper left')\nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7d32bb8ea780ff693fb5509d0e9f837c078cc5a
35,436
ipynb
Jupyter Notebook
pmcm/fragmentation2.ipynb
bmcs-group/bmcs_fragmentation
4d6ff26e60cae1937fc773cc1d6d91f6ce26ad03
[ "MIT" ]
null
null
null
pmcm/fragmentation2.ipynb
bmcs-group/bmcs_fragmentation
4d6ff26e60cae1937fc773cc1d6d91f6ce26ad03
[ "MIT" ]
null
null
null
pmcm/fragmentation2.ipynb
bmcs-group/bmcs_fragmentation
4d6ff26e60cae1937fc773cc1d6d91f6ce26ad03
[ "MIT" ]
null
null
null
128.391304
26,062
0.606135
[ [ [ "# Probabilistic Multiple Cracking Model of Brittle-Matrix Composite: One-by-One Crack Tracing Algorithm\nInteractive application for fragmentation model presented in the paper \n[citation and link will be added upon paper publication]\n - Change the material parameters to trigger the recalculation. \n - Inspect the cracking history by changing the crack slider.\n - Visit an annotated source code of the implementation [here](../notebooks/annotated_fragmentation.ipynb)", "_____no_output_____" ] ], [ [ "%%html\n<style>\n.output_wrapper button.btn.btn-default,\n.output_wrapper .ui-dialog-titlebar {\n display: none;\n}\n</style>", "_____no_output_____" ], [ "%matplotlib notebook\nimport numpy as np\nfrom scipy.optimize import newton\nimport matplotlib.pylab as plt", "_____no_output_____" ], [ "Em=25e3 # [MPa] matrix modulus\nEf=180e3 # [MPa] fiber modulus\nvf=0.01 # [-] reinforcement ratio\nT=12. # [N/mm^3] bond intensity\nsig_cu=10.0 # [MPa] composite strength\nsig_mu=3.0 # [MPa] matrix strength\nm=10000 # Weibull shape modulus", "_____no_output_____" ], [ "## Crack bridge with constant bond\ndef get_sig_m(z, sig_c): # matrix stress (*\\label{sig_m}*)\n sig_m = np.minimum(z * T * vf / (1 - vf), Em * sig_c / (vf * Ef + (1 - vf) * Em))\n return sig_m\n\ndef get_eps_f(z, sig_c): # reinforcement strain (*\\label{sig_f}*)\n sig_m = get_sig_m(z, sig_c)\n eps_f = (sig_c - sig_m * (1 - vf)) / vf / Ef\n return eps_f", "_____no_output_____" ], [ "## Specimen discretization\ndef get_z_x(x, XK): # distance to the closest crack (*\\label{get_z_x}*)\n z_grid = np.abs(x[:, np.newaxis] - np.array(XK)[np.newaxis, :])\n return np.amin(z_grid, axis=1)\n\nimport warnings # (*\\label{error1}*)\nwarnings.filterwarnings(\"error\", category=RuntimeWarning) # (*\\label{error2}*)\t\ndef get_sig_c_z(sig_mu, z, sig_c_pre): \n # crack initiating load at a material element\n fun = lambda sig_c: sig_mu - get_sig_m(z, sig_c)\n try: # search for the local crack load level\n return newton(fun, sig_c_pre)\n except (RuntimeWarning, RuntimeError):\n # solution not found (shielded zone) return the ultimate composite strength\n return sig_cu\n\ndef get_sig_c_K(z_x, x, sig_c_pre, sig_mu_x):\n # crack initiating loads over the whole specimen\n get_sig_c_x = np.vectorize(get_sig_c_z)\n sig_c_x = get_sig_c_x(sig_mu_x, z_x, sig_c_pre) \n y_idx = np.argmin(sig_c_x)\n return sig_c_x[y_idx], x[y_idx]", "_____no_output_____" ], [ "## Crack tracing algorithm\nn_x=5000\nL_x=500\ndef get_cracking_history(update_progress=None):\n x = np.linspace(0, L_x, n_x) # specimen discretization (*\\label{discrete}*)\n sig_mu_x = sig_mu * np.random.weibull(m, size=n_x) # matrix strength (*\\label{m_strength}*)\n\n Ec = Em * (1-vf) + Ef*vf # [MPa] mixture rule\n\n XK = [] # recording the crack postions\n sig_c_K = [0.] # recording the crack initating loads\n eps_c_K = [0.] 
# recording the composite strains\n    CS = [L_x, L_x/2] # crack spacing\n    sig_m_x_K = [np.zeros_like(x)] # stress profiles for crack states\n\n    idx_0 = np.argmin(sig_mu_x)\n    XK.append(x[idx_0]) # position of the first crack\n    sig_c_0 = sig_mu_x[idx_0] * Ec / Em\n    sig_c_K.append(sig_c_0)\n    eps_c_K.append(sig_mu_x[idx_0] / Em)\n    \n    while True:\n        z_x = get_z_x(x, XK) # distances to the nearest crack\n        sig_m_x_K.append(get_sig_m(z_x, sig_c_K[-1])) # matrix stress\n        sig_c_k, y_i = get_sig_c_K(z_x, x, sig_c_K[-1], sig_mu_x) # identify the next crack\n        if sig_c_k == sig_cu: # (*\\label{no_crack}*)\n            break\n        if update_progress: # callback to user interface\n            update_progress(sig_c_k)\n        XK.append(y_i) # record the crack position\n        sig_c_K.append(sig_c_k) # corresponding composite stress\n        eps_c_K.append( # composite strain - integrate the strain field\n            np.trapz(get_eps_f(get_z_x(x, XK), sig_c_k), x) / np.amax(x)) # (*\\label{imple_avg_strain}*)\n        XK_arr = np.hstack([[0], np.sort(np.array(XK)), [L_x]])\n        CS.append(np.average(XK_arr[1:]-XK_arr[:-1])) # crack spacing\n    \n    sig_c_K.append(sig_cu) # the ultimate state\n    eps_c_K.append(np.trapz(get_eps_f(get_z_x(x, XK), sig_cu), x) / np.amax(x))\n    CS.append(CS[-1])\n    if update_progress:\n        update_progress(sig_c_k)\n    return np.array(sig_c_K), np.array(eps_c_K), sig_mu_x, x, np.array(CS), np.array(sig_m_x_K)", "_____no_output_____" ], [ "fig, (ax, ax_sig_x) = plt.subplots(1, 2, figsize=(8, 3), tight_layout=True)\nax_cs = ax.twinx()\n\nsig_c_K, eps_c_K, sig_mu_x, x, CS, sig_m_x_K = get_cracking_history()\nn_c = len(eps_c_K) - 2 # number of cracks\nax.plot(eps_c_K, sig_c_K, marker='o', label='%d cracks:' % n_c)\nax.set_xlabel(r'$\\varepsilon_\\mathrm{c}$ [-]');\nax.set_ylabel(r'$\\sigma_\\mathrm{c}$ [MPa]')\nax_sig_x.plot(x, sig_mu_x, color='orange')\nax_sig_x.fill_between(x, sig_mu_x, 0, color='orange', alpha=0.1)\nax_sig_x.set_xlabel(r'$x$ [mm]');\nax_sig_x.set_ylabel(r'$\\sigma$ [MPa]')\nax.legend()\neps_c_KK = np.array([eps_c_K[:-1], eps_c_K[1:]]).T.flatten()\nCS_KK = np.array([CS[:-1], CS[:-1]]).T.flatten()\nax_cs.plot(eps_c_KK, CS_KK, color='gray')\nax_cs.fill_between(eps_c_KK, CS_KK, color='gray', alpha=0.2)\nax_cs.set_ylabel(r'$\\ell_\\mathrm{cs}$ [mm]');\nplt.interactive(False)\nplt.show()", "_____no_output_____" ] ], [ [ "# Model parameters\n\n| Symbol | Unit | Description |\n| :-: | :-: | :- |\n| $E_\\mathrm{m}$ | MPa | Elastic modulus of matrix |\n| $E_\\mathrm{f}$ | MPa | Elastic modulus of reinforcement |\n| $V_\\mathrm{f}$ | - | reinforcement ratio |\n| $T$ | N/mm$^3$ | Bond intensity |\n| $\\sigma_\\mathrm{cu}$ | MPa | Composite strength |\n| $\\sigma_\\mathrm{mu}$ | MPa | Scale parameter of matrix strength distribution |\n| $m$ | - | Weibull modulus |\n| $L$ | mm | Specimen length |\n| $n_\\mathrm{points}$ | - | Number of discretization points |", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ] ]
e7d34e7d133f8cc1f7fe942120305f029ba2cd23
283,411
ipynb
Jupyter Notebook
TestPairsAnswer.ipynb
franktpmvu/keras-yolo3-parts
bad37b53f072f013dc45784b70f7685dab64e1a2
[ "MIT" ]
null
null
null
TestPairsAnswer.ipynb
franktpmvu/keras-yolo3-parts
bad37b53f072f013dc45784b70f7685dab64e1a2
[ "MIT" ]
null
null
null
TestPairsAnswer.ipynb
franktpmvu/keras-yolo3-parts
bad37b53f072f013dc45784b70f7685dab64e1a2
[ "MIT" ]
null
null
null
72.483632
131,704
0.703247
[ [ [ "import os\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"3\"\nfrom PIL import Image\nimport numpy as np\nimport keras.backend as K\nfrom keras.layers import Input, Lambda\nfrom keras.models import Model\nfrom keras.optimizers import Adam\nfrom keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping\nfrom matplotlib.pyplot import imshow\n\nfrom yolo3.model import *\nfrom yolo3.utils import *\nfrom train_pairs import *\nfrom yolo import *\nimport tensorflow as tf\n\ngpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.5)\nsession = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))\n \nannotation_path = 'CrowdHuman_train_sp.txt'\nannotation_path2 = 'CrowdHuman_train.txt'\n#annotation_path = 'train_coco2014.txt'\nlog_dir = 'logs/pairs_/'\nclasses_path = 'model_data/CrowdHuman_classes.txt'\n#classes_path = 'model_data/coco_classes.txt'\nanchors_path = 'model_data/yolo_anchors_CH.txt'\nclass_names = get_classes(classes_path)\nnum_classes = len(class_names)\nanchors = get_anchors(anchors_path)\n\n\nval_split = 0.1\nwith open(annotation_path) as f:\n lines = f.readlines()\n#np.random.seed(10101)\n#np.random.shuffle(lines)\n#np.random.seed(None)\nnum_val = int(len(lines)*val_split)\nnum_train = len(lines) - num_val\ninput_shape = (416,416) # multiple of 32, hw\nprint('1')", "Using TensorFlow backend.\n" ], [ "weights_path='model_data/pairs_ep258-loss188.367-val_loss193.041.h5' # make sure you know what you freeze\n\n\nnum_anchors = len(anchors)\nimage_input = Input(shape=(None, None, 3))\nmodel_ops= yolo_pairs_body(image_input, num_anchors//3, num_classes)\nmodel_ops.load_weights(weights_path, by_name=True, skip_mismatch=True)\nprint('2')", "2\n" ], [ "model = create_pairs_model(input_shape, anchors, num_classes,\n freeze_body=2, weights_path=weights_path) # make sure you know what you freeze\n\nmodel.compile(optimizer=Adam(lr=1e-3), loss={\n # use custom yolo_loss Lambda layer.\n 'yolo_loss': lambda y_true, y_pred: y_pred})\nprint('3')", "Create YOLOv3 model with 9 anchors and 3 classes.\nLoad weights model_data/pairs_ep258-loss188.367-val_loss193.041.h5.\nFreeze the first 249 layers of total 252 layers.\nWARNING:tensorflow:From /data3/keras-yolo3-parts/yolo3/model.py:793: Print (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2018-08-20.\nInstructions for updating:\nUse tf.print instead of tf.Print. Note that tf.print returns a no-output operator that directly prints the output. Outside of defuns or eager mode, this operator will not be executed unless it is directly specified in session.run or used as a control dependency for other operators. This is only a concern in graph mode. 
Below is an example of how to ensure tf.print executes in graph mode:\n```python\n sess = tf.Session()\n with sess.as_default():\n tensor = tf.range(10)\n print_op = tf.print(tensor)\n with tf.control_dependencies([print_op]):\n out = tf.add(tensor, tensor)\n sess.run(out)\n ```\nAdditionally, to use tf.print in python 2.7, users must make sure to import\nthe following:\n\n `from __future__ import print_function`\n\n3\n" ], [ "for a in data_generator_wrapper_pairs(lines[:num_train], 1, input_shape, anchors, num_classes):\n b=a\n \n break\n\n\n#model_ops=Model(model_ops.input,model_ops.output)\n#model_ops.input=b[0]\nyolo_outputs=model.predict(b[0],batch_size=32)\nticks = time.time()\ndataNew='./yolo_gt_bboxes'+str(ticks)+'.mat'\n#sess=tf.Session()\nscio.savemat(dataNew, {'image':np.float32(a[0][0][0]),'bboxse0':np.float32(a[0][1]),'bboxes2':np.float32(a[0][2]),'bboxes3':np.float32(a[0][3])})\n\n\nprint('4')", "4\n" ], [ "\nprint(np.shape(a[0][0][0]))\nprint(np.shape(a[0][1]))\nimshow(a[0][0][0])\nprint(np.max(a[0][0][0]))\nrsgt=K.reshape(a[0][1],[-1,13,13,3,3,5])\n\n\n\n\nobject_mask = rsgt[..., 4:5]\n# Darknet raw box to calculate loss.\nraw_true_xy = rsgt[..., :2]#*grid_shapes[l][::-1] #- grid_rs\nraw_true_wh = rsgt[..., 2:4]\nraw_true_wh = K.switch(object_mask, raw_true_wh, K.zeros_like(raw_true_wh)) # avoid log(0)=-inf\n\nsess=tf.Session()\nrsgt=rsgt.eval(session=sess)\nraw_true_xy=raw_true_xy.eval(session=sess)\nraw_true_wh=raw_true_wh.eval(session=sess)\nobject_mask=object_mask.eval(session=sess)\nticks = time.time()\ndataNew='./yolo_rsgt'+str(ticks)+'.mat'\nscio.savemat(dataNew, {'rsgt':np.float32(rsgt),'raw_true_xy':np.float32(raw_true_xy),'raw_true_wh':np.float32(raw_true_wh),'object_mask':np.float32(object_mask)})\nprint('4.25')", "(416, 416, 3)\n(1, 13, 13, 3, 15)\n0.8726098277351474\n4.25\n" ], [ "a[0][0][0]=a[0][0][0]\nr_image,gt_image,yoloopt,details = YOLO.detect_image_with_gt(YOLO(),a)\nticks = time.time()\ndataNew='./yolo_outputs'+str(ticks)+'.mat'\n#sess=tf.Session()\nfor i in range(len(details)):\n details[i]['box_xy']=np.float32(details[i]['box_xy'])\n details[i]['box_wh']=np.float32(details[i]['box_wh'])\n details[i]['box_confidence']=np.float32(details[i]['box_confidence'])\n details[i]['feats']=np.float32(details[i]['feats'])\nscio.savemat(dataNew, {'yolo_outputs0':np.float32(yoloopt[0]),'yolo_outputs1':np.float32(yoloopt[1]),'yolo_outputs2':np.float32(yoloopt[2]),'details0':details[0],'details1':details[1],'details2':details[2]})\n#r_image.show()\nr_image.save('r_image.jpg')\ngt_image.save('gt_image.jpg')\n\nprint(yoloopt[0][0][0][0])\nprint('4.5')", "model_data/trained_weights_final_pairs_181227.h5 model, anchors, and classes loaded.\n(416, 416, 3)\nFound 51 boxes for img\nhead 0.35 (323, 0) (323, 0)\nhead 0.36 (0, 0) (0, 0)\nhead 0.39 (0, 1) (0, 1)\nhead 0.39 (416, 0) (416, 0)\nhead 0.39 (0, 0) (0, 0)\nhead 0.50 (0, 0) (0, 0)\nhead 0.53 (416, 0) (416, 0)\nhead 0.55 (416, 0) (416, 0)\nhead 0.59 (416, 1) (416, 1)\nhead 0.64 (0, 1) (0, 1)\nhead 0.67 (416, 0) (416, 0)\nhead 0.73 (338, 0) (338, 0)\nhead 0.75 (220, 0) (220, 0)\nhead 0.87 (0, 0) (0, 0)\nhead 0.94 (414, 0) (414, 0)\nhead 0.96 (416, 0) (416, 0)\nhead 0.98 (0, 0) (0, 0)\nperson_V 0.35 (314, 0) (314, 0)\nperson_V 0.36 (0, 51) (0, 51)\nperson_V 0.39 (0, 72) (0, 72)\nperson_V 0.39 (416, 3) (416, 3)\nperson_V 0.39 (0, 2) (0, 2)\nperson_V 0.50 (0, 2) (0, 2)\nperson_V 0.53 (416, 4) (416, 4)\nperson_V 0.55 (416, 3) (416, 3)\nperson_V 0.59 (416, 228) (416, 228)\nperson_V 0.64 (0, 89) (0, 89)\nperson_V 0.67 (416, 0) 
(416, 0)\nperson_V 0.73 (312, 1) (312, 1)\nperson_V 0.76 (189, 134) (189, 134)\nperson_V 0.87 (0, 5) (0, 5)\nperson_V 0.94 (412, 0) (412, 0)\nperson_V 0.96 (416, 195) (416, 195)\nperson_V 0.98 (0, 208) (0, 208)\nperson_F 0.35 (313, 1) (313, 1)\nperson_F 0.36 (0, 96) (0, 96)\nperson_F 0.38 (0, 76) (0, 76)\nperson_F 0.39 (416, 12) (416, 12)\nperson_F 0.39 (0, 3) (0, 3)\nperson_F 0.50 (0, 4) (0, 4)\nperson_F 0.52 (416, 5) (416, 5)\nperson_F 0.54 (416, 4) (416, 4)\nperson_F 0.59 (416, 192) (416, 192)\nperson_F 0.64 (0, 93) (0, 93)\nperson_F 0.66 (416, 0) (416, 0)\nperson_F 0.73 (307, 1) (307, 1)\nperson_F 0.77 (179, 139) (179, 139)\nperson_F 0.87 (0, 23) (0, 23)\nperson_F 0.94 (411, 0) (411, 0)\nperson_F 0.96 (416, 178) (416, 178)\nperson_F 0.98 (0, 198) (0, 198)\n4.0369952553883195\nhead 1.00 [ 58. 176. 14.000001 21. ]\nhead 1.00 [168. 142. 21. 37.]\nhead 1.00 [309. 161. 9. 11.]\nhead 1.00 [ 28.000002 90. 57. 111. ]\nperson_V 1.00 [ 70. 213. 35. 162.]\nperson_V 1.00 [128. 226. 44. 117.]\nperson_V 1.00 [ 77. 219. 65. 107.]\nperson_V 1.00 [252. 237.00002 75. 282. ]\nperson_V 1.00 [164. 247. 66. 255.]\nperson_V 1.00 [404. 243.99998 24. 240. ]\nperson_V 1.00 [398. 259. 35. 230.99998]\nperson_V 1.00 [369. 217.99998 39. 148. ]\nperson_V 1.00 [313. 195. 27. 84.]\nperson_V 1.00 [ 42. 224.99998 84. 382. ]\nperson_F 1.00 [ 70. 213. 35. 162.]\nperson_F 1.00 [128. 226. 44. 117.]\nperson_F 1.00 [ 75. 220. 74. 115.00001]\nperson_F 1.00 [252. 237.00002 75. 282. ]\nperson_F 1.00 [164. 247. 66. 255.]\nperson_F 1.00 [404. 243.99998 24. 240. ]\nperson_F 1.00 [398. 259. 35. 230.99998]\nperson_F 1.00 [367. 216. 42. 146.]\nperson_F 1.00 [313. 195. 27. 84.]\nperson_F 1.00 [ 42. 224.99998 84. 382. ]\n4.058508559130132\n[-1.83591866e+00 -1.83704245e+00 -4.14818048e-01 -1.06570154e-01\n -1.66614742e+01 -9.13047194e-01 -2.75937414e+00 -1.11520147e+00\n -1.28598237e+00 -1.66062107e+01 8.45466375e-01 -1.81580982e+01\n -1.90340948e+00 -1.90610409e+00 -1.65374031e+01 -1.42581689e+00\n -1.50823379e+00 8.22723627e-01 2.76659220e-01 -1.61935368e+01\n -7.42605805e-01 -2.44594669e+00 3.74600619e-01 1.60990745e-01\n -1.60478516e+01 7.94715941e-01 -1.63351574e+01 -1.15374100e+00\n -9.89955902e-01 -1.60238419e+01 9.31648804e-32 -3.70174503e-31\n 2.87836484e-31 5.28722548e-31 -2.36138382e+01 4.14161219e-31\n 3.32445554e-31 -7.22304104e-33 -2.31013628e-31 -2.36484432e+01\n 3.71808064e-31 -7.06788172e-31 -3.46601891e-31 -2.07455498e-31\n -2.37012386e+01]\n4.5\n" ], [ "ddd=model.evaluate(b[0],b[1])\nprint(ddd)\nprint('5')", "1/1 [==============================] - 5s 5s/step\n128.65318298339844\n5\n" ], [ "num_anchors = len(anchors)\ny_true = [Input(shape=(416//{0:32, 1:16, 2:8}[l], 416//{0:32, 1:16, 2:8}[l], \n num_anchors//3, 5*num_classes)) for l in range(3)]\ny_true\nprint(np.shape(a[0][1]))\nprint(np.shape(a[0][2]))\nprint(np.shape(a[0][3]))", "(32, 13, 13, 3, 15)\n(32, 26, 26, 3, 15)\n(32, 52, 52, 3, 15)\n" ], [ "ignore_thresh=.5 \ny_true_nonimg=b[0][1:4]\ntry:\n print(yolo_outputs[0][0][0][0])\n print('ok')\nexcept:\n yolo_outputs=y_true_nonimg\n print('not ok')\nnum_layers = len(anchors)//3 # default setting\nanchor_mask = [[6,7,8], [3,4,5], [0,1,2]] if num_layers==3 else [[3,4,5], [1,2,3]]\n\nm=32\nmf=K.cast(m,'float32')\ninput_shape = K.cast(input_shape,'float32')\ngrid_shapes = [K.cast(K.shape(yolo_outputs[l])[1:3], 'float32') for l in range(num_layers)]\nloss = 0\nfor l in range(num_layers):\n num_anchors = len(anchors[anchor_mask[l]])\n now_y_true = y_true_nonimg[l]\n now_y_true = K.cast(now_y_true,'float32')\n now_y_true 
= K.reshape(now_y_true,\n [-1,grid_shapes[l][0],grid_shapes[l][1],num_anchors,num_classes,5])\n object_mask = now_y_true[..., 4:5]\n grid, raw_pred, pred_xy, pred_wh = yolo_head_pairs(yolo_outputs[l],\n anchors[anchor_mask[l]], num_classes, input_shape, calc_loss=True)\n #grid_rs=K.reshape(K.repeat_elements(grid,num_classes,axis=-1),[-1,grid_shapes[l][0],grid_shapes[l][1],1,num_classes,2])\n pred_box = K.concatenate([pred_xy, pred_wh])\n raw_true_xy = now_y_true[..., :2]#*grid_shapes[l][::-1]#- grid_rs\n raw_true_wh = K.log(now_y_true[..., 2:4] / anchors[anchor_mask[l]] * input_shape[::-1])\n raw_true_wh = K.switch(object_mask, raw_true_wh, K.zeros_like(raw_true_wh)) # avoid log(0)=-inf\n box_loss_scale = 2 - now_y_true[...,2:3]*now_y_true[...,3:4]\n \n # Find ignore mask, iterate over each of batch.\n ignore_mask = tf.TensorArray(('float32'), size=1, dynamic_size=True)\n object_mask_bool = K.cast(object_mask, 'bool')\n def loop_body(b, ignore_mask):\n true_box = tf.boolean_mask(now_y_true[b,...,0:4], object_mask_bool[b,...,0])\n iou = box_iou(pred_box[b], true_box)\n best_iou = K.max(iou, axis=-1)\n ignore_mask = ignore_mask.write(b, K.cast(best_iou<ignore_thresh, 'float32'))\n return b+1, ignore_mask\n _, ignore_mask = K.control_flow_ops.while_loop(lambda b,*args: b<m, loop_body, [0, ignore_mask])\n ignore_mask = ignore_mask.stack()\n ignore_mask = K.expand_dims(ignore_mask, -1)\n\n # K.binary_crossentropy is helpful to avoid exp overflow.\n xy_loss = object_mask * box_loss_scale * K.binary_crossentropy(raw_true_xy, raw_pred[...,0:2], from_logits=True)\n wh_loss = object_mask * box_loss_scale * 0.5 * K.square(raw_true_wh-raw_pred[...,2:4])\n confidence_loss = object_mask * K.binary_crossentropy(object_mask, raw_pred[...,4:5], from_logits=True)+ \\\n (1-object_mask) * K.binary_crossentropy(object_mask, raw_pred[...,4:5], from_logits=True) * ignore_mask\n #class_loss = object_mask * K.binary_crossentropy(true_class_probs, raw_pred[...,5:], from_logits=True)\n\n xy_loss = K.sum(xy_loss) / mf\n wh_loss = K.sum(wh_loss) / mf\n confidence_loss = K.sum(confidence_loss) / mf\n #class_loss = K.sum(class_loss) / mf\n loss += xy_loss + wh_loss + confidence_loss #+ class_loss\n\nsess=tf.Session()\nprint(raw_true_xy.eval(session=sess)[raw_true_xy.eval(session=sess)>1])\nprint(raw_pred[...,0:2].eval(session=sess))\nprint(loss.eval(session=sess))\nprint(xy_loss.eval(session=sess))\nprint(wh_loss.eval(session=sess))\nprint('next step\\n')\n", "[ 0.02815166 -0.00449173 0.00947604 0.01065763 -0.05421416 0.01215659\n -0.00580604 0.02706541 -0.04757445 -0.01695506 0.03142005 -0.00863177\n -0.04458168 0.01626821 0.02219467 -0.01637337 -0.02595228 0.00523904\n 0.00378977 -0.00526657 -0.02860149 0.02228234 -0.0150856 0.00558145\n 0.00548915 0.00591929 0.00499514 0.03953756 0.02466222 -0.00201772\n 0.01073603 0.02615837 0.02380669 0.01783217 -0.01763675 -0.01497953\n -0.04955422 -0.05427511 0.02567083 0.01498314 0.01911949 0.00971386\n 0.00671473 -0.00980505 -0.03852819]\nok\n[2.78125 2.9615386 2.0528846 1.1538461 2.3052886 1.1466346 1.8653846\n 1.15625 1.5288461 1.1802884 1.4423077 1.2283654 2.0745192 2.3100963\n 2.0745192 2.2644231 2.8629808 2.3245192 3.0120192 2.2524037 3.1490386\n 2.2572114 2.3557692 1.1394231 1.4471154 1.0480769 1.0384616 1.8653846\n 1.0769231]\n[[[[[[-7.76199903e-03 -2.52986327e-04]\n [ 1.91877375e-03 -4.39084321e-03]\n [-3.85966478e-03 -9.47872410e-04]]\n\n [[-3.35405860e-03 -3.26556806e-03]\n [ 1.71350595e-03 -7.65158911e-04]\n [-1.15225147e-02 2.96236784e-03]]\n\n [[ 3.14655202e-03 
4.09239717e-03]\n [-2.54756701e-03 5.70710562e-03]\n [ 5.48732188e-03 6.04594359e-03]]]\n\n\n [[[-1.03132110e-02 6.96292627e-05]\n [ 6.90172520e-03 -2.35764729e-03]\n [-5.33053419e-03 -4.48967272e-04]]\n\n [[-7.00793136e-03 -8.29854328e-03]\n [-2.31352562e-04 4.29451793e-05]\n [-1.02665899e-02 6.87506609e-03]]\n\n [[ 9.68858693e-03 4.90171043e-03]\n [-1.36373754e-04 8.89890734e-03]\n [ 1.80912167e-02 1.44468108e-02]]]\n\n\n [[[-1.02271270e-02 -1.88350410e-03]\n [ 9.44433454e-03 -6.39465731e-03]\n [-6.35877950e-03 1.52410255e-04]]\n\n [[-7.82866031e-03 -1.09551344e-02]\n [ 1.99061004e-03 5.16441232e-03]\n [-1.12827783e-02 3.25339008e-03]]\n\n [[ 8.46600626e-03 4.97515779e-03]\n [-1.63862575e-03 6.51614880e-03]\n [ 1.93055253e-02 1.61516201e-02]]]\n\n\n ...\n\n\n [[[-1.33740660e-02 -2.48516118e-03]\n [ 1.03638759e-02 -4.89518000e-03]\n [-4.27882932e-03 -3.82950739e-03]]\n\n [[-6.43373607e-03 -1.07999733e-02]\n [ 4.35012206e-03 4.35466319e-03]\n [-1.04666715e-02 4.64146212e-03]]\n\n [[ 1.07766017e-02 5.57421055e-03]\n [ 2.10409006e-03 3.85614205e-03]\n [ 1.72884241e-02 1.79704912e-02]]]\n\n\n [[[-1.17731169e-02 -3.20158969e-03]\n [ 1.28216576e-02 8.89911549e-04]\n [-3.54987755e-03 -3.54702258e-03]]\n\n [[-3.82347335e-03 -8.85835756e-03]\n [ 3.93411703e-03 1.38157024e-03]\n [-1.14257419e-02 4.13418328e-03]]\n\n [[ 8.50957725e-03 1.88292039e-03]\n [ 1.45087822e-03 -8.79353611e-04]\n [ 1.72879174e-02 1.60445906e-02]]]\n\n\n [[[-3.54345515e-03 1.25023001e-03]\n [ 5.82267484e-03 2.29927222e-03]\n [-3.47318710e-03 -2.51886668e-03]]\n\n [[-1.89360732e-03 -1.48971938e-03]\n [-2.95208138e-03 5.89239225e-03]\n [ 2.72243633e-04 7.40337372e-03]]\n\n [[ 9.08785500e-03 -2.44704029e-03]\n [ 3.21517163e-03 -2.30176258e-03]\n [ 1.58859231e-02 9.86805465e-03]]]]\n\n\n\n [[[[-7.33576622e-03 4.99249389e-03]\n [-2.74810544e-03 -5.62091218e-03]\n [-2.75802682e-03 -4.79790010e-03]]\n\n [[-1.43932912e-03 -6.15816424e-03]\n [ 5.51506458e-03 -1.03931117e-03]\n [-1.84240993e-02 9.46638081e-03]]\n\n [[ 6.36587734e-04 6.79224171e-03]\n [-8.76791403e-03 1.35972208e-04]\n [-4.15056897e-03 2.49187369e-03]]]\n\n\n [[[-1.03697153e-02 1.22573720e-02]\n [-2.17581168e-03 4.75693587e-03]\n [-5.24445996e-03 -6.34518825e-03]]\n\n [[-2.96864519e-03 -1.03784902e-02]\n [ 3.02504096e-03 -3.93321132e-03]\n [-1.10714147e-02 1.87590811e-02]]\n\n [[ 2.69073923e-03 8.77895486e-03]\n [-7.85420276e-03 3.35239165e-04]\n [-1.01671426e-03 1.39260394e-02]]]\n\n\n [[[-1.27103534e-02 1.20373098e-02]\n [-5.96305402e-03 2.65730661e-03]\n [-5.69564803e-03 -1.13471523e-02]]\n\n [[-3.87582183e-03 -1.18391160e-02]\n [ 5.56901610e-03 -1.45389757e-03]\n [-1.57784186e-02 1.98563430e-02]]\n\n [[ 5.01069799e-03 5.41892182e-03]\n [-4.04308410e-03 2.75600841e-03]\n [ 1.15251436e-03 1.29162334e-02]]]\n\n\n ...\n\n\n [[[-1.95336491e-02 9.54511017e-03]\n [-1.04618873e-02 4.13260236e-03]\n [-5.71350846e-03 -1.72039699e-02]]\n\n [[-8.57106317e-03 -1.83074512e-02]\n [ 4.21806844e-03 -1.50279282e-03]\n [-1.24380663e-02 1.85247660e-02]]\n\n [[ 1.22883795e-02 5.02491184e-03]\n [ 4.76654246e-03 6.25602156e-03]\n [ 5.21677127e-03 1.74855329e-02]]]\n\n\n [[[-1.67454872e-02 3.61637911e-03]\n [-3.87330796e-03 5.09839458e-03]\n [ 2.49577337e-04 -1.53828468e-02]]\n\n [[-5.31765306e-03 -1.00708725e-02]\n [ 6.73846388e-03 8.33321654e-04]\n [-7.52681773e-03 1.06375692e-02]]\n\n [[ 7.38153746e-03 -1.72713306e-03]\n [ 6.83061732e-03 3.84075241e-03]\n [ 4.60951822e-03 1.51561154e-02]]]\n\n\n [[[-9.68462881e-03 2.80531705e-03]\n [-5.34231402e-03 1.57532550e-03]\n [-3.35103972e-03 
-8.12946912e-03]]\n\n [[ 1.61571981e-04 -1.41667193e-02]\n [ 7.75220012e-03 3.16922506e-03]\n [ 3.71891353e-03 1.19186416e-02]]\n\n [[ 7.92673603e-03 -6.31686300e-04]\n [ 1.09411823e-02 -8.33930739e-04]\n [ 8.57538823e-03 1.42475087e-02]]]]\n\n\n\n [[[[-3.74915265e-03 2.74065277e-03]\n [ 1.17996044e-03 -9.06729046e-03]\n [-4.44851955e-03 -7.29031535e-03]]\n\n [[-5.25426818e-03 -7.41743529e-03]\n [ 4.71266126e-03 1.66781887e-03]\n [-2.02106889e-02 6.75781677e-03]]\n\n [[ 4.77190968e-03 4.84010018e-03]\n [-6.29572570e-03 2.90798442e-03]\n [-3.06543615e-03 4.06238856e-03]]]\n\n\n [[[-5.76713169e-03 2.60478817e-03]\n [ 2.90706754e-03 5.61077322e-04]\n [-1.96736073e-04 -1.18663749e-02]]\n\n [[-2.18720175e-03 -8.50200746e-03]\n [ 1.88174832e-03 -2.29640375e-03]\n [-2.06104890e-02 1.86611637e-02]]\n\n [[ 5.65412175e-03 3.45594995e-03]\n [ 1.36221328e-03 -5.19062299e-03]\n [ 4.98834997e-03 1.71829816e-02]]]\n\n\n [[[-7.65082799e-03 -9.44399857e-04]\n [ 2.78944732e-04 8.14043335e-04]\n [ 4.85044700e-04 -1.56125966e-02]]\n\n [[-3.63376900e-03 -8.06985795e-03]\n [ 4.62348713e-03 -6.18307677e-04]\n [-1.63378064e-02 1.86108891e-02]]\n\n [[ 9.82312858e-03 4.05271258e-03]\n [ 5.08015789e-03 -2.13165302e-03]\n [ 4.83040651e-03 1.94995645e-02]]]\n\n\n ...\n\n\n [[[-5.34637598e-03 -1.68120326e-03]\n [ 1.60990050e-03 3.22415982e-03]\n [ 1.84171984e-03 -1.64417513e-02]]\n\n [[-5.06517710e-03 -8.79945979e-03]\n [ 2.54278933e-03 -4.25824523e-03]\n [-1.08980583e-02 1.85380634e-02]]\n\n [[ 1.30346986e-02 2.42119445e-03]\n [ 5.32433856e-03 -1.34832435e-03]\n [ 1.25289774e-02 2.30085682e-02]]]\n\n\n [[[-2.90621049e-03 -3.54990177e-03]\n [ 4.25137719e-03 1.86823693e-03]\n [ 3.22549557e-03 -1.55547578e-02]]\n\n [[-6.54181873e-04 -3.08579742e-03]\n [ 4.99889394e-03 -1.54172431e-03]\n [-6.69011520e-03 1.40960775e-02]]\n\n [[ 1.26308706e-02 -1.66522732e-04]\n [ 5.89753641e-03 2.84735160e-03]\n [ 5.13770292e-03 1.87338330e-02]]]\n\n\n [[[-1.88976096e-03 -2.64144107e-03]\n [-1.79340038e-03 1.21163926e-03]\n [ 8.57050181e-04 -6.37095375e-03]]\n\n [[ 6.93897367e-04 -1.16348611e-02]\n [ 6.03542710e-03 1.32280902e-03]\n [ 4.74297116e-03 1.18183140e-02]]\n\n [[ 8.09297524e-03 -2.98242853e-03]\n [ 7.39170890e-03 -4.72280954e-04]\n [ 1.12873064e-02 1.52228307e-02]]]]\n\n\n\n ...\n\n\n\n [[[[-8.18965863e-03 1.22589595e-03]\n [ 3.52657074e-03 -1.77281834e-02]\n [-1.65929226e-03 -6.11451920e-03]]\n\n [[-2.86381971e-03 -6.38202578e-03]\n [ 9.26396903e-03 -3.60205350e-03]\n [-2.88737807e-02 5.11881011e-03]]\n\n [[ 2.67670047e-03 3.82103096e-03]\n [-3.79860122e-03 5.16013149e-03]\n [-2.85025151e-03 5.29830204e-03]]]\n\n\n [[[-1.06269689e-02 5.74604375e-03]\n [ 8.45074933e-03 -9.84207913e-03]\n [ 2.92524528e-05 -8.93935002e-03]]\n\n [[-2.23549455e-03 -1.42735373e-02]\n [ 1.02784792e-02 -3.16249672e-03]\n [-2.60877684e-02 2.00848784e-02]]\n\n [[ 6.10642461e-03 -1.50526568e-04]\n [-3.00901069e-04 2.60269339e-03]\n [ 1.15402751e-02 2.15510745e-02]]]\n\n\n [[[-1.20676830e-02 -1.47529368e-04]\n [ 9.46590025e-03 -1.15890866e-02]\n [-3.08609614e-03 -1.67596024e-02]]\n\n [[-7.97883887e-03 -1.23539027e-02]\n [ 1.14940908e-02 -9.18858335e-04]\n [-2.65156943e-02 1.89107731e-02]]\n\n [[ 9.62442812e-03 -2.73206830e-03]\n [-1.98689522e-03 5.58088440e-03]\n [ 1.28706275e-02 2.30202358e-02]]]\n\n\n ...\n\n\n [[[-1.73914153e-02 1.29062240e-03]\n [ 9.95767117e-03 -7.03566801e-03]\n [-3.87502066e-03 -1.72296576e-02]]\n\n [[-9.51092690e-03 -2.00129002e-02]\n [ 4.90484620e-03 -4.17946884e-03]\n [-1.83165632e-02 2.06369944e-02]]\n\n [[ 1.81094874e-02 
6.05582446e-03]\n [ 1.57708791e-03 3.36727453e-03]\n [ 1.80281270e-02 3.53282206e-02]]]\n\n\n [[[-1.25639876e-02 -3.02400021e-03]\n [ 1.25764506e-02 1.27759599e-03]\n [-2.11223611e-04 -1.81264561e-02]]\n\n [[-6.26327517e-03 -1.44967483e-02]\n [ 6.49429997e-03 -4.09129914e-03]\n [-9.95541830e-03 1.50521994e-02]]\n\n [[ 1.36252427e-02 3.78066488e-03]\n [-2.33754443e-04 2.31413380e-03]\n [ 9.28455498e-03 2.58941595e-02]]]\n\n\n [[[-1.01139508e-02 -7.10027805e-03]\n [-2.86288606e-03 2.83333519e-03]\n [-7.13767670e-03 -3.45773157e-03]]\n\n [[-4.56789089e-03 -1.52226640e-02]\n [ 1.32304023e-03 -6.35751698e-04]\n [ 9.32355504e-03 1.75298229e-02]]\n\n [[ 1.43155772e-02 3.92413745e-03]\n [ 9.38621629e-03 -4.61775949e-03]\n [ 2.04747543e-02 2.38332748e-02]]]]\n\n\n\n [[[[-7.73862703e-03 2.15329742e-03]\n [ 5.72428666e-03 -6.24388456e-03]\n [-4.00456972e-03 -9.66663938e-03]]\n\n [[-2.17328314e-03 -8.31293035e-03]\n [ 7.75982672e-03 7.32776185e-04]\n [-2.67834105e-02 7.83493742e-03]]\n\n [[ 3.83453909e-03 4.44730883e-03]\n [-1.26228447e-03 6.58339262e-03]\n [-2.42120354e-03 5.46757877e-03]]]\n\n\n [[[-1.18729705e-02 5.67935733e-03]\n [ 1.06518148e-02 -6.85042818e-04]\n [ 6.36805024e-04 -1.84331574e-02]]\n\n [[-1.89891679e-03 -1.61800832e-02]\n [ 5.66103728e-03 5.00472821e-03]\n [-3.10716406e-02 1.62418671e-02]]\n\n [[ 9.13610682e-03 4.71691135e-03]\n [ 5.88949351e-03 -4.58303513e-03]\n [ 6.54142583e-03 1.42663540e-02]]]\n\n\n [[[-1.24806464e-02 5.57032460e-03]\n [ 7.00602727e-03 -4.19931160e-03]\n [-1.04331295e-03 -2.38824114e-02]]\n\n [[-8.33663344e-03 -1.66263431e-02]\n [ 3.88287473e-03 8.91224574e-03]\n [-3.33716795e-02 1.63045917e-02]]\n\n [[ 1.45271868e-02 7.29134027e-03]\n [ 9.83135030e-03 -2.23084702e-03]\n [ 5.20087313e-03 1.63011514e-02]]]\n\n\n ...\n\n\n [[[-1.62587874e-02 -1.37597066e-03]\n [ 8.29914212e-03 6.29654108e-03]\n [ 1.53977098e-03 -2.40595397e-02]]\n\n [[-8.30811169e-03 -1.26230670e-02]\n [ 1.82399119e-03 5.29420422e-03]\n [-3.41212675e-02 1.47192888e-02]]\n\n [[ 1.56226065e-02 4.23571793e-03]\n [ 1.91591922e-02 -7.96898035e-04]\n [ 1.31761516e-02 2.04496607e-02]]]\n\n\n [[[-1.94809139e-02 -2.99757021e-03]\n [ 7.10007083e-03 7.86880869e-03]\n [ 1.60735287e-03 -1.91395078e-02]]\n\n [[ 9.32090916e-04 -8.71183351e-03]\n [ 7.22217932e-03 5.68055408e-03]\n [-2.15223208e-02 9.02055949e-03]]\n\n [[ 1.68319158e-02 2.92042503e-03]\n [ 1.22905318e-02 5.64834196e-03]\n [-1.27916213e-03 1.58093777e-02]]]\n\n\n [[[-1.52327083e-02 -8.99924524e-03]\n [ 6.83188997e-03 8.35463032e-03]\n [ 2.26305216e-03 -7.08778109e-03]]\n\n [[ 8.72176606e-03 -1.31562389e-02]\n [ 1.44121121e-03 -1.78920606e-03]\n [-3.41504929e-03 1.39645329e-02]]\n\n [[ 8.25930387e-03 -1.32819125e-03]\n [ 1.82127859e-02 5.01021324e-03]\n [ 1.67388860e-02 2.08158214e-02]]]]\n\n\n\n [[[[-1.06925145e-02 3.77292209e-03]\n [ 9.39378235e-03 -3.47171398e-03]\n [-3.42051662e-03 -9.01095197e-03]]\n\n [[ 3.03232484e-03 -3.83500801e-03]\n [ 1.66672433e-03 -5.40463254e-04]\n [-1.35628907e-02 8.84807482e-03]]\n\n [[ 9.46614612e-03 5.51301695e-04]\n [ 5.06644417e-03 8.77985789e-04]\n [-1.74137275e-03 3.88781819e-03]]]\n\n\n [[[-1.35979326e-02 3.29593336e-03]\n [ 7.84535985e-03 2.73891794e-03]\n [-2.78001244e-04 -1.53780729e-02]]\n\n [[ 8.66053160e-03 -8.50443263e-03]\n [ 5.86496026e-04 -4.29044059e-03]\n [-1.52284242e-02 1.71949137e-02]]\n\n [[ 1.52285853e-02 -4.44630440e-03]\n [ 1.52613381e-02 -6.84473477e-03]\n [ 6.08425122e-03 1.48353027e-02]]]\n\n\n [[[-1.31760631e-02 -6.72418566e-04]\n [ 8.40505119e-03 -1.51167263e-03]\n [ 1.29994960e-03 
-1.75491143e-02]]\n\n [[ 5.52670844e-03 -1.26623530e-02]\n [-3.83573468e-03 -1.37023348e-03]\n [-1.72257703e-02 1.77710652e-02]]\n\n [[ 2.36052554e-02 -7.24755321e-03]\n [ 1.29102301e-02 -9.11834277e-03]\n [ 9.77391750e-03 1.95862558e-02]]]\n\n\n ...\n\n\n [[[-1.13088982e-02 -1.94944220e-03]\n [ 2.36991420e-03 1.16426230e-03]\n [ 4.71507479e-03 -1.94502529e-02]]\n\n [[ 6.12487597e-03 -1.60901714e-02]\n [-6.65014004e-03 -2.22885842e-03]\n [-1.74536500e-02 1.22396238e-02]]\n\n [[ 2.39951182e-02 -6.50753779e-03]\n [ 1.49190091e-02 -7.72895571e-03]\n [ 1.26650669e-02 2.09565796e-02]]]\n\n\n [[[-7.38176983e-03 -3.76592251e-03]\n [-2.74490565e-03 3.39771126e-04]\n [ 2.32577906e-03 -1.79615300e-02]]\n\n [[ 4.86305496e-03 -1.41483368e-02]\n [-8.88498500e-03 -5.09298407e-03]\n [-8.40941351e-03 4.83571505e-03]]\n\n [[ 1.89039074e-02 6.23880827e-04]\n [ 8.88659433e-03 -3.41297220e-03]\n [ 2.75438698e-03 1.56263672e-02]]]\n\n\n [[[-6.97499188e-03 -6.35433616e-03]\n [-6.14867266e-03 1.67819337e-04]\n [ 3.83501593e-03 -9.13862139e-03]]\n\n [[ 1.95316691e-03 -1.19473282e-02]\n [-3.67227895e-03 -1.88097556e-03]\n [ 8.23586073e-04 4.01709741e-03]]\n\n [[ 1.00100636e-02 -6.33813301e-03]\n [ 1.05565237e-02 -4.68333624e-03]\n [ 1.37300538e-02 1.75428763e-02]]]]]\n\n\n\n\n [[[[[-1.04294010e-02 1.98544515e-03]\n [ 4.54818131e-03 -9.41433012e-03]\n [-3.48772621e-03 -3.58328572e-03]]\n\n [[-1.55819720e-03 -3.23674711e-03]\n [ 1.96232344e-03 -2.74393731e-03]\n [-1.42712258e-02 4.49310895e-03]]\n\n [[ 5.59117179e-03 4.41390229e-03]\n [-2.90352688e-03 8.00392963e-03]\n [ 1.09048476e-02 1.12142358e-02]]]\n\n\n [[[-1.33776525e-02 7.81489944e-04]\n [ 7.96931703e-03 -4.30313405e-03]\n [-6.38961140e-03 -1.66459684e-03]]\n\n [[-7.59285269e-03 -1.13919536e-02]\n [ 2.75793369e-03 -4.25262639e-04]\n [-9.58179589e-03 1.02638630e-02]]\n\n [[ 1.38286036e-02 3.82097694e-03]\n [-2.46336684e-03 9.78692155e-03]\n [ 2.34479476e-02 2.38144752e-02]]]\n\n\n [[[-1.47987595e-02 -1.69781933e-03]\n [ 1.03026042e-02 -9.02049895e-03]\n [-9.97501332e-03 -3.65479244e-03]]\n\n [[-6.60537276e-03 -9.69544239e-03]\n [ 1.00686389e-03 3.47442762e-03]\n [-1.35462172e-02 6.79061888e-03]]\n\n [[ 1.33004664e-02 3.97039112e-03]\n [-4.71425476e-03 9.25527234e-03]\n [ 2.36673839e-02 2.34467238e-02]]]\n\n\n ...\n\n\n [[[-1.71155669e-02 -6.26126677e-03]\n [ 8.31147097e-03 -1.20550552e-02]\n [-7.00091058e-03 -6.21549413e-03]]\n\n [[-1.67579427e-02 -1.07086347e-02]\n [ 8.35066009e-03 1.18608843e-03]\n [-1.06402123e-02 1.30929789e-02]]\n\n [[ 1.16379978e-02 1.38644408e-03]\n [ 2.76763370e-04 8.84959009e-03]\n [ 2.46107839e-02 2.52032187e-02]]]\n\n\n [[[-1.09429304e-02 -5.55713288e-03]\n [ 1.20714959e-02 -3.64995888e-03]\n [-7.36091333e-03 -6.04128279e-03]]\n\n [[-1.23426467e-02 -1.03127118e-02]\n [ 3.98734352e-03 1.07459782e-03]\n [-8.91280081e-03 4.90815844e-03]]\n\n [[ 5.92373963e-03 -1.86221721e-03]\n [ 5.34778973e-03 3.24093597e-03]\n [ 2.18948238e-02 1.60489678e-02]]]\n\n\n [[[-2.27978220e-03 7.73747975e-04]\n [ 6.50531473e-03 5.51253848e-04]\n [-5.17523661e-03 -9.22906958e-03]]\n\n [[-2.82998034e-03 -2.45194067e-03]\n [-5.29819110e-04 9.87308379e-03]\n [ 2.52706185e-03 9.91593581e-03]]\n\n [[ 8.72903876e-03 -4.01970651e-03]\n [ 3.81746073e-03 -3.16723227e-03]\n [ 2.10441444e-02 8.25122930e-03]]]]\n\n\n\n [[[[-1.31903673e-02 6.70172228e-03]\n [-1.73449493e-03 -1.02359140e-02]\n [-3.36074678e-04 -6.92624832e-03]]\n\n [[-6.74682250e-03 -8.15319177e-03]\n [ 1.03247110e-02 -6.38899533e-03]\n [-2.88125891e-02 1.19270729e-02]]\n\n [[ 1.50083774e-03 
9.20765661e-03]\n [-1.26400748e-02 4.48698574e-06]\n [-3.74960923e-03 6.83074072e-03]]]\n\n\n [[[-1.61607806e-02 2.07145624e-02]\n [-2.70783273e-03 7.96984602e-03]\n [-9.46568511e-03 -8.94248951e-03]]\n\n [[-1.22531345e-02 -1.50459819e-02]\n [ 2.33600219e-03 -1.31984819e-02]\n [-1.68261994e-02 2.84525882e-02]]\n\n [[ 6.26435736e-03 1.36863962e-02]\n [-1.30800484e-02 4.17140499e-03]\n [ 6.04020758e-03 2.57200934e-02]]]\n\n\n [[[-2.34927721e-02 1.52724721e-02]\n [-6.93945307e-03 1.75249844e-03]\n [-1.46960337e-02 -1.48330135e-02]]\n\n [[-8.49601626e-03 -1.41448164e-02]\n [ 5.38661319e-04 -4.96139657e-03]\n [-2.22073533e-02 2.12921351e-02]]\n\n [[ 1.20713683e-02 5.60118537e-03]\n [-4.73734364e-03 1.11004664e-02]\n [ 3.70801752e-03 2.69281771e-02]]]\n\n\n ...\n\n\n [[[-3.27272639e-02 4.08153888e-03]\n [-8.08612071e-03 -1.60190824e-03]\n [-1.24287149e-02 -2.20747888e-02]]\n\n [[-1.73670165e-02 -1.98091771e-02]\n [ 8.95773154e-03 -4.01201705e-03]\n [-2.00785361e-02 2.48337127e-02]]\n\n [[ 2.29369998e-02 4.56533907e-03]\n [ 3.85594554e-03 1.57157294e-02]\n [ 9.50247888e-03 3.50568295e-02]]]\n\n\n [[[-2.22341958e-02 2.30816985e-03]\n [-2.82312976e-04 6.67019654e-03]\n [-6.06460823e-03 -2.18360145e-02]]\n\n [[-1.65792145e-02 -1.14066070e-02]\n [ 9.70410183e-03 -1.59525592e-03]\n [-1.19788628e-02 1.55835990e-02]]\n\n [[ 1.51085593e-02 1.55275536e-03]\n [ 8.21915641e-03 1.02456044e-02]\n [ 1.58783719e-02 2.60655861e-02]]]\n\n\n [[[-1.18482467e-02 6.43728347e-03]\n [-6.07831031e-03 5.18183922e-03]\n [-8.21985025e-03 -1.34153888e-02]]\n\n [[-5.38861984e-03 -1.61913130e-02]\n [ 9.16302949e-03 9.14857537e-03]\n [ 8.39931052e-03 1.92734785e-02]]\n\n [[ 1.68100540e-02 -2.09858268e-03]\n [ 1.49276676e-02 -5.33284945e-03]\n [ 2.28764992e-02 1.49898091e-02]]]]\n\n\n\n [[[[-1.23598659e-02 6.32433360e-03]\n [ 3.06535629e-03 -1.59654114e-02]\n [-7.32522458e-03 -7.37537816e-03]]\n\n [[-7.79392384e-03 -1.16494317e-02]\n [ 8.58231261e-03 -3.56348231e-03]\n [-3.54733653e-02 1.00746155e-02]]\n\n [[ 8.31136573e-03 8.09166767e-03]\n [-1.12568401e-02 4.65652766e-03]\n [-4.57867654e-03 9.34534986e-03]]]\n\n\n [[[-1.14705442e-02 1.05236126e-02]\n [ 3.95652140e-03 -2.40879389e-03]\n [-7.63754919e-03 -9.77241714e-03]]\n\n [[-1.23218102e-02 -1.76117830e-02]\n [ 4.45556967e-03 -6.39298651e-03]\n [-2.42385138e-02 3.00099291e-02]]\n\n [[ 1.53658483e-02 8.36069882e-03]\n [-5.06219175e-03 -1.06609031e-03]\n [ 8.18651076e-03 3.43098044e-02]]]\n\n\n [[[-1.72837898e-02 2.44047237e-03]\n [ 1.08209520e-03 -8.45646579e-03]\n [-5.56948828e-03 -1.70365851e-02]]\n\n [[-1.47254905e-02 -1.87636148e-02]\n [ 6.45905547e-03 1.38711499e-03]\n [-2.33493056e-02 2.24006139e-02]]\n\n [[ 2.11613569e-02 6.50362112e-03]\n [-1.34526915e-03 6.45510526e-03]\n [ 1.25767523e-02 3.59675251e-02]]]\n\n\n ...\n\n\n [[[-2.53323149e-02 -2.84892065e-03]\n [ 1.39211130e-03 -4.76094289e-03]\n [-1.47916062e-03 -2.30430961e-02]]\n\n [[-2.33867858e-02 -2.66492087e-02]\n [ 9.20467917e-03 -7.98927899e-03]\n [-1.63237769e-02 3.17156352e-02]]\n\n [[ 2.41378769e-02 4.29770071e-03]\n [ 3.23916203e-03 7.87086785e-03]\n [ 2.18589865e-02 4.29828651e-02]]]\n\n\n [[[-1.68861542e-02 -4.14957991e-03]\n [ 8.27945024e-03 3.77127947e-03]\n [-4.49350657e-04 -2.28677671e-02]]\n\n [[-1.71122141e-02 -9.74429213e-03]\n [ 1.08371079e-02 3.05746263e-03]\n [-1.05575705e-02 2.03667991e-02]]\n\n [[ 1.73044987e-02 -7.08024658e-04]\n [ 2.81188008e-03 6.52952166e-03]\n [ 1.58096496e-02 2.89265960e-02]]]\n\n\n [[[-7.43328873e-03 9.44130064e-04]\n [-3.11569800e-03 4.54238616e-03]\n [-7.10799778e-03 
-1.34932017e-02]]\n\n [[-6.67653978e-03 -2.37084981e-02]\n [ 9.78232268e-03 6.15719846e-03]\n [ 1.43399397e-02 2.17536930e-02]]\n\n [[ 1.60944983e-02 -2.72888504e-03]\n [ 1.29044876e-02 -7.26819225e-03]\n [ 2.77319252e-02 2.26398166e-02]]]]\n\n\n\n ...\n\n\n\n [[[[-9.65207815e-03 3.22950166e-03]\n [ 9.02298838e-03 -1.84213836e-02]\n [-6.45675370e-03 -1.91597510e-02]]\n\n [[-4.56379680e-03 -1.14006279e-02]\n [ 1.12596955e-02 5.42841433e-03]\n [-4.13623080e-02 7.95861334e-03]]\n\n [[ 4.64803400e-03 4.90003871e-03]\n [-4.79869498e-03 6.96613593e-03]\n [ 7.92208535e-04 4.84793261e-03]]]\n\n\n [[[-1.12250410e-02 4.95846290e-03]\n [ 1.20371645e-02 -9.24485736e-03]\n [ 9.99593269e-03 -2.80725565e-02]]\n\n [[ 1.12910327e-02 -1.71852577e-02]\n [ 1.02250632e-02 -1.06539791e-02]\n [-4.04728800e-02 3.08906492e-02]]\n\n [[ 7.77079957e-03 -4.79996111e-03]\n [ 5.66470530e-03 -3.43636610e-03]\n [ 1.61830373e-02 3.19879614e-02]]]\n\n\n [[[-6.85119117e-03 -4.42019384e-03]\n [ 9.07407235e-03 -1.59482416e-02]\n [ 3.67901172e-03 -3.29397209e-02]]\n\n [[ 6.22715522e-03 -1.58981476e-02]\n [ 8.97001661e-03 -4.15756227e-03]\n [-3.94702144e-02 2.69035175e-02]]\n\n [[ 2.03554872e-02 1.78906473e-03]\n [ 7.94778299e-03 2.20357324e-03]\n [ 1.57134030e-02 3.80363911e-02]]]\n\n\n ...\n\n\n [[[-5.68999955e-03 -5.44343330e-03]\n [ 4.39294800e-03 -3.03630624e-03]\n [ 1.40269399e-02 -3.91707383e-02]]\n\n [[-6.07995549e-04 -2.69536600e-02]\n [ 9.78600979e-03 -6.71601063e-03]\n [-3.47342901e-02 2.92317830e-02]]\n\n [[ 3.18655409e-02 7.54419900e-03]\n [ 1.23150637e-02 5.07510500e-04]\n [ 2.47873757e-02 5.13397008e-02]]]\n\n\n [[[-9.37189255e-03 -8.75839405e-03]\n [ 4.08624811e-03 7.10754236e-03]\n [ 1.40144108e-02 -3.76130156e-02]]\n\n [[ 3.24853673e-03 -1.57417450e-02]\n [ 6.74699945e-03 -1.26630925e-02]\n [-1.74764749e-02 2.22485829e-02]]\n\n [[ 2.93868333e-02 2.44149473e-03]\n [ 4.41668136e-03 5.24617033e-03]\n [ 1.02798603e-02 4.01199870e-02]]]\n\n\n [[[-9.78569407e-03 -1.59883760e-02]\n [-6.66000787e-03 4.68698423e-03]\n [ 8.56783707e-03 -1.13959443e-02]]\n\n [[-4.20816988e-03 -2.49232464e-02]\n [ 7.54829822e-03 -2.12895661e-03]\n [ 1.10682165e-02 2.37462595e-02]]\n\n [[ 2.16447636e-02 -1.85776479e-03]\n [ 1.27231469e-02 -7.55728548e-03]\n [ 2.52786502e-02 3.53484787e-02]]]]\n\n\n\n [[[[ 5.83522196e-04 4.18807380e-03]\n [ 7.24444166e-03 -7.81731028e-03]\n [-3.95857403e-03 -1.93665754e-02]]\n\n [[-4.28702869e-03 -9.15585179e-03]\n [ 5.12191467e-03 2.95405928e-03]\n [-3.03003788e-02 5.47038438e-03]]\n\n [[ 1.09191064e-03 4.45809122e-03]\n [ 2.08606850e-03 5.32714184e-03]\n [ 4.29456076e-03 2.65543070e-03]]]\n\n\n [[[-4.00556950e-03 8.79306812e-03]\n [ 1.05702970e-02 2.52625230e-03]\n [ 5.63769788e-03 -2.82643549e-02]]\n\n [[ 8.53647571e-03 -1.39596015e-02]\n [ 6.15884969e-03 -3.30412178e-03]\n [-3.92178558e-02 2.26573907e-02]]\n\n [[ 4.87951608e-03 4.62041527e-04]\n [ 1.64882466e-02 -8.06625094e-03]\n [ 1.87354330e-02 1.64298117e-02]]]\n\n\n [[[-5.17453020e-03 3.60917952e-03]\n [ 8.51019192e-03 1.31148507e-03]\n [ 3.79683031e-03 -3.87029611e-02]]\n\n [[ 9.36952536e-04 -1.76380388e-02]\n [ 2.33022519e-03 -2.23091338e-03]\n [-3.93984877e-02 1.78745650e-02]]\n\n [[ 1.74596626e-02 1.12168444e-02]\n [ 1.58224553e-02 -4.69980808e-03]\n [ 1.23395193e-02 1.72721352e-02]]]\n\n\n ...\n\n\n [[[-5.21048158e-03 -4.71633300e-03]\n [-6.28086680e-04 6.56004902e-03]\n [ 1.54029364e-02 -3.72909829e-02]]\n\n [[ 6.88057858e-04 -1.75378434e-02]\n [ 2.28803582e-03 -9.24072135e-03]\n [-3.24056782e-02 1.44218234e-02]]\n\n [[ 2.55807415e-02 
1.21413218e-02]\n [ 2.07778849e-02 -5.65390859e-04]\n [ 1.81020815e-02 2.79181283e-02]]]\n\n\n [[[-1.10537680e-02 -5.29450178e-03]\n [-2.57347291e-03 9.65437293e-03]\n [ 4.57762321e-03 -3.43920588e-02]]\n\n [[ 7.83387758e-03 -1.08519625e-02]\n [ 4.28032642e-03 -8.77804682e-03]\n [-1.56058110e-02 1.40315201e-02]]\n\n [[ 2.53686849e-02 1.08364010e-02]\n [ 5.29614929e-03 7.93692935e-03]\n [-7.64168857e-04 2.40916368e-02]]]\n\n\n [[[-5.60860056e-03 -1.11755608e-02]\n [-1.30807946e-03 6.97721401e-03]\n [ 1.18941600e-02 -1.19161392e-02]]\n\n [[ 2.14122678e-03 -1.71201918e-02]\n [ 3.45002813e-03 1.92713545e-04]\n [ 3.11087584e-03 1.50130577e-02]]\n\n [[ 1.53272888e-02 7.67924474e-04]\n [ 1.57778878e-02 -2.70513794e-03]\n [ 2.10040286e-02 2.87625864e-02]]]]\n\n\n\n [[[[-1.02728894e-02 6.37871167e-03]\n [ 9.36744828e-03 -2.45805155e-03]\n [-4.91190283e-03 -1.18369777e-02]]\n\n [[ 7.45670637e-03 -4.99679567e-03]\n [ 2.20782124e-03 -2.33782781e-03]\n [-1.15093449e-02 8.51443037e-03]]\n\n [[ 8.67277011e-03 -2.56107363e-04]\n [ 9.72921215e-03 3.53469513e-03]\n [ 2.22031400e-03 1.10962836e-03]]]\n\n\n [[[-1.25303380e-02 2.49821041e-03]\n [ 8.46095011e-03 -1.09719869e-03]\n [-8.71226715e-04 -1.66495331e-02]]\n\n [[ 1.70729831e-02 -4.85611707e-03]\n [ 3.49744107e-03 -1.26032466e-02]\n [-1.60390679e-02 1.47851072e-02]]\n\n [[ 1.42476736e-02 -6.46807859e-03]\n [ 2.09461357e-02 -5.02426643e-03]\n [ 1.03779230e-02 1.48924142e-02]]]\n\n\n [[[-1.13324672e-02 -5.14203857e-04]\n [ 1.22349430e-02 -5.12100523e-04]\n [ 3.30644799e-03 -2.05619913e-02]]\n\n [[ 1.38427727e-02 -6.43980084e-03]\n [-7.11750519e-03 -1.32297017e-02]\n [-2.12325919e-02 1.27947992e-02]]\n\n [[ 1.94506869e-02 -4.67410730e-03]\n [ 1.87730994e-02 -8.41051619e-03]\n [ 1.21516157e-02 2.40504462e-02]]]\n\n\n ...\n\n\n [[[-6.66342210e-03 -6.59024063e-03]\n [ 7.66330399e-03 4.29338543e-03]\n [ 1.55892204e-02 -2.41726004e-02]]\n\n [[ 7.81588349e-03 -8.20273906e-03]\n [-7.27705238e-03 -1.59718469e-02]\n [-1.48718655e-02 6.01593591e-03]]\n\n [[ 1.91380233e-02 -5.33218496e-03]\n [ 1.62327830e-02 -1.34589169e-02]\n [ 1.80279147e-02 2.29121596e-02]]]\n\n\n [[[-7.82454945e-03 -5.48409019e-03]\n [-2.46319221e-03 2.23805173e-03]\n [ 8.88891891e-03 -2.73702927e-02]]\n\n [[ 5.09150699e-03 -9.88726225e-03]\n [-8.87249503e-03 -1.31668299e-02]\n [-3.78509751e-03 9.18499660e-03]]\n\n [[ 2.02405266e-02 6.87324768e-03]\n [ 6.21943735e-03 -5.93376020e-03]\n [ 6.88920682e-03 2.02840772e-02]]]\n\n\n [[[-2.73186341e-03 -9.69717279e-03]\n [-6.11920655e-03 1.80502573e-03]\n [ 6.46934658e-03 -9.66373272e-03]]\n\n [[-4.96778055e-04 -1.14221834e-02]\n [-4.59919730e-03 -4.29121265e-03]\n [ 2.88335700e-03 2.37574847e-03]]\n\n [[ 8.99798702e-03 -1.61658146e-03]\n [ 6.89592771e-03 -8.81218724e-03]\n [ 1.35627324e-02 2.07037255e-02]]]]]\n\n\n\n\n [[[[[-9.73742921e-03 1.28139829e-04]\n [ 2.14325590e-03 -6.47787284e-03]\n [-5.42727858e-03 -9.15969780e-04]]\n\n [[-2.41188542e-03 -5.36508393e-03]\n [ 2.46389327e-03 -2.38168606e-04]\n [-1.26702935e-02 2.68937997e-03]]\n\n [[ 5.21141011e-03 4.73428750e-03]\n [-2.68021761e-03 7.05668470e-03]\n [ 7.06846546e-03 7.13633420e-03]]]\n\n\n [[[-1.27183832e-02 1.40522444e-03]\n [ 9.39492974e-03 -3.81360948e-03]\n [-4.77906270e-03 7.47643120e-04]]\n\n [[-5.21491887e-03 -9.84201767e-03]\n [-3.90616304e-04 -4.20569151e-04]\n [-1.30653251e-02 8.58136080e-03]]\n\n [[ 1.24916406e-02 4.19125985e-03]\n [ 2.85572466e-03 1.21216346e-02]\n [ 2.35821605e-02 1.97764803e-02]]]\n\n\n [[[-1.21002784e-02 -2.48315046e-03]\n [ 1.40915662e-02 -8.20836984e-03]\n 
... (remainder of this large nested float array output truncated for readability) ...]]]]]]\n" ], [ "sess=tf.Session()\nprint(pred_box)\n\nyteval=now_y_true.eval(session=sess)[...,:5]\nprint(yteval[yteval>1])\nc=yteval[np.sum(yteval[...,:5],axis=5)>0]\nprint(c)\nxylosssess=xy_loss.eval(session=sess)\n#object_mask * box_loss_scale * K.binary_crossentropy(raw_true_xy, raw_pred[...,0:2], from_logits=True)\n#omeval=object_mask.eval(session=sess)\n#om=omeval[omeval>0]\n#print(om)\nblseval=(object_mask * box_loss_scale).eval(session=sess)\nbls=blseval[blseval<0]\nprint(bls)\nbceeval=(object_mask * box_loss_scale * 0.5 * K.square(raw_true_xy-raw_pred[...,0:2])).eval(session=sess)\nrtxy=raw_true_xy.eval(session=sess)\nrped=raw_pred.eval(session=sess)[...,0:2]\nprint([np.shape(rtxy),np.shape(rped),np.shape(bceeval<0)])\nprint(['rtxy: ',rtxy[bceeval<0]])\nprint(rped[bceeval<0])\n\nprint(bceeval[bceeval<0])\nprint(xylosssess)\n#print(['pred xy :',pred_xy.eval(session=sess)[0,...,1,2,:2]])\n#print(['pred wh :',pred_wh.eval(session=sess)[0,...,1,2,:2]])\n#print(tf.boolean_mask(now_y_true[0,...,0:4],object_mask_bool[0,...,0]))\n\nprint(['now y true :',now_y_true.eval(session=sess)[0,...,2,2,0:4]])\nprint(object_mask_bool[0,...,0])\nprint(grid_shapes[l][::-1])\nprint(grid.eval(session=sess)[51][51])\n#print(raw_true_xy.eval(session=sess))\n'''\nprint(now_y_true[0,...,0:4])\nprint(object_mask_bool[0,...,0])\nprint(K.repeat_elements(grid,num_classes,axis=-1))\nprint(K.reshape(K.repeat_elements(grid,num_classes,axis=-1),[-1,grid_shapes[l][0],grid_shapes[l][1],1,num_classes,2]))\nprint(now_y_true)\nprint(now_y_true[..., :2]*grid_shapes[l][::-1])\nprint(K.cast(object_mask,dtype='bool'))\nprint('raw true xy: ',raw_true_xy)\nprint('raw pred 0:2: ',raw_pred[...,0:2])\n'''", "Tensor(\"concat_34:0\", shape=(32, 52, 
52, 3, 3, 4), dtype=float32)\n[2.5913463 1.6971154 2.9158654 1.6971154 2.9855769 1.6947116 2.8413463\n 1.7259616 1.9447116 1.2764423 2.8557692 2.7451923 2.7860577 2.7548077\n 2.8581731 2.78125 1.1826923 2.7956731 1.4447116 2.7980769 1.8173077\n 2.7956731 2.3653846 1.03125 1.46875 3.7620192 1.2884616 1.2956731\n 1.3942307 1.4543269 1.3725961 1.2908654 1.9975961 1.1923077 3.2355769\n 1.6274039 3.2403846 1.4783654 1.3581731 1.3221154 1.2908654 1.3125\n 1.2692307 1.25 1.2019231 1.2211539 1.3245193 1.4302884 1.3341346\n 1.0961539 1.71875 1.0600961 2.6129808 1.21875 2.6009614 2.3894231\n 1.6225961]\n[[0.71875 0.6298077 0.05048077 0.13461539 1. ]\n [0.71875 0.61538464 0.04567308 0.10336538 1. ]\n [0.7235577 0.5793269 0.01923077 0.03125 1. ]\n ...\n [0.56490386 0.5168269 0.03125 0.1201923 1. ]\n [0.56490386 0.5168269 0.03125 0.1201923 1. ]\n [0.56490386 0.46634614 0.01201923 0.01682692 1. ]]\n[]\n[(32, 52, 52, 3, 3, 2), (32, 52, 52, 3, 3, 2), (32, 52, 52, 3, 3, 2)]\n['rtxy: ', array([], dtype=float32)]\n[]\n[]\n-2568115.2\n['now y true :', array([[[0., 0., 0., 0.],\n [0., 0., 0., 0.],\n [0., 0., 0., 0.],\n ...,\n [0., 0., 0., 0.],\n [0., 0., 0., 0.],\n [0., 0., 0., 0.]],\n\n [[0., 0., 0., 0.],\n [0., 0., 0., 0.],\n [0., 0., 0., 0.],\n ...,\n [0., 0., 0., 0.],\n [0., 0., 0., 0.],\n [0., 0., 0., 0.]],\n\n [[0., 0., 0., 0.],\n [0., 0., 0., 0.],\n [0., 0., 0., 0.],\n ...,\n [0., 0., 0., 0.],\n [0., 0., 0., 0.],\n [0., 0., 0., 0.]],\n\n ...,\n\n [[0., 0., 0., 0.],\n [0., 0., 0., 0.],\n [0., 0., 0., 0.],\n ...,\n [0., 0., 0., 0.],\n [0., 0., 0., 0.],\n [0., 0., 0., 0.]],\n\n [[0., 0., 0., 0.],\n [0., 0., 0., 0.],\n [0., 0., 0., 0.],\n ...,\n [0., 0., 0., 0.],\n [0., 0., 0., 0.],\n [0., 0., 0., 0.]],\n\n [[0., 0., 0., 0.],\n [0., 0., 0., 0.],\n [0., 0., 0., 0.],\n ...,\n [0., 0., 0., 0.],\n [0., 0., 0., 0.],\n [0., 0., 0., 0.]]], dtype=float32)]\nTensor(\"strided_slice_295:0\", shape=(52, 52, 3, 3), dtype=bool)\nTensor(\"strided_slice_296:0\", shape=(2,), dtype=float32)\n[[51. 
51.]]\n" ], [ "sess=tf.Session()\nignore_thresh=.5 \nfor y_true in data_generator_wrapper_pairs(lines[:num_train], 32, input_shape, anchors, num_classes):\n y_true_nonimg=y_true[0][1:4]\n try:\n print(yolo_outputs.shape)\n print('ok')\n except:\n yolo_outputs=y_true_nonimg\n num_layers = len(anchors)//3 # default setting\n anchor_mask = [[6,7,8], [3,4,5], [0,1,2]] if num_layers==3 else [[3,4,5], [1,2,3]]\n\n m=32\n mf=K.cast(m,'float32')\n input_shape = K.cast(input_shape,'float32')\n grid_shapes = [K.cast(K.shape(yolo_outputs[l])[1:3], 'float32') for l in range(num_layers)]\n loss = 0\n for l in range(num_layers):\n num_anchors = len(anchors[anchor_mask[l]])\n now_y_true = y_true_nonimg[l]\n now_y_true = K.cast(now_y_true,'float32')\n now_y_true = K.reshape(now_y_true,\n [-1,grid_shapes[l][0],grid_shapes[l][1],num_anchors,num_classes,5])\n object_mask = now_y_true[..., 4:5]\n grid, raw_pred, pred_xy, pred_wh = yolo_head_pairs(yolo_outputs[l],\n anchors[anchor_mask[l]], num_classes, input_shape, calc_loss=True)\n #grid_rs=K.reshape(K.repeat_elements(grid,num_classes,axis=-1),[-1,grid_shapes[l][0],grid_shapes[l][1],1,num_classes,2])\n pred_box = K.concatenate([pred_xy, pred_wh])\n raw_true_xy = now_y_true[..., :2]#*grid_shapes[l][::-1]#- grid_rs\n raw_true_wh = K.log(now_y_true[..., 2:4] / anchors[anchor_mask[l]] * input_shape[::-1])\n raw_true_wh = K.switch(object_mask, raw_true_wh, K.zeros_like(raw_true_wh)) # avoid log(0)=-inf\n box_loss_scale = 2 - now_y_true[...,2:3]*now_y_true[...,3:4]\n \n # Find ignore mask, iterate over each of batch.\n ignore_mask = tf.TensorArray(('float32'), size=1, dynamic_size=True)\n object_mask_bool = K.cast(object_mask, 'bool')\n def loop_body(b, ignore_mask):\n true_box = tf.boolean_mask(now_y_true[b,...,0:4], object_mask_bool[b,...,0])\n iou = box_iou(pred_box[b], true_box)\n best_iou = K.max(iou, axis=-1)\n ignore_mask = ignore_mask.write(b, K.cast(best_iou<ignore_thresh, 'float32'))\n return b+1, ignore_mask\n _, ignore_mask = K.control_flow_ops.while_loop(lambda b,*args: b<m, loop_body, [0, ignore_mask])\n ignore_mask = ignore_mask.stack()\n ignore_mask = K.expand_dims(ignore_mask, -1)\n\n # K.binary_crossentropy is helpful to avoid exp overflow.\n xy_loss = object_mask * box_loss_scale * K.binary_crossentropy(raw_true_xy, raw_pred[...,0:2], from_logits=True)\n wh_loss = object_mask * box_loss_scale * 0.5 * K.square(raw_true_wh-raw_pred[...,2:4])\n confidence_loss = object_mask * K.binary_crossentropy(object_mask, raw_pred[...,4:5], from_logits=True)+ \\\n (1-object_mask) * K.binary_crossentropy(object_mask, raw_pred[...,4:5], from_logits=True) * ignore_mask\n #class_loss = object_mask * K.binary_crossentropy(true_class_probs, raw_pred[...,5:], from_logits=True)\n\n xy_loss = K.sum(xy_loss) / mf\n wh_loss = K.sum(wh_loss) / mf\n confidence_loss = K.sum(confidence_loss) / mf\n #class_loss = K.sum(class_loss) / mf\n loss += xy_loss + wh_loss + confidence_loss #+ class_loss\n \n print(loss)\n print('next step\\n')\n if np.sum(now_y_true.eval(session=sess).flatten(),axis=0)>=0:\n print(np.sum(now_y_true.eval(session=sess).flatten(),axis=0))\n break\n", "_____no_output_____" ], [ "dataL=lines[:num_train]\ndatanp=[]\nfor b in range(5):\n datanow=dataL[b]\n line=datanow.split()\n #print(line)\n box = np.array([np.array(list(map(int,box.split(',')))) for box in line[1:]])\n datanp.append(box)\ndata_master=[]\ndata_slave=[]\ndatanp=np.array(box)\nprint(datanp)\n", "[[-16 228 92 555 0]\n [ 0 228 92 533 1]\n [ 18 232 58 281 2]\n [ 3 241 96 486 0]\n [ 3 241 96 486 
1]\n [ 30 249 58 277 2]\n [154 212 288 550 0]\n [154 212 288 533 1]\n [195 215 243 266 2]\n [ 76 250 108 318 0]\n [ 76 250 108 318 1]\n [ 92 253 99 262 2]\n [132 259 163 342 0]\n [132 259 163 342 1]\n [140 261 150 274 2]\n [285 242 374 495 0]\n [285 242 374 495 1]\n [313 244 343 282 2]\n [352 224 468 509 0]\n [356 228 463 504 1]\n [391 229 425 272 2]\n [339 227 451 489 0]\n [356 227 452 491 1]\n [373 228 400 261 2]\n [441 232 515 485 0]\n [452 233 510 473 1]\n [459 234 488 272 2]\n [483 252 554 476 0]\n [497 256 553 465 1]\n [506 258 536 292 2]\n [543 254 627 484 0]\n [543 254 627 484 1]\n [576 257 605 291 2]\n [600 256 670 461 0]\n [616 259 654 304 1]\n [631 260 653 294 2]\n [626 215 712 466 0]\n [630 218 712 300 1]\n [651 220 684 263 2]\n [158 269 199 350 0]\n [158 269 187 349 1]\n [168 271 180 286 2]\n [142 239 163 290 0]\n [144 239 161 278 1]\n [150 241 156 248 2]]\n" ], [ "data_slave=[]\ndata_master=[]\nfor ind_datanp in range(len(datanp[...,4])):\n if datanp[...,4][ind_datanp] in slaveclass:\n data_slave.append(datanp[ind_datanp])\n else:\n data_master.append(datanp[ind_datanp])\n\nprint(data_slave)\nprint('\\n')\nprint(data_master)\n", "[array([ 18, 232, 58, 281, 2]), array([ 30, 249, 58, 277, 2]), array([195, 215, 243, 266, 2]), array([ 92, 253, 99, 262, 2]), array([140, 261, 150, 274, 2]), array([313, 244, 343, 282, 2]), array([391, 229, 425, 272, 2]), array([373, 228, 400, 261, 2]), array([459, 234, 488, 272, 2]), array([506, 258, 536, 292, 2]), array([576, 257, 605, 291, 2]), array([631, 260, 653, 294, 2]), array([651, 220, 684, 263, 2]), array([168, 271, 180, 286, 2]), array([150, 241, 156, 248, 2])]\n\n\n[array([-16, 228, 92, 555, 0]), array([ 0, 228, 92, 533, 1]), array([ 3, 241, 96, 486, 0]), array([ 3, 241, 96, 486, 1]), array([154, 212, 288, 550, 0]), array([154, 212, 288, 533, 1]), array([ 76, 250, 108, 318, 0]), array([ 76, 250, 108, 318, 1]), array([132, 259, 163, 342, 0]), array([132, 259, 163, 342, 1]), array([285, 242, 374, 495, 0]), array([285, 242, 374, 495, 1]), array([352, 224, 468, 509, 0]), array([356, 228, 463, 504, 1]), array([339, 227, 451, 489, 0]), array([356, 227, 452, 491, 1]), array([441, 232, 515, 485, 0]), array([452, 233, 510, 473, 1]), array([483, 252, 554, 476, 0]), array([497, 256, 553, 465, 1]), array([543, 254, 627, 484, 0]), array([543, 254, 627, 484, 1]), array([600, 256, 670, 461, 0]), array([616, 259, 654, 304, 1]), array([626, 215, 712, 466, 0]), array([630, 218, 712, 300, 1]), array([158, 269, 199, 350, 0]), array([158, 269, 187, 349, 1]), array([142, 239, 163, 290, 0]), array([144, 239, 161, 278, 1])]\n" ], [ "eee=np.array(datanp, dtype='float32')", "_____no_output_____" ], [ "eee.shape", "_____no_output_____" ], [ "ddd=K.cast([1,2,3,4],'float32')\nprint([ddd]+[ddd]+[ddd]+[ddd])\nprint(K.reshape([ddd]+[ddd]+[ddd]+[ddd],[-1,4]))\nprint(ddd)", "[<tf.Tensor 'Cast_14:0' shape=(4,) dtype=float32>, <tf.Tensor 'Cast_14:0' shape=(4,) dtype=float32>, <tf.Tensor 'Cast_14:0' shape=(4,) dtype=float32>, <tf.Tensor 'Cast_14:0' shape=(4,) dtype=float32>]\nTensor(\"Reshape_10:0\", shape=(4, 4), dtype=float32)\nTensor(\"Cast_14:0\", shape=(4,), dtype=float32)\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7d357a56d2e0a4a03ae253ccab94919148222a5
26,324
ipynb
Jupyter Notebook
tensorflow/lite/g3doc/models/modify/model_maker/text_searcher.ipynb
QS-L-1992/tensorflow
4bd51c0e182715bf94a34bd51b4f89dd5cf46163
[ "Apache-2.0" ]
null
null
null
tensorflow/lite/g3doc/models/modify/model_maker/text_searcher.ipynb
QS-L-1992/tensorflow
4bd51c0e182715bf94a34bd51b4f89dd5cf46163
[ "Apache-2.0" ]
null
null
null
tensorflow/lite/g3doc/models/modify/model_maker/text_searcher.ipynb
QS-L-1992/tensorflow
4bd51c0e182715bf94a34bd51b4f89dd5cf46163
[ "Apache-2.0" ]
null
null
null
47.091234
819
0.608532
[ [ [ "##### Copyright 2022 The TensorFlow Authors.", "_____no_output_____" ] ], [ [ "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "_____no_output_____" ] ], [ [ "# Text Searcher with TensorFlow Lite Model Maker", "_____no_output_____" ], [ "<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/lite/models/modify/model_maker/text_searcher\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/models/modify/model_maker/text_searcher.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/models/modify/model_maker/text_searcher.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n <td>\n <a href=\"https://storage.googleapis.com/tensorflow_docs/tensorflow/tensorflow/lite/g3doc/models/modify/model_maker/text_searcher.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />Download notebook</a>\n </td>\n <td>\n <a href=\"https://tfhub.dev/google/universal-sentence-encoder-lite/2\"><img src=\"https://www.tensorflow.org/images/hub_logo_32px.png\" />See TF Hub model</a>\n </td>\n</table>", "_____no_output_____" ], [ "In this colab notebook, you can learn how to use the [TensorFlow Lite Model Maker](https://www.tensorflow.org/lite/guide/model_maker) library to create a TFLite Searcher model. You can use a text Searcher model to build Sematic Search or Smart Reply for your app. This type of model lets you take a text query and search for the most related entries in a text dataset, such as a database of web pages. The model returns a list of the smallest distance scoring entries in the dataset, including metadata you specify, such as URL, page title, or other text entry identifiers. After building this, you can deploy it onto devices (e.g. Android) using [Task Library Searcher API](https://www.tensorflow.org/lite/inference_with_metadata/task_library/text_searcher) to run inference with just a few lines of code.\n\nThis tutorial leverages CNN/DailyMail dataset as an instance to create the TFLite Searcher model. You can try with your own dataset with the compatible input comma separated value (CSV) format.", "_____no_output_____" ], [ "## Text search using Scalable Nearest Neighbor", "_____no_output_____" ], [ "This tutorial uses the publicly available CNN/DailyMail non-anonymized summarization dataset, which was produced from the [GitHub repo](https://github.com/abisee/cnn-dailymail). 
[ "## Text search using Scalable Nearest Neighbor", "_____no_output_____" ], [ "This tutorial uses the publicly available CNN/DailyMail non-anonymized summarization dataset, which was produced from the [GitHub repo](https://github.com/abisee/cnn-dailymail). This dataset contains over 300k news articles, which makes it a good dataset for building the Searcher model and for returning a variety of related news articles during model inference for a text query.\n\nThe text Searcher model in this example uses a [ScaNN](https://github.com/google-research/google-research/tree/master/scann) (Scalable Nearest Neighbors) index file that can search for similar items from a predefined database. ScaNN achieves state-of-the-art performance for efficient vector similarity search at scale.\n\n
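The toy example below is not part of the original tutorial; it only illustrates the idea behind this kind of search: embed the query, then rank the database entries by how well their embedding vectors score against it. ScaNN computes an approximate version of this ranking so that it stays fast for hundreds of thousands of entries.\n\n```python\nimport numpy as np\n\n# Hypothetical stand-in for a text embedder: four database entries and one query,\n# each represented by a 3-d embedding vector, L2-normalized so the dot product\n# behaves like cosine similarity.\ndatabase = np.array([[0.1, 0.9, 0.4], [0.8, 0.2, 0.6], [0.4, 0.4, 0.8], [0.9, 0.1, 0.4]])\ndatabase /= np.linalg.norm(database, axis=1, keepdims=True)\nquery = np.array([0.7, 0.3, 0.7])\nquery /= np.linalg.norm(query)\n\n# Brute-force dot-product search; ScaNN approximates this efficiently at scale.\nscores = database @ query\nprint(np.argsort(-scores))  # indices of the nearest entries, best match first\n```\n\n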
If the download from Google Drive fails, please wait a few minutes and try again, or download the files manually and then upload them to the colab.", "_____no_output_____" ] ], [ [ "!gdown https://drive.google.com/uc?id=0BwmD_VLjROrfTHk4NFg2SndKcjQ\n!gdown https://drive.google.com/uc?id=0BwmD_VLjROrfM1BxdkxVaTY2bWs\n\n!wget -O all_train.txt https://raw.githubusercontent.com/abisee/cnn-dailymail/master/url_lists/all_train.txt\n!tar xzf cnn_stories.tgz\n!tar xzf dailymail_stories.tgz", "_____no_output_____" ] ], [ [ "Then, save the data into a CSV file that can be loaded by the `tflite_model_maker` library. The code is based on the logic used to load this data in [`tensorflow_datasets`](https://github.com/tensorflow/datasets/blob/master/tensorflow_datasets/summarization/cnn_dailymail.py). We can't use `tensorflow_datasets` directly since it doesn't contain the urls, which are used in this colab.\n\nSince it takes a long time to process the data into embedding feature vectors for the whole dataset, only the first 5% of stories of the CNN and Daily Mail dataset are selected by default for demo purposes. You can adjust the fraction, or try the pre-built TFLite [model](https://storage.googleapis.com/download.tensorflow.org/models/tflite_support/searcher/text_to_image_blogpost/cnn_daily_text_searcher.tflite) built with 50% of the CNN and Daily Mail stories as well.", "_____no_output_____" ] ], [ [ "#@title Save the highlights and urls to the CSV file\n#@markdown Load the highlights from the stories of CNN / Daily Mail, map urls with highlights, and save them to the CSV file.\n\nCNN_FRACTION = 0.05 #@param {type:\"number\"}\nDAILYMAIL_FRACTION = 0.05 #@param {type:\"number\"}\n\nimport csv\nimport hashlib\nimport os\nimport tensorflow as tf\n\ndm_single_close_quote = u\"\\u2019\" # unicode\ndm_double_close_quote = u\"\\u201d\"\nEND_TOKENS = [\n \".\", \"!\", \"?\", \"...\", \"'\", \"`\", '\"', dm_single_close_quote,\n dm_double_close_quote, \")\"\n] # acceptable ways to end a sentence\n\n\ndef read_file(file_path):\n \"\"\"Reads lines in the file.\"\"\"\n lines = []\n with tf.io.gfile.GFile(file_path, \"r\") as f:\n for line in f:\n lines.append(line.strip())\n return lines\n\n\ndef url_hash(url):\n \"\"\"Gets the hash value of the url.\"\"\"\n h = hashlib.sha1()\n url = url.encode(\"utf-8\")\n h.update(url)\n return h.hexdigest()\n\n\ndef get_url_hashes_dict(urls_path):\n \"\"\"Gets hashes dict that maps the hash value to the original url in file.\"\"\"\n urls = read_file(urls_path)\n return {url_hash(url): url[url.find(\"id_/\") + 4:] for url in urls}\n\n\ndef find_files(folder, url_dict):\n \"\"\"Finds files corresponding to the urls in the folder.\"\"\"\n all_files = tf.io.gfile.listdir(folder)\n ret_files = []\n for file in all_files:\n # Gets the file name without extension.\n filename = os.path.splitext(os.path.basename(file))[0]\n if filename in url_dict:\n ret_files.append(os.path.join(folder, file))\n return ret_files\n\n\ndef fix_missing_period(line):\n \"\"\"Adds a period to a line that is missing a period.\"\"\"\n if \"@highlight\" in line:\n return line\n if not line:\n return line\n if line[-1] in END_TOKENS:\n return line\n return line + \".\"\n\n\ndef get_highlights(story_file):\n \"\"\"Gets highlights from a story file path.\"\"\"\n lines = read_file(story_file)\n\n # Put periods on the ends of lines that are missing them\n # (this is a problem in the dataset because many image captions don't end in\n # periods; consequently they end up in the body of the article as run-on\n # 
sentences)\n lines = [fix_missing_period(line) for line in lines]\n\n # Separate out article and abstract sentences\n highlight_list = []\n next_is_highlight = False\n for line in lines:\n if not line:\n continue # empty line\n elif line.startswith(\"@highlight\"):\n next_is_highlight = True\n elif next_is_highlight:\n highlight_list.append(line)\n\n # Make highlights into a single string.\n highlights = \"\\n\".join(highlight_list)\n\n return highlights\n\nurl_hashes_dict = get_url_hashes_dict(\"all_train.txt\")\ncnn_files = find_files(\"cnn/stories\", url_hashes_dict)\ndailymail_files = find_files(\"dailymail/stories\", url_hashes_dict)\n\n# The size to be selected.\ncnn_size = int(CNN_FRACTION * len(cnn_files))\ndailymail_size = int(DAILYMAIL_FRACTION * len(dailymail_files))\nprint(\"CNN size: %d\"%cnn_size)\nprint(\"Daily Mail size: %d\"%dailymail_size)\n\nwith open(\"cnn_dailymail.csv\", \"w\") as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=[\"highlights\", \"urls\"])\n writer.writeheader()\n\n for file in cnn_files[:cnn_size] + dailymail_files[:dailymail_size]:\n highlights = get_highlights(file)\n # Gets the filename which is the hash value of the url.\n filename = os.path.splitext(os.path.basename(file))[0]\n url = url_hashes_dict[filename]\n writer.writerow({\"highlights\": highlights, \"urls\": url})\n", "_____no_output_____" ] ], [ [ "## Build the text Searcher model", "_____no_output_____" ], [ "Create a text Searcher model by loading a dataset, creating a model with the data and exporting the TFLite model.\n\n### Step 1. Load the dataset\n\nModel Maker takes the text dataset and the corresponding metadata of each text string (such as urls in this example) in the CSV format. It embeds the text strings into feature vectors using the user-specified embedder model.\n\nIn this demo, we build the Searcher model using [Universal Sentence Encoder](https://tfhub.dev/google/universal-sentence-encoder-lite/2), a state-of-the-art sentence embedding model retrained in this [colab](https://github.com/tensorflow/tflite-support/blob/master/tensorflow_lite_support/examples/colab/on_device_text_to_image_search_tflite.ipynb). The model is optimized for on-device inference performance, and only takes 6ms to embed a query string (measured on Pixel 6). Alternatively, you can use [this](https://tfhub.dev/google/lite-model/universal-sentence-encoder-qa-ondevice/1?lite-format=tflite) quantized version, which is smaller but takes 38ms for each embedding.", "_____no_output_____" ] ], [ [ "!wget -O universal_sentence_encoder.tflite https://storage.googleapis.com/download.tensorflow.org/models/tflite_support/searcher/text_to_image_blogpost/text_embedder.tflite", "_____no_output_____" ] ], [ [ "Create a `searcher.TextDataLoader` instance and use the `data_loader.load_from_csv` method to load the dataset. This step takes ~10 minutes since it generates the embedding feature vector for each text one by one. You can also upload your own CSV file and load it to build a customized model.\n\nSpecify the names of the text column and the metadata column in the CSV file.\n* Text is used to generate the embedding feature vectors.\n* Metadata is the content to be shown when you search for a certain text.\n\nHere are the first 4 lines of the CNN-DailyMail CSV file generated above.\n\n| highlights| urls\n| ---------- |----------\n|Syrian official: Obama climbed to the top of the tree, doesn't know how to get down. Obama sends a letter to the heads of the House and Senate. 
Obama <br> to seek congressional approval on military action against Syria. Aim is to determine whether CW were used, not by whom, says U.N. spokesman.|http://www.cnn.com/2013/08/31/world/meast/syria-civil-war/\n|Usain Bolt wins third gold of world championship. Anchors Jamaica to 4x100m relay victory. Eighth gold at the championships for Bolt. Jamaica double <br> up in women's 4x100m relay.|http://edition.cnn.com/2013/08/18/sport/athletics-bolt-jamaica-gold\n|The employee in agency's Kansas City office is among hundreds of \"virtual\" workers. The employee's travel to and from the mainland U.S. last year cost <br> more than $24,000. The telecommuting program, like all GSA practices, is under review.|http://www.cnn.com:80/2012/08/23/politics/gsa-hawaii-teleworking\n|NEW: A Canadian doctor says she was part of a team examining Harry Burkhart in 2010. NEW: Diagnosis: \"autism, severe anxiety, post-traumatic stress <br> disorder and depression\" Burkhart is also suspected in a German arson probe, officials say. Prosecutors believe the German national set a string of fires <br> in Los Angeles.|http://edition.cnn.com:80/2012/01/05/justice/california-arson/index.html?\n", "_____no_output_____" ] ], [ [ "data_loader = searcher.TextDataLoader.create(\"universal_sentence_encoder.tflite\", l2_normalize=True)\ndata_loader.load_from_csv(\"cnn_dailymail.csv\", text_column=\"highlights\", metadata_column=\"urls\")", "_____no_output_____" ] ], [ [ "For image use cases, you can create a `searcher.ImageDataLoader` instance and then use `data_loader.load_from_folder` to load images from the folder. The `searcher.ImageDataLoader` instance needs to be created by a TFLite embedder model because it will be leveraged to encode queries to feature vectors and be exported with the TFLite Searcher model. For instance:\n```python\ndata_loader = searcher.ImageDataLoader.create(\"mobilenet_v2_035_96_embedder_with_metadata.tflite\")\ndata_loader.load_from_folder(\"food/\")\n```", "_____no_output_____" ], [ "### Step 2. Create the Searcher model\n\n* Configure ScaNN options. See [api doc](https://www.tensorflow.org/lite/api_docs/python/tflite_model_maker/searcher/ScaNNOptions) for more details.\n* Create the Searcher model from data and ScaNN options. You can see the [in-depth examination](https://ai.googleblog.com/2020/07/announcing-scann-efficient-vector.html) to learn more about the ScaNN algorithm.", "_____no_output_____" ] ], [ [ "scann_options = searcher.ScaNNOptions(\n      distance_measure=\"dot_product\",\n      tree=searcher.Tree(num_leaves=140, num_leaves_to_search=4),\n      score_ah=searcher.ScoreAH(dimensions_per_block=1, anisotropic_quantization_threshold=0.2))\nmodel = searcher.Searcher.create_from_data(data_loader, scann_options)", "_____no_output_____" ] ], [ [ "In the above example, we define the following options:\n* `distance_measure`: we use \"dot_product\" to measure the distance between two embedding vectors. Note that we actually compute the **negative** dot product value to preserve the notion that \"smaller is closer\".\n\n* `tree`: the dataset is divided into 140 partitions (roughly the square root of the data size), and 4 of them are searched during retrieval, which is roughly 3% of the dataset.\n\n* `score_ah`: we quantize the float embeddings to int8 values with the same dimension to save space.", "_____no_output_____" ], [ "### Step 3. 
Export the TFLite model\n\nThen you can export the TFLite Searcher model.", "_____no_output_____" ] ], [ [ "model.export(\n      export_filename=\"searcher.tflite\",\n      userinfo=\"\",\n      export_format=searcher.ExportFormat.TFLITE)", "_____no_output_____" ] ], [ [ "## Test the TFLite model on your query\n\nYou can test the exported TFLite model using custom query text. To query text using the Searcher model, initialize the model and run a search with a text phrase, as follows:", "_____no_output_____" ] ], [ [ "from tflite_support.task import text\n\n# Initializes a TextSearcher object.\nsearcher = text.TextSearcher.create_from_file(\"searcher.tflite\")\n\n# Searches the input query.\nresults = searcher.search(\"The Airline Quality Rankings Report looks at the 14 largest U.S. airlines.\")\nprint(results)", "_____no_output_____" ] ], [ [ "See the [Task Library documentation](https://www.tensorflow.org/lite/inference_with_metadata/task_library/text_searcher) for more information about how to integrate the model into various platforms.", "_____no_output_____" ], [ "# Read more\n\nFor more information, please refer to:\n\n* TensorFlow Lite Model Maker [guide](https://www.tensorflow.org/lite/guide/model_maker) and [API reference](https://www.tensorflow.org/lite/api_docs/python/tflite_model_maker).\n\n* Task Library: [TextSearcher](https://www.tensorflow.org/lite/inference_with_metadata/task_library/text_searcher) for deployment.\n* The end-to-end reference apps: [Android](https://github.com/tensorflow/examples/tree/master/lite/examples/text_searcher/android).\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
e7d36e1cd9ca12a47590b2bd97f8fa9c40629cdd
356,840
ipynb
Jupyter Notebook
Ewaldkugel.ipynb
moosbruggerj/ewaldkugel-sim
d1b6cdb05da987af4202b8ca1674ffd04bf4ba18
[ "MIT" ]
null
null
null
Ewaldkugel.ipynb
moosbruggerj/ewaldkugel-sim
d1b6cdb05da987af4202b8ca1674ffd04bf4ba18
[ "MIT" ]
null
null
null
Ewaldkugel.ipynb
moosbruggerj/ewaldkugel-sim
d1b6cdb05da987af4202b8ca1674ffd04bf4ba18
[ "MIT" ]
null
null
null
1,493.054393
213,324
0.95477
[ [ [ "# Streuung\n\n\n<div><img src=\"attachment:image.png\" width=\"600\"/></div>\n\nWellenvektor des einfallenden Strahls: $\\vec {k}^{(i)}$ \nWellenvektor des ausgehenden Strahls: $\\vec {k}^{(s)}$\n\nImpulsübertrag: $\\vec Q =\\vec {k}^{(i)} - \\vec {k}^{(s)}$", "_____no_output_____" ], [ "# Laue-Bedingung\nVoraussetzung: elastische Streuung ($|\\vec {k}^{(i)}| = |\\vec {k}^{(s)}| = \\frac{2\\pi}{\\lambda}$)\n\n$\\vec Q = \\Delta \\vec{k} = \\vec {k}^{(i)} - \\vec {k}^{(s)} = \\vec G$\n\n<div><img src=\"attachment:image.png\" width=\"500\"/></div>", "_____no_output_____" ] ], [ [ "import ipywidgets as iw\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom math import *\nimport numpy as np\n\n\n", "_____no_output_____" ], [ "class Vector:\n def __init__(self, x, y):\n self.x = x\n self.y = y\n \n def length(self):\n return sqrt(self.x*self.x + self.y * self.y)\n \n def length_sq(self):\n return self.x*self.x + self.y * self.y\n \n def __mul__(self, s):\n return Vector(self.x * s, self.y * s)\n \n def __rmul__(self, s):\n return self * s\n \n def __add__(self, other):\n return Vector(self.x + other.x, self.y + other.y)\n \n def rotate(self, phi):\n return Vector(self.x * cos(phi) + self.y * (-sin(phi)), self.x * sin(phi) + self.y * cos(phi))\n \n @staticmethod\n def fromAbsPhi(r, phi):\n x = r * cos(phi)\n y = r * sin(phi)\n return Vector(x, y)", "_____no_output_____" ], [ "#b reziprokes gitter 2d abbildung\nb1 = Vector(2,0)\nb2 = Vector(0,2)\n\nangle = 50*pi/180\n\nlambda_ = 0.5\nlight_vec = Vector.fromAbsPhi(1/lambda_, 0)\n", "_____no_output_____" ], [ "# plot\ndef draw(alpha, lambda_, b2_mag, b2_phi):\n light_vec = Vector.fromAbsPhi(1/lambda_, 0)\n angle = alpha / 180.0 * pi\n b2 = Vector.fromAbsPhi(b2_mag, b2_phi/180.0 * pi)\n fig, ax = plt.subplots()\n\n fig.set_size_inches(9, 9, forward=True)\n\n b1r = b1.rotate(angle)\n b2r = b2.rotate(angle)\n epsilon = 1e-4\n size = 8\n num_1points = int(ceil(2*size / max(abs(b1.y) + abs(b2.y), 0.2)))\n num_2points = int(ceil(2*size / max(abs(b1.x) + abs(b2.x), 0.2)))\n num_2points = max(num_1points, num_2points)\n num_1points = num_2points\n\n num1 = len(range(-num_1points, num_1points))\n num2 = len(range(-num_2points, num_2points))\n points_x = np.zeros((num1, num2))\n points_y = np.zeros((num1, num2))\n points_all = np.empty((num1, num2), dtype=Vector)\n\n \n for col in range(-num_1points, num_1points):\n row = (np.arange(-num_2points, num_2points) * b2r)\n points = row + (b1r * col)\n points_x[col,:] = [p.x for p in points]\n points_y[col,:] = [p.y for p in points]\n \n points_all[col,:] = points\n\n points_all = points_all + light_vec\n l = abs(light_vec.length_sq())\n def distance(p):\n if abs(p.x - light_vec.x) < epsilon and abs(p.y - light_vec.y) < epsilon:\n return float(\"inf\")\n return abs(abs(p.length_sq()) - l)\n closest = min(points_all.flatten(), key= distance)\n print(f\"x = {closest.x}\")\n print(f\"y = {closest.y}\")\n print(f\"delta = {closest.length() - sqrt(l)}\")\n\n ax.arrow(0, 0, light_vec.x, light_vec.y, head_width=0.25, head_length=0.3, length_includes_head=True, fc='maroon', ec='maroon')\n ax.arrow(0, 0, closest.x, closest.y, head_width=0.25, head_length=0.3, length_includes_head=True, fc='b', ec='b')\n\n ax.add_patch(matplotlib.patches.Circle( (0, 0), light_vec.length(), fill = False, ec = \"black\", lw=1))\n\n ax.scatter(points_x + light_vec.x, points_y + light_vec.y, s=[4])\n ax.scatter([0], [0], s=[4], c = [\"r\"])\n ax.set(xlim=(-size, size),\n ylim=(-size, size))\n\n ax.set_aspect(1)\n 
#ax.set_axis_off()\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n\n plt.show()\n #alpha= 103, lambda = 0.3: good fit", "_____no_output_____" ], [ "w = iw.interactive(draw, \n alpha = iw.IntSlider(min = 0, max = 360, value = 45), \n lambda_ = iw.FloatSlider(min = 0.2, max = 2, value = 0.5), \n b2_mag = iw.FloatSlider(min = 0.5, max = 4, value = 2.0, continuous_update=False),\n b2_phi = iw.IntSlider(min = 0, max = 360, value = 90, continuous_update=False))\noutput = w.children[-1]\noutput.layout.height = '650px'\ndisplay(w)", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ] ]
e7d39c66a98b721a639483febabe2b6ea57ce4ce
74,961
ipynb
Jupyter Notebook
src/ab_testing/code/.ipynb_checkpoints/walkable_api-checkpoint.ipynb
AntoninJoly/book
257c641fd52d0e9499093247727b135ed361d7c4
[ "Apache-2.0" ]
null
null
null
src/ab_testing/code/.ipynb_checkpoints/walkable_api-checkpoint.ipynb
AntoninJoly/book
257c641fd52d0e9499093247727b135ed361d7c4
[ "Apache-2.0" ]
null
null
null
src/ab_testing/code/.ipynb_checkpoints/walkable_api-checkpoint.ipynb
AntoninJoly/book
257c641fd52d0e9499093247727b135ed361d7c4
[ "Apache-2.0" ]
null
null
null
38.109304
431
0.57722
[ [ [ "import requests\nimport pandas as pd\nimport pprint as pprint\nimport json\nfrom itertools import izip\nfrom pymongo import MongoClient\nfrom bson.objectid import ObjectId", "_____no_output_____" ], [ "cd ../seattle", "/Users/michaelhood/Documents/projects/project/data/seattle\n" ], [ "df = pd.read_csv('trx_sea.csv', index_col=0)\ndf = df.reset_index().drop(['index'], axis=1)", "_____no_output_____" ], [ "df.index", "_____no_output_____" ], [ "def query_api(df):\n \n WSAPI_KEY = \"e54c50263609ee055aece36205c4827c\"\n TRANSIT_BASE_URL = \"http://transit.walkscore.com/transit/score/?lat={0}&lon={1}&city={2}&state={3}&wsapikey={4}&research=yes\"\n WALK_BASE_URL = \"http://api.walkscore.com/score?format={0}&address={1}%20{2}%20{3}%20{4}&lat={5}&lon={6}&wsapikey={7}\"\n \n walk_scores = []\n transit_scores = []\n \n df['city'] = df['city'].apply(lambda city: city.replace(' ', '-'))\n df['city'] = df['city'].apply(lambda city: \"Seattle\" if city==\"Des-Moines\" else city) \n df['street'] = df['street'].apply(lambda street: street[:street.find('#')-1] if '#' in street else street)\n df['street'] = df['street'].apply(lambda street: street.replace(' ', '-'))\n \n count = 1\n for row in df.iterrows():\n city = row[1][1]\n lat = row[1][2]\n lon = row[1][3]\n state = row[1][4]\n address = row[1][5]\n zip_code = row[1][6]\n walk_api_query = WALK_BASE_URL.format('json', address, zip_code, city, state, lat, lon, WSAPI_KEY) \n walk_api_response = requests.get(walk_api_query)\n walk_scores.append(walk_api_response.content)\n transit_api_query = TRANSIT_BASE_URL.format(lat, lon, city, state, WSAPI_KEY)\n transit_api_response = requests.get(transit_api_query)\n transit_scores.append(transit_api_response.content)\n print \"Querying row: {0}\".format(count)\n count += 1\n return walk_scores, transit_scores", "_____no_output_____" ], [ "walk_scores, transit_scores = query_api(df)", "Querying row: 1\nQuerying row: 2\nQuerying row: 3\nQuerying row: 4\nQuerying row: 5\nQuerying row: 6\nQuerying row: 7\nQuerying row: 8\nQuerying row: 9\nQuerying row: 10\nQuerying row: 11\nQuerying row: 12\nQuerying row: 13\nQuerying row: 14\nQuerying row: 15\nQuerying row: 16\nQuerying row: 17\nQuerying row: 18\nQuerying row: 19\nQuerying row: 20\nQuerying row: 21\nQuerying row: 22\nQuerying row: 23\nQuerying row: 24\nQuerying row: 25\nQuerying row: 26\nQuerying row: 27\nQuerying row: 28\nQuerying row: 29\nQuerying row: 30\nQuerying row: 31\nQuerying row: 32\nQuerying row: 33\nQuerying row: 34\nQuerying row: 35\nQuerying row: 36\nQuerying row: 37\nQuerying row: 38\nQuerying row: 39\nQuerying row: 40\nQuerying row: 41\nQuerying row: 42\nQuerying row: 43\nQuerying row: 44\nQuerying row: 45\nQuerying row: 46\nQuerying row: 47\nQuerying row: 48\nQuerying row: 49\nQuerying row: 50\nQuerying row: 51\nQuerying row: 52\nQuerying row: 53\nQuerying row: 54\nQuerying row: 55\nQuerying row: 56\nQuerying row: 57\nQuerying row: 58\nQuerying row: 59\nQuerying row: 60\nQuerying row: 61\nQuerying row: 62\nQuerying row: 63\nQuerying row: 64\nQuerying row: 65\nQuerying row: 66\nQuerying row: 67\nQuerying row: 68\nQuerying row: 69\nQuerying row: 70\nQuerying row: 71\nQuerying row: 72\nQuerying row: 73\nQuerying row: 74\nQuerying row: 75\nQuerying row: 76\nQuerying row: 77\nQuerying row: 78\nQuerying row: 79\nQuerying row: 80\nQuerying row: 81\nQuerying row: 82\nQuerying row: 83\nQuerying row: 84\nQuerying row: 85\nQuerying row: 86\nQuerying row: 87\nQuerying row: 88\nQuerying row: 89\nQuerying row: 90\nQuerying row: 91\nQuerying row: 
...\nQuerying row: 787\n" ], [ "def score_api_responses(walk_scores, transit_scores):\n response_status = {}\n walk_succeed_string = '\"status\": 1' \n transit_fail_string = \"You must provide a valid 'city' and 'state'\"\n \n for index, score in enumerate(izip(walk_scores, transit_scores)):\n status = {'walkscore_status': 1,\n 'transitscore_status': 1} # 1 --> status is good\n if walk_succeed_string not in score[0]:\n status['walkscore_status'] = 0 # status is bad\n if transit_fail_string in score[1]:\n status['transitscore_status'] = 0\n #response_status[index] = 400\n #else:\n #response_status[index] = 0\n response_status[index] = status\n return response_status\n ", "_____no_output_____" ], [ "def get_api_report(response_status):\n success_count = 0\n incomplete_count = 0\n incompletes = []\n \n for key, value in response_status.items():\n if (value['transitscore_status'] == 0) or (value['walkscore_status'] == 0):\n incomplete_count += 1\n incompletes.append({key: value})\n else:\n success_count += 1\n 
print \"{0} API calls were successful\".format(success_count)\n print \"{0} API calls were incomplete or unsuccessful\".format(incomplete_count)\n return incompletes ", "_____no_output_____" ], [ "response_status = score_api_responses(walk_scores, transit_scores)", "_____no_output_____" ], [ "incompletes = get_api_report(response_status)", "787 API calls were successful\n0 API calls were incomplete or unsuccessful\n" ], [ "def to_json(query_response):\n return json.loads(query_response.replace('\\n', ''))", "_____no_output_____" ], [ "def get_and_format_data(df, walk_scores, transit_scores):\n \n key_ids = []\n walkscore_descriptions = []\n walkscore_scores = []\n trans_descriptions = []\n trans_summaries = []\n trans_scores = []\n \n for i in range(min([len(walk_scores), len(transit_scores)])): # take the smaller of the two lists\n key_id = df.ix[i][0]\n key_ids.append(key_id)\n try:\n walkscore_desc = to_json(walk_scores[i])['description']\n except:\n walkscore_desc = 'None'\n walkscore_descriptions.append(walkscore_desc)\n try:\n walkscore_score = to_json(walk_scores[i])['walkscore']\n except:\n walkscore_score = 'None'\n walkscore_scores.append(walkscore_score)\n try:\n trans_desc = to_json(transit_scores[i])['description']\n except:\n trans_desc = 'None'\n trans_descriptions.append(trans_desc)\n try:\n trans_summ = to_json(transit_scores[i])['summary']\n except:\n trans_summ = 'None'\n trans_summaries.append(trans_summ)\n try:\n trans_score = to_json(transit_scores[i])['transit_score']\n except:\n trans_score = \"None\"\n trans_scores.append(trans_score)\n \n return pd.DataFrame({'_id': key_ids, \n 'walkscore_desc': walkscore_descriptions, \n 'walkscore_score': walkscore_scores,\n 'trans_desc': trans_desc,\n 'trans_summary': trans_summaries,\n 'trans_score': trans_scores}, index=range(len(key_ids))) ", "_____no_output_____" ], [ "df_api = get_and_format_data(df, walk_scores, transit_scores)", "_____no_output_____" ], [ "client = MongoClient()\ndb = client.proj", "_____no_output_____" ], [ "df_new = pd.concat([df, df_api], axis=1, join='outer')", "_____no_output_____" ], [ "def update_db(df):\n for row in df.iterrows():\n mongo_id = ObjectId(row[1][0])\n trans_desc = row[1][46]\n trans_score = row[1][47]\n trans_summary = row[1][48]\n walkscore_desc = row[1][49]\n walkscore_score = row[1][50]\n print \"updating... \", mongo_id\n db.listings.update(\n {'_id': mongo_id},\n { '$set': { \"trans_desc\": trans_desc,\n \"trans_score\": trans_score,\n \"trans_summary\": trans_summary,\n \"walkscore_desc\": walkscore_desc,\n \"walkscore_score\": walkscore_score\n }}\n )", "_____no_output_____" ], [ "update_db(df_new)", "updating... 55f3add23ae74048e6bff0b9\nupdating... 55f3add23ae74048e6bff0b5\nupdating... 55f3add23ae74048e6bff0b6\nupdating... 55f3add23ae74048e6bff0b7\nupdating... 55f3add23ae74048e6bff0b8\nupdating... 55f3add23ae74048e6bff0ba\nupdating... 55f3add23ae74048e6bff0bd\nupdating... 55f3add23ae74048e6bff0be\nupdating... 55f3add23ae74048e6bff0c0\nupdating... 55f3add23ae74048e6bff0c2\nupdating... 55f3add23ae74048e6bff0c3\nupdating... 55f3add23ae74048e6bff0c4\nupdating... 55f3add23ae74048e6bff0c5\nupdating... 55f3add23ae74048e6bff0c6\nupdating... 55f3add23ae74048e6bff0c8\nupdating... 55f3add23ae74048e6bff0c9\nupdating... 55f3add23ae74048e6bff0ca\nupdating... 55f3add23ae74048e6bff0cb\nupdating... 55f3add23ae74048e6bff0cc\nupdating... 55f3add23ae74048e6bff0cd\nupdating... 55f3add23ae74048e6bff0ce\nupdating... 55f3add23ae74048e6bff0cf\nupdating... 55f3add23ae74048e6bff0d1\nupdating... 
...
55f6f5e03ae7404f74f898ed\nupdating... 55f6f5e03ae7404f74f898ee\nupdating... 55f6f5e03ae7404f74f898ef\nupdating... 55f6f5e03ae7404f74f898f0\nupdating... 55f6f5e03ae7404f74f898f1\nupdating... 55f6f5e03ae7404f74f898f2\nupdating... 55f6f5e03ae7404f74f898f3\nupdating... 55f6f5e03ae7404f74f898f4\nupdating... 55f6f5e03ae7404f74f898f5\nupdating... 55f6f5e03ae7404f74f898f6\nupdating... 55f6f5e03ae7404f74f898f7\nupdating... 55f6f5e03ae7404f74f898f8\nupdating... 55f6f5e03ae7404f74f898f9\nupdating... 55f6f5e03ae7404f74f898fa\nupdating... 55f6f5e03ae7404f74f898fb\nupdating... 55f6f5e03ae7404f74f898fc\nupdating... 55f6f5e03ae7404f74f898fd\nupdating... 55f6f5e03ae7404f74f898fe\nupdating... 55f6f5e03ae7404f74f898ff\nupdating... 55f6f5e03ae7404f74f89900\nupdating... 55f6f5e03ae7404f74f89901\nupdating... 55f6f5e03ae7404f74f89902\nupdating... 55f6f5e03ae7404f74f89903\nupdating... 55f6f5e03ae7404f74f89904\nupdating... 55f6f5e03ae7404f74f89905\nupdating... 55f6f5e03ae7404f74f89906\nupdating... 55f6f5e03ae7404f74f89907\nupdating... 55f6f5e03ae7404f74f89908\nupdating... 55f6f5e03ae7404f74f89909\nupdating... 55f6f5e03ae7404f74f8990a\nupdating... 55f6f5e03ae7404f74f8990b\nupdating... 55f6f5e03ae7404f74f8990c\nupdating... 55f6f5e03ae7404f74f8990d\nupdating... 55f6f5e03ae7404f74f8990e\nupdating... 55f6f5e03ae7404f74f8990f\nupdating... 55f6f5e03ae7404f74f89910\nupdating... 55f6f5e03ae7404f74f89911\nupdating... 55f6f5e03ae7404f74f89912\nupdating... 55f6f5e03ae7404f74f89913\nupdating... 55f6f5e03ae7404f74f89914\nupdating... 55f6f5e03ae7404f74f89915\nupdating... 55f6f5e03ae7404f74f89916\nupdating... 55f6f5e03ae7404f74f89917\nupdating... 55f6f5e03ae7404f74f89918\nupdating... 55f6f5e03ae7404f74f89919\nupdating... 55f6f5e03ae7404f74f8991a\nupdating... 55f6f5e03ae7404f74f8991b\nupdating... 55f6f5e03ae7404f74f8991c\nupdating... 55f6f5e03ae7404f74f8991d\nupdating... 55f6f5e03ae7404f74f8991e\nupdating... 55f6f5e03ae7404f74f8991f\nupdating... 55f6f5e03ae7404f74f89920\nupdating... 55f6f5e03ae7404f74f89921\nupdating... 55f6f5e03ae7404f74f89922\nupdating... 55f6f5e03ae7404f74f89923\nupdating... 55f6f5e03ae7404f74f89924\nupdating... 55f6f5e03ae7404f74f89925\nupdating... 55f6f5e03ae7404f74f89926\nupdating... 55f6f5e03ae7404f74f89927\nupdating... 55f6f5e03ae7404f74f89928\nupdating... 55f6f5e03ae7404f74f89929\nupdating... 55f6f5e03ae7404f74f8992a\nupdating... 55f6f5e03ae7404f74f8992b\nupdating... 55f6f5e03ae7404f74f8992c\nupdating... 55f6f5e03ae7404f74f8992d\nupdating... 55f6f5e03ae7404f74f8992e\nupdating... 55f6f5e03ae7404f74f8992f\nupdating... 55f6f5e03ae7404f74f89930\nupdating... 55f6f5e03ae7404f74f89931\nupdating... 55f6f5e03ae7404f74f89932\nupdating... 55f6f5e03ae7404f74f89933\nupdating... 55f6f5e03ae7404f74f89934\nupdating... 55f6f5e03ae7404f74f89935\nupdating... 55f6f5e03ae7404f74f89936\nupdating... 55f6f5e03ae7404f74f89937\nupdating... 55f6f5e03ae7404f74f89938\nupdating... 55f6f5e03ae7404f74f89939\nupdating... 55f6f5e03ae7404f74f8993a\nupdating... 55f6f5e03ae7404f74f8993b\nupdating... 55f6f5e03ae7404f74f8993c\nupdating... 55f6f5e03ae7404f74f8993d\nupdating... 55f6f5e03ae7404f74f8993e\nupdating... 55f6f5e03ae7404f74f8993f\nupdating... 55f6f5e03ae7404f74f89940\nupdating... 55f6f5e03ae7404f74f89941\nupdating... 55f6f5e03ae7404f74f89942\nupdating... 55f6f5e03ae7404f74f89943\nupdating... 55f6f5e03ae7404f74f89944\nupdating... 55f6f5e03ae7404f74f89945\nupdating... 55f6f5e03ae7404f74f89946\nupdating... 55f6f5e03ae7404f74f89947\nupdating... 55f6f5e03ae7404f74f89948\nupdating... 55f6f5e03ae7404f74f89949\nupdating... 
55f6f5e03ae7404f74f8994a\nupdating... 55f6f5e03ae7404f74f8994b\nupdating... 55f6f5e03ae7404f74f8994c\nupdating... 55f6f5e03ae7404f74f8994d\nupdating... 55f6f5e03ae7404f74f8994e\nupdating... 55f6f5e03ae7404f74f8994f\nupdating... 55f6f5e03ae7404f74f89950\nupdating... 55f6f5e03ae7404f74f89951\nupdating... 55f6f5e03ae7404f74f89952\nupdating... 55f6f5e03ae7404f74f89953\nupdating... 55f6f5e03ae7404f74f89954\nupdating... 55f6f5e03ae7404f74f89955\nupdating... 55f6f5e03ae7404f74f89956\nupdating... 55f6f5e03ae7404f74f89957\nupdating... 55f6f5e03ae7404f74f89958\nupdating... 55f6f5e03ae7404f74f89959\nupdating... 55f6f5e03ae7404f74f8995a\nupdating... 55f6f5e03ae7404f74f8995b\nupdating... 55f6f5e03ae7404f74f8995c\nupdating... 55f6f5e03ae7404f74f8995d\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7d39d7a595715cf80bfdaa2a807d67a92d89a71
49,515
ipynb
Jupyter Notebook
1_diagnosis/week1/lecture_exercise/AI4M_C1_W1_lecture_ex_02.ipynb
amitbcp/ai_for_medicine_specialisation
07bb0bf1c63ed3f3bef4c7c91bc7211dec820601
[ "Apache-2.0" ]
1
2020-07-25T04:56:55.000Z
2020-07-25T04:56:55.000Z
1_diagnosis/week1/lecture_exercise/AI4M_C1_W1_lecture_ex_02.ipynb
amitbcp/ai_for_medicine_specialisation
07bb0bf1c63ed3f3bef4c7c91bc7211dec820601
[ "Apache-2.0" ]
2
2020-06-15T04:42:00.000Z
2021-08-29T03:48:28.000Z
AI/AI_for_Medical_Diagnosis/week01/utf-8''AI4M_C1_W1_lecture_ex_02.ipynb
unimauro/Courses
81e5b9c4cbc9b875eff82f96bda7d21ec4f258b2
[ "Apache-2.0" ]
null
null
null
48.783251
20,984
0.723983
[ [ [ "## AI for Medicine Course 1 Week 1 lecture exercises", "_____no_output_____" ], [ "<a name=\"counting-labels\"></a>\n# Counting labels\n\nAs you saw in the lecture videos, one way to avoid having class imbalance impact the loss function is to weight the losses differently. To choose the weights, you first need to calculate the class frequencies.\n\nFor this exercise, you'll just get the count of each label. Later on, you'll use the concepts practiced here to calculate frequencies in the assignment!", "_____no_output_____" ] ], [ [ "# Import the necessary packages\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n%matplotlib inline", "_____no_output_____" ], [ "# Read csv file containing training data\ntrain_df = pd.read_csv(\"nih/train-small.csv\")", "_____no_output_____" ], [ "# Count up the number of instances of each class (drop non-class columns from the counts)\nclass_counts = train_df.sum().drop(['Image','PatientId'])\n", "_____no_output_____" ], [ "for column in class_counts.keys():\n print(f\"The class {column} has {train_df[column].sum()} samples\")", "The class Atelectasis has 106 samples\nThe class Cardiomegaly has 20 samples\nThe class Consolidation has 33 samples\nThe class Edema has 16 samples\nThe class Effusion has 128 samples\nThe class Emphysema has 13 samples\nThe class Fibrosis has 14 samples\nThe class Hernia has 2 samples\nThe class Infiltration has 175 samples\nThe class Mass has 45 samples\nThe class Nodule has 54 samples\nThe class Pleural_Thickening has 21 samples\nThe class Pneumonia has 10 samples\nThe class Pneumothorax has 38 samples\n" ], [ "# Plot up the distribution of counts\nsns.barplot(class_counts.values, class_counts.index, color='b')\nplt.title('Distribution of Classes for Training Dataset', fontsize=15)\nplt.xlabel('Number of Patients', fontsize=15)\nplt.ylabel('Diseases', fontsize=15)\nplt.show()", "_____no_output_____" ] ], [ [ "<a name=\"weighted-loss\"></a>\n# Weighted Loss function\n", "_____no_output_____" ], [ "Below is an example of calculating weighted loss. In the assignment, you will calculate a weighted loss function. This sample code will give you some intuition for what the weighted loss function is doing, and also help you practice some syntax you will use in the graded assignment.\n\nFor this example, you'll first define a hypothetical set of true labels and then a set of predictions.\n\nRun the next cell to create the 'ground truth' labels.", "_____no_output_____" ] ], [ [ "# Generate an array of 4 binary label values, 3 positive and 1 negative\ny_true = np.array(\n [[1],\n [1],\n [1],\n [0]])\nprint(f\"y_true: \\n{y_true}\")", "y_true: \n[[1]\n [1]\n [1]\n [0]]\n" ] ], [ [ "### Two models\nTo better understand the loss function, you will pretend that you have two models.\n- Model 1 always outputs a 0.9 for any example that it's given. 
\n- Model 2 always outputs a 0.1 for any example that it's given.", "_____no_output_____" ] ], [ [ "# Make model predictions that are always 0.9 for all examples\ny_pred_1 = 0.9 * np.ones(y_true.shape)\nprint(f\"y_pred_1: \\n{y_pred_1}\")\nprint()\ny_pred_2 = 0.1 * np.ones(y_true.shape)\nprint(f\"y_pred_2: \\n{y_pred_2}\")", "y_pred_1: \n[[0.9]\n [0.9]\n [0.9]\n [0.9]]\n\ny_pred_2: \n[[0.1]\n [0.1]\n [0.1]\n [0.1]]\n" ] ], [ [ "### Problems with the regular loss function\nThe learning goal here is to notice that with a regular loss function (not a weighted loss), the model that always outputs 0.9 has a smaller loss (performs better) than model 2.\n- This is because there is a class imbalance, where 3 out of the 4 labels are 1.\n- If the data were perfectly balanced, (two labels were 1, and two labels were 0), model 1 and model 2 would have the same loss. Each would get two examples correct and two examples incorrect.\n- However, since the data is not balanced, the regular loss function implies that model 1 is better than model 2.", "_____no_output_____" ], [ "### Notice the shortcomings of a regular non-weighted loss\n\nSee what loss you get from these two models (model 1 always predicts 0.9, and model 2 always predicts 0.1), see what the regular (unweighted) loss function is for each model.", "_____no_output_____" ] ], [ [ "loss_reg_1 = -1 * np.sum(y_true * np.log(y_pred_1)) + \\\n -1 * np.sum((1 - y_true) * np.log(1 - y_pred_1))\nprint(f\"loss_reg_1: {loss_reg_1:.4f}\")", "loss_reg_1: 2.6187\n" ], [ "loss_reg_2 = -1 * np.sum(y_true * np.log(y_pred_2)) + \\\n -1 * np.sum((1 - y_true) * np.log(1 - y_pred_2))\nprint(f\"loss_reg_2: {loss_reg_2:.4f}\")", "loss_reg_2: 7.0131\n" ], [ "print(f\"When the model 1 always predicts 0.9, the regular loss is {loss_reg_1:.4f}\")\nprint(f\"When the model 2 always predicts 0.1, the regular loss is {loss_reg_2:.4f}\")", "When the model 1 always predicts 0.9, the regular loss is 2.6187\nWhen the model 2 always predicts 0.1, the regular loss is 7.0131\n" ] ], [ [ "Notice that the loss function gives a greater loss when the predictions are always 0.1, because the data is imbalanced, and has three labels of `1` but only one label for `0`.\n\nGiven a class imbalance with more positive labels, the regular loss function implies that the model with the higher prediction of 0.9 performs better than the model with the lower prediction of 0.1", "_____no_output_____" ], [ "### How a weighted loss treats both models the same\nWith a weighted loss function, you will get the same weighted loss when the predictions are all 0.9 versus when the predictions are all 0.1. \n- Notice how a prediction of 0.9 is 0.1 away from the positive label of 1.\n- Also notice how a prediction of 0.1 is 0.1 away from the negative label of 0\n- So model 1 and 2 are \"symmetric\" along the midpoint of 0.5, if you plot them on a number line between 0 and 1.", "_____no_output_____" ], [ "### Weighted Loss Equation\nCalculate the loss for the zero-th label (column at index 0)\n\n- The loss is made up of two terms. To make it easier to read the code, you will calculate each of these terms separately. We are giving each of these two terms a name for explanatory purposes, but these are not officially called $loss_{pos}$ or $loss_{neg}$\n\n - $loss_{pos}$: we'll use this to refer to the loss where the actual label is positive (the positive examples).\n - $loss_{neg}$: we'll use this to refer to the loss where the actual label is negative (the negative examples). 
\n\n$$ loss^{(i)} = loss_{pos}^{(i)} + loss_{neg}^{(i)} $$\n\n$$loss_{pos}^{(i)} = -1 \times weight_{pos}^{(i)} \times y^{(i)} \times log(\hat{y}^{(i)})$$\n\n$$loss_{neg}^{(i)} = -1 \times weight_{neg}^{(i)} \times (1- y^{(i)}) \times log(1 - \hat{y}^{(i)})$$", "_____no_output_____" ], [ "Since this sample dataset is small enough, you can calculate the positive weight to be used in the weighted loss function. To get the positive weight, count how many NEGATIVE labels are present, divided by the total number of examples.\n\nIn this case, there is one negative label, and four total examples.\n\nSimilarly, the negative weight is the fraction of positive labels.\n\nRun the next cell to define positive and negative weights.", "_____no_output_____" ] ], [ [ "# calculate the positive weight as the fraction of negative labels\nw_p = 1/4\n\n# calculate the negative weight as the fraction of positive labels\nw_n = 3/4\n\nprint(f\"positive weight w_p: {w_p}\")\nprint(f\"negative weight w_n {w_n}\")", "positive weight w_p: 0.25\nnegative weight w_n 0.75\n" ] ], [ [ "### Model 1 weighted loss\nRun the next two cells to calculate the two loss terms separately.\n\nHere, `loss_1_pos` and `loss_1_neg` are calculated using the `y_pred_1` predictions.", "_____no_output_____" ] ], [ [ "# Calculate and print out the first term in the loss function, which we are calling 'loss_pos'\nloss_1_pos = -1 * np.sum(w_p * y_true * np.log(y_pred_1 ))\nprint(f\"loss_1_pos: {loss_1_pos:.4f}\")", "loss_1_pos: 0.0790\n" ], [ "# Calculate and print out the second term in the loss function, which we're calling 'loss_neg'\nloss_1_neg = -1 * np.sum(w_n * (1 - y_true) * np.log(1 - y_pred_1 ))\nprint(f\"loss_1_neg: {loss_1_neg:.4f}\")", "loss_1_neg: 1.7269\n" ], [ "# Sum positive and negative losses to calculate total loss\nloss_1 = loss_1_pos + loss_1_neg\nprint(f\"loss_1: {loss_1:.4f}\")", "loss_1: 1.8060\n" ] ], [ [ "### Model 2 weighted loss\n\nNow do the same calculations for when the predictions are from `y_pred_2`. Calculate the two terms of the weighted loss function and add them together.", "_____no_output_____" ] ], [ [ "# Calculate and print out the first term in the loss function, which we are calling 'loss_pos'\nloss_2_pos = -1 * np.sum(w_p * y_true * np.log(y_pred_2))\nprint(f\"loss_2_pos: {loss_2_pos:.4f}\")", "loss_2_pos: 1.7269\n" ], [ "# Calculate and print out the second term in the loss function, which we're calling 'loss_neg'\nloss_2_neg = -1 * np.sum(w_n * (1 - y_true) * np.log(1 - y_pred_2))\nprint(f\"loss_2_neg: {loss_2_neg:.4f}\")", "loss_2_neg: 0.0790\n" ], [ "# Sum positive and negative losses to calculate total loss when the prediction is y_pred_2\nloss_2 = loss_2_pos + loss_2_neg\nprint(f\"loss_2: {loss_2:.4f}\")", "loss_2: 1.8060\n" ] ], [ [ "### Compare model 1 and model 2 weighted loss", "_____no_output_____" ] ], [ [ "print(f\"When the model always predicts 0.9, the total loss is {loss_1:.4f}\")\nprint(f\"When the model always predicts 0.1, the total loss is {loss_2:.4f}\")", "When the model always predicts 0.9, the total loss is 1.8060\nWhen the model always predicts 0.1, the total loss is 1.8060\n" ] ], [ [ "### What do you notice?\nSince you used a weighted loss, the calculated loss is the same whether the model always predicts 0.9 or always predicts 0.1. 
\n\nYou may have also noticed that when you calculate each term of the weighted loss separately, there is a bit of symmetry when comparing between the two sets of predictions.", "_____no_output_____" ] ], [ [ "print(f\"loss_1_pos: {loss_1_pos:.4f} \\t loss_1_neg: {loss_1_neg:.4f}\")\nprint()\nprint(f\"loss_2_pos: {loss_2_pos:.4f} \\t loss_2_neg: {loss_2_neg:.4f}\")", "loss_1_pos: 0.0790 \t loss_1_neg: 1.7269\n\nloss_2_pos: 1.7269 \t loss_2_neg: 0.0790\n" ] ], [ [ "Even though there is a class imbalance, where there are 3 positive labels but only one negative label, the weighted loss accounts for this by giving more weight to the negative label than to the positive label.", "_____no_output_____" ], [ "### Weighted Loss for more than one class\n\nIn this week's assignment, you will calculate the multi-class weighted loss (when there is more than one disease class that your model is learning to predict). Here, you can practice working with 2D numpy arrays, which will help you implement the multi-class weighted loss in the graded assignment.\n\nYou will work with a dataset that has two disease classes (two columns)", "_____no_output_____" ] ], [ [ "# View the labels (true values) that you will practice with\ny_true = np.array(\n [[1,0],\n [1,0],\n [1,0],\n [1,0],\n [0,1]\n ])\ny_true", "_____no_output_____" ] ], [ [ "### Choosing axis=0 or axis=1\nYou will use `numpy.sum` to count the number of times column `0` has the value 0. \nFirst, notice the difference when you set axis=0 versus axis=1", "_____no_output_____" ] ], [ [ "# See what happens when you set axis=0\nprint(f\"using axis = 0 {np.sum(y_true,axis=0)}\")\n\n# Compare this to what happens when you set axis=1\nprint(f\"using axis = 1 {np.sum(y_true,axis=1)}\")", "using axis = 0 [4 1]\nusing axis = 1 [1 1 1 1 1]\n" ] ], [ [ "Notice that if you choose `axis=0`, the sum is taken for each of the two columns. This is what you want to do in this case. If you set `axis=1`, the sum is taken for each row.", "_____no_output_____" ], [ "### Calculate the weights\nPreviously, you visually inspected the data to calculate the fraction of negative and positive labels. Here, you can do this programmatically.", "_____no_output_____" ] ], [ [ "# set the positive weights as the fraction of negative labels (0) for each class (each column)\nw_p = np.sum(y_true == 0,axis=0) / y_true.shape[0]\nw_p", "_____no_output_____" ], [ "# set the negative weights as the fraction of positive labels (1) for each class\nw_n = np.sum(y_true == 1, axis=0) / y_true.shape[0]\nw_n", "_____no_output_____" ] ], [ [ "In the assignment, you will train a model to try and make useful predictions. In order to make this example easier to follow, you will pretend that your model always predicts the same value for every example.", "_____no_output_____" ] ], [ [ "# Set model predictions where all predictions are the same\ny_pred = np.ones(y_true.shape)\ny_pred[:,0] = 0.3 * y_pred[:,0]\ny_pred[:,1] = 0.7 * y_pred[:,1]\ny_pred", "_____no_output_____" ] ], [ [ "As before, calculate the two terms that make up the loss function. Notice that you are working with more than one class (represented by columns). 
In this case, there are two classes.\n\nStart by calculating the loss for class `0`.\n\n$$ loss^{(i)} = loss_{pos}^{(i)} + loss_{neg}^{(i)} $$\n\n$$loss_{pos}^{(i)} = -1 \times weight_{pos}^{(i)} \times y^{(i)} \times log(\hat{y}^{(i)})$$\n\n$$loss_{neg}^{(i)} = -1 \times weight_{neg}^{(i)} \times (1- y^{(i)}) \times log(1 - \hat{y}^{(i)})$$", "_____no_output_____" ], [ "View the zero column for the weights, true values, and predictions that you will use to calculate the loss from the positive predictions.", "_____no_output_____" ] ], [ [ "# Print and view column zero of the weight\nprint(f\"w_p[0]: {w_p[0]}\")\nprint(f\"y_true[:,0]: {y_true[:,0]}\")\nprint(f\"y_pred[:,0]: {y_pred[:,0]}\")", "w_p[0]: 0.2\ny_true[:,0]: [1 1 1 1 0]\ny_pred[:,0]: [0.3 0.3 0.3 0.3 0.3]\n" ], [ "# calculate the loss from the positive predictions, for class 0\nloss_0_pos = -1 * np.sum(w_p[0] * \n y_true[:, 0] * \n np.log(y_pred[:, 0])\n )\nprint(f\"loss_0_pos: {loss_0_pos:.4f}\")", "loss_0_pos: 0.9632\n" ] ], [ [ "View the zero column for the weights, true values, and predictions that you will use to calculate the loss from the negative predictions.", "_____no_output_____" ] ], [ [ "# Print and view column zero of the weight\nprint(f\"w_n[0]: {w_n[0]}\")\nprint(f\"y_true[:,0]: {y_true[:,0]}\")\nprint(f\"y_pred[:,0]: {y_pred[:,0]}\")", "w_n[0]: 0.8\ny_true[:,0]: [1 1 1 1 0]\ny_pred[:,0]: [0.3 0.3 0.3 0.3 0.3]\n" ], [ "# Calculate the loss from the negative predictions, for class 0\nloss_0_neg = -1 * np.sum( \n w_n[0] * \n (1 - y_true[:, 0]) * \n np.log(1 - y_pred[:, 0])\n )\nprint(f\"loss_0_neg: {loss_0_neg:.4f}\")", "loss_0_neg: 0.2853\n" ], [ "# add the two loss terms to get the total loss for class 0\nloss_0 = loss_0_neg + loss_0_pos\nprint(f\"loss_0: {loss_0:.4f}\")", "loss_0: 1.2485\n" ] ], [ [ "Now you are familiar with the array slicing that you would use when there are multiple disease classes stored in a two-dimensional array.\n\n#### Now it's your turn!\n* Can you calculate the loss for class (column) `1`? ", "_____no_output_____" ] ], [ [ "# calculate the loss from the positive predictions, for class 1\nloss_1_pos = None", "_____no_output_____" ] ], [ [ "Expected output\n```CPP\nloss_1_pos: 0.2853\n```", "_____no_output_____" ] ], [ [ "# Calculate the loss from the negative predictions, for class 1\nloss_1_neg = None", "_____no_output_____" ] ], [ [ "#### Expected output\n```CPP\nloss_1_neg: 0.9632\n```", "_____no_output_____" ] ], [ [ "# add the two loss terms to get the total loss for class 1\nloss_1 = None", "_____no_output_____" ] ], [ [ "#### Expected output\n```CPP\nloss_1: 1.2485\n```", "_____no_output_____" ], [ "### Note\nThe data for the two classes (two columns) as well as the predictions were chosen so that you end up getting the same weighted loss for both categories. 
\n - In general, you will expect to calculate different weighted loss values for each disease category, as the model predictions and data will differ from one category to another.", "_____no_output_____" ], [ "If you want some help, please click on the green \"Solution\" cell below to reveal the solution.", "_____no_output_____" ], [ "<details> \n<summary>\n <font size=\"3\" color=\"darkgreen\"><b>Solution</b></font>\n</summary>\n<p>\n<code>\n-- # calculate the loss from the positive predictions, for class 1\nloss_1_pos = -1 * np.sum(w_p[1] * \n y_true[:, 1] * \n np.log(y_pred[:, 1])\n )\nprint(f\"loss_1_pos: {loss_1_pos:.4f}\")\n \n-- # Calculate the loss from the negative predictions, for class 1\nloss_1_neg = -1 * np.sum( \n w_n[1] * \n (1 - y_true[:, 1]) * \n np.log(1 - y_pred[:, 1])\n )\nprint(f\"loss_1_neg: {loss_1_neg:.4f}\")\n\n-- # add the two loss terms to get the total loss for class 1\nloss_1 = loss_1_neg + loss_1_pos\nprint(f\"loss_1: {loss_1:.4f}\")\n </code>\n</p>\n", "_____no_output_____" ], [ "### How this practice relates to and differs from the upcoming graded assignment\n- In the assignment, you will generalize this to calculating the loss for any number of classes.\n- Also in the assignment, you will learn how to avoid taking the log of zero by adding a small number (more details will be explained in the assignment).\n- Note that in the lecture videos and in this lecture notebook, you are taking the **sum** of losses for all examples. In the assignment, you will take the **average (the mean)** for all examples.\n- Finally, in the assignment, you will work with \"tensors\" in TensorFlow, so you will use the TensorFlow equivalents of the numpy operations (keras.mean instead of numpy.mean).", "_____no_output_____" ], [ "#### That's all for this lab. You now have a couple more tools you'll need for this week's assignment!", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
e7d3a70c0a6b23e3b15c33a05fa62e8e95a33b79
26,083
ipynb
Jupyter Notebook
lightgbm_cu_20201123.ipynb
AlvinAi96/serverless_prediction
4e4c8c29afef7af6d7cb903e5b0c63f99cc1d587
[ "MIT" ]
18
2020-12-08T15:35:31.000Z
2022-03-22T09:47:18.000Z
lightgbm_cu_20201123.ipynb
AlvinAi96/serverless_prediction
4e4c8c29afef7af6d7cb903e5b0c63f99cc1d587
[ "MIT" ]
1
2021-01-20T07:19:31.000Z
2021-01-20T09:59:59.000Z
lightgbm_cu_20201123.ipynb
AlvinAi96/serverless_prediction
4e4c8c29afef7af6d7cb903e5b0c63f99cc1d587
[ "MIT" ]
1
2021-09-12T06:39:18.000Z
2021-09-12T06:39:18.000Z
35.730137
179
0.388951
[ [ [ "# LightGBM Model\nAuthor: 艾宏峰<br>\nCreated: 2020.11.15<br>", "_____no_output_____" ] ], [ [ "import gc\nimport pandas as pd\nimport lightgbm as lgb\nimport numpy as np\nfrom datetime import datetime\nfrom tqdm import tqdm\nfrom sklearn.model_selection import StratifiedKFold, TimeSeriesSplit\nfrom sklearn.metrics import accuracy_score\nimport copy\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")", "_____no_output_____" ] ], [ [ "LGBMRegressor has the following tunable parameters:\n- boosting_type(gbdt): boosting method, defaults to gbdt. Four options: (1) gbdt: traditional gradient boosting decision tree. (2) rf: random forest. (3) dart: Dropouts meet Multiple Additive Regression Trees. (4) goss: Gradient-based One-Side Sampling.<br>\n- num_leaves(31): maximum number of leaves per tree.<br>\n- max_depth(-1): maximum tree depth.<br>\n- learning_rate(0.1): learning rate.<br>\n- n_estimators(100): number of boosted trees.<br>\n- subsample_for_bin(200000): number of samples used to construct bins.<br>\n- objective: defaults to regression for LGBMRegressor.<br>\n- class_weight(None): only relevant for multi-class tasks, can be ignored here.<br>\n- min_split_gain(0): minimum loss reduction required to further split a leaf node.<br>\n- min_child_weight(1e-3): minimum sum of instance weights required in a child leaf.<br>\n- min_child_samples(20): minimum amount of data required in a child leaf.<br>\n- subsample(1): subsampling rate of the training set.<br>\n- subsample_freq(0): subsampling frequency, <=0 means disabled.<br>\n- colsample_bytree(1): column subsampling rate when constructing each tree.\n- reg_alpha(0): L1 regularization value on the weights.\n- reg_lambda(0): L2 regularization value on the weights.\n- random_state(None): seed number.\n- importance_type('split'): the type of feature importance to fill in feature_importances_. If 'split', the result contains the number of times the feature is used in the model. If 'gain', the result contains the total gain of splits that use the feature.", "_____no_output_____" ] ], [ [ "# LightGBM parameters\nparams = {\n 'metric':'mse',\n 'objective':'regression',\n 'seed':2022,\n 'boosting_type':'gbdt', # other types can be tried one by one; dart does not support early stopping\n 'early_stopping_rounds':10,\n 'subsample':0.8,\n 'feature_fraction':0.75,\n 'bagging_fraction': 0.75,\n 'reg_lambda': 10\n}\n\n\nverbose_flag = False # whether to show detailed training/validation info\nfolds = 5 # 5-fold cross validation\n\n# queues with missing-value features, obtained from eda.ipynb\n# miss_qids = [297, 298, 20889, 21487, 21671, 21673, 81221, 82695, 82697, 82929, 83109, 83609]\nmiss_qids = []\n\n# load data\ndata_path = r'/media/alvinai/Documents/serverless/data/'\n# non-predictor features in the train set: 'QUEUE_ID', 'NEXT_5_CPU_USAGE', 'NEXT_5_LAUNCHING_JOB_NUMS'\n# non-predictor features in the test set: 'ID', 'QUEUE_ID', 'NEXT_5_CPU_USAGE', 'NEXT_5_LAUNCHING_JOB_NUMS'\ndf_train = pd.read_csv(data_path + 'train_v30b1.csv')\ndf_test = pd.read_csv(data_path + 'test_v30b1.csv')\nsub_sample = pd.read_csv(data_path + 'submit_example.csv')\n\ndf_train.drop(['DOTTING_MINUTE_4','CPU_USAGE_3_std'], axis = 1, inplace = True)\ndf_test.drop(['DOTTING_MINUTE_4','CPU_USAGE_3_std'], axis = 1, inplace = True)\n\n# # load the NEXT_5_LAUNCHING_JOB_NUMS results already predicted by lightgbm_ljn.ipynb\n# ljn_predictions = pd.read_csv(r'/media/alvinai/Documents/serverless/result/lgb_ljn_sub_20201108_2156.csv')", "_____no_output_____" ], [ "def cu_error(y, y_pred):\n '''Score the CPU_USAGE error using the official evaluation formula'''\n return np.abs(y - y_pred) * 0.9", "_____no_output_____" ], [ "# def get_import_feats(X_train, Y_train, X_val, Y_val, import_feat_num, params):\n# model = lgb.LGBMRegressor(**params)\n# lgb_model = model.fit(X_train, \n# Y_train,\n# eval_names=['train', 'valid'],\n# eval_set=[(X_train, Y_train), (X_val, Y_val)],\n# verbose=0,\n# eval_metric=params['metric'],\n# early_stopping_rounds=params['early_stopping_rounds'])\n# import_feat_df = pd.DataFrame({\n# 'feature': list(X_train),\n# 'importance': lgb_model.feature_importances_,\n# }).sort_values(by='importance',ascending=False)\n# import_feats = list(import_feat_df['feature'].values)[:import_feat_num]\n# # print(import_feat_df['feature'].values)\n# # print(import_feats)\n# # print(Y_train[import_feats])\n# return X_train[import_feats], Y_train, X_val[import_feats], Y_val, import_feats", "_____no_output_____" ], [ "def 
run_lgb_qid(df_train, df_test, target, qid, params):\n '''Train, validate and evaluate an LGB model for the given target and queue\n Inputs:\n 1. df_train (pd.DataFrame): training set\n 2. df_test (pd.DataFrame): test set\n 3. target (str) : name of the current target variable\n 4. qid (int) : the current queue id\n 5. params (dict) : model parameter dictionary\n Outputs:\n 1. prediction (pd.DataFrame): predictions on the test set\n 2. score (float) : MSE score on the validation set\n 3. formal_score (float) : official evaluation score on the validation set\n '''\n if qid not in miss_qids:\n # normal queues: filter out irrelevant features to get the model input features\n feature_names = list(\n filter(lambda x: x not in ['QUEUE_ID'] + [f'cpu_{i}' for i in range(1,6)], df_train.columns))\n else: \n # queues with missing-value features: also filter out the features containing missing values\n feature_names = list(\n filter(lambda x: x not in ['QUEUE_ID'] + [f'cpu_{i}' for i in range(1,6)] + [f for f in df_train.columns if f.startswith('DISK_USAGE')], df_train.columns))\n\n # extract the data subset for this QUEUE_ID\n df_train = df_train[df_train['QUEUE_ID'] == qid]\n df_test = df_test[df_test['QUEUE_ID'] == qid]\n \n# # print current training info\n# if verbose_flag == True:\n# print(f\"QUEUE_ID:{qid}, target:{target}, train samples:{len(df_train)}, test samples:{len(df_test)}\")\n \n # build the model\n model = lgb.LGBMRegressor(**params)\n \n prediction = df_test[['ID', 'QUEUE_ID']] # holds the predictions averaged over folds\n prediction['pred_' + target] = 0 # initialization\n scores = [] # holds the score of each fold\n pred_valid = np.zeros((len(df_train),)) # initialize validation-set predictions\n \n kfold = StratifiedKFold(n_splits=folds, shuffle=True, random_state=params['seed'])\n\n for fold_id, (trn_idx, val_idx) in enumerate(kfold.split(df_train, df_train[target])):\n # split the dataset\n X_train = df_train.iloc[trn_idx][feature_names]\n Y_train = df_train.iloc[trn_idx][target]\n X_val = df_train.iloc[val_idx][feature_names]\n Y_val = df_train.iloc[val_idx][target]\n \n# # get feature importances\n# import_feat_num = 50\n# X_train, Y_train, X_val, Y_val, import_feats = get_import_feats(X_train, Y_train, X_val, Y_val, import_feat_num, params)\n# feature_names = import_feats\n \n # train the model\n lgb_model = model.fit(X_train, \n Y_train,\n eval_names=['train', 'valid'],\n eval_set=[(X_train, Y_train), (X_val, Y_val)],\n verbose=0,\n eval_metric=params['metric'],\n early_stopping_rounds=params['early_stopping_rounds'])\n \n \n \n # predict on the test set and the held-out validation fold\n pred_test = lgb_model.predict(df_test[feature_names], num_iteration = lgb_model.best_iteration_)\n pred_valid[val_idx] = lgb_model.predict(X_val, num_iteration = lgb_model.best_iteration_)\n # record the raw model score of each fold\n scores.append(lgb_model.best_score_['valid']['l2']) # append the best score of the model in the current fold\n # accumulate the predictions\n prediction['pred_' + target] += pred_test / kfold.n_splits\n# # print feature importances\n# print(pd.DataFrame({\n# 'feature': list(X_train),\n# 'importance': lgb_model.feature_importances_,\n# }).sort_values(by='importance',ascending=False))\n # delete redundant variables\n del lgb_model, pred_test, X_train, Y_train, X_val, Y_val\n gc.collect()\n \n # compute the official evaluation score\n formal_score = np.mean([cu_error(y_true, y_pred) for y_true, y_pred in zip(df_train[target].values.ravel(), pred_valid)])\n \n if verbose_flag == True:\n print(\"MSE score per fold: {}, mean MSE over folds: {:.4f}\".format([np.round(v,2) for v in scores], np.mean(scores)))\n print(\"-\"*60)\n return prediction, np.mean(scores), formal_score\n", "_____no_output_____" ], [ "predictions = list()\nscores = list()\nformal_scores = list()\n\nfor qid in tqdm(df_test['QUEUE_ID'].unique()): \n df = pd.DataFrame()\n for t in [f'cpu_{i}' for i in range(1,6)]:\n prediction, score, formal_score = run_lgb_qid(df_train, df_test, t, qid, params)\n if t == 'cpu_1':\n df = prediction.copy()\n else:\n df = pd.merge(df, prediction, on=['ID', 'QUEUE_ID'], how='left') \n scores.append(score)\n formal_scores.append(formal_score)\n\n predictions.append(df)", "100%|██████████| 23/23 
[03:20<00:00, 8.83s/it]\n" ], [ "print('mean MSE score: ', np.mean(scores))\nprint('mean evaluation score:', np.mean(formal_scores))", "mean MSE score: 38.951149911096294\nmean evaluation score: 2.3944517282261444\n" ], [ "sub = pd.concat(predictions)\n\nsub = sub.sort_values(by='ID').reset_index(drop=True)\nsub.drop(['QUEUE_ID'], axis=1, inplace=True)\nsub.columns = ['ID'] + [f'CPU_USAGE_{i}' for i in range(1,6)]\n\n# setting these all to 0 scores better than the trained results\nfor col in [f'LAUNCHING_JOB_NUMS_{i}' for i in range(1,6)]:\n sub[col] = 0\n \nsub = sub[['ID',\n 'CPU_USAGE_1', 'LAUNCHING_JOB_NUMS_1', \n 'CPU_USAGE_2', 'LAUNCHING_JOB_NUMS_2', \n 'CPU_USAGE_3', 'LAUNCHING_JOB_NUMS_3', \n 'CPU_USAGE_4', 'LAUNCHING_JOB_NUMS_4', \n 'CPU_USAGE_5', 'LAUNCHING_JOB_NUMS_5']]\n\nprint(sub.shape)\nsub.head()", "(2996, 11)\n" ], [ "# note: the submission requires the predictions to be non-negative integers, and the ID must be an integer too\nsub['ID'] = sub['ID'].astype(int)\n\nfor col in [i for i in sub.columns if i != 'ID']:\n sub[col] = sub[col].round()\n sub[col] = sub[col].apply(np.floor)\n sub[col] = sub[col].apply(lambda x: 0 if x<0 else x)\n sub[col] = sub[col].apply(lambda x: 100 if x>100 else x)\n sub[col] = sub[col].astype(int)\n \nsub.head(10)", "_____no_output_____" ], [ "# save the final result\ncurrent_time = datetime.now()\ncurrent_time = current_time.strftime('%Y%m%d_%H%M')\nresult_name = 'lgb_cu_ljn_sub_' + current_time + '_seed2022.csv'\nsub.to_csv(r'/media/alvinai/Documents/serverless/result/' + result_name, index = False)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7d3b4a8399401edff080023a7b9e6ff3995acef
184,777
ipynb
Jupyter Notebook
week4/retraival_img_classification.ipynb
servidal/-M5-T7-Project
ce6c0594017a6b5cc3c3eff759a8741395a14a18
[ "Unlicense", "MIT" ]
null
null
null
week4/retraival_img_classification.ipynb
servidal/-M5-T7-Project
ce6c0594017a6b5cc3c3eff759a8741395a14a18
[ "Unlicense", "MIT" ]
null
null
null
week4/retraival_img_classification.ipynb
servidal/-M5-T7-Project
ce6c0594017a6b5cc3c3eff759a8741395a14a18
[ "Unlicense", "MIT" ]
null
null
null
338.419414
139,362
0.913831
[ [ [ "import os\nimport datetime\nfrom time import time\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\n#from torchsummary import summary\nfrom torch.utils.tensorboard import SummaryWriter\nimport numpy as np\nimport torch.optim as optim\nimport cv2\nimport dataset\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom matplotlib.colors import ListedColormap\nfrom sklearn import neighbors, datasets", "_____no_output_____" ], [ "# Variables\nDATASET_DIR = \"../MIT_split/\"\nLABELS_DICT = {\"Opencountry\":0 , \"coast\":1, \"forest\":2, \"highway\":3, \"inside_city\":4, \"mountain\":5 , \"street\":6, \"tallbuilding\":7}\nMODEL_FNAME = 'model.h5'\nBATCH_SIZE = 16\nEPOCHS = 20\nINPUT_SIZE = 64\nEXPERIMENTS_PATH = 'experiments'\n", "_____no_output_____" ], [ "# check for CUDA availability\nif torch.cuda.is_available():\n print('CUDA is available, setting device to CUDA')\n# set device to CUDA for training\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")", "CUDA is available, setting device to CUDA\n" ], [ "##Instantiate Tensorboard Writer\n#Create log folders\ntrain_logdir = os.path.join(EXPERIMENTS_PATH, datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\"), 'train')\nval_logdir = os.path.join(EXPERIMENTS_PATH, datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\"), 'validation')\n#Create summary writer\ntrain_writer = SummaryWriter(log_dir=train_logdir)\nval_writer = SummaryWriter(log_dir=val_logdir)\n", "_____no_output_____" ], [ "kwargs = {'num_workers': 1, 'pin_memory': True} if torch.cuda.is_available() else {}\ntrain_loader, test_loader = dataset.get_dataloaders(DATASET_DIR, INPUT_SIZE, BATCH_SIZE, kwargs)", "_____no_output_____" ], [ "len(train_loader)", "_____no_output_____" ], [ "# Set up the network and training parameters\n\nfrom losses import ContrastiveLoss\nimport torchvision.models as models\nmargin = 1.\n\nmodel = models.resnet18(pretrained=True)\nmodel.fc = nn.Linear(512, 8)\ncuda = torch.cuda.is_available()\nif torch.cuda.is_available():\n model.cuda()\ncriterion = nn.CrossEntropyLoss()\nlr = 1e-3\noptimizer = optim.Adam(model.parameters(), lr=lr)\nscheduler = optim.lr_scheduler.StepLR(optimizer, 8, gamma=0.1, last_epoch=-1)\nlog_interval = 100", "_____no_output_____" ], [ "model.layer2[-2].conv1", "_____no_output_____" ], [ "from train import fit\nfit(train_loader, test_loader, model, criterion, optimizer, scheduler, EPOCHS, cuda, log_interval)\n", "/home/marcelo/miniconda3/envs/T3-M1/lib/python3.6/site-packages/torch/optim/lr_scheduler.py:134: UserWarning: Detected call of `lr_scheduler.step()` before `optimizer.step()`. In PyTorch 1.1.0 and later, you should call them in the opposite order: `optimizer.step()` before `lr_scheduler.step()`. Failure to do this will result in PyTorch skipping the first value of the learning rate schedule. 
See more details at https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate\n \"https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate\", UserWarning)\n" ], [ "%matplotlib inline\nfrom plot_emb import plot_embeddings, extract_embeddings\nimport numpy as np\ncuda = torch.cuda.is_available()\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nclasses = ['0', '1', '2', '3', '4', '5', '6', '7']\ncolors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728',\n '#9467bd', '#8c564b', '#e377c2', '#7f7f7f',\n ]\n\ndef plot_embeddings(embeddings, targets, xlim=None, ylim=None):\n plt.figure(figsize=(10,10))\n for i in range(len(classes)):\n inds = np.where(targets==i)[0]\n plt.scatter(embeddings[inds,0], embeddings[inds,1], alpha=0.5, color=colors[i])\n if xlim:\n plt.xlim(xlim[0], xlim[1])\n if ylim:\n plt.ylim(ylim[0], ylim[1])\n plt.legend(classes)\n\ndef extract_embeddings(dataloader, model,layer_lvl=2):\n model_cut = nn.Sequential(*list(model.children())[:-layer_lvl])\n with torch.no_grad(): \n model.eval()\n embeddings = np.empty((0,2048))\n labels = np.zeros(len(dataloader.dataset))\n k = 0\n for images, target in dataloader:\n if cuda:\n images = images.cuda()\n #print(images.shape)\n #print(model(images.float()).shape)\n #print(model_cut(images.float()).data.cpu().numpy().shape)\n embeddings = np.vstack((embeddings,model_cut(images.float()).data.cpu().numpy().reshape(-1,2048)))\n #print(embeddings.shape)\n labels[k:k+len(images)] = target.numpy()\n k += len(images)\n return embeddings, labels\ndef fit_retrival(embeddings,labels,mode='knn'):\n if mode == 'knn':\n clf = neighbors.KNeighborsClassifier(n_neighbors=8)\n clf.fit(embeddings, labels)\n return clf\ndef predict_retrival(model,embeddings,labels):\n return model.predict(embeddings),model.score(embeddings,labels)\ndef plot_TSNE(embeddings):\n from sklearn.manifold import TSNE\n X_embedded = TSNE(n_components=2,\n init='random').fit_transform(embeddings)\n return X_embedded\n\ntrain_embeddings_otl, train_labels_otl = extract_embeddings(train_loader, model)\n\nTSNE_embedding = plot_TSNE(train_embeddings_otl)\nretrival_module = fit_retrival(train_embeddings_otl,train_labels_otl,mode='knn')\n\nplot_embeddings(TSNE_embedding, train_labels_otl)\n\n\ntest_embeddings_otl, test_labels_otl = extract_embeddings(test_loader, model)\nlabels,score = predict_retrival(retrival_module,test_embeddings_otl, test_labels_otl)\nprint('score obtained equal to {}'.format(score))", "score obtained equal to 0.9107806691449815\n" ], [ "def precision_recall_curve(X,Y,module):\n from sklearn.metrics import (precision_recall_curve,\n PrecisionRecallDisplay)\n from sklearn import preprocessing\n predictions = module.predict(X)\n y_score = module.predict_proba(X)\n precision = dict()\n clf = preprocessing.LabelBinarizer()\n clf.fit(Y)\n Y = clf.transform(Y)\n recall = dict()\n for i in range(8):\n precision[i], recall[i], _ = precision_recall_curve(Y[:, i],\n y_score[:, i])\n plt.plot(recall[i], precision[i], lw=2, label='class {}'.format(i))\n \n plt.xlabel(\"recall\")\n plt.ylabel(\"precision\")\n plt.legend(loc=\"best\")\n plt.title(\"precision vs. 
recall curve\")\n plt.show()", "_____no_output_____" ], [ "precision_recall_curve(test_embeddings_otl, test_labels_otl,retrival_module)", "_____no_output_____" ], [ "test_labels_otl", "_____no_output_____" ], [ "!python -m pip install scikit-learn --upgrade\n", "Requirement already satisfied: scikit-learn in /home/marcelo/miniconda3/envs/T3-M1/lib/python3.6/site-packages (0.24.2)\nRequirement already satisfied: scipy>=0.19.1 in /home/marcelo/miniconda3/envs/T3-M1/lib/python3.6/site-packages (from scikit-learn) (1.5.2)\nRequirement already satisfied: numpy>=1.13.3 in /home/marcelo/miniconda3/envs/T3-M1/lib/python3.6/site-packages (from scikit-learn) (1.19.2)\nRequirement already satisfied: threadpoolctl>=2.0.0 in /home/marcelo/miniconda3/envs/T3-M1/lib/python3.6/site-packages (from scikit-learn) (3.0.0)\nRequirement already satisfied: joblib>=0.11 in /home/marcelo/miniconda3/envs/T3-M1/lib/python3.6/site-packages (from scikit-learn) (1.1.0)\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7d3bda2e2556a3e615e48b0947805cd9600126c
468,534
ipynb
Jupyter Notebook
test_object_detection.ipynb
OpenSuze/mot_neural_solver
44a5c8270b238535fc0ca83cb5758d43757e2637
[ "MIT" ]
null
null
null
test_object_detection.ipynb
OpenSuze/mot_neural_solver
44a5c8270b238535fc0ca83cb5758d43757e2637
[ "MIT" ]
null
null
null
test_object_detection.ipynb
OpenSuze/mot_neural_solver
44a5c8270b238535fc0ca83cb5758d43757e2637
[ "MIT" ]
null
null
null
1,621.224913
460,148
0.956998
[ [ [ "# process image detect", "_____no_output_____" ] ], [ [ "import cv2\nimport numpy as np\nimport torch\nimport torchvision\nimport os.path as osp\nfrom mot_neural_solver.path_cfg import OUTPUT_PATH, DATA_PATH\nfrom torchvision.models.detection.faster_rcnn import FastRCNNPredictor\nimport matplotlib.pyplot as plt\n\nnum_classes = 2\n_config = {'dataset_dir': 'synthShrimps/test', 'train_params': {'num_epochs': 27, 'batch_size': 16, 'start_ckpt': 'trained_models/frcnn/mot20_frcnn_epoch_27-30mar21.pt.tar', 'save_only_last_ckpt': True}, 'optimizer_params': {'lr': 0.0001, 'momentum': 0.9, 'weight_decay': 0.0005}, 'seed': 620124203}\nmodel_path = osp.join(OUTPUT_PATH, _config['train_params']['start_ckpt'])\nmodel_path", "_____no_output_____" ], [ "img_path='/mnt/gpu_storage/hugo/mot_neural_solver/data/synthShrimps/test/SHRIMP_0009/img1/0001.jpg'\nimg = cv2.imread(img_path)\nimg = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)/255\nimg.shape", "_____no_output_____" ], [ "imgT = torch.tensor(np.transpose(img, (2, 0, 1))).type(torch.cuda.FloatTensor) # channel first\nimgT.shape", "_____no_output_____" ], [ "imgT", "_____no_output_____" ], [ "def get_detection_model(num_classes):\n # load an instance segmentation model pre-trained on COCO\n model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)\n\n # get the number of input features for the classifier\n in_features = model.roi_heads.box_predictor.cls_score.in_features\n # replace the pre-trained head with a new one\n model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)\n model.roi_heads.nms_thresh = 0.3\n\n return model\n\n\ndef plot(img, boxes):\n fig, ax = plt.subplots(1, dpi=96)\n\n img = img.mul(255).permute(1, 2, 0).byte().numpy()\n width, height, _ = img.shape\n \n ax.imshow(img, cmap='gray')\n fig.set_size_inches(width / 80, height / 80)\n\n for box in boxes:\n rect = plt.Rectangle(\n (box[0], box[1]),\n box[2] - box[0],\n box[3] - box[1],\n fill=False,\n linewidth=1.0)\n ax.add_patch(rect)\n\n plt.axis('off')\n plt.show()\n", "_____no_output_____" ], [ "device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\n\n# get the model using our helper function\nmodel = get_detection_model(num_classes)\nmodel.to(device)\n\nmodel_state_dict = torch.load(model_path)\nmodel.load_state_dict(model_state_dict)", "_____no_output_____" ], [ "# put the model in evaluation mode\nmodel.eval()\nwith torch.no_grad():\n prediction = model([imgT.to(device)])[0]\n\nprint(\"prediction\")\nplot(imgT.cpu(), prediction['boxes'])\nprint(prediction)", "prediction\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
e7d3c52b27125abc8892f3a5b4828f1b4e8d02eb
68,280
ipynb
Jupyter Notebook
Class ,Constractor,Inheritance,overriding,Exception.ipynb
Ks226/upgard_code
b3f0bb3bd4deb0687293ccb30881aed2721523d2
[ "MIT" ]
null
null
null
Class ,Constractor,Inheritance,overriding,Exception.ipynb
Ks226/upgard_code
b3f0bb3bd4deb0687293ccb30881aed2721523d2
[ "MIT" ]
null
null
null
Class ,Constractor,Inheritance,overriding,Exception.ipynb
Ks226/upgard_code
b3f0bb3bd4deb0687293ccb30881aed2721523d2
[ "MIT" ]
null
null
null
30.756757
1,383
0.484651
[ [ [ "class Point():\n pass\npoint1 = Point()\npoint2 = point()\nprint(point1)\nprint(point2)\n", "<__main__.Point object at 0x000001D7DA8A0A08>\n<__main__.point object at 0x000001D7DA8A0BC8>\n" ], [ "class point():\n pass\npoint1 = point()\npoint2 = point()\n\npoint1.x = 6\npoint2.x = 10\n\nprint(point1.x)\nprint(point2.x)", "6\n10\n" ], [ "class Point():\n def pointer(getx):\n return getx.x\npoint1 = Point()\npoint2 = Point()\n\npoint1.x = 6\npoint2.x = 10\n\nprint(point1.pointer())\nprint(point2.pointer())", "6\n10\n" ], [ "class Point:\n def __init__(self):\n self.x = 0\n self.y = 0\np = Point()\nq = Point()\nprint(\"there is not difference\")\n ", "there is not difference\n" ], [ "class Point:\n \"\"\" Point class for representing and manipulating x,y coordinates. \"\"\"\n\n def __init__(self):\n\n self.x = 0\n self.y = 0\n\np = Point() # Instantiate an object of type Point\nq = Point() # and make a second point\n\nprint(\"Nothing seems to have happened with the points\")\n", "Nothing seems to have happened with the points\n" ], [ "class Point:\n def __init__(self):\n self.x = 0\n self.y = 0\np = Point()\nq = Point()\n\nprint(p)\nprint(q)\n\nprint(p is q)", "<__main__.Point object at 0x000001FD260E3808>\n<__main__.Point object at 0x000001FD260E3848>\nFalse\n" ], [ "class singh:\n pass\nsingh1 = singh()\nsingh2 = singh()\nprint(singh1)\nprint(singh2)", "<__main__.singh object at 0x000001FD260E5408>\n<__main__.singh object at 0x000001FD260E5448>\n" ], [ "class NumberSet():\n def __init__(self , num1,num2):\n self.num1 = num1\n self.num2 = num2\n def getX(self):\n return self.num1 \nt = NumberSet(6,10)\nprint(t.getX())\n", "6\n" ], [ "class NumberSet():\n def __init__(self,num1,num2):\n self.num1 = num1\n self.num2 = num2\n def getX(self):\n return self.x\n def getY(self):\n return self.y\nt = NumberSet(10,20)\nprint(t.getX)\nprint(t.getY)", "<bound method NumberSet.getX of <__main__.NumberSet object at 0x0000022D861C6788>>\n<bound method NumberSet.getY of <__main__.NumberSet object at 0x0000022D861C6788>>\n" ], [ "class Point:\n \"\"\" Point class for representing and manipulating x,y coordinates. \"\"\"\n\n def __init__(self, initX, initY):\n\n self.x = initX\n self.y = initY\n\n def getX(self):\n return self.x\n\n def getY(self):\n return self.y\n\n def distanceFromOrigin(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5\n\n\np = Point(7,6)\nprint(p.distanceFromOrigin())\n", "9.219544457292887\n" ], [ "class Point:\n \"\"\" Point class for representing and manipulating x,y coordinates. \"\"\"\n\n def __init__(self, initX, initY):\n\n self.x = initX\n self.y = initY\n\n def getX(self):\n return self.x\n\n def getY(self):\n return self.y\n\n\np = Point(7,6)\nprint(p.getX())\nprint(p.getY())\n", "7\n6\n" ], [ "name = [\"Kajal\",\"Tarun\",\"Ashutosh\",\"Renu\"]\nsurname = [\"singh\",\"Singh\",\"Singh\",\"Singh\"]\nage = [19,56,24,54]\ninformation = zip(name,surname,age)\nclass Name():\n def __init__(self,name,surname,age):\n self.name = name\n self.surname = surname \n self.age = age\n def __str__(self):\n return '{} , {} (pop: {})'.formate(self.name,self.surname,self.age)\nFull_name = []\nfor informations in information:\n name , surname,age = informations\n print(name,surname,age)", "Kajal singh 19\nTarun Singh 56\nAshutosh Singh 24\nRenu Singh 54\n" ], [ "import math\n\nclass Point:\n \"\"\" Point class for representing and manipulating x,y coordinates. 
\"\"\"\n\n def __init__(self, initX, initY):\n\n self.x = initX\n self.y = initY\n\n def getX(self):\n return self.x\n\n def getY(self):\n return self.y\n\n def distanceFromOrigin(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5\n\ndef distance(point1, point2):\n xdiff = point2.getX()-point1.getX()\n ydiff = point2.getY()-point1.getY()\n\n dist = math.sqrt(xdiff**2 + ydiff**2)\n return dist\n\np = Point(4,3)\nq = Point(0,0)\nprint(distance(p,q))\n", "5.0\n" ], [ "import math\n\nclass Point:\n \"\"\" Point class for representing and manipulating x,y coordinates. \"\"\"\n\n def __init__(self, initX, initY):\n\n self.x = initX\n self.y = initY\n\n def getX(self):\n return self.x\n\n def getY(self):\n return self.y\n\n def distanceFromOrigin(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5\n\n def distance(self, point2):\n xdiff = point2.getX()-self.getX()\n ydiff = point2.getY()-self.getY()\n\n dist = math.sqrt(xdiff**2 + ydiff**2)\n return dist\n\np = Point(4,3)\nq = Point(0,0)\nprint(p.distance(q))\n", "5.0\n" ], [ "class Point:\n \"\"\" Point class for representing and manipulating x,y coordinates. \"\"\"\n\n def __init__(self, initX, initY):\n\n self.x = initX\n self.y = initY\n\n def getX(self):\n return self.x\n\n def getY(self):\n return self.y\n\n def distanceFromOrigin(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5\n\n def __str__(self):\n return \"x = {}, y = {}\".format(self.x, self.y)\n\np = Point(7,6)\nprint(p)\n", "_____no_output_____" ], [ "class Cereal():\n def __init__(self,name,brand,fiber):\n self.name = name\n self.brand = brand\n self.fiber = fiber\n \n def __str__(self):\n return '{} cereal is produced by {} and has {} grams of fiber in every serving!'.format(self.name,self.brand,self.fiber)\n \nc1 = Cereal(\"Corn Flakes\" ,\"Kellogg's\",2)\nc2 = Cereal(\"Honey Nut Cheerios\",\"General Mills\",3)\nprint(c1)\nprint(c2)", "_____no_output_____" ], [ "class Point:\n \"\"\" Point class for representing and manipulating x,y coordinates. 
\"\"\"\n\n def __init__(self, initX, initY):\n\n self.x = initX\n self.y = initY\n\n def getX(self):\n return self.x\n\n def getY(self):\n return self.y\n\n def distanceFromOrigin(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5\n\n\np = Point(7,6)\nprint(p)\n", "_____no_output_____" ], [ "class Pointer():\n def __init__(self,x,y):\n self.x = x\n self.y = y\n def getX(self):\n return self.x\n def getY(self):\n return self.y \n def __str__(self):\n return \"x = {} , y={}\".format(self.x,self.y)\n def __add__(self , otherwise):\n return (self.x + otherwise.x,self.y + otherwise.y)\ncls = Pointer(12,3)\nprint(cls)", "x = 12 , y=3\n" ], [ "class Point:\n\n def __init__(self, initX, initY):\n\n self.x = initX\n self.y = initY\n\n def getX(self):\n return self.x\n\n def getY(self):\n return self.y\n\n def distanceFromOrigin(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5\n\n def __str__(self):\n return \"x = {}, y = {}\".format(self.x, self.y)\n\n def halfway(self, target):\n mx = (self.x + target.x)/2\n my = (self.y + target.y)/2\n return Point(mx, my)\n\np = Point(3,4)\nq = Point(5,12)\nmid = p.halfway(q)\n# note that you would have exactly the same result if you instead wrote\n# mid = q.halfway(p)\n# because they are both Point objects, and the middle is the same no matter what\n\nprint(mid)\nprint(mid.getX())\nprint(mid.getY())\n", "x = 4.0, y = 8.0\n4.0\n8.0\n" ], [ "L = [\"Cherry\", \"Apple\", \"Blueberry\"]\nprint(sorted(L, key = len))\nprint(sorted(L,key = lambda k:len(k)))", "['Apple', 'Cherry', 'Blueberry']\n['Apple', 'Cherry', 'Blueberry']\n" ], [ "class Fruit():\n def __init__(self,name,price):\n self.name = name\n self.price = price\nL = [Fruit(\"Cherry\", 10), Fruit(\"Apple\", 5), Fruit(\"Blueberry\", 20)]\nfor i in sorted(L , key =lambda k:k.price):\n print(i.name)\n", "Apple\nCherry\nBlueberry\n" ], [ "class Fruit():\n def __init__(self,name,price):\n self.name = name\n self.price = price\n def getPrice(self):\n return self.price\nL = [Fruit(\"Cherry\", 10), Fruit(\"Apple\", 5), Fruit(\"Blueberry\", 20)]\nprint(\"-----sorted by price, referencing a class method-----\")\nfor i in sorted(L,key = Fruit.getPrice):\n print(i.name)\nprint(\"---- one more way to do the same thing-----\")\n\nfor j in sorted(L , key = lambda pric:pric.getPrice()):\n print(j.name)\n\n", "-----sorted by price, referencing a class method-----\nApple\nCherry\nBlueberry\n---- one more way to do the same thing-----\nApple\nCherry\nBlueberry\n" ], [ "class Point:\n \"\"\" Point class for representing and manipulating x,y coordinates. 
\"\"\"\n\n printed_rep = \"*\"\n\n def __init__(self, initX, initY):\n\n self.x = initX\n self.y = initY\n\n def graph(self):\n rows = []\n size = max(int(self.x), int(self.y)) + 2\n for j in range(size-1) :\n if (j+1) == int(self.y):\n special_row = str((j+1) % 10) + (\" \"*(int(self.x) -1)) + self.printed_rep\n rows.append(special_row)\n else:\n rows.append(str((j+1) % 10))\n rows.reverse() # put higher values of y first\n x_axis = \"\"\n for i in range(size):\n x_axis += str(i % 10)\n rows.append(x_axis)\n\n return \"\\n\".join(rows)\n\n\np1 = Point(2, 3)\np2 = Point(3, 12)\nprint(p1.graph())\nprint()\nprint(p2.graph())\n", "4\n3 *\n2\n1\n01234\n\n3\n2 *\n1\n0\n9\n8\n7\n6\n5\n4\n3\n2\n1\n01234567890123\n" ], [ "from random import randrange\n\nclass Pet():\n boredom_decrement = 4\n hunger_decrement = 6\n boredom_threshold = 5\n hunger_threshold = 10\n sounds = ['Mrrp']\n def __init__(self, name = \"Kitty\"):\n self.name = name\n self.hunger = randrange(self.hunger_threshold)\n self.boredom = randrange(self.boredom_threshold)\n self.sounds = self.sounds[:] # copy the class attribute, so that when we make changes to it, we won't affect the other Pets in the class\n\n def clock_tick(self):\n self.boredom += 1\n self.hunger += 1\n\n def mood(self):\n if self.hunger <= self.hunger_threshold and self.boredom <= self.boredom_threshold:\n return \"happy\"\n elif self.hunger > self.hunger_threshold:\n return \"hungry\"\n else:\n return \"bored\"\n\n def __str__(self):\n state = \" I'm \" + self.name + \". \"\n state += \" I feel \" + self.mood() + \". \"\n # state += \"Hunger {} Boredom {} Words {}\".format(self.hunger, self.boredom, self.sounds)\n return state\n\n def hi(self):\n print(self.sounds[randrange(len(self.sounds))])\n self.reduce_boredom()\n\n def teach(self, word):\n self.sounds.append(word)\n self.reduce_boredom()\n\n def feed(self):\n self.reduce_hunger()\n\n def reduce_hunger(self):\n self.hunger = max(0, self.hunger - self.hunger_decrement)\n\n def reduce_boredom(self):\n self.boredom = max(0, self.boredom - self.boredom_decrement)\n\np1 = Pet(\"Fido\")\nprint(p1)\nfor i in range(10):\n p1.clock_tick()\n print(p1)\np1.feed()\np1.hi()\np1.teach(\"Boo\")\nfor i in range(10):\n p1.hi()\nprint(p1)\n", " I'm Fido. I feel happy. \n I'm Fido. I feel happy. \n I'm Fido. I feel happy. \n I'm Fido. I feel bored. \n I'm Fido. I feel hungry. \n I'm Fido. I feel hungry. \n I'm Fido. I feel hungry. \n I'm Fido. I feel hungry. \n I'm Fido. I feel hungry. \n I'm Fido. I feel hungry. \n I'm Fido. I feel hungry. \nMrrp\nBoo\nMrrp\nMrrp\nMrrp\nMrrp\nBoo\nMrrp\nMrrp\nMrrp\nBoo\n I'm Fido. I feel hungry. 
\n" ], [ "import sys\nsys.setExecutionLimit(60000)\n\ndef whichone(petlist, name):\n for pet in petlist:\n if pet.name == name:\n return pet\n return None # no pet matched\n\ndef play():\n animals = []\n\n option = \"\"\n base_prompt = \"\"\"\n Quit\n Adopt <petname_with_no_spaces_please>\n Greet <petname>\n Teach <petname> <word>\n Feed <petname>\n\n Choice: \"\"\"\n feedback = \"\"\n while True:\n action = input(feedback + \"\\n\" + base_prompt)\n feedback = \"\"\n words = action.split()\n if len(words) > 0:\n command = words[0]\n else:\n command = None\n if command == \"Quit\":\n print(\"Exiting...\")\n return\n elif command == \"Adopt\" and len(words) > 1:\n if whichone(animals, words[1]):\n feedback += \"You already have a pet with that name\\n\"\n else:\n animals.append(Pet(words[1]))\n elif command == \"Greet\" and len(words) > 1:\n pet = whichone(animals, words[1])\n if not pet:\n feedback += \"I didn't recognize that pet name. Please try again.\\n\"\n print()\n else:\n pet.hi()\n elif command == \"Teach\" and len(words) > 2:\n pet = whichone(animals, words[1])\n if not pet:\n feedback += \"I didn't recognize that pet name. Please try again.\"\n else:\n pet.teach(words[2])\n elif command == \"Feed\" and len(words) > 1:\n pet = whichone(animals, words[1])\n if not pet:\n feedback += \"I didn't recognize that pet name. Please try again.\"\n else:\n pet.feed()\n else:\n feedback+= \"I didn't understand that. Please try again.\"\n\n for pet in animals:\n pet.clock_tick()\n feedback += \"\\n\" + pet.__str__()\n\n\n\nplay()\n", "_____no_output_____" ] ], [ [ "#Inheritance", "_____no_output_____" ] ], [ [ "from random import randrange\n\n# Here's the original Pet class\nclass Pet():\n boredom_decrement = 4\n hunger_decrement = 6\n boredom_threshold = 5\n hunger_threshold = 10\n sounds = ['Mrrp']\n def __init__(self, name = \"Kitty\"):\n self.name = name\n self.hunger = randrange(self.hunger_threshold)\n self.boredom = randrange(self.boredom_threshold)\n self.sounds = self.sounds[:] # copy the class attribute, so that when we make changes to it, we won't affect the other Pets in the class\n\n def clock_tick(self):\n self.boredom += 1\n self.hunger += 1\n\n def mood(self):\n if self.hunger <= self.hunger_threshold and self.boredom <= self.boredom_threshold:\n return \"happy\"\n elif self.hunger > self.hunger_threshold:\n return \"hungry\"\n else:\n return \"bored\"\n\n def __str__(self):\n state = \" I'm \" + self.name + \". \"\n state += \" I feel \" + self.mood() + \". \"\n # state += \"Hunger %d Boredom %d Words %s\" % (self.hunger, self.boredom, self.sounds)\n return state\n\n def hi(self):\n print(self.sounds[randrange(len(self.sounds))])\n self.reduce_boredom()\n\n def teach(self, word):\n self.sounds.append(word)\n self.reduce_boredom()\n\n def feed(self):\n self.reduce_hunger()\n\n def reduce_hunger(self):\n self.hunger = max(0, self.hunger - self.hunger_decrement)\n\n def reduce_boredom(self):\n self.boredom = max(0, self.boredom - self.boredom_decrement)\n\n# Here's the new definition of class Cat, a subclass of Pet.\nclass Cat(Pet): # the class name that the new class inherits from goes in the parentheses, like so.\n sounds = ['Meow']\n\n def chasing_rats(self):\n return \"What are you doing, Pinky? 
Taking over the world?!\"\n p1 = Pet(\"Fido\")\nprint(p1) # we've seen this stuff before!\n\np1.feed()\np1.hi()\nprint(p1)\n\ncat1 = Cat(\"Fluffy\")\nprint(cat1) # this uses the same __str__ method as the Pets do\n\ncat1.feed() # Totally fine, because the cat class inherits from the Pet class!\ncat1.hi()\nprint(cat1)\n\nprint(cat1.chasing_rats())\n\n#print(p1.chasing_rats()) # This line will give us an error. The Pet class doesn't have this method!\nclass Cheshire(Cat): # this inherits from Cat, which inherits from Pet\n\n def smile(self): # this method is specific to instances of Cheshire\n print(\":D :D :D\")\n\n# Let's try it with instances.\ncat1 = Cat(\"Fluffy\")\ncat1.feed() # Totally fine, because the cat class inherits from the Pet class!\ncat1.hi() # Uses the special Cat hello.\nprint(cat1)\n\nprint(cat1.chasing_rats())\n\nnew_cat = Cheshire(\"Pumpkin\") # create a Cheshire cat instance with name \"Pumpkin\"\nnew_cat.hi() # same as Cat!\nnew_cat.chasing_rats() # OK, because Cheshire inherits from Cat\nnew_cat.smile() # Only for Cheshire instances (and any classes that you make inherit from Cheshire)\n\n# cat1.smile() # This line would give you an error, because the Cat class does not have this method!\n\n# None of the subclass methods can be used on the parent class, though.\np1 = Pet(\"Teddy\")\np1.hi() # just the regular Pet hello\n#p1.chasing_rats() # This will give you an error -- this method doesn't exist on instances of the Pet class.\n#p1.smile() # This will give you an error, too. This method does not exist on instances of the Pet class.\n\n", " I'm Fido. I feel happy. \nBoo\n I'm Fido. I feel happy. \n I'm Fluffy. I feel happy. \nMeow\n I'm Fluffy. I feel happy. \nWhat are you doing, Pinky? Taking over the world?!\nMeow\n I'm Fluffy. I feel happy. \nWhat are you doing, Pinky? 
Taking over the world?!\nMeow\n:D :D :D\nMrrp\n" ], [ "CurrentYear = 2019\nclass Students():\n def __init__(self,name,year):\n self.name = name\n self.year = year\n def getYear(self):\n return CurrentYear - self.year\n def __str__(self):\n return \"{} ({})\".format(self.name , self.getYear())\nclass Details(Students):\n def __init__(self,name,year):\n Students.__init__(self,name,year)\n self.knowledge = 0\n def study(self):\n self.knowledge = + 1\n \nFinal = Details(\"kajal\",2)\nFinal.study()\nprint(Final.getYear())\nprint(Final.knowledge)\nprint(Final)", "2017\n1\nkajal (2017)\n" ], [ "class Book():\n def __init__(self , bookname , author):\n self.bookname = bookname\n self.author = author\n def BookPages(self):\n self.pages\n def __str__(self):\n return \"'{}' by {}\".format(self.bookname,self.author)\nclass EBook(Book):\n def __init__(self , bookname , author ,totalpages):\n Book.__init__(self,bookname,author)\n self.totalpages = totalpages\nclass Number_of_book(Book):\n def __init__(self,bookname,author,NumBook):\n Book.__init__(self,bookname,author)\n self.NumBook = NumBook\n \nclass Library:\n def __init__(self):\n self.book = []\n def addBook(self,books):\n self.book.append(books)\n def sizeBook(self):\n return len(self.book)\n def __str__(self):\n return \"{} {}\".format(self.author , self.name , self.sizeBook)\n\naddL = Library()\naddL.addBook(Number)\naddL.addBook(NumTotal)\nprint(addL.sizeBook)\n \n \n \nNumber = Number_of_book(\"jungke Book\" ,\"Kishore Kumar\" , 2)\nNumTotal = EBook(\"jungle mumma\" , \"kajal singh\",500)\nprint(NumTotal.totalpages)\nprint(Number.NumBook)\n ", "<bound method Library.sizeBook of <__main__.Library object at 0x000001AD89588408>>\n500\n2\n" ], [ "from random import randrange\n\n# Here's the original Pet class\nclass Pet():\n boredom_decrement = 4\n hunger_decrement = 6\n boredom_threshold = 5\n hunger_threshold = 10\n sounds = ['Mrrp']\n def __init__(self, name = \"Kitty\"):\n self.name = name\n self.hunger = randrange(self.hunger_threshold)\n self.boredom = randrange(self.boredom_threshold)\n self.sounds = self.sounds[:] # copy the class attribute, so that when we make changes to it, we won't affect the other Pets in the class\n\n def clock_tick(self):\n self.boredom += 1\n self.hunger += 1\n\n def mood(self):\n if self.hunger <= self.hunger_threshold and self.boredom <= self.boredom_threshold:\n return \"happy\"\n elif self.hunger > self.hunger_threshold:\n return \"hungry\"\n else:\n return \"bored\"\n\n def __str__(self):\n state = \" I'm \" + self.name + \". \"\n state += \" I feel \" + self.mood() + \". 
\"\n # state += \"Hunger %d Boredom %d Words %s\" % (self.hunger, self.boredom, self.sounds)\n return state\n\n def hi(self):\n print(self.sounds[randrange(len(self.sounds))])\n self.reduce_boredom()\n\n def teach(self, word):\n self.sounds.append(word)\n self.reduce_boredom()\n\n def feed(self):\n self.reduce_hunger()\n\n def reduce_hunger(self):\n self.hunger = max(0, self.hunger - self.hunger_decrement)\n\n def reduce_boredom(self):\n self.boredom = max(0, self.boredom - self.boredom_decrement)\nclass Cat(Pet):\n sounds = ['Meow']\n\n def mood(self):\n if self.hunger > self.hunger_threshold:\n return \"hungry\"\n if self.boredom <2:\n return \"grumpy; leave me alone\"\n elif self.boredom > self.boredom_threshold:\n return \"bored\"\n elif randrange(2) == 0:\n return \"randomly annoyed\"\n else:\n return \"happy\"\n\nclass Dog(Pet):\n sounds = ['Woof', 'Ruff']\n\n def mood(self):\n if (self.hunger > self.hunger_threshold) and (self.boredom > self.boredom_threshold):\n return \"bored and hungry\"\n else:\n return \"happy\"\n\nc1 = Cat(\"Fluffy\")\nd1 = Dog(\"Astro\")\n\nc1.boredom = 1\nprint(c1.mood())\nc1.boredom = 3\nfor i in range(10):\n print(c1.mood())\nprint(d1.mood())\n", "grumpy; leave me alone\nrandomly annoyed\nrandomly annoyed\nhappy\nhappy\nhappy\nhappy\nhappy\nrandomly annoyed\nhappy\nrandomly annoyed\nhappy\n" ], [ "from random import randrange\n\n# Here's the original Pet class\nclass Pet():\n boredom_decrement = 4\n hunger_decrement = 6\n boredom_threshold = 5\n hunger_threshold = 10\n sounds = ['Mrrp']\n def __init__(self, name = \"Kitty\"):\n self.name = name\n self.hunger = randrange(self.hunger_threshold)\n self.boredom = randrange(self.boredom_threshold)\n self.sounds = self.sounds[:] # copy the class attribute, so that when we make changes to it, we won't affect the other Pets in the class\n\n def clock_tick(self):\n self.boredom += 1\n self.hunger += 1\n\n def mood(self):\n if self.hunger <= self.hunger_threshold and self.boredom <= self.boredom_threshold:\n return \"happy\"\n elif self.hunger > self.hunger_threshold:\n return \"hungry\"\n else:\n return \"bored\"\n\n def __str__(self):\n state = \" I'm \" + self.name + \". \"\n state += \" I feel \" + self.mood() + \". \"\n # state += \"Hunger %d Boredom %d Words %s\" % (self.hunger, self.boredom, self.sounds)\n return state\n\n def hi(self):\n print(self.sounds[randrange(len(self.sounds))])\n self.reduce_boredom()\n\n def teach(self, word):\n self.sounds.append(word)\n self.reduce_boredom()\n\n def feed(self):\n self.reduce_hunger()\n\n def reduce_hunger(self):\n self.hunger = max(0, self.hunger - self.hunger_decrement)\n\n def reduce_boredom(self):\n self.boredom = max(0, self.boredom - self.boredom_decrement)\n \nfrom random import randrange\n\nclass Dog(Pet):\n sounds = ['Woof', 'Ruff']\n\n def feed(self):\n Pet.feed(self)\n print(\"Arf! Thanks!\")\n\nd1 = Dog(\"Astro\")\n\nd1.feed()\n \n", "Arf! 
Thanks!\n" ], [ "class Bird(Pet):\n sounds = [\"chirp\"]\n def __init__(self, name=\"Kitty\", chirp_number=2):\n Pet.__init__(self, name) # call the parent class's constructor\n # basically, call the SUPER -- the parent version -- of the constructor, with all the parameters that it needs.\n self.chirp_number = chirp_number # now, also assign the new instance variable\n\n def hi(self):\n for i in range(self.chirp_number):\n print(self.sounds[randrange(len(self.sounds))])\n self.reduce_boredom()\n\nb1 = Bird('tweety', 5)\nb1.teach(\"Polly wanna cracker\")\nb1.hi()\n", "Polly wanna cracker\nchirp\nPolly wanna cracker\nchirp\nchirp\n" ], [ "\nclass Pokemon(object):\n attack = 12\n defense = 10\n health = 15\n p_type = \"Normal\"\n\n def __init__(self, name, level = 5):\n self.name = name\n self.level = level\n\n def train(self):\n self.update()\n self.attack_up()\n self.defense_up()\n self.health_up()\n self.level = self.level + 1\n if self.level%self.evolve == 0:\n return self.level, \"Evolved!\"\n else:\n return self.level\n\n def attack_up(self):\n self.attack = self.attack + self.attack_boost\n return self.attack\n\n def defense_up(self):\n self.defense = self.defense + self.defense_boost\n return self.defense\n\n def health_up(self):\n self.health = self.health + self.health_boost\n return self.health\n\n def update(self):\n self.health_boost = 5\n self.attack_boost = 3\n self.defense_boost = 2\n self.evolve = 10\n\n def __str__(self):\n return \"Pokemon name: {}, Type: {}, Level: {}\".format(self.name, self.p_type, self.level)\n\nclass Grass_Pokemon(Pokemon):\n attack = 15\n defense = 14\n health = 12\n p_type = \"Grass\"\n attack_boost=10\n def update(self):\n self.health_boost = 6\n self.attack_boost = 2\n self.defense_boost = 3\n self.evolve = 12\n\n def moves(self):\n self.p_moves = [\"razor leaf\", \"synthesis\", \"petal dance\"]\n\n\np2=Grass_Pokemon(\"Bulby\")\np3=Grass_Pokemon(\"Pika\")", "_____no_output_____" ], [ "class Pokemon(object):\n attack = 12\n defense = 10\n health = 15\n p_type = \"Normal\"\n\n def __init__(self, name, level = 5):\n self.name = name\n self.level = level\n\n def train(self):\n self.update()\n self.attack_up()\n self.defense_up()\n self.health_up()\n self.level = self.level + 1\n if self.level%self.evolve == 0:\n return self.level, \"Evolved!\"\n else:\n return self.level\n\n def attack_up(self):\n self.attack = self.attack + self.attack_boost\n return self.attack\n\n def defense_up(self):\n self.defense = self.defense + self.defense_boost\n return self.defense\n\n def health_up(self):\n self.health = self.health + self.health_boost\n return self.health\n\n def update(self):\n self.health_boost = 5\n self.attack_boost = 3\n self.defense_boost = 2\n self.evolve = 10\n\n def __str__(self):\n return \"Pokemon name: {}, Type: {}, Level: {}\".format(self.name, self.p_type, self.level)\n\nclass Grass_Pokemon(Pokemon):\n attack = 15\n defense = 14\n health = 12\n p_type = \"Grass\"\n\n def update(self):\n self.health_boost = 6\n self.attack_boost = 2\n self.defense_boost = 3\n self.evolve = 12\n\n def moves(self):\n self.p_moves = [\"razor leaf\", \"synthesis\", \"petal dance\"]\np2 = Grass_Pokemon(\"Bulby\")\np3 =Grass_Pokemon(\"Pika\")\nprint(p2)\nprint(p3)\n\n", "_____no_output_____" ], [ "class Pokemon():\n attack = 12\n defense = 10\n health = 15\n p_type = \"Normal\"\n\n def __init__(self, name,level = 5):\n self.name = name\n self.level = level\n self.weak = \"Normal\"\n self.strong = \"Normal\"\n\n def train(self):\n self.update()\n self.attack_up()\n 
self.defense_up()\n self.health_up()\n self.level = self.level + 1\n if self.level%self.evolve == 0:\n return self.level, \"Evolved!\"\n else:\n return self.level\n\n def attack_up(self):\n self.attack = self.attack + self.attack_boost\n return self.attack\n\n def defense_up(self):\n self.defense = self.defense + self.defense_boost\n return self.defense\n\n def health_up(self):\n self.health = self.health + self.health_boost\n return self.health\n\n def update(self):\n self.health_boost = 5\n self.attack_boost = 3\n self.defense_boost = 2\n self.evolve = 10\n\n def __str__(self):\n self.update()\n return \"Pokemon name: {}, Type: {}, Level: {}\".format(self.name, self.p_type, self.level)\n \n def opponent(self):\n return self.weak, self.strong\n\nclass Grass_Pokemon(Pokemon):\n attack = 15\n defense = 14\n health = 12\n p_type = \"Grass\"\n \n def __init__(self, name,level = 5):\n self.name = name\n self.level = level\n self.weak = \"Fire\"\n self.strong = \"Water\"\n\n def update(self):\n self.health_boost = 6\n self.attack_boost = 2\n self.defense_boost = 3\n self.evolve = 12\n\nclass Ghost_Pokemon(Pokemon):\n p_type = \"Ghost\"\n \n def __init__(self, name,level = 5):\n self.name = name\n self.level = level\n self.weak = \"Dark\"\n self.strong = \"Psychic\"\n \n def update(self):\n self.health_boost = 3\n self.attack_boost = 4\n self.defense_boost = 3\n\nclass Fire_Pokemon(Pokemon):\n p_type = \"Fire\"\n \n def __init__(self, name,level = 5):\n self.name = name\n self.level = level\n self.weak = \"Water\"\n self.strong = \"Grass\" \n\nclass Flying_Pokemon(Pokemon):\n p_type = \"Flying\"\n\n def __init__(self, name,level = 5):\n self.name = name\n self.level = level\n self.weak = \"Electric\"\n self.strong = \"Fighting\" ", "_____no_output_____" ], [ "def Square(x):\n return x*x\nimport test\ntest.testEqual(Square(10),100)", "_____no_output_____" ], [ "x = 3\ny = 4\nif x < y:\n z = x\n print(z)\nelse:\n if x > y:\n z = y\n print(z)\n else:\n ## x must be equal to y\n assert x==y\n z = 0\n", "3\n" ], [ "nums = [1, 5, 8]\n\naccum = 0\nfor w in nums:\n accum = accum + w\nassert accum == 14\nprint(accum)\n", "14\n" ], [ "nums = []\n\naccum = 0\nfor w in nums:\n accum = accum + w\nassert accum == None\n", "_____no_output_____" ], [ "nums = []\n\nif len(nums) == 0:\n accum = None\nelse:\n accum = 0\n for w in nums:\n accum = accum + w\nassert accum == None\n", "_____no_output_____" ], [ "def distance(x1, y1, x2, y2):\n return 0\nimport test\ntest.testEqual(distance(1, 2, 1, 2), 0)\ntest.testEqual(distance(1,2, 4,6),5)\n\n", "_____no_output_____" ], [ "class Point:\n \"\"\" Point class for representing and manipulating x,y coordinates. 
\"\"\"\n\n def __init__(self, initX, initY):\n\n self.x = initX\n self.y = initY\n\n def distanceFromOrigin(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5\n\n def move(self, dx, dy):\n self.x = self.x + dx\n self.y = self.y + dy\n\nimport test\n#testing class constructor (__init__ method)\np = Point(3, 4)\ntest.testEqual(p.y,4)\ntest.testEqual(p.x,3)\n\n#testing the distance method\np = Point(3, 4)\ntest.testEqual(p.distanceFromOrigin(),5.0)\n\n#testing the move method\np = Point(3, 4)\np.move(-2, 3)\ntest.testEqual(p.x ,1)\ntest.testEqual(p.y,7)\n", "_____no_output_____" ], [ "try:\n items = ['a', 'b']\n third = items[2]\n print(\"This won't print\")\nexcept Exception:\n print(\"got an error\")\n\nprint(\"continuing\")\n", "_____no_output_____" ], [ "try:\n items = ['a', 'b']\n third = items[2]\n print(\"This won't print\")\nexcept IndexError:\n print(\"error 1\")\n\nprint(\"continuing\")\n\ntry:\n x = 5\n y = x/0\n print(\"This won't print, either\")\nexcept IndexError:\n print(\"error 2\")\n\nexcept ZeroDivisionError:\n print(\"continuing again\")\n", "_____no_output_____" ], [ "try:\n items = ['a', 'b']\n third = items[2]\n print(\"This won't print\")\nexcept Exception as e:\n print(\"got an error\")\n print(e)\n\nprint(\"continuing\")\n", "_____no_output_____" ], [ "d = [1,2,3,45,6]\nif somekey in d:\n # it's there; extract the data\n extract_data(d)\nelse:\n skip_this_one(d)\n\ntry:\n extract_data(d)\nexcept:\n skip_this_one(d) ", "_____no_output_____" ] ], [ [ "Provided is a buggy for loop that tries to accumulate some values out of some dictionaries. Insert a try/except so that the code passes.", "_____no_output_____" ] ], [ [ "\ndi = [{\"Puppies\": 17, 'Kittens': 9, \"Birds\": 23, 'Fish': 90, \"Hamsters\": 49}, {\"Puppies\": 23, \"Birds\": 29, \"Fish\": 20, \"Mice\": 20, \"Snakes\": 7}, {\"Fish\": 203, \"Hamsters\": 93, \"Snakes\": 25, \"Kittens\": 89}, {\"Birds\": 20, \"Puppies\": 90, \"Snakes\": 21, \"Fish\": 10, \"Kittens\": 67}]\ntotal = 0\nfor diction in di:\n try:\n diction.keys == \"Puppies\"\n total = total + diction['Puppies']\n except:\n pass\nprint(\"Total number of puppies:\", total)\n\n\n", "_____no_output_____" ] ], [ [ "The code below takes the list of country, country, and searches to see if it is in the dictionary gold which shows some countries who won gold during the Olympics. However, this code currently does not work. Correctly add try/except clause in the code so that it will correctly populate the list, country_gold, with either the number of golds won or the string “Did not get gold”.", "_____no_output_____" ] ], [ [ "\ngold = {\"US\":46, \"Fiji\":1, \"Great Britain\":27, \"Cuba\":5, \"Thailand\":2, \"China\":26, \"France\":10}\ncountry = [\"Fiji\", \"Chile\", \"Mexico\", \"France\", \"Norway\", \"US\"]\ncountry_gold = []\nprint(gold.keys())\nfor x in country:\n try:\n x in gold.keys()\n country_gold.append(gold[x])\n except KeyError:\n country_gold.append(\"Did not get gold\")\n\nprint(country_gold)", "_____no_output_____" ] ], [ [ "The list, numb, contains integers. Write code that populates the list remainder with the remainder of 36 divided by each number in numb. For example, the first element should be 0, because 36/6 has no remainder. 
If there is an error, have the string “Error” appear in the remainder.", "_____no_output_____" ] ], [ [ "\nnumb = [6, 0, 36, 8, 2, 36, 0, 12, 60, 0, 45, 0, 3, 23]\n\nremainder = []\nfor i in numb:\n if (i == 0):\n remainder.append(\"Error\")\n elif (36 % i):\n remainder.append(36 % i)\n elif (36 % i == 0):\n remainder.append(0)\nprint(remainder)\n", "_____no_output_____" ] ], [ [ "Provided is buggy code, insert a try/except so that the code passes.", "_____no_output_____" ] ], [ [ "\nlst = [2,4,10,42,12,0,4,7,21,4,83,8,5,6,8,234,5,6,523,42,34,0,234,1,435,465,56,7,3,43,23]\n\nlst_three = []\n\nfor num in lst:\n try:\n if 3 % num == 0:\n lst_three.append(num)\n except ZeroDivisionError:\n pass\nprint(lst_three)\n\n", "_____no_output_____" ] ], [ [ "Write code so that the buggy code provided works using a try/except. When the codes does not work in the try, have it append to the list attempt the string “Error”.", "_____no_output_____" ] ], [ [ "\nfull_lst = [\"ab\", 'cde', 'fgh', 'i', 'jkml', 'nop', 'qr', 's', 'tv', 'wxy', 'z']\n\nattempt = []\n\nfor elem in full_lst:\n try:\n attempt.append(elem[1])\n except:\n attempt.append(\"Error\")\n", "_____no_output_____" ] ], [ [ "The following code tries to append the third element of each list in conts to the new list third_countries. Currently, the code does not work. Add a try/except clause so the code runs without errors, and the string ‘Continent does not have 3 countries’ is appended to countries instead of producing an error.", "_____no_output_____" ] ], [ [ "\nconts = [['Spain', 'France', 'Greece', 'Portugal', 'Romania', 'Germany'], ['USA', 'Mexico', 'Canada'], ['Japan', 'China', 'Korea', 'Vietnam', 'Cambodia'], ['Argentina', 'Chile', 'Brazil', 'Ecuador', 'Uruguay', 'Venezuela'], ['Australia'], ['Zimbabwe', 'Morocco', 'Kenya', 'Ethiopa', 'South Africa'], ['Antarctica']]\n\nthird_countries = []\n\nfor c in conts:\n try:\n third_countries.append(c[2])\n except IndexError:\n third_countries.append(\"Continent does not have 3 countries\")\n\n", "_____no_output_____" ] ], [ [ "The buggy code below prints out the value of the sport in the list sport. Use try/except so that the code will run properly. If the sport is not in the dictionary, ppl_play, add it in with the value of 1.", "_____no_output_____" ] ], [ [ "\nsport = [\"hockey\", \"basketball\", \"soccer\", \"tennis\", \"football\", \"baseball\"]\n\nppl_play = {\"hockey\":4, \"soccer\": 10, \"football\": 15, \"tennis\": 8}\n\nfor x in sport:\n try:\n print(ppl_play[x])\n except KeyError:\n ppl_play[x] = 1", "_____no_output_____" ] ], [ [ "Provided is a buggy for loop that tries to accumulate some values out of some dictionaries. Insert a try/except so that the code passes. 
If the key is not there, initialize it in the dictionary and set the value to zero.", "_____no_output_____" ] ], [ [ "\ndi = [{\"Puppies\": 17, 'Kittens': 9, \"Birds\": 23, 'Fish': 90, \"Hamsters\": 49}, {\"Puppies\": 23, \"Birds\": 29, \"Fish\": 20, \"Mice\": 20, \"Snakes\": 7}, {\"Fish\": 203, \"Hamsters\": 93, \"Snakes\": 25, \"Kittens\": 89}, {\"Birds\": 20, \"Puppies\": 90, \"Snakes\": 21, \"Fish\": 10, \"Kittens\": 67}]\ntotal = 0\nfor diction in di:\n try:\n diction.keys() == \"Puppies\"\n total = total + diction['Puppies']\n except :\n pass\n if(\"Puppies\" not in diction.keys()):\n diction[\"Puppies\"] = 0\n\nprint(\"Total number of puppies:\", total)\n\n\n", "Total number of puppies: 130\n" ], [ "VOWEL_COST = 250\nLETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\nVOWELS = 'AEIOU'\n\n\n# Write the WOFPlayer class definition (part A) here\nclass WOFPlayer:\n prizeMoney = 0\n prizes = []\n\n def __init__(self, name):\n self.name = name\n\n def addMoney(self, amt):\n self.prizeMoney += amt\n\n def goBankrupt(self):\n self.prizeMoney = 0\n\n def addPrize(prizes,prize):\n prizes.append(prize)\n\n def __str__(self):\n return \"%s (%s)\" % (self.name, self.prize)\n\n\n# Write the WOFHumanPlayer class definition (part B) here\nclass WOFHumanPlayer(WOFPlayer):\n def getMove(self, category, obscuredPhrase, guesse):\n input(\n \"{%s} has ${%s}\\n\"\n \"Category: {%s}\\n\"\n \"Phrase: {%s}\\n\"\n \"Guessed: {%s}\\n\"\n \"Guess a letter, phrase, or type 'exit' or 'pass':\\n\") % (\n self.name, self.prizeMoney, category, obscuredPhrase, guesse)\n return (\"%s\") % (guesse)\n\n\n# Write the WOFComputerPlayer class definition (part C) here\nclass WOFComputerPlayer(WOFPlayer):\n SORTED_FREQUENCIES = \"ZQXJKVBPYGFWMUCLDRHSNIOATE\"\n VOWEL_COST = 250\n VOWELS = \"AEIOU\"\n\n def __init__(self, difficulty):\n self.difficulty = difficulty\n\n def smartCoinFlip(self):\n random_num = random.randint(1, 10)\n if random_num > self.difficulty:\n return True\n else:\n return False\n\n def getPossibleLetters(self, guessed):\n return guessed.upper()\n\n def getMove(category, obscuredPhrase, guessed):\n return getPossibleLetters(guessed)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
e7d3c850e8f9df5cc1a26f867a01307ffce1d15d
22,676
ipynb
Jupyter Notebook
notebooks/08-data_types.ipynb
chendaniely/2018-10-python2
380e87bc5968fabf10732da12d1fdb8baff095db
[ "MIT" ]
1
2020-04-27T16:18:42.000Z
2020-04-27T16:18:42.000Z
notebooks/08-data_types.ipynb
chendaniely/2018-10-python2
380e87bc5968fabf10732da12d1fdb8baff095db
[ "MIT" ]
null
null
null
notebooks/08-data_types.ipynb
chendaniely/2018-10-python2
380e87bc5968fabf10732da12d1fdb8baff095db
[ "MIT" ]
1
2019-02-24T17:27:10.000Z
2019-02-24T17:27:10.000Z
47.241667
1,281
0.539734
[ [ [ "import pandas as pd\nimport seaborn as sns", "_____no_output_____" ], [ "tips = sns.load_dataset('tips')", "_____no_output_____" ], [ "tips.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 244 entries, 0 to 243\nData columns (total 7 columns):\ntotal_bill 244 non-null float64\ntip 244 non-null float64\nsex 244 non-null category\nsmoker 244 non-null category\nday 244 non-null category\ntime 244 non-null category\nsize 244 non-null int64\ndtypes: category(4), float64(2), int64(1)\nmemory usage: 7.2 KB\n" ], [ "tips.dtypes", "_____no_output_____" ], [ "tips['sex_str'] = tips['sex'].astype(str)", "_____no_output_____" ], [ "tips.dtypes", "_____no_output_____" ], [ "tips_sub_miss = tips.head()", "_____no_output_____" ], [ "tips_sub_miss.loc[[1, 3], 'total_bill'] = 'missing'", "C:\\Users\\Danie\\Anaconda3\\lib\\site-packages\\pandas\\core\\indexing.py:543: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n self.obj[item] = s\n" ], [ "tips_sub_miss", "_____no_output_____" ], [ "tips_sub_miss['total_bill'].astype(float)", "_____no_output_____" ], [ "pd.to_numeric(tips_sub_miss['total_bill'])", "_____no_output_____" ], [ "tips_sub_miss['total_bill'] = pd.to_numeric(tips_sub_miss['total_bill'], errors='coerce')", "C:\\Users\\Danie\\Anaconda3\\lib\\site-packages\\ipykernel_launcher.py:1: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n \"\"\"Entry point for launching an IPython kernel.\n" ], [ "tips_sub_miss", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7d3ca6dca662ce7954d2f13fcd40b001755d7b6
304,404
ipynb
Jupyter Notebook
docs/manual/user_guide/disaggregation_and_metrics.ipynb
Ming-er/nilmtk
6ddeff982da761d6fe5a9c527fbe77987f0ae69f
[ "Apache-2.0" ]
null
null
null
docs/manual/user_guide/disaggregation_and_metrics.ipynb
Ming-er/nilmtk
6ddeff982da761d6fe5a9c527fbe77987f0ae69f
[ "Apache-2.0" ]
null
null
null
docs/manual/user_guide/disaggregation_and_metrics.ipynb
Ming-er/nilmtk
6ddeff982da761d6fe5a9c527fbe77987f0ae69f
[ "Apache-2.0" ]
1
2019-08-17T06:16:29.000Z
2019-08-17T06:16:29.000Z
269.861702
96,092
0.914246
[ [ [ "# Disaggregation", "_____no_output_____" ] ], [ [ "from __future__ import print_function, division\nimport time\n\nfrom matplotlib import rcParams\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nfrom six import iteritems\n\nfrom nilmtk import DataSet, TimeFrame, MeterGroup, HDFDataStore\nfrom nilmtk.disaggregate import CombinatorialOptimisation, FHMM\nimport nilmtk.utils\n\n%matplotlib inline", "_____no_output_____" ], [ "rcParams['figure.figsize'] = (13, 6)", "_____no_output_____" ] ], [ [ "### Dividing data into train and test set", "_____no_output_____" ] ], [ [ "train = DataSet('/data/redd.h5')\ntest = DataSet('/data/redd.h5')", "_____no_output_____" ] ], [ [ "Let us use building 1 for demo purposes", "_____no_output_____" ] ], [ [ "building = 1", "_____no_output_____" ] ], [ [ "Let's split data at April 30th", "_____no_output_____" ] ], [ [ "train.set_window(end=\"2011-04-30\")\ntest.set_window(start=\"2011-04-30\")\n\ntrain_elec = train.buildings[1].elec\ntest_elec = test.buildings[1].elec", "_____no_output_____" ] ], [ [ "### Visualizing the data", "_____no_output_____" ] ], [ [ "train_elec.plot()", "Loading data for meter ElecMeterID(instance=4, building=1, dataset='REDD') \nDone loading data all meters for this chunk.\nLoading data for meter ElecMeterID(instance=20, building=1, dataset='REDD') \nDone loading data all meters for this chunk.\n" ], [ "test_elec.mains().plot()", "_____no_output_____" ] ], [ [ "REDD data set has got appliance level data sampled every 3 or 4 seconds and mains data sampled every 1 second. Let us verify the same.", "_____no_output_____" ] ], [ [ "fridge_meter = train_elec['fridge']", "_____no_output_____" ], [ "fridge_df = next(fridge_meter.load())", "_____no_output_____" ], [ "fridge_df.head()", "_____no_output_____" ], [ "mains = train_elec.mains()", "_____no_output_____" ], [ "mains_df = next(mains.load())", "Loading data for meter ElecMeterID(instance=2, building=1, dataset='REDD') \nDone loading data all meters for this chunk.\n" ], [ "mains_df.head()", "_____no_output_____" ] ], [ [ "Since, both of these are sampled at different frequencies, we will downsample both to 1 minute resolution. We will also select the top-5 appliances in terms of energy consumption and use them for training our FHMM and CO models.", "_____no_output_____" ], [ "### Selecting top-5 appliances", "_____no_output_____" ] ], [ [ "top_5_train_elec = train_elec.submeters().select_top_k(k=5)", "15/16 MeterGroup(meters==19, building=1, dataset='REDD', appliances=[Appliance(type='unknown', instance=2)])e=1)])ce=1)])\n ElecMeter(instance=3, building=1, dataset='REDD', appliances=[Appliance(type='electric oven', instance=1)])\n ElecMeter(instance=4, building=1, dataset='REDD', appliances=[Appliance(type='electric oven', instance=1)])\n16/16 MeterGroup(meters= for ElecMeterID(instance=4, building=1, dataset='REDD') ... \n ElecMeter(instance=10, building=1, dataset='REDD', appliances=[Appliance(type='washer dryer', instance=1)])\n ElecMeter(instance=20, building=1, dataset='REDD', appliances=[Appliance(type='washer dryer', instance=1)])\nCalculating total_energy for ElecMeterID(instance=10, building=1, dataset='REDD') ... total_energy for ElecMeterID(instance=20, building=1, dataset='REDD') ... 
" ], [ "top_5_train_elec", "_____no_output_____" ] ], [ [ "### Training and disaggregation", "_____no_output_____" ], [ "#### A function to disaggregate the mains data to constituent appliances and return the predictions", "_____no_output_____" ] ], [ [ "def predict(clf, test_elec, sample_period, timezone):\n pred = {}\n gt= {}\n \n # \"ac_type\" varies according to the dataset used. \n # Make sure to use the correct ac_type before using the default parameters in this code. \n for i, chunk in enumerate(test_elec.mains().load(physical_quantity = 'power', ac_type = 'apparent', sample_period=sample_period)):\n chunk_drop_na = chunk.dropna()\n pred[i] = clf.disaggregate_chunk(chunk_drop_na)\n gt[i]={}\n\n for meter in test_elec.submeters().meters:\n # Only use the meters that we trained on (this saves time!) \n gt[i][meter] = next(meter.load(physical_quantity = 'power', ac_type = 'active', sample_period=sample_period))\n gt[i] = pd.DataFrame({k:v.squeeze() for k,v in iteritems(gt[i]) if len(v)}, index=next(iter(gt[i].values())).index).dropna()\n \n # If everything can fit in memory\n gt_overall = pd.concat(gt)\n gt_overall.index = gt_overall.index.droplevel()\n pred_overall = pd.concat(pred)\n pred_overall.index = pred_overall.index.droplevel()\n\n # Having the same order of columns\n gt_overall = gt_overall[pred_overall.columns]\n \n #Intersection of index\n gt_index_utc = gt_overall.index.tz_convert(\"UTC\")\n pred_index_utc = pred_overall.index.tz_convert(\"UTC\")\n common_index_utc = gt_index_utc.intersection(pred_index_utc)\n \n common_index_local = common_index_utc.tz_convert(timezone)\n gt_overall = gt_overall.loc[common_index_local]\n pred_overall = pred_overall.loc[common_index_local]\n appliance_labels = [m for m in gt_overall.columns.values]\n gt_overall.columns = appliance_labels\n pred_overall.columns = appliance_labels\n return gt_overall, pred_overall", "_____no_output_____" ] ], [ [ "#### Train using 2 benchmarking algorithms - Combinatorial Optimisation (CO) and Factorial Hidden Markov Model (FHMM)", "_____no_output_____" ] ], [ [ "classifiers = {'CO':CombinatorialOptimisation(), 'FHMM':FHMM()}\npredictions = {}\nsample_period = 120\nfor clf_name, clf in classifiers.items():\n print(\"*\"*20)\n print(clf_name)\n print(\"*\" *20)\n start = time.time()\n # Note that we have given the sample period to downsample the data to 1 minute. 
\n # If instead of top_5 we wanted to train on all appliance, we would write \n # fhmm.train(train_elec, sample_period=60)\n clf.train(top_5_train_elec, sample_period=sample_period)\n end = time.time()\n print(\"Runtime =\", end-start, \"seconds.\")\n gt, predictions[clf_name] = predict(clf, test_elec, sample_period, train.metadata['timezone'])\n ", "********************\nCO\n********************\nTraining model for submeter 'ElecMeter(instance=11, building=1, dataset='REDD', appliances=[Appliance(type='microwave', instance=1)])'\nTraining model for submeter 'ElecMeter(instance=8, building=1, dataset='REDD', appliances=[Appliance(type='sockets', instance=2)])'\nTraining model for submeter 'ElecMeter(instance=9, building=1, dataset='REDD', appliances=[Appliance(type='light', instance=1)])'\nTraining model for submeter 'ElecMeter(instance=5, building=1, dataset='REDD', appliances=[Appliance(type='fridge', instance=1)])'\nTraining model for submeter 'ElecMeter(instance=6, building=1, dataset='REDD', appliances=[Appliance(type='dish washer', instance=1)])'\nDone training!\nRuntime = 1.8285462856292725 seconds.\nLoading data for meter ElecMeterID(instance=2, building=1, dataset='REDD') \nDone loading data all meters for this chunk.\nEstimating power demand for 'ElecMeter(instance=11, building=1, dataset='REDD', appliances=[Appliance(type='microwave', instance=1)])'\nEstimating power demand for 'ElecMeter(instance=8, building=1, dataset='REDD', appliances=[Appliance(type='sockets', instance=2)])'\nEstimating power demand for 'ElecMeter(instance=9, building=1, dataset='REDD', appliances=[Appliance(type='light', instance=1)])'\nEstimating power demand for 'ElecMeter(instance=5, building=1, dataset='REDD', appliances=[Appliance(type='fridge', instance=1)])'\nEstimating power demand for 'ElecMeter(instance=6, building=1, dataset='REDD', appliances=[Appliance(type='dish washer', instance=1)])'\nLoading data for meter ElecMeterID(instance=4, building=1, dataset='REDD') \nDone loading data all meters for this chunk.\nLoading data for meter ElecMeterID(instance=20, building=1, dataset='REDD') \nDone loading data all meters for this chunk.\n********************\nFHMM\n********************\nTraining model for submeter 'ElecMeter(instance=11, building=1, dataset='REDD', appliances=[Appliance(type='microwave', instance=1)])'\nTraining model for submeter 'ElecMeter(instance=8, building=1, dataset='REDD', appliances=[Appliance(type='sockets', instance=2)])'\nTraining model for submeter 'ElecMeter(instance=9, building=1, dataset='REDD', appliances=[Appliance(type='light', instance=1)])'\nTraining model for submeter 'ElecMeter(instance=5, building=1, dataset='REDD', appliances=[Appliance(type='fridge', instance=1)])'\nTraining model for submeter 'ElecMeter(instance=6, building=1, dataset='REDD', appliances=[Appliance(type='dish washer', instance=1)])'\nRuntime = 2.4450082778930664 seconds.\nLoading data for meter ElecMeterID(instance=2, building=1, dataset='REDD') \nDone loading data all meters for this chunk.\nLoading data for meter ElecMeterID(instance=4, building=1, dataset='REDD') \nDone loading data all meters for this chunk.\nLoading data for meter ElecMeterID(instance=20, building=1, dataset='REDD') \nDone loading data all meters for this chunk.\n" ] ], [ [ "Using prettier labels!", "_____no_output_____" ] ], [ [ "appliance_labels = [m.label() for m in gt.columns.values]", "_____no_output_____" ], [ "gt.columns = appliance_labels\npredictions['CO'].columns = appliance_labels\npredictions['FHMM'].columns 
= appliance_labels", "_____no_output_____" ] ], [ [ "#### Taking a look at the ground truth of top 5 appliance power consumption", "_____no_output_____" ] ], [ [ "gt.head()", "_____no_output_____" ], [ "predictions['CO'].head()", "_____no_output_____" ], [ "predictions['FHMM'].head()", "_____no_output_____" ] ], [ [ "### Plotting the predictions against the actual usage", "_____no_output_____" ] ], [ [ "predictions['CO']['Fridge'].head(300).plot(label=\"Pred\")\ngt['Fridge'].head(300).plot(label=\"GT\")\nplt.legend()", "_____no_output_____" ], [ "predictions['FHMM']['Fridge'].head(300).plot(label=\"Pred\")\ngt['Fridge'].head(300).plot(label=\"GT\")\nplt.legend()", "_____no_output_____" ] ], [ [ "### Comparing NILM algorithms (CO vs FHMM)", "_____no_output_____" ], [ "`nilmtk.utils.compute_rmse` is an extended of the following, handling both missing values and labels better:\n```python\ndef compute_rmse(gt, pred):\n from sklearn.metrics import mean_squared_error\n rms_error = {}\n for appliance in gt.columns:\n rms_error[appliance] = np.sqrt(mean_squared_error(gt[appliance], pred[appliance]))\n return pd.Series(rms_error)\n```", "_____no_output_____" ] ], [ [ "? nilmtk.utils.compute_rmse", "_____no_output_____" ], [ "rmse = {}\nfor clf_name in classifiers.keys():\n rmse[clf_name] = nilmtk.utils.compute_rmse(gt, predictions[clf_name])\n\nrmse = pd.DataFrame(rmse)\nrmse", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ] ]
e7d3f2b17e27fdb7c4b7a5bcedba3310aad42683
48,638
ipynb
Jupyter Notebook
Logic test.ipynb
aiazm496/Practice-ML
2177ef867bcf131a633f5f13789dde076726f3b0
[ "Unlicense" ]
1
2020-12-20T15:25:57.000Z
2020-12-20T15:25:57.000Z
.ipynb_checkpoints/Logic test-checkpoint.ipynb
aiazm496/Practice-ML
2177ef867bcf131a633f5f13789dde076726f3b0
[ "Unlicense" ]
null
null
null
.ipynb_checkpoints/Logic test-checkpoint.ipynb
aiazm496/Practice-ML
2177ef867bcf131a633f5f13789dde076726f3b0
[ "Unlicense" ]
null
null
null
53.156284
16,624
0.663864
[ [ [ "import pandas as pd\nimport numpy as np", "_____no_output_____" ], [ "import seaborn as sns\nimport matplotlib.pyplot as plt\n%matplotlib inline", "_____no_output_____" ], [ "df = pd.read_csv('titanic_train.csv')\ndf.head()\n", "_____no_output_____" ], [ "df.drop(['PassengerId','Pclass','Name','Age','SibSp','Parch','Ticket','Fare','Cabin','Embarked'],axis = 1 , inplace=True)\ndf.head()", "_____no_output_____" ], [ "Sex = pd.get_dummies(df['Sex'],drop_first=True)\nSex", "_____no_output_____" ], [ "df = pd.concat([df,Sex],axis = 1)\ndf.head()", "_____no_output_____" ], [ "df.drop(['Sex'],axis = 1 , inplace=True)\ndf.head()", "_____no_output_____" ], [ "sns.countplot(x = 'Survived',hue = 'male',data = df)", "_____no_output_____" ], [ "sns.lmplot(x = 'male',y = 'Survived',data =df)", "_____no_output_____" ], [ "from sklearn.model_selection import train_test_split", "_____no_output_____" ], [ "X = df[['male']]\ny = df['Survived']", "_____no_output_____" ], [ " X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)", "_____no_output_____" ], [ "from sklearn.linear_model import LinearRegression", "_____no_output_____" ], [ "lm = LinearRegression()", "_____no_output_____" ], [ "lm.fit(X_train,y_train)", "_____no_output_____" ], [ "lm.coef_", "_____no_output_____" ], [ "lm.intercept_", "_____no_output_____" ], [ "lm.predict(X_test)", "_____no_output_____" ], [ "y_test", "_____no_output_____" ], [ "#since linear regressions gives cont var not 0 or 1(discrete), we use logistic regression", "_____no_output_____" ], [ "from sklearn.linear_model import LogisticRegression", "_____no_output_____" ], [ "log = LogisticRegression()", "_____no_output_____" ], [ "log.fit(X_train,y_train)", "_____no_output_____" ], [ "log.predict(X_test)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7d403f4749b6927351a0d0533c235e2af1f6220
11,753
ipynb
Jupyter Notebook
notebooks/TuneDPW-Walk1D.ipynb
rcnlee/CMDPs.jl
cbdc60f84d4c832bd62c9623e3556de32c207dbd
[ "MIT" ]
null
null
null
notebooks/TuneDPW-Walk1D.ipynb
rcnlee/CMDPs.jl
cbdc60f84d4c832bd62c9623e3556de32c207dbd
[ "MIT" ]
null
null
null
notebooks/TuneDPW-Walk1D.ipynb
rcnlee/CMDPs.jl
cbdc60f84d4c832bd62c9623e3556de32c207dbd
[ "MIT" ]
null
null
null
60.271795
4,324
0.431379
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
e7d4071f0de0fa8a5fe7353e83e75b04dcd47cc7
21,442
ipynb
Jupyter Notebook
Formacao Data Science/Curso 01/Alura_DS01_Aula_04.ipynb
iratuan/Cursos-Alura
4af4c85a55a23911d28b747558640cef2f8726ce
[ "Apache-2.0" ]
null
null
null
Formacao Data Science/Curso 01/Alura_DS01_Aula_04.ipynb
iratuan/Cursos-Alura
4af4c85a55a23911d28b747558640cef2f8726ce
[ "Apache-2.0" ]
null
null
null
Formacao Data Science/Curso 01/Alura_DS01_Aula_04.ipynb
iratuan/Cursos-Alura
4af4c85a55a23911d28b747558640cef2f8726ce
[ "Apache-2.0" ]
null
null
null
50.810427
8,594
0.500886
[ [ [ "import pandas as pd\nfrom datetime import datetime\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\n\n\nuri = 'https://gist.githubusercontent.com/guilhermesilveira/4d1d4a16ccbf6ea4e0a64a38a24ec884/raw/afd05cb0c796d18f3f5a6537053ded308ba94bf7/car-prices.csv'\ndados = pd.read_csv(uri)\n\nlabels = {\n \"mileage_per_year\":\"milhas_por_ano\",\n \"model_year\":\"ano_modelo\",\n \"price\":\"preco\",\n \"sold\":\"vendido\"\n}\ndados = dados.rename(columns=labels)\ndados[\"vendido\"] = dados[\"vendido\"].map({\"yes\":1,\"no\":0})\nano_atual = datetime.today().year\ndados[\"idade_modelo\"] = ano_atual - dados.ano_modelo \ndados[\"km_ano\"] = dados.milhas_por_ano * 1.60934\ndados = dados.drop(columns = [\"Unnamed: 0\",\"milhas_por_ano\", \"ano_modelo\"], axis=1)\n", "_____no_output_____" ], [ "dados.head()", "_____no_output_____" ], [ "x = dados[[\"preco\",\"idade_modelo\",\"km_ano\"]]\ny = dados[\"vendido\"]", "_____no_output_____" ], [ "# SEED controla a aleatoriedade da separação dos dados\n# stratify diz que o algoritmo deve estratificar a amostra com base na classificação (Y)\nSEED = 5\nnp.random.seed(SEED)\ntrain_x, test_x, train_y, test_y = train_test_split(x,y, test_size = 0.25, stratify = y)\n# Treinando o modelo\nfrom sklearn.svm import LinearSVC\nfrom sklearn.metrics import accuracy_score\nmodel = LinearSVC()\nmodel.fit(train_x, train_y)\n\nprevisoes = model.predict(test_x)\naccuracy_score(test_y, previsoes)", "/usr/local/lib/python3.6/dist-packages/sklearn/svm/_base.py:947: ConvergenceWarning: Liblinear failed to converge, increase the number of iterations.\n \"the number of iterations.\", ConvergenceWarning)\n" ], [ "from sklearn.dummy import DummyClassifier\ndummy_stratified = DummyClassifier()\ndummy_stratified.fit(train_x, train_y)\ndummy_stratified.score(test_x, test_y)", "/usr/local/lib/python3.6/dist-packages/sklearn/dummy.py:132: FutureWarning: The default value of strategy will change from stratified to prior in 0.24.\n \"stratified to prior in 0.24.\", FutureWarning)\n" ], [ "dummy_most_frequent = DummyClassifier(strategy=\"most_frequent\")\ndummy_most_frequent.fit(train_x, train_y)\ndummy_stratified.score(test_x, test_y)", "_____no_output_____" ], [ "# SEED controla a aleatoriedade da separação dos dados\n# stratify diz que o algoritmo deve estratificar a amostra com base na classificação (Y)\nSEED = 5\nnp.random.seed(SEED)\nraw_train_x, raw_test_x, train_y, test_y = train_test_split(x,y, test_size = 0.25, stratify = y)\n# Treinando o modelo\nfrom sklearn.svm import SVC\nfrom sklearn.preprocessing import StandardScaler\n\nscaler = StandardScaler()\nscaler.fit(raw_train_x) # treina\ntrain_x = scaler.transform(raw_train_x)\ntest_x = scaler.transform(raw_test_x)\n\nmodel = SVC()\nmodel.fit(train_x, train_y)\n\nprevisoes = model.predict(test_x)\naccuracy_score(test_y, previsoes)", "_____no_output_____" ], [ "# SEED controla a aleatoriedade da separação dos dados\n# stratify diz que o algoritmo deve estratificar a amostra com base na classificação (Y)\nSEED = 5\nnp.random.seed(SEED)\nraw_train_x, raw_test_x, train_y, test_y = train_test_split(x,y, test_size = 0.25, stratify = y)\n# Treinando o modelo\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.preprocessing import StandardScaler\n\nscaler = StandardScaler()\nscaler.fit(raw_train_x) # treina\ntrain_x = scaler.transform(raw_train_x)\ntest_x = scaler.transform(raw_test_x)\n\nmodel = DecisionTreeClassifier(max_depth=2)\nmodel.fit(train_x, train_y)\n\nprevisoes = 
model.predict(test_x)\naccuracy_score(test_y, previsoes)", "_____no_output_____" ], [ "!pip install graphviz\nfrom sklearn.tree import export_graphviz\nimport graphviz\n\nfeatures = x.columns\ndot_data = export_graphviz(model, feature_names=features, class_names=[\"não\",\"sim\"])\ngrafico = graphviz.Source(dot_data)\ngrafico", "Requirement already satisfied: graphviz in /usr/local/lib/python3.6/dist-packages (0.10.1)\n" ], [ "The ", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7d41613a69d28ae586fd4cc145605797dd94fd9
16,313
ipynb
Jupyter Notebook
notebooks/doc_exploration_v1.ipynb
JainyJoy/document-formatting
5510e3553c17b3f6207d1f018829830f2140c474
[ "MIT" ]
1
2021-02-11T08:04:30.000Z
2021-02-11T08:04:30.000Z
notebooks/doc_exploration_v1.ipynb
JainyJoy/document-formatting
5510e3553c17b3f6207d1f018829830f2140c474
[ "MIT" ]
null
null
null
notebooks/doc_exploration_v1.ipynb
JainyJoy/document-formatting
5510e3553c17b3f6207d1f018829830f2140c474
[ "MIT" ]
2
2020-09-21T05:21:00.000Z
2020-09-25T05:15:36.000Z
32.112205
142
0.488629
[ [ [ "import os\nimport sys\nimport time\nimport base64\nimport uuid\nimport pandas as pd\nfrom zipfile import ZipFile\nfrom lxml import etree\nimport xml.etree.ElementTree as ET\n\nutilities_dir = '/Users/kd/Workspace/python/helpers'\nsys.path.append(utilities_dir)\n\nfrom file_directory_utils import (create_directory, read_directory_files, get_subdirectories, get_all_file_paths)\n", "_____no_output_____" ], [ "input_filepath = '/Users/kd/Workspace/python/DOCX/document-formatting/data/input/template_1.docx'\noutput_dir = '/Users/kd/Workspace/python/DOCX/document-formatting/data/output'\nfilename = os.path.splitext(os.path.basename(input_filepath))[0]\n", "_____no_output_____" ], [ "def get_string_xmltree(xml):\n return etree.tostring(xml)\n\ndef get_xml_tree(xml_string):\n return etree.fromstring(xml_string)\n\ndef get_xmltree(filepath, parse='xml'):\n if parse == 'html':\n parser = etree.HTMLParser()\n tree = etree.parse(open(filepath, mode='r', encoding='utf-8'), parser)\n return tree\n else:\n with open(filepath,'r') as file:\n xml_string = file.read()\n return etree.fromstring(bytes(xml_string, encoding='utf-8'))\n return None\n\ndef check_element_is(element, type_char):\n word_schema1 = 'http://www.w3.org/1999/xhtml'\n word_schema2 = 'http://purl.oclc.org/ooxml/wordprocessingml/main'\n \n return (element.tag == '{%s}%s' % (word_schema1, type_char)) or (element.tag == '{%s}%s' % (word_schema2, type_char))\n\ndef check_element_is(element, type_char): \n return (element.tag == type_char)\n\ndef get_specific_tags(node, type_char):\n nodes = []\n for elem in node.iter():\n if check_element_is(elem, type_char):\n nodes.append(elem)\n return nodes\n", "_____no_output_____" ], [ "def extract_docx(filepath, working_dir):\n filename = os.path.splitext(os.path.basename(filepath))[0]\n extract_dir = os.path.join(working_dir, filename)\n \n with ZipFile(filepath, 'r') as file:\n file.extractall(path=extract_dir)\n filenames = file.namelist()\n \n return extract_dir, filenames\n\ndef save_docx(extracted_dir, filenames, output_filename):\n with ZipFile(output_filename, 'w') as docx:\n for filename in filenames: \n docx.write(os.path.join(extracted_dir, filename), filename)", "_____no_output_____" ], [ "extracted_dir, filenames = extract_docx(input_filepath, output_dir)\n\n", "_____no_output_____" ], [ "document_xml = get_xmltree(os.path.join(extracted_dir, 'word', 'document.xml'))\n# get_string_xmltree(document_xml)", "_____no_output_____" ], [ "from lxml.etree import Element, SubElement, QName, tounicode\n \nclass DOCX_NS_NSDEF_FACTORY:\n def __init__(self, ns, nsdef):\n self.ns = ns\n self.nsdef = nsdef\n self.name = None\n self.attribs = []\n self.root = None\n\n def add_name(self, name):\n self.name = name\n \n def add_attribs(self, without_qname, value):\n self.attribs.append({'qname': QName(self.ns, without_qname), 'val':value})\n \n def get_node(self):\n if len(self.attribs) > 0:\n attrib = {}\n for attr in self.attribs:\n self.root.set(attr['qname'], attr['val'])\n\n return self.root\n \n def add_child(self, parent, child):\n return parent.append(child)\n \n def create_root_node(self, name):\n self.name = name\n self.attribs = []\n self.root = Element(QName(self.ns, self.name), nsmap={self.nsdef:self.ns})\n\nclass DOCX_NS_W_FACTORY(DOCX_NS_NSDEF_FACTORY):\n def __init__(self):\n self.ns = 'http://schemas.openxmlformats.org/wordprocessingml/2006/main'\n self.nsdef = 'w'\n super().__init__(self.ns, self.nsdef)\n \nclass DOCX_NS_PIC_FACTORY(DOCX_NS_NSDEF_FACTORY):\n def __init__(self):\n 
self.ns = 'http://schemas.openxmlformats.org/drawingml/2006/picture'\n self.nsdef = 'pic'\n super().__init__(self.ns, self.nsdef)\n \nclass DOCX_NS_A_FACTORY(DOCX_NS_NSDEF_FACTORY):\n def __init__(self):\n self.ns = 'http://schemas.openxmlformats.org/drawingml/2006/picture'\n self.nsdef = 'a'\n super().__init__(self.ns, self.nsdef)\n", "_____no_output_____" ], [ "class PageSection (DOCX_NS_W_FACTORY):\n def __init__(self):\n super().__init__()\n\n def get_node(self):\n '''\n <w:sectPr w:rsidR=\"00A66D74\" w:rsidSect=\"00034616\">\n <w:pgSz w:w=\"11893\" w:h=\"16840\"/>\n <w:pgMar w:top=\"720\" w:right=\"720\" w:bottom=\"720\" w:left=\"720\" w:header=\"720\" w:footer=\"720\" w:gutter=\"0\"/>\n <w:cols w:space=\"720\"/>\n <w:docGrid w:linePitch=\"360\"/>\n </w:sectPr>\n '''\n self.create_root_node('sectPr')\n self.add_attribs('rsidR', '00A66D74')\n self.add_attribs('rsidSect', '00034616')\n root = super().get_node()\n \n self.create_root_node('pgSz')\n self.add_attribs('w', '11893')\n self.add_attribs('h', '16840')\n self.add_child(root, super().get_node())\n \n self.create_root_node('pgMar')\n self.add_attribs('top', '720')\n self.add_attribs('right', '720')\n self.add_attribs('bottom', '720')\n self.add_attribs('left', '720')\n self.add_attribs('header', '720')\n self.add_attribs('footer', '720')\n self.add_attribs('gutter', '0') \n self.add_child(root, super().get_node())\n\n self.create_root_node('cols')\n self.add_attribs('space', '720')\n self.add_child(root, super().get_node())\n\n self.create_root_node('docGrid')\n self.add_attribs('linePitch', '360')\n self.add_child(root, super().get_node())\n \n return root\n", "_____no_output_____" ], [ "node = PageSection()\nprint (tounicode(node.get_node(), pretty_print=True))", "<w:sectPr xmlns:w=\"http://schemas.openxmlformats.org/wordprocessingml/2006/main\" w:rsidR=\"00A66D74\" w:rsidSect=\"00034616\">\n <w:pgSz w:w=\"11893\" w:h=\"16840\"/>\n <w:pgMar w:top=\"720\" w:right=\"720\" w:bottom=\"720\" w:left=\"720\" w:header=\"720\" w:footer=\"720\" w:gutter=\"0\"/>\n <w:cols w:space=\"720\"/>\n <w:docGrid w:linePitch=\"360\"/>\n</w:sectPr>\n\n" ], [ "class A_NODE (DOCX_NS_A_FACTORY):\n def __init__(self):\n super().__init__()\n \n def get_node_blip(self):\n '''\n <a:blip r:embed=\"rId8\">\n <a:extLst>\n <a:ext uri=\"{28A0092B-C50C-407E-A947-70E740481C1C}\"/>\n </a:extLst>\n </a:blip>\n '''\n self.create_root_node('blip')\n self.add_attribs('embed', 'rId8')\n blip = super().get_node()\n \n return blip\n \n \n\nclass Pic (DOCX_NS_PIC_FACTORY):\n def __init__(self):\n super().__init__()\n \n def get_node(self):\n '''\n <pic:pic\n xmlns:pic=\"http://schemas.openxmlformats.org/drawingml/2006/picture\">\n <pic:nvPicPr>\n <pic:cNvPr id=\"0\" name=\"Picture 1\"/>\n <pic:cNvPicPr>\n <a:picLocks noChangeAspect=\"1\" noChangeArrowheads=\"1\"/>\n </pic:cNvPicPr>\n </pic:nvPicPr>\n <pic:blipFill>\n <a:blip r:embed=\"rId8\">\n <a:extLst>\n <a:ext uri=\"{28A0092B-C50C-407E-A947-70E740481C1C}\"/>\n </a:extLst>\n </a:blip>\n <a:srcRect/>\n <a:stretch>\n <a:fillRect/>\n </a:stretch>\n </pic:blipFill>\n <pic:spPr bwMode=\"auto\">\n <a:xfrm>\n <a:off x=\"0\" y=\"0\"/>\n <a:ext cx=\"4181475\" cy=\"5353050\"/>\n </a:xfrm>\n <a:prstGeom prst=\"rect\">\n <a:avLst/>\n </a:prstGeom>\n <a:noFill/>\n </pic:spPr>\n </pic:pic>\n '''\n self.create_root_node('pic')\n pic = super().get_node()\n \n self.create_root_node('nvPicPr')\n nvPicPr = super().get_node()\n \n self.create_root_node('cNvPr')\n cNvPr = super().get_node()\n \n self.create_root_node('cNvPicPr')\n cNvPicPr = 
super().get_node()\n \n self.create_root_node('')\n \n return root", "_____no_output_____" ], [ "node = A_NODE()\nprint (tounicode(node.get_node_blip(), pretty_print=True))\n", "<a:blip xmlns:a=\"http://schemas.openxmlformats.org/drawingml/2006/picture\" a:embed=\"rId8\"/>\n\n" ], [ "factory = DOCX_NS_W_FACTORY('sectPr')\nfactory.add_attribs('rsidR', '00A66D74')\nfactory.add_attribs('rsidSect', '00034616')\nsection = factory.get_node()\n\nfactory = DOCX_NS_W_FACTORY('sectPr')\n\nprint (tounicode(p, pretty_print=True))\n", "<w:sectPr xmlns:w=\"http://schemas.openxmlformats.org/wordprocessingml/2006/main\" w:rsidR=\"00A66D74\" w:rsidSect=\"00034616\"/>\n\n" ], [ "from lxml.etree import Element, SubElement, QName, tounicode\nclass XMLNamespaces:\n s = 'http://www.w3.org/2003/05/soap-envelope'\n a = 'http://www.w3.org/2005/08/addressing'\n\nroot = Element(QName(XMLNamespaces.s, 'Envelope'), nsmap={'s':XMLNamespaces.s, 'a':XMLNamespaces.a})\n\nheader = SubElement(root, QName(XMLNamespaces.s, 'Header'))\naction = SubElement(header, QName(XMLNamespaces.a, 'Action'), attrib={\n 'notUnderstand':'1',\n QName(XMLNamespaces.s, 'mustUnderstand'):'1'\n })\nprint (tounicode(root, pretty_print=True))\n", "<s:Envelope xmlns:a=\"http://www.w3.org/2005/08/addressing\" xmlns:s=\"http://www.w3.org/2003/05/soap-envelope\">\n <s:Header>\n <a:Action notUnderstand=\"1\" s:val=\"1\"/>\n </s:Header>\n</s:Envelope>\n\n" ], [ "from lxml.etree import Element, SubElement, QName, tounicode\n\nclass DOCX_NS:\n w = 'http://schemas.openxmlformats.org/wordprocessingml/2006/main'\n \np = Element(QName(DOCX_NS.w, 'p'), nsmap={'w':DOCX_NS.w})\npPr = SubElement(p, QName(DOCX_NS.w, 'pPr'))\n\nframePr = SubElement(pPr, QName(DOCX_NS.w, 'framePr'), attrib = {\n QName(DOCX_NS.w, 'w'):'3500',\n QName(DOCX_NS.w, 'h'):'3500',\n QName(DOCX_NS.w, 'wrap'):'auto',\n QName(DOCX_NS.w, 'hAnchor'):'page',\n QName(DOCX_NS.w, 'xAlign'):'right',\n QName(DOCX_NS.w, 'yAlign'):'top',\n })\n\nrPr = SubElement(pPr, QName(DOCX_NS.w, 'rPr'), attrib = {})\n\nprint (tounicode(p, pretty_print=True))\n", "<w:p xmlns:w=\"http://schemas.openxmlformats.org/wordprocessingml/2006/main\">\n <w:pPr>\n <w:framePr w:h=\"3500\" w:hAnchor=\"page\" w:w=\"3500\" w:wrap=\"auto\" w:xAlign=\"right\" w:yAlign=\"top\"/>\n </w:pPr>\n</w:p>\n\n" ], [ "#save_docx(extracted_dir, filenames, os.path.join(output_dir, \"kd.docx\"))", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7d41aae4b732383fb898eb00b1c22b64410b3b3
19,461
ipynb
Jupyter Notebook
Python for Data Science, AI & Development/4. Working with Data in Python/Writing Files with Open.ipynb
aqafridi/Data-Analytics
93492e4a5aa09d4735f3841f4429098c8c48b0d9
[ "MIT" ]
2
2021-08-24T07:07:29.000Z
2021-08-31T14:06:11.000Z
Python for Data Science, AI & Development/4. Working with Data in Python/Writing Files with Open.ipynb
aqafridi/Data-Analytics
93492e4a5aa09d4735f3841f4429098c8c48b0d9
[ "MIT" ]
null
null
null
Python for Data Science, AI & Development/4. Working with Data in Python/Writing Files with Open.ipynb
aqafridi/Data-Analytics
93492e4a5aa09d4735f3841f4429098c8c48b0d9
[ "MIT" ]
1
2022-03-20T13:09:06.000Z
2022-03-20T13:09:06.000Z
9,730.5
19,460
0.638919
[ [ [ "<center>\n <img src=\"https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-PY0101EN-SkillsNetwork/IDSNlogo.png\" width=\"300\" alt=\"cognitiveclass.ai logo\" />\n</center>\n\n# Write and Save Files in Python\n\nEstimated time needed: **25** minutes\n\n## Objectives\n\nAfter completing this lab you will be able to:\n\n* Write to files using Python libraries\n", "_____no_output_____" ], [ "<h2>Table of Contents</h2>\n<div class=\"alert alert-block alert-info\" style=\"margin-top: 20px\">\n <ul>\n <li><a href=\"write\">Writing Files</a></li>\n <li><a href=\"Append\">Appending Files</a></li>\n <li><a href=\"add\">Additional File modes</a></li>\n <li><a href=\"copy\">Copy a File</a></li>\n </ul>\n\n</div>\n\n<hr>\n", "_____no_output_____" ], [ "<h2 id=\"write\">Writing Files</h2>\n", "_____no_output_____" ], [ "We can open a file object using the method <code>write()</code> to save the text file to a list. To write to a file, the mode argument must be set to **w**. Let’s write a file **Example2.txt** with the line: **“This is line A”**\n", "_____no_output_____" ] ], [ [ "# Write line to file\nexmp2 = '/resources/data/Example2.txt'\nwith open(exmp2, 'w') as writefile:\n writefile.write(\"This is line A\")", "_____no_output_____" ] ], [ [ "We can read the file to see if it worked:\n", "_____no_output_____" ] ], [ [ "# Read file\n\nwith open(exmp2, 'r') as testwritefile:\n print(testwritefile.read())", "_____no_output_____" ] ], [ [ "We can write multiple lines:\n", "_____no_output_____" ] ], [ [ "# Write lines to file\n\nwith open(exmp2, 'w') as writefile:\n writefile.write(\"This is line A\\n\")\n writefile.write(\"This is line B\\n\")", "_____no_output_____" ] ], [ [ "The method <code>.write()</code> works similar to the method <code>.readline()</code>, except instead of reading a new line it writes a new line. The process is illustrated in the figure. 
The different colour coding of the grid represents a new line added to the file after each method call.\n", "_____no_output_____" ], [ "<img src=\"https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-PY0101EN-SkillsNetwork/labs/Module%204/images/WriteLine.png\" width=\"500\" />\n", "_____no_output_____" ], [ "You can check the file to see if your results are correct\n", "_____no_output_____" ] ], [ [ "# Check whether write to file\n\nwith open(exmp2, 'r') as testwritefile:\n print(testwritefile.read())", "_____no_output_____" ] ], [ [ "We write a list to a **.txt** file as follows:\n", "_____no_output_____" ] ], [ [ "# Sample list of text\n\nLines = [\"This is line A\\n\", \"This is line B\\n\", \"This is line C\\n\"]\nLines", "_____no_output_____" ], [ "# Write the strings in the list to text file\n\nwith open('Example2.txt', 'w') as writefile:\n for line in Lines:\n print(line)\n writefile.write(line)", "_____no_output_____" ] ], [ [ "We can verify the file is written by reading it and printing out the values:\n", "_____no_output_____" ] ], [ [ "# Verify if writing to file is successfully executed\n\nwith open('Example2.txt', 'r') as testwritefile:\n print(testwritefile.read())", "_____no_output_____" ] ], [ [ "However, note that setting the mode to **w** overwrites all the existing data in the file.\n", "_____no_output_____" ] ], [ [ "with open('Example2.txt', 'w') as writefile:\n writefile.write(\"Overwrite\\n\")\nwith open('Example2.txt', 'r') as testwritefile:\n print(testwritefile.read())", "_____no_output_____" ] ], [ [ "<hr>\n<h2 id=\"Append\">Appending Files</h2>\n", "_____no_output_____" ], [ "We can write to files without losing any of the existing data as follows by setting the mode argument to append: **a**. you can append a new line as follows:\n", "_____no_output_____" ] ], [ [ "# Write a new line to text file\n\nwith open('Example2.txt', 'a') as testwritefile:\n testwritefile.write(\"This is line C\\n\")\n testwritefile.write(\"This is line D\\n\")\n testwritefile.write(\"This is line E\\n\")", "_____no_output_____" ] ], [ [ "You can verify the file has changed by running the following cell:\n", "_____no_output_____" ] ], [ [ "# Verify if the new line is in the text file\n\nwith open('Example2.txt', 'r') as testwritefile:\n print(testwritefile.read())", "_____no_output_____" ] ], [ [ "<hr>\n<h2 id=\"add\">Additional modes</h2> \n", "_____no_output_____" ], [ "It's fairly ineffecient to open the file in **a** or **w** and then reopening it in **r** to read any lines. Luckily we can access the file in the following modes:\n\n* **r+** : Reading and writing. Cannot truncate the file.\n* **w+** : Writing and reading. Truncates the file.\n* **a+** : Appending and Reading. Creates a new file, if none exists.\n You dont have to dwell on the specifics of each mode for this lab.\n", "_____no_output_____" ], [ "Let's try out the **a+** mode:\n", "_____no_output_____" ] ], [ [ "with open('Example2.txt', 'a+') as testwritefile:\n testwritefile.write(\"This is line E\\n\")\n print(testwritefile.read())", "_____no_output_____" ] ], [ [ "There were no errors but <code>read()</code> also did not output anything. This is because of our location in the file.\n", "_____no_output_____" ], [ "Most of the file methods we've looked at work in a certain location in the file. <code>.write() </code> writes at a certain location in the file. <code>.read()</code> reads at a certain location in the file and so on. 
You can think of this as moving your pointer around in the notepad to make changes at specific location.\n", "_____no_output_____" ], [ "Opening the file in **w** is akin to opening the .txt file, moving your cursor to the beginning of the text file, writing new text and deleting everything that follows.\nWhereas opening the file in **a** is similiar to opening the .txt file, moving your cursor to the very end and then adding the new pieces of text. <br>\nIt is often very useful to know where the 'cursor' is in a file and be able to control it. The following methods allow us to do precisely this -\n\n* <code>.tell()</code> - returns the current position in bytes\n* <code>.seek(offset,from)</code> - changes the position by 'offset' bytes with respect to 'from'. From can take the value of 0,1,2 corresponding to beginning, relative to current position and end\n", "_____no_output_____" ], [ "Now lets revisit **a+**\n", "_____no_output_____" ] ], [ [ "with open('Example2.txt', 'a+') as testwritefile:\n print(\"Initial Location: {}\".format(testwritefile.tell()))\n \n data = testwritefile.read()\n if (not data): #empty strings return false in python\n print('Read nothing') \n else: \n print(testwritefile.read())\n \n testwritefile.seek(0,0) # move 0 bytes from beginning.\n \n print(\"\\nNew Location : {}\".format(testwritefile.tell()))\n data = testwritefile.read()\n if (not data): \n print('Read nothing') \n else: \n print(data)\n \n print(\"Location after read: {}\".format(testwritefile.tell()) )", "_____no_output_____" ] ], [ [ "Finally, a note on the difference between **w+** and **r+**. Both of these modes allow access to read and write methods, however, opening a file in **w+** overwrites it and deletes all pre-existing data. <br>\nTo work with a file on existing data, use **r+** and **a+**. While using **r+**, it can be useful to add a <code>.truncate()</code> method at the end of your data. This will reduce the file to your data and delete everything that follows. <br>\nIn the following code block, Run the code as it is first and then run it with the <code>.truncate()</code>.\n", "_____no_output_____" ] ], [ [ "with open('Example2.txt', 'r+') as testwritefile:\n data = testwritefile.readlines()\n testwritefile.seek(0,0) #write at beginning of file\n \n testwritefile.write(\"Line 1\" + \"\\n\")\n testwritefile.write(\"Line 2\" + \"\\n\")\n testwritefile.write(\"Line 3\" + \"\\n\")\n testwritefile.write(\"finished\\n\")\n #Uncomment the line below\n #testwritefile.truncate()\n testwritefile.seek(0,0)\n print(testwritefile.read())\n ", "_____no_output_____" ] ], [ [ "<hr>\n", "_____no_output_____" ], [ "<h2 id=\"copy\">Copy a File</h2> \n", "_____no_output_____" ], [ "Let's copy the file **Example2.txt** to the file **Example3.txt**:\n", "_____no_output_____" ] ], [ [ "# Copy file to another\n\nwith open('Example2.txt','r') as readfile:\n with open('Example3.txt','w') as writefile:\n for line in readfile:\n writefile.write(line)", "_____no_output_____" ] ], [ [ "We can read the file to see if everything works:\n", "_____no_output_____" ] ], [ [ "# Verify if the copy is successfully executed\n\nwith open('Example3.txt','r') as testwritefile:\n print(testwritefile.read())", "_____no_output_____" ] ], [ [ "After reading files, we can also write data into files and save them in different file formats like **.txt, .csv, .xls (for excel files) etc**. 
You will come across these in further examples\n", "_____no_output_____" ], [ "Now go to the directory to ensure the **.txt** file exists and contains the summary data that we wrote.\n", "_____no_output_____" ], [ "<hr>\n", "_____no_output_____" ], [ "<h2> Exercise </h2>\n", "_____no_output_____" ], [ "Your local university's Raptors fan club maintains a register of its active members on a .txt document. Every month they update the file by removing the members who are not active. You have been tasked with automating this with your Python skills. <br>\nGiven the file currentMem, Remove each member with a 'no' in their Active coloumn. Keep track of each of the removed members and append them to the exMem file. Make sure the format of the original files in preserved. (*Hint: Do this by reading/writing whole lines and ensuring the header remains* ) <br>\nRun the code block below prior to starting the exercise. The skeleton code has been provided for you, Edit only the cleanFiles function.\n", "_____no_output_____" ] ], [ [ "#Run this prior to starting the exercise\nfrom random import randint as rnd\n\nmemReg = 'members.txt'\nexReg = 'inactive.txt'\nfee =('yes','no')\n\ndef genFiles(current,old):\n with open(current,'w+') as writefile: \n writefile.write('Membership No Date Joined Active \\n')\n data = \"{:^13} {:<11} {:<6}\\n\"\n\n for rowno in range(20):\n date = str(rnd(2015,2020))+ '-' + str(rnd(1,12))+'-'+str(rnd(1,25))\n writefile.write(data.format(rnd(10000,99999),date,fee[rnd(0,1)]))\n\n\n with open(old,'w+') as writefile: \n writefile.write('Membership No Date Joined Active \\n')\n data = \"{:^13} {:<11} {:<6}\\n\"\n for rowno in range(3):\n date = str(rnd(2015,2020))+ '-' + str(rnd(1,12))+'-'+str(rnd(1,25))\n writefile.write(data.format(rnd(10000,99999),date,fee[1]))\n\n\ngenFiles(memReg,exReg)\n", "_____no_output_____" ] ], [ [ "Start your solution below:\n", "_____no_output_____" ] ], [ [ "\ndef cleanFiles(currentMem,exMem):\n '''\n currentMem: File containing list of current members\n exMem: File containing list of old members\n \n Removes all rows from currentMem containing 'no' and appends them to exMem\n '''\n \n pass \n\n\n# Code to help you see the files\n# Leave as is\nmemReg = 'members.txt'\nexReg = 'inactive.txt'\ncleanFiles(memReg,exReg)\n\n\nheaders = \"Membership No Date Joined Active \\n\"\nwith open(memReg,'r') as readFile:\n print(\"Active Members: \\n\\n\")\n print(readFile.read())\n \nwith open(exReg,'r') as readFile:\n print(\"Inactive Members: \\n\\n\")\n print(readFile.read())\n \n ", "_____no_output_____" ] ], [ [ "Run the following to verify your code:\n", "_____no_output_____" ] ], [ [ "def testMsg(passed):\n if passed:\n return 'Test Passed'\n else :\n return 'Test Failed'\n\ntestWrite = \"testWrite.txt\"\ntestAppend = \"testAppend.txt\" \npassed = True\n\ngenFiles(testWrite,testAppend)\n\nwith open(testWrite,'r') as file:\n ogWrite = file.readlines()\n\nwith open(testAppend,'r') as file:\n ogAppend = file.readlines()\n\ntry:\n cleanFiles(testWrite,testAppend)\nexcept:\n print('Error')\n\nwith open(testWrite,'r') as file:\n clWrite = file.readlines()\n\nwith open(testAppend,'r') as file:\n clAppend = file.readlines()\n \n# checking if total no of rows is same, including headers\n\nif (len(ogWrite) + len(ogAppend) != len(clWrite) + len(clAppend)):\n print(\"The number of rows do not add up. 
Make sure your final files have the same header and format.\")\n passed = False\n \nfor line in clWrite:\n if 'no' in line:\n passed = False\n print(\"Inactive members in file\")\n break\n else:\n if line not in ogWrite:\n print(\"Data in file does not match original file\")\n passed = False\nprint (\"{}\".format(testMsg(passed)))\n \n\n", "_____no_output_____" ] ], [ [ "<details><summary>Click here for the solution</summary>\n\n```python\ndef cleanFiles(currentMem,exMem):\n with open(currentMem,'r+') as writeFile: \n with open(exMem,'a+') as appendFile:\n #get the data\n writeFile.seek(0)\n members = writeFile.readlines()\n #remove header\n header = members[0]\n members.pop(0)\n \n inactive = [member for member in members if ('no' in member)]\n '''\n The above is the same as \n\n for member in active:\n if 'no' in member:\n inactive.append(member)\n '''\n #go to the beginning of the write file\n writeFile.seek(0) \n writeFile.write(header)\n for member in members:\n if (member in inactive):\n appendFile.write(member)\n else:\n writeFile.write(member) \n writeFile.truncate()\n \nmemReg = 'members.txt'\nexReg = 'inactive.txt'\ncleanFiles(memReg,exReg)\n\n# code to help you see the files\n\nheaders = \"Membership No Date Joined Active \\n\"\n\nwith open(memReg,'r') as readFile:\n print(\"Active Members: \\n\\n\")\n print(readFile.read())\n \nwith open(exReg,'r') as readFile:\n print(\"Inactive Members: \\n\\n\")\n print(readFile.read())\n \n```\n\n</details>\n", "_____no_output_____" ], [ "<hr>\n<h2>The last exercise!</h2>\n<p>Congratulations, you have completed your first lesson and hands-on lab in Python. However, there is one more thing you need to do. The Data Science community encourages sharing work. The best way to share and showcase your work is to share it on GitHub. By sharing your notebook on GitHub you are not only building your reputation with fellow data scientists, but you can also show it off when applying for a job. Even though this was your first piece of work, it is never too early to start building good habits. So, please read and follow <a href=\"https://cognitiveclass.ai/blog/data-scientists-stand-out-by-sharing-your-notebooks/?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkPY0101ENSkillsNetwork19487395-2021-01-01\" target=\"_blank\">this article</a> to learn how to share your work.\n<hr>\n", "_____no_output_____" ], [ "## Author\n\n<a href=\"https://www.linkedin.com/in/joseph-s-50398b136/?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkPY0101ENSkillsNetwork19487395-2021-01-01\" target=\"_blank\">Joseph Santarcangelo</a>\n\n### Other Contributors\n\n<a href=\"www.linkedin.com/in/jiahui-mavis-zhou-a4537814a\">Mavis Zhou</a>\n\n## Change Log\n\n| Date (YYYY-MM-DD) | Version | Changed By | Change Description |\n|---|---|---|---|\n| 2020-10-16 | 1.3 | Arjun Swani | Added exercise |\n| 2020-10-16 | 1.2 | Arjun Swani | Added section additional file modes |\n| 2020-10-16 | 1.1 | Arjun Swani | Made append a different section |\n| 2020-08-28 | 0.2 | Lavanya | Moved lab to course repo in GitLab |\n\n<hr>\n\n## <h3 align=\"center\"> © IBM Corporation 2020. All rights reserved. <h3/>\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ] ]
e7d45c0eb6146666a2c96a7a726d95086573666d
132,419
ipynb
Jupyter Notebook
_notebooks/2017-09-15-predict-house-price.ipynb
phucnsp/blog
343e5987628276ff2c74e4e3ebdcb5a4a1baa1df
[ "Apache-2.0" ]
2
2021-06-06T07:17:53.000Z
2022-01-18T17:12:17.000Z
_notebooks/2017-09-15-predict-house-price.ipynb
phucnsp/blog
343e5987628276ff2c74e4e3ebdcb5a4a1baa1df
[ "Apache-2.0" ]
7
2020-03-08T02:50:29.000Z
2022-02-26T06:55:02.000Z
_notebooks/2017-09-15-predict-house-price.ipynb
phucnsp/blog
343e5987628276ff2c74e4e3ebdcb5a4a1baa1df
[ "Apache-2.0" ]
2
2021-08-30T07:19:54.000Z
2022-01-18T17:12:26.000Z
36.885515
193
0.274213
[ [ [ "# Predict house price in America\n> Analyse and predict on house price dataset.\n\n- toc: true \n- badges: true\n- comments: true\n- categories: [self-taught]\n- image: images/chart-preview.png", "_____no_output_____" ], [ "# Introduction", "_____no_output_____" ] ], [ [ "import pandas as pd\npd.options.display.max_columns = 999\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import KFold\n\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn import linear_model\nfrom sklearn.model_selection import KFold", "_____no_output_____" ], [ "df = pd.read_csv(\"AmesHousing.tsv\", delimiter=\"\\t\")", "_____no_output_____" ], [ "def transform_features(df):\n return df\n\ndef select_features(df):\n return df[[\"Gr Liv Area\", \"SalePrice\"]]\n\ndef train_and_test(df): \n train = df[:1460]\n test = df[1460:]\n \n ## You can use `pd.DataFrame.select_dtypes()` to specify column types\n ## and return only those columns as a data frame.\n numeric_train = train.select_dtypes(include=['integer', 'float'])\n numeric_test = test.select_dtypes(include=['integer', 'float'])\n \n ## You can use `pd.Series.drop()` to drop a value.\n features = numeric_train.columns.drop(\"SalePrice\")\n lr = linear_model.LinearRegression()\n lr.fit(train[features], train[\"SalePrice\"])\n predictions = lr.predict(test[features])\n mse = mean_squared_error(test[\"SalePrice\"], predictions)\n rmse = np.sqrt(mse)\n \n return rmse\n\ntransform_df = transform_features(df)\nfiltered_df = select_features(transform_df)\nrmse = train_and_test(filtered_df)\n\nrmse", "_____no_output_____" ] ], [ [ "# Feature Engineering", "_____no_output_____" ], [ "Handle missing values: \nAll columns: \nDrop any with 5% or more missing values for now. \nText columns: \nDrop any with 1 or more missing values for now. \nNumerical columns: \nFor columns with missing values, fill in with the most common value in that column \n", "_____no_output_____" ], [ "1: All columns: Drop any with 5% or more missing values for now. ", "_____no_output_____" ] ], [ [ "## Series object: column name -> number of missing values\nnum_missing = df.isnull().sum()", "_____no_output_____" ], [ "# Filter Series to columns containing >5% missing values\ndrop_missing_cols = num_missing[(num_missing > len(df)/20)].sort_values()\n\n# Drop those columns from the data frame. 
Note the use of the .index accessor\ndf = df.drop(drop_missing_cols.index, axis=1)", "_____no_output_____" ], [ "## Series object: column name -> number of missing values\ntext_mv_counts = df.select_dtypes(include=['object']).isnull().sum().sort_values(ascending=False)\n\n## Filter Series to columns containing *any* missing values\ndrop_missing_cols_2 = text_mv_counts[text_mv_counts > 0]\n\ndf = df.drop(drop_missing_cols_2.index, axis=1)", "_____no_output_____" ], [ "## Compute column-wise missing value counts\nnum_missing = df.select_dtypes(include=['int', 'float']).isnull().sum()\nfixable_numeric_cols = num_missing[(num_missing < len(df)/20) & (num_missing > 0)].sort_values()\nfixable_numeric_cols", "_____no_output_____" ], [ "## Compute the most common value for each column in `fixable_numeric_cols`.\nreplacement_values_dict = df[fixable_numeric_cols.index].mode().to_dict(orient='records')[0]\nreplacement_values_dict", "_____no_output_____" ], [ "## Use `pd.DataFrame.fillna()` to replace missing values.\ndf = df.fillna(replacement_values_dict)", "_____no_output_____" ], [ "## Verify that every column has 0 missing values\ndf.isnull().sum().value_counts()", "_____no_output_____" ], [ "years_sold = df['Yr Sold'] - df['Year Built']\nyears_sold[years_sold < 0]", "_____no_output_____" ], [ "years_since_remod = df['Yr Sold'] - df['Year Remod/Add']\nyears_since_remod[years_since_remod < 0]", "_____no_output_____" ], [ "## Create new columns\ndf['Years Before Sale'] = years_sold\ndf['Years Since Remod'] = years_since_remod\n\n## Drop rows with negative values for both of these new features\ndf = df.drop([1702, 2180, 2181], axis=0)\n\n## No longer need original year columns\ndf = df.drop([\"Year Built\", \"Year Remod/Add\"], axis = 1)", "_____no_output_____" ] ], [ [ "Drop columns that: \na. aren't useful for ML \nb. 
leak data about the final sale ", "_____no_output_____" ] ], [ [ "## Drop columns that aren't useful for ML\ndf = df.drop([\"PID\", \"Order\"], axis=1)\n\n## Drop columns that leak info about the final sale\ndf = df.drop([\"Mo Sold\", \"Sale Condition\", \"Sale Type\", \"Yr Sold\"], axis=1)", "_____no_output_____" ] ], [ [ "Let's update transform_features()", "_____no_output_____" ] ], [ [ "def transform_features(df):\n num_missing = df.isnull().sum()\n drop_missing_cols = num_missing[(num_missing > len(df)/20)].sort_values()\n df = df.drop(drop_missing_cols.index, axis=1)\n \n text_mv_counts = df.select_dtypes(include=['object']).isnull().sum().sort_values(ascending=False)\n drop_missing_cols_2 = text_mv_counts[text_mv_counts > 0]\n df = df.drop(drop_missing_cols_2.index, axis=1)\n \n num_missing = df.select_dtypes(include=['int', 'float']).isnull().sum()\n fixable_numeric_cols = num_missing[(num_missing < len(df)/20) & (num_missing > 0)].sort_values()\n replacement_values_dict = df[fixable_numeric_cols.index].mode().to_dict(orient='records')[0]\n df = df.fillna(replacement_values_dict)\n \n years_sold = df['Yr Sold'] - df['Year Built']\n years_since_remod = df['Yr Sold'] - df['Year Remod/Add']\n df['Years Before Sale'] = years_sold\n df['Years Since Remod'] = years_since_remod\n df = df.drop([1702, 2180, 2181], axis=0)\n\n df = df.drop([\"PID\", \"Order\", \"Mo Sold\", \"Sale Condition\", \"Sale Type\", \"Year Built\", \"Year Remod/Add\"], axis=1)\n return df\n\ndef select_features(df):\n return df[[\"Gr Liv Area\", \"SalePrice\"]]\n\ndef train_and_test(df): \n train = df[:1460]\n test = df[1460:]\n \n ## You can use `pd.DataFrame.select_dtypes()` to specify column types\n ## and return only those columns as a data frame.\n numeric_train = train.select_dtypes(include=['integer', 'float'])\n numeric_test = test.select_dtypes(include=['integer', 'float'])\n \n ## You can use `pd.Series.drop()` to drop a value.\n features = numeric_train.columns.drop(\"SalePrice\")\n lr = linear_model.LinearRegression()\n lr.fit(train[features], train[\"SalePrice\"])\n predictions = lr.predict(test[features])\n mse = mean_squared_error(test[\"SalePrice\"], predictions)\n rmse = np.sqrt(mse)\n \n return rmse\n\ndf = pd.read_csv(\"AmesHousing.tsv\", delimiter=\"\\t\")\ntransform_df = transform_features(df)\nfiltered_df = select_features(transform_df)\nrmse = train_and_test(filtered_df)\n\nrmse", "_____no_output_____" ] ], [ [ "# Feature Selection", "_____no_output_____" ] ], [ [ "numerical_df = transform_df.select_dtypes(include=['int', 'float'])\nnumerical_df", "_____no_output_____" ], [ "abs_corr_coeffs = numerical_df.corr()['SalePrice'].abs().sort_values()\nabs_corr_coeffs", "_____no_output_____" ], [ "## Let's only keep columns with a correlation coefficient of larger than 0.4 (arbitrary, worth experimenting later!)\nabs_corr_coeffs[abs_corr_coeffs > 0.4]", "_____no_output_____" ], [ "## Drop columns with less than 0.4 correlation with SalePrice\ntransform_df = transform_df.drop(abs_corr_coeffs[abs_corr_coeffs < 0.4].index, axis=1)", "_____no_output_____" ] ], [ [ "Which categorical columns should we keep?", "_____no_output_____" ] ], [ [ "## Create a list of column names from documentation that are *meant* to be categorical\nnominal_features = [\"PID\", \"MS SubClass\", \"MS Zoning\", \"Street\", \"Alley\", \"Land Contour\", \"Lot Config\", \"Neighborhood\", \n \"Condition 1\", \"Condition 2\", \"Bldg Type\", \"House Style\", \"Roof Style\", \"Roof Matl\", \"Exterior 1st\", \n \"Exterior 2nd\", \"Mas Vnr 
Type\", \"Foundation\", \"Heating\", \"Central Air\", \"Garage Type\", \n \"Misc Feature\", \"Sale Type\", \"Sale Condition\"]", "_____no_output_____" ] ], [ [ "Which columns are currently numerical but need to be encoded as categorical instead (because the numbers don't have any semantic meaning)? \nIf a categorical column has hundreds of unique values (or categories), should we keep it? When we dummy code this column, hundreds of columns will need to be added back to the data frame.", "_____no_output_____" ] ], [ [ "## Which categorical columns have we still carried with us? We'll test tehse \ntransform_cat_cols = []\nfor col in nominal_features:\n if col in transform_df.columns:\n transform_cat_cols.append(col)\n\n## How many unique values in each categorical column?\nuniqueness_counts = transform_df[transform_cat_cols].apply(lambda col: len(col.value_counts())).sort_values()\n## Aribtrary cutoff of 10 unique values (worth experimenting)\ndrop_nonuniq_cols = uniqueness_counts[uniqueness_counts > 10].index\ntransform_df = transform_df.drop(drop_nonuniq_cols, axis=1)", "_____no_output_____" ], [ "## Select just the remaining text columns and convert to categorical\ntext_cols = transform_df.select_dtypes(include=['object'])\nfor col in text_cols:\n transform_df[col] = transform_df[col].astype('category')\n \n## Create dummy columns and add back to the dataframe!\ntransform_df = pd.concat([\n transform_df, \n pd.get_dummies(transform_df.select_dtypes(include=['category']))\n], axis=1)", "_____no_output_____" ] ], [ [ "Update select_features()", "_____no_output_____" ] ], [ [ "def transform_features(df):\n num_missing = df.isnull().sum()\n drop_missing_cols = num_missing[(num_missing > len(df)/20)].sort_values()\n df = df.drop(drop_missing_cols.index, axis=1)\n \n text_mv_counts = df.select_dtypes(include=['object']).isnull().sum().sort_values(ascending=False)\n drop_missing_cols_2 = text_mv_counts[text_mv_counts > 0]\n df = df.drop(drop_missing_cols_2.index, axis=1)\n \n num_missing = df.select_dtypes(include=['int', 'float']).isnull().sum()\n fixable_numeric_cols = num_missing[(num_missing < len(df)/20) & (num_missing > 0)].sort_values()\n replacement_values_dict = df[fixable_numeric_cols.index].mode().to_dict(orient='records')[0]\n df = df.fillna(replacement_values_dict)\n \n years_sold = df['Yr Sold'] - df['Year Built']\n years_since_remod = df['Yr Sold'] - df['Year Remod/Add']\n df['Years Before Sale'] = years_sold\n df['Years Since Remod'] = years_since_remod\n df = df.drop([1702, 2180, 2181], axis=0)\n\n df = df.drop([\"PID\", \"Order\", \"Mo Sold\", \"Sale Condition\", \"Sale Type\", \"Year Built\", \"Year Remod/Add\"], axis=1)\n return df\n\ndef select_features(df, coeff_threshold=0.4, uniq_threshold=10):\n numerical_df = df.select_dtypes(include=['int', 'float'])\n abs_corr_coeffs = numerical_df.corr()['SalePrice'].abs().sort_values()\n df = df.drop(abs_corr_coeffs[abs_corr_coeffs < coeff_threshold].index, axis=1)\n \n nominal_features = [\"PID\", \"MS SubClass\", \"MS Zoning\", \"Street\", \"Alley\", \"Land Contour\", \"Lot Config\", \"Neighborhood\", \n \"Condition 1\", \"Condition 2\", \"Bldg Type\", \"House Style\", \"Roof Style\", \"Roof Matl\", \"Exterior 1st\", \n \"Exterior 2nd\", \"Mas Vnr Type\", \"Foundation\", \"Heating\", \"Central Air\", \"Garage Type\", \n \"Misc Feature\", \"Sale Type\", \"Sale Condition\"]\n \n transform_cat_cols = []\n for col in nominal_features:\n if col in df.columns:\n transform_cat_cols.append(col)\n\n uniqueness_counts = 
df[transform_cat_cols].apply(lambda col: len(col.value_counts())).sort_values()\n    drop_nonuniq_cols = uniqueness_counts[uniqueness_counts > uniq_threshold].index\n    df = df.drop(drop_nonuniq_cols, axis=1)\n    \n    text_cols = df.select_dtypes(include=['object'])\n    for col in text_cols:\n        df[col] = df[col].astype('category')\n    df = pd.concat([df, pd.get_dummies(df.select_dtypes(include=['category']))], axis=1)\n    \n    return df\n\ndef train_and_test(df, k=0):\n    numeric_df = df.select_dtypes(include=['integer', 'float'])\n    features = numeric_df.columns.drop(\"SalePrice\")\n    lr = linear_model.LinearRegression()\n    \n    if k == 0:\n        train = df[:1460]\n        test = df[1460:]\n\n        lr.fit(train[features], train[\"SalePrice\"])\n        predictions = lr.predict(test[features])\n        mse = mean_squared_error(test[\"SalePrice\"], predictions)\n        rmse = np.sqrt(mse)\n\n        return rmse\n    \n    if k == 1:\n        # Randomize *all* rows (frac=1) from `df`, then split the shuffled frame\n        shuffled_df = df.sample(frac=1)\n        train = shuffled_df[:1460]\n        test = shuffled_df[1460:]\n        \n        lr.fit(train[features], train[\"SalePrice\"])\n        predictions_one = lr.predict(test[features])        \n        \n        mse_one = mean_squared_error(test[\"SalePrice\"], predictions_one)\n        rmse_one = np.sqrt(mse_one)\n        \n        lr.fit(test[features], test[\"SalePrice\"])\n        predictions_two = lr.predict(train[features])        \n       \n        mse_two = mean_squared_error(train[\"SalePrice\"], predictions_two)\n        rmse_two = np.sqrt(mse_two)\n        \n        avg_rmse = np.mean([rmse_one, rmse_two])\n        print(rmse_one)\n        print(rmse_two)\n        return avg_rmse\n    else:\n        kf = KFold(n_splits=k, shuffle=True)\n        rmse_values = []\n        for train_index, test_index in kf.split(df):\n            train = df.iloc[train_index]\n            test = df.iloc[test_index]\n            lr.fit(train[features], train[\"SalePrice\"])\n            predictions = lr.predict(test[features])\n            mse = mean_squared_error(test[\"SalePrice\"], predictions)\n            rmse = np.sqrt(mse)\n            rmse_values.append(rmse)\n        print(rmse_values)\n        avg_rmse = np.mean(rmse_values)\n        return avg_rmse\n\ndf = pd.read_csv(\"AmesHousing.tsv\", delimiter=\"\\t\")\ntransform_df = transform_features(df)\nfiltered_df = select_features(transform_df)\nrmse = train_and_test(filtered_df, k=4)\n\nrmse", "[25761.875549560471, 36527.812968130842, 24956.485193881424, 28486.738135675929]\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
e7d47d38c576cb6ce6ee2930ffb832092777ed8b
18,167
ipynb
Jupyter Notebook
neural-machine-translation/43.memory-network-basic.ipynb
aiqoai/NLP-Models-Tensorflow
b14e5f39665476f94e27e088bd39006a3dddcf7f
[ "MIT" ]
3
2019-06-19T03:43:57.000Z
2019-08-21T10:27:46.000Z
neural-machine-translation/43.memory-network-basic.ipynb
LeeKLTW/NLP-Models-Tensorflow
85b6a85cc5af7223ea8cbf064074e21d4c18fe03
[ "MIT" ]
null
null
null
neural-machine-translation/43.memory-network-basic.ipynb
LeeKLTW/NLP-Models-Tensorflow
85b6a85cc5af7223ea8cbf064074e21d4c18fe03
[ "MIT" ]
4
2019-06-18T09:26:56.000Z
2019-10-30T20:53:22.000Z
39.153017
190
0.524743
[ [ [ "import numpy as np\nimport tensorflow as tf\nfrom sklearn.utils import shuffle\nimport re\nimport time\nimport collections\nimport os", "_____no_output_____" ], [ "def build_dataset(words, n_words, atleast=1):\n count = [['PAD', 0], ['GO', 1], ['EOS', 2], ['UNK', 3]]\n counter = collections.Counter(words).most_common(n_words)\n counter = [i for i in counter if i[1] >= atleast]\n count.extend(counter)\n dictionary = dict()\n for word, _ in count:\n dictionary[word] = len(dictionary)\n data = list()\n unk_count = 0\n for word in words:\n index = dictionary.get(word, 0)\n if index == 0:\n unk_count += 1\n data.append(index)\n count[0][1] = unk_count\n reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))\n return data, count, dictionary, reversed_dictionary", "_____no_output_____" ], [ "with open('english-train', 'r') as fopen:\n text_from = fopen.read().lower().split('\\n')[:-1]\nwith open('vietnam-train', 'r') as fopen:\n text_to = fopen.read().lower().split('\\n')[:-1]\nprint('len from: %d, len to: %d'%(len(text_from), len(text_to)))", "len from: 500, len to: 500\n" ], [ "concat_from = ' '.join(text_from).split()\nvocabulary_size_from = len(list(set(concat_from)))\ndata_from, count_from, dictionary_from, rev_dictionary_from = build_dataset(concat_from, vocabulary_size_from)\nprint('vocab from size: %d'%(vocabulary_size_from))\nprint('Most common words', count_from[4:10])\nprint('Sample data', data_from[:10], [rev_dictionary_from[i] for i in data_from[:10]])", "vocab from size: 1935\nMost common words [(',', 564), ('.', 477), ('the', 368), ('and', 286), ('to', 242), ('of', 220)]\nSample data [482, 483, 78, 6, 137, 484, 10, 226, 787, 14] ['rachel', 'pike', ':', 'the', 'science', 'behind', 'a', 'climate', 'headline', 'in']\n" ], [ "concat_to = ' '.join(text_to).split()\nvocabulary_size_to = len(list(set(concat_to)))\ndata_to, count_to, dictionary_to, rev_dictionary_to = build_dataset(concat_to, vocabulary_size_to)\nprint('vocab to size: %d'%(vocabulary_size_to))\nprint('Most common words', count_to[4:10])\nprint('Sample data', data_to[:10], [rev_dictionary_to[i] for i in data_to[:10]])", "vocab to size: 1461\nMost common words [(',', 472), ('.', 430), ('tôi', 283), ('và', 230), ('có', 199), ('chúng', 196)]\nSample data [84, 22, 668, 73, 10, 389, 110, 34, 81, 299] ['khoa', 'học', 'đằng', 'sau', 'một', 'tiêu', 'đề', 'về', 'khí', 'hậu']\n" ], [ "GO = dictionary_from['GO']\nPAD = dictionary_from['PAD']\nEOS = dictionary_from['EOS']\nUNK = dictionary_from['UNK']", "_____no_output_____" ], [ "for i in range(len(text_to)):\n text_to[i] += ' EOS'", "_____no_output_____" ], [ "def str_idx(corpus, dic):\n X = []\n for i in corpus:\n ints = []\n for k in i.split():\n ints.append(dic.get(k,UNK))\n X.append(ints)\n return X\n\ndef pad_sentence_batch(sentence_batch, pad_int, maxlen):\n padded_seqs = []\n seq_lens = []\n max_sentence_len = maxlen\n for sentence in sentence_batch:\n padded_seqs.append(sentence + [pad_int] * (max_sentence_len - len(sentence)))\n seq_lens.append(maxlen)\n return padded_seqs, seq_lens", "_____no_output_____" ], [ "X = str_idx(text_from, dictionary_from)\nY = str_idx(text_to, dictionary_to)", "_____no_output_____" ], [ "maxlen_question = max([len(x) for x in X]) * 2\nmaxlen_answer = max([len(y) for y in Y]) * 2", "_____no_output_____" ], [ "def hop_forward(memory_o, memory_i, response_proj, inputs_len, questions_len):\n match = memory_i\n match = pre_softmax_masking(match, inputs_len)\n match = tf.nn.softmax(match)\n match = post_softmax_masking(match, 
questions_len)\n response = tf.multiply(match, memory_o)\n return response_proj(response)\n\n\ndef pre_softmax_masking(x, seq_len):\n paddings = tf.fill(tf.shape(x), float('-inf'))\n T = tf.shape(x)[1]\n max_seq_len = tf.shape(x)[2]\n masks = tf.sequence_mask(seq_len, max_seq_len, dtype = tf.float32)\n masks = tf.tile(tf.expand_dims(masks, 1), [1, T, 1])\n return tf.where(tf.equal(masks, 0), paddings, x)\n\n\ndef post_softmax_masking(x, seq_len):\n T = tf.shape(x)[2]\n max_seq_len = tf.shape(x)[1]\n masks = tf.sequence_mask(seq_len, max_seq_len, dtype = tf.float32)\n masks = tf.tile(tf.expand_dims(masks, -1), [1, 1, T])\n return x * masks\n\n\ndef shift_right(x):\n batch_size = tf.shape(x)[0]\n start = tf.to_int32(tf.fill([batch_size, 1], GO))\n return tf.concat([start, x[:, :-1]], 1)\n\n\ndef embed_seq(x, vocab_size, zero_pad = True):\n lookup_table = tf.get_variable(\n 'lookup_table', [vocab_size, size_layer], tf.float32\n )\n if zero_pad:\n lookup_table = tf.concat(\n (tf.zeros([1, size_layer]), lookup_table[1:, :]), axis = 0\n )\n return tf.nn.embedding_lookup(lookup_table, x)\n\n\ndef position_encoding(sentence_size, embedding_size):\n encoding = np.ones((embedding_size, sentence_size), dtype = np.float32)\n ls = sentence_size + 1\n le = embedding_size + 1\n for i in range(1, le):\n for j in range(1, ls):\n encoding[i - 1, j - 1] = (i - (le - 1) / 2) * (j - (ls - 1) / 2)\n encoding = 1 + 4 * encoding / embedding_size / sentence_size\n return np.transpose(encoding)\n\ndef quest_mem(x, vocab_size, max_quest_len):\n x = embed_seq(x, vocab_size)\n pos = position_encoding(max_quest_len, size_layer)\n return x * pos\n\nclass QA:\n def __init__(self, vocab_size_from, vocab_size_to, size_layer, learning_rate, n_hops = 3):\n self.X = tf.placeholder(tf.int32,[None,None])\n self.Y = tf.placeholder(tf.int32,[None,None])\n self.X_seq_len = tf.fill([tf.shape(self.X)[0]],maxlen_question)\n self.Y_seq_len = tf.fill([tf.shape(self.X)[0]],maxlen_answer)\n max_quest_len = maxlen_question\n max_answer_len = maxlen_answer\n \n lookup_table = tf.get_variable('lookup_table', [vocab_size_from, size_layer], tf.float32)\n \n with tf.variable_scope('memory_o'):\n memory_o = quest_mem(self.X, vocab_size_from, max_quest_len)\n \n with tf.variable_scope('memory_i'):\n memory_i = quest_mem(self.X, vocab_size_from, max_quest_len)\n \n with tf.variable_scope('interaction'):\n response_proj = tf.layers.Dense(size_layer)\n for _ in range(n_hops):\n answer = hop_forward(memory_o,\n memory_i,\n response_proj,\n self.X_seq_len,\n self.X_seq_len)\n memory_i = answer\n \n embedding = tf.Variable(tf.random_uniform([vocab_size_to, size_layer], -1, 1))\n cell = tf.nn.rnn_cell.BasicRNNCell(size_layer)\n vocab_proj = tf.layers.Dense(vocab_size_to)\n state_proj = tf.layers.Dense(size_layer)\n init_state = state_proj(tf.layers.flatten(answer))\n \n helper = tf.contrib.seq2seq.TrainingHelper(\n inputs = tf.nn.embedding_lookup(embedding, shift_right(self.Y)),\n sequence_length = tf.to_int32(self.Y_seq_len))\n decoder = tf.contrib.seq2seq.BasicDecoder(cell = cell,\n helper = helper,\n initial_state = init_state,\n output_layer = vocab_proj)\n decoder_output, _, _ = tf.contrib.seq2seq.dynamic_decode(decoder = decoder,\n maximum_iterations = max_answer_len)\n \n helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(embedding = embedding,\n start_tokens = tf.tile(\n tf.constant([GO], \n dtype=tf.int32), \n [tf.shape(init_state)[0]]),\n end_token = EOS)\n decoder = tf.contrib.seq2seq.BasicDecoder(\n cell = cell,\n helper = helper,\n 
initial_state = init_state,\n output_layer = vocab_proj)\n predicting_decoder_output, _, _ = tf.contrib.seq2seq.dynamic_decode(\n decoder = decoder,\n maximum_iterations = max_answer_len)\n self.training_logits = decoder_output.rnn_output\n self.predicting_ids = predicting_decoder_output.sample_id\n self.logits = decoder_output.sample_id\n masks = tf.sequence_mask(self.Y_seq_len, max_answer_len, dtype=tf.float32)\n self.cost = tf.contrib.seq2seq.sequence_loss(logits = self.training_logits,\n targets = self.Y,\n weights = masks)\n self.optimizer = tf.train.AdamOptimizer(learning_rate).minimize(self.cost)\n y_t = tf.argmax(self.training_logits,axis=2)\n y_t = tf.cast(y_t, tf.int32)\n self.prediction = tf.boolean_mask(y_t, masks)\n mask_label = tf.boolean_mask(self.Y, masks)\n correct_pred = tf.equal(self.prediction, mask_label)\n correct_index = tf.cast(correct_pred, tf.float32)\n self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))", "_____no_output_____" ], [ "epoch = 20\nbatch_size = 16\nsize_layer = 256\n\ntf.reset_default_graph()\nsess = tf.InteractiveSession()\nmodel = QA(len(dictionary_from), len(dictionary_to), size_layer, 1e-3)\nsess.run(tf.global_variables_initializer())", "WARNING:tensorflow:From <ipython-input-11-325e092a6241>:87: BasicRNNCell.__init__ (from tensorflow.python.ops.rnn_cell_impl) is deprecated and will be removed in a future version.\nInstructions for updating:\nThis class is equivalent as tf.keras.layers.SimpleRNNCell, and will be replaced by that in Tensorflow 2.0.\n" ], [ "for i in range(epoch):\n total_loss, total_accuracy = 0, 0\n for k in range(0, len(text_to), batch_size):\n index = min(k+batch_size, len(text_to))\n batch_x, seq_x = pad_sentence_batch(X[k: index], PAD, maxlen_question)\n batch_y, seq_y = pad_sentence_batch(Y[k: index], PAD, maxlen_answer)\n predicted, accuracy,loss, _ = sess.run([model.predicting_ids, \n model.accuracy, model.cost, model.optimizer], \n feed_dict={model.X:batch_x,\n model.Y:batch_y})\n total_loss += loss\n total_accuracy += accuracy\n total_loss /= (len(text_to) / batch_size)\n total_accuracy /= (len(text_to) / batch_size)\n print('epoch: %d, avg loss: %f, avg accuracy: %f'%(i+1, total_loss, total_accuracy))", "epoch: 1, avg loss: 1.538514, avg accuracy: 0.885373\nepoch: 2, avg loss: 0.724011, avg accuracy: 0.915764\nepoch: 3, avg loss: 0.650569, avg accuracy: 0.920936\nepoch: 4, avg loss: 0.620413, avg accuracy: 0.923500\nepoch: 5, avg loss: 0.597482, avg accuracy: 0.925209\nepoch: 6, avg loss: 0.575253, avg accuracy: 0.927236\nepoch: 7, avg loss: 0.552628, avg accuracy: 0.929255\nepoch: 8, avg loss: 0.529827, avg accuracy: 0.931227\nepoch: 9, avg loss: 0.507104, avg accuracy: 0.933382\nepoch: 10, avg loss: 0.484589, avg accuracy: 0.935700\nepoch: 11, avg loss: 0.462313, avg accuracy: 0.938055\nepoch: 12, avg loss: 0.440341, avg accuracy: 0.940618\nepoch: 13, avg loss: 0.418683, avg accuracy: 0.943500\nepoch: 14, avg loss: 0.397441, avg accuracy: 0.946464\nepoch: 15, avg loss: 0.376639, avg accuracy: 0.949491\nepoch: 16, avg loss: 0.356326, avg accuracy: 0.952673\nepoch: 17, avg loss: 0.336615, avg accuracy: 0.955545\nepoch: 18, avg loss: 0.317487, avg accuracy: 0.958845\nepoch: 19, avg loss: 0.299116, avg accuracy: 0.961973\nepoch: 20, avg loss: 0.281513, avg accuracy: 0.965700\n" ], [ "for i in range(len(batch_x)):\n print('row %d'%(i+1))\n print('QUESTION:',' '.join([rev_dictionary_from[n] for n in batch_x[i] if n not in [0,1,2,3]]))\n print('REAL ANSWER:',' '.join([rev_dictionary_to[n] for n in batch_y[i] if 
n not in[0,1,2,3]]))\n print('PREDICTED ANSWER:',' '.join([rev_dictionary_to[n] for n in predicted[i] if n not in[0,1,2,3]]),'\\n')", "row 1\nQUESTION: or , if you had to choose between the last two , which one would you choose ?\nREAL ANSWER: sau khi thôi không làm đau mọi người nữa , tôi sẽ hỏi họ bạn có đau không ? đau như thế nào ?\nPREDICTED ANSWER: và tôi sẽ hỏi họ bạn có thể làm việc này . \n\nrow 2\nQUESTION: i kept on doing this for a while .\nREAL ANSWER: hoặc nếu được chọn giữa 2 kiểu đau cuối , bạn sẽ chọn cái nào ?\nPREDICTED ANSWER: và tôi sẽ hỏi họ bạn có thể làm việc này . \n\nrow 3\nQUESTION: and then , like all good academic projects , i got more funding .\nREAL ANSWER: tôi tiếp tục làm thí nghiệm này 1 thời gian\nPREDICTED ANSWER: và tôi sẽ hỏi họ bạn có thể làm việc này . \n\nrow 4\nQUESTION: i moved to sounds , electrical shocks -- i even had a pain suit that i could get people to feel much more pain .\nREAL ANSWER: và sau đó , giống các đề tài nghiên cứu hay khác , tôi nhận thêm nguồn tài trợ .\nPREDICTED ANSWER: và tôi sẽ hỏi họ bạn có thể làm việc này . \n\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
e7d48c209b69dcd48fa5523692549331a80a5464
12,057
ipynb
Jupyter Notebook
SpeechSynthesis/FastPitch/notebooks/FastPitch_voice_modification.ipynb
eba472/mongolian_tts
a2bae20a01aef182e14fad2faf5505261b46482a
[ "MIT" ]
null
null
null
SpeechSynthesis/FastPitch/notebooks/FastPitch_voice_modification.ipynb
eba472/mongolian_tts
a2bae20a01aef182e14fad2faf5505261b46482a
[ "MIT" ]
null
null
null
SpeechSynthesis/FastPitch/notebooks/FastPitch_voice_modification.ipynb
eba472/mongolian_tts
a2bae20a01aef182e14fad2faf5505261b46482a
[ "MIT" ]
24
2021-11-30T14:56:07.000Z
2021-12-15T22:31:45.000Z
30.601523
345
0.606867
[ [ [ "# Copyright 2020 NVIDIA Corporation. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================", "_____no_output_____" ] ], [ [ "# FastPitch: Voice Modification with Pre-defined Pitch Transformations", "_____no_output_____" ], [ "The [FastPitch](https://arxiv.org/abs/2006.06873) model is based on the [FastSpeech](https://arxiv.org/abs/1905.09263) model. Similarly to [FastSpeech2](https://arxiv.org/abs/2006.04558), which has been developed concurrently, it learns to predict the pitch contour and conditions the generation on such contour.\n\nThe simple mechanism of predicting the pitch on grapheme-level (rather than frame-level, as FastSpeech2 does) allows to easily alter the pitch during synthesis. FastPitch can thus change the perceived emotional state of the speaker, or slightly emphasise certain lexical units.", "_____no_output_____" ], [ "## Requirements", "_____no_output_____" ], [ "Run the notebook inside the container. By default the container forwards port `8888`.\n```\nbash scripts/docker/interactive.sh\n\n# inside the container\ncd notebooks\njupyter notebook --ip='*' --port=8888\n```\nPlease refer the Requirement section in `README.md` for more details and running outside the container.", "_____no_output_____" ] ], [ [ "import os\nassert os.getcwd().split('/')[-1] == 'notebooks'", "_____no_output_____" ] ], [ [ "## Generate audio samples", "_____no_output_____" ], [ "Training a FastPitch model from scrath takes 3 to 27 hours depending on the type and number of GPUs, performance numbers can be found in Section \"Training performance results\" in `README.md`. Therefore, to save the time of running this notebook, we recommend to download the pretrained FastPitch checkpoints on NGC for inference.\n\nYou can find FP32 checkpoint at [NGC](https://ngc.nvidia.com/catalog/models/nvidia:fastpitch_pyt_fp32_ckpt_v1/files) , and AMP (Automatic Mixed Precision) checkpoint at [NGC](https://ngc.nvidia.com/catalog/models/nvidia:fastpitch_pyt_amp_ckpt_v1/files).\n\nTo synthesize audio, you will need a WaveGlow model, which generates waveforms based on mel-spectrograms generated by FastPitch.You can download a pre-trained WaveGlow AMP model at [NGC](https://ngc.nvidia.com/catalog/models/nvidia:waveglow256pyt_fp16).", "_____no_output_____" ] ], [ [ "! mkdir -p output\n! MODEL_DIR='../pretrained_models' ../scripts/download_fastpitch.sh\n! MODEL_DIR='../pretrained_models' ../scripts/download_waveglow.sh", "_____no_output_____" ] ], [ [ "You can perform inference using the respective checkpoints that are passed as `--fastpitch` and `--waveglow` arguments. 
Next, you will use FastPitch model to generate audio samples for input text, including the basic version and the variations i npace, fade out, and pitch transforms, etc.", "_____no_output_____" ] ], [ [ "import IPython\n\n# store paths in aux variables\nfastp = '../pretrained_models/fastpitch/nvidia_fastpitch_200518.pt'\nwaveg = '../pretrained_models/waveglow/waveglow_1076430_14000_amp.pt'\nflags = f'--cuda --fastpitch {fastp} --waveglow {waveg} --wn-channels 256'", "_____no_output_____" ] ], [ [ "### 1. Basic speech synthesis", "_____no_output_____" ], [ "You need to create an input file with some text, or just input the text in the below cell:", "_____no_output_____" ] ], [ [ "%%writefile text.txt\nThe forms of printed letters should be beautiful, and that their arrangement on the page should be reasonable and a help to the shapeliness of the letters themselves.", "_____no_output_____" ] ], [ [ "Run the script below to generate audio from the input text file:", "_____no_output_____" ] ], [ [ "# basic systhesis\n!python ../inference.py {flags} -i text.txt -o output/original > /dev/null\n\nIPython.display.Audio(\"output/original/audio_0.wav\")", "_____no_output_____" ] ], [ [ "### 2. Add variations to the generated speech", "_____no_output_____" ], [ "FastPitch allows us to exert additional control over the synthesized utterances, the key parameters are the pace, fade out, and pitch transforms in particular.", "_____no_output_____" ], [ "### 2.1 Pace", "_____no_output_____" ], [ "FastPitch allows you to linearly adjust the pace of synthesized speech, similar to [FastSpeech](https://arxiv.org/abs/1905.09263) model. For instance, pass --pace 0.5 for a twofold decrease in speed, --pace 1.0 = unchanged.", "_____no_output_____" ] ], [ [ "# Change the pace of speech to double with --pace 0.5\n# (1.0 = unchanged)\n!python ../inference.py {flags} -i text.txt -o output/pace --pace 0.5 > /dev/null\n\nIPython.display.Audio(\"output/pace/audio_0.wav\")", "_____no_output_____" ] ], [ [ "### 2.2 Raise or lower the pitch", "_____no_output_____" ], [ "For every input character, the model predicts a pitch cue - an average pitch over a character in Hz. Pitch can be adjusted by transforming those pitch cues. A few simple examples are provided below.", "_____no_output_____" ] ], [ [ "# Raise/lower pitch by --pitch-transform-shift <Hz>\n# Synthesize with a -50 Hz shift\n!python ../inference.py {flags} -i text.txt -o output/riselowpitch --pitch-transform-shift -50 > /dev/null\n\nIPython.display.Audio(\"output/riselowpitch/audio_0.wav\")", "_____no_output_____" ] ], [ [ "### 2.3 Flatten the pitch", "_____no_output_____" ] ], [ [ "# Flatten the pitch to a constant value with --pitch-transform-flatten\n!python ../inference.py {flags} -i text.txt -o output/flattenpitch --pitch-transform-flatten > /dev/null\n\nIPython.display.Audio(\"output/flattenpitch/audio_0.wav\")", "_____no_output_____" ] ], [ [ "### 2.4 Invert the pitch", "_____no_output_____" ] ], [ [ "# Invert pitch wrt. to the mean pitch with --pitch-transform-invert\n!python ../inference.py {flags} -i text.txt -o output/invertpitch --pitch-transform-invert > /dev/null\n\nIPython.display.Audio(\"output/invertpitch/audio_0.wav\")", "_____no_output_____" ] ], [ [ "### 2.5 Amplify the pitch ", "_____no_output_____" ] ], [ [ "# Amplify pitch wrt. 
the mean pitch with --pitch-transform-amplify 2.0\n# values in the (1.0, 3.0) range work best\n!python ../inference.py {flags} -i text.txt -o output/amplifypitch --pitch-transform-amplify 2.0 > /dev/null\n\nIPython.display.Audio(\"output/amplifypitch/audio_0.wav\")", "_____no_output_____" ] ], [ [ "### 2.6 Combine the flags", "_____no_output_____" ], [ "The flags can be combined. You can find all the available options by calling `python inference.py --help`.", "_____no_output_____" ] ], [ [ "!python ../inference.py --help", "_____no_output_____" ] ], [ [ "The example below shows how to generate audio with a combination of the flags --pace, --pitch-transform-flatten, --pitch-transform-shift, --pitch-transform-invert, and --pitch-transform-amplify.", "_____no_output_____" ] ], [ [ "# Double the speed and combine multiple transformations\n!python ../inference.py {flags} -i text.txt -o output/combine \\\n --pace 2.0 --pitch-transform-flatten --pitch-transform-shift 50 \\\n --pitch-transform-invert --pitch-transform-amplify 1.5 > /dev/null\n\nIPython.display.Audio(\"output/combine/audio_0.wav\")", "_____no_output_____" ] ], [ [ "### 3. Inference performance benchmark", "_____no_output_____" ] ], [ [ "# Benchmark inference using AMP\n!python ../inference.py {flags} \\\n --include-warmup --batch-size 8 --repeats 100 --torchscript --amp \\\n -i ../phrases/benchmark_8_128.tsv -o output/benchmark", "_____no_output_____" ] ], [ [ "### 4. Next step", "_____no_output_____" ], [ "Now you have learnt how to generate high-quality audio from text using FastPitch, as well as how to add variations to the audio using the flags. You can experiment with more input texts, or change the hyperparameters of the models, such as the pitch flags, batch size, and precision, to see whether they improve the inference results.\n\nIf you are interested in learning more about FastPitch, please check the additional samples (trained with multiple speakers) presented on the [samples page](https://fastpitch.github.io/).", "_____no_output_____" ] ] ]
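The four pitch transforms demonstrated in this notebook are simple arithmetic on the per-character pitch cues: shift adds a constant in Hz, flatten collapses the contour to a constant, invert reflects it about the mean, and amplify scales deviations from the mean. A minimal numpy sketch of those semantics — the function below is a hypothetical illustration, not FastPitch's actual implementation:

```python
import numpy as np

def transform_pitch(pitch_hz, shift=0.0, flatten=False, invert=False, amplify=1.0):
    # Hypothetical illustration of the transforms; not the FastPitch source code.
    p = np.asarray(pitch_hz, dtype=float)
    mean = p.mean()                  # reference pitch for flatten/invert/amplify
    if flatten:
        p = np.full_like(p, mean)    # constant pitch over the utterance
    if invert:
        p = 2.0 * mean - p           # reflect each cue about the mean
    p = mean + amplify * (p - mean)  # amplify=1.0 leaves deviations unchanged
    return p + shift                 # shift in Hz, e.g. -50 lowers the voice

# mimic "--pitch-transform-shift -50" on some made-up pitch cues
print(transform_pitch([190.0, 220.0, 205.0], shift=-50.0))  # [140. 170. 155.]
```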
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
e7d4ada2e79d1665f7ae86be77fb30caf3a7116e
140,063
ipynb
Jupyter Notebook
Notebooks/development/1-bit sum.ipynb
maksimt/empirical_privacy
e032f869c7bfa5f0e31035e08ce33cdfcaff1326
[ "MIT" ]
2
2019-03-19T03:16:40.000Z
2019-08-14T10:49:24.000Z
Notebooks/development/1-bit sum.ipynb
maksimt/empirical_privacy
e032f869c7bfa5f0e31035e08ce33cdfcaff1326
[ "MIT" ]
null
null
null
Notebooks/development/1-bit sum.ipynb
maksimt/empirical_privacy
e032f869c7bfa5f0e31035e08ce33cdfcaff1326
[ "MIT" ]
null
null
null
157.374157
73,804
0.882853
[ [ [ "import matplotlib.pyplot as plt\nimport seaborn as sns\nimport itertools\nimport numpy as np\nfrom scipy.stats import binom, norm\nfrom scipy import integrate\nfrom collections import namedtuple\nfrom matplotlib import cm\nimport pandas as pd\nimport six\nif six.PY3:\n from importlib import reload\nimport luigi\nimport pickle\nfrom pprint import pprint\n\n%matplotlib inline\n%load_ext autoreload\n%autoreload 2", "_____no_output_____" ] ], [ [ "# k-NN, Function Expectation, Density Estimation", "_____no_output_____" ] ], [ [ "from experiment_framework.helpers import build_convergence_curve_pipeline\nfrom empirical_privacy.one_bit_sum import GenSampleOneBitSum\n\n# from empirical_privacy import one_bit_sum_joblib as one_bit_sum\n# from empirical_privacy import lsdd\n# reload(one_bit_sum)", "_____no_output_____" ], [ "\ndef B_pmf(k, n, p):\n return binom(n, p).pmf(k)\ndef B0_pmf(k, n, p):\n return B_pmf(k, n-1, p)\ndef B1_pmf(k, n, p):\n return B_pmf(k-1, n-1, p)\ndef sd(N, P):\n return 0.5*np.sum(abs(B0_pmf(i, N, P) - B1_pmf(i, N, P)) for i in range(N+1))\ndef optimal_correctness(n, p):\n return 0.5 + 0.5*sd(n, p)", "_____no_output_____" ], [ "n_max = 2**10\nntri=30\nn=7\np=0.5\nsd(n,p)", "_____no_output_____" ], [ "B0 = [B0_pmf(i, n, p) for i in range(n+1)]\nB1 = [B1_pmf(i, n, p) for i in range(n+1)]\ndif = np.abs(np.array(B0)-np.array(B1))\nsdv = 0.5*np.sum(dif)\npc = 0.5+0.5*sdv\nprint(f'n={n} coin flips p={p} probability of heads'\\\n '\\nB0 has first outcome=0, B1 has first outcome=1')\nprint(f'Statistic is the total number of heads sum')\nprint(f'N_heads=\\t{\" \".join(np.arange(n+1).astype(str))}')\nprint(f'PMF of B0=\\t{B0}\\nPMF of B1=\\t{B1}')\nprint(f'|B0-B1|=\\t{dif}')\nprint(f'sd = 0.5 * sum(|B0-B1|) = {sdv}')\nprint(f'P(Correct) = 0.5 + 0.5*sd = {pc}')", "n=7 coin flips p=0.5 probability of heads\nB0 has first outcome=0, B1 has first outcome=1\nStatistic is the total number of heads sum\nN_heads=\t0 1 2 3 4 5 6 7\nPMF of B0=\t[0.015625000000000007, 0.093750000000000028, 0.23437500000000003, 0.31250000000000022, 0.23437500000000003, 0.093750000000000028, 0.015625000000000007, 0.0]\nPMF of B1=\t[0.0, 0.015625000000000007, 0.093750000000000028, 0.23437500000000003, 0.31250000000000022, 0.23437500000000003, 0.093750000000000028, 0.015625000000000007]\n|B0-B1|=\t[ 0.015625 0.078125 0.140625 0.078125 0.078125 0.140625 0.078125\n 0.015625]\nsd = 0.5 * sum(|B0-B1|) = 0.3125000000000002\nP(Correct) = 0.5 + 0.5*sd = 0.6562500000000001\n" ], [ "ccc_kwargs = {\n 'confidence_interval_width':10,\n 'n_max':2**13,\n 'dataset_settings' : {\n 'n_trials':n,\n 'prob_success':p,\n 'gen_distr_type':'binom'\n },\n 'validation_set_size' : 2000\n}\n\nCCCs = []\nFits = ['knn', 'density', 'expectation']\nfor fit in Fits:\n CCCs.append(build_convergence_curve_pipeline(\n GenSampleOneBitSum,\n gensample_kwargs = {'generate_in_batch':True},\n fitter=fit,\n fitter_kwargs={} if fit=='knn' else {'statistic_column':0}\n )(**ccc_kwargs)\n )\nluigi.build(CCCs, local_scheduler=True, workers=4, log_level='ERROR')\n\ncolors = cm.Accent(np.linspace(0,1,len(CCCs)+1))\n\nax = plt.figure(figsize=(10,5))\nax = plt.gca()\nleg_handles = []\nfor (i, CC) in enumerate(CCCs):\n with CC.output().open() as f:\n res = pickle.load(f)\n handle=sns.tsplot(res['sd_matrix'], ci='sd', color=colors[i], ax=ax, legend=False, time=res['training_set_sizes'])\n\nj=0\nfor i in range(len(CCCs), 2*len(CCCs)):\n handle.get_children()[i].set_label('{}'.format(Fits[j]))\n j+=1\nplt.semilogx()\nplt.axhline(optimal_correctness(n, p), 
linestyle='--', color='r', label='_nolegend_')\nplt.axhline(0.5, linestyle='-', color='b', label='_nolegend_')\nplt.title('n={n} p={p} $\\delta$={d:.3f}'.format(n=n, p=p, d=sd(n,p)), fontsize=20)\nplt.xlabel('num samples')\nplt.ylabel('Correctness Rate')\nplt.legend(loc=(0,1.1))\n", "/opt/conda/lib/python3.6/site-packages/seaborn/timeseries.py:183: UserWarning: The `tsplot` function is deprecated and will be removed in a future release. Please update your code to use the new `lineplot` function.\n warnings.warn(msg, UserWarning)\n/opt/conda/lib/python3.6/site-packages/seaborn/timeseries.py:183: UserWarning: The `tsplot` function is deprecated and will be removed in a future release. Please update your code to use the new `lineplot` function.\n warnings.warn(msg, UserWarning)\n/opt/conda/lib/python3.6/site-packages/seaborn/timeseries.py:183: UserWarning: The `tsplot` function is deprecated and will be removed in a future release. Please update your code to use the new `lineplot` function.\n warnings.warn(msg, UserWarning)\n" ] ], [ [ "### Repeat the above using joblib to make sure the luigi implementation is correct", "_____no_output_____" ] ], [ [ "from math import ceil, log\none_bit_sum.n_jobs=1\nN = int(ceil(log(n_max) / log(2)))\nN_samples = np.logspace(4,N,num=N-3, base=2).astype(np.int)\n\nax = plt.figure(figsize=(10,5))\nax = plt.gca()\nAlgArg = namedtuple('AlgArg', field_names=['f_handle', 'f_kwargs'])\nalgs = [\n AlgArg(one_bit_sum.get_knn_correctness_rate_cached, {'neighbor_method':'sqrt'}),\n AlgArg(one_bit_sum.get_knn_correctness_rate_cached, {'neighbor_method':'sqrt_random_tiebreak'}),\n\n AlgArg(one_bit_sum.get_density_est_correctness_rate_cached, {'bandwidth_method':None}),\n AlgArg(one_bit_sum.get_expectation_correctness_rate_cached, {'bandwidth_method':None}),\n AlgArg(one_bit_sum.get_lsdd_correctness_rate_cached, {})\n #AlgArg(one_bit_sum.get_knn_correctness_rate_cached, {'neighbor_method':'cv'})\n]\ncolors = cm.Accent(np.linspace(0,1,len(algs)+1))\nleg_handles = []\nfor (i,alg) in enumerate(algs):\n res = one_bit_sum.get_res(n,p,ntri, alg.f_handle, alg.f_kwargs, n_max=n_max)\n handle=sns.tsplot(res, ci='sd', color=colors[i], ax=ax, legend=False, time=N_samples)\n# f, coef = get_fit(res, N_samples)\n# print alg, coef\n# lim = coef[0]\n# plt.plot(N_samples, f(N_samples), linewidth=3)\n# plt.text(N_samples[-1], lim, '{:.3f}'.format(lim),fontsize=16)\nj=0\nfor i in range(len(algs), 2*len(algs)):\n #print i, i/2-1 if i%2==0 else (i)/2\n handle.get_children()[i].set_label('{} {}'.format(algs[j].f_handle.func.__name__, algs[j].f_kwargs))\n j+=1\nplt.semilogx()\nplt.axhline(optimal_correctness(n, p), linestyle='--', color='r', label='_nolegend_')\nplt.axhline(0.5, linestyle='-', color='b', label='_nolegend_')\nplt.title('n={n} p={p} $\\delta$={d:.3f}'.format(n=n, p=p, d=sd(n,p)), fontsize=20)\nplt.xlabel('num samples')\nplt.ylabel('Correctness Rate')\nplt.legend(loc=(0,1.1))\n#print ax.get_legend_handles_labels()", "[Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.5s remaining: 0.0s\n[Parallel(n_jobs=1)]: Done 2 out of 2 | elapsed: 1.3s remaining: 0.0s\n[Parallel(n_jobs=1)]: Done 3 out of 3 | elapsed: 2.0s remaining: 0.0s\n[Parallel(n_jobs=1)]: Done 3 out of 3 | elapsed: 2.0s finished\n/opt/conda/lib/python3.6/site-packages/seaborn/timeseries.py:183: UserWarning: The `tsplot` function is deprecated and will be removed in a future release. 
Please update your code to use the new `lineplot` function.\n warnings.warn(msg, UserWarning)\n[Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.7s remaining: 0.0s\n[Parallel(n_jobs=1)]: Done 2 out of 2 | elapsed: 1.3s remaining: 0.0s\n[Parallel(n_jobs=1)]: Done 3 out of 3 | elapsed: 2.1s remaining: 0.0s\n[Parallel(n_jobs=1)]: Done 3 out of 3 | elapsed: 2.1s finished\n/opt/conda/lib/python3.6/site-packages/seaborn/timeseries.py:183: UserWarning: The `tsplot` function is deprecated and will be removed in a future release. Please update your code to use the new `lineplot` function.\n warnings.warn(msg, UserWarning)\n[Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 1.9s remaining: 0.0s\n[Parallel(n_jobs=1)]: Done 2 out of 2 | elapsed: 3.5s remaining: 0.0s\n[Parallel(n_jobs=1)]: Done 3 out of 3 | elapsed: 5.1s remaining: 0.0s\n[Parallel(n_jobs=1)]: Done 3 out of 3 | elapsed: 5.1s finished\n/opt/conda/lib/python3.6/site-packages/seaborn/timeseries.py:183: UserWarning: The `tsplot` function is deprecated and will be removed in a future release. Please update your code to use the new `lineplot` function.\n warnings.warn(msg, UserWarning)\n[Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.8s remaining: 0.0s\n[Parallel(n_jobs=1)]: Done 2 out of 2 | elapsed: 1.4s remaining: 0.0s\n[Parallel(n_jobs=1)]: Done 3 out of 3 | elapsed: 2.0s remaining: 0.0s\n[Parallel(n_jobs=1)]: Done 3 out of 3 | elapsed: 2.0s finished\n/opt/conda/lib/python3.6/site-packages/seaborn/timeseries.py:183: UserWarning: The `tsplot` function is deprecated and will be removed in a future release. Please update your code to use the new `lineplot` function.\n warnings.warn(msg, UserWarning)\n[Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 7.4s remaining: 0.0s\n[Parallel(n_jobs=1)]: Done 2 out of 2 | elapsed: 14.3s remaining: 0.0s\n[Parallel(n_jobs=1)]: Done 3 out of 3 | elapsed: 22.8s remaining: 0.0s\n[Parallel(n_jobs=1)]: Done 3 out of 3 | elapsed: 22.8s finished\n/opt/conda/lib/python3.6/site-packages/seaborn/timeseries.py:183: UserWarning: The `tsplot` function is deprecated and will be removed in a future release. Please update your code to use the new `lineplot` function.\n warnings.warn(msg, UserWarning)\n" ] ], [ [ "### Timing GenSamples", "_____no_output_____" ], [ "Without halving: 7.5sec\nWith halving: 8.1sec (i.e. 
not much overhead)", "_____no_output_____" ] ], [ [ "from luigi_utils.sampling_framework import GenSamples\nimport time\n\nclass GS(GenSamples(GenSampleOneBitSum, generate_in_batch=True)):\n pass\nGSi = GS(dataset_settings = ccc_kwargs['dataset_settings'],\n random_seed='0',\n generate_positive_samples=True,\n num_samples=2**15)\n\nstart = time.time()\nluigi.build([GSi], local_scheduler=True, workers=8, log_level='ERROR')\ncputime = time.time() - start\nprint(cputime)", "8.109845638275146\n" ], [ "res['training_set_sizes'].shape", "_____no_output_____" ], [ "np.concatenate((np.array([]), np.array([1,2,3])))", "_____no_output_____" ] ], [ [ "### More exp", "_____no_output_____" ] ], [ [ "def get_fit(res, N_samples):\n ntri, nsamp = res.shape\n sqrt2 = np.sqrt(2)\n Xlsq = np.hstack((np.ones((nsamp,1)),\n sqrt2/(N_samples.astype(np.float)**0.25)[:, np.newaxis]))\n y = 1.0 - res.reshape((nsamp*ntri, 1))\n \n Xlsq = reduce(lambda x,y: np.vstack((x,y)), [Xlsq]*ntri)\n coef = np.linalg.lstsq(Xlsq, y)[0].ravel()\n f = lambda n: 1.0 - coef[0] - coef[1]*sqrt2/n.astype(np.float)**0.25, coef\n return f\n", "_____no_output_____" ], [ "trial=0\nnum_samples=2**11\nbandwidth_method=None\nfrom scipy.stats import gaussian_kde\n\nX0, X1, y0, y1 = one_bit_sum.gen_data(n, p, num_samples, trial)\nX0 = X0.ravel()\nX1 = X1.ravel()\nbw = None\nif hasattr(bandwidth_method, '__call__'):\n bw = float(bandwidth_method(num_samples)) / num_samples # eg log\nif type(bandwidth_method) == float:\n bw = num_samples**(1-bandwidth_method)\nf0 = gaussian_kde(X0, bw_method = bw)\nf1 = gaussian_kde(X1, bw_method = bw)\n#Omega = np.unique(np.concatenate((X0, X1)))\n_min = 0\n_max = n\nx = np.linspace(_min, _max, num=10*num_samples)\nprint('difference of densities=',0.5 + 0.5 * 0.5 * np.mean(np.abs(f0(x)-f1(x))))\ndenom = f0(x)+f1(x)\nnumer = np.abs(f0(x)-f1(x))\nprint('expectation = ',0.5 + 0.5*np.mean(numer/denom))", "_____no_output_____" ] ], [ [ "# Uniforml distributed random variables", "_____no_output_____" ], [ "$$g_0 = U[0,0.5]+\\sum_{i=1}^{n-1} U[0,1]$$\n\n$$g_1 = U[0.5,1.0]+\\sum_{i=1}^{n-1} U[0,1]$$", "_____no_output_____" ], [ "Let $\\mu_n = \\frac{n-1}{2}$ and $\\sigma_n = \\sqrt{\\frac{n-0.75}{12}}$\n\nBy the CLT $g_0\\sim N(\\mu_n+0.25, \\sigma_n)$ and $g_1\\sim N(\\mu_n+0.75, \\sigma_n)$.", "_____no_output_____" ] ], [ [ "from math import sqrt\nn=3\nx = np.linspace(n/2.0-sqrt(n), n/2.0+sqrt(n))\nsigma = sqrt((n-0.75)/12.0)\nsqrt2 = sqrt(2)\nmu = (n-1.0)/2\n\ndef g0_pdf(x):\n return norm.pdf(x, loc=mu+0.25, scale=sigma)\ndef g1_pdf(x):\n return norm.pdf(x, loc=mu+0.75, scale=sigma)\ndef d_pdf(x):\n return norm.pdf(x, loc=-0.5, scale=sigma*sqrt2)\ndef g_int(n):\n sigma = sqrt((n-0.75)/12.0)\n mu = (n-1.0)/2\n N0 = norm(loc=mu+0.25, scale=sigma)\n N1 = norm(loc=mu+0.75, scale=sigma)\n I0 = N0.cdf(n*0.5)-N0.cdf(0)\n I1 = N1.cdf(n*0.5)-N1.cdf(0)\n return 2*(I0-I1)\ndef g_stat_dist(n):\n return 0.5 * g_int(n)\ndef g_optimal_correctness(n):\n return 0.5 + 0.5*g_stat_dist(n)\n\nplt.plot(x, g0_pdf(x), label='$g_0$')\nplt.plot(x, g1_pdf(x), label='$g_1$')\n#plt.plot(x, d_pdf(x), label='$d$')\nplt.axvline(x=n/2.0, color='r')\nassert g0_pdf(n/2.0)==g1_pdf(n/2.0)\nplt.legend()\nprint(g_optimal_correctness(n))", "_____no_output_____" ], [ "from math import ceil, log\nif n_max >= 2**13:\n one_bit_sum.n_jobs=1\nelse:\n one_bit_sum.n_jobs=-1\n \nN = int(ceil(log(n_max) / log(2)))\nN_samples = np.logspace(4,N,num=N-3, base=2).astype(np.int)\n\nax = plt.figure(figsize=(10,5))\nax = plt.gca()\nAlgArg = namedtuple('AlgArg', 
field_names=['f_handle', 'f_kwargs'])\nalgs = [\n AlgArg(one_bit_sum.get_knn_correctness_rate_cached, {'neighbor_method':'sqrt'}),\n AlgArg(one_bit_sum.get_knn_correctness_rate_cached, {'neighbor_method':'sqrt_random_tiebreak'}),\n\n AlgArg(one_bit_sum.get_density_est_correctness_rate_cached, {'bandwidth_method':None}),\n AlgArg(one_bit_sum.get_expectation_correctness_rate_cached, {'bandwidth_method':None}),\n AlgArg(one_bit_sum.get_lsdd_correctness_rate_cached, {})\n #AlgArg(one_bit_sum.get_knn_correctness_rate_cached, {'neighbor_method':'cv'})\n]\nfor A in algs:\n A.f_kwargs['type']='norm'\ncolors = cm.Accent(np.linspace(0,1,len(algs)+1))\nleg_handles = []\nfor (i,alg) in enumerate(algs):\n res = one_bit_sum.get_res(n,p,ntri, alg.f_handle, alg.f_kwargs, n_max=n_max)\n handle=sns.tsplot(res, ci='sd', color=colors[i], ax=ax, legend=False, time=N_samples)\n# f, coef = get_fit(res, N_samples)\n# print alg, coef\n# lim = coef[0]\n# plt.plot(N_samples, f(N_samples), linewidth=3)\n# plt.text(N_samples[-1], lim, '{:.3f}'.format(lim),fontsize=16)\nj=0\nfor i in range(len(algs), 2*len(algs)):\n #print i, i/2-1 if i%2==0 else (i)/2\n handle.get_children()[i].set_label(algs[j].f_handle.func.__name__)\n j+=1\n #print handle.get_children()[i].get_label()\nplt.semilogx()\nplt.axhline(g_optimal_correctness(n), linestyle='--', color='r', label='_nolegend_')\nplt.axhline(0.5, linestyle='-', color='b', label='_nolegend_')\nplt.title('n={n} $\\delta$={d:.3f}'.format(n=n, d=g_stat_dist(n)), fontsize=20)\nplt.xlabel('num samples')\nplt.ylabel('Correctness Rate')\nplt.legend(loc=(1.1,0))\n#print ax.get_legend_handles_labels()", "_____no_output_____" ], [ "true_value = g_optimal_correctness(n)\nprint(true_value)", "_____no_output_____" ], [ "trial=0\nnum_samples=2**15\nbandwidth_method=None\nfrom scipy.stats import gaussian_kde\n\nX0, X1, y0, y1 = one_bit_sum.gen_data(n, p, num_samples, trial, type='norm')\nX0 = X0.ravel()\nX1 = X1.ravel()\nbw = None\nif hasattr(bandwidth_method, '__call__'):\n bw = float(bandwidth_method(num_samples)) / num_samples # eg log\nif type(bandwidth_method) == float:\n bw = num_samples**(1-bandwidth_method)\nf0 = gaussian_kde(X0, bw_method = bw)\nf1 = gaussian_kde(X1, bw_method = bw)\n#Omega = np.unique(np.concatenate((X0, X1)))\n_min = 0\n_max = n\nx = np.linspace(_min, _max, num=num_samples)\n", "_____no_output_____" ], [ "print('difference of densities=',0.5 + 0.5 * 0.5 * integrate.quad(lambda x: np.abs(f0(x)-f1(x)), -np.inf, np.inf)[0])", "_____no_output_____" ], [ "X = np.concatenate((X0,X1))\nf0x = f0(X)\nf1x = f1(X)\ndenom = (f0x+f1x+np.spacing(1))\nnumer = np.abs(f0x-f1x)\nprint('expectation = ',0.5 + 0.5*np.mean(numer/denom))", "_____no_output_____" ], [ "print('exact=',g_optimal_correctness(n))", "_____no_output_____" ], [ "plt.plot(x, f0(x),label='$\\hat g_0$', linestyle='--')\nplt.plot(x, f1(x),label='$\\hat g_1$', linestyle='--')\nplt.plot(x, g0_pdf(x), label='$g_0$')\nplt.plot(x, g1_pdf(x), label='$g_1$')\nplt.legend(loc=(1.05,0))", "_____no_output_____" ] ], [ [ "### Comparing different numerical integration techniques", "_____no_output_____" ] ], [ [ "to_int = [f0,f1]\n\nprint 'Quad'\n# for (i,f) in enumerate(to_int):\n# intr = integrate.quad(f, -np.inf, np.inf)\n# print 'func={0} err={1:.3e}'.format(i, abs(1-intr[0]))\ng_int(n)-integrate.quad(lambda x: np.abs(f0(x)-f1(x)), -np.inf, np.inf)[0]", "_____no_output_____" ], [ "to_int = [f0,f1]\n\nprint 'Quad'\ng_int(n)-integrate.quad(lambda x: np.abs(f0(x)-f1(x)), -np.inf, np.inf)[0]", "_____no_output_____" ], [ 
"g_int(n)", "_____no_output_____" ], [ "print 'Simps'\ndef delta(x):\n return np.abs(f0(x)-f1(x))\nX = np.unique(np.concatenate((X0,X1)))\ny = delta(X)\ng_int(n)-integrate.simps(y,X)", "_____no_output_____" ], [ "import empirical_privacy.lsdd", "_____no_output_____" ], [ "rtv = lsdd.lsdd(X0[np.newaxis, :], X1[np.newaxis, :])", "_____no_output_____" ], [ "plt.hist(rtv[1])", "_____no_output_____" ], [ "np.mean(rtv[1])", "_____no_output_____" ] ], [ [ "## Sympy-based analysis", "_____no_output_____" ] ], [ [ "import sympy as sy\nn,k = sy.symbols('n k', integer=True)\n#k = sy.Integer(k)\np = sy.symbols('p', real=True)\nq=1-p\n\ndef binom_pmf(k, n, p):\n return sy.binomial(n,k)*(p**k)*(q**(n-k))\ndef binom_cdf(x, n, p):\n return sy.Sum([binom_pmf(j, n, p) for j in sy.Range(x+1)])\n\nB0 = binom_pmf(k, n-1, p)\nB1 = binom_pmf(k-1, n-1, p)", "_____no_output_____" ], [ "def stat_dist(N,P):\n return 0.5*sum([sy.Abs(B0.subs([(n,N),(p,P), (k,i)])-B1.subs([(n,N),(p,P), (k,i)])) for i in range(N+1)])\ndef sd(N, P):\n return 0.5*np.sum(abs(B0(i, N, P) - B1(i, N, P)) for i in range(N+1))", "_____no_output_____" ], [ "stat_dist(50,0.5)", "_____no_output_____" ], [ "sd(5000,0.5)", "_____no_output_____" ], [ "N=2\nterms =[(B0.subs([(n,N), (k,i)]).simplify(),B1.subs([(n,N), (k,i)]).simplify()) for i in range(N+1)]\nprint terms", "_____no_output_____" ], [ "0.5*sum(map(lambda t: sy.Abs(t[0]-t[1]), terms)).subs([(p,0.5)])", "_____no_output_____" ], [ "stat_dist(4,0.5)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
e7d4bd8ee90592fd16e60f1e022220283dcb56d7
23,408
ipynb
Jupyter Notebook
Machine_Learning_Regression/.ipynb_checkpoints/week-2-multiple-regression-assignment-2-blank-checkpoint.ipynb
nkmah2/ML_Uni_Washington_Coursera
a5852f1716189a1126919b12d767f894ad8490ac
[ "MIT" ]
null
null
null
Machine_Learning_Regression/.ipynb_checkpoints/week-2-multiple-regression-assignment-2-blank-checkpoint.ipynb
nkmah2/ML_Uni_Washington_Coursera
a5852f1716189a1126919b12d767f894ad8490ac
[ "MIT" ]
null
null
null
Machine_Learning_Regression/.ipynb_checkpoints/week-2-multiple-regression-assignment-2-blank-checkpoint.ipynb
nkmah2/ML_Uni_Washington_Coursera
a5852f1716189a1126919b12d767f894ad8490ac
[ "MIT" ]
null
null
null
34.172263
392
0.63175
[ [ [ "# Regression Week 2: Multiple Regression (gradient descent)", "_____no_output_____" ], [ "In the first notebook we explored multiple regression using graphlab create. Now we will use graphlab along with numpy to solve for the regression weights with gradient descent.\n\nIn this notebook we will cover estimating multiple regression weights via gradient descent. You will:\n* Add a constant column of 1's to a graphlab SFrame to account for the intercept\n* Convert an SFrame into a Numpy array\n* Write a predict_output() function using Numpy\n* Write a numpy function to compute the derivative of the regression weights with respect to a single feature\n* Write gradient descent function to compute the regression weights given an initial weight vector, step size and tolerance.\n* Use the gradient descent function to estimate regression weights for multiple features", "_____no_output_____" ], [ "# Fire up graphlab create", "_____no_output_____" ], [ "Make sure you have the latest version of graphlab (>= 1.7)", "_____no_output_____" ] ], [ [ "import graphlab", "_____no_output_____" ] ], [ [ "# Load in house sales data\n\nDataset is from house sales in King County, the region where the city of Seattle, WA is located.", "_____no_output_____" ] ], [ [ "sales = graphlab.SFrame('kc_house_data.gl/')", "[INFO] \u001b[1;32m1449884188 : INFO: (initialize_globals_from_environment:282): Setting configuration variable GRAPHLAB_FILEIO_ALTERNATIVE_SSL_CERT_FILE to /home/nitin/anaconda/lib/python2.7/site-packages/certifi/cacert.pem\n\u001b[0m\u001b[1;32m1449884188 : INFO: (initialize_globals_from_environment:282): Setting configuration variable GRAPHLAB_FILEIO_ALTERNATIVE_SSL_CERT_DIR to \n\u001b[0mThis non-commercial license of GraphLab Create is assigned to [email protected] and will expire on October 14, 2016. For commercial licensing options, visit https://dato.com/buy/.\n\n[INFO] Start server at: ipc:///tmp/graphlab_server-4201 - Server binary: /home/nitin/anaconda/lib/python2.7/site-packages/graphlab/unity_server - Server log: /tmp/graphlab_server_1449884188.log\n[INFO] GraphLab Server Version: 1.7.1\n" ] ], [ [ "If we want to do any \"feature engineering\" like creating new features or adjusting existing ones we should do this directly using the SFrames as seen in the other Week 2 notebook. For this notebook, however, we will work with the existing features.", "_____no_output_____" ], [ "# Convert to Numpy Array", "_____no_output_____" ], [ "Although SFrames offer a number of benefits to users (especially when using Big Data and built-in graphlab functions) in order to understand the details of the implementation of algorithms it's important to work with a library that allows for direct (and optimized) matrix operations. Numpy is a Python solution to work with matrices (or any multi-dimensional \"array\").\n\nRecall that the predicted value given the weights and the features is just the dot product between the feature and weight vector. Similarly, if we put all of the features row-by-row in a matrix then the predicted value for *all* the observations can be computed by right multiplying the \"feature matrix\" by the \"weight vector\". \n\nFirst we need to take the SFrame of our data and convert it into a 2D numpy array (also called a matrix). To do this we use graphlab's built in .to_dataframe() which converts the SFrame into a Pandas (another python library) dataframe. 
We can then use Pandas' .as_matrix() to convert the dataframe into a numpy matrix.", "_____no_output_____" ] ], [ [ "import numpy as np # note this allows us to refer to numpy as np instead ", "_____no_output_____" ] ], [ [ "Now we will write a function that will accept an SFrame, a list of feature names (e.g. ['sqft_living', 'bedrooms']) and a target feature (e.g. 'price') and will return two things:\n* A numpy matrix whose columns are the desired features plus a constant column (this is how we create an 'intercept')\n* A numpy array containing the values of the output\n\nWith this in mind, complete the following function (where there's an empty line you should write a line of code that does what the comment above indicates)\n\n**Please note you will need GraphLab Create version at least 1.7.1 in order for .to_numpy() to work!**", "_____no_output_____" ] ], [ [ "def get_numpy_data(data_sframe, features, output):\n data_sframe['constant'] = 1 # this is how you add a constant column to an SFrame\n # add the column 'constant' to the front of the features list so that we can extract it along with the others:\n features = ['constant'] + features # this is how you combine two lists\n # select the columns of data_SFrame given by the features list into the SFrame features_sframe (now including constant):\n\n # the following line will convert the features_SFrame into a numpy matrix:\n feature_matrix = features_sframe.to_numpy()\n # assign the column of data_sframe associated with the output to the SArray output_sarray\n\n # the following will convert the SArray into a numpy array by first converting it to a list\n output_array = output_sarray.to_numpy()\n return(feature_matrix, output_array)", "_____no_output_____" ] ], [ [ "For testing let's use the 'sqft_living' feature and a constant as our features and price as our output:", "_____no_output_____" ] ], [ [ "(example_features, example_output) = get_numpy_data(sales, ['sqft_living'], 'price') # the [] around 'sqft_living' makes it a list\nprint example_features[0,:] # this accesses the first row of the data; the ':' indicates 'all columns'\nprint example_output[0] # and the corresponding output", "_____no_output_____" ] ], [ [ "# Predicting output given regression weights", "_____no_output_____" ], [ "Suppose we had the weights [1.0, 1.0] and the features [1.0, 1180.0] and we wanted to compute the predicted output 1.0\\*1.0 + 1.0\\*1180.0 = 1181.0. This is the dot product between these two arrays. If they're numpy arrays we can use np.dot() to compute this:", "_____no_output_____" ] ], [ [ "my_weights = np.array([1., 1.]) # the example weights\nmy_features = example_features[0,] # we'll use the first data point\npredicted_value = np.dot(my_features, my_weights)\nprint predicted_value", "_____no_output_____" ] ], [ [ "np.dot() also works when dealing with a matrix and a vector. Recall that the predictions from all the observations are just the RIGHT (as in weights on the right) dot product between the features *matrix* and the weights *vector*. 
With this in mind, finish the following predict_output function to compute the predictions for an entire matrix of features given the matrix and the weights:", "_____no_output_____" ] ], [ [ "def predict_output(feature_matrix, weights):\n # assume feature_matrix is a numpy matrix containing the features as columns and weights is a corresponding numpy array\n # create the predictions vector by using np.dot()\n\n return(predictions)", "_____no_output_____" ] ], [ [ "If you want to test your code, run the following cell:", "_____no_output_____" ] ], [ [ "test_predictions = predict_output(example_features, my_weights)\nprint test_predictions[0] # should be 1181.0\nprint test_predictions[1] # should be 2571.0", "_____no_output_____" ] ], [ [ "# Computing the Derivative", "_____no_output_____" ], [ "We are now going to move to computing the derivative of the regression cost function. Recall that the cost function is the sum over the data points of the squared difference between an observed output and a predicted output.\n\nSince the derivative of a sum is the sum of the derivatives we can compute the derivative for a single data point and then sum over data points. We can write the squared difference between the observed output and predicted output for a single point as follows:\n\n(w[0]\\*[CONSTANT] + w[1]\\*[feature_1] + ... + w[i] \\*[feature_i] + ... + w[k]\\*[feature_k] - output)^2\n\nWhere we have k features and a constant. So the derivative with respect to weight w[i] by the chain rule is:\n\n2\\*(w[0]\\*[CONSTANT] + w[1]\\*[feature_1] + ... + w[i] \\*[feature_i] + ... + w[k]\\*[feature_k] - output)\\* [feature_i]\n\nThe term inside the parentheses is just the error (difference between prediction and output). So we can re-write this as:\n\n2\\*error\\*[feature_i]\n\nThat is, the derivative for the weight for feature i is the sum (over data points) of 2 times the product of the error and the feature itself. In the case of the constant, this is just twice the sum of the errors!\n\nRecall that twice the sum of the product of two vectors is just twice the dot product of the two vectors. Therefore the derivative for the weight for feature_i is just two times the dot product between the values of feature_i and the current errors. 
\n\nWith this in mind, complete the following derivative function which computes the derivative of the weight given the value of the feature (over all data points) and the errors (over all data points).", "_____no_output_____" ] ], [ [ "def feature_derivative(errors, feature):\n # Assume that errors and feature are both numpy arrays of the same length (number of data points)\n # compute twice the dot product of these vectors as 'derivative' and return the value\n\n return(derivative)", "_____no_output_____" ] ], [ [ "To test your feature derivative, run the following:", "_____no_output_____" ] ], [ [ "(example_features, example_output) = get_numpy_data(sales, ['sqft_living'], 'price') \nmy_weights = np.array([0., 0.]) # this makes all the predictions 0\ntest_predictions = predict_output(example_features, my_weights) \n# just like SFrames, 2 numpy arrays can be elementwise subtracted with '-': \nerrors = test_predictions - example_output # prediction errors in this case are just -example_output\nfeature = example_features[:,0] # let's compute the derivative with respect to 'constant', the \":\" indicates \"all rows\"\nderivative = feature_derivative(errors, feature)\nprint derivative\nprint -np.sum(example_output)*2 # should be the same as derivative", "_____no_output_____" ] ], [ [ "# Gradient Descent", "_____no_output_____" ], [ "Now we will write a function that performs gradient descent. The basic premise is simple. Given a starting point we update the current weights by moving in the negative gradient direction. Recall that the gradient is the direction of *increase* and therefore the negative gradient is the direction of *decrease* and we're trying to *minimize* a cost function. \n\nThe amount by which we move in the negative gradient *direction* is called the 'step size'. We stop when we are 'sufficiently close' to the optimum. We define this by requiring that the magnitude (length) of the gradient vector be smaller than a fixed 'tolerance'.\n\nWith this in mind, complete the following gradient descent function below using your derivative function above. 
For each step in the gradient descent we update the weight for each feature before computing our stopping criterion.", "_____no_output_____" ] ], [ [ "from math import sqrt # recall that the magnitude/length of a vector [g[0], g[1], g[2]] is sqrt(g[0]^2 + g[1]^2 + g[2]^2)", "_____no_output_____" ], [ "def regression_gradient_descent(feature_matrix, output, initial_weights, step_size, tolerance):\n converged = False \n weights = np.array(initial_weights) # make sure it's a numpy array\n while not converged:\n # compute the predictions based on feature_matrix and weights using your predict_output() function\n\n # compute the errors as predictions - output\n\n gradient_sum_squares = 0 # initialize the gradient sum of squares\n # while we haven't reached the tolerance yet, update each feature's weight\n for i in range(len(weights)): # loop over each weight\n # Recall that feature_matrix[:, i] is the feature column associated with weights[i]\n # compute the derivative for weight[i]:\n\n # add the squared value of the derivative to the gradient magnitude (for assessing convergence)\n\n # subtract the step size times the derivative from the current weight\n \n # compute the square-root of the gradient sum of squares to get the gradient magnitude:\n gradient_magnitude = sqrt(gradient_sum_squares)\n if gradient_magnitude < tolerance:\n converged = True\n return(weights)", "_____no_output_____" ] ], [ [ "A few things to note before we run the gradient descent. Since the gradient is a sum over all the data points and involves a product of an error and a feature, the gradient itself will be very large since the features are large (squarefeet) and the output is large (prices). So while you might expect \"tolerance\" to be small, small is only relative to the size of the features. \n\nFor similar reasons the step size will be much smaller than you might expect, but this is because the gradient has such large values.", "_____no_output_____" ], [ "# Running the Gradient Descent as Simple Regression", "_____no_output_____" ], [ "First let's split the data into training and test data.", "_____no_output_____" ] ], [ [ "train_data,test_data = sales.random_split(.8,seed=0)", "_____no_output_____" ] ], [ [ "Although gradient descent is designed for multiple regression, since the constant is now a feature we can use the gradient descent function to estimate the parameters in the simple regression on squarefeet. The following cell sets up the feature_matrix, output, initial weights and step size for the first model:", "_____no_output_____" ] ], [ [ "# let's test out the gradient descent\nsimple_features = ['sqft_living']\nmy_output = 'price'\n(simple_feature_matrix, output) = get_numpy_data(train_data, simple_features, my_output)\ninitial_weights = np.array([-47000., 1.])\nstep_size = 7e-12\ntolerance = 2.5e7", "_____no_output_____" ] ], [ [ "Next, run your gradient descent with the above parameters.", "_____no_output_____" ], [ "How do your weights compare to those achieved in week 1 (don't expect them to be exactly the same)? 
\n\n**Quiz Question: What is the value of the weight for sqft_living -- the second element of ‘simple_weights’ (rounded to 1 decimal place)?**", "_____no_output_____" ], [ "Use your newly estimated weights and your predict_output() function to compute the predictions on all the TEST data (you will need to create a numpy array of the test feature_matrix and test output first):", "_____no_output_____" ] ], [ [ "(test_simple_feature_matrix, test_output) = get_numpy_data(test_data, simple_features, my_output)", "_____no_output_____" ] ], [ [ "Now compute your predictions using test_simple_feature_matrix and your weights from above.", "_____no_output_____" ], [ "**Quiz Question: What is the predicted price for the 1st house in the TEST data set for model 1 (round to nearest dollar)?**", "_____no_output_____" ], [ "Now that you have the predictions on test data, compute the RSS on the test data set. Save this value for comparison later. Recall that RSS is the sum of the squared errors (difference between prediction and output).", "_____no_output_____" ], [ "# Running a multiple regression", "_____no_output_____" ], [ "Now we will use more than one actual feature. Use the following code to produce the weights for a second model with the following parameters:", "_____no_output_____" ] ], [ [ "model_features = ['sqft_living', 'sqft_living15'] # sqft_living15 is the average squarefeet for the nearest 15 neighbors. \nmy_output = 'price'\n(feature_matrix, output) = get_numpy_data(train_data, model_features, my_output)\ninitial_weights = np.array([-100000., 1., 1.])\nstep_size = 4e-12\ntolerance = 1e9", "_____no_output_____" ] ], [ [ "Use the above parameters to estimate the model weights. Record these values for your quiz.", "_____no_output_____" ], [ "Use your newly estimated weights and the predict_output function to compute the predictions on the TEST data. Don't forget to create a numpy array for these features from the test set first!", "_____no_output_____" ], [ "**Quiz Question: What is the predicted price for the 1st house in the TEST data set for model 2 (round to nearest dollar)?**", "_____no_output_____" ], [ "What is the actual price for the 1st house in the test data set?", "_____no_output_____" ], [ "**Quiz Question: Which estimate was closer to the true price for the 1st house on the Test data set, model 1 or model 2?**", "_____no_output_____" ], [ "Now use your predictions and the output to compute the RSS for model 2 on TEST data.", "_____no_output_____" ], [ "**Quiz Question: Which model (1 or 2) has the lowest RSS on all of the TEST data? **", "_____no_output_____" ] ] ]
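The blanks in this assignment notebook follow directly from the math spelled out in its markdown cells: predictions are np.dot(feature_matrix, weights), each partial derivative is 2·dot(errors, feature), and each weight is moved against its derivative until the gradient magnitude drops below the tolerance. One possible completion, sketched in plain numpy — not an official solution, and the smoke test at the bottom is a made-up toy problem, not the King County data:

```python
import numpy as np
from math import sqrt

def predict_output(feature_matrix, weights):
    # predictions for all observations: right dot product with the weight vector
    return np.dot(feature_matrix, weights)

def feature_derivative(errors, feature):
    # derivative of RSS w.r.t. one weight: twice the dot product of errors and feature
    return 2 * np.dot(errors, feature)

def regression_gradient_descent(feature_matrix, output, initial_weights, step_size, tolerance):
    weights = np.array(initial_weights, dtype=float)
    while True:
        errors = predict_output(feature_matrix, weights) - output
        gradient_sum_squares = 0.0
        for i in range(len(weights)):
            derivative = feature_derivative(errors, feature_matrix[:, i])
            gradient_sum_squares += derivative ** 2
            weights[i] -= step_size * derivative      # move against the gradient
        if sqrt(gradient_sum_squares) < tolerance:    # gradient magnitude small enough
            return weights

# toy smoke test: exact fit is intercept 1, slope 2
X = np.array([[1.0, 0.0], [1.0, 1.0], [1.0, 2.0], [1.0, 3.0]])
y = np.array([1.0, 3.0, 5.0, 7.0])
print(regression_gradient_descent(X, y, np.array([0.0, 0.0]), 0.05, 1e-6))  # ~[1. 2.]
```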
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]