| Column | Type | Range / classes |
|---|---|---|
| hexsha | stringlengths | 40–40 |
| size | int64 | 6–14.9M |
| ext | stringclasses | 1 value |
| lang | stringclasses | 1 value |
| max_stars_repo_path | stringlengths | 6–260 |
| max_stars_repo_name | stringlengths | 6–119 |
| max_stars_repo_head_hexsha | stringlengths | 40–41 |
| max_stars_repo_licenses | sequence | n/a |
| max_stars_count | int64 | 1–191k ⌀ |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24–24 ⌀ |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24–24 ⌀ |
| max_issues_repo_path | stringlengths | 6–260 |
| max_issues_repo_name | stringlengths | 6–119 |
| max_issues_repo_head_hexsha | stringlengths | 40–41 |
| max_issues_repo_licenses | sequence | n/a |
| max_issues_count | int64 | 1–67k ⌀ |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24–24 ⌀ |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24–24 ⌀ |
| max_forks_repo_path | stringlengths | 6–260 |
| max_forks_repo_name | stringlengths | 6–119 |
| max_forks_repo_head_hexsha | stringlengths | 40–41 |
| max_forks_repo_licenses | sequence | n/a |
| max_forks_count | int64 | 1–105k ⌀ |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24–24 ⌀ |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24–24 ⌀ |
| avg_line_length | float64 | 2–1.04M |
| max_line_length | int64 | 2–11.2M |
| alphanum_fraction | float64 | 0–1 |
| cells | sequence | n/a |
| cell_types | sequence | n/a |
| cell_type_groups | sequence | n/a |

(⌀ marks nullable columns.) Records follow as pipe-separated values in the column order above.
e71221c6f5a5a2ec3c1f9feebf4b1307e2dcdd31 | 163,393 | ipynb | Jupyter Notebook | Week2/04. Linear Regression_seungju.ipynb | Seungju182/pytorch-basic | 89051631484ed9ecb0fa17917158e0d63d13addc | [
"MIT"
] | null | null | null | Week2/04. Linear Regression_seungju.ipynb | Seungju182/pytorch-basic | 89051631484ed9ecb0fa17917158e0d63d13addc | [
"MIT"
] | null | null | null | Week2/04. Linear Regression_seungju.ipynb | Seungju182/pytorch-basic | 89051631484ed9ecb0fa17917158e0d63d13addc | [
"MIT"
] | null | null | null | 369.667421 | 16,492 | 0.9375 | [
[
[
"# 4. Linear Regression",
"_____no_output_____"
]
],
[
[
"import torch\nimport torch.nn as nn\nimport torch.optim as optim",
"_____no_output_____"
],
[
"import matplotlib.pyplot as plt\n%matplotlib inline",
"_____no_output_____"
]
],
[
[
"## 4.1 Generate Data",
"_____no_output_____"
]
],
[
[
"x_data = torch.Tensor([1, 2, 3, 4, 5])\ny_data = torch.Tensor([2, 4, 6, 8, 10])\n\nx = x_data.view(5, 1)\ny = y_data.view(5, 1)\n\nprint(\"x :\", x)\nprint(\"y :\", y)",
"x : tensor([[1.],\n [2.],\n [3.],\n [4.],\n [5.]])\ny : tensor([[ 2.],\n [ 4.],\n [ 6.],\n [ 8.],\n [10.]])\n"
],
[
"plt.scatter(x.numpy(), y.numpy())\nplt.show()",
"_____no_output_____"
]
],
[
[
"## 4.2 Derive Normal Equation (참고)",
"_____no_output_____"
]
],
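[
[
"A short added note for context: minimizing the least-squares objective $\\|xw-y\\|^2$ by setting its gradient $2x^T(xw-y)$ to zero yields the normal equation $w = (x^Tx)^{-1}x^Ty$ (there is no intercept term here, since the model is $y = wx$), which the next cell computes directly with `torch.mm`.",
"_____no_output_____"
]
],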
[
[
"xtx = torch.mm(x.t(),x)\nxtx_inv = xtx.inverse()\nxtx_inv_xt = torch.mm(xtx_inv, x.t())",
"_____no_output_____"
],
[
"w = torch.mm(xtx_inv_xt, y)\nw.item()",
"_____no_output_____"
]
],
[
[
"## 4.3 Define Model with Grad, nn.loss",
"_____no_output_____"
]
],
[
[
"w = torch.rand(1,1)\nw.item()",
"_____no_output_____"
],
[
"w*x",
"_____no_output_____"
],
[
"lr = 0.01\n\nfor step in range(20):\n pre = w*x\n cost = ((pre - y) ** 2).sum() / len(x)\n #(wx-y)^2 미분 시 2(wx-y)*x\n grad = 2*(pre-y).view(5).dot(x.view(5))/len(x)\n w -= lr*grad\n \n if step % 5 == 0 :\n plt.scatter(x.numpy(), y.numpy())\n plt.plot(x.numpy(), pre.numpy(), 'r-')\n # w.size() = 1*1, grad.size() = 1\n plt.title('step %d : cost=%.4f, w=%.4f, grad=%.4f' % (step, cost.item(), w.item(), grad.item()), fontdict={'size':15})\n plt.show()\n",
"_____no_output_____"
],
[
"x_new = torch.Tensor([6])\ny_new = w*x_new\ny_new.item()",
"_____no_output_____"
]
],
[
[
"## 4.4 Define Model with nn.Linear, Optimizer",
"_____no_output_____"
]
],
[
[
"model = nn.Linear(1, 1, bias = False)\nmodel.weight",
"_____no_output_____"
],
[
"loss = nn.MSELoss()\noptimizer = optim.SGD(model.parameters(), lr=0.01)",
"_____no_output_____"
],
[
"for step in range(30):\n pre = model(x)\n cost = loss(pre, y)\n \n optimizer.zero_grad()\n cost.backward()\n optimizer.step()\n \n if step % 5 == 0:\n plt.scatter(x.numpy(), y.numpy())\n # grad를 가진 tensor는 numpy()를 바로 사용할 수 없음\n # RuntimeError: Can't call numpy() on Variable that requires grad.\n plt.plot(x.numpy(), pre.data.numpy(), 'b-')\n plt.title('step %d, cost=%.4f, w=%.4f, grad=%4.f' \n % (step, cost.item() ,model.weight.item(), model.weight.grad.item()), fontdict={'size':15})\n plt.show()",
"_____no_output_____"
],
[
"x_new = torch.Tensor([6])\ny_new = model(x_new)\ny_new.item()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
e71223be8f46e2334ab77d5dd6f8326e0768ab78 | 9,266 | ipynb | Jupyter Notebook | notebooks/20201030_Bakeout.ipynb | dineshpinto/qudiamond-analysis | 6d3669f609b94ef0dcbd6201a85a5152dceabb17 | [
"MIT"
] | 2 | 2021-05-18T18:46:57.000Z | 2022-03-27T14:14:37.000Z | notebooks/20201030_Bakeout.ipynb | dineshpinto/qudiamond-analysis | 6d3669f609b94ef0dcbd6201a85a5152dceabb17 | [
"MIT"
] | null | null | null | notebooks/20201030_Bakeout.ipynb | dineshpinto/qudiamond-analysis | 6d3669f609b94ef0dcbd6201a85a5152dceabb17 | [
"MIT"
] | null | null | null | 31.732877 | 141 | 0.570689 | [
[
[
"import os\nimport sys\nsys.path.append('../')\n\nimport numpy as np\nimport matplotlib\nfrom matplotlib.dates import DateFormatter\nimport matplotlib.pyplot as plt\nimport datetime\nimport pandas as pd\nfrom pprint import pprint\nfrom scipy.optimize import curve_fit\n\nimport src.io as sio\nimport src.preprocessing as spp\nimport src.fitting as sft\nimport ipympl\nimport pytz",
"_____no_output_____"
],
[
"BAKEOUT_FOLDER = sio.get_folderpath(\"20201030_Bakeout\")",
"_____no_output_____"
]
],
[
[
"# Extrapolation",
"_____no_output_____"
]
],
[
[
"df1 = sio.read_tm224_data(\"temperature-monitoring04.xls\", BAKEOUT_FOLDER)\ndf2 = sio.read_tm224_data(\"temperature-monitoring05.xls\", BAKEOUT_FOLDER)\ndf3 = sio.read_tm224_data(\"temperature-monitoring06.xls\", BAKEOUT_FOLDER)\n\ndf = pd.concat([df1, df2, df3])\n\nfig, ax = plt.subplots()\n\nmyFmt = DateFormatter(\"%H:%M\\n%a\")\nax.xaxis.set_major_formatter(myFmt)\nax.plot(df[\"MPL_datetimes\"], df[\"Baseplate\"])\n\nx, y, x_mod, model = sft.time_extrapolation_lmfit(df, \"Baseplate\", end_date=\"11-Nov-20 9:00\", start_index=0, fit=\"linear\")\nax.plot(df[\"MPL_datetimes\"], model.best_fit)",
"_____no_output_____"
],
[
"df = sio.read_tm224_data(\"temperature-monitoring07.xls\", BAKEOUT_FOLDER)\n\nfig, ax = plt.subplots()\n\nmyFmt = DateFormatter(\"%H:%M\\n%a\")\nax.xaxis.set_major_formatter(myFmt)\nax.plot(df[\"MPL_datetimes\"], df[\"Baseplate\"], color=\"C2\")\n\nx, y, x_mod, model = sft.time_extrapolation_lmfit(df, \"Baseplate\", end_date=\"11-Nov-20 12:00\", fit=\"quadratic\")\n#ax.plot(x_mod, model.best_fit)\nax.plot(x, y, \"-\", color=\"C1\")",
"_____no_output_____"
],
[
"df = sio.read_tm224_data(\"temperature-monitoring07.xls\", BAKEOUT_FOLDER)\n\nfig, ax = plt.subplots()\n\nmyFmt = DateFormatter(\"%H:%M\\n%a\")\nax.xaxis.set_major_formatter(myFmt)\nax.plot(df[\"MPL_datetimes\"], df[\"Baseplate\"], color=\"C2\")\n\nx, y = sft.time_extrapolation(df, \"Baseplate\", end_date=\"11-Nov-20 12:00\", fit=\"linear\")\n#ax.plot(x_mod, model.best_fit)\nax.plot(x, y, \"-\", color=\"C1\")",
"_____no_output_____"
]
],
[
[
"# Pressure",
"_____no_output_____"
]
],
[
[
"df1 = sio.read_tpg_data(\"pressure-monitoring01\", BAKEOUT_FOLDER)\ndf2 = sio.read_tpg_data(\"pressure-monitoring02\", BAKEOUT_FOLDER)\ndf3 = sio.read_tpg_data(\"pressure-monitoring03\", BAKEOUT_FOLDER)\ndf4 = sio.read_tpg_data(\"pressure-monitoring04\", BAKEOUT_FOLDER)\n\ndf = pd.concat([df1, df2, df3, df4])\n\nfig, ax = plt.subplots()\n\nmyFmt = DateFormatter(\"%H:%M\\n%a\")\nax.xaxis.set_major_formatter(myFmt)\nax.set_yscale(\"log\")\n\nax.set_ylabel(r\"Pressure (mbar)\")\n\nax.plot(df[\"MPL_datetimes\"], df[\"Prep\"], \".\", label=\"Main\")\nax.legend()",
"_____no_output_____"
]
],
[
[
"# Increasing controllers to 80%",
"_____no_output_____"
]
],
[
[
"df1 = sio.read_tm224_data(\"temperature-monitoring08.xls\", BAKEOUT_FOLDER)\ndf2 = sio.read_tm224_data(\"temperature-monitoring09.xls\", BAKEOUT_FOLDER)\n\ndf = pd.concat([df1, df2])\nfig, ax = plt.subplots()\n\na = 4500\n\nmyFmt = DateFormatter(\"%H:%M\\n%a\")\nax.xaxis.set_major_formatter(myFmt)\nax.plot(df[\"MPL_datetimes\"][a:], df[\"Baseplate\"][a:], \".\", color=\"C2\")\n\nx, y, x_mod, model = sft.time_extrapolation_lmfit(df, \"Baseplate\", start_index=5500, end_date=\"12-Nov-20 12:00\", fit=\"linear\")\n \nprint(sft.setpointy_reach_time(x, y, setpointy=377.6))\nprint(model.fit_report())\n\nax.plot(x, y, \"--\", color=\"C1\")",
"_____no_output_____"
]
],
[
[
"# Combined pressure and temperature",
"_____no_output_____"
]
],
[
[
"# Pressure\ndf1 = sio.read_tpg_data(\"pressure-monitoring01\", BAKEOUT_FOLDER)\ndf2 = sio.read_tpg_data(\"pressure-monitoring02\", BAKEOUT_FOLDER)\ndf3 = sio.read_tpg_data(\"pressure-monitoring03\", BAKEOUT_FOLDER)\ndf4 = sio.read_tpg_data(\"pressure-monitoring04\", BAKEOUT_FOLDER)\ndfp = pd.concat([df1, df2, df3, df4])\n\n# Get rid of error value recorded when restarting gauge\ndfp = dfp.drop(dfp.index[dfp['Prep'] == 0].tolist())\n\n\n# Temperature\ndf1 = sio.read_tm224_data(\"temperature-monitoring01.xls\", BAKEOUT_FOLDER)\ndf2 = sio.read_tm224_data(\"temperature-monitoring02.xls\", BAKEOUT_FOLDER)\ndf3 = sio.read_tm224_data(\"temperature-monitoring03.xls\", BAKEOUT_FOLDER)\ndf4 = sio.read_tm224_data(\"temperature-monitoring04.xls\", BAKEOUT_FOLDER)\ndf5 = sio.read_tm224_data(\"temperature-monitoring05.xls\", BAKEOUT_FOLDER)\ndf6 = sio.read_tm224_data(\"temperature-monitoring06.xls\", BAKEOUT_FOLDER)\ndf7 = sio.read_tm224_data(\"temperature-monitoring07.xls\", BAKEOUT_FOLDER)\ndf8 = sio.read_tm224_data(\"temperature-monitoring08.xls\", BAKEOUT_FOLDER)\ndf9 = sio.read_tm224_data(\"temperature-monitoring09.xls\", BAKEOUT_FOLDER)\ndf10 = sio.read_tm224_data(\"temperature-monitoring10.xls\", BAKEOUT_FOLDER)\ndf11 = sio.read_tm224_data(\"temperature-monitoring11.xls\", BAKEOUT_FOLDER)\ndf12 = sio.read_tm224_data(\"temperature-monitoring12.xls\", BAKEOUT_FOLDER)\n\ndft = pd.concat([df1, df2, df3, df4, df5, df6, df7, df8, df9, df10, df11, df12])",
"_____no_output_____"
],
[
"fig, ax = plt.subplots()\n\nmyFmt = DateFormatter(\"%a\\n%d-%m\")\nax.xaxis.set_major_formatter(myFmt)\n\n# Pressure\nax.set_ylabel(r\"Pressure (mbar)\", color=\"C0\")\nax.set_yscale(\"log\", base=10)\nax.tick_params(axis='y', labelcolor=\"C0\")\nax.plot(dfp[\"MPL_datetimes\"], dfp[\"Prep\"], \"-\", color=\"C0\")\n\n#x, y, x_mod, model = sft.time_extrapolation_lmfit(dfp, \"Prep\", start_index=130000, end_date=\"20-Nov-20 12:00\", fit=\"linear\")\n#ax.plot(x, y, \"--\")\n\nax2 = ax.twinx() # instantiate a second axes that shares the same x-axis\nax2.set_ylabel(r\"Baseplate Temperature (K)\", color=\"C1\")\nax2.plot(dft[\"MPL_datetimes\"], dft[\"Baseplate\"], \"-\", color=\"C1\")\nax2.tick_params(axis='y', labelcolor=\"C1\")\n\n#sio.savefig(\"pressure-monitoring01-04-temperature-monitoring01-12\", BAKEOUT_FOLDER)",
"_____no_output_____"
],
[
"fig, ax = plt.subplots()\n\nmyFmt = DateFormatter(\"%a\\n%d-%m\")\nax.xaxis.set_major_formatter(myFmt)\n\n# Pressure\nax.set_ylabel(r\"Pressure (mbar)\", color=\"C0\")\nax.set_yscale(\"log\", base=10)\nax.tick_params(axis='y', labelcolor=\"C0\")\nax.plot(dfp[\"MPL_datetimes\"], dfp[\"Prep\"], \"-\", color=\"C0\")\n\nx, y, x_mod, model = sft.time_extrapolation_lmfit(dfp, \"Prep\", start_index=150000, end_date=\"22-Nov-20 12:00\", fit=\"linear\")\nax.plot(x, y, \"--\", color=\"C1\", zorder=3)\n\nsft.setpointy_reach_time(x, y, 1e-8)",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
e7123082e2346d70870b6413d49c2f60095345b9 | 30,194 | ipynb | Jupyter Notebook | Math/Math 20-2-2018 .ipynb | afcarl/Useful-python | 5d1947052fb25b2388704926e4692511cc162031 | [
"MIT"
] | null | null | null | Math/Math 20-2-2018 .ipynb | afcarl/Useful-python | 5d1947052fb25b2388704926e4692511cc162031 | [
"MIT"
] | null | null | null | Math/Math 20-2-2018 .ipynb | afcarl/Useful-python | 5d1947052fb25b2388704926e4692511cc162031 | [
"MIT"
] | 1 | 2018-09-05T21:48:57.000Z | 2018-09-05T21:48:57.000Z | 172.537143 | 20,180 | 0.908525 | [
[
[
"https://docs.python.org/3/library/math.html",
"_____no_output_____"
]
],
[
[
"import math\nimport random\nimport matplotlib.pyplot as plt\n%matplotlib inline",
"_____no_output_____"
],
[
"random.seed(0)",
"_____no_output_____"
],
[
"math.pi",
"_____no_output_____"
],
[
"math.sin(math.radians(90)) # Return the sine of x radians.",
"_____no_output_____"
]
],
[
[
"Plot a noisy sin wave",
"_____no_output_____"
]
],
[
[
"degrees = list(range(0,360))",
"_____no_output_____"
],
[
"sin_x = [100*math.sin(math.radians(x)) + random.gauss(mu=0, sigma=5) for x in degrees]",
"_____no_output_____"
],
[
"plt.plot(degrees, sin_x);",
"_____no_output_____"
],
[
"normals = [random.gauss(mu=0, sigma=1) for r in range(1000000)] # Generate normally distributed random values",
"_____no_output_____"
],
[
"plt.hist(normals, bins = 30);",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
e7123950dfdb4bc09f042967a998f831e137d139 | 22,284 | ipynb | Jupyter Notebook | 03_Introduction_To_Supervised_Machine_Learning.ipynb | alexbovet/network_lesson | 759ee66c4e79672214179d83c48f75394014156a | [
"MIT"
] | 23 | 2017-08-14T08:19:02.000Z | 2022-03-13T04:04:50.000Z | 03_Introduction_To_Supervised_Machine_Learning.ipynb | alexbovet/network_lesson | 759ee66c4e79672214179d83c48f75394014156a | [
"MIT"
] | null | null | null | 03_Introduction_To_Supervised_Machine_Learning.ipynb | alexbovet/network_lesson | 759ee66c4e79672214179d83c48f75394014156a | [
"MIT"
] | 12 | 2017-04-27T17:10:16.000Z | 2022-03-25T15:17:29.000Z | 39.651246 | 792 | 0.565473 | [
[
[
"In this section we will see the basics of supervised machine learning with a logistic regression classifier. We will see a simple example and see how to evaluate the performance of a binary classifier and avoid over-fitting.\n# Supervised machine learning\n\nThis section is partially inspired by the following Reference: https://see.stanford.edu/materials/aimlcs229/cs229-notes1.pdf\n\nSupervised learning consists of inferring a function from a labeled training set. On the other hand, unsupervised learning is a machine learning technique used when the input data is not labeled. Clustering is a example of unsupervised learning. \n\nFor supervised learning, we define:\n\n- The **features** (input variables) $x^{(i)}\\in \\mathbb{X}$ \n- The **target** (output we are trying to predict) $y^{(i)} \\in \\mathbb{Y}$\n\nA pair $(x^{(i)},y^{(i)})$ is a **training example**.\n\nThe set $\\{(x^{(i)},y^{(i)}); i = 1,...,m\\}$ is the **training set**:\n\nThe goal of supervised learning is to learn a function $h: \\mathbb{X}\\mapsto\\mathbb{Y}$, called the hypothesis, so that $h(x)$ is a good \npredictor of the corresponding $y$.\n\n- **Regression** correspond to the case where $y$ is a continuous variable\n- **Classification** correspond to the case where $y$ can only take a small number of discrete values\n\nExamples: \n- Univariate Linear Regression: $h_w(x) = w_0+w_1x$, with $\\mathbb{X} = \\mathbb{Y} = \\mathbb{R}$\n- Multivariate Linear Regression: $$h_w(x) = w_0+w_1x_1 + ... + w_nx_n = \\sum_{i=0}^{n}w_ix_i = w^Tx,$$\nwith $\\mathbb{Y} = \\mathbb{R}$ and $\\mathbb{X} = \\mathbb{R^n}$.\nHere $w_0$ is the intercept with the convention that $x_0=1$ to simplify notation.\n\n\n\n## Binary Classification with Logistic Regression\n\n- $y$ can take only two values, 0 or 1. For example, if $y$ is the sentiment associated with the tweet,\n$y=1$ if the tweet is \"positive\" and $y=0$ is the tweet is \"negative\".\n\n- $x^{(i)}$ represents the features of a tweet. For example the presence or absence of certain words.\n\n- $y^{(i)}$ is the **label** of the training example represented by $x^{(i)}$.\n\n\nSince $y\\in\\{0,1\\}$ we want to limit $h_w(x)$ between $[0,1]$.\n\nThe **Logistic regression** consists of choosing $h_w(x)$ as\n\n$$\nh_w(x) = \\frac{1}{1+e^{-w^Tx}}\n$$\n\nwhere $w^Tx = \\sum_{i=0}^{n}w_ix_i$ and $h_w(x) = g(w^Tx)$ with\n\n$$\ng(x)=\\frac{1}{1+e^{-x}}.\n$$\n\n$g(x)$ is the **logistic function** or **sigmoid function**\n",
"_____no_output_____"
]
],
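[
[
"A tiny numeric illustration of the notation above (added sketch, with hypothetical weights): with the convention $x_0=1$, the multivariate hypothesis $h_w(x)=w^Tx$ includes the intercept $w_0$.",
"_____no_output_____"
]
],
[
[
"import numpy as np\n\nw = np.array([2.0, 0.5, -1.0])  # hypothetical weights [w0, w1, w2]\nx = np.array([1.0, 3.0, 2.0])   # x0 = 1 by convention\nprint('h_w(x) =', w @ x)        # 2 + 0.5*3 - 1*2 = 1.5",
"_____no_output_____"
]
],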
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nx = np.linspace(-10,10)\ny = 1/(1+np.exp(-x))\n\np = plt.plot(x,y)\nplt.grid(True)",
"_____no_output_____"
]
],
[
[
"- $g(x)\\rightarrow 1$ for $x\\rightarrow\\infty$\n- $g(x)\\rightarrow 0$ for $x\\rightarrow -\\infty$\n- $g(0) = 1/2$\n\nFinally, to go from the regression to the classification, we can simply apply the following condition:\n\n$$\ny=\\left\\{\n \\begin{array}{@{}ll@{}}\n 1, & \\text{if}\\ h_w(x)>=1/2 \\\\\n 0, & \\text{otherwise}\n \\end{array}\\right.\n$$\n\nLet's clarify the notation. We have **$m$ training samples** and **$n$ features**, our training examples can be represented by a **$m$-by-$n$ matrix** $\\underline{\\underline{X}}=(x_{ij})$ ($m$-by-$n+1$, if we include the intercept term) that contains the training examples, $x^{(i)}$, in its rows.\n\nThe target values of the training set can be represented as a $m$-dimensional vector $\\underline{y}$ and the parameters \nof our model as\na $n$-dimensional vector $\\underline{w}$ ($n+1$ if we take into account the intercept).\n\nNow, for a given training example $x^{(i)}$, the function that we want to learn (or fit) can be written:\n\n$$\nh_\\underline{w}(x^{(i)}) = \\frac{1}{1+e^{-\\sum_{j=0}^n w_j x_{ij}}}\n$$\n",
"_____no_output_____"
]
],
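[
[
"A short illustration of the decision rule above (added sketch, hypothetical weights): thresholding $h_w(x)$ at $1/2$ is equivalent to thresholding $w^Tx$ at $0$.",
"_____no_output_____"
]
],
[
[
"w0, w1 = -1.0, 0.5  # hypothetical weights\nxs = np.array([0.5, 3.0])\nh = 1/(1+np.exp(-(w0 + w1*xs)))\nprint('h_w(x):', h)                                 # [0.32, 0.62]\nprint('predicted labels:', (h >= 0.5).astype(int))  # [0, 1]",
"_____no_output_____"
]
],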
[
[
"# Simple example:\n# we have 20 students that took an exam and we want to know if we can use \n# the number of hours they studied to predict if they pass or fail the\n# exam\n\n# m = 20 training samples \n# n = 1 feature (number of hours)\n\nX = np.array([0.50, 0.75, 1.00, 1.25, 1.50, 1.75, 1.75, 2.00, 2.25, 2.50,\n 2.75, 3.00, 3.25, 3.50, 4.00, 4.25, 4.50, 4.75, 5.00, 5.50])\n# 1 = pass, 0 = fail\ny = np.array([0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1])\n\nprint(X.shape)\n\nprint(y.shape)\n\np = plt.plot(X,y,'o')\ntx = plt.xlabel('x [h]')\nty = plt.ylabel('y ')\n\n",
"_____no_output_____"
]
],
[
[
"### Likelihood of the model\n\nHow to find the parameters, also called *weights*, $\\underline{w}$ that best fit our training data?\nWe want to find the weights $\\underline{w}$ that maximize the likelihood of observing the target $\\underline{y}$ given the observed features $\\underline{\\underline{X}}$.\nWe need a probabilistic model that gives us the probability of observing the value $y^{(i)}$ given the features $x^{(i)}$.\n\nThe function $h_\\underline{w}(x^{(i)})$ can be used precisely for that:\n\n$$\nP(y^{(i)}=1|x^{(i)};\\underline{w}) = h_\\underline{w}(x^{(i)})\n$$\n\n$$\nP(y^{(i)}=0|x^{(i)};\\underline{w}) = 1 - h_\\underline{w}(x^{(i)})\n$$\n\n\nwe can write it more compactly as:\n\n$$\nP(y^{(i)}|x^{(i)};\\underline{w}) = (h_\\underline{w}(x^{(i)}))^{y^{(i)}} ( 1 - h_\\underline{w}(x^{(i)}))^{1-y^{(i)}}\n$$\nwhere $y^{(i)}\\in\\{0,1\\}$\n\n\nWe see that $y^{(i)}$ is a random variable following a Bernouilli distribution with expectation $h_\\underline{w}(x^{(i)})$.\n\n\n\nThe **Likelihood function** of a statistical model is defined as:\n$$\n\\mathcal{L}(\\underline{w}) = \\mathcal{L}(\\underline{w};\\underline{\\underline{X}},\\underline{y}) = P(\\underline{y}|\\underline{\\underline{X}};\\underline{w}).\n$$\n\nThe likelihood takes into account all the $m$ training samples of our training dataset and estimates the likelihood \nof observing $\\underline{y}$ given $\\underline{\\underline{X}}$ and $\\underline{w}$. Assuming that the $m$ training examples were generated independently, we can write:\n\n$$\n\\mathcal{L}(\\underline{w}) = P(\\underline{y}|\\underline{\\underline{X}};\\underline{w}) = \\prod_{i=1}^m P(y^{(i)}|x^{(i)};\\underline{w}) = \\prod_{i=1}^m (h_\\underline{w}(x^{(i)}))^{y^{(i)}} ( 1 - h_\\underline{w}(x^{(i)}))^{1-y^{(i)}}.\n$$\n\nThis is the function that we want to maximize. It is usually much simpler to maximize the logarithm of this function, which is equivalent.\n\n$$\nl(\\underline{w}) = \\log\\mathcal{L}(\\underline{w}) = \\sum_{i=1}^{m} \\left(y^{(i)} \\log h_\\underline{w}(x^{(i)}) + (1- y^{(i)})\\log\\left(1- h_\\underline{w}(x^{(i)})\\right) \\right)\n$$\n\n### Loss function and linear models\n\nAn other way of formulating this problem is by defining a Loss function $L\\left(y^{(i)}, f(x^{(i)})\\right)$ such that:\n\n$$\n\\sum_{i=1}^{m} L\\left(y^{(i)}, f(x^{(i)})\\right) = - l(\\underline{w}).\n$$\n\nAnd now the problem consists of minimizing $\\sum_{i=1}^{m} L\\left(y^{(i)}, f(x^{(i)})\\right)$ over all the possible values of $\\underline{w}$.\n\nUsing the definition of $h_\\underline{w}(x^{(i)})$ you can show that $L$ can be written as:\n$$\nL\\left(y^{(i)}=1, f(x^{(i)})\\right) = \\log_2\\left(1+e^{-f(x^{(i)})}\\right)\n$$\nand\n$$\nL\\left(y^{(i)}=0, f(x^{(i)})\\right) = \\log_2\\left(1+e^{-f(x^{(i)})}\\right) - \\log_2\\left(e^{-f(x^{(i)})}\\right)\n$$\n\nwhere $f(x^{(i)}) = \\sum_{j=0}^n w_j x_{ij}$ is called the **decision function**.\n",
"_____no_output_____"
]
],
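[
[
"A quick numerical check (added here as a sketch, not part of the original notebook): the two loss expressions above are exactly $-\\log_2 h_\\underline{w}(x)$ and $-\\log_2(1-h_\\underline{w}(x))$.",
"_____no_output_____"
]
],
[
[
"import numpy as np\n\n# verify L(1,f) = -log2(h) and L(0,f) = -log2(1-h), with h = 1/(1+exp(-f))\nf = np.linspace(-5, 5, 11)\nh = 1/(1+np.exp(-f))\nassert np.allclose(np.log2(1+np.exp(-f)), -np.log2(h))\nassert np.allclose(np.log2(1+np.exp(-f)) - np.log2(np.exp(-f)), -np.log2(1-h))\nprint('loss formulas match the negative log2-likelihood terms')",
"_____no_output_____"
]
],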
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nfx = np.linspace(-5,5)\nLy1 = np.log2(1+np.exp(-fx))\nLy0 = np.log2(1+np.exp(-fx)) - np.log2(np.exp(-fx))\n\np = plt.plot(fx,Ly1,label='L(1,f(x))')\np = plt.plot(fx,Ly0,label='L(0,f(x))')\ntx = plt.xlabel('f(x)')\nty = plt.ylabel('L')\nl = plt.legend()",
"_____no_output_____"
],
[
"# coming back to our simple example\n\ndef Loss(x_i,y_i, w0, w1):\n fx = w0 + x_i*w1\n \n if y_i == 1:\n return np.log2(1+np.exp(-fx))\n if y_i == 0:\n return np.log2(1+np.exp(-fx)) - np.log2(np.exp(-fx))\n else:\n raise Exception('y_i must be 0 or 1')\n \ndef sumLoss(x,y, w0, w1):\n sumloss = 0\n for x_i, y_i in zip(x,y):\n sumloss += Loss(x_i,y_i, w0, w1)\n return sumloss\n \n\n# lets compute the loss function for several values\nw0s = np.linspace(-10,20,100)\nw1s = np.linspace(-10,20,100)\n\nsumLoss_vals = np.zeros((w0s.size, w1s.size))\nfor k, w0 in enumerate(w0s):\n for l, w1 in enumerate(w1s):\n sumLoss_vals[k,l] = sumLoss(X,y,w0,w1)\n \n",
"_____no_output_____"
],
[
"# let's find the values of w0 and w1 that minimize the loss\nind0, ind1 = np.where(sumLoss_vals == sumLoss_vals.min())\n\nprint('position of the minimum:', w0s[ind0], w1s[ind1])\n\n# plot the loss function\np = plt.pcolor(w0s, w1s, sumLoss_vals, shading='auto')\nc = plt.colorbar()\n\np2 = plt.plot(w1s[ind1], w0s[ind0], 'ro')\n\ntx = plt.xlabel('w1')\nty = plt.ylabel('w0')\n\n\n",
"_____no_output_____"
]
],
[
[
"Here we found the minimum of the loss function simply by computing it over a large range of values. In practice, this approach is not possible when the dimensionality of the loss function (number of weights) is very large. To find the minimum of the loss function, the gradient descent algorithm (or [stochastic gradient descent](http://scikit-learn.org/stable/modules/sgd.html)) is often used.",
"_____no_output_____"
]
],
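[
[
"To illustrate the idea (a minimal sketch added here, not part of the original notebook), we can minimize the loss with a simple batch gradient descent; the learning rate and number of steps are arbitrary choices.",
"_____no_output_____"
]
],
[
[
"# minimal batch gradient descent on the logistic-regression loss (added sketch)\n# uses X, y and np defined earlier; lr and the number of steps are arbitrary\nw0_gd, w1_gd = 0.0, 0.0\nlr = 0.1\nfor step in range(20000):\n    p = 1/(1+np.exp(-(w0_gd + w1_gd*X)))\n    # gradient of the negative log-likelihood: sum_i (h(x_i) - y_i) * x_ij\n    w0_gd -= lr*np.mean(p - y)\n    w1_gd -= lr*np.mean((p - y)*X)\nprint('gradient descent solution: w0=%.3f, w1=%.3f' % (w0_gd, w1_gd))",
"_____no_output_____"
]
],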
[
[
"# plot the solution\n\nx = np.linspace(0,6,100)\n\ndef h_w(x, w0=w0s[ind0], w1=w1s[ind1]):\n return 1/(1+np.exp(-(w0+x*w1)))\n\np1 = plt.plot(x, h_w(x))\np2 = plt.plot(X,y,'ro')\ntx = plt.xlabel('x [h]')\nty = plt.ylabel('y ')\n",
"_____no_output_____"
],
[
"# probability of passing the exam if you worked 5 hours:\nprint(h_w(5))",
"_____no_output_____"
]
],
[
[
"We will use the package sci-kit learn (http://scikit-learn.org/) that provide access to many tools for machine learning, data mining and data analysis.",
"_____no_output_____"
]
],
[
[
"# The same thing using the sklearn module\nfrom sklearn.linear_model import LogisticRegression\n\nmodel = LogisticRegression(C=1e10)\n\n# to train our model we use the \"fit\" method\n# we have to reshape X because we have only one feature here\nmodel.fit(X.reshape(-1,1),y)\n\n# to see the weights\nprint('w1 =', model.coef_)\nprint('w0 =', model.intercept_)\n\n# use the trained model to predict new values\nprint('prediction probabilities:', model.predict_proba(np.array(5).reshape(-1,1)))\nprint('predicted label:', model.predict((np.array(5).reshape(-1,1))))",
"_____no_output_____"
]
],
[
[
"Note that although the loss function is not linear, the decision function is a **linear function of the weights and features**. This is why the Logistic regression is called a **linear model**.\n\nOther linear models are defined by different loss functions. For example:\n- Perceptron: $L \\left(y^{(i)}, f(x^{(i)})\\right) = \\max(0, -y^{(i)}\\cdot f(x^{(i)}))$\n- Hinge-loss (soft-margin Support vector machine (SVM) classification): $L \\left(y^{(i)}, f(x^{(i)})\\right) = \\max(0, 1-y^{(i)}\\cdot f(x^{(i)}))$\n\nSee http://scikit-learn.org/stable/modules/sgd.html for more examples.\n",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nfx = np.linspace(-5,5, 200)\nLogit = np.log2(1+np.exp(-fx))\nPercep = np.maximum(0,- fx) \nHinge = np.maximum(0, 1- fx)\nZeroOne = np.ones(fx.size)\nZeroOne[fx>=0] = 0\n\np = plt.plot(fx,Logit,label='Logistic Regression')\np = plt.plot(fx,Percep,label='Perceptron')\np = plt.plot(fx,Hinge,label='Hinge-loss')\np = plt.plot(fx,ZeroOne,label='Zero-One loss')\nplt.xlabel('f(x)')\nplt.ylabel('L')\nplt.legend()\nylims = plt.ylim((0,7))",
"_____no_output_____"
]
],
[
[
"### Evaluating the performance of a binary classifier\n\nThe confusion matrix allows to visualize the performance of a classifier:\n\n| | Predicted negative | Predicted positive |\n| --- |:---:|:---:|\n| real negative | TN | FP |\n| real positive | FN | TP | \n\nFor each prediction $y_p$, we put it in one of the four categories based on the true value of $y$:\n- TP = True Positive\n- FP = False Positive\n- TN = True Negative\n- FN = False Negative\n\nWe can then evalute several measures, for example:\n\n#### Accuracy:\n\n$\\text{Accuracy}=\\frac{TP+TN}{TP+TN+FP+FN}$\n\nAccuracy is the proportion of true results (both true positives and true negatives) among the total number of cases examined. However, accuracy is not necessarily a good measure of the predictive power of a model. See the example below:\n\n#### Accuracy paradox:\nA classifier with these results:\n\n| \t|Predicted Negative | \tPredicted Positive|\n| --- |---|---|\n|Negative Cases \t|9,700 |\t150|\n|Positive Cases \t|50 \t|100|\n\nhas an accuracy = 98%.\n\nNow consider the results of a classifier that systematically predict a negative result independently of the input:\n\n| |Predicted Negative| \tPredicted Positive|\n|---|---|---|\n|Negative Cases| \t9,850 | \t0|\n|Positive Cases| \t150 |0 |\n\nThe accuracy of this classifier is 98.5% while it is clearly useless. Here the less accurate model is more useful than the more accurate one. This is why accuracy should not be used (alone) to evaluate the performance of a classifier. \nPrecision and Recall are usually prefered:\n\n#### Precision:\n\n$\\text{Precision}=\\frac{TP}{TP+FP}$\n\nPrecision measures the fraction of correct positive or the lack of false positive.\nIt answers the question: \"Given a positive prediction from the classifier, how likely is it to be correct ?\"\n\n#### Recall:\n\n$\\text{Recall}=\\frac{TP}{TP+FN}$\n\nRecall measures the proportion of positives that are correctly identified as such or the lack of false negative.\nIt answers the question: \"Given a positive example, will the classifier detect it ?\"\n\n#### $F_1$ score:\n\nIn order to account for the precision and recall of a classifier, $F_1$ score takes the harmonic mean of both measures:\n\n$F_1 = 2 \\cdot \\frac{\\mathrm{precision} \\cdot \\mathrm{recall}}{ \\mathrm{precision} + \\mathrm{recall}} = 2 \\frac{TP}{2TP +FP+FN}$",
"_____no_output_____"
],
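[
"# Added worked example (not from the original notebook): metrics for the two\n# confusion matrices of the accuracy-paradox tables above.\n# Counts are ordered (TN, FP, FN, TP), taken from the tables.\nfor name, (TN, FP, FN, TP) in [('selective classifier', (9700, 150, 50, 100)),\n                               ('always-negative classifier', (9850, 0, 150, 0))]:\n    accuracy = (TP + TN) / (TP + TN + FP + FN)\n    precision = TP / (TP + FP) if (TP + FP) else 0.0\n    recall = TP / (TP + FN) if (TP + FN) else 0.0\n    print('%s: accuracy=%.3f, precision=%.3f, recall=%.3f'\n          % (name, accuracy, precision, recall))",
"_____no_output_____"
],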
[
"When evaluating the performance of a classifier it is important to test is on a different set of values than then set we used to train it. Indeed, we want to know how the classifier performs on new data not on the training data. For this purpose we separate the training set in two: a part that we use to train the model and a part that we use to test it. This method is called **cross-validation**. Usually, we split the training set in N parts (typically 3 or 10), train the model on N-1 parts and test it on the remaining part. We then repeat this procedure with all the combination of training and testing parts and average the performance metrics from each tests. Sci-kit learn allows to easily perform cross-validation: http://scikit-learn.org/stable/modules/cross_validation.html",
"_____no_output_____"
],
[
"### Regularization and over-fitting\nOverfitting happens when your model is too complicated to generalise for new data. When your model fits your data perfectly, it is unlikely to fit new data well.\n\n<img src=\"https://upload.wikimedia.org/wikipedia/commons/1/19/Overfitting.svg\" style=\"width: 250px;\"/>\n\nThe model in green is over-fitted. It performs very well on the training set, but it does not generalize well to new data compared to the model in black.\n\nTo avoid over-fitting, it is important to have a large training set and to use cross-validation to evaluate the performance of a model. Additionally, **regularization** is used to make the model less \"complex\" and more general.\n\nRegularization consists in adding a term $R(\\underline{w})$, that penalizes too \"complex\" models, to the loss function, so that the training error that we want to minimize is:\n\n$E(\\underline{w}) = \\sum_{i=1}^{m} L\\left(y^{(i)}, f(x^{(i)})\\right) + \\lambda R(\\underline{w})$,\n\nwhere $\\lambda$ is a parameter that controls the strength of the regularization.\n\nUsual choices for $R(\\underline{w})$ are:\n- L2 norm of the weights: $R(\\underline{w}) := \\frac{1}{2} \\sum_{i=1}^{n} w_j^2$, which forces small weights in the solution,\n- L1 norm of the weights: $R(\\underline{w}) := \\sum_{i=1}^{n} |w_j|$, (also refered as Lasso) which leads to sparse solutions (with several zero weights).\n\nThe choice of the regularization and of the its strength are usually done by selecting the best choice during the cross-validation.",
"_____no_output_____"
]
],
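[
[
"As a hedged illustration (added, not in the original notebook), we can compare the weights obtained with the L2 and L1 penalties on the toy dataset; with a single feature the difference is small, but on high-dimensional problems the L1 penalty typically drives several weights exactly to zero.",
"_____no_output_____"
]
],
[
[
"# compare L2 and L1 regularization (added sketch); the solvers are chosen\n# because liblinear supports the l1 penalty while lbfgs does not\nfor penalty, solver in [('l2', 'lbfgs'), ('l1', 'liblinear')]:\n    m = LogisticRegression(C=1.0, penalty=penalty, solver=solver)\n    m.fit(X.reshape(-1, 1), y)\n    print(penalty, 'weight:', m.coef_.ravel(), 'intercept:', m.intercept_)",
"_____no_output_____"
]
],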
[
[
"# for example\nfrom sklearn.model_selection import cross_val_predict\nfrom sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, confusion_matrix\n\n# logistic regression with L2 regularization, C controls the strength of the regularization\n# C = 1/lambda\nmodel = LogisticRegression(C=1, penalty='l2')\n\n# cross validation using 10 folds\ny_pred = cross_val_predict(model, X.reshape(-1,1), y=y, cv=10)\n\nprint(confusion_matrix(y,y_pred))\n\n\nprint('Accuracy = ' + str(accuracy_score(y, y_pred)))\nprint('Precision = ' + str(precision_score(y, y_pred)))\nprint('Recall = ' + str(precision_score(y, y_pred)))\nprint('F_1 = ' + str(f1_score(y, y_pred)))\n\n# try to run it with different number of folds for the cross-validation \n# and different values of the regularization strength\n\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
]
] |
e71251b48bb1b07e48c77f90ec213da589aefc0b | 2,063 | ipynb | Jupyter Notebook | tools/count-geometries/run.ipynb | crosscompute/crosscompute-examples | b341994d9353206fe0ee6b69688e70999bc2196e | [
"MIT"
] | 6 | 2015-12-18T15:54:24.000Z | 2021-05-19T17:30:39.000Z | tools/count-geometries/run.ipynb | crosscompute/crosscompute-examples | b341994d9353206fe0ee6b69688e70999bc2196e | [
"MIT"
] | 7 | 2016-04-22T16:31:08.000Z | 2022-02-07T17:54:46.000Z | tools/count-geometries/run.ipynb | crosscompute/crosscompute-examples | b341994d9353206fe0ee6b69688e70999bc2196e | [
"MIT"
] | 7 | 2016-04-20T21:03:57.000Z | 2022-02-04T16:45:35.000Z | 21.715789 | 67 | 0.542414 | [
[
[
"import subprocess\nsubprocess.run('pip install geotable'.split())",
"_____no_output_____"
],
[
"from os import environ\n\ninput_folder = environ.get(\n 'CROSSCOMPUTE_INPUT_FOLDER', 'tests/standard/input')\noutput_folder = environ.get(\n 'CROSSCOMPUTE_OUTPUT_FOLDER', 'tests/standard/output')",
"_____no_output_____"
],
[
"import geotable\nfrom os.path import join\ngeometries_path = join(input_folder, 'geometries.geojson')\ngeometries_geotable = geotable.load(geometries_path)\n# geometries_geotable.draw()",
"_____no_output_____"
],
[
"'''\nfrom shapely.geometry import (\n GeometryCollection,\n LineString,\n MultiLineString,\n MultiPolygon,\n Point,\n Polygon)\n\nfor wkt in geometries_geotable['geometry_object']:\n print(type(wkt))\n''';",
"_____no_output_____"
],
[
"import json\njson.dump({\n 'feature_count': len(geometries_geotable),\n}, open(join(output_folder, 'statistics.json'), 'wt'))",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code"
]
] |
e7126b0b71e25788a0c6a4b105c1a562d5e25b2f | 441,572 | ipynb | Jupyter Notebook | docs/notebooks/Example_IQMs_analysis_dimensionality_reduction.ipynb | nipreps/mriqc-learn | 3a794a88add314dcc1b47db66bf0ccfd18481a18 | [
"Apache-2.0"
] | 1 | 2022-01-21T15:12:49.000Z | 2022-01-21T15:12:49.000Z | docs/notebooks/Example_IQMs_analysis_dimensionality_reduction.ipynb | nipreps/mriqc-learn | 3a794a88add314dcc1b47db66bf0ccfd18481a18 | [
"Apache-2.0"
] | 13 | 2022-02-24T14:53:59.000Z | 2022-03-30T02:51:51.000Z | docs/notebooks/Example_IQMs_analysis_dimensionality_reduction.ipynb | nipreps/mriqc-learn | 3a794a88add314dcc1b47db66bf0ccfd18481a18 | [
"Apache-2.0"
] | null | null | null | 420.144624 | 142,536 | 0.928376 | [
[
[
"# Examples of analysis on image quality metrics (IQMs)",
"_____no_output_____"
],
[
"This notebook gives an example of the type of analysis that can be ran on the IQMs automatically computed by [MRIQC](https://mriqc.readthedocs.io/en/stable/). Specifically, this analysis presented here consiste in applying different methods of dimensionality reduction on IQMs. ",
"_____no_output_____"
],
[
"## Load data",
"_____no_output_____"
],
[
"We first load the ABIDE dataset, one of the default datasets distributed with MRIQC-learn. The data comes with the already computed IQMs and quality grades manually assigned by raters.",
"_____no_output_____"
]
],
[
[
"from mriqc_learn.datasets import load_dataset\n\n(iqms, manual_ratings), (_, _) = load_dataset(split_strategy=\"none\")\n\n#Remove field that do not correspond to IQMs\niqms = iqms.drop(columns = ['size_x', 'size_y', 'size_z', 'spacing_x', 'spacing_y', 'spacing_z'], inplace = False)\n#Keep only rater 3\nmanual_ratings = manual_ratings[[\"rater_3\"]].values.squeeze().astype(int)",
"_____no_output_____"
]
],
[
[
"Debugging : If running the previous cells indicates `ModuleNotFoundError: No module named 'mriqc_learn'`. You need to go into the mriqc-learn repository, run `pip install -e .[all]` to install the package in editable mode and all the dependencies, restart the kernel and the cell should run now.",
"_____no_output_____"
],
[
"Let's print out a pretty view of the data table:",
"_____no_output_____"
]
],
[
[
"iqms",
"_____no_output_____"
]
],
[
[
"## IQMs dimensionality reduction",
"_____no_output_____"
]
],
[
[
"#reload automatically external packages so that modifications are taken into account\n%load_ext autoreload \n%autoreload 2\n#magic function of IPython to visualize plots directly inside the notebook\n%matplotlib inline\n\nimport os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom scipy import stats\nfrom sklearn.decomposition import PCA\nfrom factor_analyzer import FactorAnalyzer\nfrom factor_analyzer.factor_analyzer import calculate_bartlett_sphericity, calculate_kmo\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom mlxtend.feature_selection import SequentialFeatureSelector as SFS\nfrom sklearn.pipeline import Pipeline",
"_____no_output_____"
]
],
[
[
"We want to perform dimensionality reduction on the 62 IQMs not only to reduce the feature space, but also to try extracting interpretable latent variables, so we could get a better grasp on what features IQMs capture and which IQMs are most important. This dimensionality reduction is particularly motivated by the fact that many of these metrics are knowingly highly correlated. We can verify it this claim ourselves",
"_____no_output_____"
]
],
[
[
"fig = plt.figure(figsize=(20,20))\nax = sns.heatmap(iqms.corr())",
"_____no_output_____"
]
],
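[
[
"To quantify the redundancy visible in the heatmap (an added sketch, not in the original notebook), we can count the IQM pairs whose absolute correlation exceeds an arbitrary threshold of 0.95.",
"_____no_output_____"
]
],
[
[
"import numpy as np\n\ncorr = iqms.corr().abs()\n# keep only the strictly upper triangle so each pair is counted once\nupper = corr.where(np.triu(np.ones(corr.shape, dtype=bool), k=1))\nprint('IQM pairs with |r| > 0.95:', int((upper > 0.95).sum().sum()))",
"_____no_output_____"
]
],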
[
[
"They are indeed blocks of IQMs where the correlation coefficients almost reaches 1 or -1. Principal component analysis (PCA) seems therefore to be perfect dimensionality reduction method, as it transforms a set of correlated variables into a smaller set of uncorrelated variables, while retaining as much of the variation in the original dataset as possible. Let's thus run PCA.",
"_____no_output_____"
],
[
"### PCA",
"_____no_output_____"
]
],
[
[
"# PCA\npca = PCA(n_components=None)\n# Z-score so that each IQMs live in the same range of value\niqms_z = stats.zscore(iqms,axis=0)\npca.fit(iqms_z) #n_samples x n_features",
"_____no_output_____"
]
],
[
[
"One tricky point in running PCA is to choose how many components to keep. Let's visualize the cumulative variance explained to inform our decision.",
"_____no_output_____"
]
],
[
[
"#Plot cumulative variance explained\nfig = plt.figure(figsize=(15,6))\nplt.plot(np.cumsum(pca.explained_variance_ratio_ * 100), '-x')\nplt.ylabel('Cumulative variance explained [%]')\nplt.xlabel('Nbr of components')\nplt.xticks(list(range(0,pca.explained_variance_ratio_.size)))\nplt.show()",
"_____no_output_____"
]
],
[
[
"Here we decide to choose 16 components so to reach 90% of variance explained. This decision is a bit arbitrary, I encourage you to play with the number of components to see how it modifies your results.",
"_____no_output_____"
]
],
[
[
"n_components = 15 #remember python convention that the first element is at index 0",
"_____no_output_____"
]
],
[
[
"To be able to interpret components found by PCA, we would like PCA components to present just a few IQMs that stand out through high weights. Then through the definition of those IQMs, we could try to assign what feature does this component capture in the image. Let's plot the weights for each component in the search of such a scenario.",
"_____no_output_____"
]
],
[
[
"#Plot PCA components' weights\nbasis = pca.components_[:n_components,:]\nfig, axs = plt.subplots(5,3,sharex=False,sharey=False,figsize=(12,12))\nfor k in range(basis.shape[0]):\n axs[k//3,k%3].plot(abs(basis[k,:]),'x')\n axs[k//3,k%3].set_title('PC {}'.format(k))",
"_____no_output_____"
]
],
[
[
"Unfortunately, for most of the PC there aren't really IQMs that stand out from the crowd; mostly it is a continuous sea of points when I would really like to observe outliers. But actually, if we are looking for interpretability, a more suitable dimensionality reduction method is factor analysis (FA), as it is tailored to finding interpretable latent variables. Let's thus try to perform FA on our data.",
"_____no_output_____"
],
[
"### Factor analysis",
"_____no_output_____"
],
[
"Factor analysis, as any other statistical method, requires a certain number of assumptions to be verified, before the method can trusthworthily be applied. The two following tests are standard tests to verify that the data are respecting those assumptions (cf https://www.datacamp.com/community/tutorials/introduction-factor-analysis))",
"_____no_output_____"
]
],
[
[
"#Check that correlation matrix is not identity = sphericity check\nchi_square_value,p_value=calculate_bartlett_sphericity(iqms)\nif p_value > 0.5:\n raise ValueError('Their should be correlations in your data') \n \n#Check suitability of data for FA\nkmo_all,kmo_model=calculate_kmo(iqms)\nif kmo_model<0.6:\n raise ValueError('Data not suitable for FA')",
"/home/cprovins/miniconda3/lib/python3.8/site-packages/factor_analyzer/factor_analyzer.py:111: RuntimeWarning: invalid value encountered in log\n statistic = -np.log(corr_det) * (n - 1 - (2 * p + 5) / 6)\n/home/cprovins/miniconda3/lib/python3.8/site-packages/factor_analyzer/utils.py:249: UserWarning: The inverse of the variance-covariance matrix was calculated using the Moore-Penrose generalized matrix inversion, due to its determinant being at or very close to zero.\n warnings.warn('The inverse of the variance-covariance matrix '\n"
]
],
[
[
"The data passed both tests, we can therefore run FA.",
"_____no_output_____"
]
],
[
[
"fa = FactorAnalyzer()\nfa.fit(iqms)",
"_____no_output_____"
]
],
[
[
"As for PCA, a tricky choice is the number of factors to extract. They are several methods assisting that choice. \nThe first method, so called scree plot, is to plot the eigenvalues associated to the number of factors and keep the value at the elbow. The second method is to keep the number of factors for which the eigenvalue is bigger than 1. The third method is called parallel analysis. The idea is to generate a random normally distributed data, perform FA on this synthetic dataset and keep the number of factors for which the eigenvalue curve from the original and the synthetic data intersect. It is a good idea to try the several methods to observe if they point toward the same number of factors. We will thus plot the different strategies. ",
"_____no_output_____"
]
],
[
[
"# Helper functions to decide run parallel analysis (Thanks to Mikkel Schoettner for sharing his code)\n\ndef parallel_analysis(data, k=20, method=\"minres\", return_ev=False):\n import numpy as np\n import matplotlib.pyplot as plt\n from factor_analyzer import FactorAnalyzer\n\n # get shape of the data\n n, m = data.shape\n\n # initialize FactorAnalyzer\n fa = FactorAnalyzer(n_factors=m, rotation=\"varimax\", method=method)\n\n # list to store eigenvalues\n eig = np.ones((k, m))\n\n # loop for k iterations\n for i in range(k):\n # print(\"Iteration\", i+1)\n # generate random data\n rnd_data = np.random.normal(size=(n, m))\n # run factor analysis\n fa.fit(rnd_data)\n # extract eigenvalues\n ev, v = fa.get_eigenvalues()\n eig[i] = eig[i] * ev\n\n # average eigenvalues for random data\n avg_eig = np.mean(eig, axis=0)\n\n # run factor analysis on data\n fa.fit(data)\n ev, v = fa.get_eigenvalues()\n\n # determine suggested no. of factors\n suggestedFactors = sum((ev - avg_eig) > 0)\n if return_ev:\n return suggestedFactors, ev, avg_eig\n else:\n return suggestedFactors\n \n \ndef plot_pa_single(ev, avg_eig, save_plot = False):\n # plot eigenvalues of actual and random data\n sns.set_theme(style='white')\n fig, ax = plt.subplots(1, 1, figsize=(6,5))\n ax.plot(range(len(ev)), ev, marker=\"1\", label=\"Real Data\")\n ax.plot(range(len(ev)), avg_eig, marker=\"2\", label=\"Synthetic Data\")\n ax.set_xticks(range(0, len(ev), 10))\n ax.set_yticks(range(0, 13, 1))\n ax.set_xlabel('Factor Number')\n ax.set_ylabel('Eigenvalue')\n ax.set_title('Parallel Analysis')\n ax.legend()\n sns.despine()\n if save_plot == True:\n fig.savefig(outpath_efa+\"parallel_analysis.svg\")\n plt.show()\n return\n\n# Scree Plot\nev, v = fa.get_eigenvalues()\nplt.scatter(range(1,iqms.shape[1]+1-45),ev[:-45])\nplt.plot(range(1,iqms.shape[1]+1-45),ev[:-45])\nplt.title('Scree Plot')\nplt.xlabel('Factors')\nplt.ylabel('Eigenvalue')\n#plot the line corresponding the an eigenvalue of 1\nplt.axhline(y=1, color='r')\nplt.grid()\nplt.show()\n\n#Parallel analysis\nsuggestedFactors, ev22, avg_ev = parallel_analysis(iqms, k=20, method=\"ml\", return_ev=True)\nplot_pa_single(ev, avg_ev)\nprint('Parallel analysis suggests to keep {} factors'.format(suggestedFactors))",
"_____no_output_____"
]
],
[
[
"Evidences from the different methods converge that 11 is a good number of factors to keep",
"_____no_output_____"
]
],
[
[
"n_factors = 11",
"_____no_output_____"
]
],
[
[
"Now that we have determined the number of factors, we can run FA and tell it how many factors to extract. Furthermore, we apply rotation to the factors, because this rotation convert factors into uncorrelated factors improving in the process interpretabiity.",
"_____no_output_____"
]
],
[
[
"fa = FactorAnalyzer(rotation=\"varimax\", n_factors=n_factors)\nfa.fit(iqms)",
"_____no_output_____"
]
],
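[
[
"As an added check (a sketch, not in the original notebook), `get_factor_variance()` reports how much variance each rotated factor explains.",
"_____no_output_____"
]
],
[
[
"# get_factor_variance() returns (variance, proportional variance, cumulative variance)\nvariance, proportional, cumulative = fa.get_factor_variance()\nfor i, (p, c) in enumerate(zip(proportional, cumulative)):\n    print('Factor %d: %.1f%% of variance (cumulative: %.1f%%)' % (i, 100*p, 100*c))",
"_____no_output_____"
]
],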
[
[
"By looking at the loadings of each factor, comes now the hard task of assigning interpretation on each factor. The heatmap is a good visualisation tool to help grasp all factors in one glimpse.",
"_____no_output_____"
]
],
[
[
"fig = plt.figure(figsize=(12,18))\nax = sns.heatmap(fa.loadings_, center=0)\nax.set_xlabel('Factor nbr')\nax.set_ylabel('IQMs')\nax.set_yticklabels(list(iqms.columns), rotation=0)\nplt.show()",
"_____no_output_____"
]
],
[
[
"### Backward feature elimination",
"_____no_output_____"
],
[
"Another approach to dimensionality reduction would be instead of combining the different IQMs together, we could extract only the most important ones. This can be achieved via backward feature elimination.\n\nThe concept is that the model is first trained with all features, the performance is assessed using the target data to give a referencial performance. Then the model is retrained by droping one feature at a time and performance is assessed. The feature that affected the performance the least is eliminated. This process iterates until only the most important feature remains.\n\nThe inverse process exists, that is forward feature selection. However the latter is advised only when the number of variables under consideration is very large, even larger than the sample size. If it is not the case, backward feature elimination should be used, especially in case of collinearity as it considers the effects of all variables simultaneously. This means that backward elimination may force to keep all collinear feature while forward selection might select none of them. (cf https://quantifyinghealth.com/stepwise-selection/)\n\nFor implementing the method, we choose the Linear Discriminant Analysis (LDA) classifier with as target the manual ratings. The choice of LDA is arbitrary, you can try to play with other multi-class classifiers. Moreover, we choose the backward implementation, as we know our IQMs are collinear and we keep the 5 most important IQMs, which is also an arbitrary decision.",
"_____no_output_____"
]
],
[
[
"k_features = 5\nclf = LinearDiscriminantAnalysis()\nsfs = SFS(clf, \n k_features=k_features, \n forward=False, \n verbose=0)\nsfs.fit(iqms_z, manual_ratings)\n\nkept_features = list(sfs.k_feature_names_)\nprint('The 5 most important IQMs are {}'.format(list(sfs.k_feature_names_)))",
"The 5 most important IQMs are ['fwhm_avg', 'fwhm_y', 'summary_bg_n', 'summary_csf_mean', 'summary_wm_mad']\n"
]
],
[
[
"It is also possible to extract the order in which features have been eliminated. Here is my implementation.",
"_____no_output_____"
]
],
[
[
"feat_names = list(sfs.k_feature_names_)\n\n#feature eliminated in order\neliminated_feat = []\nfor i in reversed(range(k_features,iqms.shape[1]-1)):\n feat0 = sfs.subsets_[i]['feature_names']\n feat1 = sfs.subsets_[i+1]['feature_names']\n el_feat = list(set(feat1) - set(feat0))\n eliminated_feat.append(el_feat[0])\n\nprint('Here is the order in which the features have been eliminated \\\n(1st in the list is the 1st feature to have been eliminated): {}'.format(eliminated_feat))",
"Here is the order in which the features have been eliminated (1st in the list is the 1st feature to have been eliminated): ['cjv', 'summary_csf_median', 'summary_bg_stdv', 'qi_1', 'wm2max', 'summary_wm_n', 'summary_gm_median', 'summary_wm_stdv', 'rpve_csf', 'cnr', 'inu_range', 'summary_bg_mean', 'summary_bg_p95', 'summary_gm_mad', 'summary_bg_p05', 'summary_bg_k', 'tpm_overlap_csf', 'inu_med', 'summary_csf_k', 'summary_bg_mad', 'summary_csf_mad', 'snrd_wm', 'snr_wm', 'qi_2', 'icvs_wm', 'fwhm_z', 'summary_csf_p05', 'summary_wm_median', 'summary_gm_p05', 'rpve_gm', 'summary_gm_n', 'summary_wm_k', 'summary_bg_median', 'summary_gm_stdv', 'tpm_overlap_wm', 'tpm_overlap_gm', 'summary_csf_p95', 'summary_gm_k', 'summary_gm_mean', 'snrd_total', 'snrd_gm', 'summary_wm_p05', 'summary_wm_mean', 'snrd_csf', 'summary_gm_p95', 'summary_csf_n', 'rpve_wm', 'snr_gm', 'snr_total', 'efc', 'summary_wm_p95', 'icvs_csf', 'icvs_gm', 'summary_csf_stdv', 'snr_csf', 'fwhm_x']\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e71272d1ad7cbab28eb825c8bd8e0e2d553ca7ff | 29,226 | ipynb | Jupyter Notebook | examples/tokenizers.ipynb | VedantMadane/text | 66e250ed638e8161c28c654b4d37403871d23a71 | [
"Apache-2.0"
] | 1 | 2021-05-10T10:44:59.000Z | 2021-05-10T10:44:59.000Z | examples/tokenizers.ipynb | VedantMadane/text | 66e250ed638e8161c28c654b4d37403871d23a71 | [
"Apache-2.0"
] | null | null | null | examples/tokenizers.ipynb | VedantMadane/text | 66e250ed638e8161c28c654b4d37403871d23a71 | [
"Apache-2.0"
] | null | null | null | 34.222482 | 529 | 0.525046 | [
[
[
"##### Copyright 2020 The TensorFlow Authors.",
"_____no_output_____"
]
],
[
[
"#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.",
"_____no_output_____"
]
],
[
[
"# Tokenizing with TF Text",
"_____no_output_____"
],
[
"<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/tutorials/tensorflow_text/tokenizers\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/text/blob/master/examples/tokenizers.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/text/blob/master/examples/tokenizers.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View on GitHub</a>\n </td>\n <td>\n <a href=\"https://storage.googleapis.com/tensorflow_docs/text/examples/tokenizers.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />Download notebook</a>\n </td>\n</table>",
"_____no_output_____"
],
[
"## Overview\n\nTokenization is the process of breaking up a string into tokens. Commonly, these tokens are words, numbers, and/or punctuation. The `tensorflow_text` package provides a number of tokenizers available for preprocessing text required by your text-based models. By performing the tokenization in the TensorFlow graph, you will not need to worry about differences between the training and inference workflows and managing preprocessing scripts.\n\nThis guide discusses the many tokenization options provided by TensorFlow Text, when you might want to use one option over another, and how these tokenizers are called from within your model.",
"_____no_output_____"
],
[
"## Setup",
"_____no_output_____"
]
],
[
[
"!pip install -q tensorflow-text-nightly",
"\u001b[K |████████████████████████████████| 4.4MB 5.7MB/s \n\u001b[?25h"
],
[
"import requests\nimport tensorflow as tf\nimport tensorflow_text as tf_text",
"_____no_output_____"
]
],
[
[
"## Splitter API\n\nThe main interfaces are `Splitter` and `SplitterWithOffsets` which have single methods `split` and `split_with_offsets`. The `SplitterWithOffsets` variant (which extends `Splitter`) includes an option for getting byte offsets. This allows the caller to know which bytes in the original string the created token was created from.\n\nThe `Tokenizer` and `TokenizerWithOffsets` are specialized versions of the `Splitter` that provide the convenience methods `tokenize` and `tokenize_with_offsets` respectively.\n\nGenerally, for any N-dimensional input, the returned tokens are in a N+1-dimensional [RaggedTensor](https://www.tensorflow.org/guide/ragged_tensor) with the inner-most dimension of tokens mapping to the original individual strings.\n\n```python\nclass Splitter {\n @abstractmethod\n def split(self, input)\n}\n\nclass SplitterWithOffsets(Splitter) {\n @abstractmethod\n def split_with_offsets(self, input)\n}\n```\n\nThere is also a `Detokenizer` interface. Any tokenizer implementing this interface can accept a N-dimensional ragged tensor of tokens, and normally returns a N-1-dimensional tensor or ragged tensor that has the given tokens assembled together.\n\n```python\nclass Detokenizer {\n @abstractmethod\n def detokenize(self, input)\n}\n```",
"_____no_output_____"
],
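[
"# Added sketch (not part of the original guide): the *_with_offsets variants\n# also return byte offsets back into the original string.\ntokenizer = tf_text.WhitespaceTokenizer()\ntokens, starts, ends = tokenizer.tokenize_with_offsets(['Everything not saved will be lost.'])\nprint(tokens.to_list())\nprint(starts.to_list())\nprint(ends.to_list())",
"_____no_output_____"
],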
[
"## Tokenizers\n\nBelow is the suite of tokenizers provided by TensorFlow Text. String inputs are assumed to be UTF-8. Please review the [Unicode guide](https://www.tensorflow.org/tutorials/load_data/unicode) for converting strings to UTF-8.",
"_____no_output_____"
],
[
"### Whole word tokenizers\n\nThese tokenizers attempt to split a string by words, and is the most intuitive way to split text.\n\n",
"_____no_output_____"
],
[
"#### WhitespaceTokenizer\n\nThe `WhitespaceTokenizer` is the most basic tokenizer which splits strings on ICU defined whitespace characters (eg. space, tab, new line). This is often good for quickly building out prototype models.",
"_____no_output_____"
]
],
[
[
"tokenizer = tf_text.WhitespaceTokenizer()\ntokens = tokenizer.tokenize([\"What you know you can't explain, but you feel it.\"])\nprint(tokens.to_list())",
"[[b'What', b'you', b'know', b'you', b\"can't\", b'explain,', b'but', b'you', b'feel', b'it.']]\n"
]
],
[
[
"You may notice a shortcome of this tokenizer is that punctuation is included with the word to make up a token. To split the words and punctuation into separate tokens, the `UnicodeScriptTokenizer` should be used.",
"_____no_output_____"
],
[
"#### UnicodeScriptTokenizer\n\nThe `UnicodeScriptTokenizer` splits strings based on Unicode script boundaries. The script codes used correspond to International Components for Unicode (ICU) UScriptCode values. See: http://icu-project.org/apiref/icu4c/uscript_8h.html\n\nIn practice, this is similar to the `WhitespaceTokenizer` with the most apparent difference being that it will split punctuation (USCRIPT_COMMON) from language texts (eg. USCRIPT_LATIN, USCRIPT_CYRILLIC, etc) while also separating language texts from each other. Note that this will also split contraction words into separate tokens.",
"_____no_output_____"
]
],
[
[
"tokenizer = tf_text.UnicodeScriptTokenizer()\ntokens = tokenizer.tokenize([\"What you know you can't explain, but you feel it.\"])\nprint(tokens.to_list())",
"[[b'What', b'you', b'know', b'you', b'can', b\"'\", b't', b'explain', b',', b'but', b'you', b'feel', b'it', b'.']]\n"
]
],
[
[
"### Subword tokenizers\n\nSubword tokenizers can be used with a smaller vocabulary, and allow the model to have some information about novel words from the subwords that make create it.\n\nWe briefly discuss the Subword tokenization options below, but the [Subword Tokenization tutorial](https://www.tensorflow.org/tutorials/tensorflow_text/subwords_tokenizer) goes more in depth and also explains how to generate the vocab files.",
"_____no_output_____"
],
[
"#### WordpieceTokenizer\n\nWordPiece tokenization is a data-driven tokenization scheme which generates a set of sub-tokens. These sub tokens may correspond to linguistic morphemes, but this is often not the case.\n\nThe WordpieceTokenizer expects the input to already be split into tokens. Because of this prerequisite, you will often want to split using the `WhitespaceTokenizer` or `UnicodeScriptTokenizer` beforehand.",
"_____no_output_____"
]
],
[
[
"tokenizer = tf_text.WhitespaceTokenizer()\ntokens = tokenizer.tokenize([\"What you know you can't explain, but you feel it.\"])\nprint(tokens.to_list())",
"[[b'What', b'you', b'know', b'you', b\"can't\", b'explain,', b'but', b'you', b'feel', b'it.']]\n"
]
],
[
[
"After the string is split into tokens, the `WordpieceTokenizer` can be used to split into subtokens.",
"_____no_output_____"
]
],
[
[
"url = \"https://github.com/tensorflow/text/blob/master/tensorflow_text/python/ops/test_data/test_wp_en_vocab.txt?raw=true\"\nf = requests.get(url)\nfilepath = \"vocab.txt\"\nopen(filepath, 'wb').write(r.content)",
"_____no_output_____"
],
[
"subtokenizer = tf_text.UnicodeScriptTokenizer(filepath)\nsubtokens = tokenizer.tokenize(tokens)\nprint(subtokens.to_list())",
"[[[b'What'], [b'you'], [b'know'], [b'you'], [b\"can't\"], [b'explain,'], [b'but'], [b'you'], [b'feel'], [b'it.']]]\n"
]
],
[
[
"#### BertTokenizer\n\nThe BertTokenizer mirrors the original implemenation of tokenization from the BERT paper. This is backed by the WordpieceTokenizer, but also performs additional tasks such as normalization and tokenizing to words first.",
"_____no_output_____"
]
],
[
[
"tokenizer = tf_text.BertTokenizer(filename, token_out_type=tf.string, lower_case=True)\ntokens = tokenizer.tokenize([\"What you know you can't explain, but you feel it.\"])\nprint(tokens.to_list())",
"[[[b'what'], [b'you'], [b'know'], [b'you'], [b'can'], [b\"'\"], [b't'], [b'explain'], [b','], [b'but'], [b'you'], [b'feel'], [b'it'], [b'.']]]\n"
]
],
[
[
"#### SentencepieceTokenizer\n\nThe SentencepieceTokenizer is a sub-token tokenizer that is highly configurable. This is backed by the Sentencepiece library. Like the BertTokenizer, it can include normalization and token splitting before splitting into sub-tokens.\n",
"_____no_output_____"
]
],
[
[
"import requests\nurl = \"https://github.com/tensorflow/text/blob/master/tensorflow_text/python/ops/test_data/test_oss_model.model?raw=true\"\nsp_model = requests.get(url).content",
"_____no_output_____"
],
[
"tokenizer = tf_text.SentencepieceTokenizer(sp_model, out_type=tf.string)\ntokens = tokenizer.tokenize([\"What you know you can't explain, but you feel it.\"])\nprint(tokens.to_list())",
"[[b'\\xe2\\x96\\x81What', b'\\xe2\\x96\\x81you', b'\\xe2\\x96\\x81know', b'\\xe2\\x96\\x81you', b'\\xe2\\x96\\x81can', b\"'\", b't', b'\\xe2\\x96\\x81explain', b',', b'\\xe2\\x96\\x81but', b'\\xe2\\x96\\x81you', b'\\xe2\\x96\\x81feel', b'\\xe2\\x96\\x81it', b'.']]\n"
]
],
[
[
"### Other splitters\n\n",
"_____no_output_____"
],
[
"#### UnicodeCharTokenizer\n\nThis splits a string into UTF-8 characters. It is useful for CJK languages that do not have spaces between words.",
"_____no_output_____"
]
],
[
[
"tokenizer = tf_text.UnicodeCharTokenizer()\ntokens = tokenizer.tokenize([\"What you know you can't explain, but you feel it.\"])\nprint(tokens.to_list())",
"[[87, 104, 97, 116, 32, 121, 111, 117, 32, 107, 110, 111, 119, 32, 121, 111, 117, 32, 99, 97, 110, 39, 116, 32, 101, 120, 112, 108, 97, 105, 110, 44, 32, 98, 117, 116, 32, 121, 111, 117, 32, 102, 101, 101, 108, 32, 105, 116, 46]]\n"
]
],
[
[
"The output is Unicode codepoints. This can be also useful for creating character ngrams, such as bigrams. To convert back into UTF-8 characters.",
"_____no_output_____"
]
],
[
[
"characters = tf.strings.unicode_encode(tf.expand_dims(tokens, -1), \"UTF-8\")\nbigrams = tf_text.ngrams(characters, 2, reduction_type=tf_text.Reduction.STRING_JOIN, string_separator='')\nprint(bigrams.to_list())",
"[[b'Wh', b'ha', b'at', b't ', b' y', b'yo', b'ou', b'u ', b' k', b'kn', b'no', b'ow', b'w ', b' y', b'yo', b'ou', b'u ', b' c', b'ca', b'an', b\"n'\", b\"'t\", b't ', b' e', b'ex', b'xp', b'pl', b'la', b'ai', b'in', b'n,', b', ', b' b', b'bu', b'ut', b't ', b' y', b'yo', b'ou', b'u ', b' f', b'fe', b'ee', b'el', b'l ', b' i', b'it', b't.']]\n"
]
],
[
[
"#### HubModuleTokenizer\n\nThis is a wrapper around models deployed to TF Hub to make the calls easier since TF Hub currently does not support ragged tensors. Having a model perform tokenization is particularly useful for CJK languages when you want to split into words, but do not have spaces to provide a heuristic guide. At this time, we have a single segmentation model for Chinese.",
"_____no_output_____"
]
],
[
[
"MODEL_HANDLE = \"https://tfhub.dev/google/zh_segmentation/1\"\nsegmenter = tf_text.HubModuleTokenizer(MODEL_HANDLE)\ntokens = segmenter.tokenize([\"新华社北京\"])\nprint(tokens.to_list())",
"[[b'\\xe6\\x96\\xb0\\xe5\\x8d\\x8e\\xe7\\xa4\\xbe', b'\\xe5\\x8c\\x97\\xe4\\xba\\xac']]\n"
]
],
[
[
"It may be difficult to view the results of the UTF-8 encoded byte strings. Decode the list values to make viewing easier.",
"_____no_output_____"
]
],
[
[
"def decode_list(x):\n if type(x) is list:\n return list(map(decode_list, x))\n return x.decode(\"UTF-8\")\n\ndef decode_utf8_tensor(x):\n return list(map(decode_list, x.to_list()))\n\nprint(decode_utf8_tensor(tokens))",
"[['新华社', '北京']]\n"
]
],
[
[
"#### SplitMergeTokenizer\n\nThe `SplitMergeTokenizer` & `SplitMergeFromLogitsTokenizer` have a targeted purpose of splitting a string based on provided values that indicate where the string should be split. This is useful when building your own segmentation models like the previous Segmentation example.\n\nFor the `SplitMergeTokenizer`, a value of 0 is used to indicate the start of a new string, and the value of 1 indicates the character is part of the current string.",
"_____no_output_____"
]
],
[
[
"strings = [\"新华社北京\"]\nlabels = [[0, 1, 1, 0, 1]]\ntokenizer = tf_text.SplitMergeTokenizer()\ntokens = tokenizer.tokenize(strings, labels)\nprint(decode_utf8_tensor(tokens))",
"[['新华社', '北京']]\n"
]
],
[
[
"The `SplitMergeFromLogitsTokenizer` is similar, but it instead accepts logit value pairs from a neural network that predict if each character should be split into a new string or merged into the current one.",
"_____no_output_____"
]
],
[
[
"strings = [[\"新华社北京\"]]\nlabels = [[[5.0, -3.2], [0.2, 12.0], [0.0, 11.0], [2.2, -1.0], [-3.0, 3.0]]]\ntokenizer = tf_text.SplitMergeFromLogitsTokenizer()\ntokenizer.tokenize(strings, labels)\nprint(decode_utf8_tensor(tokens))",
"[['新华社', '北京']]\n"
]
],
[
[
"#### RegexSplitter\n\nThe `RegexSplitter` is able to segment strings at arbitrary breakpoints defined by a provided regular expression.",
"_____no_output_____"
]
],
[
[
"splitter = tf_text.RegexSplitter(\"\\s\")\ntokens = splitter.split([\"What you know you can't explain, but you feel it.\"], )\nprint(tokens.to_list())",
"[[b'What', b'you', b'know', b'you', b\"can't\", b'explain,', b'but', b'you', b'feel', b'it.']]\n"
]
],
[
[
"## Offsets\n\nWhen tokenizing strings, it is often desired to know where in the original string the token originated from. For this reason, each tokenizer which implements `TokenizerWithOffsets` has a *tokenize_with_offsets* method that will return the byte offsets along with the tokens. The start_offsets lists the bytes in the original string each token starts at, and the end_offsets lists the bytes immediately after the point where each token ends. To refrase, the start offsets are inclusive and the end offsets are exclusive.",
"_____no_output_____"
]
],
[
[
"tokenizer = tf_text.UnicodeScriptTokenizer()\n(tokens, start_offsets, end_offsets) = tokenizer.tokenize_with_offsets(['Everything not saved will be lost.'])\nprint(tokens.to_list())\nprint(start_offsets.to_list())\nprint(end_offsets.to_list())",
"[[b'Everything', b'not', b'saved', b'will', b'be', b'lost', b'.']]\n[[0, 11, 15, 21, 26, 29, 33]]\n[[10, 14, 20, 25, 28, 33, 34]]\n"
]
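,
[
"# A quick check (sketch): the offsets are byte positions, so slicing the\n# UTF-8 encoded input with them recovers each token's original bytes.\nsentence = 'Everything not saved will be lost.'.encode('utf-8')\nfor start, end in zip(start_offsets.to_list()[0], end_offsets.to_list()[0]):\n    print(sentence[start:end])",
"_____no_output_____"
]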
],
[
[
"## Detokenization\n\nTokenizers which implement the `Detokenizer` provide a `detokenize` method which attempts to combine the strings. This has the chance of being lossy, so the detokenized string may not always match exactly the original, pre-tokenized string.",
"_____no_output_____"
]
],
[
[
"tokenizer = tf_text.UnicodeCharTokenizer()\ntokens = tokenizer.tokenize([\"What you know you can't explain, but you feel it.\"])\nprint(tokens.to_list())\nstrings = tokenizer.detokenize(tokens)\nprint(strings.numpy())",
"[[87, 104, 97, 116, 32, 121, 111, 117, 32, 107, 110, 111, 119, 32, 121, 111, 117, 32, 99, 97, 110, 39, 116, 32, 101, 120, 112, 108, 97, 105, 110, 44, 32, 98, 117, 116, 32, 121, 111, 117, 32, 102, 101, 101, 108, 32, 105, 116, 46]]\n[b\"What you know you can't explain, but you feel it.\"]\n"
]
],
[
[
"## TF Data\n\nTF Data is a powerful API for creating an input pipeline for training models. Tokenizers work as expected with the API.",
"_____no_output_____"
]
],
[
[
"docs = tf.data.Dataset.from_tensor_slices([['Never tell me the odds.'], [\"It's a trap!\"]])\ntokenizer = tf_text.WhitespaceTokenizer()\ntokenized_docs = docs.map(lambda x: tokenizer.tokenize(x))\niterator = iter(tokenized_docs)\nprint(next(iterator).to_list())\nprint(next(iterator).to_list())",
"[[b'Never', b'tell', b'me', b'the', b'odds.']]\n[[b\"It's\", b'a', b'trap!']]\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
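"code",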
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7127c4fafb461e4e8c01cb3af6a936012199ba0 | 5,903 | ipynb | Jupyter Notebook | unittesting.ipynb | crestdsl/thesis-code | aaedeb0d032218ec20e8bfb5343dcbe0e8da301e | [
"MIT"
] | null | null | null | unittesting.ipynb | crestdsl/thesis-code | aaedeb0d032218ec20e8bfb5343dcbe0e8da301e | [
"MIT"
] | null | null | null | unittesting.ipynb | crestdsl/thesis-code | aaedeb0d032218ec20e8bfb5343dcbe0e8da301e | [
"MIT"
] | null | null | null | 30.427835 | 111 | 0.50991 | [
[
[
"# Define the entity to be tested",
"_____no_output_____"
]
],
[
[
"import crestdsl.model as crest\nfrom crestdsl.simulation import Simulator",
"_____no_output_____"
],
[
"# define the required resources\nonOff = crest.Resource(unit=\"onOff\", domain=[\"on\", \"off\"])\nwatt = crest.Resource(unit=\"Watt\", domain=crest.REAL) \ncelsius = crest.Resource(unit=\"Celsius\", domain=crest.REAL)\ntime = crest.Resource(unit=\"Time\", domain=crest.REAL)\n\nclass HeatModule(crest.Entity):\n switch = crest.Input(resource=onOff, value=\"on\")\n electricity = crest.Input(resource=watt, value=0)\n internal_temp = crest.Local(resource=celsius, value=0)\n timer = crest.Local(resource=time, value=0)\n heating = crest.Output(resource=watt, value=0)\n \n # states\n off = current = crest.State()\n on = crest.State()\n error = crest.State()\n \n # transitions\n @crest.transition(source=off, target=on)\n def to_on(self):\n return self.switch.value == \"on\" and self.timer.value <= 0 and self.electricity.value >= 200\n @crest.transition(source=on, target=off)\n def to_off(self):\n return self.switch.value != \"on\" or self.timer.value >= 30 or self.electricity.value < 200\n @crest.transition(source=on, target=error)\n def to_error(self):\n return self.internal_temp.value >= 400\n \n # updates for heat energy output\n @crest.update(state=on, target=heating)\n def on_update_output(self, dt):\n # 50 per cent efficiency\n return self.electricity.value * 0.5 \n @crest.update(state=off, target=heating)\n def off_update_output(self, dt):\n return 0\n @crest.update(state=error, target=heating)\n def error_update_output(self, dt):\n return 0\n \n # update timer:\n @crest.update(state=on, target=timer)\n def on_update_timer(self, dt):\n return self.timer.value + dt\n @crest.update(state=off, target=timer)\n def off_update_timer(self, dt):\n new_value = self.timer.value - 2 * dt\n if new_value <= 0: # don't go below 0\n return 0\n else:\n return new_value\n\n # updates for internal_temp\n @crest.update(state=on, target=internal_temp)\n def on_update_internal_temp(self, dt):\n # if more than 200 watt, we grow\n # one tenth degree per extra watt per time unit\n # if lower, we sink at the same rate\n factor = (self.electricity.value - 200) / 10\n \n if self.electricity.value >= 200:\n return self.internal_temp.value + factor * dt\n else:\n new_value = self.internal_temp.value + factor * dt\n return max(new_value, 22) # don't go below 22\n \n @crest.update(state=[off,error], target=internal_temp)\n def off_error_update_internal_temp(self, dt):\n # see formula above\n new_value = self.internal_temp.value - 20 * dt\n return max(new_value, 22) # don't go below 22\n",
"_____no_output_____"
]
],
[
[
"# Define unit test",
"_____no_output_____"
]
],
[
[
"import unittest\n\nclass HeatModuleTest(unittest.TestCase):\n\n def test_switch_on(self):\n \"\"\"Test to assert correct transition to on state.\"\"\"\n # setup\n hm = HeatModule()\n hm.current = hm.off\n hm.electricity.value = 250\n hm.timer.value = 0\n hm.switch.value = \"on\"\n # action\n sim = Simulator(hm)\n sim.stabilise()\n # assert\n self.assertEqual(hm.current, hm.on, \"The heatmodule...\")",
"_____no_output_____"
]
],
[
[
"# Run test",
"_____no_output_____"
]
],
[
[
"tests = unittest.TestLoader().loadTestsFromTestCase(HeatModuleTest)\nunittest.TextTestRunner().run(tests)",
".\n----------------------------------------------------------------------\nRan 1 test in 0.016s\n\nOK\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e71280ae3ca9d85f3e7799ed2195c2eaa62fc408 | 13,580 | ipynb | Jupyter Notebook | .ipynb_checkpoints/MITIE-ATIS_Ent-checkpoint.ipynb | py-ranoid/FAQ-Bot-Notebooks | 0288239afa6bcfffc71f100afe9a229f910fcd64 | [
"MIT"
] | 1 | 2020-03-24T20:30:54.000Z | 2020-03-24T20:30:54.000Z | .ipynb_checkpoints/MITIE-ATIS_Ent-checkpoint.ipynb | py-ranoid/factbot | 0288239afa6bcfffc71f100afe9a229f910fcd64 | [
"MIT"
] | null | null | null | .ipynb_checkpoints/MITIE-ATIS_Ent-checkpoint.ipynb | py-ranoid/factbot | 0288239afa6bcfffc71f100afe9a229f910fcd64 | [
"MIT"
] | null | null | null | 30.585586 | 246 | 0.485935 | [
[
[
"# NER using MITIE on ATIS dataset\n- Reference \n - [MITIE Github Repository](https://github.com/mit-nlp/MITIE)\n - [https://github.com/mit-nlp/MITIE/blob/master/examples/python/train_ner.py](https://github.com/mit-nlp/MITIE/blob/master/examples/python/train_ner.py)\n- Steps\n - Installing and importing MITIE\n - Importing and displaying ATIS (ATIS (Airline travel information system) dataset) data\n - Getting entity tuples from label lists\n - Comparing NER learnt on ATIS dataset with generic NER\n - Conclusion",
"_____no_output_____"
],
[
"## Installing and importing MITIE\n- Clone the MITIE repository\n- Generate the binaries with `make`\n```\ngit clone https://github.com/mit-nlp/MITIE.git\ncd MITIE\nmake\ncd mitielib && echo $PWD\n```\n\n- Add the path of `mitielib` to sys path before importing\n- Download and extract models.\n```\nwget https://github.com/mit-nlp/MITIE/releases/download/v0.4/MITIE-models-v0.2.tar.bz2\ntar xvf MITIE-models-v0.2.tar.bz2\n```",
"_____no_output_____"
]
],
[
[
"import sys, os\nsys.path.append('/home/b/gitpository/MITIE/mitielib')\nMODELS_PATH = \"/home/b/gitpository/MITIE/MITIE-models/english/\"\nimport mitie",
"_____no_output_____"
]
],
[
[
"## Importing and displaying ATIS data",
"_____no_output_____"
]
],
[
[
"from utils import fetch_data, read_method\nimport pandas as pd\nimport numpy as np\nimport random\n\ntr_sents,tr_labels,tr_intents = fetch_data('data2/atis.train.w-intent.iob')\n\ndef display(n,intents,sents,labels):\n sense = []\n print (\"INTENT : \",intents[n])\n for i in range(len(sents[n])):\n sense.append({\"word\":sents[n][i],\"label\":labels[n][i]})\n return pd.DataFrame(sense)\n\nprint (\"Number of sentences :\",len(tr_sents))",
"Number of sentences : 4978\n"
],
[
"display(random.randint(0,len(tr_sents)),tr_intents,tr_sents,tr_labels)",
"INTENT : atis_flight\n"
]
],
[
[
"## Getting entity tuples from label lists",
"_____no_output_____"
]
],
[
[
"def get_entities(labels):\n idx = 0\n last_begin = -1\n entity = \"\"\n entities = []\n while idx < len(labels):\n if labels[idx].startswith('B'):\n last_begin = idx;\n entity = labels[idx][2:]\n elif labels[idx].startswith('O'):\n if last_begin > 0:\n entities.append((last_begin-1, idx-1, entity))\n last_begin = -1\n idx += 1\n if last_begin > 0:\n entities.append((last_begin-1, idx-1, entity))\n\n return entities",
"_____no_output_____"
],
[
"label_sample = tr_labels[0]\nprint (\"List of label :\\n\",label_sample)\nprint (\"List of entities :\\n\",get_entities(label_sample))",
"List of label :\n ['O', 'O', 'O', 'O', 'O', 'O', 'B-fromloc.city_name', 'O', 'B-depart_time.time', 'I-depart_time.time', 'O', 'O', 'O', 'B-toloc.city_name', 'O', 'B-arrive_time.time', 'O', 'O', 'B-arrive_time.period_of_day']\nList of entities :\n [(5, 6, 'fromloc.city_name'), (7, 9, 'depart_time.time'), (12, 13, 'toloc.city_name'), (14, 15, 'arrive_time.time'), (17, 18, 'arrive_time.period_of_day')]\n"
],
[
"# Load trainer from total_word_feature_extractor.dat\ntrainer = mitie.ner_trainer(MODELS_PATH+\"total_word_feature_extractor.dat\")\n\n# Adding sentences and labels to get trained\nlimit = 10\nfor sentence, labels in zip(tr_sents[:limit], tr_labels):\n sample = mitie.ner_training_instance(sentence) \n for entity in get_entities(labels):\n sample.add_entity(range(entity[0], entity[1]), entity[2])\n trainer.add(sample)\n\n# Training the NER model\ntrainer.num_threads = 4\nner_atis = trainer.train()",
"_____no_output_____"
]
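,
[
"# Optionally persist the trained model to disk, as in MITIE's train_ner.py\n# example; the filename here is an arbitrary choice.\nner_atis.save_to_disk(\"atis_ner_model.dat\")",
"_____no_output_____"
]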
],
[
[
"## Comparing NER learnt on ATIS dataset with generic NER",
"_____no_output_____"
]
],
[
[
"# Loading generic NER model\nner_gen = mitie.named_entity_extractor(MODELS_PATH+\"ner_model.dat\")",
"_____no_output_____"
]
],
[
[
"### Labels ",
"_____no_output_____"
]
],
[
[
"print(\"Generic NER tags:\\n\", ner_gen.get_possible_ner_tags())\nprint(\"ATIS-trained NER tags:\\n\", ner_atis.get_possible_ner_tags())",
"Generic NER tags:\n ['PERSON', 'LOCATION', 'ORGANIZATION', 'MISC']\nATIS-trained NER tags:\n ['fromloc.city_name', 'depart_time.time', 'toloc.city_name', 'arrive_time.time', 'arrive_time.period_of_day', 'depart_time.period_of_day', 'flight_time', 'fare_amount', 'depart_date.today_relative', 'depart_date.day_name', 'city_name']\n"
]
],
[
[
"### Entities Recognised",
"_____no_output_____"
]
],
[
[
"from IPython.display import display, HTML\npd.set_option('display.max_colwidth',300)\n\n# To print multi-line columns in DataFrame\ndef pretty_print(df):\n return display(HTML(df.to_html().replace(\"\\\\n\",\"<br>\")))\n\n# Loading test dataset\nte_sents,te_labels,te_intents = fetch_data('data2/atis.train.w-intent.iob')\n# te_sents,te_labels,te_intents = fetch_data('data2/atis.test.w-intent.iob')\n\n# Returns string of `\\n`-seperated entities detected by given ner on sentence\ndef get_ner_results(ner,sentence):\n entities = ner.extract_entities(sentence)\n ent_strings = []\n for e in entities:\n ent_strings.append(e[1] + \": \" + \" \".join(sentence[i] for i in e[0]))\n ents_str = '\\n'.join(ent_strings)\n return ents_str\n\n# NER Results on test dataset\nresults = []\nlimit = 1\nfor sentence, labels in zip(te_sents[:limit], te_labels):\n sent_str = ' '.join(sentence)\n results.append({\"Sentence\":sent_str,\n \"Generic-NER : Entities\":get_ner_results(ner_gen,sentence),\n \"ATIS-NER : Entities\":get_ner_results(ner_atis,sentence),\n })\n\npretty_print(pd.DataFrame(results)[[\"Sentence\",\"Generic-NER : Entities\",\"ATIS-NER : Entities\"]])",
"_____no_output_____"
]
],
[
[
"## Conclusion\n- Trained NER is capable of identifying more types of entities\n - Generic NER is only capable of detecting PERSON, LOCATION, ORGANIZATION and MISC\n - Trained NER is capable of detecting City Names, Date & Time and Fare Amount\n- Trained NER is capable of identifying more refined entities\n - Generic NER can only detect `LOCATION`\n - For example : *i want to fly from boston at 838 am and arrive in denver at 1110 in the morning*\n - Generic NER considers *boston* and *denver* to be the same entity (`LOCATION`)\n - Trained NER can detect `fromloc.city_name`, `toloc.city_name` and `city_name`\n - For example : *i would like to find a flight from charlotte to las vegas that makes a stop in st. louis*\n - Trained NER considers *boston* to be `fromloc.city_name` and *denver* to be `toloc.city_name`",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e71285b47f08afa1842d7407eefcd198652ed601 | 50,252 | ipynb | Jupyter Notebook | general/error-function-for-linear-regression.ipynb | machine-learning-helpers/induction-books-python | d26816f92d4f6a64e8c4c2ed6c7c8343c77cd3ad | [
"RSA-MD"
] | 3 | 2018-02-11T12:34:19.000Z | 2021-09-22T18:06:01.000Z | general/error-function-for-linear-regression.ipynb | machine-learning-helpers/induction-books-python | d26816f92d4f6a64e8c4c2ed6c7c8343c77cd3ad | [
"RSA-MD"
] | 17 | 2019-11-22T00:48:20.000Z | 2022-01-16T11:00:50.000Z | general/error-function-for-linear-regression.ipynb | machine-learning-helpers/induction-python | 631a735a155f0feb7012472fbca13efbc273dfb0 | [
"RSA-MD"
] | null | null | null | 299.119048 | 45,448 | 0.923545 | [
[
[
"%%markdown\n# Overview\nIn a linear regression, the outcome, $y$, is approximated by a hidden function of the type $\\forall x \\in S, f(x) = a + bx$,\nand the error function is $J(x) = E_{x\\in S} (f(x) - y)^2$\n\nFor a $n$ samples of $x$:\n\\begin{align}\nJ_{a,b}(x) = \\frac{1}{n} \\sum_{i=1}^{i=n} (f(x_i) - y_i)^2 = \\frac{1}{n} \\sum_{i=1}^{i=n} (a + bx_i - y_i)^2\n\\end{align}\n\nThe gradient of $J(x)$ is then:\n\\begin{align}\n\\frac{\\partial J(x)}{\\partial a} &= \\frac{2}{n} \\sum_{i=1}^{i=n} (a + bx_i - y_i) \\\\\n\\frac{\\partial J(x)}{\\partial b} &= \\frac{2}{n} \\sum_{i=1}^{i=n} (a + bx_i - y_i)x_i\n\\end{align}\n\n# References\n## Gradient descent\n* http://en.wikipedia.org/wiki/Gradient_descent\n* http://en.wikipedia.org/wiki/Stochastic_gradient_descent#Example\n## Formatting\n* [Tex/$\\LaTeX$ support in Jupyter Markdown](http://jupyter-notebook.readthedocs.io/en/stable/examples/Notebook/Typesetting%20Equations.html)",
"_____no_output_____"
],
[
"from mpl_toolkits import mplot3d\nimport numpy as np\nimport matplotlib.pyplot as plt\n",
"_____no_output_____"
],
[
"# Error function for x = {1, 2}: 1/2 [(a+b-1)^2 + (a+2b-2)^2]\n# Derivative:\n# a. dJa = (a+b-1) + (a+2b-2) = 2a + 3b - 3\n# b. dJb = (a+b-1) + 2(a+2b-2) = 3a + 5b - 5\n# a = 0, b = 0 => dJa = -3 ; dJb = -5\ndef error_function(a, b):\n total_error = 0.0\n n = 2\n for x in range(1, n):\n total_error += (a + b*x - x) ** 2\n \n return total_error / n\n\nx = np.linspace(-5, 5, 30)\ny = np.linspace(-5, 5, 30)\n\nX, Y = np.meshgrid(x, y)\nZ = error_function(X, Y)",
"_____no_output_____"
],
[
"fig = plt.figure()\nax = plt.axes(projection='3d')\nax.contour3D(X, Y, Z, 50, cmap='binary')\nax.set_xlabel('x')\nax.set_ylabel('y')\nax.set_zlabel('z')",
"_____no_output_____"
]
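,
[
"# A minimal gradient-descent sketch for the error surface above (illustrative\n# only: the learning rate and iteration count are arbitrary, untuned choices).\n# Using the dJa/dJb formulas from the markdown, the minimum is at a = 0, b = 1.\na, b = 0.0, 0.0\nlr = 0.1\nn = 2\nfor step in range(2000):\n    dJa = 2.0 * sum(a + b*x - x for x in range(1, n + 1)) / n\n    dJb = 2.0 * sum((a + b*x - x) * x for x in range(1, n + 1)) / n\n    a -= lr * dJa\n    b -= lr * dJb\nprint(a, b)",
"_____no_output_____"
]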
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code"
]
] |
e71285dc2a9848ca6ea41a76d79b2d4258268149 | 9,068 | ipynb | Jupyter Notebook | tutorials/NeMo_voice_swap_app.ipynb | qmpzzpmq/NeMo | acca8d0bf558aa2466954c2222e61cd8fbf2b2c1 | [
"Apache-2.0"
] | 10 | 2021-04-01T05:55:18.000Z | 2022-02-15T01:41:41.000Z | tutorials/NeMo_voice_swap_app.ipynb | qmpzzpmq/NeMo | acca8d0bf558aa2466954c2222e61cd8fbf2b2c1 | [
"Apache-2.0"
] | null | null | null | tutorials/NeMo_voice_swap_app.ipynb | qmpzzpmq/NeMo | acca8d0bf558aa2466954c2222e61cd8fbf2b2c1 | [
"Apache-2.0"
] | 2 | 2021-02-04T14:45:50.000Z | 2021-02-04T14:56:05.000Z | 29.157556 | 210 | 0.613697 | [
[
[
"# NeMo voice swap demo\nThis notebook shows how to use NVIDIA NeMo (https://github.com/NVIDIA/NeMo) to construct a toy demo which will swap a voice in the audio fragment with a computer generated one.\n\nAt its core the demo does: \n\n* Automatic speech recognition of what is said in the file. E.g. converting audio to text\n* Adding punctuation and capitalization to the text\n* Generating spectrogram from resulting text\n* Generating waveform audio from the spectrogram.",
"_____no_output_____"
],
[
"## Installation\nNeMo can be installed via simple pip command.",
"_____no_output_____"
]
],
[
[
"BRANCH = 'v1.0.0b2'\n!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]\n",
"_____no_output_____"
],
[
"# Ignore pre-production warnings\nimport warnings\nwarnings.filterwarnings('ignore')\nimport nemo\n# Import Speech Recognition collection\nimport nemo.collections.asr as nemo_asr\n# Import Natural Language Processing colleciton\nimport nemo.collections.nlp as nemo_nlp\n# Import Speech Synthesis collection\nimport nemo.collections.tts as nemo_tts\n# We'll use this to listen to audio\nimport IPython",
"_____no_output_____"
],
[
"# Download audio sample which we'll try\n# This is a sample from LibriSpeech Dev Clean dataset - the model hasn't seen it before\nAudio_sample = '2086-149220-0033.wav'\n!wget https://dldata-public.s3.us-east-2.amazonaws.com/2086-149220-0033.wav\n# Listen to it\nIPython.display.Audio(Audio_sample)",
"_____no_output_____"
]
],
[
[
"## Instantiate pre-trained NeMo models which we'll use\n``from_pretrained(...)`` API downloads and initialized model directly from the cloud.\n\nWe will load audio_sample and convert it to text with QuartzNet ASR model (an action called transcribe).\nTo convert text back to audio, we actually need to generate spectrogram with Tacotron2 first and then convert it to actual audio signal using WaveGlow vocoder.",
"_____no_output_____"
]
],
[
[
"# Speech Recognition model - QuartzNet\nquartznet = nemo_asr.models.EncDecCTCModel.from_pretrained(model_name=\"QuartzNet15x5Base-En\").cuda()\n# Punctuation and capitalization model\npunctuation = nemo_nlp.models.PunctuationCapitalizationModel.from_pretrained(model_name='Punctuation_Capitalization_with_DistilBERT').cuda()\n# Spectrogram generator which takes text as an input and produces spectrogram\nspectrogram_generator = nemo_tts.models.Tacotron2Model.from_pretrained(model_name=\"Tacotron2-22050Hz\").cuda()\n# Vocoder model which takes spectrogram and produces actual audio\nvocoder = nemo_tts.models.WaveGlowModel.from_pretrained(model_name=\"WaveGlow-22050Hz\").cuda()",
"_____no_output_____"
]
],
[
[
"## Using the models",
"_____no_output_____"
]
],
[
[
"# Convert our audio sample to text\nfiles = [Audio_sample]\nraw_text = ''\ntext = ''\nfor fname, transcription in zip(files, quartznet.transcribe(paths2audio_files=files)):\n raw_text = transcription\n\n# Add capitalization and punctuation\nres = punctuation.add_punctuation_capitalization(queries=[raw_text])\ntext = res[0]\nprint(f'\\nRaw recognized text: {raw_text}. \\nText with capitalization and punctuation: {text}')",
"_____no_output_____"
],
[
"# A helper function which combines Tacotron2 and WaveGlow to go directly from \n# text to audio\ndef text_to_audio(text):\n parsed = spectrogram_generator.parse(text)\n spectrogram = spectrogram_generator.generate_spectrogram(tokens=parsed)\n audio = vocoder.convert_spectrogram_to_audio(spec=spectrogram)\n return audio.to('cpu').numpy()",
"_____no_output_____"
]
],
[
[
"## Results",
"_____no_output_____"
]
],
[
[
"# This is our original audio sample\nIPython.display.Audio(Audio_sample)",
"_____no_output_____"
],
[
"# This is what was recognized by the ASR model\nprint(raw_text)",
"_____no_output_____"
],
[
"# This is how punctuation model changed it\nprint(text)",
"_____no_output_____"
]
],
[
[
"Compare how the synthesized audio sounds when using text with and without punctuation.",
"_____no_output_____"
]
],
[
[
"# Without punctuation\nIPython.display.Audio(text_to_audio(raw_text), rate=22050)",
"_____no_output_____"
],
[
"# Final result - with punctuation\nIPython.display.Audio(text_to_audio(text), rate=22050)",
"_____no_output_____"
]
],
[
[
"## Next steps\nA demo like this is great for prototyping and experimentation. However, for real production deployment, you would want to use a service like [NVIDIA Jarvis](https://developer.nvidia.com/nvidia-jarvis).\n\n**NeMo is built for training.** You can fine-tune, or train from scratch on your data all models used in this example. We recommend you checkout the following, more in-depth, tutorials next:\n\n* [NeMo fundamentals](https://colab.research.google.com/github/NVIDIA/NeMo/blob/main/tutorials/00_NeMo_Primer.ipynb)\n* [NeMo models](https://colab.research.google.com/github/NVIDIA/NeMo/blob/main/tutorials/01_NeMo_Models.ipynb)\n* [Speech Recognition](https://colab.research.google.com/github/NVIDIA/NeMo/blob/main/tutorials/asr/01_ASR_with_NeMo.ipynb)\n* [Punctuation and Capitalization](https://colab.research.google.com/github/NVIDIA/NeMo/blob/main/tutorials/nlp/Punctuation_and_Capitalization.ipynb)\n* [Speech Synthesis](https://colab.research.google.com/github/NVIDIA/NeMo/blob/main/tutorials/tts/1_TTS_inference.ipynb)\n\n\nYou can find scripts for training and fine-tuning ASR, NLP and TTS models [here](https://github.com/NVIDIA/NeMo/tree/main/examples). ",
"_____no_output_____"
],
[
"That's it folks! Head over to NeMo GitHub for more examples: https://github.com/NVIDIA/NeMo",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
]
] |
e712b7ec3a4556059095852a85090ffded93193a | 85,630 | ipynb | Jupyter Notebook | Face Recognition/Face_Recognition_With_SVM.ipynb | gaurav-kabra-official/Face-Reconition-SupportVectorMachine-SVM | 6e4e50ca159424731c849863c5ab60aa1db4f9b5 | [
"MIT"
] | null | null | null | Face Recognition/Face_Recognition_With_SVM.ipynb | gaurav-kabra-official/Face-Reconition-SupportVectorMachine-SVM | 6e4e50ca159424731c849863c5ab60aa1db4f9b5 | [
"MIT"
] | null | null | null | Face Recognition/Face_Recognition_With_SVM.ipynb | gaurav-kabra-official/Face-Reconition-SupportVectorMachine-SVM | 6e4e50ca159424731c849863c5ab60aa1db4f9b5 | [
"MIT"
] | null | null | null | 272.707006 | 75,622 | 0.899334 | [
[
[
"## Face Recognition",
"_____no_output_____"
],
[
"#### Note : SVMs are least effective when applied to data that are noisy and contain overlapping points. The algorithm struggles to draw hyperplanes without a high misclassification rate.",
"_____no_output_____"
]
],
[
[
"from sklearn.datasets import fetch_lfw_people\n\nfaces = fetch_lfw_people(min_faces_per_person=60)\nfaces.target_names, faces.images.shape",
"_____no_output_____"
],
[
"import numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline",
"_____no_output_____"
],
[
"fig, ax = plt.subplots(3,3)\nfor i, axi in enumerate(ax.flat):\n axi.imshow(faces.images[i])\n axi.set(xlabel=faces.target_names[faces.target[i]] , xticks=[], yticks=[])",
"_____no_output_____"
],
[
"from sklearn.svm import SVC\n\nsvc = SVC(class_weight='balanced')",
"_____no_output_____"
],
[
"from sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(faces.data, faces.target, test_size=0.25, random_state=101)",
"_____no_output_____"
],
[
"y_pred = svc.fit(X_train, y_train).predict(X_test)",
"_____no_output_____"
],
[
"from sklearn.metrics import classification_report\nimport pandas as pd\nreport = classification_report(y_test, y_pred, output_dict=True, target_names=faces.target_names)\ndf = pd.DataFrame(report).transpose()\ndf",
"_____no_output_____"
]
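,
[
"# A common refinement for LFW (sketch): reduce the raw pixel features with\n# PCA (\"eigenfaces\") before the SVM. The component count and whitening are\n# illustrative, untuned choices.\nfrom sklearn.decomposition import PCA\nfrom sklearn.pipeline import make_pipeline\n\npca_model = make_pipeline(PCA(n_components=150, whiten=True, random_state=42),\n                          SVC(class_weight='balanced'))\npca_pred = pca_model.fit(X_train, y_train).predict(X_test)\nprint((pca_pred == y_test).mean())  # rough accuracy check",
"_____no_output_____"
]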
]
] | [
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e712c261a57b1508930540b92cadd8fa7c425beb | 2,997 | ipynb | Jupyter Notebook | analyses/seasonality_paper_st/no_temporal_shifts/variable_diagnostics.ipynb | akuhnregnier/fuel-build-up | c8a5ad55ab287ff97ed267ceb0aff61455abed37 | [
"MIT"
] | null | null | null | analyses/seasonality_paper_st/no_temporal_shifts/variable_diagnostics.ipynb | akuhnregnier/fuel-build-up | c8a5ad55ab287ff97ed267ceb0aff61455abed37 | [
"MIT"
] | null | null | null | analyses/seasonality_paper_st/no_temporal_shifts/variable_diagnostics.ipynb | akuhnregnier/fuel-build-up | c8a5ad55ab287ff97ed267ceb0aff61455abed37 | [
"MIT"
] | null | null | null | 22.877863 | 99 | 0.484151 | [
[
[
"## Setup",
"_____no_output_____"
]
],
[
[
"from specific import *",
"_____no_output_____"
]
],
[
[
"### Get shifted data",
"_____no_output_____"
]
],
[
[
"(\n endog_data,\n exog_data,\n master_mask,\n filled_datasets,\n masked_datasets,\n land_mask,\n) = get_offset_data()",
"_____no_output_____"
]
],
[
[
"## Mapping",
"_____no_output_____"
]
],
[
[
"with figure_saver(\"high_fapar_high_dry_day_period\", sub_directory=\"map_plots\"):\n mpl.rc(\"figure\", figsize=(11, 4))\n constrained_map_plot(\n {\"FAPAR\": (0.36, None), \"Dry Day Period\": (18, None)},\n exog_data,\n master_mask,\n plot_variable=\"FAPAR\",\n coastline_kwargs={\"linewidth\": 0.5},\n )",
"_____no_output_____"
],
[
"with figure_saver(\"high_dry_day_period_18_medium_agbtree\", sub_directory=\"map_plots\"):\n mpl.rc(\"figure\", figsize=(11, 4))\n constrained_map_plot(\n {\"Dry Day Period -18 - -6 Month\": (22, None), \"AGB Tree\": (0.9, 20)},\n exog_data,\n master_mask,\n plot_variable=\"AGB Tree\",\n coastline_kwargs={\"linewidth\": 0.5},\n )",
"_____no_output_____"
],
[
"with figure_saver(\"high_pftCrop\", sub_directory=\"map_plots\"):\n mpl.rc(\"figure\", figsize=(11, 4))\n constrained_map_plot(\n {\"pftCrop\": (0.6, None)},\n exog_data,\n master_mask,\n plot_variable=\"pftCrop\",\n coastline_kwargs={\"linewidth\": 0.5},\n )",
"_____no_output_____"
]
],
[
[
"## Correlation Plot",
"_____no_output_____"
]
],
[
[
"with figure_saver(\"corr_plot\"):\n corr_plot(\n shorten_columns(exog_data[sort_features(exog_data.columns)]),\n fig_kwargs={\"figsize\": (8, 5)},\n )",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
e712c4f753ff8de70e52cee42c6532a7f4d5e5ae | 670,377 | ipynb | Jupyter Notebook | 1- Machine Learning Foundations: A Case Study Approach/projects/Week 3/.ipynb_checkpoints/Analyzing Product Sentiment-checkpoint.ipynb | Magho/Ml-regression-washintgon-course | cfacd62d4741153dcf6116ad9319de550370b066 | [
"MIT"
] | 8 | 2020-11-03T19:45:03.000Z | 2022-02-20T14:04:19.000Z | 1- Machine Learning Foundations: A Case Study Approach/projects/Week 3/.ipynb_checkpoints/Analyzing Product Sentiment-checkpoint.ipynb | chitrita/ML-Washington-specialization-coursera | cfacd62d4741153dcf6116ad9319de550370b066 | [
"MIT"
] | null | null | null | 1- Machine Learning Foundations: A Case Study Approach/projects/Week 3/.ipynb_checkpoints/Analyzing Product Sentiment-checkpoint.ipynb | chitrita/ML-Washington-specialization-coursera | cfacd62d4741153dcf6116ad9319de550370b066 | [
"MIT"
] | 3 | 2019-03-30T06:09:35.000Z | 2020-12-02T14:30:01.000Z | 171.058178 | 230,611 | 0.574692 | [
[
[
"# Predicting sentiment from product reviews\n\n# Fire up GraphLab Create\n(See [Getting Started with SFrames](/notebooks/Week%201/Getting%20Started%20with%20SFrames.ipynb) for setup instructions)",
"_____no_output_____"
]
],
[
[
"import graphlab",
"Vendor: Continuum Analytics, Inc.\nPackage: mkl\nMessage: trial mode expires in 30 days\n/opt/conda/lib/python2.7/site-packages/pandas/computation/__init__.py:19: UserWarning: The installed version of numexpr 2.4.4 is not supported in pandas and will be not be used\n\n UserWarning)\n"
]
],
[
[
"# Read some product review data\n\nLoading reviews for a set of baby products. ",
"_____no_output_____"
]
],
[
[
"products = graphlab.SFrame('amazon_baby.gl/')",
"/opt/conda/lib/python2.7/site-packages/requests/packages/urllib3/connection.py:266: SubjectAltNameWarning: Certificate for beta.graphlab.com has no `subjectAltName`, falling back to check for a `commonName` for now. This feature is being removed by major browsers and deprecated by RFC 2818. (See https://github.com/shazow/urllib3/issues/497 for details.)\n SubjectAltNameWarning\n[INFO] graphlab.cython.cy_server: GraphLab Create v2.1 started. Logging: /tmp/graphlab_server_1518161624.log\n"
]
],
[
[
"# Let's explore this data together\n\nData includes the product name, the review text and the rating of the review. ",
"_____no_output_____"
]
],
[
[
"products.head()",
"_____no_output_____"
]
],
[
[
"# Build the word count vector for each review",
"_____no_output_____"
]
],
[
[
"products['word_count'] = graphlab.text_analytics.count_words(products['review'])",
"_____no_output_____"
],
[
"products.head()",
"_____no_output_____"
],
[
"graphlab.canvas.set_target('ipynb')",
"_____no_output_____"
],
[
"products['name'].show()",
"_____no_output_____"
]
],
[
[
"# Examining the reviews for most-sold product: 'Vulli Sophie the Giraffe Teether'",
"_____no_output_____"
]
],
[
[
"giraffe_reviews = products[products['name'] == 'Vulli Sophie the Giraffe Teether']",
"_____no_output_____"
],
[
"len(giraffe_reviews)",
"_____no_output_____"
],
[
"giraffe_reviews['rating'].show(view='Categorical')",
"_____no_output_____"
]
],
[
[
"# Build a sentiment classifier",
"_____no_output_____"
]
],
[
[
"products['rating'].show(view='Categorical')",
"_____no_output_____"
]
],
[
[
"## Define what's a positive and a negative sentiment\n\nWe will ignore all reviews with rating = 3, since they tend to have a neutral sentiment. Reviews with a rating of 4 or higher will be considered positive, while the ones with rating of 2 or lower will have a negative sentiment. ",
"_____no_output_____"
]
],
[
[
"# ignore all 3* reviews -unwanted data-\nproducts = products[products['rating'] != 3]",
"_____no_output_____"
],
[
"# positive sentiment = 4* or 5* reviews - specify what is thumb up and what is thump down -\nproducts['sentiment'] = products['rating'] >=4",
"_____no_output_____"
],
[
"products.head()",
"_____no_output_____"
]
],
[
[
"## Let's train the sentiment classifier",
"_____no_output_____"
]
],
[
[
"train_data,test_data = products.random_split(.8, seed=0)",
"_____no_output_____"
],
[
"sentiment_model = graphlab.logistic_classifier.create(train_data,\n target='sentiment',\n features=['word_count'],\n validation_set=test_data)",
"_____no_output_____"
]
],
[
[
"# Evaluate the sentiment model",
"_____no_output_____"
]
],
[
[
"sentiment_model.evaluate(test_data, metric='roc_curve')",
"_____no_output_____"
],
[
"sentiment_model.evaluate(test_data)",
"_____no_output_____"
],
[
"sentiment_model.show(view='Evaluation')",
"_____no_output_____"
]
],
[
[
"# Applying the learned model to understand sentiment for Giraffe",
"_____no_output_____"
]
],
[
[
"giraffe_reviews['predicted_sentiment'] = sentiment_model.predict(giraffe_reviews, output_type='probability')",
"_____no_output_____"
],
[
"giraffe_reviews.head()",
"_____no_output_____"
]
],
[
[
"## Sort the reviews based on the predicted sentiment and explore",
"_____no_output_____"
]
],
[
[
"giraffe_reviews = giraffe_reviews.sort('predicted_sentiment', ascending=False)",
"_____no_output_____"
],
[
"giraffe_reviews.head()",
"_____no_output_____"
]
],
[
[
"## Most positive reviews for the giraffe",
"_____no_output_____"
]
],
[
[
"giraffe_reviews[0]['review']",
"_____no_output_____"
],
[
"giraffe_reviews[1]['review']",
"_____no_output_____"
]
],
[
[
"## Show most negative reviews for giraffe",
"_____no_output_____"
]
],
[
[
"giraffe_reviews[-1]['review']",
"_____no_output_____"
],
[
"giraffe_reviews[-2]['review']",
"_____no_output_____"
]
],
[
[
"# prepare data to model with specific selected words ",
"_____no_output_____"
]
],
[
[
"selected_words = ['awesome', 'great', 'fantastic', 'amazing', 'love', 'horrible', 'bad', 'terrible', 'awful', 'wow', 'hate']",
"_____no_output_____"
],
[
"def select_specific_words (dec) :\n for i in dec.keys():\n if i not in selected_words :\n del dec[i]\n return dec",
"_____no_output_____"
]
],
[
[
"# define functions to ciunt each word ",
"_____no_output_____"
]
],
[
[
"def select_awesome_word (dec) :\n num = 0\n for i in dec.keys():\n if i == 'awesome' :\n num +=1\n return num\n\n\ndef select_great_word (dec) :\n num = 0\n for i in dec.keys():\n if i == 'great' :\n num +=1\n return num\n\n\n\ndef select_fantastic_word (dec) :\n num = 0\n for i in dec.keys():\n if i == 'fantastic' :\n num +=1\n return num\n\n\ndef select_amazing_word (dec) :\n num = 0\n for i in dec.keys():\n if i == 'amazing' :\n num +=1\n return num\n\n\ndef select_love_word (dec) :\n num = 0\n for i in dec.keys():\n if i == 'love' :\n num +=1\n return num\n\n\ndef select_horrible_word (dec) :\n num = 0\n for i in dec.keys():\n if i == 'horrible' :\n num +=1\n return num\n\n\ndef select_terrible_word (dec) :\n num = 0\n for i in dec.keys():\n if i == 'terrible' :\n num +=1\n return num\n\ndef select_bad_word (dec) :\n num = 0\n for i in dec.keys():\n if i == 'bad' :\n num +=1\n return num\n\n\ndef select_terrible_word (dec) :\n num = 0\n for i in dec.keys():\n if i == 'terrible' :\n num +=1\n return num\n\ndef select_awful_word (dec) :\n num = 0\n for i in dec.keys():\n if i == 'awful' :\n num +=1\n return num\n\n\ndef select_wow_word (dec) :\n num = 0\n for i in dec.keys():\n if i == 'wow' :\n num +=1\n return num\n\n\ndef select_hate_word (dec) :\n num = 0\n for i in dec.keys():\n if i == 'hate' :\n num +=1\n return num",
"_____no_output_____"
]
],
[
[
"## apply functions on the data",
"_____no_output_____"
]
],
[
[
"products['awesome'] = graphlab.text_analytics.count_words(products['review']).apply(select_awesome_word)\nproducts['great'] = graphlab.text_analytics.count_words(products['review']).apply(select_great_word)\nproducts['fantastic'] = graphlab.text_analytics.count_words(products['review']).apply(select_fantastic_word)\nproducts['amazing'] = graphlab.text_analytics.count_words(products['review']).apply(select_amazing_word)\nproducts['love'] = graphlab.text_analytics.count_words(products['review']).apply(select_love_word)\nproducts['horrible'] = graphlab.text_analytics.count_words(products['review']).apply(select_horrible_word)\nproducts['bad'] = graphlab.text_analytics.count_words(products['review']).apply(select_bad_word)\nproducts['terrible'] = graphlab.text_analytics.count_words(products['review']).apply(select_terrible_word)\nproducts['awful'] = graphlab.text_analytics.count_words(products['review']).apply(select_awful_word)\nproducts['wow'] = graphlab.text_analytics.count_words(products['review']).apply(select_wow_word)\nproducts['hate'] = graphlab.text_analytics.count_words(products['review']).apply(select_hate_word)\nproducts['terrible'] = graphlab.text_analytics.count_words(products['review']).apply(select_terrible_word)",
"_____no_output_____"
],
[
"products['selected_word'] = graphlab.text_analytics.count_words(products['review']).apply(select_specific_words)",
"_____no_output_____"
],
[
"products.head()",
"_____no_output_____"
]
],
[
[
"## sum each word",
"_____no_output_____"
]
],
[
[
"products['awesome'].sum()",
"_____no_output_____"
],
[
"products['great'].sum()",
"_____no_output_____"
],
[
"products['fantastic'].sum()",
"_____no_output_____"
],
[
"products['amazing'].sum()",
"_____no_output_____"
],
[
"products['love'].sum()",
"_____no_output_____"
],
[
"products['horrible'].sum()",
"_____no_output_____"
],
[
"products['bad'].sum()",
"_____no_output_____"
],
[
"products['terrible'].sum()",
"_____no_output_____"
],
[
"products['awful'].sum()",
"_____no_output_____"
],
[
"products['wow'].sum()",
"_____no_output_____"
],
[
"products['hate'].sum()",
"_____no_output_____"
]
],
[
[
"# Build model with specific selected words ",
"_____no_output_____"
]
],
[
[
"train_data,test_data = products.random_split(0.8,seed = 0)",
"_____no_output_____"
],
[
"selected_words_model = graphlab.logistic_classifier.create(train_data, target='sentiment',features=selected_words, validation_set=test_data)",
"_____no_output_____"
]
],
[
[
"## Examine the weights of the features - coloumn value -",
"_____no_output_____"
]
],
[
[
"selected_words_model['coefficients']",
"_____no_output_____"
]
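,
[
"# Sketch: rank the selected words by their learned weight (most positive\n# first); 'name' and 'value' are columns of the coefficients SFrame.\ncoefs = selected_words_model['coefficients']\ncoefs[coefs['name'] != '(intercept)'].sort('value', ascending=False).print_rows(num_rows=11)",
"_____no_output_____"
]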
],
[
[
"## calculate accuracy of features",
"_____no_output_____"
]
],
[
[
"selected_words_model.evaluate(test_data)",
"_____no_output_____"
]
],
[
[
"## compare with majority class",
"_____no_output_____"
]
],
[
[
"products['rating'].show(view='Categorical')",
"_____no_output_____"
],
[
"accuracy_of_predicting_majority_class = 64.2 + 19.913",
"_____no_output_____"
],
[
"print (accuracy_of_predicting_majority_class)",
"84.113\n"
]
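,
[
"# Sketch: compute the same baseline directly instead of reading the\n# percentages off the canvas plot ('sentiment' is 1 for positive reviews).\nprint (products['sentiment'].mean())",
"_____no_output_____"
]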
],
[
[
"# compare sentiment_model with selected_words_model on an product",
"_____no_output_____"
],
[
"## sentiment_model",
"_____no_output_____"
]
],
[
[
"Baby_Trend_Diaper_Champ_sentiment_model = products[products['name'] == 'Baby Trend Diaper Champ']",
"_____no_output_____"
],
[
"Baby_Trend_Diaper_Champ_sentiment_model['prob'] = sentiment_model.predict(Baby_Trend_Diaper_Champ_sentiment_model, output_type='probability')",
"_____no_output_____"
],
[
"Baby_Trend_Diaper_Champ_sentiment_model = Baby_Trend_Diaper_Champ_sentiment_model.sort('prob',ascending=False)",
"_____no_output_____"
],
[
"Baby_Trend_Diaper_Champ_sentiment_model.head()",
"_____no_output_____"
],
[
"Baby_Trend_Diaper_Champ_sentiment_model[0]['review']",
"_____no_output_____"
]
],
[
[
"## selected_words_model",
"_____no_output_____"
]
],
[
[
"Baby_Trend_Diaper_Champ_selected_words_model = products[products['name'] == 'Baby Trend Diaper Champ']",
"_____no_output_____"
],
[
"Baby_Trend_Diaper_Champ_selected_words_model['prob'] = selected_words_model.predict(Baby_Trend_Diaper_Champ_selected_words_model, output_type='probability')",
"_____no_output_____"
],
[
"Baby_Trend_Diaper_Champ_selected_words_model = Baby_Trend_Diaper_Champ_selected_words_model.sort('prob',ascending=False)",
"_____no_output_____"
],
[
"Baby_Trend_Diaper_Champ_selected_words_model.head()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
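"code",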
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
e712e14b870e7c4ddedbaa471cdb332796ff443d | 14,684 | ipynb | Jupyter Notebook | dakota/oat/fleet-share/fleet-share-pu.ipynb | arfc/dcwrapper | 82226f601580be464668fa63df64f037962db57e | [
"BSD-3-Clause"
] | 1 | 2020-03-26T14:09:30.000Z | 2020-03-26T14:09:30.000Z | dakota/oat/fleet-share/fleet-share-pu.ipynb | mehmeturkmen/dcwrapper | 82226f601580be464668fa63df64f037962db57e | [
"BSD-3-Clause"
] | 10 | 2019-10-08T18:46:36.000Z | 2019-11-14T19:23:05.000Z | dakota/oat/fleet-share/fleet-share-pu.ipynb | mehmeturkmen/dcwrapper | 82226f601580be464668fa63df64f037962db57e | [
"BSD-3-Clause"
] | 3 | 2019-10-29T19:23:44.000Z | 2020-09-18T13:09:49.000Z | 33.834101 | 354 | 0.419845 | [
[
[
"** In this Jupyter Notebook, the absolute + sensitivity analysis results are generated for a One-at-a-time sensitivity analysis of fleet share percentage for the proliferation risk evaluation metric. The reason why this metric is separated from the others is that you need the -exp version of each sqlite and pyne on your machine to make it work **",
"_____no_output_____"
]
],
[
[
"import cymetric as cym\nfrom cymetric import timeseries\nimport matplotlib as plt \nimport pandas as pd\nimport numpy as np\nimport sys \nsys.path.insert(0, '../../../scripts/')\nimport output as oup",
"/Users/gwenchee/anaconda3/lib/python3.6/importlib/_bootstrap.py:219: QAWarning: pyne.data is not yet QA compliant.\n return f(*args, **kwds)\n/Users/gwenchee/anaconda3/lib/python3.6/importlib/_bootstrap.py:219: QAWarning: pyne.material is not yet QA compliant.\n return f(*args, **kwds)\n/Users/gwenchee/anaconda3/lib/python3.6/importlib/_bootstrap.py:219: QAWarning: pyne.enrichment is not yet QA compliant.\n return f(*args, **kwds)\n"
]
],
[
[
"# This is when you already have the results ",
"_____no_output_____"
]
],
[
[
"df_p = pd.read_csv('fs-df-pu.csv',index_col='FS')\ndf_p",
"_____no_output_____"
]
],
[
[
"# The rest of the code below is to generate the above results",
"_____no_output_____"
]
],
[
[
"starter_string = 'FS'\nscenario_nums = ['0','5','10','15','20']",
"_____no_output_____"
],
[
"df_p = oup.initialize_df(scenario_index=starter_string,\n scenarios_nums=scenario_nums)",
"_____no_output_____"
],
[
"df_p['Max Pu in all CP'] = 0\ndf_p['Pu Quality in all CP at Max Pu'] = 0\ndf_p['Max Pu in HLW'] = 0\ndf_p['Pu Quality in HLW at Max Pu'] = 0\ndf_p['Max Pu in all RPR'] = 0\ndf_p['Pu Quality in all RPR at Max Pu'] = 0",
"_____no_output_____"
],
[
"output_start = '../cyclus-files/oat/fleet-share/fs'\nev_dict = {}\nfor x in range(len(scenario_nums)): \n output_file = output_start + scenario_nums[x]+'-exp.sqlite'\n ev_dict[scenario_nums[x]] = cym.Evaluator(db=cym.dbopen(output_file),write=True)",
"_____no_output_____"
],
[
"for x in range(len(scenario_nums)): \n cp = cym.timeseries.inventories(ev_dict[scenario_nums[x]],facilities=['lwrstorage','moxstorage','frstorage'],nucs=['pu-238','pu-239','pu-240','pu-241','pu-242','pu-244'])['Quantity']\n fissile_cp = cym.timeseries.inventories(ev_dict[scenario_nums[x]],facilities=['lwrstorage','moxstorage','frstorage'],nucs=['pu-239','pu-241'])['Quantity']\n df_p.loc[scenario_nums[x],'Max Pu in all CP'] = cp.max()\n df_p.loc[scenario_nums[x],'Pu Quality in all CP at Max Pu'] = fissile_cp[cp.idxmax()]/cp.max()\n hlw = cym.timeseries.inventories(ev_dict[scenario_nums[x]],facilities=['enrichmentsink','lwrsink','moxsink','frsink'],nucs=['pu-238','pu-239','pu-240','pu-241','pu-242','pu-244'])['Quantity']\n fissile_hlw = cym.timeseries.inventories(ev_dict[scenario_nums[x]],facilities=['enrichmentsink','lwrsink','moxsink','frsink'],nucs=['pu-239','pu-241'])['Quantity']\n df_p.loc[scenario_nums[x],'Max Pu in HLW'] = hlw.max()\n df_p.loc[scenario_nums[x],'Pu Quality in HLW at Max Pu'] = fissile_hlw[hlw.idxmax()]/hlw.max()\n rpr = cym.timeseries.inventories(ev_dict[scenario_nums[x]],facilities=['lwrreprocessing','moxreprocessing','frreprocessing'],nucs=['pu-238','pu-239','pu-240','pu-241','pu-242','pu-244'])['Quantity']\n fissile_rpr = cym.timeseries.inventories(ev_dict[scenario_nums[x]],facilities=['lwrreprocessing','moxreprocessing','frreprocessing'],nucs=['pu-239','pu-241'])['Quantity']\n df_p.loc[scenario_nums[x],'Max Pu in all RPR'] = rpr.max()\n df_p.loc[scenario_nums[x],'Pu Quality in all RPR at Max Pu'] = fissile_rpr[rpr.idxmax()]/rpr.max()",
"_____no_output_____"
],
[
"df_p.to_csv('fs-df-pu.csv')",
"_____no_output_____"
],
[
"df_p_sa = oup.sensitivity(15,df_p)\ndf_p_sa",
"_____no_output_____"
],
[
"df_p_sa.to_csv('fs-df-pu-sa.csv')",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e712e7e24c7f2105e6ab45d7765b2c06f8963583 | 47,844 | ipynb | Jupyter Notebook | PercorsoIntroML/notebooks/02-regression-tuning.ipynb | nick87ds/MaterialeSerate | 51627e47ff1d3c3ecfc9ce6741c04b91b3295359 | [
"MIT"
] | 12 | 2021-12-12T22:19:52.000Z | 2022-03-18T11:45:17.000Z | PercorsoIntroML/notebooks/02-regression-tuning.ipynb | PythonGroupBiella/MaterialeLezioni | 58b45ecda7b9a8a298b9ca966d2806618a277372 | [
"MIT"
] | 1 | 2022-03-23T13:58:33.000Z | 2022-03-23T14:05:08.000Z | PercorsoIntroML/notebooks/02-regression-tuning.ipynb | PythonGroupBiella/MaterialeLezioni | 58b45ecda7b9a8a298b9ca966d2806618a277372 | [
"MIT"
] | 5 | 2021-11-30T19:38:41.000Z | 2022-01-30T14:50:44.000Z | 86.361011 | 28,812 | 0.796359 | [
[
[
"# Scelta del modello migliore\nIn questo notebook testeremo diversi modelli.\n\nUn volta identificato il migliore proseguiremo con il tuning dei parametri.\n\nInfine vedremo come effettuare una validazione più robusta del modello ottimizzato.",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np",
"_____no_output_____"
],
[
"from sklearn.model_selection import train_test_split, KFold, GridSearchCV\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.linear_model import LinearRegression, Ridge, Lasso\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler",
"_____no_output_____"
],
[
"df = pd.read_pickle(\"../data/weather/processed_weather_data.pkl\")",
"_____no_output_____"
],
[
"# Individuo alcune colonne su cui addestrare il modello\nTRAIN_COLS = [\n 'Precip',\n 'MaxTemp', \n 'MinTemp'\n]\n\n# TARGET DEFINITION\nTARGET_COL = \"Snowfall\"",
"_____no_output_____"
],
[
"X = df[TRAIN_COLS]\ny = df[TARGET_COL]",
"_____no_output_____"
],
[
"# train / test split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42)",
"_____no_output_____"
],
[
"X_train.shape, X_test.shape",
"_____no_output_____"
]
],
[
[
"### TRY DIFFERENT ALGORITHMS",
"_____no_output_____"
]
],
[
[
"colnames = X_train.columns.tolist()\n\ndef get_model():\n # definizione delle trasformazioni delle colonne\n imputer = SimpleImputer(strategy=\"median\") # se ci sono valori mancanti li sostituisce con la mediana (su tutte le colonne)\n\n std_scaler_columns = [colnames.index(x) for x in [\"MinTemp\", \"MaxTemp\"]]\n minmax_scaler_columns = [colnames.index(x) for x in [\"Precip\"]]\n ct = ColumnTransformer([\n (\"stdscaler\", StandardScaler(), std_scaler_columns), # applica standard scaler solo a Min e Max Temp\n (\"minmaxscaler\", MinMaxScaler(), minmax_scaler_columns) # applica il MinMax Scaler a Precip\n ], remainder='passthrough') \n\n # definizione del estimator (regressore)\n regressor = LinearRegression() \n\n # creazione della pipeline\n pipe = Pipeline(steps=[\n (\"imputer\", imputer), \n (\"col_transformer\", ct), \n (\"regressor\", None)\n ])\n \n return pipe",
"_____no_output_____"
],
[
"param_grid = {\n 'regressor': [\n LinearRegression(),\n Ridge(alpha=0.5, random_state=42),\n Lasso(alpha=0.5, random_state=42),\n RandomForestRegressor(max_depth=5, random_state=42)\n ]\n}",
"_____no_output_____"
],
[
"search = GridSearchCV(\n get_model(), \n param_grid, \n cv=KFold(n_splits=5, shuffle=True, random_state=123), \n scoring=\"neg_mean_squared_error\",\n return_train_score=True, \n verbose=3\n)",
"_____no_output_____"
],
[
"%%time\nsearch.fit(X, y)",
"Fitting 5 folds for each of 4 candidates, totalling 20 fits\n[CV 1/5] END regressor=LinearRegression();, score=(train=-6.176, test=-5.818) total time= 0.0s\n[CV 2/5] END regressor=LinearRegression();, score=(train=-6.102, test=-6.112) total time= 0.0s\n[CV 3/5] END regressor=LinearRegression();, score=(train=-6.200, test=-5.722) total time= 0.0s\n[CV 4/5] END regressor=LinearRegression();, score=(train=-5.960, test=-6.683) total time= 0.0s\n[CV 5/5] END regressor=LinearRegression();, score=(train=-6.082, test=-6.192) total time= 0.0s\n[CV 1/5] END regressor=Ridge(alpha=0.5, random_state=42);, score=(train=-6.176, test=-5.818) total time= 0.1s\n[CV 2/5] END regressor=Ridge(alpha=0.5, random_state=42);, score=(train=-6.102, test=-6.112) total time= 0.0s\n[CV 3/5] END regressor=Ridge(alpha=0.5, random_state=42);, score=(train=-6.200, test=-5.722) total time= 0.0s\n[CV 4/5] END regressor=Ridge(alpha=0.5, random_state=42);, score=(train=-5.960, test=-6.683) total time= 0.0s\n[CV 5/5] END regressor=Ridge(alpha=0.5, random_state=42);, score=(train=-6.082, test=-6.192) total time= 0.0s\n[CV 1/5] END regressor=Lasso(alpha=0.5, random_state=42);, score=(train=-6.442, test=-6.013) total time= 0.0s\n[CV 2/5] END regressor=Lasso(alpha=0.5, random_state=42);, score=(train=-6.368, test=-6.324) total time= 0.0s\n[CV 3/5] END regressor=Lasso(alpha=0.5, random_state=42);, score=(train=-6.465, test=-5.988) total time= 0.0s\n[CV 4/5] END regressor=Lasso(alpha=0.5, random_state=42);, score=(train=-6.225, test=-7.022) total time= 0.0s\n[CV 5/5] END regressor=Lasso(alpha=0.5, random_state=42);, score=(train=-6.348, test=-6.509) total time= 0.0s\n[CV 1/5] END regressor=RandomForestRegressor(max_depth=5, random_state=42);, score=(train=-4.488, test=-4.562) total time= 5.7s\n[CV 2/5] END regressor=RandomForestRegressor(max_depth=5, random_state=42);, score=(train=-4.357, test=-4.928) total time= 5.9s\n[CV 3/5] END regressor=RandomForestRegressor(max_depth=5, random_state=42);, score=(train=-4.566, test=-4.116) total time= 5.1s\n[CV 4/5] END regressor=RandomForestRegressor(max_depth=5, random_state=42);, score=(train=-4.310, test=-5.242) total time= 4.9s\n[CV 5/5] END regressor=RandomForestRegressor(max_depth=5, random_state=42);, score=(train=-4.462, test=-4.567) total time= 5.1s\nWall time: 37.5 s\n"
],
[
"search.best_estimator_",
"_____no_output_____"
],
[
"search.best_score_",
"_____no_output_____"
]
],
[
[
"#### BEST ALGORITHM = RandomForestRegressor",
"_____no_output_____"
]
],
[
[
"search.cv_results_",
"_____no_output_____"
],
[
"search.cv_results_.keys()",
"_____no_output_____"
],
[
"[r[\"regressor\"].__class__.__name__ for r in search.cv_results_[\"params\"]]",
"_____no_output_____"
],
[
"plot_res_df = pd.DataFrame([\n search.cv_results_['split0_test_score'], \n search.cv_results_['split1_test_score'],\n search.cv_results_['split2_test_score'], \n search.cv_results_['split3_test_score'], \n search.cv_results_['split4_test_score']\n], columns=[r[\"regressor\"].__class__.__name__ for r in search.cv_results_[\"params\"]])",
"_____no_output_____"
],
[
"plot_res_df.plot(xlabel=\"Fold\", ylabel=\"TestScore\", xticks=range(plot_res_df.shape[1]+1))",
"_____no_output_____"
]
],
[
[
"### Esercizio: HYPER PARAMETER TUNING\n\nCon la stessa metodologia esposta è possibile fare tuning parametri.\nLascio a voi questo esercizio.\n\nSuggerimenti: \n- provare a lavorare sui seguenti parametri dell'algoritmo RandomForest\n - max_depth\n - min_samples_split\n - min_samples_leaf\n- random forest è un algoritmo basato su alberi, teoricamente non richiede scaling delle features\n- è possibile eseguire nuovamente la selezione del modello e tuning dei parametri aggiungendo nuove colonne (vedi pairplot nel notebook precedente)",
"_____no_output_____"
]
],
[
[
"# param_grid = {\n# 'base_estimator__max_depth': [2, 4, 6, 8, 12]\n# .......\n# }",
"_____no_output_____"
],
[
"# search = GridSearchCV(.........)\n# search.fit(X, y)",
"_____no_output_____"
]
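,
[
"# A possible solution sketch for the exercise above (a hint, not the official solution;\n# the parameter values below are illustrative assumptions, not tuned recommendations)\nparam_grid = {\n    'regressor': [RandomForestRegressor(random_state=42)],\n    'regressor__max_depth': [4, 8, 12],\n    'regressor__min_samples_split': [2, 10],\n    'regressor__min_samples_leaf': [1, 5]\n}\n\nsearch = GridSearchCV(\n    get_model(),\n    param_grid,\n    cv=KFold(n_splits=5, shuffle=True, random_state=123),\n    scoring='neg_mean_squared_error',\n    verbose=1\n)\nsearch.fit(X, y)\nprint(search.best_params_, search.best_score_)",
"_____no_output_____"
]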
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e712e8c1b60d12d502d0030523b1ed7fcdda8786 | 6,214 | ipynb | Jupyter Notebook | examples/For ArcGIS API for Python/hub03-Working with Events.ipynb | raykendo/hub-py | aebcd5031a2be43c725f7453682bcb01169080fc | [
"Apache-2.0"
] | 19 | 2020-03-14T14:40:20.000Z | 2022-03-15T20:40:13.000Z | examples/For ArcGIS API for Python/hub03-Working with Events.ipynb | raykendo/hub-py | aebcd5031a2be43c725f7453682bcb01169080fc | [
"Apache-2.0"
] | 61 | 2018-03-14T14:15:05.000Z | 2020-03-06T04:04:56.000Z | examples/For ArcGIS API for Python/hub03-Working with Events.ipynb | raykendo/hub-py | aebcd5031a2be43c725f7453682bcb01169080fc | [
"Apache-2.0"
] | 6 | 2020-03-09T16:02:53.000Z | 2021-12-06T14:58:31.000Z | 21.136054 | 285 | 0.513679 | [
[
[
"### Working with Events",
"_____no_output_____"
],
[
"ArcGIS Hub supports engagement through in-person and virtual events.\n\nEvents are meetings for people to support an Initiative. Events are scheduled by an organizer and have many attendees. An Event has a Group so that they can include content for preparation as well as gather and archive content during the event for later retrieval or analysis.\n\nA Hub has many Events that can be associated with an Initiative.",
"_____no_output_____"
]
],
[
[
"from arcgis.gis import GIS",
"_____no_output_____"
],
[
"gis = GIS(\"https://dcdev.maps.arcgis.com\", 'mmajumdar_dcdev')",
"Enter password: ········\n"
],
[
"myHub = gis.hub",
"_____no_output_____"
]
],
[
[
"#### Searching for events\n\nYou can search for `events` of a Hub using the following parameters:\n* `initiative_id`\n* `title`\n* `location`\n* `organizer_name`",
"_____no_output_____"
]
],
[
[
"myHub.events.search()",
"_____no_output_____"
],
[
"myHub.events.search(title='Test')",
"_____no_output_____"
],
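[
"# The markdown above also lists location and organizer_name as search parameters.\n# A sketch, assuming plain string values are accepted (the values here are placeholders):\nmyHub.events.search(location='Washington', organizer_name='John Smith')",
"_____no_output_____"
],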
[
"event1 = myHub.events.search(initiative_id='30d22500fdb54e9699f97c3b74523394')[0]\nevent1",
"_____no_output_____"
]
],
[
[
"#### Accessing properties of an event",
"_____no_output_____"
],
[
"You can access properties of an event as follows:",
"_____no_output_____"
]
],
[
[
"event1.start_date",
"_____no_output_____"
],
[
"event1.siteid",
"_____no_output_____"
],
[
"event1.organizers",
"_____no_output_____"
]
],
[
[
"#### Visualize all events on a map",
"_____no_output_____"
]
],
[
[
"myHub.events.get_map()",
"_____no_output_____"
]
],
[
[
"",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e712ee33e50b10baf6df9c3c0f52bb3211f6e5dd | 1,810 | ipynb | Jupyter Notebook | tf_keras_5_adverserial.ipynb | kornellewy/tf_keras_5_deepDream | 20dfe74444a63f5126ef12463ca60ba601f9abdb | [
"MIT"
] | null | null | null | tf_keras_5_adverserial.ipynb | kornellewy/tf_keras_5_deepDream | 20dfe74444a63f5126ef12463ca60ba601f9abdb | [
"MIT"
] | null | null | null | tf_keras_5_adverserial.ipynb | kornellewy/tf_keras_5_deepDream | 20dfe74444a63f5126ef12463ca60ba601f9abdb | [
"MIT"
] | null | null | null | 21.547619 | 122 | 0.570166 | [
[
[
"%reload_ext autoreload\n%autoreload 2\n%matplotlib inline",
"_____no_output_____"
],
[
"import keras\nimport numpy as np\nimport tensorflow as tf\nfrom keras.preprocessing.image import ImageDataGenerator, image\nfrom keras.layers import Dropout, Flatten, Dense, GlobalAveragePooling2D, Activation, Conv2D, MaxPooling2D, Input\nfrom keras.models import Model, Sequential\nfrom keras.applications.inception_v3 import InceptionV3\nimport matplotlib.pyplot as plt\nimport inception",
"_____no_output_____"
],
[
"from keras import backend as K\nK.tensorflow_backend._get_available_gpus()",
"_____no_output_____"
],
[
"model = InceptionV3(include_top=True, weights='imagenet')",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code"
]
] |
e712fa2d10f386a4f933caeaf1fde863ebdfe006 | 21,004 | ipynb | Jupyter Notebook | ThinkDSP-master/code/chap08.ipynb | beckermy/popcorn-analyzer | 6090c21f6e15557ce64cd2be3c28d6877c067b59 | [
"MIT"
] | null | null | null | ThinkDSP-master/code/chap08.ipynb | beckermy/popcorn-analyzer | 6090c21f6e15557ce64cd2be3c28d6877c067b59 | [
"MIT"
] | null | null | null | ThinkDSP-master/code/chap08.ipynb | beckermy/popcorn-analyzer | 6090c21f6e15557ce64cd2be3c28d6877c067b59 | [
"MIT"
] | null | null | null | 22.955191 | 204 | 0.531518 | [
[
[
"## ThinkDSP\n\nThis notebook contains code examples from Chapter 8: Filtering and Convolution\n\nCopyright 2015 Allen Downey\n\nLicense: [Creative Commons Attribution 4.0 International](http://creativecommons.org/licenses/by/4.0/)",
"_____no_output_____"
]
],
[
[
"from __future__ import print_function, division\n\n%matplotlib inline\n\nimport thinkdsp\nimport thinkplot\nimport thinkstats2\n\nimport numpy as np\nimport pandas as pd\nimport scipy.signal\n\nnp.set_printoptions(precision=3, suppress=True)",
"_____no_output_____"
],
[
"PI2 = 2 * np.pi\nGRAY = '0.7'",
"_____no_output_____"
]
],
[
[
"### Smoothing\n\nAs the first example, I'll look at daily closing stock prices for Facebook, from its IPO on 17 May 2012 to 8 December 2015 (note: the dataset includes only trading days )",
"_____no_output_____"
]
],
[
[
"names = ['date', 'open', 'high', 'low', 'close', 'volume']\ndf = pd.read_csv('fb.csv', header=0, names=names, parse_dates=[0])\ndf.head()",
"_____no_output_____"
]
],
[
[
"Extract the close prices and days since start of series:",
"_____no_output_____"
]
],
[
[
"close = df.close.values[::-1]\ndates = df.date.values[::-1]\ndays = (dates - dates[0]) / np.timedelta64(1,'D')",
"_____no_output_____"
]
],
[
[
"Make a window to compute a 30-day moving average and convolve the window with the data. The `valid` flag means the convolution is only computed where the window completely overlaps with the signal.",
"_____no_output_____"
]
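,
[
"With `mode='valid'`, convolving a length-$N$ signal with a length-$M$ window yields $N - M + 1$ output points, which is why the code below offsets `smoothed_days` by `M//2` on each side.",
"_____no_output_____"
]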
],
[
[
"M = 30\nwindow = np.ones(M)\nwindow /= sum(window)\nsmoothed = np.convolve(close, window, mode='valid')\nsmoothed_days = days[M//2: len(smoothed) + M//2]",
"_____no_output_____"
]
],
[
[
"Plot the original and smoothed signals.",
"_____no_output_____"
]
],
[
[
"thinkplot.plot(days, close, color=GRAY, label='daily close')\nthinkplot.plot(smoothed_days, smoothed, label='30 day average')\n\nlast = days[-1]\nthinkplot.config(xlabel='Time (days)', \n ylabel='Price ($)',\n xlim=[-7, last+7],\n legend=True,\n loc='lower right')",
"_____no_output_____"
]
],
[
[
"### Smoothing sound signals\n\nGenerate a 440 Hz sawtooth signal.",
"_____no_output_____"
]
],
[
[
"signal = thinkdsp.SawtoothSignal(freq=440)\nwave = signal.make_wave(duration=1.0, framerate=44100)\nwave.make_audio()",
"_____no_output_____"
]
],
[
[
"Make a moving average window.",
"_____no_output_____"
]
],
[
[
"window = np.ones(11)\nwindow /= sum(window)\nthinkplot.plot(window)",
"_____no_output_____"
]
],
[
[
"Plot the wave.",
"_____no_output_____"
]
],
[
[
"segment = wave.segment(duration=0.01)\nsegment.plot()\nthinkplot.config(xlabel='Time (s)', ylim=[-1.05, 1.05])",
"_____no_output_____"
]
],
[
[
"Pad the window so it's the same length as the signal, and plot it.",
"_____no_output_____"
]
],
[
[
"N = len(segment)\npadded = thinkdsp.zero_pad(window, N)\nthinkplot.plot(padded)\nthinkplot.config(xlabel='Index')",
"_____no_output_____"
]
],
[
[
"Apply the window to the signal (with lag=0).",
"_____no_output_____"
]
],
[
[
"prod = padded * segment.ys\nprint(sum(prod))",
"_____no_output_____"
]
],
[
[
"Compute a convolution by rolling the window to the right.",
"_____no_output_____"
]
],
[
[
"smoothed = np.zeros(N)\nrolled = padded.copy()\nfor i in range(N):\n smoothed[i] = sum(rolled * segment.ys)\n rolled = np.roll(rolled, 1)",
"_____no_output_____"
]
],
[
[
"Plot the result of the convolution and the original.",
"_____no_output_____"
]
],
[
[
"segment.plot(color=GRAY)\nsmooth = thinkdsp.Wave(smoothed, framerate=wave.framerate)\nsmooth.plot()\nthinkplot.config(xlabel='Time(s)', ylim=[-1.05, 1.05])",
"_____no_output_____"
]
],
[
[
"Compute the same convolution using `numpy.convolve`.",
"_____no_output_____"
]
],
[
[
"segment.plot(color=GRAY)\nys = np.convolve(segment.ys, window, mode='valid')\nsmooth2 = thinkdsp.Wave(ys, framerate=wave.framerate)\nsmooth2.plot()\nthinkplot.config(xlabel='Time(s)', ylim=[-1.05, 1.05])",
"_____no_output_____"
]
],
[
[
"## Frequency domain\n\nLet's see what's happening in the frequency domain.",
"_____no_output_____"
],
[
"Compute the smoothed wave using `np.convolve`, which is much faster than my version above.",
"_____no_output_____"
]
],
[
[
"convolved = np.convolve(wave.ys, window, mode='same')\nsmooth = thinkdsp.Wave(convolved, framerate=wave.framerate)\nsmooth.make_audio()",
"_____no_output_____"
]
],
[
[
"Plot spectrums of the original and smoothed waves:",
"_____no_output_____"
]
],
[
[
"spectrum = wave.make_spectrum()\nspectrum.plot(color=GRAY)\n\nspectrum2 = smooth.make_spectrum()\nspectrum2.plot()\n\nthinkplot.config(xlabel='Frequency (Hz)',\n ylabel='Amplitude',\n xlim=[0, 22050])",
"_____no_output_____"
]
],
[
[
"For each harmonic, compute the ratio of the amplitudes before and after smoothing.",
"_____no_output_____"
]
],
[
[
"amps = spectrum.amps\namps2 = spectrum2.amps\nratio = amps2 / amps \nratio[amps<280] = 0\n\nthinkplot.plot(ratio)\nthinkplot.config(xlabel='Frequency (Hz)',\n ylabel='Amplitude ratio',\n xlim=[0, 22050])",
"_____no_output_____"
]
],
[
[
"Plot the ratios again, but also plot the FFT of the window.",
"_____no_output_____"
]
],
[
[
"padded = thinkdsp.zero_pad(window, len(wave))\ndft_window = np.fft.rfft(padded)\n\nthinkplot.plot(abs(dft_window), color=GRAY, label='DFT(window)')\nthinkplot.plot(ratio, label='amplitude ratio')\n\nthinkplot.config(xlabel='Frequency (Hz)',\n ylabel='Amplitude ratio',\n xlim=[0, 22050], loc='upper right')",
"_____no_output_____"
]
],
[
[
"### Gaussian window\n\nLet's compare boxcar and Gaussian windows.",
"_____no_output_____"
],
[
"Make the boxcar window.",
"_____no_output_____"
]
],
[
[
"boxcar = np.ones(11)\nboxcar /= sum(boxcar)",
"_____no_output_____"
]
],
[
[
"Make the Gaussian window.",
"_____no_output_____"
]
],
[
[
"gaussian = scipy.signal.gaussian(M=11, std=2)\ngaussian /= sum(gaussian)",
"_____no_output_____"
]
],
[
[
"Plot the two windows.",
"_____no_output_____"
]
],
[
[
"thinkplot.preplot(2)\nthinkplot.plot(boxcar, label='boxcar')\nthinkplot.plot(gaussian, label='Gaussian')\nthinkplot.config(xlabel='Index',\n loc='upper right')",
"_____no_output_____"
]
],
[
[
"Convolve the square wave with the Gaussian window.",
"_____no_output_____"
]
],
[
[
"ys = np.convolve(wave.ys, gaussian, mode='same')\nsmooth = thinkdsp.Wave(ys, framerate=wave.framerate)\nspectrum2 = smooth.make_spectrum()",
"_____no_output_____"
]
],
[
[
"Compute the ratio of the amplitudes.",
"_____no_output_____"
]
],
[
[
"amps = spectrum.amps\namps2 = spectrum2.amps\nratio = amps2 / amps \nratio[amps<560] = 0",
"_____no_output_____"
]
],
[
[
"Compute the FFT of the window.",
"_____no_output_____"
]
],
[
[
"padded = thinkdsp.zero_pad(gaussian, len(wave))\ndft_gaussian = np.fft.rfft(padded)",
"_____no_output_____"
]
],
[
[
"Plot the ratios and the FFT of the window.",
"_____no_output_____"
]
],
[
[
"thinkplot.plot(abs(dft_gaussian), color='0.7', label='Gaussian filter')\nthinkplot.plot(ratio, label='amplitude ratio')\n\nthinkplot.config(xlabel='Frequency (Hz)',\n ylabel='Amplitude ratio',\n xlim=[0, 22050])",
"_____no_output_____"
]
],
[
[
"Combine the preceding example into one big function so we can interact with it.",
"_____no_output_____"
]
],
[
[
"def plot_filter(M=11, std=2):\n signal = thinkdsp.SquareSignal(freq=440)\n wave = signal.make_wave(duration=1, framerate=44100)\n spectrum = wave.make_spectrum()\n\n gaussian = scipy.signal.gaussian(M=M, std=std)\n gaussian /= sum(gaussian)\n high = gaussian.max()\n \n thinkplot.preplot(cols=2)\n thinkplot.plot(gaussian)\n thinkplot.config(xlabel='Index', ylabel='Window', \n xlim=[0, len(gaussian)-1], ylim=[0, 1.1*high])\n\n ys = np.convolve(wave.ys, gaussian, mode='same')\n smooth = thinkdsp.Wave(ys, framerate=wave.framerate)\n spectrum2 = smooth.make_spectrum()\n\n # plot the ratio of the original and smoothed spectrum\n amps = spectrum.amps\n amps2 = spectrum2.amps\n ratio = amps2 / amps \n ratio[amps<560] = 0\n\n # plot the same ratio along with the FFT of the window\n padded = thinkdsp.zero_pad(gaussian, len(wave))\n dft_gaussian = np.fft.rfft(padded)\n\n thinkplot.subplot(2)\n thinkplot.plot(abs(dft_gaussian), color=GRAY, label='Gaussian filter')\n thinkplot.plot(ratio, label='amplitude ratio')\n\n thinkplot.show(xlabel='Frequency (Hz)',\n ylabel='Amplitude ratio',\n xlim=[0, 22050],\n ylim=[0, 1.05])",
"_____no_output_____"
]
],
[
[
"Try out different values of `M` and `std`.",
"_____no_output_____"
]
],
[
[
"from ipywidgets import interact, interactive, fixed\nimport ipywidgets as widgets\n\nslider = widgets.IntSlider(min=2, max=100, value=11)\nslider2 = widgets.FloatSlider(min=0, max=20, value=2)\ninteract(plot_filter, M=slider, std=slider2);",
"_____no_output_____"
]
],
[
[
"## Convolution theorem\n\nLet's use the Convolution theorem to compute convolutions using FFT. Read the Facebook data again, and smooth it using `np.convolve` and a 30-day Gaussian window.\n\nI'll ignore the dates and treat the values as if they are equally spaced in time.",
"_____no_output_____"
]
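,
[
"The theorem states that circular convolution in the time domain corresponds to elementwise multiplication in the frequency domain: $\\mathrm{DFT}(f \\ast g) = \\mathrm{DFT}(f) \\cdot \\mathrm{DFT}(g)$. So we can compute a convolution as $\\mathrm{IDFT}(\\mathrm{DFT}(f) \\cdot \\mathrm{DFT}(g))$; that is exactly what the cells below, and the `fft_convolve` function further down, do.",
"_____no_output_____"
]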
],
[
[
"names = ['date', 'open', 'high', 'low', 'close', 'volume']\ndf = pd.read_csv('fb.csv', header=0, names=names, parse_dates=[0])\nclose = df.close.values[::-1]\n\nwindow = scipy.signal.gaussian(M=30, std=6)\nwindow /= window.sum()\nsmoothed = np.convolve(close, window, mode='valid')\n\nlen(close), len(smoothed)",
"_____no_output_____"
]
],
[
[
"Plot the original and smoothed data.",
"_____no_output_____"
]
],
[
[
"thinkplot.plot(close, color=GRAY)\nthinkplot.plot(smoothed)",
"_____no_output_____"
]
],
[
[
"Pad the window and compute its FFT.",
"_____no_output_____"
]
],
[
[
"N = len(close)\npadded = thinkdsp.zero_pad(window, N)\nfft_window = np.fft.fft(padded)\nthinkplot.plot(np.absolute(fft_window))",
"_____no_output_____"
]
],
[
[
"Apply the convolution theorem.",
"_____no_output_____"
]
],
[
[
"fft_signal = np.fft.fft(close)\nsmoothed2 = np.fft.ifft(fft_signal * fft_window)\nM = len(window)\nsmoothed2 = smoothed2[M-1:]",
"_____no_output_____"
]
],
[
[
"Plot the two signals (smoothed with numpy and FFT).",
"_____no_output_____"
]
],
[
[
"thinkplot.plot(smoothed)\nthinkplot.plot(smoothed2.real)",
"_____no_output_____"
]
],
[
[
"Confirm that the difference is small.",
"_____no_output_____"
]
],
[
[
"diff = smoothed - smoothed2\nmax(abs(diff))",
"_____no_output_____"
]
],
[
[
"`scipy.signal` provides `fftconvolve`, which computes convolutions using FFT.",
"_____no_output_____"
]
],
[
[
"smoothed3 = scipy.signal.fftconvolve(close, window, mode='valid')",
"_____no_output_____"
]
],
[
[
"Confirm that it gives the same answer, at least approximately.",
"_____no_output_____"
]
],
[
[
"diff = smoothed - smoothed3\nmax(abs(diff))",
"_____no_output_____"
]
],
[
[
"We can encapsulate the process in a function:",
"_____no_output_____"
]
],
[
[
"def fft_convolve(signal, window):\n fft_signal = np.fft.fft(signal)\n fft_window = np.fft.fft(window)\n return np.fft.ifft(fft_signal * fft_window)",
"_____no_output_____"
]
],
[
[
"And confirm that it gives the same answer.",
"_____no_output_____"
]
],
[
[
"smoothed4 = fft_convolve(close, padded)[M-1:]\nlen(smoothed4)",
"_____no_output_____"
],
[
"diff = smoothed - smoothed4\nmax(abs(diff))",
"_____no_output_____"
]
],
[
[
"### Autocorrelation\n\nWe can also use the convolution theorem to compute autocorrelation functions.\n\nCompute autocorrelation using `numpy.correlate`:\n",
"_____no_output_____"
]
],
[
[
"corrs = np.correlate(close, close, mode='same')\ncorrs[:7]",
"_____no_output_____"
]
],
[
[
"Compute autocorrelation using my `fft_convolve`. The window is a reversed copy of the signal. We have to pad the window and signal with zeros and then select the middle half from the result.",
"_____no_output_____"
]
],
[
[
"def fft_autocorr(signal):\n N = len(signal)\n signal = thinkdsp.zero_pad(signal, 2*N)\n window = np.flipud(signal)\n\n corrs = fft_convolve(signal, window)\n corrs = np.roll(corrs, N//2+1)[:N]\n return corrs",
"_____no_output_____"
]
],
[
[
"Test the function.",
"_____no_output_____"
]
],
[
[
"corrs2 = fft_autocorr(close)\ncorrs2[:7]",
"_____no_output_____"
]
],
[
[
"Plot the results.",
"_____no_output_____"
]
],
[
[
"lags = np.arange(N) - N//2\nthinkplot.plot(lags, corrs, color=GRAY, linewidth=7, label='np.convolve')\nthinkplot.plot(lags, corrs2.real, linewidth=2, label='fft_convolve')\nthinkplot.config(xlabel='Lag', ylabel='Correlation')\nlen(corrs), len(corrs2)",
"_____no_output_____"
]
],
[
[
"Confirm that the difference is small.",
"_____no_output_____"
]
],
[
[
"diff = corrs - corrs2.real\nmax(abs(diff))",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e712fd3bbc2d622077b1119f5bafa6c093fb39ce | 373,685 | ipynb | Jupyter Notebook | cvnd/CVND_Exercises/1_2_Convolutional_Filters_Edge_Detection/2. Finding Edges and Custom Kernels.ipynb | sijoonlee/deep_learning | 437ee14b478688d671257310c33071f75764e3ed | [
"MIT"
] | 20 | 2019-09-29T13:32:00.000Z | 2022-03-28T09:57:51.000Z | cvnd/CVND_Exercises/1_2_Convolutional_Filters_Edge_Detection/2. Finding Edges and Custom Kernels.ipynb | sijoonlee/deep_learning | 437ee14b478688d671257310c33071f75764e3ed | [
"MIT"
] | 11 | 2021-06-08T20:32:58.000Z | 2022-03-12T00:05:43.000Z | cvnd/CVND_Exercises/1_2_Convolutional_Filters_Edge_Detection/2. Finding Edges and Custom Kernels.ipynb | sijoonlee/deep_learning | 437ee14b478688d671257310c33071f75764e3ed | [
"MIT"
] | null | null | null | 1,525.244898 | 135,464 | 0.958345 | [
[
[
"# Creating a Filter, Edge Detection",
"_____no_output_____"
],
[
"### Import resources and display image",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n\nimport cv2\nimport numpy as np\n\n%matplotlib inline\n\n# Read in the image\nimage = mpimg.imread('images/curved_lane.jpg')\n\nplt.imshow(image)",
"_____no_output_____"
]
],
[
[
"### Convert the image to grayscale",
"_____no_output_____"
]
],
[
[
"# Convert to grayscale for filtering\ngray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n\nplt.imshow(gray, cmap='gray')",
"_____no_output_____"
]
],
[
[
"### TODO: Create a custom kernel\n\nBelow, you've been given one common type of edge detection filter: a Sobel operator.\n\nThe Sobel filter is very commonly used in edge detection and in finding patterns in intensity in an image. Applying a Sobel filter to an image is a way of **taking (an approximation) of the derivative of the image** in the x or y direction, separately. The operators look as follows.\n\n<img src=\"images/sobel_ops.png\" width=200 height=200>\n\n**It's up to you to create a Sobel x operator and apply it to the given image.**\n\nFor a challenge, see if you can put the image through a series of filters: first one that blurs the image (takes an average of pixels), and then one that detects the edges.",
"_____no_output_____"
]
],
[
[
"# Create a custom kernel\n\n# 3x3 array for edge detection\nsobel_y = np.array([[ -1, -2, -1], \n [ 0, 0, 0], \n [ 1, 2, 1]])\n\n# Filter the image using filter2D, which has inputs: (grayscale image, bit-depth, kernel) \nfiltered_image = cv2.filter2D(gray, -1, sobel_y)\n\nplt.imshow(filtered_image, cmap='gray')",
"_____no_output_____"
],
[
"## TODO: Create and apply a Sobel x operator\nsobel_x = np.array([[ -1, 0, 1], \n [ -2, 0, 2], \n [ -1, 0, 1]])\n\n# Filter the image using filter2D, which has inputs: (grayscale image, bit-depth, kernel) \nfiltered_image = cv2.filter2D(gray, -1, sobel_x)\n\nplt.imshow(filtered_image, cmap='gray')",
"_____no_output_____"
]
],
[
[
"### Test out other filters!\n\nYou're encouraged to create other kinds of filters and apply them to see what happens! As an **optional exercise**, try the following:\n* Create a filter with decimal value weights.\n* Create a 5x5 filter\n* Apply your filters to the other images in the `images` directory.\n\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
e712feb1e75146383e70ec5613dad3efe769fb4a | 60,567 | ipynb | Jupyter Notebook | MDT_ProstateX/results.ipynb | ihsgnef/prostate_lesion_detection | 94b2c3a80f7c3ed311cbe2d497c7283ea9e7bc92 | [
"MIT"
] | null | null | null | MDT_ProstateX/results.ipynb | ihsgnef/prostate_lesion_detection | 94b2c3a80f7c3ed311cbe2d497c7283ea9e7bc92 | [
"MIT"
] | null | null | null | MDT_ProstateX/results.ipynb | ihsgnef/prostate_lesion_detection | 94b2c3a80f7c3ed311cbe2d497c7283ea9e7bc92 | [
"MIT"
] | null | null | null | 80.541223 | 16,292 | 0.7778 | [
[
[
"# Test results",
"_____no_output_____"
],
[
"This Notebook loads the results (list of detections) generated by the model, and computes some metrics ans visualizations using them\n\nThis directory contains a copy of the Medical Detection Toolkit (https://github.com/MIC-DKFZ/medicaldetectiontoolkit/tree/torch1x) slightly adapted to the problem of prostate lesion detection. \n\nBefore running this Notebook, make sure to:\n - Go to https://github.com/MIC-DKFZ/medicaldetectiontoolkit/tree/torch1x and read the instructions in order to be able to run the Toolkit. These instructions have also been cloned to this repository, in `README.md`\n - The current experiment can be found in `experiments\\exp0`.\n - In file ``experiments\\exp0\\config.py``, the paths pointing to the data might need to be updated:\n - ``self.root_dir``\n - ``self.pp_dir``\n - Train the model and test it: `%run exec.py --mode train_test --exp_source experiments/exp0 --exp_dir experiments/exp0`\n - Generate the final test results from the ensemble of trained models: `%run exec.py --mode analysis --exp_source experiments/exp0 --exp_dir experiments/exp0`",
"_____no_output_____"
],
[
"Load `plot_lib` (https://github.com/OscarPellicer/plot_lib)",
"_____no_output_____"
]
],
[
[
"from pathlib import Path\nimport sys, os\nfrom plot_lib import plot, plot_multi_mask\nfrom IPython.display import display, HTML\nbr= lambda: print(' '*150)\nCSS = \"\"\"output_subarea { flex-direction: row; flex-wrap: wrap; }\n .output { flex-direction: row; flex-wrap: wrap; }\n .widget-hslider { width: auto !important}\"\"\"\nHTML('<style>{}</style>'.format(CSS))",
"_____no_output_____"
]
],
[
[
"Load other required libraries",
"_____no_output_____"
]
],
[
[
"#Import required libs\n%load_ext autoreload\n%autoreload 2\n%matplotlib inline\n\nimport pandas as pd\nimport pickle\nimport numpy as np\n\nfrom result_computation_lib import (plot_patient, compact_class_detections, composite_score, \n plot_auc, get_optimal_thresholds, match_lesions, gen_dsc)\npd.set_option('display.float_format', lambda x: '%.5f' % x)\npd.set_option('display.max_rows', 200)\nimport matplotlib.pyplot as plt\n\nSPACING= (0.5, 0.5, 3)",
"_____no_output_____"
]
],
[
[
"These are the main configuration parameters for the analysis of the results",
"_____no_output_____"
]
],
[
[
"GGG_THRESHOLD= 2 #Mark lesions as significant if their GGG is >= GGG_THRESHOLD\nHOLD_OUT= True #Should be True\nENSEMBLE_FOLDS= True #Set to true load results from fold ensembling\nFOLD= 3 #Fold to use if ENSEMBLE_FOLDS == False\nUSE_RAW= False #Use raw predictions (before WBC)\nSUBSET= '_test' #Must be renamed manually if any subset is to be specified here\nN_classes= 4 #GGG0, GGG1, GGG2, GGG3+\nCLASS_WEIGHTS= {0:-1, 1:-1, 2:1*12.70/17.28, 3:1, 4:1} #Model classes: BG, GGG0, GGG1, GGG2, GGG3+\nCLASS_CONFIDENCE_THRESHOLD= 0.2 #Lesions below the threshold are marked as being class 0 (NOT USED)\nUSE_MODEL= 'experiments/exp0/'\n \nprint('Using model located at:', USE_MODEL, 'Subset:', SUBSET)\nif not ENSEMBLE_FOLDS:\n print('Using Fold %d'%FOLD)\n print(np.load('%s/fold_%d/epoch_ranking.npy'%(USE_MODEL, FOLD)))\nelse:\n print('Using model ensembling')\n \nsys.path.append(USE_MODEL)\nfrom configs import configs\ncf= configs()",
"Using model located at: experiments/exp0/ Subset: _test\nUsing model ensembling\n"
]
],
[
[
"First, we load all necessary data:\n - `test_results_list`: Contains the predictions from the model\n - `patient_info`: Contains the GTs",
"_____no_output_____"
]
],
[
[
"#This is all the data that we will need to read\nif not ENSEMBLE_FOLDS:\n test_results_list= pd.read_pickle('%s/fold_%d/%s_pred_boxes_%slist.pickle'%(\n USE_MODEL, FOLD, 'processed' if not USE_RAW else 'raw', 'hold_out_' if HOLD_OUT else ''))\nelse: \n test_results_list= pd.read_pickle('%s/test/%s_pred_boxes_overall_hold_out_list%s.pickle'%(\n USE_MODEL, 'processed' if not USE_RAW else 'raw', SUBSET))\n\n#Load info of the lesions\npatient_info= pd.read_pickle(os.path.join(cf.pp_dir, 'info_df.pickle'))\npatient_info= patient_info.set_index('pid') ",
"_____no_output_____"
]
],
[
[
"Then, we will create a dataframe `patient_df` with patient-wide results (combined as the maximum score of all individual lesions)",
"_____no_output_____"
]
],
[
[
"ignore_first_N_classes= 1 #Ignore first class (bening)\ndata= []\nfor pix, patient_results in enumerate(test_results_list):\n #Extract patient results\n pid, boxes= patient_results[1], patient_results[0]['boxes'][0]\n\n #Get det boxes scores (& coords) sorted by score and divided by class\n scores_all= [sorted( [[b['box_score'], b['box_coords']] \n for b in boxes if b['box_type'] == 'det' and b['box_pred_class_id'] == cl], \n key=lambda a: a[0], reverse=True)\n for cl in range (1 + ignore_first_N_classes, N_classes + 1)]\n score_per_class= [scores[0][0] if len(scores) else 0. for scores in scores_all]\n z_slice_per_class= [(scores[0][1][4] + scores[0][1][5])/2 if len(scores) else None for scores in scores_all]\n detected_class= np.argmax(score_per_class) + 1\n detected_class_thresholded= detected_class if max(score_per_class) > CLASS_CONFIDENCE_THRESHOLD else 0\n \n #The score depends on the GGG_THRESHOLD\n score= max(score_per_class[GGG_THRESHOLD - ignore_first_N_classes:])\n \n #Find the z-axis to which the max-scoring lesions from each category belong\n \n #Add to list\n data.append( [pid, score, detected_class, detected_class_thresholded, *score_per_class, *z_slice_per_class] )\n \npatient_df= pd.DataFrame(data=data, columns=['pid', 'pred_score', 'det_class', 'det_class_thresholded', \n 'GGG1_score', 'GGG2_score', 'GGG3+_score', 'z1', 'z2', 'z3+']).set_index('pid')\n#patient_df",
"_____no_output_____"
]
],
[
[
"### Plot patients",
"_____no_output_____"
],
[
"Load processed results and plot them patient by patient.\n\nOnly the highest-scoring prediction from highly overlapped predictions is being plotted. Change `match_iou` to 1. in `compact_class_detections` to plot all predictions.",
"_____no_output_____"
]
],
[
[
"#Keep only non-ovelapping (mostly) predictions. Set match_iou=1 to disable it\n#Default: benign_class=1 (i.e. class 0)\ntest_results_list_compacted= compact_class_detections(test_results_list, match_iou=0.25, \n benign_class=1, class_weights=CLASS_WEIGHTS)\n\nfor pix, patient_results in enumerate(test_results_list_compacted): \n #Get and print patient data:\n pid= patient_results[1]\n \n #Stop after the fifith\n if pix == 5: break\n \n #Print some info\n pat_df= patient_info.loc[pid, :]\n print('%d: %s | Lesion GGGs: %s'%\n (pix, pid, pat_df.class_target))\n print('Max scores: GGG1: %.2f | GGG2: %.2f | GGG3+: %.2f'%\n tuple(patient_df.loc[pid, ['GGG1_score', 'GGG2_score', 'GGG3+_score']].values.tolist()) )\n br()\n \n #Show plot at highest scoring lesion\n z= np.round(patient_df.loc[pid, ['z0', 'z1', 'z2', 'z3+'][patient_df.loc[pid, 'det_class']]])\n z= 12 if np.isnan(z) else z\n \n #Plot\n plot_patient(patient_results, cf.pp_test_data_path, N_classes= N_classes, seg_threshold=0.5, \n min_threshold= 0.08, plot_max_per_class=100, spacing=SPACING, scale='auto', ct=0, z=z, \n dpi=100, text_kwargs={'linespacing':0.9},\n class_colors={i:plt.get_cmap('Paired')(ci) for i, ci in enumerate(np.arange(0, 1, step=1/(N_classes + 2)))})",
"0: ProstateX-0001 | Lesion GGGs: [1]\nMax scores: GGG1: 0.19 | GGG2: 0.24 | GGG3+: 0.16\n \n"
]
],
[
[
"### Prostatex-like lesion-level results\n\nDetections must be matched with ground truth lesions to get the lesion-level results. They can be matched either by looking at the intersection of their bounding boxes, or by looking at their distance (``USE_DISTANCE= True``). In both cases, the highest-score non-benign lesion from the matched lesions is considered as the score for that lesion.",
"_____no_output_____"
]
],
[
[
"USE_DISTANCE= True #Use distance instead of IoU for matching lesions\nDISTANCE_THRESHOLD= 15 #Consider lesions within a 15mm radius of the GT lesion\nthreshold_list= [] #[0.4, 0.2, 0.1, 0.05, 0.025]",
"_____no_output_____"
],
[
"#First, we expand the list of lesions of patient_info, setting their oreder as a lesion index\n#We do it by hand to make sure that lists are expanded in order, so that lesion_id can be added\ndata= []\nfor row in patient_info.itertuples():\n lst = row[1]\n for i,col2 in enumerate(lst):\n data.append([row[0], i, col2, row[-1]])\n \npatient_info_expanded = pd.DataFrame(data=data, columns=['pid', 'lesion_id', 'class_target', 'massive_class_target'])\n\n#Keep from patient info only the patients in test set\npids= [p[1] for p in test_results_list]\npatients_test= patient_info_expanded.loc[patient_info_expanded.pid.isin(pids),].set_index(['pid', 'lesion_id'])\npatients_test['score']= 0.\npatients_test.loc[patients_test.class_target == 10.,:]= 0.\npatients_test.loc[patients_test['massive_class_target'].isna(), 'massive_class_target']= -1\npatients_test['ClinSig']= patients_test['class_target'] >= GGG_THRESHOLD\npatients_test['ClinSig2']= np.nan\npatients_test['det_class']= 1\ncolumns_to_keep= ['ClinSig', 'ClinSig2', 'massive_class_target', 'class_target', 'score']\npatients_test= patients_test[columns_to_keep]\n\n#Match lesions\npatients_test= match_lesions(test_results_list, patients_test, drop_nans=True, class_col='det_class', \n cl=GGG_THRESHOLD + 1, match_iou= 1e-5, spacing=SPACING,\n use_distance=USE_DISTANCE, distance_threshold=DISTANCE_THRESHOLD )\npatients_test['det_class']-= 1 #To make it correspond with GGG\npatients_test.loc[patients_test.det_class.isna(), 'det_class']=0\npatients_test['det_class']= patients_test['det_class'].astype(int)\npatients_test['det_class_thresholded']= patients_test['det_class']\npatients_test.loc[patients_test['score'] < CLASS_CONFIDENCE_THRESHOLD, 'det_class_thresholded']= 0\n\n#Prepare more dataframes in case that we want to individualize\npatients_test_prostatex= patients_test.loc[\n patients_test.index.isin([p for p in patients_test.index.get_level_values(0) \n if p.lower().startswith('prostatex') and int(p[-4:]) < 204], level=0), :]\n\n#print results at different thresholds\nprint('Lesion-level (prostatex-like). Sig if GGG >= %d'%GGG_THRESHOLD)\nfor plot_subset, name in zip([patients_test_prostatex], ['ProstateX']):\n print(' ', name)\n print(' N=%3d AUC | Acc. | Sens. | Specif.'%(len(plot_subset)))\n for t in threshold_list + [*get_optimal_thresholds(plot_subset.ClinSig, plot_subset.score)]:\n print(' %s: %.3f | %.3f | %.3f | %.3f'%('t=%.2f'%t, *composite_score(\n plot_subset.ClinSig, plot_subset.score, t=t)))\n\n #Plot AUC\n plot_auc(plot_subset.ClinSig, plot_subset.score, new_fig=name=='All', legend=name + ' ', annotate=False)\n \n_=plt.title('Lesion-level ROC Curve')\n#plt.gcf().savefig('lesion_auc.png', dpi=300)",
"Lesion-level (prostatex-like). Sig if GGG >= 2\n ProstateX\n N= 69 AUC | Acc. | Sens. | Specif.\n t=0.38: 0.890 | 0.870 | 0.308 | 1.000\n t=0.09: 0.890 | 0.855 | 0.923 | 0.839\n t=0.09: 0.890 | 0.855 | 0.923 | 0.839\n"
]
],
[
[
"### Patient-level results",
"_____no_output_____"
],
[
"The worst lesion result for a given patient is taken as the Grount Truth, while the highest scoring non-bening lesion is taken as the prediction score.",
"_____no_output_____"
]
],
[
[
"#Prepare two more dataframes in case that we want to individualize\ndf= patient_df.reset_index()\ndf_prostatex= df.loc[df.pid.isin([p for p in df.pid if p.lower().startswith('prostatex') and int(p[-4:]) < 204]), :]\n\n#Process dataframes to contain all, prostatex and ivo patients & plot results\nprint('Patient-level. Sig if GGG >= %d'%GGG_THRESHOLD)\nfor subset, name in zip([df_prostatex], ['ProstateX']):\n #Fix patients\n spec_df= subset.merge(patient_info, how='left', on='pid')\n def get_worst(l):\n l= np.array(l + [0])\n l= l[l != 10]\n return np.max(l)\n spec_df['lesions_ggg']= spec_df['class_target'].apply(get_worst)\n spec_df['ggg']= spec_df['lesions_ggg']\n spec_df['ClinSig']= spec_df['ggg'] >= GGG_THRESHOLD\n spec_df.loc[spec_df['pred_score'] < CLASS_CONFIDENCE_THRESHOLD, 'det_class_thresholded']= 0\n\n #Print results at different thresholds\n print(' %s'%name)\n print(' N=%3d AUC | Acc. | Sens. | Specif.'%(len(spec_df)))\n for t in threshold_list + [*get_optimal_thresholds(spec_df.ClinSig, spec_df.pred_score)]:\n print(' %s: %.3f | %.3f | %.3f | %.3f'%('t=%.2f'%t, *composite_score(\n spec_df.ClinSig, spec_df.pred_score, t=t)))\n \n #Plot AUC\n plot_auc(spec_df.ClinSig, spec_df.pred_score, new_fig=name=='All', legend=name + ' ', annotate=name=='All')\n\n_=plt.title('Patient-level ROC Curve')\n#plt.gcf().savefig('patient_auc.png', dpi=300)",
"Patient-level. Sig if GGG >= 2\n ProstateX\n N= 45 AUC | Acc. | Sens. | Specif.\n t=0.40: 0.814 | 0.800 | 0.308 | 1.000\n t=0.26: 0.814 | 0.822 | 0.692 | 0.875\n t=0.09: 0.814 | 0.689 | 0.923 | 0.594\n"
]
],
[
[
"## Compute DSC scores",
"_____no_output_____"
]
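,
[
"As a reminder (assuming `gen_dsc` implements the standard definition), the Dice similarity coefficient between a predicted segmentation $P$ and a ground-truth segmentation $G$ is\n\n$$\\mathrm{DSC} = \\frac{2\\,|P \\cap G|}{|P| + |G|}$$",
"_____no_output_____"
]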
],
[
[
"dice_threshold= 0.25\n\ndscs_prostatex= []\nfor patient_results, pid in test_results_list:\n seg_p= patient_results['seg_preds'][0] > dice_threshold\n seg_p= np.transpose(seg_p, axes=(2,0,1))\n seg= np.load(os.path.join(cf.pp_test_data_path,'{}_rois.npy'.format(pid)))\n dscs_prostatex.append(gen_dsc(seg, seg_p))\n \nmetric_name= 'DSC'\nfor name, metric in zip(['ProstateX'], [dscs_prostatex]):\n print('%10s | %s: mean: %.4f +/- %.4f (N=%d) | median : %.4f | min : %.4f | max : %.4f'%\n (name, metric_name, np.mean(metric), np.std(metric), len(metric), np.median(metric), \n np.min(metric), np.max(metric)))",
" ProstateX | DSC: mean: 0.2705 +/- 0.2231 (N=45) | median : 0.2841 | min : 0.0001 | max : 0.7712\n"
]
],
[
[
"## Compete in ProstateX challenge",
"_____no_output_____"
],
[
"To compete in ProstateX, go to `config.py` and set `self.test_set= 'train'`. Then run `exec.py` in `test` mode, and in `analysis` mode after.\nYou may want to manually rename the previous test results to avoid overwriting them.\n\n**Alternatively**, the predictions are already provided as a zip file in `train_boxes.zip`\n\nYou must also set the path to the challenge ``.csv`` test file provided alongside the challenge images.",
"_____no_output_____"
]
],
[
[
"if 'train' in SUBSET:\n #Load dataset and results\n GGG_THRESHOLD_PROSTATEX= 2 #In prostateX clinically significant prostate cancer is considered as GGG >= 2\n prostatex_test= pd.read_csv('../ProstateX-Findings-Test.csv') #Path to the ProstateX test data\n\n #Keep only prostatex test patients\n prostatex_test_results_list= [p for p in test_results_list if p[1] in prostatex_test.ProxID.values] \n\n #Set a new column 'lesion_id' that contains the actual ID used in the model for each lesion\n prostatex_test['lesion_id']= prostatex_test.groupby('ProxID').cumcount()\n\n #Set the index to ProxID and fid2\n prostatex_test= prostatex_test.set_index(['ProxID', 'lesion_id'])\n prostatex_test['ClinSig']= 0.\n prostatex_test['ClinSig2']= np.nan #Will be true for all since class=20 > 0\n\n #Match lesions\n prostatex_test= match_lesions(prostatex_test_results_list, prostatex_test, drop_nans=False, score_col='ClinSig',\n cl=GGG_THRESHOLD_PROSTATEX + 1,\n use_distance=USE_DISTANCE, distance_threshold=DISTANCE_THRESHOLD, \n spacing=SPACING, normalize_by_distance=False)\n\n #Save with correct formatting\n prostatex_submission= prostatex_test.reset_index().set_index(['ProxID', 'fid']).sort_index(\n ).reset_index()[['ProxID', 'fid', 'ClinSig']]\n prostatex_submission.to_csv('prostatex_submission.csv', index=False)\n\n #Check that the scores are not all zeros\n prostatex_submission",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
]
] |
e712ff51ccd12cd3e82e1e22f03456c29b20b9f5 | 894,179 | ipynb | Jupyter Notebook | .ipynb_checkpoints/lpsm-checkpoint.ipynb | constellationcolon/simplexity | 4c5722f4e7d6f77a189fed466bbb526e504caf0b | [
"MIT"
] | null | null | null | .ipynb_checkpoints/lpsm-checkpoint.ipynb | constellationcolon/simplexity | 4c5722f4e7d6f77a189fed466bbb526e504caf0b | [
"MIT"
] | null | null | null | .ipynb_checkpoints/lpsm-checkpoint.ipynb | constellationcolon/simplexity | 4c5722f4e7d6f77a189fed466bbb526e504caf0b | [
"MIT"
] | null | null | null | 100.053597 | 68,329 | 0.766635 | [
[
[
"%matplotlib notebook\n# %matplotlib inline\n\nfrom IPython.display import display, HTML\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nplt.style.use('ggplot')",
"_____no_output_____"
]
],
[
[
"## The (College Student) Diet Problem\n\nConsider the canonical college student. After a hard afternoon's work of solving way too many partial differential equations, she emerges from her room to obtain sustenance for the day.\n\nShe has a choice between getting chicken over rice (\\$5) from the halal cart on her street ($r$), or subs (\\$7) from the deli ($s$). She's a poor college student, so she will obviously want to get her money's worth. This is obviously an optimisation problem: she wants to find the amount of chicken over rice and subs she has to buy in order to minimise the total cost she spends on food.\n\n$$\n\\text{minimise} \\quad 5r + 7s\n$$\n\nIn optimisation, we like to call this expression the **objective function**.\n\nWell, it's not as simple as that. A girl's got to get her fill of daily nutrients. Fibre, protein, and carbohydrates are all important, and however far away food pyramids are from the quotidien thoughts of college students, a girl can still dream of a pseudo-healthy diet with at least 4 servings of fibre, 3 servings of protein, and 6 servings of carbohydrates.\n\nA chicken over rice has 2 servings of fibre, 3 servings of protein, and 3 servings of carbohydrates, while a sub has 1 serving of fibre, 3 servings of protein, and 4 servings of carbohydrates. To find the combination of meals that satisfies the daily nutritional requirements, we impose the following **constraints**:\n\n\\begin{align}\n\\text{Fibre: } &2r + s \\geq 4 \\\\\n\\text{Protein: } &3r + 3s \\geq 3 \\\\\n\\text{Carbohydrates: } &3r + 4s \\geq 6\n\\end{align}",
"_____no_output_____"
],
[
"### Visualising the Problem",
"_____no_output_____"
]
],
[
[
"fig = plt.figure()\naxes = fig.add_subplot(1,1,1)\n\n# define view\nr_min = 0.0\nr_max = 3.0\ns_min = 0.0\ns_max = 5.0\nres = 50\n\nr = numpy.linspace(r_min, r_max, res)\n\n# plot axes\naxes.axhline(0, color='#B3B3B3', linewidth=5)\naxes.axvline(0, color='#B3B3B3', linewidth=5)\n\n# plot constraints\nc_1 = lambda x: 4 - 2*x\nc_2 = lambda x: 1 - x\nc_3 = lambda x: 0.25 * ( 6 - 3*x )\nc_1_line = axes.plot( r, c_1(r), label='Fibre' ) # 2r + s \\geq 4\nc_2_line = axes.plot( r, c_2(r), label='Protein' ) # 3r + 3s \\geq 3\nc_3_line = axes.plot( r, c_3(r), label='Carbohydrate' ) # 3r + 4s \\geq 6\n\n# plot objective\ns = numpy.linspace(s_min, s_max, res)\nc = numpy.empty([r.size, s.size])\nfor i, r_i in enumerate(r):\n c[:,i] = 5 * r_i + 12 * s\n\naxes.contourf(r, s, c, res, cmap='Oranges', alpha=0.5)\nr_cut = numpy.linspace(0.0, 2.0, 100)\naxes.fill_between(r_cut, c_1(r_cut), color='w')\n\n# plot cost minimising point\naxes.plot(2.0, 0, 'o')\n\n# label graph\naxes.set_title('Visualising the Diet Problem')\naxes.set_xlabel('Chicken Over Rice')\naxes.set_ylabel('Sub')\naxes.legend()\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"We can visualise our diet problem on a graph of \"Number of Subs vs. Number of Chicken or Rice\", where lines each represent a constraint, and our cost function can be represented in shades of blue: the deeper the blue, the more we will spend on meals.\n\nThe regions where we will satisfy our constraints will be the regions _above_ our constraint lines, since we want more than or equal to the number of minimum servings. Obviously, we can't buy a negative number of subs or chicken over rice, so we have the implicit constraints that $r>0$ and $s>0$.\n\nThe intersection of all the regions that satisfy each of our constraints is what we call the **feasible region**, or the **feasible set**, the region where solutions that satisfy all constraints. In the graph, this is the region with the blue gradient fill.\n\nSo our problem of deciding how much of what food to buy has been essentially reduced to finding the point in the feasible set with the minimum cost (i.e. the lightest shade of blue.) With one glance, we can tell that this point is $(0, 2)$, so we should buy 2 chicken over rice, and 0 subs. Interestingly, our feasible region is determined largely by the fibre constraint—read from this what you want.\n\nWell, you think to yourself, that was easy; I can stop reading now!\n\nThat's true, if you only have 2 foods to choose between. But in general, life isn't as simple as this; if, say, you're a functioning adult and actually cook, you'll want to choose between the 1000's of grocery items available to you at the local supermarket. In that case, you'll have to draw out one axis for each food item (how you'll do that, I don't know), and then compare the colors across this unvisualisable space. This shit gets real, and fast.",
"_____no_output_____"
],
[
"## Linear Programming\n\nWell, luckily for us, a clever guy by the name of **George Dantzig** managed to solve exactly this type of problem for us while he was working for the U.S. Air Force in WWII, when computers were just starting to come out of the realm of science fiction. They faced a similar problem then, as many do now: they only had a set amount of men and resources, and wanted to maximise the amount of work they could do in winning the war.\n\n\n\nIn other areas, you could also imagine say, a furniture manufacturer wanting to find the most efficient way of using the manpower and planks, screws, tools, and whatever they use to build furniture these days, to produce the combination of furniture that will maximise their profits. Or, on Wall Street, a trader wanting to find the best combination of differently priced assets that maximises projected profits, or minimises risk (or something along those lines; I know nuts about finance).\n\nWe call these sorts of problems, wherein we want to maximise (or minimise!) some linear objective function subject to a set of linear constraints **linear optimisation problems**, and the methods we use to solve these problems **linear programming**.",
"_____no_output_____"
],
[
"### Standard Form and Duality\nLinear optimisation problems can always be expressed as\n\n\\begin{align}\n\\text{maximise} \\quad & b_1 x_1 + b_2 x_2 + \\ldots + b_m x_m \\\\\n\\text{subject to} \\quad & a_{11} x_{1} + a_{21} x_{2} + \\ldots + a_{m1} x_{m} \\leq c_1 \\\\\n & a_{12} x_{1} + a_{22} x_{2} + \\ldots + a_{m2} x_{m} \\leq c_2 \\\\\n & \\vdots \\\\\n & a_{1n} x_{1} + a_{2n} x_{2} + \\ldots + a_{mn} x_{m} \\leq c_n \n\\end{align}",
"_____no_output_____"
],
[
"In less symbols, this is\n\n\\begin{align}\n\\text{maximise} \\quad & b^T x \\\\\n\\text{subject to} \\quad & Ax \\leq c\n\\end{align}",
"_____no_output_____"
],
[
"This is what is commonly known as the **dual form** of the problem. Well, so if there is a dual, then there must actually be 2 problems, right? So what was the first?\n\nTurns out, we call the \"first\" problem the **primal problem**, and surprisingly (or not), the solution of the primal problem will give us an upper bound on the corresponding solution of the dual problem. It looks like this:\n\n\\begin{align}\n\\text{minimise} \\quad & c_1 y_1 + c_2 y_2 + \\ldots + c_m y_ n\\\\\n\\text{subject to} \\quad & a_{11} y_{1} + a_{12} y_{2} + \\ldots + a_{1n} y_{n} = b_1 \\\\\n & a_{21} y_{1} + a_{22} y_{2} + \\ldots + a_{m2} y_{n} = b_2 \\\\\n & \\vdots \\\\\n & a_{m1} y_{1} + a_{m2} y_{2} + \\ldots + a_{nm} y_{n} = b_m \\\\\n\\text{and} \\quad & \\{ y_i \\geq 0 \\}_{i=1}^m\n\\end{align}",
"_____no_output_____"
],
[
"aka\n\n\\begin{align}\n\\text{minimise} \\quad & c^T y \\\\\n\\text{subject to} \\quad & A^T y = b \\\\\n\\text{and} \\quad & y \\geq 0\n\\end{align}",
"_____no_output_____"
],
[
"We basically interchange the constraints' constants and the coefficients in our objective function, and turn the inequalities into equalities. The nice thing about the dual problem and its primal, is that the primal problem has an optimal solution $x^*$, then the dual also has an optimal solution $y^*$ related by $b^Tx^*=c^Ty^*$, i.e. the two problems have the same optimum value!\n\nThe dual problem for linear optimisation problems was first conjectured by von Neumann, who was then working on game theory. We can think of the fact that any linear programme has a dual problem as 2 players are playing a zero-sum game; any gains on the part of one player must necessarily result in losses for the other player. When you maximise utility for one player, you are at the same time minimising utility for the other.",
"_____no_output_____"
],
[
"So what does our college student diet problem look like in the standard form (and its primal?)\n\nSince maximising a function is just minimising the negative of the function, the problem becomes\n\n\\begin{align}\n\\text{maximise} \\quad & - 5r - 7s \\\\\n\\text{subject to} \\quad & - 2r - s \\leq - 4 \\\\\n & - 3r - 3s \\leq - 3 \\\\\n & - 3r - 4s \\leq - 6\n\\end{align}",
"_____no_output_____"
]
],
[
[
"fig = plt.figure()\naxes = fig.add_subplot(1,1,1)\n\n# plot axes\naxes.axhline(0, color='k')\naxes.axvline(0, color='k')\n\n# plot constraints\nc_1 = lambda x: 4 - 2*x\nc_2 = lambda x: 1 - x\nc_3 = lambda x: - 0.25 * ( - 6 + 3*x )\nc_1_line = axes.plot( r, c_1(r), label='Fibre' ) # 2r + s \\geq 4\nc_2_line = axes.plot( r, c_2(r), label='Protein' ) # 3r + 3s \\geq 3\nc_3_line = axes.plot( r, c_3(r), label='Carbohydrate' ) # 3r + 4s \\geq 6\n\n# plot objective\ns = numpy.linspace(s_min, s_max, res)\nc = numpy.empty([r.size, s.size])\nfor i, r_i in enumerate(r):\n c[:,i] = - 5 * r_i - 12 * s\n\naxes.contourf(r, s, c, res, cmap='Oranges', alpha=0.5)\nr_cut = numpy.linspace(0.0, 2.0, 100)\naxes.fill_between(r_cut, c_1(r_cut), color='w')\n\n# plot cost minimising point\naxes.plot(2.0, 0, 'o')\n\n# label graph\naxes.set_title('Visualising the Diet Problem, Standard Form')\naxes.set_xlabel('Chicken Over Rice')\naxes.set_ylabel('Sub')\naxes.legend(loc=1)\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"In dual form, this is\n\n\\begin{align}\n\\text{minimise} \\quad & - 4y_1 - 3y_2 - 6y_3 \\\\\n\\text{subject to} \\quad & 2y_1 + 3y_2 + 3y_3 = 5 \\\\\n & y_1 + 3y_2 + 4y_3 = 7 \\\\\n\\text{and} \\quad & \\{ y_i \\geq 0 \\}_{i=1}^3 \\\\\n\\end{align}\n\nWhich can be seen as minimising the objective function on the line segment formed by intersecting the 2 constraint planes. We can also interpret this as wanting to **maximise** the nutritional value of our meals, given that trying to increase the quantity of one nutrient will necessarily mean that we have to give up some amount of another nutrient.",
"_____no_output_____"
],
[
"## The Simplex Method\n\n### Standard Form (for the Simplex Method)\n",
"_____no_output_____"
],
[
"\\begin{align}\n\\text{maximise} \\quad & c_1 x_1 + c_2 x_2 + \\ldots + c_m x_m \\\\\n\\text{subject to} \\quad & a_{11} x_1 + a_{12} x_2 + \\ldots + a_{1m} x_m \\leq b_1 \\\\\n & a_{21} x_2 + a_{22} x_2 + \\ldots + a_{2m} x_m \\leq b_2 \\\\\n & \\vdots \\\\\n & a_{n1} x_n + a_{n2} x_n + \\ldots + a_{nm} x_m \\leq b_n \\\\\n\\text{and} \\quad & \\{ x_i \\geq 0 \\}_{i=1}^m \\text{ and } \\{ b_j \\geq 0 \\}_{j=1}^n\n\\end{align}",
"_____no_output_____"
],
[
"1. If you are currently trying to minimise the objective function, turn it into a maximisation problem by taking the negative of the expression\n2. Turn all the inequality constraints into equality constraints by adding **slack variables**\n3. If these transformation still don't allow your system of equations to fit the form, solve the **dual form** of the problem!",
"_____no_output_____"
],
[
"#### System of Constraint Equations\n\n\\begin{align}\n\\text{maximise} \\quad & c_1 x_1 + c_2 x_2 + \\ldots + c_m x_m = z \\\\\n\\text{subject to} \\quad & a_11 x_1 + a_12 x_2 + \\ldots + a_1m x_m + s_1 = b_1 \\\\\n & a_21 x_2 + a_22 x_2 + \\ldots + a_2m x_m + s_2 = b_2 \\\\\n & \\vdots \\\\\n & a_n1 x_n + a_n2 x_n + \\ldots + a_nm x_m + s_n = b_n \\\\\n\\text{and} \\quad & \\{ x_i \\geq 0 \\}_{i=1}^{m}, ~ \\{ s_i \\geq 0 \\}_{j=1}^{n}, ~ \\text{ and } \\{ b_j \\geq 0 \\}_{j=1}^n\n\\end{align}",
"_____no_output_____"
],
[
"Taking another look at our diet problem, we can put this problem\n\n\\begin{align}\n\\text{maximise} \\quad & - 5r - 7s \\\\\n\\text{subject to} \\quad & - 2r - s \\leq - 4 \\\\\n & - 3r - 3s \\leq - 3 \\\\\n & - 3r - 4s \\leq - 6 \\\\\n\\text{and} \\quad & r, s \\geq 0\n\\end{align}\n\ninto standard form for the simplex method by putting it into its dual form:\n\n\\begin{align}\n\\text{maximise} \\quad & 6y_1 + 3y_2 + 4y_3 \\\\\n\\text{subject to} \\quad & 3y_1 + 3y_2 + 2y_3 \\leq 5 \\\\\n & 4y_1 + 3y_2 + y_3 \\leq 7 \\\\\n\\text{and} \\quad & \\{ y_i \\geq 0 \\}_{i=1}^3 \\\\\n\\end{align}\n\nHence, the constraint equations are\n\n\\begin{align}\n\\text{maximise} \\quad & 6y_1 + 3y_2 + 4y_3 = z \\\\\n\\text{subject to} \\quad & 3y_1 + 3y_2 + 2y_3 + s_1 = 5 \\\\\n & 4y_1 + 3y_2 + y_3 + s_2 = 7 \\\\\n\\text{and} \\quad & \\{ y_i \\geq 0 \\}_{i=1}^3 \\text{ and } \\{ s_i \\geq 0 \\}_{i=1}^2 \\\\\n\\end{align}",
"_____no_output_____"
],
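[
"Before implementing the algorithm ourselves, we can sanity-check the optimum. This is a sketch that assumes `scipy` is available; it is not part of the original derivation:\n\n```python\nfrom scipy.optimize import linprog\n\n# minimise 5r + 7s subject to 2r + s >= 4, 3r + 3s >= 3, 3r + 4s >= 6\n# (>= constraints are passed to linprog as negated <= constraints)\nres = linprog(c=[5, 7],\n              A_ub=[[-2, -1], [-3, -3], [-3, -4]],\n              b_ub=[-4, -3, -6],\n              bounds=[(0, None), (0, None)])\nprint(res.x, res.fun)   # expect roughly (2, 0) with cost 10\n```",
"_____no_output_____"
],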
[
"### The Algorithm",
"_____no_output_____"
]
],
[
[
"import pandas as pd\npd.set_option('display.notebook_repr_html', True)",
"_____no_output_____"
],
[
"def pivot(departing, entering, tab):\n dpi = tab[tab['basic_variable']==departing].index[0] # index of the departing row\n \n # update basic variable\n tab['basic_variable'][dpi] = entering\n\n # normalise departing_row\n tab.ix[dpi,0:-1] = tab.ix[dpi,0:-1] / tab[entering][dpi]\n\n departing_row = tab.ix[dpi,0:-1]\n\n # do gauss-jordan on entering variable column\n for row in tab.index[tab.index!=dpi]:\n tab.ix[row, 0:-1] = tab.ix[row, 0:-1] - tab[entering][row] * departing_row",
"_____no_output_____"
],
[
"# Bland's rule\ndef calculate_ratio(entering, tab):\n ratios = tab.ix[0:-1, 'value'] * 0 - 1\n \n for index, is_valid in enumerate(tab.ix[0:-1, entering] > 0):\n if is_valid==True:\n ratios[index] = tab.ix[index, 'value']/tab.ix[index, entering]\n return ratios",
"_____no_output_____"
],
[
"def find_entering(tab):\n return tab.ix['z',0:-2].idxmin()",
"_____no_output_____"
],
[
"def find_departing(ratios, tab):\n return tab.ix[ratios[ratios>=0].idxmin(),'basic_variable']",
"_____no_output_____"
],
[
"def update_stats(tab):\n \n print \"Basic variables: \"\n basic_variables = tab.ix[0:-1, 'basic_variable'].values\n print basic_variables\n \n print \"Non-basic variables: \"\n non_basic_variables = numpy.setdiff1d(tab.columns[0:-2], basic_variables)\n print non_basic_variables\n \n print \"Entering variable: \"\n entering_variable = find_entering(tab)\n print entering_variable\n \n print \"Ratios: \"\n ratios = calculate_ratio(entering_variable, tab)\n print ratios\n \n print \"Departing variable: \"\n departing_variable = find_departing(ratios, tab)\n print departing_variable\n \n return departing_variable, entering_variable",
"_____no_output_____"
],
[
"def is_optimum(tab):\n return (tab.ix['z',0:-2] >= 0).all()",
"_____no_output_____"
],
[
"def run_simplex(tableau_dict, tableau_orig, max_iterations=10, force_iterations=0):\n if force_iterations == 0:\n for i in xrange(max_iterations):\n tableau_dict[i] = tableau_orig.copy()\n display(tableau_orig)\n if is_optimum(tableau_orig):\n break\n departing_variable, entering_variable = update_stats(tableau_orig)\n pivot(departing_variable, entering_variable, tableau_orig)\n else:\n for i in xrange(force_iterations):\n tableau_dict[i] = tableau_orig.copy()\n display(tableau_orig)\n departing_variable, entering_variable = update_stats(tableau_orig)\n pivot(departing_variable, entering_variable, tableau_orig)",
"_____no_output_____"
],
[
"c_1 = numpy.array([[ 3, 3, 2, 1, 0, 5, 's_1']])\nc_2 = numpy.array([[ 4, 3, 1, 0, 1, 7, 's_2']])\nz = numpy.array([[-6, -3, -4, 0, 0, 0, '']])\nrows= numpy.concatenate((c_1, c_2, z), axis=0)\n\ntableau = pd.DataFrame(rows, columns=['y_1','y_2','y_3','s_1','s_2','value', 'basic_variable'], index=['c_1','c_2','z']) \ntableau.ix[:,0:-1] = tableau.ix[:,0:-1].astype('float')",
"_____no_output_____"
],
[
"tableaux = dict()\nrun_simplex(tableaux, tableau)",
"_____no_output_____"
],
[
"from ipywidgets import interact\n\ndef diet_problem(step):\n fig = plt.figure()\n axes = fig.add_subplot(1,1,1)\n\n # plot axes\n axes.axhline(0, color='k')\n axes.axvline(0, color='k')\n\n # plot constraints\n c_1 = lambda x: 4 - 2*x\n c_2 = lambda x: 1 - x\n c_3 = lambda x: - 0.25 * ( - 6 + 3*x )\n c_1_line = axes.plot( r, c_1(r), label='Fibre' ) # 2r + s \\geq 4\n c_2_line = axes.plot( r, c_2(r), label='Protein' ) # 3r + 3s \\geq 3\n c_3_line = axes.plot( r, c_3(r), label='Carbohydrate' ) # 3r + 4s \\geq 6\n\n # plot objective\n for i, r_i in enumerate(r):\n c[:,i] = - 5 * r_i - 12 * s\n\n axes.contourf(r, s, c, res, cmap='Oranges', alpha=0.5)\n axes.fill_between(r_cut, c_1(r_cut), color='w')\n\n step_coords = numpy.array([[0.0, 0.0], [2.0, 0.0]])\n \n # plot point\n axes.plot(step_coords[step][0], step_coords[step][1], 'ro', markersize=10)\n\n # label graph\n axes.set_title('Simplex Method on the College Diet Problem, Iteration ' + str(step))\n axes.set_xlabel('Chicken Over Rice')\n axes.set_ylabel('Sub')\n axes.legend(loc=1)\n\n plt.show()\n display(tableaux[step])",
"_____no_output_____"
],
[
"interact(diet_problem, step=(0,1));",
"_____no_output_____"
]
],
[
[
"### Bland's Rule\n\nThis seemingly arbitrary rule will seem less arbitrary in just a while.",
"_____no_output_____"
],
[
"### Multiple Optimal Solutions\n\nSo, given the graphical intuition we now have for how the simplex method works, do we know if there ever a time when we would encounter more than 1 optimal solution for a given problem?",
"_____no_output_____"
],
[
"\\begin{align}\n\\text{maximise} \\quad & 5x_1 + 7x_2 \\\\\n\\text{subject to} \\quad & 2x_1 + x_2 \\leq 4 \\\\\n & 10x_1 + 14x_2 \\leq 30 \\\\\n\\text{and} \\quad & x_1, x_2 \\geq 0\n\\end{align}",
"_____no_output_____"
]
],
[
[
"fig = plt.figure()\naxes = fig.add_subplot(1,1,1)\n\n# define view\nx_1_min = 0.0\nx_1_max = 3.0\nx_2_min = 0.0\nx_2_max = 5.0\nres = 50\n\n# plot axes\naxes.axhline(0, color='k')\naxes.axvline(0, color='k')\n\n# plot constraints\nx_1 = numpy.linspace(x_1_min, x_1_max, res)\nc_1 = lambda x: 4.0 - 2.0*x\nc_2 = lambda x: (30.0 - 10.0*x)/14.0\nc_1_line = axes.plot( x_1, c_1(x_1), label='Constraint 1' ) # 2x_1 + x_2 \\leq 4\nc_2_line = axes.plot( x_1, c_2(x_1), label='Constraint 2' ) # 10x_1 + 14x_2 \\leq 30\n\n# plot objective\nx_2 = numpy.linspace(x_2_min, x_2_max, res)\nc = numpy.empty([x_1.size, x_2.size])\nfor i, x_1_i in enumerate(x_1):\n c[:,i] = 5 * x_1_i + 7 * x_2\n\naxes.contourf(x_1, x_2, c, res, cmap='Oranges', alpha=0.5)\n# shade feasible region\nc_1_bottom = numpy.linspace(0.0, 2.0, res)\nc_2_bottom = numpy.linspace(0.0, 3.0, res)\naxes.fill_between(c_1_bottom, c_1(c_1_bottom), color=plt.rcParams['axes.color_cycle'][0], alpha=0.5)\naxes.fill_between(c_2_bottom, c_2(c_2_bottom), color=plt.rcParams['axes.color_cycle'][1], alpha=0.5)\n\n# label graph\naxes.set_title('How many solutions?')\naxes.set_xlabel(r'x_1')\naxes.set_ylabel(r'x_2')\naxes.legend(loc=1)\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"\\begin{align}\n\\text{maximise} \\quad & 5x_1 + 7x_2 \\\\\n\\text{subject to} \\quad & 2x_1 + x_2 + s_1 = 4 \\\\\n & 10x_1 + 14x_2 + s_2 = 30 \\\\\n\\text{and} \\quad & x_1, x_2, s_1, s_2 \\geq 0\n\\end{align}",
"_____no_output_____"
]
],
[
[
"c_1 = numpy.array([[ 2, 1, 1, 0, 4, 's_1']])\nc_2 = numpy.array([[10, 14, 0, 1, 30, 's_2']])\nz = numpy.array([[-5, -7, 0, 0, 0, '']])\nrows= numpy.concatenate((c_1, c_2, z), axis=0)\n\ntableau_multiple = pd.DataFrame(rows, columns=['x_1','x_2','s_1','s_2','value', 'basic_variable'], index=['c_1','c_2','z']) \ntableau_multiple.ix[:,0:-1] = tableau_multiple.ix[:,0:-1].astype('float')",
"_____no_output_____"
],
[
"tableaux_multiple = dict()\nrun_simplex(tableaux_multiple, tableau_multiple, force_iterations=3)",
"_____no_output_____"
],
[
"step_coords = numpy.array([[0.0, 0.0], [0.0, 2.14286], [tableaux_multiple[2].ix['c_1','value'], tableaux_multiple[2].ix['c_2','value']]])\nstep_value = numpy.array([tableaux_multiple[0].ix['z','value'], tableaux_multiple[1].ix['z','value'], tableaux_multiple[2].ix['z','value']])\n\ndef multiple_problem(step):\n fig = plt.figure()\n axes = fig.add_subplot(1,1,1)\n\n # define view\n x_1_min = 0.0\n x_1_max = 3.0\n x_2_min = 0.0\n x_2_max = 5.0\n\n # plot axes\n axes.axhline(0, color='k')\n axes.axvline(0, color='k')\n\n # plot constraints\n x_1 = numpy.linspace(x_1_min, x_1_max, res)\n c_1 = lambda x: 4.0 - 2.0*x\n c_2 = lambda x: (30.0 - 10.0*x)/14.0\n c_1_line = axes.plot( r, c_1(r), label='Constraint 1' ) # 2x_1 + x_2 \\leq 4\n c_2_line = axes.plot( r, c_2(r), label='Constraint 2' ) # 10x_1 + 14x_2 \\leq 30\n\n # plot objective\n x_2 = numpy.linspace(x_2_min, x_2_max, res)\n c = numpy.empty([x_1.size, x_2.size])\n for i, x_1_i in enumerate(x_1):\n c[:,i] = 5 * x_1_i + 7 * x_2\n\n # color map of objective function values\n axes.contourf(x_1, x_2, c, res, cmap='Oranges', alpha=0.5)\n \n # shade feasible region\n c_1_bottom = numpy.linspace(0.0, 2.0, res)\n c_2_bottom = numpy.linspace(0.0, 3.0, res)\n axes.fill_between(c_1_bottom, c_1(c_1_bottom), color=plt.rcParams['axes.color_cycle'][0], alpha=0.5)\n axes.fill_between(c_2_bottom, c_2(c_2_bottom), color=plt.rcParams['axes.color_cycle'][1], alpha=0.5)\n \n # plot point\n axes.plot(step_coords[step][0], step_coords[step][1], 'ro', markersize=10)\n axes.text(step_coords[step][0]+0.1, step_coords[step][1], step_value[step])\n \n # label graph\n axes.set_title('How many solutions?')\n axes.set_xlabel('x_1')\n axes.set_ylabel('x_2')\n axes.legend(loc=1)\n\n plt.show()\n display(tableaux_multiple[step])",
"_____no_output_____"
],
[
"interact(multiple_problem, step=(0,2));",
"_____no_output_____"
]
],
[
[
"### Unbounded Optima",
"_____no_output_____"
],
[
"\\begin{align}\n\\text{maximise} \\quad & 5x_1 + 7x_2 \\\\\n\\text{subject to} \\quad & -x_1 + x_2 \\leq 5 \\\\\n & -\\frac{1}{2}x_1 + x_2 \\leq 7 \\\\\n\\text{and} \\quad & x_1, x_2 \\geq 0\n\\end{align}",
"_____no_output_____"
]
],
[
[
"fig = plt.figure()\naxes = fig.add_subplot(1,1,1)\n\n# define view\nx_1_min = 0.0\nx_1_max = 10.0\nx_2_min = 0.0\nx_2_max = 15.0\n# res = 100\n\n# plot axes\naxes.axhline(0, color='k')\naxes.axvline(0, color='k')\n\n# plot constraints\nx_1 = numpy.linspace(x_1_min, x_1_max, res)\nc_1 = lambda x: 5.0 + x\nc_2 = lambda x: 7 + 0.5*x\nc_1_line = axes.plot( x_1, c_1(x_1), label='Constraint 1' ) # -x_1 + x_2 \\leq 5\nc_2_line = axes.plot( x_1, c_2(x_1), label='Constraint 2' ) # -\\frac{1}{2}x_1 + x_2 \\leq 7\n\n# plot objective\nx_2 = numpy.linspace(x_2_min, x_2_max, res)\nc = numpy.empty([x_1.size, x_2.size])\nfor i, x_1_i in enumerate(x_1):\n c[:,i] = 5 * x_1_i + 7 * x_2\n\naxes.contourf(x_1, x_2, c, res, cmap='Oranges', alpha=0.5)\n# shade feasible region\n# c_1_bottom = numpy.linspace(0.0, 2.0, res)\n# c_2_bottom = numpy.linspace(0.0, 3.0, res)\naxes.fill_between(x_1, c_1(x_1), color=plt.rcParams['axes.color_cycle'][0], alpha=0.5)\naxes.fill_between(x_1, c_2(x_1), color=plt.rcParams['axes.color_cycle'][1], alpha=0.5)\n\n# label graph\naxes.set_title('Unbounded Optima')\naxes.set_xlabel(r'$x_1$')\naxes.set_ylabel(r'$x_2$')\naxes.legend(loc=2)\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"\\begin{align}\n\\text{maximise} \\quad & 5x_1 + 7x_2 \\\\\n\\text{subject to} \\quad & -x_1 + x_2 + s_1 = 5 \\\\\n & -\\frac{1}{2}x_1 + x_2 + s_2 = 7 \\\\\n\\text{and} \\quad & x_1, x_2, s_1, s_2 \\geq 0\n\\end{align}",
"_____no_output_____"
]
],
[
[
"c_1 = numpy.array([[ -1, 1, 1, 0, 5, 's_1']])\nc_2 = numpy.array([[-0.5, 1, 0, 1, 7, 's_2']])\nz = numpy.array([[ -5, -7, 0, 0, 0, '']])\nrows= numpy.concatenate((c_1, c_2, z), axis=0)\n\ntableau_unbounded = pd.DataFrame(rows, columns=['x_1','x_2','s_1','s_2','value', 'basic_variable'], index=['c_1','c_2','z']) \ntableau_unbounded.ix[:,0:-1] = tableau_unbounded.ix[:,0:-1].astype('float')",
"_____no_output_____"
],
[
"tableaux_unbounded = dict()\nrun_simplex(tableaux_unbounded, tableau_unbounded)",
"_____no_output_____"
]
],
[
[
"We got an error!\n\n`ValueError: attempt to get argmin of an empty sequence`\n\nUsually, errors are bad things, but in this case, the error is trying to tell us something\n\nIn the code:\n\n`return tab.ix[ratios[ratios>=0].idxmin(),'basic_variable']`\n\nWhich is telling us that no non-negative ratio was found! Why is this a problem for us? Let's take a look at the equations our tableau, and our equations at this point in time:",
"_____no_output_____"
]
],
[
[
"display(tableau_unbounded)",
"_____no_output_____"
]
],
[
[
"\\begin{gather}\n z = 83 + 17 s_1 - 24 s_2 \\\\\nx_1 = 4 + 2 s_1 - 2 s_2 \\\\\nx_2 = 9 + s_1 - 2 s_2\n\\end{gather}",
"_____no_output_____"
],
[
"At this point, we want to pick $s_1$ as our entering variable because it is has most negative coefficient, and increasing the value of $s_1$ would most increase the value of $z$.\n\nUsually, increasing the value of $s_1$ would also mean that we have to decrease the value of one of the basic variables to 0 (so that we stay within our feasible region).\n\nHere, what we have is that **increasing the value of $s_1$ would also increase the value of both our basic variables**, which means that our objective function will be able to increase without bound.\n\nSo the simplex method is able to tell us when our problem is unbounded, by virtue of the fact that the negative coefficient in the tableau indicates that we have not attained the optimum, but we are also unable to find a positive ratio to choose our departing variable.",
"_____no_output_____"
],
[
"### Degeneracy and Cycling\n\nDisclaimer: this example was stolen from [here](http://mat.gsia.cmu.edu/classes/QUANT/NOTES/chap7.pdf).\n\n\\begin{align}\n\\text{maximise} \\quad & 2x_1 + 7x_2 \\\\\n\\text{subject to} \\quad & -x_1 + x_2 \\leq 3 \\\\\n & x_1 - x_2 \\leq 3 \\\\\n & x_2 \\leq 2 \\\\\n\\text{and} \\quad & x_1, x_2 \\geq 0\n\\end{align}",
"_____no_output_____"
]
],
[
[
"fig = plt.figure()\naxes = fig.add_subplot(1,1,1)\n\n# define view\nx_1_min = 0.0\nx_1_max = 3.0\nx_2_min = 0.0\nx_2_max = 5.0\n# res = 100\n\n# plot axes\naxes.axhline(0, color='k')\naxes.axvline(0, color='k')\n\n# plot constraints\nx_1 = numpy.linspace(x_1_min, x_1_max, res)\nc_1 = lambda x: 3.0 - x\nc_2 = lambda x: -3.0 + x\nc_3 = lambda x: 2.0 * numpy.ones(x.size)\nc_1_line = axes.plot( x_1, c_1(x_1), label='Constraint 1' ) # 2x_1 + x_2 \\leq 4\nc_2_line = axes.plot( x_1, c_2(x_1), label='Constraint 2' ) # 10x_1 + 14x_2 \\leq 30\nc_3_line = axes.plot( x_1, c_3(x_1), label='Constraint 3' ) # -2x_1 + x_2 \\leq 0\n\n# plot objective\nx_2 = numpy.linspace(x_2_min, x_2_max, res)\nc = numpy.empty([x_1.size, x_2.size])\nfor i, x_1_i in enumerate(x_1):\n c[:,i] = 2.0 * x_1_i + x_2\n\naxes.contourf(x_1, x_2, c, res, cmap='Oranges', alpha=0.5)\n# shade feasible region\nc_1_bottom = numpy.linspace(0.0, 3.0, res)\nc_2_bottom = numpy.linspace(0.0, 3.0, res)\nc_3_bottom = numpy.linspace(0.0, 3.0, res)\naxes.fill_between(c_1_bottom, c_1(c_1_bottom), color=plt.rcParams['axes.color_cycle'][0], alpha=0.5)\naxes.fill_between(c_2_bottom, c_2(c_2_bottom), color=plt.rcParams['axes.color_cycle'][1], alpha=0.5)\naxes.fill_between(c_3_bottom, c_3(c_3_bottom), color=plt.rcParams['axes.color_cycle'][2], alpha=0.5)\n\n# label graph\naxes.set_title('Degeneracy and Cycling')\naxes.set_xlabel(r'x_1')\naxes.set_ylabel(r'x_2')\naxes.legend(loc=1)\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"\\begin{align}\n\\text{maximise} \\quad & 2x_1 + 7x_2 \\\\\n\\text{subject to} \\quad & -x_1 + x_2 + s_1 = 3 \\\\\n & x_1 - x_2 + s_2 = 3 \\\\\n & x_2 + s_3 = 2 \\\\\n\\text{and} \\quad & \\{x_i\\}_{i=1}^2, \\{s_j\\}_{j=1}^3 \\geq 0\n\\end{align}",
"_____no_output_____"
]
],
[
[
"c_1 = numpy.array([[ 3, 1, 1, 0, 0, 6, 's_1']])\nc_2 = numpy.array([[ 1, -1, 0, 1, 0, 2, 's_2']])\nc_3 = numpy.array([[ 0, 1, 0, 0, 1, 3, 's_3']])\nz = numpy.array([[-2, -1, 0, 0, 0, 0, '']])\nrows= numpy.concatenate((c_1, c_2, c_3, z), axis=0)\n\ntableau_degenerate = pd.DataFrame(rows, columns=['x_1','x_2','s_1','s_2','s_3','value', 'basic_variable'], index=['c_1','c_2','c_3','z']) \ntableau_degenerate.ix[:,0:-1] = tableau_degenerate.ix[:,0:-1].astype('float')",
"_____no_output_____"
],
[
"tableaux_degenerate = dict()\nrun_simplex(tableaux_degenerate, tableau_degenerate)",
"_____no_output_____"
],
[
"step_coords = numpy.transpose([numpy.zeros(len(tableaux_degenerate)), 2.0*numpy.ones(len(tableaux_degenerate))])\nstep_coords[0][1] = 0.0\n\ndef degeneracy_plot(step):\n fig = plt.figure()\n axes = fig.add_subplot(1,1,1)\n\n # define view\n x_1_min = 0.0\n x_1_max = 3.0\n x_2_min = 0.0\n x_2_max = 5.0\n# res = 100\n\n # plot axes\n axes.axhline(0, color='k')\n axes.axvline(0, color='k')\n\n # plot constraints\n x_1 = numpy.linspace(x_1_min, x_1_max, res)\n c_1 = lambda x: 3.0 - x\n c_2 = lambda x: -3.0 + x\n c_3 = lambda x: 2.0 * numpy.ones(x.size)\n c_1_line = axes.plot( x_1, c_1(x_1), label='Constraint 1' ) # 2x_1 + x_2 \\leq 4\n c_2_line = axes.plot( x_1, c_2(x_1), label='Constraint 2' ) # 10x_1 + 14x_2 \\leq 30\n c_3_line = axes.plot( x_1, c_3(x_1), label='Constraint 3' ) # -2x_1 + x_2 \\leq 0\n\n # plot objective\n x_2 = numpy.linspace(x_2_min, x_2_max, res)\n c = numpy.empty([x_1.size, x_2.size])\n for i, x_1_i in enumerate(x_1):\n c[:,i] = 2.0 * x_1_i + x_2\n\n axes.contourf(x_1, x_2, c, res, cmap='Oranges', alpha=0.5)\n # shade feasible region\n c_1_bottom = numpy.linspace(0.0, 3.0, res)\n c_2_bottom = numpy.linspace(0.0, 3.0, res)\n c_3_bottom = numpy.linspace(0.0, 3.0, res)\n axes.fill_between(c_1_bottom, c_1(c_1_bottom), color=plt.rcParams['axes.color_cycle'][0], alpha=0.5)\n axes.fill_between(c_2_bottom, c_2(c_2_bottom), color=plt.rcParams['axes.color_cycle'][1], alpha=0.5)\n axes.fill_between(c_3_bottom, c_3(c_3_bottom), color=plt.rcParams['axes.color_cycle'][2], alpha=0.5)\n\n # plot point\n axes.plot(step_coords[step][0], step_coords[step][1], 'ro', markersize=10)\n \n # label graph\n axes.set_title('Degeneracy and Cycling, Iteration ' + str(step))\n axes.set_xlabel(r'x_1')\n axes.set_ylabel(r'x_2')\n axes.legend(loc=1)\n\n plt.show()\n display(tableaux_degenerate[step])",
"_____no_output_____"
],
[
"interact(degeneracy_plot, step=(0,len(tableaux_degenerate)-1))",
"_____no_output_____"
]
],
[
[
 You think you're moving,">
"> You think you're moving, but you get nowhere. — _Stop and Stare_, OneRepublic\n\nAs its name suggests, degeneracy is when a basic variable (that's supposed to have a non-zero value) takes the value 0. The simplex method can then change basis without actually moving to a new vertex or improving the objective function.\n\nIn general, predicting when degeneracy will occur is non-trivial; one [source](http://theory.stanford.edu/~megiddo/pdf/degen.pdf) claims that it is NP-complete. You can read more about it [here](http://www.amazon.com/Degeneracy-Simplex-Cycling-Economics-Mathematical/dp/354054593X).\n\n#### Bland's Rule\n\n* Among the non-basic variables with a negative coefficient in the objective row, choose the one with the **smallest index** as the entering variable\n* If several rows tie for the smallest non-negative value/pivot ratio, choose the basic variable with the smallest index as the departing variable\n\nUsing Bland's Rule, the Simplex Method will never cycle if it encounters degeneracy (i.e. it halts on all inputs). A sketch of an index-based entering rule is given below.",
"_____no_output_____"
]
],
[
[
"tableaux_degenerate[1]",
"_____no_output_____"
]
],
[
[
"Without Bland's Rule, one could potentially choose to pivot on $s_2$, which will give us",
"_____no_output_____"
]
],
[
[
"pivot('s_2', 'x_2', tableaux_degenerate[1])\ntableaux_degenerate[1]",
"_____no_output_____"
]
],
[
[
"Choosing $x_2$ to pivot back to seems like a good idea, right? Nope.",
"_____no_output_____"
]
],
[
[
"pivot('x_2', 's_2', tableaux_degenerate[1])\ntableaux_degenerate[1]",
"_____no_output_____"
]
],
[
[
"Cycling, ladies and gentlemen, aka a slow spiral into insanity.",
"_____no_output_____"
],
[
"##### $\\epsilon-$perturbations\n\nAnother earlier (and nowadays less popular) method for avoiding degeneracy is by introducing $\\epsilon$-perturbations into the problem. Recall that the standard system goes like\n\n\\begin{align}\n\\text{maximise} \\quad & c^T x \\\\\n\\text{subject to} \\quad & Ax = b \\\\\n\\text{and} \\quad & x \\geq 0\n\\end{align}\n\nWith $\\epsilon$-perturbations, we will instead solve\n\n\\begin{align}\n\\text{maximise} \\quad & c^T x \\\\\n\\text{subject to} \\quad & Ax = b + \\epsilon \\\\\n\\text{and} \\quad & x \\geq 0\n\\end{align}\n\nwhich will give us a close enough answer to the original problem, and help us avoid the problem with the 0's. This kind of happens automatically as a bonus if you're running the simplex algorithm on a computer; as the program runs, errors from truncation, etc. build up, and you eventually get out of the cycle because your computer is doing floating point arithmetic.\n\nWhich is just about the one good thing about floating point arithmetic, I guess.",
"_____no_output_____"
],
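[
"A minimal sketch of the idea applied to the right-hand sides of the degenerate tableau above. The perturbation size and the use of `numpy.random` here are illustrative assumptions rather than a standard recipe:\n\n```python\nimport numpy\n\n# tiny, distinct perturbations of the right-hand sides\neps = 1e-7 * numpy.random.rand(3)\nb_perturbed = numpy.array([6.0, 2.0, 3.0]) + eps\n\n# with distinct perturbed right-hand sides, exact ties in the\n# ratio test (the source of degenerate pivots) almost surely vanish\n```",
"_____no_output_____"
],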
[
"### Time Complexity of the Simplex Method\n#### The Klee-Minty Cube",
"_____no_output_____"
],
[
"\\begin{align}\n\\text{maximise} \\quad & 100x_1 + 10x_2 + x_3 \\\\\n\\text{subject to} \\quad & x_1 \\leq 1 \\\\\n & 20x_1 + x_2 \\leq 100 \\\\\n & 200x_1 + 20x_2 + x_3 \\leq 10000\\\\\n\\text{and} \\quad & x_1, x_2, x_3 \\geq 0\n\\end{align}",
"_____no_output_____"
]
],
[
[
"c_1 = numpy.array([[ 1, 0, 0, 1, 0, 0, 1, 's_1']])\nc_2 = numpy.array([[ 20, 1, 0, 0, 1, 0, 100, 's_2']])\nc_3 = numpy.array([[ 200, 20, 1, 0, 0, 1, 10000, 's_3']])\nz = numpy.array([[-100, -10, -1, 0, 0, 0, 0, '']])\nrows= numpy.concatenate((c_1, c_2, c_3, z), axis=0)\n\ntableau_klee_minty = pd.DataFrame(rows, columns=['x_1','x_2', 'x_3','s_1','s_2','s_3','value', 'basic_variable'], index=['c_1','c_2','c_3','z']) \ntableau_klee_minty.ix[:,0:-1] = tableau_klee_minty.ix[:,0:-1].astype('float')",
"_____no_output_____"
],
[
"tableaux_klee_minty = dict()\nrun_simplex(tableaux_klee_minty, tableau_klee_minty)",
"_____no_output_____"
]
],
[
[
"Notice that our ending basic variables are $s_1$, $s_2$, and $x_3$, which means that if we had chosen $x_3$ to enter instead of $x_1$, we could have been done in one step! The greedy algoritm of the simplex method backfires on us here, since choosing the non-basic variable with the **least negative** coefficient would have gotten us to our solution much faster!\n\nInstead, using the simplex method, we are forced to visit all 8 corners of the constraint cube in our quest to find the optimum.",
"_____no_output_____"
],
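[
"For reference, the 100/10/1 coefficient pattern above generalises to $n$ dimensions. A sketch of a generator (the helper name is hypothetical, and this is not part of the original notebook):\n\n```python\ndef klee_minty(n):\n    # returns (c, A, b) for: maximise c.x subject to A.x <= b, x >= 0\n    A = numpy.zeros((n, n))\n    b = numpy.array([100.0**i for i in range(n)])\n    c = numpy.array([10.0**(n - 1 - j) for j in range(n)])\n    for i in range(n):\n        A[i, i] = 1.0\n        for j in range(i):\n            A[i, j] = 2.0 * 10.0**(i - j)\n    return c, A, b\n```\n\nWith the most-negative-coefficient rule, the simplex method can be made to visit all $2^n$ vertices of this cube.",
"_____no_output_____"
],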
[
"#### Defeating the Klee-Minty Cube\n\nThe Klee-Minty Cube is the de-facto standard nowadays for determining how well a linear programming algorithm would perform. Algorithms like the Simplex Method and the Criss-Cross algorithm have exponential worst-case time complexities.\n\nOther methods that choose pivots randomly have been shown to have polynomial worst-case time bounds, but perform worst than the Simplex method on average.",
"_____no_output_____"
],
[
"## Open Problems in Linear Programming\n\n1. Integer solutions?\n2. \"Strongly polynomial\" solutions",
"_____no_output_____"
],
[
"## References\n\nGeorge B. Dantzig, Mukund N. Thapa _Linear Programming 1- Introduction_ (Springer Series in Operations Research and Financial Engineering)\nhttp://mat.gsia.cmu.edu/classes/QUANT/NOTES/chap7.pdf\nhttp://college.cengage.com/mathematics/larson/elementary_linear/4e/shared/downloads/c09s3.pdf\nhttp://college.cengage.com/mathematics/larson/elementary_linear/4e/shared/downloads/c09s4.pdf\nhttp://www.iip.ist.i.kyoto-u.ac.jp/member/cuturi/Teaching/ORF522/lec8v2.pdf\nhttp://math.stackexchange.com/questions/82006/in-simplex-method-if-the-leaving-variable-fails-for-all-candidates-of-mrt-what",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
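"markdown",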
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
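"markdown",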
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
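"markdown",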
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
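"markdown",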
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
e7130377b362af6977554f7fdace89b8cfe041fc | 313,329 | ipynb | Jupyter Notebook | Final Project/CM_Decision Tree.ipynb | tmarissa/DATA-601 | 9c2b72c5dc2cf7e0e620a254d1c8a894a3706062 | [
"MIT"
] | 1 | 2022-02-03T18:33:24.000Z | 2022-02-03T18:33:24.000Z | Final Project/CM_Decision Tree.ipynb | tmarissa/DATA-601 | 9c2b72c5dc2cf7e0e620a254d1c8a894a3706062 | [
"MIT"
] | null | null | null | Final Project/CM_Decision Tree.ipynb | tmarissa/DATA-601 | 9c2b72c5dc2cf7e0e620a254d1c8a894a3706062 | [
"MIT"
] | null | null | null | 638.144603 | 170,620 | 0.947311 | [
[
[
"import pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\n",
"_____no_output_____"
]
],
[
[
"# DATASET\nThe dataset below has been explained in earlier notebooks and is being used here to carry on the data. ",
"_____no_output_____"
]
],
[
[
"contraceptive_data = pd.read_csv(\"contraceptive_method_dataset.csv\", \n encoding = \"ISO-8859-1\", engine='python')\nX = contraceptive_data.drop('children', axis=1).copy()\ncontraceptive_data['predictor_population']= pd.cut(contraceptive_data['children'],\n [-1,2,16], labels=[0,1])\ncontraceptive_data['predictor_population_i']= contraceptive_data['predictor_population'].astype(int)\ny= contraceptive_data['predictor_population_i']",
"_____no_output_____"
]
],
[
[
"# Splitting of the Training and Testing Data\nTraining mean and std calculated when the X train was fitted into the std scaler. \nFor the X train scaled, the training mean and std was used only. \nWhile for the X test scaled, the test data was transformed.\n",
"_____no_output_____"
]
],
[
[
"X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, \n random_state = 24, stratify = y )\nstd_scaler = StandardScaler()\nstd_scaler.fit(X_train)\nX_train_scaled = std_scaler.transform(X_train)\nX_test_scaled = std_scaler.transform(X_test)",
"_____no_output_____"
]
],
[
[
"# Decision Tree\n",
"_____no_output_____"
]
],
[
[
"#Import Decision Tree and instantiate\nfrom sklearn.tree import DecisionTreeClassifier\n\ndt_clf = DecisionTreeClassifier()\ndt_clf = dt_clf.fit(X_train, y_train)\ndt_clf_score = dt_clf.score(X_train, y_train)\nprint('Decision Tree Score with X_train and y_train: ', dt_clf_score)",
"Decision Tree Score with X_train and y_train: 0.9815712900096993\n"
],
[
"dt_clf.score(X_test, y_test)",
"_____no_output_____"
]
],
[
[
"_Note_\n- The decision tree train score (98.15%) is higher than the test score (65.61%).",
"_____no_output_____"
],
[
"## Preliminary Classification of a Decision Tree",
"_____no_output_____"
]
],
[
[
"from sklearn.tree import plot_tree\nimport matplotlib.pyplot as plt\n\n#Plot the tree\nplt.figure(figsize = (8, 14))\nplot_tree(dt_clf, filled = True, rounded=True,\n class_names = [\"No Population Increase\", \"Population Increase\"], feature_names= X.columns);",
"_____no_output_____"
]
],
[
[
"## Plotting of the Confusion Matrix",
"_____no_output_____"
]
],
[
[
"#Import confusion matric and plot confusion matrix\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import plot_confusion_matrix\n\nplot_confusion_matrix(dt_clf, X_test, y_test, \n display_labels=[\"No Population Increase\", \"Population Increase\"])",
"_____no_output_____"
]
],
[
[
"_Notes_\n- Of the No Population Increase 195 which is 128 + 67, 128 (65.65%) is correctly classified. While for the Population Increase 247 (74 + 173), 173 (70.04%) is correctly classified.",
"_____no_output_____"
],
[
"## Cross Validation Score Plot",
"_____no_output_____"
]
],
[
[
"#Import Cross Validation Score\nfrom sklearn.model_selection import cross_val_score\n\ndt_clf = DecisionTreeClassifier(random_state= 42, ccp_alpha=0.002)\nscores = cross_val_score(dt_clf, X_train, y_train, cv=5)\ndf = pd.DataFrame(data={'tree':range(5), 'accuracy':scores})\ndf.plot(x='tree', y='accuracy', marker='o', linestyle='--')",
"_____no_output_____"
]
],
[
[
"## Cross Validation in 5 Split\n",
"_____no_output_____"
]
],
[
[
"from sklearn.model_selection import cross_validate\ncv_fivefold = cross_validate(estimator= dt_clf, \n X = X_train,\n y = y_train,\n cv = 5,\n return_train_score= True, \n return_estimator= True, \n verbose = 2)",
"[CV] ................................................................\n[CV] ................................................. , total= 0.0s\n[CV] ................................................................\n[CV] ................................................. , total= 0.0s\n[CV] ................................................................\n[CV] ................................................. , total= 0.0s\n[CV] ................................................................\n[CV] ................................................. , total= 0.0s\n[CV] ................................................................\n[CV] ................................................. , total= 0.0s\n"
],
[
"cv_fivefold['test_score']",
"_____no_output_____"
],
[
"cv_fivefold['train_score']",
"_____no_output_____"
],
[
"## fit best regularization logreg and plot the confusion matrix\nvalidation = cv_fivefold['test_score']\nvalidation_mean = cv_fivefold['test_score'].mean()\n\nvalidation_std = cv_fivefold['test_score'].std()\n\nprint('Decision Tree Log Regression 5-fold cv results (Accuracy) %.3f +/- %.3f'%(validation_mean, validation_std))",
"Decision Tree Log Regression 5-fold cv results (Accuracy) 0.734 +/- 0.015\n"
]
],
[
[
"_Note_\n- Decision Tree Log Regression 5-fold cv results (Accuracy) 0.734 +/- 0.015",
"_____no_output_____"
]
],
[
[
"clf_dt_pruned = DecisionTreeClassifier(random_state = 42, ccp_alpha= cv_fivefold)\nclf_dt_pruned = dt_clf.fit(X_train, y_train)",
"_____no_output_____"
],
[
"plot_confusion_matrix(clf_dt_pruned, X_test, y_test, \n display_labels=[\"No Increase Pop.\", \"Increase Pop.\"])",
"_____no_output_____"
]
],
[
[
"_Notes_\n- Of the No Population Increase 195 which is 114 + 81, 114 (58.46%) is correctly classified. While for the Population Increase 247 (38 + 209), 209 (84.61%) is correctly classified.",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize = (10, 15))\nplot_tree(clf_dt_pruned, filled = True, rounded=True,\n class_names = [\"No Increase Pop.\", \"Increase Pop.\"], feature_names= X.columns);",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
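"code",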
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7130fe5c52b47e106cc5ba56cc553a170663a10 | 1,524 | ipynb | Jupyter Notebook | _downloads/plot_subplot-vertical.ipynb | scipy-lectures/scipy-lectures.github.com | 637a0d9cc2c95ed196550371e44a4cc6e150c830 | [
"CC-BY-4.0"
] | 48 | 2015-01-13T22:15:34.000Z | 2022-01-04T20:17:41.000Z | _downloads/plot_subplot-vertical.ipynb | scipy-lectures/scipy-lectures.github.com | 637a0d9cc2c95ed196550371e44a4cc6e150c830 | [
"CC-BY-4.0"
] | 1 | 2017-04-25T09:01:00.000Z | 2017-04-25T13:48:56.000Z | _downloads/plot_subplot-vertical.ipynb | scipy-lectures/scipy-lectures.github.com | 637a0d9cc2c95ed196550371e44a4cc6e150c830 | [
"CC-BY-4.0"
] | 21 | 2015-03-16T17:52:23.000Z | 2021-02-19T00:02:13.000Z | 28.222222 | 401 | 0.498031 | [
[
[
"%matplotlib inline",
"_____no_output_____"
]
],
[
[
"\nSubplot plot arrangement vertical\n==================================\n\nAn example showing vertical arrangement of subplots with matplotlib.\n\n",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\n\n\nplt.figure(figsize=(6, 4))\nplt.subplot(1, 2, 1)\nplt.xticks([])\nplt.yticks([])\nplt.text(0.5, 0.5, 'subplot(1,2,1)', ha='center', va='center',\n size=24, alpha=.5)\n\nplt.subplot(1, 2, 2)\nplt.xticks([])\nplt.yticks([])\nplt.text(0.5, 0.5, 'subplot(1,2,2)', ha='center', va='center',\n size=24, alpha=.5)\n\nplt.tight_layout()\nplt.show()",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e713237e1230a2a4985797f38af176732dad620b | 39,671 | ipynb | Jupyter Notebook | examples/experimental/DP Tensor Experimentation - v4.ipynb | sashank06/PySyft | dd288be096dba8cb9074de9d27de97c9c659ba98 | [
"Apache-2.0"
] | 3 | 2020-09-23T14:09:09.000Z | 2020-09-23T19:26:28.000Z | examples/experimental/DP Tensor Experimentation - v4.ipynb | sashank06/PySyft | dd288be096dba8cb9074de9d27de97c9c659ba98 | [
"Apache-2.0"
] | 3 | 2019-05-24T01:16:56.000Z | 2019-09-18T13:02:30.000Z | examples/experimental/DP Tensor Experimentation - v4.ipynb | sashank06/PySyft | dd288be096dba8cb9074de9d27de97c9c659ba98 | [
"Apache-2.0"
] | 1 | 2021-12-31T09:27:55.000Z | 2021-12-31T09:27:55.000Z | 39.473632 | 371 | 0.431272 | [
[
[
"# DP Tensor Experiment - v4\n\n### Purpose:\nI realized that there was some core functionality i got wrong in the last one - namely becuse I used Counter() objects intead of dictionaries - which set every min/max value to be 0 by default. This can cause wrong errors (see conclusions in v2 and v3). So I need to swap out counter() bjects with disctionaries and add functionality for when there is a KeyError\n\n### Conclusions:\n\nSeems to be a pretty simple modification! Time to vectorize!\n\n",
"_____no_output_____"
]
],
[
[
"from collections import Counter\nimport numpy as np\n\nclass PrivateNumber():\n \n def __init__(self, value, max_val, min_val):\n self.value = value\n self.max_val = max_val\n self.min_val = min_val\n \n def __add__(self, other):\n \n # add to a private number\n \n if(isinstance(other, PrivateNumber)):\n\n entities = self.entities.union(other.entities)\n \n new_val = self.value + other.value\n\n entities = set(self.max_val.keys()).union(set(other.max_val.keys()))\n\n new_max_val = {}\n new_min_val = {}\n for entity in entities:\n \n self_max = self.max_val[entity] if entity in self.max_val else 0\n other_max = other.max_val[entity] if entity in other.max_val else 0\n \n self_min = self.min_val[entity] if entity in self.min_val else 0\n other_min = other.min_val[entity] if entity in other.min_val else 0\n \n new_max_val[entity] = self_max + other_max\n new_min_val[entity] = self_min + other_min\n\n else:\n\n entities = self.entities\n\n # add to a public number\n new_val = self.value + other\n \n new_max_val = {}\n new_min_val = {}\n for entity in entities:\n new_max_val[entity] = self.max_val[entity] + other\n new_min_val[entity] = self.min_val[entity] + other\n\n return PrivateNumber(new_val,\n new_max_val,\n new_min_val)\n \n def __mul__(self, other):\n \n if(isinstance(other, PrivateNumber)):\n \n new_self_max_val = {}\n new_self_min_val = {}\n for entity in self.entities:\n \n # the biggest positive number this entity could contribute is when\n # it is multiplied by the largest value of the same sign from other\n new_self_max_val[entity] = max(self.min_val[entity] * other.xmin, \n self.max_val[entity] * other.xmax)\n \n # the smallest negative number this entity could contribute is when\n # it is multiplied by the largest value of the opposite sign from other\n new_self_min_val[entity] = min(self.min_val[entity] * other.xmax,\n self.max_val[entity] * other.xmin)\n \n new_other_max_val = {}\n new_other_min_val = {}\n for entity in other.entities:\n \n # the biggest positive number this entity could contribute is when\n # it is multiplied by the largest value of the same sign from other\n new_other_max_val[entity] = max(other.min_val[entity] * self.xmin, \n other.max_val[entity] * self.xmax)\n \n # the smallest negative number this entity could contribute is when\n # it is multiplied by the largest value of the opposite sign from other\n new_other_min_val[entity] = min(other.min_val[entity] * self.xmax,\n other.max_val[entity] * self.xmin)\n \n new_max_val = {}\n new_min_val = {}\n \n for entity in self.entities:\n left = new_self_max_val[entity] if entity in new_self_max_val else -(2**32)\n right = new_other_max_val[entity] if entity in new_other_max_val else -(2**32)\n \n new_max_val[entity] = max(left, right)\n \n left = new_self_min_val[entity] if entity in new_self_min_val else 2**32\n right = new_other_min_val[entity] if entity in new_other_min_val else 2**32\n \n new_min_val[entity] = min(left, right)\n\n return PrivateNumber(self.value * other.value,\n new_max_val,\n new_min_val)\n \n \n new_max_val = {}\n for entity in self.entities:\n new_max_val[entity] = self.max_val[entity] * other\n\n new_min_val = {}\n for entity in self.entities:\n new_min_val[entity] = self.min_val[entity] * other\n \n if(other > 0):\n return PrivateNumber(self.value * other,\n new_max_val,\n new_min_val)\n else:\n return PrivateNumber(self.value * other,\n new_min_val, \n new_max_val)\n \n def __neg__(self):\n return self * -1\n \n def __sub__(self, other):\n return self + (-other)\n\n def __gt__(self, 
other):\n\n if(isinstance(other, PrivateNumber)):\n \n new_self_max_val = {}\n new_self_min_val = {}\n for entity in self.entities:\n \n if not (self.min_val[entity] > other.xmax or self.max_val[entity] < other.xmin):\n new_self_max_val[entity] = 1\n else:\n new_self_max_val[entity] = 0\n \n new_self_min_val[entity] = 0\n \n new_other_max_val = {}\n new_other_min_val = {}\n for entity in other.entities:\n \n if not (other.min_val[entity] > self.xmax or other.max_val[entity] < self.xmin):\n new_other_max_val[entity] = 1\n else:\n new_other_max_val[entity] = 0\n \n new_other_min_val[entity] = 0\n \n new_max_val = {}\n new_min_val = {}\n \n entities = self.entities.union(other.entities)\n \n for entity in entities:\n \n new_self_max = new_self_max_val[entity] if entity in new_self_max_val else -999999999\n new_other_max = new_other_max_val[entity] if entity in new_other_max_val else -999999999\n \n new_self_min = new_self_min_val[entity] if entity in new_self_min_val else 99999999\n new_other_min = new_other_min_val[entity] if entity in new_other_min_val else 99999999\n \n new_max_val[entity] = max(new_self_max, new_other_max)\n new_min_val[entity] = min(new_self_min, new_other_min)\n\n result = int(self.value > other.value)\n else:\n\n entities = self.entities.union(other.entities)\n\n new_max_val = {}\n new_min_val = {}\n for entity in entities:\n\n new_min_val[entity] = 0\n\n if(other <= self.max_val[entity] and other >= self.min_val[entity]): \n new_max_val[entity] = 1\n else:\n new_max_val[entity] = 0\n\n result = int(self.value > other)\n \n return PrivateNumber(result,\n new_max_val,\n new_min_val)\n \n def __lt__(self, other):\n \n if(isinstance(other, PrivateNumber)):\n \n entities = self.entities.union(other.entities)\n \n new_self_max_val = {}\n new_self_min_val = {}\n \n for entity in self.entities:\n \n if not (self.min_val[entity] > other.xmax or self.max_val[entity] < other.xmin):\n new_self_max_val[entity] = 1\n else:\n new_self_max_val[entity] = 0\n \n new_self_min_val[entity] = 0\n \n new_other_max_val = {}\n new_other_min_val = {}\n \n for entity in other.entities:\n \n if not (other.min_val[entity] > self.xmax or other.max_val[entity] < self.xmin):\n new_other_max_val[entity] = 1\n else:\n new_other_max_val[entity] = 0\n \n new_other_min_val[entity] = 0\n \n new_max_val = {}\n new_min_val = {}\n \n entities = self.entities.union(other.entities)\n \n for entity in entities:\n \n new_self_max = new_self_max_val[entity] if entity in new_self_max_val else -999999999\n new_other_max = new_other_max_val[entity] if entity in new_other_max_val else -999999999\n \n new_self_min = new_self_min_val[entity] if entity in new_self_min_val else 99999999\n new_other_min = new_other_min_val[entity] if entity in new_other_min_val else 99999999\n \n new_max_val[entity] = max(new_self_max, new_other_max)\n new_min_val[entity] = min(new_self_min, new_other_min)\n\n result = int(self.value < other.value)\n \n else:\n\n entities = self.entities\n\n new_max_val = {}\n new_min_val = {}\n for entity in entities:\n\n new_min_val[entity] = 0\n\n if(other <= self.max_val[entity] and other >= self.min_val[entity]): \n new_max_val[entity] = 1\n else:\n new_max_val[entity] = 0\n\n result = int(self.value < other)\n \n return PrivateNumber(result,\n new_max_val,\n new_min_val)\n \n def max(self, other):\n \n if(isinstance(other, PrivateNumber)):\n raise Exception(\"Not implemented yet\")\n \n entities = self.entities\n \n new_min_val = {}\n for entity in entities:\n new_min_val[entity] = 
max(self.min_val[entity], other)\n \n return PrivateNumber(max(self.value, other),\n self.max_val,\n new_min_val)\n \n def min(self, other):\n \n if(isinstance(other, PrivateNumber)):\n raise Exception(\"Not implemented yet\")\n \n entities = self.entities\n \n new_max_val = {}\n for entity in entities:\n new_max_val[entity] = min(self.max_val[entity], other)\n \n return PrivateNumber(min(self.value, other),\n new_max_val,\n self.min_val)\n \n def __repr__(self):\n return str(self.value) + \" \" + str(self.max_val) + \" \" + str(self.min_val)\n \n def hard_sigmoid(self):\n return self.min(1).max(0)\n \n def hard_sigmoid_deriv(self):\n return ((self < 1) * (self > 0)) + (self < 0) * 0.01 - (self > 1) * 0.01\n \n @property\n def xmin(self):\n items = list(self.min_val.items())\n out = items[0][1]\n for k,v in items[1:]:\n if(v < out):\n out = v\n return out\n \n @property\n def xmax(self):\n items = list(self.max_val.items())\n out = items[0][1]\n for k,v in items[1:]:\n if(v > out):\n out = v\n return out\n \n @property\n def entities(self):\n return set(self.max_val.keys())\n \n @property\n def sensitivity(self):\n sens = Counter()\n for entity, value in self.max_val.items():\n sens[entity] = value - self.min_val[entity]\n return sens.most_common()[0][1]",
"_____no_output_____"
],
[
"x = PrivateNumber(0.5,{\"bob\":4, \"amos\":3},{\"bob\":3, \"amos\":2})\ny = PrivateNumber(1,{\"bob\":1},{\"bob\":-1})\nz = PrivateNumber(-0.5,{\"sue\":2},{\"sue\":-1})",
"_____no_output_____"
],
[
"a = y < z",
"_____no_output_____"
],
[
"a.sensitivity",
"_____no_output_____"
],
[
"from collections import Counter\nimport numpy as np\nclass PrivateNumber():\n \n def __init__(self, value, max_val, min_val):\n self.value = value\n self.max_val = max_val\n self.min_val = min_val\n \n def __add__(self, other):\n \n # add to a private number\n \n if(isinstance(other, PrivateNumber)):\n\n entities = self.entities.union(other.entities)\n \n new_val = self.value + other.value\n\n entities = set(self.max_val.keys()).union(set(other.max_val.keys()))\n\n new_max_val = Counter()\n new_min_val = Counter() \n for entity in entities:\n new_max_val[entity] = self.max_val[entity] + other.max_val[entity]\n new_min_val[entity] = self.min_val[entity] + other.min_val[entity]\n\n return PrivateNumber(self.value + other.value,\n new_max_val,\n new_min_val)\n \n entities = self.entities\n \n # add to a public number\n \n new_max_val = Counter()\n new_min_val = Counter() \n for entity in entities:\n new_max_val[entity] = self.max_val[entity] + other\n new_min_val[entity] = self.min_val[entity] + other\n \n return PrivateNumber(self.value + other,\n new_max_val,\n new_min_val)\n\n def __mul__(self, other):\n \n if(isinstance(other, PrivateNumber)):\n \n entities = self.entities.union(other.entities)\n \n new_self_max_val = Counter()\n new_self_min_val = Counter() \n for entity in entities:\n \n # the biggest positive number this entity could contribute is when\n # it is multiplied by the largest value of the same sign from other\n new_self_max_val[entity] = max(self.min_val[entity] * other.xmin, \n self.max_val[entity] * other.xmax)\n \n # the smallest negative number this entity could contribute is when\n # it is multiplied by the largest value of the opposite sign from other\n new_self_min_val[entity] = min(self.min_val[entity] * other.xmax,\n self.max_val[entity] * other.xmin)\n \n new_other_max_val = Counter()\n new_other_min_val = Counter() \n for entity in entities:\n \n # the biggest positive number this entity could contribute is when\n # it is multiplied by the largest value of the same sign from other\n new_other_max_val[entity] = max(other.min_val[entity] * self.xmin, \n other.max_val[entity] * self.xmax)\n \n # the smallest negative number this entity could contribute is when\n # it is multiplied by the largest value of the opposite sign from other\n new_other_min_val[entity] = min(other.min_val[entity] * self.xmax,\n other.max_val[entity] * self.xmin)\n \n new_max_val = Counter()\n new_min_val = Counter()\n \n for entity in entities:\n new_max_val[entity] = max(new_self_max_val[entity], new_other_max_val[entity])\n new_min_val[entity] = min(new_self_min_val[entity], new_other_min_val[entity])\n\n return PrivateNumber(self.value * other.value,\n new_max_val,\n new_min_val)\n \n entities = self.entities\n \n new_max_val = Counter()\n for entity in entities:\n new_max_val[entity] = self.max_val[entity] * other\n\n new_min_val = Counter()\n for entity in entities:\n new_min_val[entity] = self.min_val[entity] * other\n \n if(other > 0):\n return PrivateNumber(self.value * other,\n new_max_val,\n new_min_val)\n else:\n return PrivateNumber(self.value * other,\n new_min_val, \n new_max_val)\n \n def __sub__(self, other):\n return self + (-other)\n \n def __mul__(self, other):\n \n if(isinstance(other, PrivateNumber)):\n \n entities = self.entities.union(other.entities)\n \n new_self_max_val = Counter()\n new_self_min_val = Counter() \n for entity in entities:\n \n # the biggest positive number this entity could contribute is when\n # it is multiplied by the largest value of the same 
sign from other\n new_self_max_val[entity] = max(self.min_val[entity] * other.xmin, \n self.max_val[entity] * other.xmax)\n \n # the smallest negative number this entity could contribute is when\n # it is multiplied by the largest value of the opposite sign from other\n new_self_min_val[entity] = min(self.min_val[entity] * other.xmax,\n self.max_val[entity] * other.xmin)\n \n new_other_max_val = Counter()\n new_other_min_val = Counter() \n for entity in entities:\n \n # the biggest positive number this entity could contribute is when\n # it is multiplied by the largest value of the same sign from other\n new_other_max_val[entity] = max(other.min_val[entity] * self.xmin, \n other.max_val[entity] * self.xmax)\n \n # the smallest negative number this entity could contribute is when\n # it is multiplied by the largest value of the opposite sign from other\n new_other_min_val[entity] = min(other.min_val[entity] * self.xmax,\n other.max_val[entity] * self.xmin)\n \n new_max_val = Counter()\n new_min_val = Counter()\n \n for entity in entities:\n new_max_val[entity] = max(new_self_max_val[entity], new_other_max_val[entity])\n new_min_val[entity] = min(new_self_min_val[entity], new_other_min_val[entity])\n\n return PrivateNumber(self.value * other.value,\n new_max_val,\n new_min_val)\n \n entities = self.entities\n \n new_max_val = Counter()\n for entity in entities:\n new_max_val[entity] = self.max_val[entity] * other\n\n new_min_val = Counter()\n for entity in entities:\n new_min_val[entity] = self.min_val[entity] * other\n \n if(other > 0):\n return PrivateNumber(self.value * other,\n new_max_val,\n new_min_val)\n else:\n return PrivateNumber(self.value * other,\n new_min_val, \n new_max_val)\n \n def __truediv__(self, other):\n \n if(isinstance(other, PrivateNumber)):\n raise Exception(\"probably best not to do this - it's gonna be inf a lot\")\n \n entities = self.entities\n \n new_max_val = Counter()\n for entity in entities:\n new_max_val[entity] = self.max_val[entity] / other\n\n new_min_val = Counter()\n for entity in entities:\n new_min_val[entity] = self.min_val[entity] / other\n \n return PrivateNumber(self.value / other,\n new_max_val,\n new_min_val)\n\n def __gt__(self, other):\n \"\"\"BUG!: Counter() defaults to 0\"\"\"\n if(isinstance(other, PrivateNumber)):\n \n entities = self.entities.union(other.entities)\n \n new_self_max_val = Counter()\n new_self_min_val = Counter() \n for entity in entities:\n \n if not (self.min_val[entity] > other.xmax or self.max_val[entity] < other.xmin):\n new_self_max_val[entity] = 1\n else:\n new_self_max_val[entity] = 0\n \n new_self_min_val[entity] = 0\n \n new_other_max_val = Counter()\n new_other_min_val = Counter() \n for entity in entities:\n \n if not (other.min_val[entity] > self.xmax or other.max_val[entity] < self.xmin):\n new_other_max_val[entity] = 1\n else:\n new_other_max_val[entity] = 0\n \n new_other_min_val[entity] = 0\n \n new_max_val = Counter()\n new_min_val = Counter()\n \n for entity in entities:\n new_max_val[entity] = max(new_self_max_val[entity], new_other_max_val[entity])\n new_min_val[entity] = min(new_self_min_val[entity], new_other_min_val[entity])\n\n return PrivateNumber(int(self.value > other.value),\n new_max_val,\n new_min_val)\n \n entities = self.entities\n \n new_max_val = Counter()\n new_min_val = Counter()\n for entity in entities:\n \n new_min_val[entity] = 0\n \n if(other <= self.max_val[entity] and other >= self.min_val[entity]): \n new_max_val[entity] = 1\n else:\n new_max_val[entity] = 0\n\n return 
PrivateNumber(int(self.value > other),\n new_max_val,\n new_min_val)\n \n\n def __lt__(self, other):\n \"\"\"BUG!: Counter() defaults to 0\"\"\"\n if(isinstance(other, PrivateNumber)):\n \n entities = self.entities.union(other.entities)\n \n new_self_max_val = Counter()\n new_self_min_val = Counter() \n for entity in entities:\n \n if not (self.min_val[entity] > other.xmax or self.max_val[entity] < other.xmin):\n new_self_max_val[entity] = 1\n else:\n new_self_max_val[entity] = 0\n \n new_self_min_val[entity] = 0\n \n new_other_max_val = Counter()\n new_other_min_val = Counter() \n for entity in entities:\n \n if not (other.min_val[entity] > self.xmax or other.max_val[entity] < self.xmin):\n new_other_max_val[entity] = 1\n else:\n new_other_max_val[entity] = 0\n \n new_other_min_val[entity] = 0\n \n new_max_val = Counter()\n new_min_val = Counter()\n \n for entity in entities:\n new_max_val[entity] = max(new_self_max_val[entity], new_other_max_val[entity])\n new_min_val[entity] = min(new_self_min_val[entity], new_other_min_val[entity])\n\n return PrivateNumber(int(self.value < other.value),\n new_max_val,\n new_min_val)\n \n entities = self.entities\n \n new_max_val = Counter()\n new_min_val = Counter()\n for entity in entities:\n \n new_min_val[entity] = 0\n \n if(other <= self.max_val[entity] and other >= self.min_val[entity]): \n new_max_val[entity] = 1\n else:\n new_max_val[entity] = 0\n\n return PrivateNumber(int(self.value < other),\n new_max_val,\n new_min_val)\n \n def __neg__(self):\n return self * -1\n \n def max(self, other):\n \n if(isinstance(other, PrivateNumber)):\n raise Exception(\"Not implemented yet\")\n \n entities = self.entities\n \n new_min_val = Counter()\n for entity in entities:\n new_min_val[entity] = max(self.min_val[entity], other)\n \n return PrivateNumber(max(self.value, other),\n self.max_val,\n new_min_val)\n \n def min(self, other):\n \n if(isinstance(other, PrivateNumber)):\n raise Exception(\"Not implemented yet\")\n \n entities = self.entities\n \n new_max_val = Counter()\n for entity in entities:\n new_max_val[entity] = min(self.max_val[entity], other)\n \n return PrivateNumber(min(self.value, other),\n new_max_val,\n self.min_val)\n \n def hard_sigmoid(self):\n return self.min(1).max(0)\n \n def hard_sigmoid_deriv(self):\n return ((self < 1) * (self > 0)) + (self < 0) * 0.01 - (self > 1) * 0.01\n \n def __repr__(self):\n return str(self.value) + \" \" + str(self.max_val) + \" \" + str(self.min_val)\n \n @property\n def xmin(self):\n return self.min_val.most_common(len(self.min_val))[-1][1]\n \n @property\n def xmax(self):\n return self.max_val.most_common(1)[0][1]\n \n @property\n def entities(self):\n return set(self.max_val.keys())\n \n @property\n def sensitivity(self):\n sens = Counter()\n for entity, value in self.max_val.items():\n sens[entity] = value - self.min_val[entity]\n return sens.most_common()[0][1]\n \nx = PrivateNumber(0.5,Counter({\"bob\":4, \"amos\":3}),Counter({\"bob\":3, \"amos\":2}))\ny = PrivateNumber(1,Counter({\"bob\":1}),Counter({\"bob\":-1}))\nz = PrivateNumber(-0.5,Counter({\"sue\":2}),Counter({\"sue\":-1}))\n",
"_____no_output_____"
],
[
"a = x > y",
"_____no_output_____"
],
[
"a.sensitivity",
"_____no_output_____"
],
[
"a = x + y",
"_____no_output_____"
],
[
"b = a * z",
"_____no_output_____"
],
[
"b",
"_____no_output_____"
],
[
"# class PrivacyAccountant():\n \n# def __init__(self, default_budget = 0.1):\n \n# self.entity2epsilon = {}\n# self.entity2id = {}\n# self.default_budget = default_budget\n \n# def add_entity(self, entity_id, budget=None):\n# \"\"\"Add another entity to the system to be tracked.\n \n# Args:\n# entity_id: a string or other unique identifier of the entity\n# budget: the epsilon level defining this user's privacy budget\n# \"\"\"\n \n# if(budget is None):\n# budget = self.default_budget\n \n# self.entity2id[entity_id] = len(self.entity2id)\n# self.entity2epsilon[self.entity2id[entity_id]] = budget\n \n \n# accountant = PrivacyAccountant()\n\n# class DPTensor():\n \n# def __init__(self, data, entities, max_values=None, min_values=None):\n \n# assert data.shape == entities.shape#[0:-1]\n\n# self.data = data\n# self.entities = entities\n \n# if max_values is None:\n# max_values = np.inf + np.zeros_like(self.data)\n \n# assert max_values.shape == data.shape\n# self.max_values = max_values \n \n# if min_values is None:\n# min_values = -np.inf + np.zeros_like(self.data) \n \n# assert min_values.shape == data.shape \n# self.min_values = min_values\n\n# def sum(self, dim=0):\n \n# _new_data = self.data.sum(dim)\n \n# return _new_data\n \n# @property\n# def sensitivity(self):\n# return self.max_values - self.min_values\n",
"_____no_output_____"
],
[
"# results, tags = grid.search(\"diabetes\",\"#data\", verbose=False)\n# dataset = results['alice'][0][0:5][:,0:4]\n# n_ent = dataset.shape[0]\n# n_classes = dataset.shape[1]\n\n# for i in range(n_ent):\n# accountant.add_entity(\"Diabetes Patient #\" + str(i))\n \n# d2 = dataset.clone().get()\n# entities = th.arange(0,n_ent).view(-1,1).expand(n_ent,n_classes)#.unsqueeze(2)\n# db = DPTensor(data=d2, \n# entities=entities, \n# max_values=d2.max(0)[0].expand(n_ent,n_classes), \n# min_values=d2.min(0)[0].expand(n_ent,n_classes))\n",
"_____no_output_____"
],
[
"\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e71330a04ef417ec8b542b24e662ee9d732418b0 | 216,037 | ipynb | Jupyter Notebook | _notebooks/2020-02-22-confidence-interval-plot.ipynb | anaveenan/anaveenan | 6b2022adf17de4be0e4720501508a366ebcdc182 | [
"Apache-2.0"
] | null | null | null | _notebooks/2020-02-22-confidence-interval-plot.ipynb | anaveenan/anaveenan | 6b2022adf17de4be0e4720501508a366ebcdc182 | [
"Apache-2.0"
] | 3 | 2021-05-20T21:17:03.000Z | 2022-02-26T09:52:27.000Z | _notebooks/2020-02-22-confidence-interval-plot.ipynb | anaveenan/anaveenan | 6b2022adf17de4be0e4720501508a366ebcdc182 | [
"Apache-2.0"
] | null | null | null | 333.390432 | 95,733 | 0.57498 | [
[
[
"# Confidence Interval Plot Python\n> A tutorial on how to create confidence interval plot in python.\n\n- toc: false \n- badges: true\n- comments: true\n- categories: [altair, python]\n- image: images/chart-preview.png",
"_____no_output_____"
],
[
"# About\n\nThis blog post details how to create confidence interval plot in python using Altair Visualization package. Altair is a declarative statistical visualization library based on vega and vega-lite. This is one my favorite visualization package in python. More details can be found [here](https://altair-viz.github.io/getting_started/overview.html)",
"_____no_output_____"
],
[
"Lets load the package and get data from cars data set. ",
"_____no_output_____"
]
],
[
[
"import altair as alt\nimport numpy as np\nimport pandas as pd\nfrom vega_datasets import data\n\nsource = data.cars()\n\nsource.head()",
"_____no_output_____"
]
],
[
[
"### Create a plot showing how mile per gallon change by year \nAltair has built in capabilities to create this visualization \n1. Lets create a base line chart showing the average mile per gallon per year \n2. Create a confidence interval band chart using the mark_errorband() \n3. Layer the line and CI band chart to create the final visualization",
"_____no_output_____"
]
],
[
[
"line = (alt\n .Chart(source).mark_line(color='blue')\n .encode(x='Year',\n y='mean(Miles_per_Gallon)'))\n\nband = (alt\n .Chart(source)\n .mark_errorband(extent='ci',color='blue')\n .encode(x='Year',\n y=alt.Y('Miles_per_Gallon', title='Miles/Gallon')))\n\n(band + line).properties(title='Confidence Interval Plot of miles per gallon')",
"_____no_output_____"
]
],
[
[
"Lets say if you want to understand how mileage varies by origin. This can be done by simply encoding color in the plot ",
"_____no_output_____"
]
],
[
[
"line = (alt\n .Chart(source).mark_line(color='blue')\n .encode(x='Year',\n y='mean(Miles_per_Gallon)',\n color='Origin'))\n\nband = (alt\n .Chart(source)\n .mark_errorband(extent='ci',color='blue')\n .encode(x='Year',\n y=alt.Y('Miles_per_Gallon', title='Miles/Gallon'),\n color='Origin'))\n\n(band + line).properties(title='Confidence Interval of miles per gallon by country')",
"_____no_output_____"
]
],
[
[
"### Create confidence interval plot from grouped data ",
"_____no_output_____"
],
[
"Most of situation in real world you have large a dataset and still need to plot confidence interval plots.In this scenario it is better to pre compute the confidence interval based on mean and margin of error. Lets create a pandas data frame with required fields as show below : ",
"_____no_output_____"
]
],
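[
[
"For reference (standard background, not Altair-specific): the pre-computed interval used here is the usual normal-approximation 95% confidence interval of a group mean,\n\n$$\\bar{x} \\pm 1.96\\,\\frac{s}{\\sqrt{n}},$$\n\nwhere $\\bar{x}$ is the sample mean, $s$ the sample standard deviation and $n$ the number of observations in the group.",
"_____no_output_____"
]
],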
[
[
"df=(source\n .groupby(['Year'])\n .agg(avg_mpg=('Miles_per_Gallon','mean'),\n std_mpg=('Miles_per_Gallon','std'),\n n=('Miles_per_Gallon','count'))\n .assign(ul=lambda x:x['avg_mpg']+1.96*x['std_mpg']/np.sqrt(x['n']),\n ll=lambda x:x['avg_mpg']-1.96*x['std_mpg']/np.sqrt(x['n']))\n .reset_index()\n)\n\ndf.head()",
"_____no_output_____"
]
],
[
[
"Few lines of code below create the custom confidence interval plot required",
"_____no_output_____"
]
],
[
[
"line = (alt\n .Chart()\n .mark_line(color='blue')\n .encode(x='Year',\n y='avg_mpg'))\n\nband = (alt\n .Chart()\n .mark_area(opacity=0.5,color='blue')\n .encode(x='Year',\n y=alt.Y('ll', axis=alt.Axis(title='Miles/Gallon',ticks=False)),\n y2=alt.Y2('ul')))\n\nalt.layer(band + line,data=df).properties(title='Confidence Interval of miles per gallon by country(Custom)')",
"_____no_output_____"
]
],
[
[
"### Conclusion",
"_____no_output_____"
],
[
"Confidence interval plot is one the most important tool in a data scientist tool kit to understand uncertainty of the metrics. Altair provides excellent visualization capabilities to make this plot with few line of python code. ",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
e71335364415bfcc1ea02d071fdf85209960c459 | 31,307 | ipynb | Jupyter Notebook | src/FM_Data_Subset_1.ipynb | Prasham8897/Rec_Sys_Yelp_Businesses | dc25131ce44e0332097c8a4825d9e8cc9c18b932 | [
"MIT"
] | 4 | 2021-07-06T05:33:49.000Z | 2021-11-24T11:23:24.000Z | src/FM_Data_Subset_1.ipynb | Prasham8897/Rec_Sys_Yelp_Businesses | dc25131ce44e0332097c8a4825d9e8cc9c18b932 | [
"MIT"
] | null | null | null | src/FM_Data_Subset_1.ipynb | Prasham8897/Rec_Sys_Yelp_Businesses | dc25131ce44e0332097c8a4825d9e8cc9c18b932 | [
"MIT"
] | null | null | null | 61.627953 | 15,920 | 0.715239 | [
[
[
"## Installing Packages",
"_____no_output_____"
]
],
[
[
"from google.colab import drive\ndrive.mount('/content/drive/', force_remount=True)",
"Mounted at /content/drive/\n"
],
[
"#!pip install git+https://github.com/coreylynch/pyFM\nimport numpy as np\nimport pandas as pd\nfrom datetime import datetime\nimport json\nfrom tqdm import tqdm\nfrom sklearn.preprocessing import MultiLabelBinarizer\nimport itertools\nimport scipy as sp\nfrom sklearn.feature_extraction import DictVectorizer\nfrom pyfm import pylibfm\nfrom sklearn.preprocessing import normalize\nfrom scipy import sparse\nfrom sklearn.metrics import mean_squared_error\nimport math\nimport matplotlib.pyplot as plt",
"_____no_output_____"
]
],
[
[
"## Data Loading and Preparation",
"_____no_output_____"
]
],
[
[
"data2 = pd.read_csv(\"/content/drive/My Drive/data1.csv\")",
"_____no_output_____"
],
[
"list_rmse = []",
"_____no_output_____"
],
[
"user_counts_1 = data2[\"user_id\"].value_counts()\nactive_users_1 = user_counts_1.loc[user_counts_1 >= 5].index.tolist()\n\ndata2['isActive'] = data2.user_id.isin(active_users_1).astype(int)\ndata2 = data2[data2.loc[:,'isActive'] == 1]\ndata2 = data2.iloc[:,:36]",
"_____no_output_____"
]
],
[
[
"## Defining a function for Factorization machine with categories as embeddings ",
"_____no_output_____"
]
],
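[
[
"For context (standard background on the model, not from the original write-up): pylibfm fits a second-order factorization machine, which models the rating as\n\n$$\\hat{y}(\\mathbf{x}) = w_0 + \\sum_{i=1}^{p} w_i x_i + \\sum_{i=1}^{p}\\sum_{j=i+1}^{p} \\langle \\mathbf{v}_i, \\mathbf{v}_j \\rangle\\, x_i x_j,$$\n\nwhere each feature $i$ has a learned latent vector $\\mathbf{v}_i$. Pairwise interactions (for example, a user with a restaurant category) are therefore estimated through the latent factors even for feature pairs that rarely co-occur in the training data.",
"_____no_output_____"
]
],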
[
[
"def FM_E(data2):\n \n #Category Embedding\n temp = data2.copy()\n temp['categories'] = temp['categories'].apply(lambda x : x.split(\",\"))\n cat_list = list(temp['categories'])\n mlb = MultiLabelBinarizer()\n temp1 = mlb.fit_transform(cat_list)\n names = mlb.classes_\n cat_df = pd.DataFrame(temp1,columns=names)\n merged_2 = pd.concat([data2.reset_index(),cat_df],axis=1)\n\n merged_2 = merged_2.drop(columns=['address',\n 'state', \n 'postal_code', \n 'hours'])\n data3 = merged_2.copy()\n data3 = data3.drop(columns=['Unnamed: 0',\n 'name',\n 'city',\n 'yelping_since'])\n data3.rename(columns = {'rating_x':'rating'}, inplace = True) \n\n #Splitting test and train. Test set comprises of the users and the restaurants that the user rated last.\n data_test = data3[data3.groupby('user_id')['date'].transform('max') == data3['date']]\n data_train = pd.concat([data3, data_test]).drop_duplicates(keep=False)\n\n\n #Preparing data for the FM model\n y_train = np.squeeze(data_train[['rating']].to_numpy())\n y_test = np.squeeze(data_test[['rating']].to_numpy())\n\n x_train = data_train.iloc[:,1:29]\n x_train = x_train.drop(columns=['date','attributes','categories','yrs_elite','rating'])\n x_train_final = x_train.to_dict('records')\n\n\n x_train_rem = data_train.iloc[:,29:]\n x_train_rem_final = x_train_rem.to_numpy()\n x_train_rem_final = sp.sparse.csr_matrix(x_train_rem_final)\n\n\n x_test = data_test.iloc[:,1:29]\n x_test = x_test.drop(columns=['date','attributes','categories','yrs_elite','rating'])\n x_test_final = x_test.to_dict('records')\n\n\n x_test_rem = data_test.iloc[:,29:]\n x_test_rem_final = x_test_rem.to_numpy()\n x_test_rem_final = sp.sparse.csr_matrix(x_test_rem)\n\n v = DictVectorizer()\n X_train = v.fit_transform(x_train_final)\n X_test = v.transform(x_test_final)\n\n X_train_final = sp.sparse.hstack([X_train, x_train_rem_final])\n X_test_final = sp.sparse.hstack([X_test, x_test_rem_final])\n\n #Fitting the model and predicting\n fm = pylibfm.FM(num_iter=10, task=\"regression\", learning_rate_schedule=\"optimal\")\n\n fm.fit(normalize(sparse.csr_matrix(X_train_final)), y_train)\n\n #Calculating the RMSE value\n prediction = fm.predict(normalize(sparse.csr_matrix(X_test_final)))\n list_rmse.append(mean_squared_error(y_test,preds))\n print(\"FM MSE: %.4f\" % mean_squared_error(y_test,predcition))\n",
"_____no_output_____"
]
],
[
[
"## Defining a function for Factorization machine with no embeddings",
"_____no_output_____"
]
],
[
[
"def FM_NE(data1):\n #Clean data\n merged = data1\n merged = merged.drop(columns=['address',\n 'state', \n 'postal_code', \n 'hours'])\n\n data3 = merged\n\n data3 = data3.drop(columns=['name',\n 'city',\n 'yelping_since'])\n data3.rename(columns = {'rating_x':'rating'}, inplace = True) \n\n #Splitting test and train. Test set comprises of the users and the restaurants that the user rated last.\n data_test = data3[data3.groupby('user_id')['date'].transform('max') == data3['date']]\n data_train = pd.concat([data3, data_test]).drop_duplicates(keep=False)\n\n \n #Preparing data for the FM model\n y_train = np.squeeze(data_train[['rating']].to_numpy())\n y_test = np.squeeze(data_test[['rating']].to_numpy())\n\n\n x_train = data_train\n x_train = x_train.drop(columns=['date','attributes','categories','yrs_elite','rating'])\n x_train_final = x_train.to_dict('records')\n\n\n x_test = data_test\n x_test = x_test.drop(columns=['date','attributes','categories','yrs_elite','rating'])\n x_test_final = x_test.to_dict('records')\n\n v = DictVectorizer()\n X_train = v.fit_transform(x_train_final)\n X_test = v.transform(x_test_final)\n\n \n #Fitting the model and predicting\n fm = pylibfm.FM(num_iter=10, task=\"regression\", learning_rate_schedule=\"optimal\")\n fm.fit(normalize(sparse.csr_matrix(X_train)), y_train)\n\n #Calculating the RMSE value\n prediction = fm.predict(normalize(sparse.csr_matrix(X_test)))\n list_rmse.append(mean_squared_error(y_test,preds))\n print(\"FM MSE: %.4f\" % mean_squared_error(y_test,prediction))",
"_____no_output_____"
]
],
[
[
"## With category as embeddings:",
"_____no_output_____"
]
],
[
[
"FM_E(data2)",
"Creating validation dataset of 0.01 of training for adaptive regularization\n-- Epoch 1\nTraining MSE: 0.68428\n-- Epoch 2\nTraining MSE: 0.71101\n-- Epoch 3\nTraining MSE: 0.71038\n-- Epoch 4\nTraining MSE: 0.71059\n-- Epoch 5\nTraining MSE: 0.71007\n-- Epoch 6\nTraining MSE: 0.70997\n-- Epoch 7\nTraining MSE: 0.70952\n-- Epoch 8\nTraining MSE: 0.71016\n-- Epoch 9\nTraining MSE: 0.70949\n-- Epoch 10\nTraining MSE: 0.71275\nFM MSE: 1.8470\n"
]
],
[
[
"## Without category as embeddings",
"_____no_output_____"
]
],
[
[
"FM_NE(data2)",
"Creating validation dataset of 0.01 of training for adaptive regularization\n-- Epoch 1\nTraining MSE: 0.74014\n-- Epoch 2\nTraining MSE: 0.73707\n-- Epoch 3\nTraining MSE: 0.73566\n-- Epoch 4\nTraining MSE: 0.73511\n-- Epoch 5\nTraining MSE: 0.73429\n-- Epoch 6\nTraining MSE: 0.73428\n-- Epoch 7\nTraining MSE: 0.73418\n-- Epoch 8\nTraining MSE: 0.73374\n-- Epoch 9\nTraining MSE: 0.73391\n-- Epoch 10\nTraining MSE: 0.73370\nFM MSE: 1.8344\n"
]
],
[
[
"## Plot for Model Type vs RMSE",
"_____no_output_____"
]
],
[
[
"list_rmse = [1.8470,1.8344]\nnames = [\"Category Embeddings\", \"No embeddings\", \"Baseline-Surprise\"]\nfor i in range(len(list_rmse)):\n list_rmse[i] = math.sqrt(list_rmse[i])\ntype_1 = [\"Sample1\",\"Sample1\", \"Sample1\"]\nlist_rmse.append(1.352)",
"_____no_output_____"
],
[
"df = pd.DataFrame(list(zip(names, list_rmse, type_1)), \n columns =['Model type', 'RMSE', 'Sample Type'])",
"_____no_output_____"
],
[
"plt.bar(df['Model type'], df['RMSE'], align='center', alpha=0.5)\nplt.xlabel('Model Type')\nplt.ylabel('RMSE')\nplt.ylim((1.3,1.40))\nplt.xticks(rotation=45)\nplt.title('Model Type vs RMSE')",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
e71339d264c0c1e4542cc5255d587f8c72a3bd79 | 11,468 | ipynb | Jupyter Notebook | Section 14 - Advanced Python Modules/Lec 105 - Shutil & OS Modules.ipynb | sansjha4900/Udemy-Python-Notes | 9d748006e926f42f5b0161a3c6c9a4a1e7e3ff7f | [
"MIT"
] | null | null | null | Section 14 - Advanced Python Modules/Lec 105 - Shutil & OS Modules.ipynb | sansjha4900/Udemy-Python-Notes | 9d748006e926f42f5b0161a3c6c9a4a1e7e3ff7f | [
"MIT"
] | null | null | null | Section 14 - Advanced Python Modules/Lec 105 - Shutil & OS Modules.ipynb | sansjha4900/Udemy-Python-Notes | 9d748006e926f42f5b0161a3c6c9a4a1e7e3ff7f | [
"MIT"
] | null | null | null | 24.556745 | 182 | 0.50218 | [
[
[
"pwd",
"_____no_output_____"
],
[
"f = open(\"practice.txt\", \"w+\")\nf.write (\"This is a test string\")\nf.close()",
"_____no_output_____"
],
[
"import os",
"_____no_output_____"
],
[
"os.getcwd()",
"_____no_output_____"
],
[
"os.listdir()",
"_____no_output_____"
],
[
"os.listdir(\"C:\\\\Users\")",
"_____no_output_____"
],
[
"import shutil",
"_____no_output_____"
],
[
"shutil.move(\"practice.txt\",\"C:\\\\Users\\\\RITES\\\\Desktop\\\\Udemy Python\")",
"_____no_output_____"
],
[
"os.listdir(\"C:\\\\Users\\\\RITES\\\\Desktop\\\\Udemy Python\")",
"_____no_output_____"
],
[
"import send2trash",
"_____no_output_____"
],
[
"shutil.move(\"C:\\\\Users\\\\RITES\\\\Desktop\\\\Udemy Python\\\\practice.txt\", os.getcwd())",
"_____no_output_____"
],
[
"os.listdir()",
"_____no_output_____"
],
[
"send2trash.send2trash(\"practice.txt\")",
"_____no_output_____"
],
[
"os.listdir()",
"_____no_output_____"
],
[
"os.listdir() # Recovering file from trash",
"_____no_output_____"
],
[
"os.getcwd()",
"_____no_output_____"
],
[
"file_path = 'C:\\\\Users\\\\RITES\\\\Desktop\\\\Udemy Python\\\\Complete-Python-3-Bootcamp\\\\12-Advanced Python Modules\\\\Example_Top_Level'",
"_____no_output_____"
],
[
"for folder, sub_folders, files in os.walk(file_path):\n \n print (f\"Currently looking at: {folder}\")\n print (\"\\n\")\n print (\"The sub folders are: \")\n for sub_fold in sub_folders:\n print (f\"\\t Subfolder: {sub_fold}\")\n print (\"\\n\")\n print (\"The files are: \")\n for f in files:\n print (f\"\\t Files: {f}\")\n print (\"\\n\")",
"Currently looking at: C:\\Users\\RITES\\Desktop\\Udemy Python\\Complete-Python-3-Bootcamp\\12-Advanced Python Modules\\Example_Top_Level\n\n\nThe sub folders are: \n\t Subfolder: Mid-Example-One\n\n\nThe files are: \n\t Files: Mid-Example.txt\n\n\nCurrently looking at: C:\\Users\\RITES\\Desktop\\Udemy Python\\Complete-Python-3-Bootcamp\\12-Advanced Python Modules\\Example_Top_Level\\Mid-Example-One\n\n\nThe sub folders are: \n\t Subfolder: Bottom-Level-One\n\t Subfolder: Bottom-Level-Two\n\n\nThe files are: \n\t Files: Mid-Level-Doc.txt\n\n\nCurrently looking at: C:\\Users\\RITES\\Desktop\\Udemy Python\\Complete-Python-3-Bootcamp\\12-Advanced Python Modules\\Example_Top_Level\\Mid-Example-One\\Bottom-Level-One\n\n\nThe sub folders are: \n\n\nThe files are: \n\t Files: One_Text.txt\n\n\nCurrently looking at: C:\\Users\\RITES\\Desktop\\Udemy Python\\Complete-Python-3-Bootcamp\\12-Advanced Python Modules\\Example_Top_Level\\Mid-Example-One\\Bottom-Level-Two\n\n\nThe sub folders are: \n\n\nThe files are: \n\t Files: Bottom-Text-Two.txt\n\n\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7135edc215389659b0fd6e10590a9a315143938 | 7,594 | ipynb | Jupyter Notebook | demos/quickstart.ipynb | kachiann/QMCSoftware | 0ed9da2f10b9ac0004c993c01392b4c86002954c | [
"Apache-2.0"
] | 1 | 2021-08-18T08:14:32.000Z | 2021-08-18T08:14:32.000Z | demos/quickstart.ipynb | kachiann/QMCSoftware | 0ed9da2f10b9ac0004c993c01392b4c86002954c | [
"Apache-2.0"
] | null | null | null | demos/quickstart.ipynb | kachiann/QMCSoftware | 0ed9da2f10b9ac0004c993c01392b4c86002954c | [
"Apache-2.0"
] | null | null | null | 39.34715 | 424 | 0.595075 | [
[
[
"# A QMCPy Quick Start\n\nIn this tutorial, we introduce QMCPy [1] by an example. QMCPy can be installed with **pip install qmcpy** or cloned from the [QMCSoftware GitHub repository](https://github.com/QMCSoftware/QMCSoftware).",
"_____no_output_____"
],
[
"Consider the problem of integrating the Keister function [2] with respect to a $d$-dimensional Gaussian measure: \n\n$$f(\\boldsymbol{x}) = \\pi^{d/2} \\cos(||\\boldsymbol{x}||), \\qquad \\boldsymbol{x} \\in \\mathbb{R}^d, \\qquad \\boldsymbol{X} \\sim \\mathcal{N}(\\boldsymbol{0}_d,\\mathsf{I}_d/2), \n\\\\ \\mu = \\mathbb{E}[f(\\boldsymbol{X})] := \\int_{\\mathbb{R}^d} f(\\boldsymbol{x}) \\, \\pi^{-d/2} \\exp( - ||\\boldsymbol{x}||^2) \\, \\rm d \\boldsymbol{x} \n\\\\ = \\int_{[0,1]^d} \\pi^{d/2} \\cos\\left(\\sqrt{ \\frac 12 \\sum_{j=1}^d\\Phi^{-1}(x_j)}\\right) \\, \\rm d \\boldsymbol{x},$$ where $||\\boldsymbol{x}||$ is the Euclidean norm, $\\mathsf{I}_d$ is the $d$-dimensional identity matrix, and \n$\\Phi$ denotes the standard normal cumulative distribution function. When $d=2$, $\\mu \\approx 1.80819$ and we can visualize the Keister function and realizations of the sampling points depending on the tolerance values, $\\varepsilon$, in the following figure:\n\n\n\nThe Keister function is implemented below with help from NumPy [3] in the following code snippet:",
"_____no_output_____"
]
],
[
[
"import numpy as np\ndef keister(x):\n \"\"\"\n x: nxd numpy ndarray\n n samples\n d dimensions\n\n returns n-vector of the Kesiter function\n evaluated at the n input samples\n \"\"\"\n d = x.shape[1]\n norm_x = np.sqrt((x**2).sum(1))\n k = np.pi**(d/2) * np.cos(norm_x)\n return k # size n vector",
"_____no_output_____"
]
],
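[
[
"# Quick sanity check (an added illustration, not part of the original tutorial):\n# at the origin the norm is 0, so for d = 2 the integrand equals\n# pi**(d/2) * cos(0) = pi, i.e. about 3.14159.\nkeister(np.array([[0., 0.], [1., 1.]]))",
"_____no_output_____"
]
],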
[
[
"In addition to our Keister integrand and Gaussian true measure, we must select a discrete distribution, and a stopping criterion [4]. The stopping criterion determines the number of points at which to evaluate the integrand in order for the mean approximation to be accurate within a user-specified error tolerance, $\\varepsilon$. The discrete distribution determines the sites at which the integrand is evaluated.\n\nFor this Keister example, we select the lattice sequence as the discrete distribution and corresponding cubature-based stopping criterion [5]. The discrete distribution, true measure, integrand, and stopping criterion are then constructed within the QMCPy framework below. ",
"_____no_output_____"
]
],
[
[
"import qmcpy\ndiscrete_distrib = qmcpy.Lattice(dimension = 2)\ntrue_measure = qmcpy.Gaussian(distribution = discrete_distrib, mean = 0, covariance = 1/2)\nintegrand = qmcpy.CustomFun(measure = true_measure, custom_fun = keister)\nstopping_criterion = qmcpy.CubQMCLatticeG(integrand = integrand, abs_tol = 1e-3)",
"_____no_output_____"
]
],
[
[
"Calling *integrate* on the *stopping_criterion* instance returns the numerical solution and a data object. Printing the data object will provide a neat summary of the integration problem. For details of the output fields, refer to the online, searchable QMCPy Documentation at [https://qmcpy.readthedocs.io/](https://qmcpy.readthedocs.io/en/latest/algorithms.html#module-qmcpy.integrand.keister).",
"_____no_output_____"
]
],
[
[
"solution, data = stopping_criterion.integrate()\nprint(data)",
"Solution: 1.8081 \nCustomFun (Integrand Object)\nLattice (DiscreteDistribution Object)\n dimension 2^(1)\n randomize 1\n order natural\n seed 2797520481\n mimics StdUniform\nGaussian (TrueMeasure Object)\n mean 0\n covariance 2^(-1)\n decomp_type pca\nCubQMCLatticeG (StoppingCriterion Object)\n abs_tol 0.001\n rel_tol 0\n n_init 2^(10)\n n_max 2^(35)\nLDTransformData (AccumulateData Object)\n n_total 2^(13)\n solution 1.808\n error_bound 5.08e-04\n time_integrate 0.014\n"
]
],
[
[
"This guide is not meant to be exhaustive but rather a quick introduction to the QMCPy framework and syntax. In an upcoming blog, we will take a closer look at low-discrepancy sequences such as the lattice sequence from the above example.\n\n## References\n\n1. Choi, S.-C. T., Hickernell, F., McCourt, M., Rathinavel J., & Sorokin, A. QMCPy: A quasi-Monte Carlo Python Library. https://qmcsoftware.github.io/QMCSoftware/. 2020.\n2. Keister, B. D. Multidimensional Quadrature Algorithms. Computers in Physics 10, 119–122 (1996).\n3. Oliphant, T., Guide to NumPy https://ecs.wgtn.ac.nz/foswiki/pub/Support/ManualPagesAndDocumentation/numpybook.pdf (Trelgol Publishing USA, 2006).\n4. Hickernell, F., Choi, S.-C. T., Jiang, L. & Jimenez Rugama, L. A. in WileyStatsRef-Statistics Reference Online (eds Davidian, M.et al.) (John Wiley & Sons Ltd., 2018).\n5. Jimenez Rugama, L. A. & Hickernell, F. Adaptive Multidimensional Inte-gration Based on Rank-1 Lattices in Monte Carlo and Quasi-Monte Carlo Methods: MCQMC, Leuven, Belgium, April 2014 (eds Cools, R. & Nuyens, D.) 163.arXiv:1411.1966 (Springer-Verlag, Berlin, 2016), 407–422.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e7136a9cb5304621bddca0f97141dd4ff78f09ef | 2,227 | ipynb | Jupyter Notebook | main/2021/d14.ipynb | ljk233/Advent.py | f21c9ffc1ae5e490c35ca8c63e5000892b9dc5f2 | [
"MIT"
] | null | null | null | main/2021/d14.ipynb | ljk233/Advent.py | f21c9ffc1ae5e490c35ca8c63e5000892b9dc5f2 | [
"MIT"
] | null | null | null | main/2021/d14.ipynb | ljk233/Advent.py | f21c9ffc1ae5e490c35ca8c63e5000892b9dc5f2 | [
"MIT"
] | null | null | null | 17.816 | 81 | 0.501123 | [
[
[
"# 2021, Day 14: Extended Polymerization",
"_____no_output_____"
]
],
[
[
"from advent import get\nfrom advent.y2021 import d14",
"_____no_output_____"
]
],
[
[
"## Tests",
"_____no_output_____"
]
],
[
[
"d14.solve(get.sample(2021, 14))",
"_____no_output_____"
]
],
[
[
"## Solution",
"_____no_output_____"
]
],
[
[
"d14.solve(get.input(2021, 14))",
"_____no_output_____"
]
],
[
[
"## Benchmarking",
"_____no_output_____"
]
],
[
[
"%timeit d14.solve(get.input(2021, 14))",
"3.57 ms ± 15.3 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7136f349c2bb9b35b4014eb31376bcde0ff24be | 13,929 | ipynb | Jupyter Notebook | Give Life: Predict Blood Donations/notebook.ipynb | ariz-ahmad/ML-DL-NLP | 447f0648596d3074bb21e55292aa69a4d9b15a4f | [
"MIT"
] | null | null | null | Give Life: Predict Blood Donations/notebook.ipynb | ariz-ahmad/ML-DL-NLP | 447f0648596d3074bb21e55292aa69a4d9b15a4f | [
"MIT"
] | null | null | null | Give Life: Predict Blood Donations/notebook.ipynb | ariz-ahmad/ML-DL-NLP | 447f0648596d3074bb21e55292aa69a4d9b15a4f | [
"MIT"
] | null | null | null | 13,929 | 13,929 | 0.697609 | [
[
[
"## 1. Inspecting transfusion.data file\n<p><img src=\"https://assets.datacamp.com/production/project_646/img/blood_donation.png\" style=\"float: right;\" alt=\"A pictogram of a blood bag with blood donation written in it\" width=\"200\"></p>\n<p>Blood transfusion saves lives - from replacing lost blood during major surgery or a serious injury to treating various illnesses and blood disorders. Ensuring that there's enough blood in supply whenever needed is a serious challenge for the health professionals. According to <a href=\"https://www.webmd.com/a-to-z-guides/blood-transfusion-what-to-know#1\">WebMD</a>, \"about 5 million Americans need a blood transfusion every year\".</p>\n<p>Our dataset is from a mobile blood donation vehicle in Taiwan. The Blood Transfusion Service Center drives to different universities and collects blood as part of a blood drive. We want to predict whether or not a donor will give blood the next time the vehicle comes to campus.</p>\n<p>The data is stored in <code>datasets/transfusion.data</code> and it is structured according to RFMTC marketing model (a variation of RFM). We'll explore what that means later in this notebook. First, let's inspect the data.</p>",
"_____no_output_____"
]
],
[
[
"# Print out the first 5 lines from the transfusion.data file\n!... ... datasets/transfusion.data",
"_____no_output_____"
]
],
[
[
"## 2. Loading the blood donations data\n<p>We now know that we are working with a typical CSV file (i.e., the delimiter is <code>,</code>, etc.). We proceed to loading the data into memory.</p>",
"_____no_output_____"
]
],
[
[
"# Import pandas\nimport ... as pd\n\n# Read in dataset\ntransfusion = ...\n\n# Print out the first rows of our dataset\n# ... YOUR CODE FOR TASK 2 ...",
"_____no_output_____"
]
],
[
[
"## 3. Inspecting transfusion DataFrame\n<p>Let's briefly return to our discussion of RFM model. RFM stands for Recency, Frequency and Monetary Value and it is commonly used in marketing for identifying your best customers. In our case, our customers are blood donors.</p>\n<p>RFMTC is a variation of the RFM model. Below is a description of what each column means in our dataset:</p>\n<ul>\n<li>R (Recency - months since the last donation)</li>\n<li>F (Frequency - total number of donation)</li>\n<li>M (Monetary - total blood donated in c.c.)</li>\n<li>T (Time - months since the first donation)</li>\n<li>a binary variable representing whether he/she donated blood in March 2007 (1 stands for donating blood; 0 stands for not donating blood)</li>\n</ul>\n<p>It looks like every column in our DataFrame has the numeric type, which is exactly what we want when building a machine learning model. Let's verify our hypothesis.</p>",
"_____no_output_____"
]
],
[
[
"# Print a concise summary of transfusion DataFrame\n# ... YOUR CODE FOR TASK 3 ...",
"_____no_output_____"
]
],
[
[
"## 4. Creating target column\n<p>We are aiming to predict the value in <code>whether he/she donated blood in March 2007</code> column. Let's rename this it to <code>target</code> so that it's more convenient to work with.</p>",
"_____no_output_____"
]
],
[
[
"# Rename target column as 'target' for brevity \ntransfusion.rename(\n columns={'whether he/she donated blood in March 2007': ...},\n inplace=True\n)\n\n# Print out the first 2 rows\n# ... YOUR CODE FOR TASK 4 ...",
"_____no_output_____"
]
],
[
[
"## 5. Checking target incidence\n<p>We want to predict whether or not the same donor will give blood the next time the vehicle comes to campus. The model for this is a binary classifier, meaning that there are only 2 possible outcomes:</p>\n<ul>\n<li><code>0</code> - the donor will not give blood</li>\n<li><code>1</code> - the donor will give blood</li>\n</ul>\n<p>Target incidence is defined as the number of cases of each individual target value in a dataset. That is, how many 0s in the target column compared to how many 1s? Target incidence gives us an idea of how balanced (or imbalanced) is our dataset.</p>",
"_____no_output_____"
]
],
[
[
"# Print target incidence proportions, rounding output to 3 decimal places\n# ... YOUR CODE FOR TASK 5 ...",
"_____no_output_____"
]
],
[
[
"## 6. Splitting transfusion into train and test datasets\n<p>We'll now use <code>train_test_split()</code> method to split <code>transfusion</code> DataFrame.</p>\n<p>Target incidence informed us that in our dataset <code>0</code>s appear 76% of the time. We want to keep the same structure in train and test datasets, i.e., both datasets must have 0 target incidence of 76%. This is very easy to do using the <code>train_test_split()</code> method from the <code>scikit learn</code> library - all we need to do is specify the <code>stratify</code> parameter. In our case, we'll stratify on the <code>target</code> column.</p>",
"_____no_output_____"
]
],
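[
[
"<p>As a quick illustration of what <code>stratify</code> does (a toy sketch on made-up arrays, separate from the tasks below):</p>\n\n```python\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\n\ny = np.array([0]*76 + [1]*24)   # 76% zeros, like our target\nX = np.arange(100).reshape(-1, 1)\n_, _, y_tr, y_te = train_test_split(X, y, test_size=0.25, stratify=y, random_state=0)\nprint(y_tr.mean(), y_te.mean())  # both print 0.24\n```",
"_____no_output_____"
]
],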
[
[
"# Import train_test_split method\nfrom sklearn.model_selection import ...\n\n# Split transfusion DataFrame into\n# X_train, X_test, y_train and y_test datasets,\n# stratifying on the `target` column\n... = train_test_split(\n transfusion.drop(columns='target'),\n transfusion.target,\n test_size=0.25,\n random_state=42,\n stratify=...\n)\n\n# Print out the first 2 rows of X_train\n# ... YOUR CODE FOR TASK 6 ...",
"_____no_output_____"
]
],
[
[
"## 7. Selecting model using TPOT\n<p><a href=\"https://github.com/EpistasisLab/tpot\">TPOT</a> is a Python Automated Machine Learning tool that optimizes machine learning pipelines using genetic programming.</p>\n<p><img src=\"https://assets.datacamp.com/production/project_646/img/tpot-ml-pipeline.png\" alt=\"TPOT Machine Learning Pipeline\"></p>\n<p>TPOT will automatically explore hundreds of possible pipelines to find the best one for our dataset. Note, the outcome of this search will be a <a href=\"https://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html\">scikit-learn pipeline</a>, meaning it will include any pre-processing steps as well as the model.</p>\n<p>We are using TPOT to help us zero in on one model that we can then explore and optimize further.</p>",
"_____no_output_____"
]
],
[
[
"# Import TPOTClassifier and roc_auc_score\nfrom tpot import ...\nfrom sklearn.metrics import ...\n\n# Instantiate TPOTClassifier\ntpot = ...(\n generations=5,\n population_size=20,\n verbosity=2,\n scoring='roc_auc',\n random_state=42,\n disable_update_check=True,\n config_dict='TPOT light'\n)\ntpot.fit(X_train, y_train)\n\n# AUC score for tpot model\ntpot_auc_score = roc_auc_score(y_test, tpot.predict_proba(X_test)[:, 1])\nprint(f'\\nAUC score: {...:.4f}')\n\n# Print best pipeline steps\nprint('\\nBest pipeline steps:', end='\\n')\nfor idx, (name, transform) in enumerate(tpot.fitted_pipeline_.steps, start=1):\n # Print idx and transform\n print(f'{...}. {...}')",
"_____no_output_____"
]
],
[
[
"## 8. Checking the variance\n<p>TPOT picked <code>LogisticRegression</code> as the best model for our dataset with no pre-processing steps, giving us the AUC score of 0.7850. This is a great starting point. Let's see if we can make it better.</p>\n<p>One of the assumptions for linear models is that the data and the features we are giving it are related in a linear fashion, or can be measured with a linear distance metric. If a feature in our dataset has a high variance that's orders of magnitude greater than the other features, this could impact the model's ability to learn from other features in the dataset.</p>\n<p>Correcting for high variance is called normalization. It is one of the possible transformations you do before training a model. Let's check the variance to see if such transformation is needed.</p>",
"_____no_output_____"
]
],
[
[
"# X_train's variance, rounding the output to 3 decimal places\n# ... YOUR CODE FOR TASK 8 ...",
"_____no_output_____"
]
],
[
[
"## 9. Log normalization\n<p><code>Monetary (c.c. blood)</code>'s variance is very high in comparison to any other column in the dataset. This means that, unless accounted for, this feature may get more weight by the model (i.e., be seen as more important) than any other feature.</p>\n<p>One way to correct for high variance is to use log normalization.</p>",
"_____no_output_____"
]
],
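[
[
"<p>A quick worked example of the effect (illustrative numbers, not from our dataset): the values 250, 500, 1000, 2000 have a sample variance of about $6.0 \\times 10^{5}$, while their natural logs 5.52, 6.21, 6.91, 7.60 have a sample variance of about 0.80. The log compresses the column onto the same scale as the other features without changing the ordering of the donors.</p>",
"_____no_output_____"
]
],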
[
[
"# Import numpy\nimport numpy as np\n\n# Copy X_train and X_test into X_train_normed and X_test_normed\n... = X_train.copy(), X_test.copy()\n\n# Specify which column to normalize\ncol_to_normalize = ...\n\n# Log normalization\nfor df_ in [X_train_normed, X_test_normed]:\n # Add log normalized column\n df_['monetary_log'] = np.log(df_[...])\n # Drop the original column\n df_.drop(columns=..., inplace=True)\n\n# Check the variance for X_train_normed\n# ... YOUR CODE FOR TASK 9 ...",
"_____no_output_____"
]
],
[
[
"## 10. Training the logistic regression model\n<p>The variance looks much better now. Notice that now <code>Time (months)</code> has the largest variance, but it's not the <a href=\"https://en.wikipedia.org/wiki/Order_of_magnitude\">orders of magnitude</a> higher than the rest of the variables, so we'll leave it as is.</p>\n<p>We are now ready to train the logistic regression model.</p>",
"_____no_output_____"
]
],
[
[
"# Importing modules\nfrom sklearn import ...\n\n# Instantiate LogisticRegression\nlogreg = ...(\n solver='liblinear',\n random_state=42\n)\n\n# Train the model\n...(X_train_normed, y_train)\n\n# AUC score for tpot model\nlogreg_auc_score = roc_auc_score(y_test, logreg.predict_proba(X_test_normed)[:, 1])\nprint(f'\\nAUC score: {...:.4f}')",
"_____no_output_____"
]
],
[
[
"## 11. Conclusion\n<p>The demand for blood fluctuates throughout the year. As one <a href=\"https://www.kjrh.com/news/local-news/red-cross-in-blood-donation-crisis\">prominent</a> example, blood donations slow down during busy holiday seasons. An accurate forecast for the future supply of blood allows for an appropriate action to be taken ahead of time and therefore saving more lives.</p>\n<p>In this notebook, we explored automatic model selection using TPOT and AUC score we got was 0.7850. This is better than simply choosing <code>0</code> all the time (the target incidence suggests that such a model would have 76% success rate). We then log normalized our training data and improved the AUC score by 0.5%. In the field of machine learning, even small improvements in accuracy can be important, depending on the purpose.</p>\n<p>Another benefit of using logistic regression model is that it is interpretable. We can analyze how much of the variance in the response variable (<code>target</code>) can be explained by other variables in our dataset.</p>",
"_____no_output_____"
]
],
[
[
"# Importing itemgetter\nfrom operator import ...\n\n# Sort models based on their AUC score from highest to lowest\nsorted(\n [('tpot', tpot_auc_score), ('logreg', logreg_auc_score)],\n key=itemgetter(1),\n ...=...\n)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e71376c11253bfe0bffd45ac94e62b347fc34d21 | 29,578 | ipynb | Jupyter Notebook | Mnist_colle.ipynb | ankitchauhan0108/handwritten-character-recognition | e9fb7b45ad0a1d24ae6e006f089edabf3281c384 | [
"Unlicense"
] | null | null | null | Mnist_colle.ipynb | ankitchauhan0108/handwritten-character-recognition | e9fb7b45ad0a1d24ae6e006f089edabf3281c384 | [
"Unlicense"
] | null | null | null | Mnist_colle.ipynb | ankitchauhan0108/handwritten-character-recognition | e9fb7b45ad0a1d24ae6e006f089edabf3281c384 | [
"Unlicense"
] | null | null | null | 30.842544 | 148 | 0.485665 | [
[
[
"# Hand-Written English Character Recognition System",
"_____no_output_____"
],
[
"### Introduction\n- We are classifying 0-9 digit images into one of the 10 classes from 0-9.\n- For this, we are using mnist dataset.\n- Mnist dataset contains 42000 data for training and 28000 for testing.\n- It contains images of 28x28 pixels.\n- It is a multiclass classification problem where number of classes is 10.\n- We are using convolutional neural network with our own specific architecture which we are building from scratch.\n- We have also used image preprocessing for better results.",
"_____no_output_____"
],
[
"# importing important packages:-",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\nimport cv2\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nfrom PIL import Image\nimport matplotlib.pyplot as plt\n\nimport tensorflow as tf\nfrom tensorflow.keras.models import Sequential, load_model\nfrom tensorflow.keras.layers import Dense, Dropout, Flatten,Conv2D, MaxPooling2D, BatchNormalization\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\n\n#setting GPU Configuration\nconfig = tf.compat.v1.ConfigProto()\nconfig.gpu_options.allow_growth=True\nconfig.gpu_options.per_process_gpu_memory_fraction = .25\n\n\nimport os\n",
"_____no_output_____"
]
],
[
[
"## Data Loading (train_data, validation_data)",
"_____no_output_____"
]
],
[
[
"num_classes = 66\n\ntrain_datagen = ImageDataGenerator(rescale=1./255,\n featurewise_center=True,\n featurewise_std_normalization=True,\n validation_split=0.2)\n\n\n\ntrain_generator = train_datagen.flow_from_directory(\n 'trainingSet/',\n target_size=(64, 64),\n color_mode=\"grayscale\",\n batch_size=32,\n class_mode='categorical',\n subset='training') # set as training data\n\nvalidation_generator = train_datagen.flow_from_directory(\n 'trainingSet/', # same directory as training data\n target_size=(64, 64),\n color_mode=\"grayscale\",\n batch_size = 32,\n class_mode='categorical',\n subset='validation') # set as validation data\n",
"Found 91395 images belonging to 66 classes.\nFound 22824 images belonging to 66 classes.\n"
],
[
"class_labels = train_generator.class_indices",
"_____no_output_____"
],
[
"class_labels",
"_____no_output_____"
],
[
"rev_class_labels = {v: k for k, v in class_labels.items()}",
"_____no_output_____"
],
[
"rev_class_labels",
"_____no_output_____"
]
],
[
[
"## CNN Architecture",
"_____no_output_____"
]
],
[
[
"input_shape = (64,64,1)\n\nmodel = Sequential()\n\n#input Layer\nmodel.add(Conv2D(128, kernel_size=(3, 3),\n activation='relu',\n input_shape=input_shape,kernel_initializer='he_normal'))\nmodel.add(Dropout(0.5))\n\n#hidden Layer 1\nmodel.add(Conv2D(256, (3, 3), activation='relu',kernel_initializer='he_normal'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.5))\n\n#hidden Layer 2\nmodel.add(Conv2D(256, (3, 3), activation='relu',kernel_initializer='he_normal'))\nmodel.add(BatchNormalization())\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.25))\n\n#hidden Layer 3\nmodel.add(Conv2D(128, (3, 3), activation='relu',kernel_initializer='he_normal'))\nmodel.add(BatchNormalization())\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\n#hidden Layer 3\nmodel.add(Conv2D(64, (3, 3), activation='relu',kernel_initializer='he_normal'))\nmodel.add(BatchNormalization())\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\n#Dense Layer 1\nmodel.add(Flatten())\nmodel.add(Dense(256, activation='relu',kernel_initializer='he_normal'))\nmodel.add(BatchNormalization())\nmodel.add(Dropout(0.5))\n\n#Dense Layer 2\nmodel.add(Dense(128, activation='relu',kernel_initializer='he_normal'))\nmodel.add(BatchNormalization())\nmodel.add(Dropout(0.25))\n\n#Output layer\nmodel.add(Dense(num_classes, activation='softmax'))\n\nmodel.summary()",
"Model: \"sequential\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d (Conv2D) (None, 62, 62, 128) 1280 \n_________________________________________________________________\ndropout (Dropout) (None, 62, 62, 128) 0 \n_________________________________________________________________\nconv2d_1 (Conv2D) (None, 60, 60, 256) 295168 \n_________________________________________________________________\nmax_pooling2d (MaxPooling2D) (None, 30, 30, 256) 0 \n_________________________________________________________________\ndropout_1 (Dropout) (None, 30, 30, 256) 0 \n_________________________________________________________________\nconv2d_2 (Conv2D) (None, 28, 28, 256) 590080 \n_________________________________________________________________\nbatch_normalization (BatchNo (None, 28, 28, 256) 1024 \n_________________________________________________________________\nmax_pooling2d_1 (MaxPooling2 (None, 14, 14, 256) 0 \n_________________________________________________________________\ndropout_2 (Dropout) (None, 14, 14, 256) 0 \n_________________________________________________________________\nconv2d_3 (Conv2D) (None, 12, 12, 128) 295040 \n_________________________________________________________________\nbatch_normalization_1 (Batch (None, 12, 12, 128) 512 \n_________________________________________________________________\nmax_pooling2d_2 (MaxPooling2 (None, 6, 6, 128) 0 \n_________________________________________________________________\nconv2d_4 (Conv2D) (None, 4, 4, 64) 73792 \n_________________________________________________________________\nbatch_normalization_2 (Batch (None, 4, 4, 64) 256 \n_________________________________________________________________\nmax_pooling2d_3 (MaxPooling2 (None, 2, 2, 64) 0 \n_________________________________________________________________\nflatten (Flatten) (None, 256) 0 \n_________________________________________________________________\ndense (Dense) (None, 256) 65792 \n_________________________________________________________________\nbatch_normalization_3 (Batch (None, 256) 1024 \n_________________________________________________________________\ndropout_3 (Dropout) (None, 256) 0 \n_________________________________________________________________\ndense_1 (Dense) (None, 128) 32896 \n_________________________________________________________________\nbatch_normalization_4 (Batch (None, 128) 512 \n_________________________________________________________________\ndropout_4 (Dropout) (None, 128) 0 \n_________________________________________________________________\ndense_2 (Dense) (None, 66) 8514 \n=================================================================\nTotal params: 1,365,890\nTrainable params: 1,364,226\nNon-trainable params: 1,664\n_________________________________________________________________\n"
]
],
[
[
"## compiling and Training the model",
"_____no_output_____"
]
],
[
[
"model.compile(loss=tf.keras.losses.categorical_crossentropy,\n optimizer=tf.keras.optimizers.Adam(),\n metrics=['accuracy'])\n\n\n\nhistory = model.fit_generator(\n train_generator,\n steps_per_epoch = train_generator.samples //32,\n validation_data = validation_generator, \n validation_steps = validation_generator.samples // 32,\n epochs=20)\n\n",
"Epoch 1/20\n2860/2860 [==============================] - 748s 261ms/step - loss: 1.7648 - accuracy: 0.5032 - val_loss: 11.4312 - val_accuracy: 0.0323\nEpoch 2/20\n2860/2860 [==============================] - 741s 259ms/step - loss: 0.7506 - accuracy: 0.7390 - val_loss: 3.3292 - val_accuracy: 0.2542\nEpoch 3/20\n2860/2860 [==============================] - 744s 260ms/step - loss: 0.6238 - accuracy: 0.7782 - val_loss: 1.1445 - val_accuracy: 0.6513\nEpoch 4/20\n2860/2860 [==============================] - 743s 260ms/step - loss: 0.5584 - accuracy: 0.7987 - val_loss: 1.2076 - val_accuracy: 0.6433\nEpoch 5/20\n2860/2860 [==============================] - 742s 260ms/step - loss: 0.5141 - accuracy: 0.8120 - val_loss: 0.8646 - val_accuracy: 0.7139\nEpoch 6/20\n2860/2860 [==============================] - 743s 260ms/step - loss: 0.4841 - accuracy: 0.8227 - val_loss: 0.6407 - val_accuracy: 0.7949\nEpoch 7/20\n2860/2860 [==============================] - 741s 259ms/step - loss: 0.4511 - accuracy: 0.8327 - val_loss: 0.5285 - val_accuracy: 0.8287\nEpoch 8/20\n2860/2860 [==============================] - 736s 257ms/step - loss: 0.4328 - accuracy: 0.8377 - val_loss: 2.2997 - val_accuracy: 0.5165\nEpoch 9/20\n2860/2860 [==============================] - 734s 257ms/step - loss: 0.4123 - accuracy: 0.8455 - val_loss: 0.5202 - val_accuracy: 0.8345\nEpoch 10/20\n2860/2860 [==============================] - 746s 261ms/step - loss: 0.4007 - accuracy: 0.8500 - val_loss: 0.9604 - val_accuracy: 0.7003\nEpoch 11/20\n2860/2860 [==============================] - 745s 260ms/step - loss: 0.3856 - accuracy: 0.8544 - val_loss: 0.6645 - val_accuracy: 0.7824\nEpoch 12/20\n2860/2860 [==============================] - 765s 267ms/step - loss: 0.3723 - accuracy: 0.8606 - val_loss: 0.5449 - val_accuracy: 0.8206\nEpoch 13/20\n2860/2860 [==============================] - 741s 259ms/step - loss: 0.3640 - accuracy: 0.8624 - val_loss: 0.5078 - val_accuracy: 0.8416\nEpoch 14/20\n2860/2860 [==============================] - 758s 265ms/step - loss: 0.3513 - accuracy: 0.8655 - val_loss: 0.8064 - val_accuracy: 0.7368\nEpoch 15/20\n2860/2860 [==============================] - 784s 274ms/step - loss: 0.3463 - accuracy: 0.8684 - val_loss: 1.5045 - val_accuracy: 0.6409\nEpoch 16/20\n2860/2860 [==============================] - 775s 271ms/step - loss: 0.3341 - accuracy: 0.8711 - val_loss: 0.6516 - val_accuracy: 0.8021\nEpoch 17/20\n2860/2860 [==============================] - 776s 271ms/step - loss: 0.3303 - accuracy: 0.8730 - val_loss: 0.6496 - val_accuracy: 0.8060\nEpoch 18/20\n2860/2860 [==============================] - 771s 270ms/step - loss: 0.3212 - accuracy: 0.8755 - val_loss: 0.6002 - val_accuracy: 0.8228\nEpoch 19/20\n2860/2860 [==============================] - 741s 259ms/step - loss: 0.3153 - accuracy: 0.8784 - val_loss: 0.7682 - val_accuracy: 0.7609\nEpoch 20/20\n2860/2860 [==============================] - 738s 258ms/step - loss: 0.3082 - accuracy: 0.8815 - val_loss: 0.6895 - val_accuracy: 0.7884\n"
]
],
[
[
"### saving the model for future use",
"_____no_output_____"
]
],
[
[
"# saving the model\nsave_model = model.save(\"model_epoch20.h5\")",
"_____no_output_____"
]
],
[
[
"## Testing on Real World Data - Handwritten and Picture Taken from Mobile Camera",
"_____no_output_____"
]
],
[
[
"model = load_model(\"model_epoch20.h5\")",
"_____no_output_____"
],
[
"rev_class_labels={0: '#',\n 1: '$',\n 2: '0__',\n 3: '1',\n 4: '2',\n 5: '3',\n 6: '4',\n 7: '5',\n 8: '6',\n 9: '7',\n 10: '8',\n 11: '9',\n 12: '@',\n 13: 'A',\n 14: 'B',\n 15: 'C',\n 16: 'D',\n 17: 'E',\n 18: 'F',\n 19: 'G',\n 20: 'H',\n 21: 'J',\n 22: 'K',\n 23: 'L',\n 24: 'M',\n 25: 'N',\n 26: 'O',\n 27: 'P',\n 28: 'Q',\n 29: 'R',\n 30: 'S',\n 31: 'T',\n 32: 'U',\n 33: 'V',\n 34: 'W',\n 35: 'X',\n 36: 'Y',\n 37: 'Z',\n 38: '&',\n 39: 'a_',\n 40: 'b_',\n 41: 'c_',\n 42: 'd_',\n 43: 'e_',\n 44: 'f__',\n 45: 'g___',\n 46: 'h_',\n 47: 'i_',\n 48: 'i__',\n 49: 'j_',\n 50: 'k_',\n 51: 'l_',\n 52: 'm_',\n 53: 'n_',\n 54: 'o_',\n 55: 'p___',\n 56: 'q_',\n 57: 'r_',\n 58: 's_',\n 59: 't_',\n 60: 'u_',\n 61: 'v_',\n 62: 'w_',\n 63: 'x_',\n 64: 'y__',\n 65: 'z_'}",
"_____no_output_____"
],
[
"# Test Data Preparation\ndef image_preprocessing(file_path, count):\n img = cv2.imread(file_path, cv2.IMREAD_GRAYSCALE) # reading in grayscale\n th, im_th = cv2.threshold(img, 128, 255, cv2.THRESH_OTSU)\n im_resize = cv2.resize(im_th, (64,64),interpolation = cv2.INTER_NEAREST)\n cv2.imwrite(\"testSet2//test_{}.png\".format(count),im_resize)\n \n\ncount = 0\nfor img in os.listdir('testSet'):\n count += 1\n image_preprocessing(\"testSet/\" + img, count)",
"_____no_output_____"
],
[
"# Prediction on Test Data\nres = []\ndata = []\nimg_name = []\nfor img in os.listdir('test_T'):\n\n image = cv2.imread('test_T/{}'.format(img),cv2.IMREAD_GRAYSCALE)\n image = cv2.resize(image , (64,64))\n image = image.reshape(64,64,1)\n image = image/255\n \n data.append(image)\n \n img_name.append(img)\n \n if len(data) % 27 == 0:\n pred = model.predict_on_batch(np.array(data))\n \n for p in pred:\n res.append(rev_class_labels[np.argmax(p)])\n \n data = []\n# res.append([img,*list(np.argmax(pred,axis=1))])",
"_____no_output_____"
],
[
"pred[1]",
"_____no_output_____"
],
[
"result = []\nfor name, r in zip(img_name, res):\n result.append([name, r])",
"_____no_output_____"
],
[
"result = np.array(result)\nresult_df = pd.DataFrame(result,columns = ['Image Name','Predicted_Category'])\n\n# saving the result in csv format\nexport_result = result_df.to_csv('result.csv')",
"_____no_output_____"
],
[
"res.shape",
"_____no_output_____"
],
[
"still lot of error\n we will test on batch",
"_____no_output_____"
]
],
[
[
"# Testing on real_world data",
"_____no_output_____"
]
],
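[
[
"# Added stand-in: image_preprocessing() below looks up a per-image\n# (alpha, beta, threshold) contrast/binarization tuple in a dict `d` keyed by\n# the file's base name. The original mapping is not in this notebook, so as an\n# assumption every image defaults to the hand-tuned values used later in\n# image_preprocessing2.\nfrom collections import defaultdict\nd = defaultdict(lambda: (3, 170, 110))",
"_____no_output_____"
]
],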
[
[
"def image_preprocessing(file_name):\n name = file_name.split('.')[0]\n alpha,beta,th = d[name]\n img = cv2.imread('test_real/{}'.format(file_name),cv2.IMREAD_GRAYSCALE) # reading in grayscale\n new_img = cv2.resize(img,(28,28)) # resizing it to (28x28) matrix\n\n new_img = alpha*new_img + beta # changing contrast\n for i in range(28):\n for j in range(28):\n if new_img[i][j] < th:\n new_img[i][j] = 0\n else:\n new_img[i][j] = 255\n plt.imshow(new_img)\n plt.show()\n return (new_img.reshape(1,28,28,1))",
"_____no_output_____"
]
],
[
[
"# testing on real data and saving it to csv file",
"_____no_output_____"
]
],
[
[
"res = []\nfor img in os.listdir('test_real'):\n print(img)\n final_img = image_preprocessing(img)\n pred = model.predict(final_img)\n res.append([img,*list(np.argmax(pred,axis=1))])",
"_____no_output_____"
],
[
"res = np.array(res)\nresult_df = pd.DataFrame(res,columns = ['Name','Predicted_class'])\n\n# saving the result in csv format\nexport_result = result_df.to_csv('result_real.csv')",
"_____no_output_____"
],
[
"result_df.head()",
"_____no_output_____"
]
],
[
[
"## Testing on Sequences of data",
"_____no_output_____"
]
],
[
[
"img = cv2.imread('seq.jpg',cv2.IMREAD_GRAYSCALE) # reading in grayscale\nplt.imshow(img)\nplt.show()",
"_____no_output_____"
],
[
"def image_preprocessing2(img):\n alpha,beta,th = 3, 170, 110\n new_img = cv2.resize(img,(28,28)) # resizing it to (28x28) matrix\n new_img = alpha*new_img + beta # changing contrast\n for i in range(28):\n for j in range(28):\n if new_img[i][j] < th:\n new_img[i][j] = 0\n else:\n new_img[i][j] = 255\n plt.imshow(new_img)\n plt.show()\n return (new_img.reshape(1,28,28,1))\n\n\ndef thresh_callback(file_name):\n thresh = 127\n max_thresh = 255\n img = cv2.imread(file_name)\n img = cv2.resize(img,(450,450))\n \n img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n src_gray = cv2.blur(img_gray, (3,3))\n \n \n canny_output = cv2.Canny(src_gray, thresh, max_thresh)\n \n \n _,contours,_ = cv2.findContours(canny_output, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n \n \n contours_poly = [None]*len(contours)\n boundRect = []\n for i, c in enumerate(contours):\n if cv2.contourArea(c) > 300:\n contours_poly[i] = cv2.approxPolyDP(c, 1, False)\n boundRect.append(cv2.boundingRect(contours_poly[i]))\n \n drawing = img\n \n \n for i in range(len(boundRect)):\n color = (255,0,0)\n cv2.rectangle(drawing, (int(boundRect[i][0]), int(boundRect[i][1])), \\\n (int(boundRect[i][0]+boundRect[i][2]), int(boundRect[i][1]+boundRect[i][3])), color, 2)\n plt.figure(figsize=(14,8))\n plt.imshow(drawing)\n plt.show()\n s = list(set(boundRect))\n for i in range(len(s)):\n crop_img = img_gray[s[i][1]:s[i][1]+s[i][3] ,s[i][0]:s[i][0]+s[i][2]]\n# plt.imshow(crop_img)\n# plt.show()\n final_img = image_preprocessing2(crop_img)\n predicted_value = model.predict(final_img)\n print(predicted_value)\n print(\"Predicted Result is: \",*list(np.argmax(predicted_value,axis=1)))\n \n# file_name = 'data/{}.jpeg'.format(str(i))\n# cv2.imwrite(file_name,crop_img)\n return drawing\n \n\ndrawing = thresh_callback('seq.jpg')\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e71399375222d9a55e7e57ddf0ef4e6040463f7b | 30,365 | ipynb | Jupyter Notebook | SpinorBECSimulation/MeanField/RF rotations.ipynb | ZachGlassman/SpinorBECSimulation | 8821a8bc150eda2aa36ce6b39ff178a3ddc99df1 | [
"MIT"
] | null | null | null | SpinorBECSimulation/MeanField/RF rotations.ipynb | ZachGlassman/SpinorBECSimulation | 8821a8bc150eda2aa36ce6b39ff178a3ddc99df1 | [
"MIT"
] | null | null | null | SpinorBECSimulation/MeanField/RF rotations.ipynb | ZachGlassman/SpinorBECSimulation | 8821a8bc150eda2aa36ce6b39ff178a3ddc99df1 | [
"MIT"
] | null | null | null | 201.092715 | 26,974 | 0.897283 | [
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.integrate import ode\nfrom numpy.lib import scimath\n%matplotlib inline",
"_____no_output_____"
],
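[
"# Added note: rf() below reads a global rf drive frequency `omega`, which this\n# notebook never defines, so the integration cell would raise a NameError.\n# The value here is an assumption (equal to the detuning dm = 1 set below,\n# i.e. on resonance); change it as needed.\nomega = 1.0",
"_____no_output_____"
],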
[
"def rf(t,y,*args):\n \"\"\"rf ode\n need omega - rf frequence\n rabi - rabi frequency\n delta - quadratic shift\"\"\"\n c1 = y[0]\n c0 = y[1]\n cm1 = y[2]\n s = 1j*rabi/np.sqrt(2) *np.cos(omega*t)\n #now define equations\n f0 = s * c0 * np.exp(-1j*dm*t)\n f1 = s*cm1*np.exp(-1j*dp*t)+s*c1*np.exp(1j*dm*t)\n f2 = s*c0*np.exp(1j*dp*t)\n return [f0,f1,f2] \n",
"_____no_output_____"
],
[
"rabi = 10\ndm = 1\ndp = dm\ndef normalize_state(y):\n y = np.asarray(y)\n norm = np.abs(np.dot(np.conj(y),y))\n return y/norm\ny0 = [np.complex(.2,0),np.complex(1,0),np.complex(.8,0)]\ny0 = normalize_state(y0)\nprint(y0)\nr = ode(rf).set_integrator('zvode')\nr.set_initial_value(y0,0)\ndt = 1e-3\ntfinal = 2*np.pi / rabi+.1\nt = np.linspace(0,tfinal, tfinal/dt+1) \nans = np.zeros((len(t),3),dtype = complex)\nstep = 0\nwhile r.successful() and r.t < tfinal:\n ans[step] = np.asarray(r.integrate(r.t + dt))\n step += 1",
"[ 0.11904762+0.j 0.59523810+0.j 0.47619048+0.j]\n"
],
[
"toplot = (np.conj(ans) * ans).real\n\nplt.plot(t,toplot[:,0], label = r'$c_1$')\nplt.plot(t,toplot[:,1], label = r'$c_0$')\nplt.plot(t,toplot[:,2], label = r'$c_{-1}$')\nplt.legend()\nplt.axvline(2*np.pi/rabi,0,1,ls='--',c='black')\nplt.axvline(np.pi/rabi,0,1,ls='--',c='black')",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code"
]
] |
e7139d5345eaa0ecd09f720b0131c6bd8b2f287a | 11,513 | ipynb | Jupyter Notebook | Chapter05/Pandas.ipynb | AcornPublishing/healthcare-analytics | 7a6f81fd03da0c32bbc123f77c62e30718c8e08e | [
"MIT"
] | 30 | 2018-11-13T14:46:32.000Z | 2022-01-13T12:53:47.000Z | Chapter05/Pandas.ipynb | AcornPublishing/healthcare-analytics | 7a6f81fd03da0c32bbc123f77c62e30718c8e08e | [
"MIT"
] | 1 | 2019-06-08T04:42:29.000Z | 2019-06-08T04:42:29.000Z | Chapter05/Pandas.ipynb | PacktPublishing/Healthcare-Analytics-Made-Simple | e51cd01a74d9f63223dc2c0e858b2fd386618c59 | [
"MIT"
] | 13 | 2018-08-07T10:13:57.000Z | 2022-01-13T12:53:48.000Z | 25.247807 | 86 | 0.345349 | [
[
[
"import pandas as pd",
"_____no_output_____"
],
[
"data = {\n 'col1': [1, 2, 3],\n 'col2': [4, 5, 6],\n 'col3': ['x', 'y', 'z']\n}\n\ndf = pd.DataFrame(data)\nprint(df)",
" col1 col2 col3\n0 1 4 x\n1 2 5 y\n2 3 6 z\n"
],
[
"df['new_col1'] = \"\"\ndf['new_col2'] = 0\nprint(df)",
" col1 col2 col3 new_col1 new_col2\n0 1 4 x 0\n1 2 5 y 0\n2 3 6 z 0\n"
],
[
"df['new_col3'] = df[[\n 'col1','col2'\n]].sum(axis=1)\n\nprint(df)",
" col1 col2 col3 new_col1 new_col2 new_col3\n0 1 4 x 0 5\n1 2 5 y 0 7\n2 3 6 z 0 9\n"
],
[
"old_column_list = ['col1','col2']\ndf['new_col4'] = df[old_column_list].apply(sum, axis=1)\nprint(df)",
" col1 col2 col3 new_col1 new_col2 new_col3 new_col4\n0 1 4 x 0 5 5\n1 2 5 y 0 7 7\n2 3 6 z 0 9 9\n"
],
[
"df.drop(['col1','col2'], axis=1, inplace=True)\nprint(df)",
" col3 new_col1 new_col2 new_col3 new_col4\n0 x 0 5 5\n1 y 0 7 7\n2 z 0 9 9\n"
],
[
"df['new_col5'] = ['7', '8', '9']\ndf['new_col6'] = ['10', '11', '12']\n\nfor str_col in ['new_col5','new_col6']:\n df[[str_col]] = df[[str_col]].apply(pd.to_numeric)\n \nprint(df)",
" col3 new_col1 new_col2 new_col3 new_col4 new_col5 new_col6\n0 x 0 5 5 7 10\n1 y 0 7 7 8 11\n2 z 0 9 9 9 12\n"
],
[
"df2 = pd.DataFrame({\n 'col3': ['a', 'b', 'c', 'd'],\n 'new_col1': '',\n 'new_col2': 0,\n 'new_col3': [11, 13, 15, 17],\n 'new_col4': [17, 19, 21, 23],\n 'new_col5': [7.5, 8.5, 9.5, 10.5],\n 'new_col6': [13, 14, 15, 16]\n});\nprint(df2)",
" col3 new_col1 new_col2 new_col3 new_col4 new_col5 new_col6\n0 a 0 11 17 7.5 13\n1 b 0 13 19 8.5 14\n2 c 0 15 21 9.5 15\n3 d 0 17 23 10.5 16\n"
],
[
"df3 = pd.concat([df, df2], ignore_index=True)\nprint(df3)",
" col3 new_col1 new_col2 new_col3 new_col4 new_col5 new_col6\n0 x 0 5 5 7.0 10\n1 y 0 7 7 8.0 11\n2 z 0 9 9 9.0 12\n3 a 0 11 17 7.5 13\n4 b 0 13 19 8.5 14\n5 c 0 15 21 9.5 15\n6 d 0 17 23 10.5 16\n"
],
[
"my_list = df3['new_col3'].tolist()\nprint(my_list)",
"[5, 7, 9, 11, 13, 15, 17]\n"
],
[
"value = df3.loc[0,'new_col5']\nprint(value)",
"7.0\n"
],
[
"df3.loc[[2,3,4],['new_col4','new_col5']] = 1\nprint(df3)",
" col3 new_col1 new_col2 new_col3 new_col4 new_col5 new_col6\n0 x 0 5 5 7.0 10\n1 y 0 7 7 8.0 11\n2 z 0 9 1 1.0 12\n3 a 0 11 1 1.0 13\n4 b 0 13 1 1.0 14\n5 c 0 15 21 9.5 15\n6 d 0 17 23 10.5 16\n"
],
[
"value2 = df3.iloc[0,5]\nprint(value2)",
"7.0\n"
],
[
"partial_df3 = df3.loc[1:3,'new_col2':'new_col4']\nprint(partial_df3)",
" new_col2 new_col3 new_col4\n1 0 7 7\n2 0 9 1\n3 0 11 1\n"
],
[
"value3 = df3.iat[3,3]\nprint(value3)",
"11\n"
],
[
"df3_filt = df3[df3['new_col3'] > 10]\nprint(df3_filt)",
" col3 new_col1 new_col2 new_col3 new_col4 new_col5 new_col6\n3 a 0 11 1 1.0 13\n4 b 0 13 1 1.0 14\n5 c 0 15 21 9.5 15\n6 d 0 17 23 10.5 16\n"
],
[
"df3 = df3.sort_values('new_col4', ascending=True)\nprint(df3)",
" col3 new_col1 new_col2 new_col3 new_col4 new_col5 new_col6\n2 z 0 9 1 1.0 12\n3 a 0 11 1 1.0 13\n4 b 0 13 1 1.0 14\n0 x 0 5 5 7.0 10\n1 y 0 7 7 8.0 11\n5 c 0 15 21 9.5 15\n6 d 0 17 23 10.5 16\n"
],
[
"df_join_df2 = df.join(df2, how='outer', rsuffix='r')\nprint(df_join_df2)",
" col3 new_col1 new_col2 new_col3 new_col4 new_col5 new_col6 col3r \\\n0 x 0.0 5.0 5.0 7.0 10.0 a \n1 y 0.0 7.0 7.0 8.0 11.0 b \n2 z 0.0 9.0 9.0 9.0 12.0 c \n3 NaN NaN NaN NaN NaN NaN NaN d \n\n new_col1r new_col2r new_col3r new_col4r new_col5r new_col6r \n0 0 11 17 7.5 13 \n1 0 13 19 8.5 14 \n2 0 15 21 9.5 15 \n3 0 17 23 10.5 16 \n"
],
[
"tallies = df3.groupby('new_col4').size()\nprint(tallies)",
"new_col4\n1 3\n5 1\n7 1\n21 1\n23 1\ndtype: int64\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e713afed79146c8e125969cc7b7b2783bcca8740 | 4,362 | ipynb | Jupyter Notebook | labtory-master/VAE/crop_large_image.ipynb | yumion/onodera-lab | 34f06e1f0eff8ce3a8d02ddc07e90ce4d0635c9c | [
"Apache-2.0"
] | null | null | null | labtory-master/VAE/crop_large_image.ipynb | yumion/onodera-lab | 34f06e1f0eff8ce3a8d02ddc07e90ce4d0635c9c | [
"Apache-2.0"
] | null | null | null | labtory-master/VAE/crop_large_image.ipynb | yumion/onodera-lab | 34f06e1f0eff8ce3a8d02ddc07e90ce4d0635c9c | [
"Apache-2.0"
] | null | null | null | 31.157143 | 107 | 0.507795 | [
[
[
"## 大きい2D画像を切り取る",
"_____no_output_____"
]
],
[
[
"import cv2\nimport numpy as np\nimport glob",
"_____no_output_____"
],
[
"files = sorted(glob.glob('/media/dl-box/HD-PNFU3/20171210T-C-009TotalScanning'+'/*.tif'))",
"_____no_output_____"
],
[
"class CropWindow:\n def __init__(self, slide_step=100, window_shape=(512, 512), output_shape=(512, 512)): \n self.slide_step = slide_step # スライドウィンドウを動かすピクセル数\n self.height = window_shape[0] # 切り取るウィンドウサイズ\n self.width = window_shape[1]\n self.output_shape = output_shape\n self.background_rate = 0.5 # この割合以上背景がある場合は無視する\n self.height_ex = False # 画像がぴったり切れなくて、余分にもう一度カットするか\n self.width_ex = False\n \n # 画像をウィンドウサイズに切り取って保存する \n def crop_imgs(self, filenames):\n if type(filenames) == list:\n for filename in filenames:\n self.crop_save(filename)\n # ファイル名が配列でない場合\n elif type(filenames) == str:\n self.crop_save(filenames)\n \n def crop_save(self, filename):\n img = cv2.imread(filename)\n crop_img = []\n # 何回スライドできるか、最後がぴったり画像が切り取れるかあまるか。\n if (img.shape[0] - self.height) % self.slide_step == 0:\n height_steps = (img.shape[0] - self.height)//self.slide_step\n else:\n height_steps = (img.shape[0] - self.height)//self.slide_step + 1\n self.heigth_ex = True\n if (img.shape[1] - self.height) % self.slide_step == 0:\n width_steps = (img.shape[1] - self.height)//self.slide_step\n else:\n width_steps = (img.shape[1] - self.height)//self.slide_step + 1\n self.width_ex = True\n for i in range(height_steps):\n # 最後のステップかつ、最後にずれがある場合(高さ方向)\n if i == height_steps-1 and self.height_ex:\n cropped_height = img[-self.height:, :, :]\n else:\n cropped_height = img[i*self.slide_step:i*self.slide_step+self.height, :, :]\n for j in range(width_steps):\n # 最後のステップかつ、最後にずれがある場合(横方向)\n if j == width_steps-1 and self.width_ex:\n cropped = cropped_height[:, -self.width: , :]\n else:\n cropped = cropped_height[:, j*self.slide_step:j*self.slide_step+self.width, :]\n if np.sum(cropped==0)/np.prod(cropped.shape) < self.background_rate:\n crop_img.append(cropped)\n for i,pic in enumerate(crop_img):\n pic = cv2.resize(pic, self.output_shape)\n cv2.imwrite('./cropped_test/'+str(i)+'-'+filename.split('/')[-1],pic)",
"_____no_output_____"
],
[
"cropping = CropWindow(slide_step=512, window_shape=(512,512), output_shape=(512, 512))",
"_____no_output_____"
],
[
"cropping.crop_imgs(files[60])",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
e713c191d6322db1a1ad3ecda2171b8f83ab02b7 | 16,796 | ipynb | Jupyter Notebook | Clean_data.ipynb | jlivshots/Response-time-analytics | c0d0b175360a32aa50194dafb0da136df7b6147c | [
"MIT"
] | 1 | 2021-07-23T04:37:22.000Z | 2021-07-23T04:37:22.000Z | Clean_data.ipynb | jlivshots/Response-time-analytics | c0d0b175360a32aa50194dafb0da136df7b6147c | [
"MIT"
] | null | null | null | Clean_data.ipynb | jlivshots/Response-time-analytics | c0d0b175360a32aa50194dafb0da136df7b6147c | [
"MIT"
] | null | null | null | 35.965739 | 910 | 0.444749 | [
[
[
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport time\nimport datetime\nfrom dateutil import parser\nfrom datetime import datetime\n",
"_____no_output_____"
]
],
[
[
"# CLEAN dataframe for training\n## Scale\n* Minutes for arrival\n* Time of day (minutes since midnight)\n* Day of week (0-6)\n* Day of month (0-30)\n* Day of the year (0-364)\n* Month (0-11)\n* Year\n* Lat\n* Long\n\n## Categorical\n* Crime category\n* Geo Code\n\n",
"_____no_output_____"
]
],
[
[
"incidents_df = pd.read_csv(\"data/Police_Incidents_Since_1988.csv\", sep = None, dtype={'INCIDENT': np.str_, 'DATE OCCURRED': np.str_, 'TIME OCCURED' : np.str_, 'YEAR OCCURRED': np.int_, 'MONTH OCCURRED': np.int_, 'DATE REPORTED': np.str_, 'TIME ARRIVED': np.str_, 'DATE ARRIVED': np.str_, 'GEO CODE': np.str_, 'HOUSE NUMBER BLOCK': np.str_, 'STREET': np.str_, 'CRIME CODE': np.float64, 'CRIME DESCRIPTION': np.str_, 'CRIME CATEGORY': np.str_, 'CRIME CATEGORY DESCRIPTION': np.str_, 'CSA DESCRIPTION': np.str_, 'PLACE CODE DESCRIPTION': np.str_, 'WEAPONS CODE 1': np.str_, 'WEAPON 1 DESCRIPTION': np.str_, 'WEAPONS CODE 2': np.str_, 'WEAPON 2 DESCRIPTION': np.str_, 'WEAPONS CODE 3': np.str_, 'WEAPON 3 DESCRIPTION': np.str_, 'BIAS CODE': np.float64, 'BIAS DESCRIPTION': np.str_, 'STATUS CODE': np.float64, 'STATUS DESCRIPTION': np.str_, 'COUNTER': int, 'MAPPING ADDRESS': np.str_}, engine = 'python')\nincidents_df = incidents_df.drop(columns=['MONTH OCCURRED', 'YEAR OCCURRED'])\n",
"_____no_output_____"
],
[
"#pd.set_option('display.max_columns', None)\n\nincidents_df.head()",
"_____no_output_____"
],
[
"length = len(incidents_df)\nlatitude = [np.nan] * length\nlongitude =[np.nan] * length\ntotal_min =[np.nan] * length\nday_of_week =[np.nan] * length\nday_of_month =[np.nan] * length\nday_of_year =[np.nan] * length\nmonth = [np.nan] * length\nyear = [np.nan] * length\ntime =[np.nan] * length\nfor index in range(length):\n if index %50000==0:\n print(\"currently on row \", index, \"out of \", length)\n\n #lat/long\n value = str(incidents_df['MAPPING ADDRESS'][index])\n left_ind = value.find('(')\n mid_ind = value.find(',', left_ind)\n right_ind = value.find(')')\n if left_ind!=-1 and mid_ind !=-1 and right_ind !=-1:\n latitude[index] = float(value[left_ind+1: mid_ind])\n longitude[index] = float(value[mid_ind+1:right_ind])\n\n arrive = (str(incidents_df['DATE REPORTED'][index]) + ' ' + str(incidents_df['TIME REPORTED'][index]))\n report = (str(incidents_df['DATE ARRIVED'][index]) + ' ' + str(incidents_df['TIME ARRIVED'][index]))\n \n #response time\n try:\n a = parser.parse(arrive)\n b = parser.parse(report)\n delt = b-a\n elapsed = delt.total_seconds()//60\n if elapsed>=0 and elapsed<300: #ignoring 0 minutes because it was probably not a caller\n time[index] = int(elapsed)\n except:\n pass\n \n #total minutes\n try:\n a = parser.parse(str(incidents_df['DATE REPORTED'][index]))\n \n # day_of_week\n day_of_week[index] = int(a.weekday())\n \n # day_of_month\n day_of_month[index] = int(a.day)\n \n # day_of_year\n day_of_year[index] = int(a.timetuple().tm_yday)\n \n month[index] = a.month\n year[index] = a.year\n except:\n pass\n \n try:\n a = parser.parse(incidents_df['TIME REPORTED'][index])\n \n # total_min\n total_min[index] = int(a.hour*60+a.minute)\n except:\n pass\n \nprint('done!')\nincidents_df['MONTH OCCURRED'] = month\nincidents_df['YEAR OCCURRED'] = year\nincidents_df['LATITUDE'] = latitude\nincidents_df['LONGITUDE'] = longitude\nincidents_df['RESPONSE TIME'] = time\nincidents_df['MINS OF DAY'] = total_min\nincidents_df['DAY OF WEEK'] = day_of_week\nincidents_df['DAY OF MONTH'] = day_of_month\nincidents_df['DAY OF YEAR'] = day_of_year\npd.set_option('display.max_rows', None)\n# incidents_df.head()",
"currently on row 0 out of 474609\ncurrently on row 50000 out of 474609\ncurrently on row 100000 out of 474609\ncurrently on row 150000 out of 474609\ncurrently on row 200000 out of 474609\ncurrently on row 250000 out of 474609\ncurrently on row 300000 out of 474609\ncurrently on row 350000 out of 474609\ncurrently on row 400000 out of 474609\ncurrently on row 450000 out of 474609\ndone!\n"
]
],
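[
[
"# Hedged sketch: a vectorized alternative to the row-wise loop above (same incidents_df columns assumed).\n# pd.to_datetime with errors='coerce' turns unparseable rows into NaT, so no try/except is needed.\nreported = pd.to_datetime(incidents_df['DATE REPORTED'].astype(str) + ' ' + incidents_df['TIME REPORTED'].astype(str), errors='coerce')\narrived = pd.to_datetime(incidents_df['DATE ARRIVED'].astype(str) + ' ' + incidents_df['TIME ARRIVED'].astype(str), errors='coerce')\nelapsed = (arrived - reported).dt.total_seconds() // 60\nincidents_df['RESPONSE TIME'] = elapsed.where((elapsed >= 0) & (elapsed < 300))\nincidents_df['MINS OF DAY'] = reported.dt.hour * 60 + reported.dt.minute\nincidents_df['DAY OF WEEK'] = reported.dt.dayofweek",
"_____no_output_____"
]
],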
[
[
"## Two dataframes are created:\n* cleaned_data.csv has the cleaned data\n* cleaned_data_no_zeros.csv has cleaned data where all rows have response time > 0",
"_____no_output_____"
]
],
[
[
"streamlined_df = incidents_df[['CRIME CATEGORY DESCRIPTION','RESPONSE TIME','MINS OF DAY', 'DAY OF WEEK', 'DAY OF MONTH', 'DAY OF YEAR', 'MONTH OCCURRED', 'YEAR OCCURRED','GEO CODE', 'LATITUDE', 'LONGITUDE']]\n\nstreamlined_df = streamlined_df[streamlined_df['RESPONSE TIME'].notna()]\n\nstreamlined_df = streamlined_df[streamlined_df['CRIME CATEGORY DESCRIPTION'] != 'None']\n\nstreamlined_df.to_csv('data/cleaned_data.csv')\n\nstreamlined_no_zeros_df = streamlined_df[streamlined_df['RESPONSE TIME']>0]\n\nstreamlined_no_zeros_df.to_csv('data/cleaned_data_no_zeros.csv')\n# streamlined_df.head(10)",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
e713cdf873cee813eca2ba1a6fdc71d23d2dd7a8 | 770,514 | ipynb | Jupyter Notebook | Scikit - 03 Linear Regression.ipynb | nishantm9/machine-learning | 3bb8f4ea29d36a1ef2fda2b71c1fa62efc2456d6 | [
"Apache-2.0"
] | 51 | 2017-09-28T05:38:48.000Z | 2022-02-27T02:57:02.000Z | Scikit - 03 Linear Regression.ipynb | nishantm9/machine-learning | 3bb8f4ea29d36a1ef2fda2b71c1fa62efc2456d6 | [
"Apache-2.0"
] | 2 | 2018-07-25T10:47:06.000Z | 2019-01-16T11:22:53.000Z | Scikit - 03 Linear Regression.ipynb | nishantm9/machine-learning | 3bb8f4ea29d36a1ef2fda2b71c1fa62efc2456d6 | [
"Apache-2.0"
] | 93 | 2017-08-28T08:59:49.000Z | 2022-03-30T09:45:21.000Z | 284.008109 | 486,834 | 0.9008 | [
[
[
"import pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split, cross_val_score\nimport numpy as np\n\nimport xgboost as xgb\n\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_squared_error, r2_score\nfrom sklearn.preprocessing import PolynomialFeatures, StandardScaler\n\nfrom sklearn.linear_model import LassoCV, Lasso\n\nfrom math import sqrt\n\nimport seaborn as sns\n\n\nnp.set_printoptions(suppress=True, precision=4)\nplt.rcParams['figure.figsize'] = 10, 6\n%matplotlib inline",
"_____no_output_____"
],
[
"df = pd.read_csv(\"https://raw.githubusercontent.com/abulbasar/data/master/startups.csv\")\ndf",
"_____no_output_____"
],
[
"df.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 50 entries, 0 to 49\nData columns (total 5 columns):\nR&D Spend 48 non-null float64\nAdministration 50 non-null float64\nMarketing Spend 47 non-null float64\nState 50 non-null object\nProfit 50 non-null float64\ndtypes: float64(4), object(1)\nmemory usage: 2.0+ KB\n"
]
],
[
[
"There are 50 observations and 5 columns. 4 columns - R&D Spend, Administration and Marketing Spend, and Profile are numeric and one is categorical - State. There is 2 null values in columns R&D Spend feature and and 3 in Marketing Spend. \n\nReplace the null values with median for respective state. ",
"_____no_output_____"
]
],
[
[
"df_null_idx = df[df.isnull().sum(axis = 1) > 0].index\ndf.iloc[df_null_idx]",
"_____no_output_____"
],
[
"median_values = df.groupby(\"State\")[[\"R&D Spend\", \"Marketing Spend\"]].median()\nmedian_values",
"_____no_output_____"
],
[
"df[\"R&D Spend\"] = df.apply(lambda row: median_values.loc[row[\"State\"], \"R&D Spend\"] if np.isnan(row[\"R&D Spend\"]) else row[\"R&D Spend\"], axis = 1 )\ndf[\"Marketing Spend\"] = df.apply(lambda row: median_values.loc[row[\"State\"], \"Marketing Spend\"] if np.isnan(row[\"Marketing Spend\"]) else row[\"Marketing Spend\"], axis = 1 )\ndf.iloc[df_null_idx]",
"_____no_output_____"
],
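[
"# Hedged alternative: the same median-by-State imputation in one groupby/transform pass\n# (a no-op at this point, since the nulls were already filled above).\ncols = ['R&D Spend', 'Marketing Spend']\ndf[cols] = df.groupby('State')[cols].transform(lambda s: s.fillna(s.median()))\ndf.iloc[df_null_idx]",
"_____no_output_____"
],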
[
"# Check if there are any more null values.\ndf.isnull().sum()",
"_____no_output_____"
]
],
[
[
"Let's see the distribution of the Profit using a histogram plot and see if there is any outliers in the data using bosplot.",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize = (8, 6))\nplt.subplot(2, 1, 1)\ndf.Profit.plot.hist(bins = 10, normed = True)\ndf.Profit.plot.kde(title = \"Historgram of Profit\")\n\nplt.subplot(2, 1, 2)\ndf.Profit.plot.box(vert = False, title = \"Boxplot of Profit\")\n\nplt.tight_layout()",
"_____no_output_____"
]
],
[
[
"Profit has one outlier. We can try to take log scale to remove the outlier value before doing any prediction. But for now, let ignore the outlier.\n\nLet's plot association between each pair of columns. ",
"_____no_output_____"
]
],
[
[
"sns.pairplot(df)",
"_____no_output_____"
]
],
[
[
"Displays only the numeric column. Let's how the avg Profit plays for each State.",
"_____no_output_____"
]
],
[
[
"df.groupby(\"State\").Profit.mean().sort_values().plot.bar(title = \"Avg Profit by State\")\nplt.xlabel(\"State\")\nplt.ylabel(\"Profit\")",
"_____no_output_____"
]
],
[
[
"Avg Profit is highest in state of Florida and least in California.\n\nLet's create the y vector containing the outcome column.",
"_____no_output_____"
]
],
[
[
"y = df.Profit.values\ny",
"_____no_output_____"
]
],
[
[
"Create dummy variables for categorical feature.",
"_____no_output_____"
]
],
[
[
"df_features = df.iloc[:, 0:4]\ndf_dummied = pd.get_dummies(df_features, columns=[\"State\"], drop_first=True)\ndf_dummied.sample(10)",
"_____no_output_____"
]
],
[
[
"State column has been replaced by two additional column - one for Florida and one NY. First value in the categorical values CA has been dropped to avoid collinearity issue.\n\nNow, let's create X feature matrix and y outcome vector. ",
"_____no_output_____"
]
],
[
[
"X = df_dummied.values\nX[0, :]",
"_____no_output_____"
]
],
[
[
"Let's normalize the feature values to bring them to a similar scale.",
"_____no_output_____"
]
],
[
[
"scaler = StandardScaler() \nX_std = scaler.fit_transform(X)\npd.DataFrame(X_std).head()",
"_____no_output_____"
]
],
[
[
"Split the X and y into training and test sets.",
"_____no_output_____"
]
],
[
[
"X_train, X_test, y_train, y_test = train_test_split(X_std, y, \n test_size = 0.3, random_state = 100)",
"_____no_output_____"
],
[
"print(\"Training set: \", X_train.shape, y_train.shape)",
"Training set: (35, 5) (35,)\n"
],
[
"print(\"Test set: \", X_test.shape, y_test.shape)",
"Test set: (15, 5) (15,)\n"
]
],
[
[
"Ratio of the size of the training data",
"_____no_output_____"
]
],
[
[
"X_train.shape[0] / df.shape[0]",
"_____no_output_____"
]
],
[
[
"Fit linear regression model",
"_____no_output_____"
]
],
[
[
"lr = LinearRegression()\nlr.fit(X_train, y_train)",
"_____no_output_____"
],
[
"lr.intercept_, lr.coef_",
"_____no_output_____"
]
],
[
[
"By looking at the cofficients, we can conclude that R&D Spend has the higest influence on the outcome variable.\n\nPredict the outcome based on the model",
"_____no_output_____"
]
],
[
[
"y_test_pred = lr.predict(X_test)",
"_____no_output_____"
],
[
"output = pd.DataFrame({\"actual\": y_test, \"prediction\": y_test_pred})\noutput[\"error\"] = output.actual - output.prediction\noutput",
"_____no_output_____"
]
],
[
[
"A simpliest prediction model could have been the average. Let's how the model did overall against one feature.",
"_____no_output_____"
]
],
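[
[
"# Quick baseline sketch: always predict the training mean and compare its RMSE with the model's.\nbaseline_pred = np.full_like(y_test, y_train.mean())\nprint('Baseline rmse:', sqrt(mean_squared_error(y_test, baseline_pred)))",
"_____no_output_____"
]
],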
[
[
"X_test_inv = scaler.inverse_transform(X_test)\nplt.scatter(X_test_inv[:, 0], y_test, alpha = 0.3, c = \"blue\", label = \"Actual\")\nplt.scatter(X_test_inv[:, 0], y_test_pred, c = \"red\", label = \"Predicted\")\n\nplt.xlabel(\"R&D Spend\")\nplt.ylabel(\"Profit\")\nplt.title(\"Profit Actual vs Estimate\")\nplt.legend()",
"_____no_output_____"
],
[
"np.mean((y_test_pred - y_test) ** 2)",
"_____no_output_____"
],
[
"y_train_pred = lr.predict(X_train)",
"_____no_output_____"
]
],
[
[
"Compare the root mean squared error (RMSE) of test dataset against the training.",
"_____no_output_____"
]
],
[
[
"print(\"Test rmse: \", sqrt(mean_squared_error(y_test, y_test_pred)), \n \"\\nTraining rmse:\", sqrt(mean_squared_error(y_train, y_train_pred)))",
"Test rmse: 24074.424825793423 \nTraining rmse: 12413.672826747377\n"
]
],
[
[
"r2 score can have a max value 1, negative values of R2 means suboptimal model ",
"_____no_output_____"
]
],
[
[
"r2_score(y_test, y_test_pred), r2_score(y_train, y_train_pred)",
"_____no_output_____"
]
],
[
[
"On the training the both RMSE and R2 scores perform natually better than those on the test dataset.",
"_____no_output_____"
],
[
"Let's calculate R2 score manually. ",
"_____no_output_____"
]
],
[
[
"SSR = np.sum((y_train - y_train_pred) ** 2) # Sum of squared residuals\nSST = np.sum((y_train - np.mean(y_train_pred)) ** 2) # Sum of squared totals\nR2 = 1 - SSR/SST\nR2",
"_____no_output_____"
]
],
[
[
"R2 can be viewed as (1 - mse/variance(y))",
"_____no_output_____"
],
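[
"A quick numerical check of that identity, using the training predictions computed above (`np.var` is the population variance, i.e. `ddof=0`):\n\n```python\n1 - mean_squared_error(y_train, y_train_pred) / np.var(y_train)\n```",
"_____no_output_____"
],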
[
"### Significance Scores for feature selection",
"_____no_output_____"
]
],
[
[
"from sklearn.feature_selection import f_regression",
"_____no_output_____"
],
[
"_, p_vals = f_regression(X_train, y_train)\np_vals",
"_____no_output_____"
],
[
"pd.DataFrame({\"feature\": df_dummied.columns, \"p_value\": p_vals})",
"_____no_output_____"
]
],
[
[
"p-value indicates the significant scores for each feature. p-value < 0.05 indicates, the corresponding feature is statistically significant. We can rebuild the model excluding the non-significant features one by one until all remaining features are significant. ",
"_____no_output_____"
],
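[
"A rough filtering sketch based on these p-values (note that `f_regression` is univariate, so one pass suffices; `p_vals` and `df_dummied` are defined above):\n\n```python\nmask = p_vals < 0.05\nX_train_sig = X_train[:, mask]\nprint('Kept features:', list(df_dummied.columns[mask]))\n```",
"_____no_output_____"
],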
[
"# Power Plant Dataset\nLet's look at another dataset",
"_____no_output_____"
]
],
[
[
"df = pd.read_csv(\"/data/Combined_Cycle_Power_Plant.csv\")\ndf.head()",
"_____no_output_____"
],
[
"X = df.iloc[:, 0:4].values\ny = df.PE.values",
"_____no_output_____"
],
[
"sns.pairplot(df)",
"_____no_output_____"
],
[
"scaler = StandardScaler()\nX_std = scaler.fit_transform(X)",
"_____no_output_____"
],
[
"X_train, X_test, y_train, y_test = train_test_split(X_std, y, test_size = 0.3, random_state = 1)",
"_____no_output_____"
],
[
"def rmse(y_true, y_pred):\n return sqrt(mean_squared_error(y_true, y_pred))",
"_____no_output_____"
],
[
"lr = LinearRegression(normalize=False)\nlr.fit(X_train, y_train)\ny_train_pred = lr.predict(X_train)\ny_test_pred = lr.predict(X_test)\nrmse(y_test, y_test_pred)",
"_____no_output_____"
],
[
"from scipy import stats",
"_____no_output_____"
],
[
"residuals = y_test - y_test_pred\n\nplt.figure(figsize=(15, 6))\nplt.subplot(1, 2, 1)\nplt.scatter(y_test, residuals)\nplt.xlabel(\"y_test\")\nplt.ylabel(\"Residuals\")\nplt.hlines([0], xmin = 420, xmax = 500, linestyles = \"dashed\")\n\nplt.subplot(1, 2, 2)\nstats.probplot(residuals, plot=plt)",
"_____no_output_____"
]
],
[
[
"Residual plots show there are outliers in the lower end of the y_test values. qqPlot shows that residuals do not exhibit normaality, indicating non linearity in the model.",
"_____no_output_____"
]
],
[
[
"poly = PolynomialFeatures(degree=2)\n\nX = df.iloc[:, 0:4].values\nX_poly = poly.fit_transform(X)\nX_poly_train, X_poly_test, y_train, y_test = train_test_split(X_poly, y, test_size = 0.3, random_state = 100)\nX_poly_train_std = scaler.fit_transform(X_poly_train)\nX_poly_test_std = scaler.transform(X_poly_test)\n\npd.DataFrame(X_poly_train_std).head()",
"_____no_output_____"
],
[
"lr.fit(X_poly_train_std, y_train)\nprint(\"Train rmse: \", rmse(y_train, lr.predict(X_poly_train_std)))\nprint(\"Test rmse: \", rmse(y_test, lr.predict(X_poly_test_std)))",
"Train rmse: 4.246283890430433\nTest rmse: 4.276988298022075\n"
],
[
"print(lr.intercept_, lr.coef_)",
"454.309651218 [ 0. -39.9701 -42.9304 92.6406 61.3886 4.9224 7.0201 20.0357\n -2.6156 -1.2008 36.9954 0.0829 -91.48 -57.9414 -3.7064]\n"
]
],
[
[
"Polynomial regression generally sufferes from overfitting. Let's regularize the model using Lasso.",
"_____no_output_____"
]
],
[
[
"lasso = Lasso(alpha=0.03, max_iter=10000, normalize=False, random_state=100)\nlasso.fit(X_poly_train_std, y_train)\nprint(\"Train rmse: \", rmse(y_train, lasso.predict(X_poly_train_std)))\nprint(\"Test rmse: \", rmse(y_test, lasso.predict(X_poly_test_std)))\nprint(lasso.intercept_, lasso.coef_)",
"Train rmse: 4.330404075518093\nTest rmse: 4.35914090833783\n454.309651218 [ 0. -17.3624 -1.4063 0.4679 0. 4.471 0. -0.\n -1.0568 -0. -0.435 -1.6751 0. 0. -0. ]\n"
]
],
[
[
"Let's find cross validation score that accuracy score is more reliable in a sense that it incorporates every piece of is incorporated in both training and testing.",
"_____no_output_____"
]
],
[
[
"X_poly_std = scaler.fit_transform(X_poly)\nlasso = Lasso(alpha=0.03, max_iter=10000, random_state=100)\nscores = cross_val_score(lasso, X_poly_std, y, cv = 10, scoring=\"neg_mean_squared_error\")\nscores = np.sqrt(-scores)\nprint(\"RMSE scores\", scores)\nprint(\"Mean rmse: \", np.mean(scores))",
"RMSE scores [ 4.307 4.371 4.3509 4.3279 4.3506 4.3315 4.4165 4.2604 4.3743\n 4.3033]\nMean rmse: 4.33935182329\n"
]
],
[
[
"### Encapsulate the steps in a pipeline",
"_____no_output_____"
]
],
[
[
"from sklearn.pipeline import Pipeline",
"_____no_output_____"
],
[
"pipeline = Pipeline(steps = [\n (\"poly\", PolynomialFeatures(degree=2, include_bias=False)),\n (\"scaler\", StandardScaler()),\n (\"lasso\", Lasso(alpha=0.03, max_iter=10000, normalize=False, random_state=1))\n])\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 1)\npipeline.fit(X_train, y_train)\nrmse(y_test, pipeline.predict(X_test))",
"_____no_output_____"
]
],
[
[
"LassoCV helps find the best alpha. We could also use model tuning techqniues to find best alpha as well.",
"_____no_output_____"
]
],
[
[
"# Find best alpha\nlassocv = LassoCV(cv = 10, max_iter=10000, tol=1e-5)\nlassocv.fit(X_poly_std, y)\nprint(\"Lassocv alpha: \", lassocv.alpha_)\n\n# Apply the best alpha to find cross validation score\nlasso = Lasso(alpha = lassocv.alpha_, max_iter=10000, random_state=100)\nscores = cross_val_score(lasso, X_poly_std, y, cv = 10, scoring=\"neg_mean_squared_error\")\nprint(\"Mean rmse: \", np.mean(np.sqrt(-scores)))",
"Lassocv alpha: 0.0161806999848\nMean rmse: 4.32254217026\n"
]
],
[
[
"Look at the cofficients values. Many of the features are not zero making the model parsimonious hence more robust - that is less prone to overfitting.",
"_____no_output_____"
],
[
"Let's plot how coefficient reached 0 values by varying the alpha valuess.",
"_____no_output_____"
]
],
[
[
"coefs = []\nalphas = 10 ** np.linspace(-5, 5, 20)\nfor alpha in alphas:\n lasso = Lasso(alpha=alpha, max_iter=10000, tol=1e-5,random_state=100)\n lasso.fit(X_poly_std, y)\n coefs.append(lasso.coef_)\n\nplt.plot(alphas, coefs)\nplt.xscale(\"log\")\nplt.xlabel(\"Alpha (penalty term on the coefficients)\")\nplt.ylabel(\"Coefficients of the features\")\n ",
"/Users/abulbasar/anaconda/lib/python3.6/site-packages/sklearn/linear_model/coordinate_descent.py:491: ConvergenceWarning: Objective did not converge. You might want to increase the number of iterations. Fitting data with very small alpha may cause precision problems.\n ConvergenceWarning)\n"
]
],
[
[
"From this graph, which alpha values should we select. That question can be answered by looking which alpha values gives the best performance (rmse for example). lassocv function does that for us, or we can use model tuning techniques using grid search - that will be explained later.",
"_____no_output_____"
],
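[
"A hedged grid-search sketch for choosing alpha (grid search is explained later; `X_poly_std` and `y` are from the cells above):\n\n```python\nfrom sklearn.model_selection import GridSearchCV\ngs = GridSearchCV(Lasso(max_iter=10000, random_state=100),\n                  {'alpha': 10 ** np.linspace(-3, 1, 20)},\n                  cv=10, scoring='neg_mean_squared_error')\ngs.fit(X_poly_std, y)\nprint(gs.best_params_)\n```",
"_____no_output_____"
],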
[
"### Xgboost\n",
"_____no_output_____"
]
],
[
[
"poly = PolynomialFeatures(degree=2)\n\nX = df.iloc[:, 0:4].values\nX_poly = poly.fit_transform(X)\nX_poly_train, X_poly_test, y_train, y_test = train_test_split(X_poly, y, test_size = 0.3, random_state = 100)\nX_poly_train_std = scaler.fit_transform(X_poly_train)\nX_poly_test_std = scaler.transform(X_poly_test)",
"_____no_output_____"
],
[
"gbm = xgb.XGBRegressor(max_depth=10, learning_rate=0.1, n_estimators=100, \n objective='reg:linear', booster='gbtree', \n reg_alpha=0.01, reg_lambda=1, random_state=0)\ngbm.fit(X_poly_train_std, y_train)\nprint(\"rmse:\", rmse(y_test, gbm.predict(X_poly_test_std)))",
"rmse: 1.8581129118988502\n"
],
[
"param = {'silent':1, \n 'objective':'reg:linear', \n 'booster':'gbtree',\n 'alpha': 0.01, \n 'lambda': 1\n }\n\ndtrain = xgb.DMatrix(X_poly_train_std, label=y_train)\ndtest = xgb.DMatrix(X_poly_test_std, label=y_test)\nwatchlist = [(dtrain,'eval'), (dtest, 'train')]\nnum_round = 100\nbst = xgb.train(param, dtrain, num_round, watchlist, verbose_eval=False)\nprint(\"rmse:\", rmse(y_test, bst.predict(dtest)))\n\nplt.figure(figsize=(8, 10))\nxgb.plot_importance(bst)",
"rmse: 2.019509243562749\n"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
]
] |
e713d4ea2e1a43b9f33d7c57bf319129e2cffb08 | 38,463 | ipynb | Jupyter Notebook | lectures/lec15/lec15.ipynb | anmolsahoo25/cs3100_f19 | 10a7be3a1bc1f5622f82353d62b66394528f4927 | [
"MIT"
] | null | null | null | lectures/lec15/lec15.ipynb | anmolsahoo25/cs3100_f19 | 10a7be3a1bc1f5622f82353d62b66394528f4927 | [
"MIT"
] | null | null | null | lectures/lec15/lec15.ipynb | anmolsahoo25/cs3100_f19 | 10a7be3a1bc1f5622f82353d62b66394528f4927 | [
"MIT"
] | null | null | null | 24.009363 | 394 | 0.460209 | [
[
[
"<center>\n\n<h1 style=\"text-align:center\"> Monads </h1>\n<h2 style=\"text-align:center\"> CS3100 Fall 2019 </h2>\n</center>",
"_____no_output_____"
],
[
"## Review\n\n### Previously\n\n* Streams, laziness and memoization\n\n### This lecture\n\n* Monads\n + Dealing with **effects** in a **pure** setting",
"_____no_output_____"
],
[
"## Whence Monads\n\n* The term \"monad\" come from **Category Theory**\n + Category theory is the study of mathematical abstractions\n + Out of scope for this course\n + We will focus on **programming with monads**.\n* Monads were popularized by the Haskell programming language\n + Haskell is **purely functional** programming languages\n + Unlike OCaml, Haskell separates pure code from side-effecting code through the use of monads. ",
"_____no_output_____"
],
[
"## What is a Monad?\n\nA monad is any implementation that satisfies the following signature:",
"_____no_output_____"
]
],
[
[
"module type Monad = sig\n type 'a t\n val return : 'a -> 'a t\n val bind : 'a t -> ('a -> 'b t) -> 'b t\nend",
"_____no_output_____"
]
],
[
[
"and the **monad laws**.",
"_____no_output_____"
],
[
"## Example: Interpreter\n\n* All of this seems very abstract (as many FP concepts are).\n + Monad is a design pattern rather than a language feature.\n* An example will help us see the pattern.\n + Overtime, you'll spot monads everywhere.\n* Let's write an interpreter for artihmetic expressions",
"_____no_output_____"
],
[
"## Interpreting artihmetic expressions",
"_____no_output_____"
]
],
[
[
"type expr = Val of int | Plus of expr * expr | Div of expr * expr",
"_____no_output_____"
]
],
[
[
"* Our goal is to make the interpreter a **total function**.\n + Produces a **value** for every arithmetic expression.",
"_____no_output_____"
]
],
[
[
"let rec eval e = match e with\n | Val v -> v\n | Plus (v1,v2) -> eval v1 + eval v2\n | Div (v1,v2) -> eval v1 / eval v2",
"_____no_output_____"
]
],
[
[
"## Division by zero",
"_____no_output_____"
],
[
"This looks fine. But what happens if the denominator in the division is a 0.",
"_____no_output_____"
]
],
[
[
"eval (Div (Val 1, Val 0))",
"_____no_output_____"
]
],
[
[
"How can we avoid this?",
"_____no_output_____"
],
[
"## Interpreting Arithmetic Expressions: Take 2\n\n* Rewrite `eval` function to have the type `expr -> int option`\n + Return `None` for division by zero.",
"_____no_output_____"
]
],
[
[
"let rec eval e = match e with\n | Val v -> Some v\n | Plus (e1,e2) ->\n begin match eval e1 with \n | None -> None\n | Some v1 -> \n match eval e2 with\n | None -> None \n | Some v2 -> Some (v1 + v2)\n end\n | Div (e1,e2) ->\n match eval e1 with \n | None -> None\n | Some v1 -> \n match eval e2 with\n | None -> None \n | Some v2 -> if v2 = 0 then None else Some (v1 / v2)",
"_____no_output_____"
],
[
"eval (Div (Val 1, Val 0))",
"_____no_output_____"
]
],
[
[
"## Abstraction\n\n* There is a lot of repeated code in the interpreter above.\n + Factor out common code.",
"_____no_output_____"
]
],
[
[
"let return v = Some v",
"_____no_output_____"
],
[
"let bind m f = match m with\n | None -> None \n | Some v -> f v",
"_____no_output_____"
]
],
[
[
"## Abstraction \n\nLet's rewrite the interpreter using these functions.",
"_____no_output_____"
]
],
[
[
"let rec eval e = match e with\n | Val v -> return v\n | Plus (e1,e2) ->\n bind (eval e1) (fun v1 -> \n bind (eval e2) (fun v2 ->\n return (v1+v2)))\n | Div (e1,e2) ->\n bind (eval e1) (fun v1 -> \n bind (eval e2) (fun v2 ->\n if v2 = 0 then None else return (v1 / v2)))",
"_____no_output_____"
]
],
[
[
"## Infix bind operation\n\nUsually `bind` is defined as an infix function `>>=`.",
"_____no_output_____"
]
],
[
[
"let (>>=) = bind",
"_____no_output_____"
],
[
"let rec eval e = match e with\n | Val v -> return v\n | Plus (e1,e2) ->\n eval e1 >>= fun v1 -> \n eval e2 >>= fun v2 ->\n return (v1+v2)\n | Div (e1,e2) ->\n eval e1 >>= fun v1 -> \n eval e2 >>= fun v2 ->\n if v2 = 0 then None else return (v1 / v2)",
"_____no_output_____"
]
],
[
[
"## Modularise\n\n* The `return` and `>>=` we have defined for the interpreter works for any computation on option type. \n + Put them in a module, we get the Option Monad.",
"_____no_output_____"
]
],
[
[
"module type MONAD = sig\n type 'a t\n val return : 'a -> 'a t\n val (>>=) : 'a t -> ('a -> 'b t) -> 'b t\nend\n\nmodule OptionMonad : (MONAD with type 'a t = 'a option) = struct\n type 'a t = 'a option\n let return v = Some v\n let (>>=) m f = match m with\n | Some v -> f v\n | None -> None\nend",
"_____no_output_____"
]
],
[
[
"## Monad Laws\n\nAny implementation of the monad signature must satisfy the following laws:\n\n\n```ocaml\n1. return v >>= k ≡ k v (* Left Identity *)\n2. v >>= return ≡ v (* Right Identity *)\n3. (m >>= f) >>= g ≡ m >>= (fun x -> f x >>= g) (* Associativity *)\n```",
"_____no_output_____"
],
[
"## Option monad satisifies monad laws\n\n**Left Identity**: `return v >>= k ≡ k v`\n\n```ocaml\n return v >>= k\n≡ (Some v) >>= k (* by definition of return *)\n≡ match Some v with None -> None | Some v -> k v (* by definition of >>= *)\n≡ k v (* by beta reduction *)\n```\n\n**Exercice:** Prove other laws.",
"_____no_output_____"
],
[
"## State Monad\n\n* Each monad implementation typically extends the signature with additional operations.\n* A State Monad introduces a **single, typed mutable cell**.\n* Here's a signature for dealing with mutable state, which adds\n + `get` and `put` functions for reading and writing the state, and \n + a `runState` function for actually running computations.",
"_____no_output_____"
]
],
[
[
"module type STATE = sig\n type state\n include MONAD\n val get : state t\n val put : state -> unit t\n val run_state : 'a t -> init:state -> state * 'a\nend",
"_____no_output_____"
]
],
[
[
"## State Monad\n\nThe idea of a state monad is to simulate a single, typed mutable location in the program. Values can be `put` into this location and read from this location using `get`. How might we implement such a feature without using references?\n\nWe can *thread* the state through every computation in the monad. Suppose you were interested in implementing an successor function in the state monad. This function does not read or write to the state, but simply passes the state through. \n\nThe usual successor function is:\n\n```ocaml\nlet succ x = x + 1\n```\n\nThe successor function that would pass the state through would be:\n\n```ocaml\nlet succ_st x s = (s, x+1)\n```\n\nThe extra argument `s` is the state being passed to this function. Unlike the usual successor function, the `succ_st` function returns a pair with the new state (which happens to be the same state as that was passed in) and the result which is `x+1`.\n\nNow, a function `get` which only reads the current state can be written as:\n\n```ocaml\nlet get s = (s,s)\n```\n\n`get` does not modify the current state, and hence, returns the pair `(s,s)` where the first argument is the new state (which is the same as that was passed in) and the second argument is the result of the `get` function.\n\nA function `put s'` which updates the state can be written as:\n\n```ocaml\nlet put s' s = (s',())\n```\n\n`put` updates the state to `s'` and the result of put is `()`.\n\nObserve that the last argument of each of the functions `put`, `get` and `succ` is the previous state and they all return a pair of new state and the result of the computation. If the type of state is `state`, we can assign the following type to the functions:\n\n```ocaml\nval put : state -> state -> (state, unit)\nval get : state -> (state, state)\nval succ_st : int -> state -> (state, int)\n```\n\nWe can make it better by:\n\n```ocaml\ntype 'a t = state -> state * 'a\nval put : state -> unit t\nval get : state t\nval succ_st : int -> int t\n```\n\nHow do we build up larger programs with these individual functions? We can write a function that \n\n1. puts 10 and\n2. gets the current state and\n3. returns 5 + the current state as\n\n```ocaml\nlet p s0 (* initial state *) =\n let (s1,()) = put 10 s0 in\n let (s2,s) = get s1 in\n let (s3,result) = succ_st s s2 in\n (s3, result)\n```\n\nRather than explicitly threading the state through, which is tedious, we can use a monad to hide the tedious bits. The subsequent computation may also use results from the previous computation (as in the case of `get` and `succ_st` in the example above). So we define a function:\n\n```ocaml\nlet (>>=) (m : state -> state * 'a) (f : 'a -> state -> state * 'b) : state -> state * 'b = \n fun s (* previous state *) ->\n let (s': state, v : 'a) = m s in\n let (s'': state, res: 'b) = f a s in\n (s'', res)\n```\n\nRecall that `(>>=)` is an infix function. Using this function, we can write the program `p` as:\n\n```ocaml\nlet p s0 =\n let computation = \n put 10 >>= (fun () ->\n get >>= (fun s ->\n succ_st s))\n in\n computation s0\n```\nWe can drop the extra parenthesis to get\n\n```ocaml\nlet p s0 =\n let computation = \n put 10 >>= fun () ->\n get >>= fun s ->\n succ_st s\n in\n computation s0\n```\n\nWe can also rewrite `(>>=)` as:\n\n```ocaml\nlet (>>=) (m : 'a t) (f : 'a -> 'b t) : 'b t = \n fun s (* previous state *) ->\n let (s': state, v : 'a) = m s in\n f a s\n```",
"_____no_output_____"
],
[
"## State Monad\n\nHere's an implementation of `State`, parameterised by the type of the state:",
"_____no_output_____"
]
],
[
[
"module State (S : sig type t end)\n : STATE with type state = S.t = struct\n type state = S.t\n type 'a t = state -> state * 'a\n let return v = fun s -> (s, v)\n let (>>=) m f = fun s -> \n let (s', a) = m s in \n f a s'\n let get s = (s, s)\n let put s' _ = (s', ())\n let run_state m ~init = m init\nend",
"_____no_output_____"
]
],
[
[
"## Using State Monad",
"_____no_output_____"
]
],
[
[
"module IntState = State (struct type t = int end)\nopen IntState \n\nlet inc v = \n get >>= fun s ->\n put (s+v)\n\nlet dec v = \n get >>= fun s -> \n put (s-v)\n \nlet double =\n get >>= fun s ->\n put (s*2)",
"_____no_output_____"
]
],
[
[
"## Using State Monad",
"_____no_output_____"
]
],
[
[
"IntState.run_state ~init:10 (\n inc 5 >>= fun () -> \n dec 10 >>= fun () ->\n double)",
"_____no_output_____"
],
[
"let module FloatState = State (struct type t = float end) in \nlet open FloatState in\nFloatState.run_state ~init:5.4 (\n get >>= fun v ->\n put (v +. 1.0))",
"_____no_output_____"
]
],
[
[
"## State monad satisfies monad laws\n\n**Right Associativity**: `v >>= return ≡ v`\n\n```ocaml\n v >>= return\n≡ fun s -> let (s', a) = v s in return a s' (* by definition of >>= *)\n≡ fun s -> let (s', a) = v s in (fun v s -> (s,v)) a s' (* by definition of return *)\n≡ fun s -> let (s', a) = v s in (s',a) (* by beta reduction *)\n≡ fun s -> (fun (s', a) -> (s', a)) (v s) (* rewrite `let` to `fun` *)\n≡ fun s -> v s (* by eta reduction *)\n≡ v\n```\n\n**Exercise**: Prove other laws.",
"_____no_output_____"
],
[
"## Type of State\n\n* State in the state monad is of a single type\n + In our example, the state was of `int` type\n* *Can we change type of state as the computation evolves?*",
"_____no_output_____"
],
[
"## Parameterised monads\n\n* Parameterised monads add two additional type parameters to `t` representing the start and end states of a computation.\n* A computation of type `('p, 'q, 'a) t` has \n + *precondition* (or starting state) `'p`\n + *postcondition* (or ending state) `'q`\n + *produces a result* of type `'a`.\n\ni.e. `('p, 'q, 'a) t` is a kind of Hoare triple `{P} M {Q}`.\n\n",
"_____no_output_____"
],
[
"## Parameterised monads\n\nHere's the parameterised monad signature:",
"_____no_output_____"
]
],
[
[
"module type PARAMETERISED_MONAD =\nsig\n type ('s,'t,'a) t\n val return : 'a -> ('s,'s,'a) t\n val (>>=) : ('r,'s,'a) t ->\n ('a -> ('s,'t,'b) t) ->\n ('r,'t,'b) t\nend",
"_____no_output_____"
]
],
[
[
"## Parameterised state monad\n\nHere's a parameterised monad version of the `STATE` signature, using the extra parameters to represent the type of the reference cell.",
"_____no_output_____"
]
],
[
[
"module type PSTATE =\nsig\n include PARAMETERISED_MONAD\n val get : ('s,'s,'s) t\n val put : 's -> (_,'s,unit) t\n val runState : ('s,'t,'a) t -> init:'s -> 't * 'a\nend",
"_____no_output_____"
]
],
[
[
"## Parameterised state monad\n\n\nHere's an implementation of `PSTATE`.",
"_____no_output_____"
]
],
[
[
"module PState : PSTATE =\nstruct\n type ('s, 't, 'a) t = 's -> 't * 'a\n let return v s = (s, v)\n let (>>=) m k s = let t, a = m s in k a t\n let put s _ = (s, ())\n let get s = (s, s)\n let runState m ~init = m init\nend",
"_____no_output_____"
]
],
[
[
"## Computation with changing state",
"_____no_output_____"
]
],
[
[
"open PState\n\nlet inc v = get >>= fun s -> put (s+v)\nlet dec v = get >>= fun s -> put (s-v)\nlet double = get >>= fun s -> put (s*2)\n \nlet to_string = get >>= fun i -> put (string_of_int i)\nlet of_string = get >>= fun s -> put (int_of_string s)",
"_____no_output_____"
]
],
[
[
"## Computation with changing state",
"_____no_output_____"
]
],
[
[
"let foo = inc 5 >>= fun () -> to_string\nlet bar = get >>= fun s -> put (s ^ \"00\")\n \nlet baz = foo >>= fun () -> bar\nlet quz = bar >>= fun () -> foo",
"_____no_output_____"
]
],
[
[
"## A well-typed stack machine\n\n* Let's build a tiny stack machine with 3 instructions\n + `push` pushes a constant on to the stack. Constant could be of any type. \n + `add` adds the top two integers on the stack and pushes the result\n + `_if_` expects a `[b;v1;v2] @ rest_of_stack` on top of the stack.\n * if `b` is true then result stack will be `v1::rest_of_stack`\n * otherwise, `v2::rest_of_stack`.\n* Our stack machine will not get stuck! \n + recall the definition from lambda calculus lectures\n* This is how WASM operational semantics is defined!",
"_____no_output_____"
],
[
"## Stack operations\n\n* Because our stack will have values of different types, encode then using pairs.\n + `[]` will be `()`\n + `[1;2;3]` will be `(1, (2, (3, ())))`\n + `[1;true;3]` (which is not a well-typed OCaml expression) will be `(1, (true, (3, ()))))`",
"_____no_output_____"
],
[
"## Stack Operations",
"_____no_output_____"
]
],
[
[
"module type STACK_OPS =\nsig\n type ('s,'t,'a) t\n val add : unit -> (int * (int * 's), \n int * 's, \n unit) t\n val _if_ : unit -> (bool * ('a * ('a * 's)), \n 'a * 's, \n unit) t\n val push_const : 'a -> ('s, \n 'a * 's, \n unit) t\nend",
"_____no_output_____"
]
],
[
[
"## Stack Machine\n\nWe can combine the stack operations with the parameterised monad signature to\nbuild a signature for a stack machine:",
"_____no_output_____"
]
],
[
[
"module type STACKM = sig\n include PARAMETERISED_MONAD\n include STACK_OPS\n with type ('s,'t,'a) t := ('s,'t,'a) t\n val execute : ('s,'t,'a) t -> 's -> 't * 'a\nend",
"_____no_output_____"
]
],
[
[
"## Stack Machine\n\nHere is the implementation of the stack machine",
"_____no_output_____"
]
],
[
[
"module StackM : STACKM =\nstruct\n include PState\n \n let add ()=\n get >>= fun (x,(y,s)) ->\n put (x+y,s)\n \n let _if_ () =\n get >>= fun (c,(t,(e,s))) ->\n put ((if c then t else e),s)\n\n let push_const k =\n get >>= fun s ->\n put (k, s)\n\n let execute c s = runState ~init:s c\nend",
"_____no_output_____"
]
],
[
[
"## Using the stack machine",
"_____no_output_____"
]
],
[
[
"let program = let open StackM in\n push_const 4 >>= fun () ->\n push_const 5 >>= fun () ->\n push_const true >>= fun () ->\n _if_ () >>= fun () ->\n add ()",
"_____no_output_____"
],
[
"StackM.execute program (20,(10,()))",
"_____no_output_____"
]
],
[
[
"## Using the stack machine",
"_____no_output_____"
]
],
[
[
"StackM.execute (StackM._if_ ()) (false,(10,()))",
"_____no_output_____"
],
[
"StackM.execute (StackM.add ()) ()",
"_____no_output_____"
]
],
[
[
"<center>\n\n<h1 style=\"text-align:center\"> Fin. </h1>\n</center>",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
e713d4f6cafab3009591c0a8e8e95c387a0751a8 | 106,565 | ipynb | Jupyter Notebook | notebooks/ReferenceFrame.ipynb | gbiomech/BMC | fec9413b17a54f00ba6818438f7a50b132353e42 | [
"CC-BY-4.0"
] | 1 | 2022-01-07T22:30:39.000Z | 2022-01-07T22:30:39.000Z | notebooks/ReferenceFrame.ipynb | gbiomech/BMC | fec9413b17a54f00ba6818438f7a50b132353e42 | [
"CC-BY-4.0"
] | null | null | null | notebooks/ReferenceFrame.ipynb | gbiomech/BMC | fec9413b17a54f00ba6818438f7a50b132353e42 | [
"CC-BY-4.0"
] | null | null | null | 84.174566 | 46,436 | 0.722845 | [
[
[
"# Frame of reference\n\n> Marcos Duarte \n> Laboratory of Biomechanics and Motor Control ([http://demotu.org/](http://demotu.org/)) \n> Federal University of ABC, Brazil",
"_____no_output_____"
],
[
"<a href=\"http://en.wikipedia.org/wiki/Motion_(physics)\">Motion</a> (a change of position in space with respect to time) is not an absolute concept; a reference is needed to describe the motion of the object in relation to this reference. Likewise, the state of such reference cannot be absolute in space and so motion is relative. \nA [frame of reference](http://en.wikipedia.org/wiki/Frame_of_reference) is the place with respect to we choose to describe the motion of an object. In this reference frame, we define a [coordinate system](http://en.wikipedia.org/wiki/Coordinate_system) (a set of axes) within which we measure the motion of an object (but frame of reference and coordinate system are often used interchangeably). \n\nOften, the choice of reference frame and coordinate system is made by convenience. However, there is an important distinction between reference frames when we deal with the dynamics of motion, where we are interested to understand the forces related to the motion of the object. In dynamics, we refer to [inertial frame of reference](http://en.wikipedia.org/wiki/Inertial_frame_of_reference) (a.k.a., Galilean reference frame) when the Newton's laws of motion in their simple form are valid in this frame and to non-inertial frame of reference when the Newton's laws in their simple form are not valid (in such reference frame, fictitious accelerations/forces appear). An inertial reference frame is at rest or moves at constant speed (because there is no absolute rest!), whereas a non-inertial reference frame is under acceleration (with respect to an inertial reference frame).\n\nThe concept of frame of reference has changed drastically since Aristotle, Galileo, Newton, and Einstein. To read more about that and its philosophical implications, see [Space and Time: Inertial Frames](http://plato.stanford.edu/entries/spacetime-iframes/).",
"_____no_output_____"
],
[
"## Frame of reference for human motion analysis\n\nIn anatomy, we use a simplified reference frame composed by perpendicular planes to provide a standard reference for qualitatively describing the structures and movements of the human body, as shown in the next figure.\n\n<div class='center-align'><figure><img src=\"http://upload.wikimedia.org/wikipedia/commons/3/34/BodyPlanes.jpg\" width=\"300\" alt=\"Anatomical body position\"/><figcaption><center><i>Figure. Anatomical body position and body planes (<a href=\"http://en.wikipedia.org/wiki/Human_anatomical_terms\" target=\"_blank\">image from Wikipedia</a>).</i></center></figcaption> </figure></div> ",
"_____no_output_____"
],
[
"## Cartesian coordinate system\n\nAs we perceive the surrounding space as three-dimensional, a convenient coordinate system is the [Cartesian coordinate system](http://en.wikipedia.org/wiki/Cartesian_coordinate_system) in the [Euclidean space](http://en.wikipedia.org/wiki/Euclidean_space) with three orthogonal axes as shown below. The axes directions are commonly defined by the [right-hand rule](http://en.wikipedia.org/wiki/Right-hand_rule) and attributed the letters X, Y, Z. The orthogonality of the Cartesian coordinate system is convenient for its use in classical mechanics, most of the times the structure of space is assumed having the [Euclidean geometry](http://en.wikipedia.org/wiki/Euclidean_geometry) and as consequence, the motion in different directions are independent of each other. \n\n<div class='center-align'><figure><img src=\"https://raw.githubusercontent.com/demotu/BMC/master/images/CCS.png\" width=350/><figcaption><center><i>Figure. A point in three-dimensional Euclidean space described in a Cartesian coordinate system.</i></center></figcaption> </figure></div>",
"_____no_output_____"
],
[
"### Standardizations in movement analysis\n\nThe concept of reference frame in Biomechanics and motor control is very important and central to the understanding of human motion. For example, do we see, plan and control the movement of our hand with respect to reference frames within our body or in the environment we move? Or a combination of both? For three-dimensional motion analysis in Biomechanics, we may use several different references frames for convenience and refer to them as global, laboratory, local, anatomical, or technical reference frames or coordinate systems (we will study this later).\n\nThere has been proposed different standardizations on how to define frame of references for the main segments and joints of the human body. For instance, the International Society of Biomechanics has a [page listing standardization proposals](https://isbweb.org/activities/standards) by its standardization committee and subcommittees:",
"_____no_output_____"
]
],
[
[
"from IPython.display import IFrame\nIFrame('https://isbweb.org/activities/standards', width='100%', height=400)",
"_____no_output_____"
]
],
[
[
" Another initiative for the standardization of references frames is from the [Virtual Animation of the Kinematics of the Human for Industrial, Educational and Research Purposes (VAKHUM)](https://raw.githubusercontent.com/demotu/BMC/master/refs/VAKHUM.pdf) project.",
"_____no_output_____"
],
[
"## Determination of a coordinate system\n\nIn Biomechanics, we may use different coordinate systems for convenience and refer to them as global, laboratory, local, anatomical, or technical reference frames or coordinate systems. For example, in a standard gait analysis, we define a global or laboratory coordinate system and a different coordinate system for each segment of the body to be able to describe the motion of a segment in relation to anatomical axes of another segment. To define this anatomical coordinate system, we need to place markers on anatomical landmarks on each segment. We also may use other markers (technical markers) on the segment to improve the motion analysis and then we will also have to define a technical coordinate system for each segment.\n\nAs we perceive the surrounding space as three-dimensional, a convenient coordinate system to use is the [Cartesian coordinate system](http://en.wikipedia.org/wiki/Cartesian_coordinate_system) with three orthogonal axes in the [Euclidean space](http://en.wikipedia.org/wiki/Euclidean_space). From [linear algebra](http://en.wikipedia.org/wiki/Linear_algebra), a set of unit linearly independent vectors (orthogonal in the Euclidean space and each with norm (length) equals to one) that can represent any vector via [linear combination](http://en.wikipedia.org/wiki/Linear_combination) is called a <a href=\"http://en.wikipedia.org/wiki/Basis_(linear_algebra)\">basis</a> (or orthonormal basis). The figure below shows a point and its position vector in the Cartesian coordinate system and the corresponding versors (unit vectors) of the basis for this coordinate system. See the notebook [Scalar and vector](http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/ScalarVector.ipynb) for a description on vectors. \n<br>\n<figure><img src=\"https://raw.githubusercontent.com/demotu/BMC/master/images/vector3Dijk.png\" width=350/><figcaption><center><i>Figure. Representation of a point **P** and its position vector $\\overrightarrow{\\mathbf{r}}$ in a Cartesian coordinate system. The versors $\\hat{\\mathbf{i}}, \\hat{\\mathbf{j}}, \\hat{\\mathbf{k}}$ form a basis for this coordinate system and are usually represented in the color sequence RGB (red, green, blue) for easier visualization.</i></center></figcaption></figure>\n\nOne can see that the versors of the basis shown in the figure above have the following coordinates in the Cartesian coordinate system:\n\n$$ \\hat{\\mathbf{i}} = \\begin{bmatrix}1\\\\0\\\\0 \\end{bmatrix}, \\quad \\hat{\\mathbf{j}} = \\begin{bmatrix}0\\\\1\\\\0 \\end{bmatrix}, \\quad \\hat{\\mathbf{k}} = \\begin{bmatrix} 0 \\\\ 0 \\\\ 1 \\end{bmatrix}$$\n\nUsing the notation described in the figure above, the position vector $\\overrightarrow{\\mathbf{r}}$ (or the point $\\overrightarrow{\\mathbf{P}}$) can be expressed as:\n\n$$ \\overrightarrow{\\mathbf{r}} = x\\hat{\\mathbf{i}} + y\\hat{\\mathbf{j}} + z\\hat{\\mathbf{k}} $$",
"_____no_output_____"
],
[
"### Definition of a basis\n\nThe mathematical problem of determination of a coordinate system is to find a basis and an origin for it (a basis is only a set of vectors, with no origin). There are different methods to calculate a basis given a set of points (coordinates), for example, one an use the scalar product or the cross product for this problem. A classical procedure in mathematics, employing the scalar product, is known as the [Gram–Schmidt process](http://en.wikipedia.org/wiki/Gram%E2%80%93Schmidt_process). See the notebook [Scalar and Vector](http://nbviewer.jupyter.org/github/demotu/BMC/blob/master/notebooks/ScalarVector.ipynb) for a demonstration of the Gram–Schmidt process and how to implement it in Python.",
"_____no_output_____"
],
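[
"A minimal sketch of the Gram–Schmidt process for three 3D vectors (the full demonstration and derivation are in the Scalar and Vector notebook):\n\n```python\nimport numpy as np\n\ndef gram_schmidt(v1, v2, v3):\n    e1 = v1/np.linalg.norm(v1)                    # normalize the first vector\n    u2 = v2 - (v2 @ e1)*e1                        # remove the component of v2 along e1\n    e2 = u2/np.linalg.norm(u2)\n    u3 = v3 - (v3 @ e1)*e1 - (v3 @ e2)*e2         # remove the components of v3 along e1 and e2\n    e3 = u3/np.linalg.norm(u3)\n    return e1, e2, e3\n```",
"_____no_output_____"
],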
[
"Let's now define a basis using a common method in motion analysis (employing the cross product): \nGiven the coordinates of three noncollinear points in 3D space (points that do not all lie on the same line), $\\overrightarrow{\\mathbf{m}}_1, \\overrightarrow{\\mathbf{m}}_2, \\overrightarrow{\\mathbf{m}}_3$, which would represent the positions of markers captured from a motion analysis session, a basis can be found following these steps:\n\n1. First axis, $\\overrightarrow{\\mathbf{v}}_1$, the vector $\\overrightarrow{\\mathbf{m}}_2-\\overrightarrow{\\mathbf{m}}_1$ (or any other vector difference); \n2. Second axis, $\\overrightarrow{\\mathbf{v}}_2$, the cross or vector product between the vectors $\\overrightarrow{\\mathbf{v}}_1$ and $\\overrightarrow{\\mathbf{m}}_3-\\overrightarrow{\\mathbf{m}}_1$ (or $\\overrightarrow{\\mathbf{m}}_3-\\overrightarrow{\\mathbf{m}}_2$); \n3. Third axis, $\\overrightarrow{\\mathbf{v}}_3$, the cross product between the vectors $\\overrightarrow{\\mathbf{v}}_1$ and $\\overrightarrow{\\mathbf{v}}_2$. \n4. Make all vectors to have norm 1 dividing each vector by its norm.\n\nThe positions of the points used to construct a coordinate system have, by definition, to be specified in relation to an already existing coordinate system. In motion analysis, this coordinate system is the coordinate system from the motion capture system and it is established in the calibration phase. In this phase, the positions of markers placed on an object with perpendicular axes and known distances between the markers are captured and used as the reference (laboratory) coordinate system.\n\nFor example, given the positions $\\overrightarrow{\\mathbf{m}}_1 = [1,2,5], \\overrightarrow{\\mathbf{m}}_2 = [2,3,3], \\overrightarrow{\\mathbf{m}}_3 = [4,0,2]$, a basis can be found with:",
"_____no_output_____"
]
],
[
[
"import numpy as np\n\nm1 = np.array([1, 2, 5])\nm2 = np.array([2, 3, 3])\nm3 = np.array([4, 0, 2])\n\nv1 = m2 - m1 # first axis\nv2 = np.cross(v1, m3 - m1) # second axis\nv3 = np.cross(v1, v2) # third axis\n\n# Vector normalization\ne1 = v1/np.linalg.norm(v1)\ne2 = v2/np.linalg.norm(v2)\ne3 = v3/np.linalg.norm(v3)\n\nprint('Versors:', '\\ne1 =', e1, '\\ne2 =', e2, '\\ne3 =', e3)\nprint('\\nTest of orthogonality (cross product between versors):',\n '\\ne1 x e2:', np.linalg.norm(np.cross(e1, e2)),\n '\\ne1 x e3:', np.linalg.norm(np.cross(e1, e3)),\n '\\ne2 x e3:', np.linalg.norm(np.cross(e2, e3)))\nprint('\\nNorm of each versor:',\n '\\n||e1|| =', np.linalg.norm(e1),\n '\\n||e2|| =', np.linalg.norm(e2),\n '\\n||e3|| =', np.linalg.norm(e3))",
"Versors: \ne1 = [ 0.40824829 0.40824829 -0.81649658] \ne2 = [-0.76834982 -0.32929278 -0.5488213 ] \ne3 = [-0.49292179 0.85141036 0.17924429]\n\nTest of orthogonality (cross product between versors): \ne1 x e2: 1.0 \ne1 x e3: 1.0 \ne2 x e3: 1.0\n\nNorm of each versor: \n||e1|| = 1.0 \n||e2|| = 1.0 \n||e3|| = 1.0\n"
]
],
[
[
"To define a coordinate system using the calculated basis, we also need to define an origin. In principle, we could use any point as origin, but if the calculated coordinate system should follow anatomical conventions, e.g., the coordinate system origin should be at a joint center, we will have to calculate the basis and origin according to standards used in motion analysis as discussed before. \n\nIf the coordinate system is a technical basis and not anatomic-based, a common procedure in motion analysis is to define the origin for the coordinate system as the centroid (average) position among the markers at the reference frame. Using the average position across markers potentially reduces the effect of noise (for example, from soft tissue artifact) on the calculation. \n\nFor the markers in the example above, the origin of the coordinate system will be:",
"_____no_output_____"
]
],
[
[
"origin = np.mean((m1, m2, m3), axis=0)\nprint('Origin: ', origin)",
"Origin: [ 2.33333333 1.66666667 3.33333333]\n"
]
],
[
[
"Let's plot the coordinate system and the basis using the custom Python function `CCS.py`:",
"_____no_output_____"
]
],
[
[
"import sys\nsys.path.insert(1, r'./../functions') # add to pythonpath\nfrom CCS import CCS",
"_____no_output_____"
],
[
"markers = np.vstack((m1, m2, m3))\nbasis = np.vstack((e1, e2, e3))",
"_____no_output_____"
]
],
[
[
"Create figure in this page (inline):",
"_____no_output_____"
]
],
[
[
"%matplotlib notebook",
"_____no_output_____"
],
[
"markers = np.vstack((m1, m2, m3))\nbasis = np.vstack((e1, e2, e3))\nCCS(xyz=[], Oijk=origin, ijk=basis, point=markers, vector=True);",
"_____no_output_____"
]
],
[
[
"## Polar and spherical coordinate systems\n\nWhen studying circular motion in two or three dimensions, the use of a polar (for 2D) or spherical (for 3D) coordinate system can be more convenient than the Cartesian coordinate system.",
"_____no_output_____"
],
[
"### Polar coordinate system\n\nIn the polar coordinate system, a point in a plane is described by its distance $r$ to the origin (the ray from the origin to this point is the polar axis) and the angle $\\theta$ (measured counterclockwise) between the polar axis and an axis of the coordinate system as shown next. \n<br>\n<figure><img src=\"./../images/polar.png\"/><figcaption><center><i>Figure. Representation of a point in a polar coordinate system.</i></center></figcaption></figure>\n\nThe relation of the coordinates in the Cartesian and polar coordinate systems is:\n\n$$\\begin{array}{l l}\nx = r\\cos\\theta \\\\\ny = r\\sin\\theta \\\\\nr = \\sqrt{x^2 + y^2}\n\\end{array} $$",
"_____no_output_____"
],
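[
"A quick numerical check of these relations (a sketch added for illustration, assuming only NumPy; the angle is recovered with np.arctan2):\n\n```python\nimport numpy as np\n\nr, theta = 2.0, np.pi/3                   # polar coordinates\nx, y = r*np.cos(theta), r*np.sin(theta)   # to Cartesian\nprint(np.sqrt(x**2 + y**2))               # recovers r = 2.0\nprint(np.arctan2(y, x))                   # recovers theta = pi/3\n```",
"_____no_output_____"
],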
[
"### Spherical coordinate system\n\nThe spherical coordinate system can be seen as an extension of the polar coordinate system to three dimensions where an orthogonal axis is added and a second angle is used to describe the point with respect to this third axis as shown next. \n<br>\n<figure><img src=\"./../images/spherical.png\"/><figcaption><center><i>Figure. Representation of a point in a spherical coordinate system.</i></center></figcaption></figure>\n\nThe relation of the coordinates in the Cartesian and spherical coordinate systems is:\n\n$$\\begin{array}{l l}\nx = r\\sin\\theta\\cos\\phi \\\\\ny = r\\sin\\theta\\sin\\phi \\\\\nz = r\\cos\\theta \\\\\nr = \\sqrt{x^2 + y^2 + z^2}\n\\end{array} $$",
"_____no_output_____"
],
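[
"The same check for the spherical relations (a sketch for illustration, assuming only NumPy; angles in radians):\n\n```python\nimport numpy as np\n\nr, theta, phi = 2.0, np.pi/4, np.pi/3   # spherical coordinates\nx = r*np.sin(theta)*np.cos(phi)         # to Cartesian\ny = r*np.sin(theta)*np.sin(phi)\nz = r*np.cos(theta)\nprint(np.sqrt(x**2 + y**2 + z**2))      # recovers r = 2.0\n```",
"_____no_output_____"
],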
[
"## Generalized coordinates\n\nIn mechanics, generalized coordinates are a set of coordinates that describes the configuration of a system. Generalized coordinates are usually selected for convenience (e.g., simplifies the resolution of the problem) or to provide the minimum number of coordinates to describe the configuration of a system. \n\nFor instance, generalized coordinates are used to describe the motion of a system with multiple links where instead of using Cartesian coordinates, it's more convenient to use the angles between links as coordinates.",
"_____no_output_____"
],
[
"## Problems\n\n1. Right now, how fast are you moving? In your answer, consider your motion in relation to Earth and in relation to Sun.\n\n2. Go to the website [http://www.wisc-online.com/Objects/ViewObject.aspx?ID=AP15305](http://www.wisc-online.com/Objects/ViewObject.aspx?ID=AP15305) and complete the interactive lesson to learn about the anatomical terminology to describe relative position in the human body.\n\n3. To learn more about Cartesian coordinate systems go to the website [http://www.mathsisfun.com/data/cartesian-coordinates.html](http://www.mathsisfun.com/data/cartesian-coordinates.html), study the material, and answer the 10 questions at the end.\n\n4. Given the points in the 3D space, $m1 = [2,2,0], m2 = [0,1,1], m3 = [1,2,0]$, find an orthonormal basis.\n\n5. Determine if the following points form a basis in the 3D space, $m1 = [2,2,0], m2 = [1,1,1], m3 = [1,1,0]$.\n\n6. Derive expressions for the three axes of the pelvic basis considering the convention of the [Virtual Animation of the Kinematics of the Human for Industrial, Educational and Research Purposes (VAKHUM)](https://raw.githubusercontent.com/demotu/BMC/master/refs/VAKHUM.pdf) project (use RASIS, LASIS, RPSIS, and LPSIS as names for the pelvic anatomical landmarks and indicate the expression for each axis).\n\n7. Determine the basis for the pelvis following the convention of the [Virtual Animation of the Kinematics of the Human for Industrial, Educational and Research Purposes (VAKHUM)](https://raw.githubusercontent.com/demotu/BMC/master/refs/VAKHUM.pdf) project for the following anatomical landmark positions (units in meters): $RASIS=[0.5,0.8,0.4], LASIS=[0.55,0.78,0.1], RPSIS=[0.3,0.85,0.2], LPSIS=[0.29,0.78,0.3]$.",
"_____no_output_____"
],
[
"## References\n\n- [Standards - International Society of Biomechanics](https://isbweb.org/activities/standards). \n- Stanford Encyclopedia of Philosophy. [Space and Time: Inertial Frames](http://plato.stanford.edu/entries/spacetime-iframes/). \n- [Virtual Animation of the Kinematics of the Human for Industrial, Educational and Research Purposes (VAKHUM)](https://raw.githubusercontent.com/demotu/BMC/master/refs/VAKHUM.pdf). ",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
e713e003fe2a61a35428cd04a79b1c771681037d | 1,391 | ipynb | Jupyter Notebook | DAY 4 ASSIGNMENT.ipynb | gkumarashish213/LETSUPGRADE-PYTHON--ASSIGNMENT-B7 | c81d23de4c6176c4379d47c8e3c22346da648c0d | [
"Apache-2.0"
] | null | null | null | DAY 4 ASSIGNMENT.ipynb | gkumarashish213/LETSUPGRADE-PYTHON--ASSIGNMENT-B7 | c81d23de4c6176c4379d47c8e3c22346da648c0d | [
"Apache-2.0"
] | null | null | null | DAY 4 ASSIGNMENT.ipynb | gkumarashish213/LETSUPGRADE-PYTHON--ASSIGNMENT-B7 | c81d23de4c6176c4379d47c8e3c22346da648c0d | [
"Apache-2.0"
] | null | null | null | 20.455882 | 157 | 0.502516 | [
[
[
"# Print the first Amstrong number in the range of 1024000 to 702648265 and exit the loop as soon you encounter the first amstrong number.use while loop",
"_____no_output_____"
]
],
[
[
"upper = 702648264\nlower = 1024000\nfor i in range(lower,upper+1):\n o=len(str(i))\n temp=i\n sum=0\n while temp!=0:\n a=temp%10\n sum+=a**o\n temp//=10\n if sum==i:\n print(\"The first amstrong number is:\",i)",
"The first amstrong number is: 1741725\n"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
]
] |
e713ffd925be1f9e5daead0c69a38bb6536d909e | 345,457 | ipynb | Jupyter Notebook | SSIM_CW-SSIM_comparison.ipynb | Pandinosaurus/pyssim | 878dfa1ba4a31864b71c1224afb5e7dbe3bf60c7 | [
"MIT"
] | 1 | 2019-05-31T14:03:18.000Z | 2019-05-31T14:03:18.000Z | SSIM_CW-SSIM_comparison.ipynb | Pandinosaurus/pyssim | 878dfa1ba4a31864b71c1224afb5e7dbe3bf60c7 | [
"MIT"
] | null | null | null | SSIM_CW-SSIM_comparison.ipynb | Pandinosaurus/pyssim | 878dfa1ba4a31864b71c1224afb5e7dbe3bf60c7 | [
"MIT"
] | null | null | null | 1,293.846442 | 86,270 | 0.947128 | [
[
[
"%matplotlib inline\nimport time\nimport numpy as np\n\nimport matplotlib.pyplot as plt\nfrom PIL import Image, ImageOps\n\nfrom ssim import SSIM\nfrom ssim.utils import get_gaussian_kernel\n\ngaussian_kernel_sigma = 1.5\ngaussian_kernel_width = 11\ngaussian_kernel_1d = get_gaussian_kernel(gaussian_kernel_width, gaussian_kernel_sigma)\n",
"_____no_output_____"
],
[
"size = (256,256)\n\nim = Image.open('test-images/test3-orig.jpg')\nim = im.resize(size, Image.ANTIALIAS)\n\n# slightly rotated image\nim_rot = Image.open('test-images/test3-rot.jpg')\nim_rot = im_rot.resize(size, Image.ANTIALIAS)\n\n# slightly modified lighting conditions\nim_lig = Image.open('test-images/test3-lig.jpg')\nim_lig = im_lig.resize(size, Image.ANTIALIAS)\n\n# image cropped\nim_cro = Image.open('test-images/test3-cro.jpg')\nim_cro = im_cro.resize(size, Image.ANTIALIAS)",
"_____no_output_____"
],
[
"im_gray = ImageOps.grayscale(im)\nplt.imshow(np.asarray(im_gray),cmap='gray',vmin=0,vmax=255)",
"_____no_output_____"
],
[
"im_rot_gray = ImageOps.grayscale(im_rot)\nplt.imshow(np.asarray(im_rot_gray),cmap='gray',vmin=0,vmax=255)",
"_____no_output_____"
],
[
"im_lig_gray = ImageOps.grayscale(im_lig)\nplt.imshow(np.asarray(im_lig_gray),cmap='gray',vmin=0,vmax=255)",
"_____no_output_____"
],
[
"im_cro_gray = ImageOps.grayscale(im_cro)\nplt.imshow(np.asarray(im_cro_gray),cmap='gray',vmin=0,vmax=255)",
"_____no_output_____"
],
[
"start = time.time()\nssim_rot = SSIM(im, gaussian_kernel_1d).ssim_value(im_rot)\nssim_lig = SSIM(im, gaussian_kernel_1d).ssim_value(im_lig)\nssim_cro = SSIM(im, gaussian_kernel_1d).ssim_value(im_cro)\nend = time.time()\n\nprint(\"SSIM of rotated image %.4f\" % ssim_rot)\nprint(\"SSIM of modified lighting image %.4f\" % ssim_lig)\nprint(\"SSIM of cropped image %.4f\" % ssim_cro)\nprint(\"Elapsed time of SSIM is %.6fs\" % (end - start))",
"SSIM of rotated image 0.4971\nSSIM of modified lighting image 0.8603\nSSIM of cropped image 0.3958\n"
],
[
"start = time.time()\ncw_ssim_rot = SSIM(im).cw_ssim_value(im_rot)\ncw_ssim_lig = SSIM(im).cw_ssim_value(im_lig)\ncw_ssim_cro = SSIM(im).cw_ssim_value(im_cro)\nend = time.time()\n\nprint(\"CW-SSIM of rotated image %.4f\" % cw_ssim_rot)\nprint(\"CW-SSIM of modified lighting image %.4f\" % cw_ssim_lig)\nprint(\"CW-SSIM of cropped image %.4f\" % cw_ssim_cro)\nprint(\"Elapsed time of CW-SSIM is %.6fs\" % (end - start))",
"CW-SSIM of rotated image 0.8724\nCW-SSIM of modified lighting image 0.9668\nCW-SSIM of cropped image 0.7171\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e71420e05665fc71ada595661d883906407a3e39 | 16,955 | ipynb | Jupyter Notebook | Webinar 11 - Project 3 walkthrough/.ipynb_checkpoints/Project 3 walkthrough-checkpoint.ipynb | soheil-mp/Natural-Language-Processing-Tutorials | 2c89279946fd13d56e819c2683db3cb22e988f60 | [
"MIT"
] | 7 | 2020-07-26T11:57:23.000Z | 2021-06-30T21:31:02.000Z | Webinar 11 - Project 3 walkthrough/Project 3 walkthrough.ipynb | TomLin/Natural-Language-Processing-Tutorials | 2c89279946fd13d56e819c2683db3cb22e988f60 | [
"MIT"
] | null | null | null | Webinar 11 - Project 3 walkthrough/Project 3 walkthrough.ipynb | TomLin/Natural-Language-Processing-Tutorials | 2c89279946fd13d56e819c2683db3cb22e988f60 | [
"MIT"
] | 7 | 2020-07-28T05:24:40.000Z | 2021-03-02T06:34:54.000Z | 63.264925 | 794 | 0.697316 | [
[
[
"# Project 3 walkthrough",
"_____no_output_____"
],
[
"<hr>\n\n## Model 0: RNN\n\nGiven their effectiveness in modeling sequential data, the first acoustic model you will use is an RNN. As shown in the figure below, the RNN we supply to you will take the time sequence of audio features as input.\n\n<img src=\"images/simple_rnn.png\" width=\"50%\">\n\nAt each time step, the speaker pronounces one of 28 possible characters, including each of the 26 letters in the English alphabet, along with a space character (\" \"), and an apostrophe (').\n\nThe output of the RNN at each time step is a vector of probabilities with 29 entries, where the $i$-th entry encodes the probability that the $i$-th character is spoken in the time sequence. (The extra 29th character is an empty \"character\" used to pad training examples within batches containing uneven lengths.) If you would like to peek under the hood at how characters are mapped to indices in the probability vector, look at the `char_map.py` file in the repository. The figure below shows an equivalent, rolled depiction of the RNN that shows the output layer in greater detail. \n\n<img src=\"images/simple_rnn_unrolled.png\" width=\"60%\">",
"_____no_output_____"
],
[
"**STEP BY STEP WALKTHROUGH:**\n1. Create your input using keras.layers.Input, Define the shape as (None, input_dim).\n2. Create a GRU layer which returns the sequence and also have implementation mode of 2\n3. Use a softmax activation function\n4. Finalize your model using keras.models.Model\n5. Use **.output_length** to change the output of your neural networks based on the output_dim\n6. Train the model using train_model that you have imported at the start",
"_____no_output_____"
],
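[
"A minimal sketch of these steps (an illustration only, not the project's official solution from sample_models.py; it assumes the Keras functional API, with `input_dim` acoustic features per time step and 29 output characters):\n\n```python\nfrom keras.layers import Input, GRU, Activation\nfrom keras.models import Model\n\ndef simple_rnn_sketch(input_dim, output_dim=29):\n    # step 1: a variable-length sequence of acoustic features\n    input_data = Input(name='the_input', shape=(None, input_dim))\n    # step 2: GRU that returns the full output sequence, implementation mode 2\n    rnn = GRU(output_dim, return_sequences=True, implementation=2, name='rnn')(input_data)\n    # step 3: softmax over the 29 characters at every time step\n    y_pred = Activation('softmax', name='softmax')(rnn)\n    # steps 4-5: finalize; purely recurrent layers preserve the sequence length\n    model = Model(inputs=input_data, outputs=y_pred)\n    model.output_length = lambda x: x\n    return model\n```",
"_____no_output_____"
],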
[
"<hr>\n\n## Model 1: RNN + TimeDistributed Dense\n\nRead about the [TimeDistributed](https://keras.io/layers/wrappers/) wrapper and the [BatchNormalization](https://keras.io/layers/normalization/) layer in the Keras documentation. For your next architecture, you will add [batch normalization](https://arxiv.org/pdf/1510.01378.pdf) to the recurrent layer to reduce training times. The `TimeDistributed` layer will be used to find more complex patterns in the dataset. The unrolled snapshot of the architecture is depicted below.\n\n<img src=\"images/rnn_model.png\" width=\"60%\">\n\nThe next figure shows an equivalent, rolled depiction of the RNN that shows the (`TimeDistrbuted`) dense and output layers in greater detail. \n\n<img src=\"images/rnn_model_unrolled.png\" width=\"60%\">\n\nUse your research to complete the `rnn_model` function within the `sample_models.py` file. The function should specify an architecture that satisfies the following requirements:\n- The first layer of the neural network should be an RNN (`SimpleRNN`, `LSTM`, or `GRU`) that takes the time sequence of audio features as input. We have added `GRU` units for you, but feel free to change `GRU` to `SimpleRNN` or `LSTM`, if you like!\n- Whereas the architecture in `simple_rnn_model` treated the RNN output as the final layer of the model, you will use the output of your RNN as a hidden layer. Use `TimeDistributed` to apply a `Dense` layer to each of the time steps in the RNN output. Ensure that each `Dense` layer has `output_dim` units.\n\nUse the code cell below to load your model into the `model_1` variable. Use a value for `input_dim` that matches your chosen audio features, and feel free to change the values for `units` and `activation` to tweak the behavior of your recurrent layer.",
"_____no_output_____"
],
[
"**STEP BY STEP WALKTHROUGH:**\n1. Create your input using keras.layers.Input, Define the shape as (None, input_dim).\n2. Create a GRU layer which returns the sequence and also have implementation mode of 2\n3. Add batch normalization layer\n4. Add a TimeDistributed(Dense(output_dim)) layer\n5. Use a softmax activation function\n4. Finalize your model using keras.models.Model\n5. Use **.output_length** to change the output of your neural networks based on the output_dim\n6. Train the model using train_model that you have imported at the start",
"_____no_output_____"
],
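[
"A minimal sketch of these steps (illustrative; the default `units=200` is an assumption, not a project requirement):\n\n```python\nfrom keras.layers import (Input, GRU, BatchNormalization, TimeDistributed,\n                          Dense, Activation)\nfrom keras.models import Model\n\ndef rnn_sketch(input_dim, units=200, activation='relu', output_dim=29):\n    input_data = Input(name='the_input', shape=(None, input_dim))\n    rnn = GRU(units, activation=activation, return_sequences=True,\n              implementation=2, name='rnn')(input_data)\n    bn_rnn = BatchNormalization()(rnn)  # batch normalization to speed up training\n    time_dense = TimeDistributed(Dense(output_dim))(bn_rnn)\n    y_pred = Activation('softmax', name='softmax')(time_dense)\n    model = Model(inputs=input_data, outputs=y_pred)\n    model.output_length = lambda x: x\n    return model\n```",
"_____no_output_____"
],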
[
"<hr>\n\n## Model 2: CNN + RNN + TimeDistributed Dense\n\nThe architecture in `cnn_rnn_model` adds an additional level of complexity, by introducing a [1D convolution layer](https://keras.io/layers/convolutional/#conv1d). \n\n<img src=\"images/cnn_rnn_model.png\" width=\"100%\">\n\nThis layer incorporates many arguments that can be (optionally) tuned when calling the `cnn_rnn_model` module. We provide sample starting parameters, which you might find useful if you choose to use spectrogram audio features. \n\nIf you instead want to use MFCC features, these arguments will have to be tuned. Note that the current architecture only supports values of `'same'` or `'valid'` for the `conv_border_mode` argument.\n\nWhen tuning the parameters, be careful not to choose settings that make the convolutional layer overly small. If the temporal length of the CNN layer is shorter than the length of the transcribed text label, your code will throw an error.\n\nBefore running the code cell below, you must modify the `cnn_rnn_model` function in `sample_models.py`. Please add batch normalization to the recurrent layer, and provide the same `TimeDistributed` layer as before.",
"_____no_output_____"
],
[
"**STEP BY STEP WALKTHROUGH:**\n1. Create your input using keras.layers.Input, Define the shape as (None, input_dim).\n2. Add convolutional with only one dimension (CONV1D) layer\n3. Add batch normalization layer\n4. Create a LSTM layer which returns the sequence and also have implementation mode of 2\n5. Add batch normalization layer\n6. Add a TimeDistributed(Dense(output_dim)) layer\n7. Use a softmax activation function\n8. Finalize your model using keras.models.Model\n9. Use **.output_length** to change the output of your neural networks based on the output_dim\n10. Train the model using train_model that you have imported at the start",
"_____no_output_____"
],
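[
"A minimal sketch of these steps (illustrative; `cnn_output_length` is the helper defined in the project's sample_models.py, as noted in the Final Model section):\n\n```python\nfrom keras.layers import (Input, Conv1D, BatchNormalization, LSTM,\n                          TimeDistributed, Dense, Activation)\nfrom keras.models import Model\n\ndef cnn_rnn_sketch(input_dim, filters, kernel_size, conv_stride,\n                   conv_border_mode, units, output_dim=29):\n    input_data = Input(name='the_input', shape=(None, input_dim))\n    conv_1d = Conv1D(filters, kernel_size, strides=conv_stride,\n                     padding=conv_border_mode, activation='relu',\n                     name='conv1d')(input_data)\n    bn_cnn = BatchNormalization(name='bn_conv_1d')(conv_1d)\n    rnn = LSTM(units, return_sequences=True, implementation=2)(bn_cnn)\n    bn_rnn = BatchNormalization()(rnn)\n    time_dense = TimeDistributed(Dense(output_dim))(bn_rnn)\n    y_pred = Activation('softmax', name='softmax')(time_dense)\n    model = Model(inputs=input_data, outputs=y_pred)\n    # the convolution changes the temporal length of the output\n    model.output_length = lambda x: cnn_output_length(\n        x, kernel_size, conv_border_mode, conv_stride)\n    return model\n```",
"_____no_output_____"
],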
[
"<hr>\n\n## Model 3: Deeper RNN + TimeDistributed Dense\n\nReview the code in `rnn_model`, which makes use of a single recurrent layer. Now, specify an architecture in `deep_rnn_model` that utilizes a variable number `recur_layers` of recurrent layers. The figure below shows the architecture that should be returned if `recur_layers=2`. In the figure, the output sequence of the first recurrent layer is used as input for the next recurrent layer.\n\n<img src=\"images/deep_rnn_model.png\" width=\"80%\">\n\nFeel free to change the supplied values of `units` to whatever you think performs best. You can change the value of `recur_layers`, as long as your final value is greater than 1. (As a quick check that you have implemented the additional functionality in `deep_rnn_model` correctly, make sure that the architecture that you specify here is identical to `rnn_model` if `recur_layers=1`.)",
"_____no_output_____"
],
[
"**STEP BY STEP WALKTHROUGH:**\n1. Create your input using keras.layers.Input, Define the shape as (None, input_dim).\n2. Create a LSTM layer which returns the sequence and also have implementation mode of 2\n3. Add batch normalization layer\n4. Create a LSTM layer which returns the sequence and also have implementation mode of 2\n5. Add batch normalization layer\n6. Add a TimeDistributed(Dense(output_dim)) layer\n7. Use a softmax activation function\n8. Finalize your model using keras.models.Model\n9. Use **.output_length** to change the output of your neural networks based on the output_dim\n10. Train the model using train_model that you have imported at the start",
"_____no_output_____"
],
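[
"A minimal sketch of these steps (illustrative; one LSTM + batch-normalization block is stacked per recurrent layer):\n\n```python\nfrom keras.layers import (Input, LSTM, BatchNormalization, TimeDistributed,\n                          Dense, Activation)\nfrom keras.models import Model\n\ndef deep_rnn_sketch(input_dim, units=200, recur_layers=2, output_dim=29):\n    input_data = Input(name='the_input', shape=(None, input_dim))\n    layer = input_data\n    for i in range(recur_layers):  # steps 2-5: stacked LSTM + batch norm blocks\n        layer = LSTM(units, return_sequences=True, implementation=2,\n                     name='rnn_%d' % i)(layer)\n        layer = BatchNormalization()(layer)\n    time_dense = TimeDistributed(Dense(output_dim))(layer)\n    y_pred = Activation('softmax', name='softmax')(time_dense)\n    model = Model(inputs=input_data, outputs=y_pred)\n    model.output_length = lambda x: x\n    return model\n```",
"_____no_output_____"
],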
[
"<hr>\n\n## Model 4: Bidirectional RNN + TimeDistributed Dense\n\nRead about the [Bidirectional](https://keras.io/layers/wrappers/) wrapper in the Keras documentation. For your next architecture, you will specify an architecture that uses a single bidirectional RNN layer, before a (`TimeDistributed`) dense layer. The added value of a bidirectional RNN is described well in [this paper](http://www.cs.toronto.edu/~hinton/absps/DRNN_speech.pdf).\n> One shortcoming of conventional RNNs is that they are only able to make use of previous context. In speech recognition, where whole utterances are transcribed at once, there is no reason not to exploit future context as well. Bidirectional RNNs (BRNNs) do this by processing the data in both directions with two separate hidden layers which are then fed forwards to the same output layer.\n\n<img src=\"images/bidirectional_rnn_model.png\" width=\"80%\">\n\nBefore running the code cell below, you must complete the `bidirectional_rnn_model` function in `sample_models.py`. Feel free to use `SimpleRNN`, `LSTM`, or `GRU` units. When specifying the `Bidirectional` wrapper, use `merge_mode='concat'`.",
"_____no_output_____"
],
[
"**STEP BY STEP WALKTHROUGH:**\n1. Create your input using keras.layers.Input, Define the shape as (None, input_dim).\n2. Create a bidirectional layer which wrapped around a GRU layer which returns the sequence and also have implementation mode of 2.\n3. Add a TimeDistributed(Dense(output_dim)) layer\n4. Use a softmax activation function\n5. Finalize your model using keras.models.Model\n6. Use **.output_length** to change the output of your neural networks based on the output_dim\n7. Train the model using train_model that you have imported at the start",
"_____no_output_____"
],
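[
"A minimal sketch of these steps (illustrative; `units=200` is an assumption):\n\n```python\nfrom keras.layers import (Input, GRU, Bidirectional, TimeDistributed,\n                          Dense, Activation)\nfrom keras.models import Model\n\ndef bidirectional_rnn_sketch(input_dim, units=200, output_dim=29):\n    input_data = Input(name='the_input', shape=(None, input_dim))\n    # one bidirectional GRU; merge_mode='concat' joins the two directions\n    bidir_rnn = Bidirectional(GRU(units, return_sequences=True,\n                                  implementation=2),\n                              merge_mode='concat')(input_data)\n    time_dense = TimeDistributed(Dense(output_dim))(bidir_rnn)\n    y_pred = Activation('softmax', name='softmax')(time_dense)\n    model = Model(inputs=input_data, outputs=y_pred)\n    model.output_length = lambda x: x\n    return model\n```",
"_____no_output_____"
],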
[
"<hr>\n\n## Final Model\n\nNow that you've tried out many sample models, use what you've learned to draft your own architecture! While your final acoustic model should not be identical to any of the architectures explored above, you are welcome to merely combine the explored layers above into a deeper architecture. It is **NOT** necessary to include new layer types that were not explored in the notebook.\n\nHowever, if you would like some ideas for even more layer types, check out these ideas for some additional, optional extensions to your model:\n\n- If you notice your model is overfitting to the training dataset, consider adding **dropout**! To add dropout to [recurrent layers](https://faroit.github.io/keras-docs/1.0.2/layers/recurrent/), pay special attention to the `dropout_W` and `dropout_U` arguments. This [paper](http://arxiv.org/abs/1512.05287) may also provide some interesting theoretical background.\n- If you choose to include a convolutional layer in your model, you may get better results by working with **dilated convolutions**. If you choose to use dilated convolutions, make sure that you are able to accurately calculate the length of the acoustic model's output in the `model.output_length` lambda function. You can read more about dilated convolutions in Google's [WaveNet paper](https://arxiv.org/abs/1609.03499). For an example of a speech-to-text system that makes use of dilated convolutions, check out this GitHub [repository](https://github.com/buriburisuri/speech-to-text-wavenet). You can work with dilated convolutions [in Keras](https://keras.io/layers/convolutional/) by paying special attention to the `padding` argument when you specify a convolutional layer.\n- If your model makes use of convolutional layers, why not also experiment with adding **max pooling**? Check out [this paper](https://arxiv.org/pdf/1701.02720.pdf) for example architecture that makes use of max pooling in an acoustic model.\n- So far, you have experimented with a single bidirectional RNN layer. Consider stacking the bidirectional layers, to produce a [deep bidirectional RNN](https://www.cs.toronto.edu/~graves/asru_2013.pdf)!\n\nAll models that you specify in this repository should have `output_length` defined as an attribute. This attribute is a lambda function that maps the (temporal) length of the input acoustic features to the (temporal) length of the output softmax layer. This function is used in the computation of CTC loss; to see this, look at the `add_ctc_loss` function in `train_utils.py`. To see where the `output_length` attribute is defined for the models in the code, take a look at the `sample_models.py` file. You will notice this line of code within most models:\n```\nmodel.output_length = lambda x: x\n```\nThe acoustic model that incorporates a convolutional layer (`cnn_rnn_model`) has a line that is a bit different:\n```\nmodel.output_length = lambda x: cnn_output_length(\n x, kernel_size, conv_border_mode, conv_stride)\n```\n\nIn the case of models that use purely recurrent layers, the lambda function is the identity function, as the recurrent layers do not modify the (temporal) length of their input tensors. However, convolutional layers are more complicated and require a specialized function (`cnn_output_length` in `sample_models.py`) to determine the temporal length of their output.\n\nYou will have to add the `output_length` attribute to your final model before running the code cell below. Feel free to use the `cnn_output_length` function, if it suits your model. ",
"_____no_output_____"
],
[
"**STEP BY STEP WALKTHROUGH:**\n1. Create your input using keras.layers.Input, Define the shape as (None, input_dim).\n2. Create a bidirectional layer which wrapped around a GRU layer which returns the sequence and also have implementation mode of 2.\n3. Add batch normalization layer\n4. Use a relu activation function\n5. Create a bidirectional layer which wrapped around a GRU layer which returns the sequence and also have implementation mode of 2.\n6. Add batch normalization layer\n7. Use a relu activation function\n8. Add a TimeDistributed(Dense(output_dim)) layer\n9. Add softmax activation layer\n10. Finalize your model using keras.models.Model\n11. Use **.output_length** to change the output of your neural networks based on the output_dim\n12. Train the model using train_model that you have imported at the start",
"_____no_output_____"
],
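[
"A minimal sketch of these steps (illustrative; two stacked bidirectional GRU blocks, each followed by batch normalization and a relu activation; `units=200` is an assumption):\n\n```python\nfrom keras.layers import (Input, GRU, Bidirectional, BatchNormalization,\n                          TimeDistributed, Dense, Activation)\nfrom keras.models import Model\n\ndef final_model_sketch(input_dim, units=200, output_dim=29):\n    input_data = Input(name='the_input', shape=(None, input_dim))\n    layer = input_data\n    for i in range(2):  # steps 2-7: bidirectional GRU + batch norm + relu, twice\n        layer = Bidirectional(GRU(units, return_sequences=True,\n                                  implementation=2))(layer)\n        layer = BatchNormalization()(layer)\n        layer = Activation('relu')(layer)\n    time_dense = TimeDistributed(Dense(output_dim))(layer)  # step 8\n    y_pred = Activation('softmax', name='softmax')(time_dense)  # step 9\n    model = Model(inputs=input_data, outputs=y_pred)  # step 10\n    model.output_length = lambda x: x  # step 11: recurrent-only, so identity\n    return model\n```",
"_____no_output_____"
],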
[
"**RESOURCES:**\n\n- <a href=\"https://realpython.com/python-speech-recognition/\">The Ultimate Guide To Speech Recognition With Python</a>\n- <a href=\"https://pythonprogramminglanguage.com/speech-recognition/\">Speech Recognition API</a>\n- <a href=\"https://www.youtube.com/watch?v=K_WbsFrPUCk\">Speech Recognition using Python - Youtube Video</a>\n- <a href=\"https://www.edx.org/course/speech-recognition-systems-2\">Speech Recognition Systems Course - Microsoft</a>\n- <a href=\"https://towardsdatascience.com/automatic-speech-recognition-data-collection-with-youtube-v3-api-mask-rcnn-and-google-vision-api-2370d6776109\">Automatic Speech Recognition Data Collection with Youtube V3 API, Mask-RCNN and Google Vision API</a>",
"_____no_output_____"
]
]
] | [
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
e7142588a100190c0e768dc9aaeb5f528e1059c1 | 440,917 | ipynb | Jupyter Notebook | notebooks/03-make_the_map.ipynb | nealplatt/sch_man_nwinvasion | 73f7ce5fa4843cc2352fdb709b134f22af28ad19 | [
"MIT"
] | null | null | null | notebooks/03-make_the_map.ipynb | nealplatt/sch_man_nwinvasion | 73f7ce5fa4843cc2352fdb709b134f22af28ad19 | [
"MIT"
] | null | null | null | notebooks/03-make_the_map.ipynb | nealplatt/sch_man_nwinvasion | 73f7ce5fa4843cc2352fdb709b134f22af28ad19 | [
"MIT"
] | null | null | null | 903.518443 | 165,280 | 0.949043 | [
[
[
"# Genomic analysis of a parasite invasion: colonization of the New World by the blood fluke, Schistosoma mansoni \n\nRoy Nelson Platt II*, Frédéric D. Chevalier*, Winka Le Clec'h, Marina McDew-White, Philip T. LoVerde, Rafael Ramiro de Assis, Guilherme Oliveira, Safari Kinunghi, Anouk Gouvras, Bonnie Webster, Joanne Webster, Aidan Emery, David Rollinson, Timothy J. Anderson\n\n# Make maps of sample locations\n\nuse the `sch_man_nwinvasion-geo_map` conda env",
"_____no_output_____"
]
],
[
[
"import os\nimport cartopy.crs as ccrs\nimport cartopy\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport shutil\n\nfrom IPython.display import Image\n\n\nimport rpy2.ipython\n\n\n%load_ext rpy2.ipython\n\nos.chdir(\"/master/nplatt/sch_man_nwinvasion\")",
"_____no_output_____"
],
[
"if os.path.isdir(\"results/maps\"):\n shutil.rmtree(\"results/maps\")\nos.mkdir(\"results/maps\")",
"_____no_output_____"
]
],
[
[
"# Specimens examined map",
"_____no_output_____"
]
],
[
[
"pop_colors={}\npop_colors[\"eafrica\"] = \"green\"\npop_colors[\"wafrica\"] = \"red\"\npop_colors[\"brazil\"] = \"purple\"\npop_colors[\"caribbean\"] = \"blue\"\npop_colors[\"rodhaini\"] = \"darkgrey\"",
"_____no_output_____"
],
[
"#get coordinates\n# data/sampling_lat_lon.csv\n\n# Country Location,Latitude,Longitude\n# Senegal,Nder,15.083333,-16.566667\n# Senegal,TemeyeS3,16.389444,-15.728889\n# Niger,Diambala,14.31376,1.299648\n# Niger,Namarigoungou,14.3554,1.2627\n# Tanzania,Kigongo,-2.70955,32.89879\n# Tanzania,Kabita,-2.31845,33.64372\n# Tanzania,Nyamazugo,-2.55731,32.54214\n# Tanzania,Katunguru,-2.50537937,32.67316932\n# Tanzania,Luchelele,-2.61916,32.8733\n# Tanzania,Nyandago,-2.69974,31.85495\n# Tanzania,Sekagi,-2.39005,33.09819\n# Brazil,Pontos dos Volantes,-16.752778,-41.503889\n# Kenya, Nairobi, -1.3011869,36.7906464 \n# Uganda,Lake Victoria, 0.0583665,32.4151119,13\n# Uganda,Lake Alberta, 1.6738572,31.2493658\n# Burundia,,-3.3751268,29.2853413\n# Guadeloupe,,16.0102561,-61.7489572\n# Puerto Rico,,18.389512,-66.2005625\n# Cameroon,,4.036072,9.6717631\n\n\nsample_sites = np.array([ [15.083333, -16.566667],\n [16.389444, -15.728889],\n [14.31376, 1.299648],\n [14.3554, 1.2627],\n [-2.70955, 32.89879],\n [-2.31845, 33.64372],\n [-2.55731, 32.54214],\n [-2.50537937, 32.67316932],\n [-2.61916, 32.8733],\n [-2.69974, 31.85495],\n [-2.39005, 33.09819],\n [-16.752778, -41.503889],\n [-1.3011869, 36.7906464], \n [0.0583665, 32.4151119],\n [1.6738572, 31.2493658],\n [-3.3751268, 29.2853413],\n [16.0102561, -61.7489572],\n [18.389512, -66.2005625],\n [4.036072, 9.6717631] ])\n\nlons, lats = sample_sites.T\n\n \nsite_colors = [pop_colors[\"wafrica\"], pop_colors[\"wafrica\"], pop_colors[\"wafrica\"], pop_colors[\"wafrica\"], \n pop_colors[\"eafrica\"], pop_colors[\"eafrica\"], pop_colors[\"eafrica\"], pop_colors[\"eafrica\"], \n pop_colors[\"eafrica\"], pop_colors[\"eafrica\"], pop_colors[\"eafrica\"], pop_colors[\"brazil\"], \n pop_colors[\"rodhaini\"], pop_colors[\"eafrica\"], pop_colors[\"eafrica\"], pop_colors[\"rodhaini\"], \n pop_colors[\"caribbean\"], pop_colors[\"caribbean\"], pop_colors[\"wafrica\"] ] ",
"_____no_output_____"
],
[
"#set fig size\nplt.figure(figsize=(15, 15))\n\n#gen axes\nax = plt.axes(projection=ccrs.PlateCarree(central_longitude=0))\n\n#draw land features\nax.add_feature(cartopy.feature.LAND, facecolor = \"white\")\nax.add_feature(cartopy.feature.OCEAN)\nax.add_feature(cartopy.feature.COASTLINE)\nax.add_feature(cartopy.feature.BORDERS, alpha = 0.75)\nax.add_feature(cartopy.feature.LAKES, alpha = 0.5)\nax.add_feature(cartopy.feature.RIVERS, alpha = 0.5)\n\n#set lat and long tick marks\nlat_ticks = np.arange(-105, 80, 15)\nlon_ticks = np.arange(-70, 70, 15)\n\nax.set_xticks(lat_ticks, crs = ccrs.PlateCarree())\nax.set_yticks(lon_ticks, crs = ccrs.PlateCarree())\nplt.tick_params(labelsize = 12)\n\nplt.text(-68, 22, \"Puerto Rico\\nn=1\", bbox=dict(facecolor='white'), fontsize=12)\nplt.text(-58, 13, \"Guadeloupe\\nn=4\", bbox=dict(facecolor='white'), fontsize=12)\nplt.text(-37, -18, \"Brazil\\nn=50\", bbox=dict(facecolor='white'), fontsize=12)\nplt.text(-20, 20, \"Senegal\\nn=29\", bbox=dict(facecolor='white'), fontsize=12)\nplt.text(0, 18, \"Niger\\nn=13\", bbox=dict(facecolor='white'), fontsize=12)\nplt.text(0, -4, \"Cameroon\\nn=1\", bbox=dict(facecolor='white'), fontsize=12)\nplt.text(34, -11, \"Tanzania\\nn=58\", bbox=dict(facecolor='white'), fontsize=12)\nplt.text(16, -11, \"Burundi\\nn=1 (SR)\", bbox=dict(facecolor='white'), fontsize=12)\nplt.text(28, 5, \"Uganda\\nn=2\", bbox=dict(facecolor='white'), fontsize=12)\nplt.text(40, -2, \"Kenya\\nn=8 (SR)\", bbox=dict(facecolor='white'), fontsize=12)\n\n#set limits of map\nax.set_extent([-90, 60, -28, 35], crs = ccrs.PlateCarree())\n\n#plot sampling logations\nplt.scatter(lats, lons,\n facecolors = site_colors, \n edgecolors = 'black', \n linewidth = 1, \n marker = 'o', \n s = 200, \n alpha = 1,\n transform = ccrs.PlateCarree(), \n zorder = 10 )\n\nplt.savefig(\"results/maps/world_map.svg\", format=\"svg\")\nplt.savefig(\"results/maps/world_map.png\", format=\"png\")\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"# Trade maps",
"_____no_output_____"
]
],
[
[
"sample_sites = np.array([ [15.083333, -16.566667],\n [16.389444, -15.728889],\n [14.31376, 1.299648],\n [14.3554, 1.2627],\n [-2.70955, 32.89879],\n [-2.31845, 33.64372],\n [-2.55731, 32.54214],\n [-2.50537937, 32.67316932],\n [-2.61916, 32.8733],\n [-2.69974, 31.85495],\n [-2.39005, 33.09819],\n [-16.752778, -41.503889],\n [0.0583665, 32.4151119],\n [1.6738572, 31.2493658],\n [16.0102561, -61.7489572],\n [18.389512, -66.2005625],\n [4.036072, 9.6717631] ])\n\nlons, lats = sample_sites.T\n\n \nsite_colors = [pop_colors[\"wafrica\"], pop_colors[\"wafrica\"], pop_colors[\"wafrica\"], pop_colors[\"wafrica\"], \n pop_colors[\"eafrica\"], pop_colors[\"eafrica\"], pop_colors[\"eafrica\"], pop_colors[\"eafrica\"], \n pop_colors[\"eafrica\"], pop_colors[\"eafrica\"], pop_colors[\"eafrica\"], pop_colors[\"brazil\"], \n pop_colors[\"rodhaini\"], pop_colors[\"eafrica\"], pop_colors[\"eafrica\"], pop_colors[\"rodhaini\"], \n pop_colors[\"caribbean\"], pop_colors[\"caribbean\"], pop_colors[\"wafrica\"] ] ",
"_____no_output_____"
]
],
[
[
"## Brazil",
"_____no_output_____"
]
],
[
[
"#set fig size\nplt.figure(figsize=(15, 15))\n\n#gen axes\nax = plt.axes(projection=ccrs.PlateCarree(central_longitude=0))\n\n#draw land features\nax.add_feature(cartopy.feature.LAND, facecolor = \"white\")\nax.add_feature(cartopy.feature.OCEAN, facecolor= \"gainsboro\")\nax.add_feature(cartopy.feature.COASTLINE)\nax.add_feature(cartopy.feature.BORDERS, alpha = 0.75)\nax.add_feature(cartopy.feature.LAKES, alpha = 0.5)\nax.add_feature(cartopy.feature.RIVERS, alpha = 0.5)\n\n#set lat and long tick marks\nlat_ticks = np.arange(-105, 80, 5)\nlon_ticks = np.arange(-70, 70, 5)\n\nax.set_xticks(lat_ticks, crs = ccrs.PlateCarree())\nax.set_yticks(lon_ticks, crs = ccrs.PlateCarree())\nplt.tick_params(labelsize = 12)\n\n# plt.text(-68, 22, \"Puerto Rico\\nn=1\", bbox=dict(facecolor='white'), fontsize=12)\n# plt.text(-58, 13, \"Guadeloupe\\nn=4\", bbox=dict(facecolor='white'), fontsize=12)\n# plt.text(-37, -18, \"Brazil\\nn=50\", bbox=dict(facecolor='white'), fontsize=12)\n# plt.text(-20, 20, \"Senegal\\nn=29\", bbox=dict(facecolor='white'), fontsize=12)\n# plt.text(0, 18, \"Niger\\nn=13\", bbox=dict(facecolor='white'), fontsize=12)\n# plt.text(0, -4, \"Cameroon\\nn=1\", bbox=dict(facecolor='white'), fontsize=12)\n# plt.text(34, -11, \"Tanzania\\nn=58\", bbox=dict(facecolor='white'), fontsize=12)\n# plt.text(16, -11, \"Burundi\\nn=1 (SR)\", bbox=dict(facecolor='white'), fontsize=12)\n# plt.text(28, 5, \"Uganda\\nn=2\", bbox=dict(facecolor='white'), fontsize=12)\n# plt.text(40, -2, \"Kenya\\nn=8 (SR)\", bbox=dict(facecolor='white'), fontsize=12)\n\n#set limits of map\nax.set_extent([-60, -30, -28, 6], crs = ccrs.PlateCarree())\n\n#plot sampling logations\nplt.scatter(lats, lons,\n facecolors = \"grey\", \n edgecolors = 'black', \n linewidth = 1, \n marker = 'o', \n s = 200, \n alpha = 1,\n transform = ccrs.PlateCarree(), \n zorder = 10 )\n\nplt.savefig(\"results/maps/brazil.svg\", format=\"svg\")\nplt.savefig(\"results/maps/brazil.png\", format=\"png\")\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Africa",
"_____no_output_____"
]
],
[
[
"#set fig size\nplt.figure(figsize=(15, 15))\n\n#gen axes\nax = plt.axes(projection=ccrs.PlateCarree(central_longitude=0))\n\n#draw land features\nax.add_feature(cartopy.feature.LAND, facecolor = \"white\")\nax.add_feature(cartopy.feature.OCEAN, facecolor= \"gainsboro\")\nax.add_feature(cartopy.feature.COASTLINE)\nax.add_feature(cartopy.feature.BORDERS, alpha = 0.75)\nax.add_feature(cartopy.feature.LAKES, alpha = 0.5)\nax.add_feature(cartopy.feature.RIVERS, alpha = 0.5)\n\n#set lat and long tick marks\nlat_ticks = np.arange(-105, 80, 10)\nlon_ticks = np.arange(-70, 70, 10)\n\nax.set_xticks(lat_ticks, crs = ccrs.PlateCarree())\nax.set_yticks(lon_ticks, crs = ccrs.PlateCarree())\nplt.tick_params(labelsize = 12)\n\n# plt.text(-68, 22, \"Puerto Rico\\nn=1\", bbox=dict(facecolor='white'), fontsize=12)\n# plt.text(-58, 13, \"Guadeloupe\\nn=4\", bbox=dict(facecolor='white'), fontsize=12)\n# plt.text(-37, -18, \"Brazil\\nn=50\", bbox=dict(facecolor='white'), fontsize=12)\n# plt.text(-20, 20, \"Senegal\\nn=29\", bbox=dict(facecolor='white'), fontsize=12)\n# plt.text(0, 18, \"Niger\\nn=13\", bbox=dict(facecolor='white'), fontsize=12)\n# plt.text(0, -4, \"Cameroon\\nn=1\", bbox=dict(facecolor='white'), fontsize=12)\n# plt.text(34, -11, \"Tanzania\\nn=58\", bbox=dict(facecolor='white'), fontsize=12)\n# plt.text(16, -11, \"Burundi\\nn=1 (SR)\", bbox=dict(facecolor='white'), fontsize=12)\n# plt.text(28, 5, \"Uganda\\nn=2\", bbox=dict(facecolor='white'), fontsize=12)\n# plt.text(40, -2, \"Kenya\\nn=8 (SR)\", bbox=dict(facecolor='white'), fontsize=12)\n\n#set limits of map\nax.set_extent([-22, 45, -38, 35], crs = ccrs.PlateCarree())\n\n#plot sampling logations\nplt.scatter(lats, lons,\n facecolors = \"grey\", \n edgecolors = 'black', \n linewidth = 1, \n marker = 'o', \n s = 200, \n alpha = 1,\n transform = ccrs.PlateCarree(), \n zorder = 10 )\n\nplt.savefig(\"results/maps/africa.svg\", format=\"svg\")\nplt.savefig(\"results/maps/africa.png\", format=\"png\")\n\nplt.show()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7142611cf0b70212e521078e46361c28bdf2234 | 98,937 | ipynb | Jupyter Notebook | pessimal_noise.ipynb | isabella232/mnist-c | bba57e4ccc282f106907c5239958e72298451ea7 | [
"Apache-2.0"
] | 48 | 2019-06-03T22:30:41.000Z | 2021-12-14T13:22:13.000Z | pessimal_noise.ipynb | google-research/mnist-c | bba57e4ccc282f106907c5239958e72298451ea7 | [
"Apache-2.0"
] | 3 | 2020-02-25T14:01:32.000Z | 2021-11-10T02:46:41.000Z | pessimal_noise.ipynb | isabella232/mnist-c | bba57e4ccc282f106907c5239958e72298451ea7 | [
"Apache-2.0"
] | 11 | 2019-06-04T01:39:05.000Z | 2022-02-10T12:13:27.000Z | 363.738971 | 82,430 | 0.91345 | [
[
[
"import matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\n\nimport corruptions\nimport models\n\n%matplotlib inline\n%env CUDA_VISIBLE_DEVICES=3",
"_____no_output_____"
],
[
"def show(x):\n x = np.array(x)\n plt.imshow(x, cmap='gray')\n plt.show()\n \ntrain_mnist = datasets.MNIST(\"../data/\", train=True, transform=transforms.ToTensor())\ntest_mnist = datasets.MNIST(\"../data/\", train=False, transform=transforms.ToTensor())\nIMAGES = [test_mnist[i][0] for i in range(50)]\nLABELS = [test_mnist[i][1] for i in range(50)]\nto_pil = transforms.ToPILImage()\ndevice = torch.device(\"cuda\")\ncpu = torch.device(\"cpu\")",
"_____no_output_____"
],
[
"def normalize(tensor, mean=(0.1307,), std=(0.3081,)):\n mean = torch.as_tensor(mean, dtype=torch.float32, device=tensor.device)\n std = torch.as_tensor(std, dtype=torch.float32, device=tensor.device)\n return tensor.sub(mean[:, None, None]).div(std[:, None, None])\n\ndef main(model, epochs, lr, momentum, sgd, batch_size, targeted, adv_target, test_batch_size, log_interval, eps, lam):\n\n def train(model, A, device, train_loader, optimizer, epoch):\n model.eval()\n for batch_idx, (data, target) in enumerate(train_loader):\n A.requires_grad = True\n data, target = data.to(device), target.to(device)\n noise = torch.normal(mean=torch.zeros(batch_size, 1, 196)).to(device) @ A\n norm = noise.norm(dim=-1)\n scaled_noise = (noise.transpose(0, -1) / norm.squeeze()).transpose(0, -1) * eps / 4\n tiled_noise = scaled_noise.view(batch_size, 1, 14, 14).repeat(1, 1, 2, 2)\n data = data + tiled_noise.view(data.shape)\n data = normalize(torch.clamp(data, 0, 1))\n optimizer.zero_grad()\n output = model(data)\n if targeted:\n loss = -F.nll_loss(output, target) + F.nll_loss(output, adv_target)\n else:\n loss = -F.nll_loss(output, target)\n loss.backward()\n optimizer.step()\n if batch_idx % log_interval == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(train_loader.dataset),\n 100. * batch_idx / len(train_loader), loss.item()))\n A.detach_()\n\n\n def test(model, A, device, test_loader):\n model.eval()\n test_loss = 0\n correct = 0\n with torch.no_grad():\n for data, target in test_loader:\n data, target = data.to(device), target.to(device)\n noise = torch.normal(mean=torch.zeros(test_batch_size, 1, 196)).to(device) @ A\n norm = noise.norm(dim=-1)\n scaled_noise = (noise.transpose(0, -1) / norm.squeeze()).transpose(0, -1) * eps / 4\n tiled_noise = scaled_noise.view(test_batch_size, 1, 14, 14).repeat(1, 1, 2, 2)\n data = data + tiled_noise.view(data.shape)\n data = normalize(torch.clamp(data, 0, 1))\n output = model(data)\n test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss\n pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability\n correct += pred.eq(target.view_as(pred)).sum().item()\n\n test_loss /= len(test_loader.dataset)\n\n print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(\n test_loss, correct, len(test_loader.dataset),\n 100. * correct / len(test_loader.dataset)))\n \n device = torch.device(\"cuda\")\n cpu = torch.device(\"cpu\")\n\n train_loader = torch.utils.data.DataLoader(\n datasets.MNIST('../data', train=True, download=True,\n transform=transforms.ToTensor()),\n batch_size=batch_size, shuffle=True, pin_memory=True)\n test_loader = torch.utils.data.DataLoader(\n datasets.MNIST('../data', train=False,\n transform=transforms.ToTensor()),\n batch_size=test_batch_size, shuffle=True, pin_memory=True)\n\n A = torch.eye(196).to(device)\n A.requires_grad = True\n \n if targeted:\n adv_target = torch.tensor([adv_target]).repeat(batch_size).to(device)\n\n model = model()\n \n if sgd:\n optimizer = optim.SGD([A], lr=lr, momentum=momentum)\n else:\n optimizer = optim.Adam([A])\n \n for epoch in range(1, epochs + 1):\n if epoch in [45, 90]:\n new_lr = lr * (0.1 ** (epoch // 30))\n for param_group in optimizer.param_groups:\n param_group['lr'] = new_lr\n train(model, A, device, train_loader, optimizer, epoch)\n test(model, A, device, test_loader)\n \n return A",
"_____no_output_____"
],
[
"A = main(model=models.ConvNet, epochs=135, lr=1000., momentum=0., sgd=True, batch_size=100,\n targeted=False, adv_target=8, test_batch_size=1000, log_interval=100, eps=10.63, lam=0.)",
"_____no_output_____"
],
[
"noise = torch.normal(mean=torch.zeros(1, 1, 196)) @ A.to(cpu)\nnorm = noise.norm(dim=-1)\nscaled_noise = (noise.transpose(0, -1) / norm.squeeze()).transpose(0, -1) * 10.63 / 4\ntiled_noise = scaled_noise.view(1, 1, 14, 14).repeat(1, 1, 2, 2)\nx = IMAGES[0] + tiled_noise.view(IMAGES[0].shape)\nx = torch.clamp(x, 0, 1)\nshow(x[0])\nx = A.to(cpu).view(196, 196)\nplt.imshow(np.array(x), cmap=\"gray\")",
"_____no_output_____"
],
[
"final = A.detach().cpu().numpy()\ntorch.save(final, \"pessimal_noise_matrix\")",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e71434d9f391a0170e1700b7afae161ab3be54c3 | 545,379 | ipynb | Jupyter Notebook | Vaccinefinder.ipynb | yunhecui/nycvaccine | 45a2aa39f575b3a293828f5dfc0593a39e4f13a1 | [
"MIT"
] | null | null | null | Vaccinefinder.ipynb | yunhecui/nycvaccine | 45a2aa39f575b3a293828f5dfc0593a39e4f13a1 | [
"MIT"
] | null | null | null | Vaccinefinder.ipynb | yunhecui/nycvaccine | 45a2aa39f575b3a293828f5dfc0593a39e4f13a1 | [
"MIT"
] | 1 | 2022-02-23T21:15:20.000Z | 2022-02-23T21:15:20.000Z | 698.308579 | 216,572 | 0.948817 | [
[
[
"# Import Packages",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport geopandas as gpd\nimport matplotlib.pyplot as plt\nimport urllib\nimport zipfile\nimport glob\nimport os\nfrom shapely.geometry import Point\nfrom geopandas import GeoDataFrame\nimport random as rd\n%matplotlib inline\n",
"_____no_output_____"
]
],
[
[
"# Import and Clean Data",
"_____no_output_____"
],
[
"For this mini project, we will need 2 layers: **Public transit stations** and **COVID-19 vaccination clinic locations**",
"_____no_output_____"
]
],
[
[
"try:\n os.mkdir('downloads')\nexcept:\n print('folder exists')",
"_____no_output_____"
],
[
"def download_unzip_shp(link, foldername):\n # download shapefile as a zipped folder\n urllib.request.urlretrieve(link, foldername + \".zip\")\n \n # unzip to another folder\n with zipfile.ZipFile(foldername + \".zip\",\"r\") as zip_ref:\n zip_ref.extractall(foldername)\n\n shp_name = glob.glob(foldername + '/*.shp')\n \n return shp_name",
"_____no_output_____"
],
[
"# bus stations:\n## check this link: https://data.cityofnewyork.us/Transportation/Bus-Stop-Shelters/qafz-7myz\nbuslink = \"https://data.cityofnewyork.us/api/geospatial/qafz-7myz?method=export&format=Shapefile\"\nbusname = \"downloads/bus_shp\"\nbus_shp = download_unzip_shp(buslink, busname)",
"_____no_output_____"
],
[
"bus = gpd.read_file(bus_shp[0])\n\n# convert from GCS to PCS for further actions\nbus = bus.to_crs(epsg=6434)\n\nbus.plot()\nplt.show()",
"_____no_output_____"
],
[
"# uncomment to check the shapefile coordinate system\n#bus.crs",
"_____no_output_____"
],
[
"# metro stations:\n## check this link: https://data.cityofnewyork.us/Transportation/Subway-Stations/arq3-7z49\nmetrolink = \"https://data.cityofnewyork.us/api/geospatial/arq3-7z49?method=export&format=Shapefile\"\nmetroname = \"downloads/metro_shp\"\nmetro_shp = download_unzip_shp(metrolink, metroname)",
"_____no_output_____"
],
[
"metro = gpd.read_file(metro_shp[0])\n# convert from GCS to PCS for further actions\nmetro = metro.to_crs(epsg=6434)\nmetro.plot()\nplt.show()",
"_____no_output_____"
],
[
"# join them together:\nbshort = bus[['shelter_id','geometry']]\nmshort = metro[['name','geometry']]\ntransit = gpd.GeoDataFrame( pd.concat([bshort, mshort], ignore_index=True) )\ntransit.plot()\nplt.show()",
"_____no_output_____"
],
[
"transit['ID'] = transit.index\n",
"_____no_output_____"
],
[
"# uncomment to check the shapefile coordinate system\n#metro.crs",
"_____no_output_____"
],
[
"# if you would like to download a .csv version: try codes below\n# # bus stations:\n# urllib.request.urlretrieve(\"https://data.cityofnewyork.us/api/geospatial/qafz-7myz?method=export&format=Shapefile\", \"bus.csv\")\n\n# # metro stations:\n# urllib.request.urlretrieve(\"https://data.cityofnewyork.us/api/views/kk4q-3rt2/rows.csv?accessType=DOWNLOAD\", \"metro.csv\")",
"_____no_output_____"
],
[
"# census tracts:\n## check this link: https://data.cityofnewyork.us/City-Government/2010-Census-Tracts/fxpq-c8ku\nctlink = \"https://data.cityofnewyork.us/api/geospatial/fxpq-c8ku?method=export&format=Shapefile\"\nctname = \"downloads/2010 Census Tracts_shp\"\nct_shp = download_unzip_shp(ctlink, ctname)",
"_____no_output_____"
],
[
"ct = gpd.read_file(ct_shp[0])\n\n# convert from GCS to PCS for further actions\nct = ct.to_crs(epsg=6434)\n\nct.plot()\nplt.show()",
"_____no_output_____"
],
[
"# uncomment to check the shapefile coordinate system\n#ct.crs",
"_____no_output_____"
],
[
"# vaccine station\n# the processing notebook is in the floder\n# several geocoding errors are fixed manually\nvs = pd.read_csv('Vac_station_final.csv')\nvs.head(2)",
"_____no_output_____"
],
[
"## generate geo-spatial dataframe using csv files\ngeometry = [Point(xy) for xy in zip(vs.longitude, vs.latitude)]\nvs = GeoDataFrame(vs, geometry=geometry)\nvs.set_crs(epsg=4326, inplace=True)\nvs = vs.to_crs(epsg=6434)\n#vs.head(1)",
"_____no_output_____"
],
[
"vs.plot()\nplt.show()",
"_____no_output_____"
]
],
[
[
"# Analysis",
"_____no_output_____"
]
],
[
[
"plt.rcParams[\"figure.figsize\"] = (10,10) # change the plots to a larger size",
"_____no_output_____"
]
],
[
[
"### First, select a random station",
"_____no_output_____"
]
],
[
[
"# select 1 random station:\nstation_code = rd.randrange(3896) \nstation = transit[transit['ID'] == station_code]\n\n# plot both the select station and the census tracts\nax = ct.plot()\nstation.plot(ax=ax, color='brown')\nplt.show()",
"_____no_output_____"
]
],
[
[
"### Then, generate 1 mile radius buffer and clip census tract to 3 mile bounding box",
"_____no_output_____"
]
],
[
[
"radius = 1 * 5280 ## convert 1 mile radius to 5280 ft\n\n# generate buffer around the selected station\nbuffer = station.geometry.buffer(radius)\nbounding = station.buffer(radius * 3, cap_style=3) ## 3 is square bounding\nct_clip = gpd.clip(ct,bounding)\n\nax = ct_clip.plot()\nbuffer.plot(ax=ax, color='yellow')\nstation.plot(ax=ax, color='brown')\nplt.show()\n",
"C:\\Users\\yunhe\\anaconda3\\lib\\site-packages\\ipykernel_launcher.py:6: UserWarning: CRS mismatch between the CRS of left geometries and the CRS of right geometries.\nUse `to_crs()` to reproject one of the input geometries to match the CRS of the other.\n\nLeft CRS: EPSG:6434\nRight CRS: None\n\n \n"
]
],
[
[
"### Next, clip vaccination station layer with bounding box, and futher select vaccination stations in 1-mile buffer",
"_____no_output_____"
]
],
[
[
"vac_clip = gpd.clip(vs,buffer)\nother_vac_clip = gpd.clip(vs,bounding)\n\n\nax = ct_clip.plot()\nbuffer.plot(ax=ax, color='yellow')\nstation.plot(ax=ax, color='brown')\n\nother_vac_clip.plot(ax=ax, marker = \"o\", markersize=20, color = 'violet')\nvac_clip.plot(ax=ax, marker = \"*\", markersize=260, color = 'hotpink')\n\nplt.show()",
"C:\\Users\\yunhe\\anaconda3\\lib\\site-packages\\ipykernel_launcher.py:2: UserWarning: CRS mismatch between the CRS of left geometries and the CRS of right geometries.\nUse `to_crs()` to reproject one of the input geometries to match the CRS of the other.\n\nLeft CRS: EPSG:6434\nRight CRS: None\n\n \n"
],
[
"vac_clip.head()",
"_____no_output_____"
],
[
"# if you would like to generate shapefile/csv output, uncomment lines below\n# os.mkdir('outputs')\n# vac_clip.to_file(driver='ESRI Shapefile', filename = \"outputs/vac_clip.shp\")\n# vac_clip.to_csv('outputs/vac_clip.csv')",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
e7147acfeba5051c3f20b52cd1b7329e6b8decf2 | 17,913 | ipynb | Jupyter Notebook | simple-rnn.ipynb | nthend/cellai | 7e79d72bebd3e99bce40876867b9811fbf752f17 | [
"MIT"
] | null | null | null | simple-rnn.ipynb | nthend/cellai | 7e79d72bebd3e99bce40876867b9811fbf752f17 | [
"MIT"
] | null | null | null | simple-rnn.ipynb | nthend/cellai | 7e79d72bebd3e99bce40876867b9811fbf752f17 | [
"MIT"
] | null | null | null | 34.514451 | 120 | 0.455144 | [
[
[
"from IPython.display import display, clear_output\nimport numpy as np\nimport pyopencl as cl\nfrom PIL import Image\nimport time\nimport signal\nimport re",
"_____no_output_____"
],
[
"from skvideo.io import FFmpegWriter\nfrom IPython.display import HTML\nimport base64\nimport io",
"_____no_output_____"
],
[
"signal_done = False\n\ndef signal_handler(signal, frame):\n global signal_done\n signal_done = True\n\ndef stop_on_signal():\n global signal_done\n signal_done = False\n signal.signal(signal.SIGINT, signal_handler)",
"_____no_output_____"
],
[
"ctx = cl.create_some_context()\nqueue = cl.CommandQueue(ctx)\nmf = cl.mem_flags",
"_____no_output_____"
],
[
"def exp_prob(one_step_prob, n_steps):\n return 1.0 - (1.0 - one_step_prob)**n_steps",
"_____no_output_____"
],
[
"class Buffer:\n def __init__(self, nparray, ro=False, dual=False):\n self.ro = ro\n self.dual = dual\n self.host = nparray\n flags = 0\n if ro:\n flags |= mf.READ_ONLY\n else:\n flags |= mf.READ_WRITE\n mkbuf = lambda buf: cl.Buffer(ctx, flags | mf.COPY_HOST_PTR, hostbuf=buf)\n self.buf = mkbuf(self.host)\n if dual:\n self.dbuf = mkbuf(self.host)\n \n def swap(self):\n if self.dual:\n self.buf, self.dbuf = self.dbuf, self.buf\n \n def load(self):\n cl.enqueue_copy(queue, self.host, self.buf)",
"_____no_output_____"
],
[
"class World:\n def __init__(self, size, n_agents, param, net=None):\n self.size = size\n self.n_agents = (n_agents,)\n self.param = param\n \n self.step_count = 0\n self.avg_score = 0.0\n self.avg_varexp = 0.0\n self.plants_count = 0\n \n self.w_shape = (size[1], size[0])\n self.a_shape = (n_agents,)\n \n self.buffers = {}\n \n # screen\n self.buffers[\"w_screen\"] = Buffer(np.zeros((*self.w_shape, 3), dtype=np.uint8))\n \n # random\n self.buffers[\"a_random\"] = Buffer(np.random.randint(1<<32, size=self.a_shape, dtype=np.uint32))\n self.buffers[\"w_random\"] = Buffer(np.random.randint(1<<32, size=self.w_shape, dtype=np.uint32))\n \n # world\n self.init_world()\n self.init_agent_outer()\n \n # network\n net_param = {\n \"rnn_sx\": 11,\n \"rnn_sh\": 16,\n \"rnn_sy\": 5,\n }\n self.init_net(net_param, net=net)\n self.init_agent_inner()\n \n # parameters \n self.constants = {\n \"WORLD_SIZE_X\": self.size[0],\n \"WORLD_SIZE_Y\": self.size[1],\n \"SIZE_A_AGENT_I\": self.buffers[\"a_agents_i\"].host.shape[-1],\n \"SIZE_A_AGENT_F\": self.buffers[\"a_agents_f\"].host.shape[-1],\n \"SIZE_W_CACHE_I\": self.buffers[\"w_cache_i\"].host.shape[-1],\n \"SIZE_W_CACHE_F\": self.buffers[\"w_cache_f\"].host.shape[-1],\n \"SIZE_W_TRACE_F\": self.buffers[\"w_trace_f\"].host.shape[-1],\n \"SIZE_W_OBJECT_I\": self.buffers[\"w_object_i\"].host.shape[-1],\n \"SIZE_A_RNN_F\": self.buffers[\"a_rnn_f\"].host.shape[-1],\n \"RNN_SIZE_X\": self.net_param[\"rnn_sx\"],\n \"RNN_SIZE_H\": self.net_param[\"rnn_sh\"],\n \"RNN_SIZE_Y\": self.net_param[\"rnn_sy\"],\n \"AGENT_SELECT_N\": self.param[\"agent_selection_size\"],\n }\n \n self.buffers[\"PAR_I\"] = Buffer(np.array([\n self.param[\"animal_sensor_length\"],\n ], dtype=np.int32), ro=True)\n \n wperiod = self.param[\"world_step_period\"];\n self.buffers[\"PAR_F\"] = Buffer(np.array([\n exp_prob(self.param[\"trace_fade_factor\"], wperiod),\n exp_prob(self.param[\"trace_diffusion_factor\"], wperiod),\n self.param[\"trace_animal_factor\"],\n exp_prob(self.param[\"trace_plant_factor\"], wperiod),\n exp_prob(self.param[\"plant_appear_prob\"], wperiod),\n self.param[\"weight_variation_factor\"],\n self.param[\"selection_prob\"],\n self.param[\"softmax_temperature\"],\n self.param[\"selection_temperature\"],\n ], dtype=np.float32), ro=True)\n \n self.build()\n \n def init_world(self):\n self.buffers[\"w_object_i\"] = Buffer(np.zeros((*self.w_shape, 1), dtype=np.int32))\n self.buffers[\"w_trace_f\"] = Buffer(np.zeros((*self.w_shape, 3), dtype=np.float32))\n self.buffers[\"w_cache_i\"] = Buffer(np.zeros((*self.w_shape, 4), dtype=np.int32))\n self.buffers[\"w_cache_f\"] = Buffer(np.zeros((*self.w_shape, 4), dtype=np.float32))\n \n def init_agent_outer(self):\n a_pos = np.stack((\n np.random.randint(0, self.size[0], size=self.a_shape),\n np.random.randint(0, self.size[1], size=self.a_shape),\n ), axis=1)\n a_dir = np.random.randint(0, 4, size=(*self.a_shape, 1))\n a_score = np.zeros((*self.a_shape, 2))\n self.buffers[\"a_agents_i\"] = Buffer(np.concatenate((a_pos, a_dir, a_score), axis=1).astype(np.int32))\n \n def init_agent_inner(self):\n a_ve = np.zeros((*self.a_shape, 1))\n a_h = np.zeros((*self.a_shape, self.net_param[\"rnn_sh\"]))\n self.buffers[\"a_agents_f\"] = Buffer(np.concatenate((a_ve, a_h), axis=1).astype(np.float32))\n \n def init_net(self, param, net=None):\n self.net_param = param\n rnn_sx, rnn_sh, rnn_sy = param[\"rnn_sx\"], param[\"rnn_sh\"], param[\"rnn_sy\"]\n if net is None:\n rnn_wim = 1e-1\n wxh = rnn_wim*np.random.randn(*self.a_shape, (rnn_sx+1)*rnn_sh)\n 
whh = rnn_wim*np.random.randn(*self.a_shape, rnn_sh*rnn_sh)\n why = rnn_wim*np.random.randn(*self.a_shape, (rnn_sh+1)*rnn_sy)\n self.buffers[\"a_rnn_f\"] = Buffer(np.concatenate((wxh, whh, why), axis=1).astype(np.float32))\n else:\n assert net.shape[1] == (rnn_sx+1)*rnn_sh + rnn_sh*rnn_sh + (rnn_sh+1)*rnn_sy\n idxs = np.random.randint(net.shape[0], size=self.a_shape)\n self.buffers[\"a_rnn_f\"] = Buffer(np.copy(net[idxs]).astype(np.float32))\n \n def build(self):\n with open(\"simple-rnn.cl\", \"r\") as f:\n source = f.read()\n for k, v in self.constants.items():\n source = re.sub(\"(#define *%s)\" % k, \"\\g<0> %s\" % str(v), source)\n self.program = cl.Program(ctx, source).build()\n \n def step(self):\n if self.param[\"selection_period\"] != 0 and (self.step_count % self.param[\"selection_period\"]) == 0:\n self.program.a_select(\n queue,\n self.n_agents,\n None,\n\n self.buffers[\"PAR_I\"].buf,\n self.buffers[\"PAR_F\"].buf,\n\n self.buffers[\"a_random\"].buf,\n\n self.buffers[\"a_agents_i\"].buf,\n self.buffers[\"a_agents_f\"].buf,\n self.buffers[\"a_rnn_f\"].buf,\n )\n \n if self.param[\"disaster_period\"] != 0 and (self.step_count % self.param[\"disaster_period\"]) == 0:\n self.init_world()\n self.init_agent_outer()\n \n if (self.step_count % self.param[\"world_step_period\"]) == 0:\n self.program.w_step_read(\n queue,\n self.size,\n None,\n\n self.buffers[\"PAR_I\"].buf,\n self.buffers[\"PAR_F\"].buf,\n \n self.buffers[\"w_random\"].buf,\n \n self.buffers[\"w_cache_i\"].buf,\n self.buffers[\"w_cache_f\"].buf,\n\n self.buffers[\"w_object_i\"].buf,\n self.buffers[\"w_trace_f\"].buf,\n )\n \n self.program.w_step_write(\n queue,\n self.size,\n None,\n\n self.buffers[\"PAR_I\"].buf,\n self.buffers[\"PAR_F\"].buf,\n \n self.buffers[\"w_random\"].buf,\n \n self.buffers[\"w_cache_i\"].buf,\n self.buffers[\"w_cache_f\"].buf,\n\n self.buffers[\"w_object_i\"].buf,\n self.buffers[\"w_trace_f\"].buf,\n )\n \n self.program.a_step(\n queue,\n self.n_agents,\n None,\n \n self.buffers[\"PAR_I\"].buf,\n self.buffers[\"PAR_F\"].buf,\n \n self.buffers[\"a_random\"].buf,\n \n self.buffers[\"a_agents_i\"].buf,\n self.buffers[\"a_agents_f\"].buf,\n self.buffers[\"a_rnn_f\"].buf,\n \n self.buffers[\"w_object_i\"].buf,\n self.buffers[\"w_trace_f\"].buf,\n )\n \n self.step_count += 1;\n \n def draw(self):\n self.program.w_draw(\n queue,\n self.size,\n None,\n \n self.buffers[\"PAR_I\"].buf,\n self.buffers[\"PAR_F\"].buf,\n \n self.buffers[\"w_object_i\"].buf,\n self.buffers[\"w_trace_f\"].buf,\n self.buffers[\"w_screen\"].buf,\n )\n \n self.program.a_draw(\n queue,\n self.n_agents,\n None,\n \n self.buffers[\"PAR_I\"].buf,\n self.buffers[\"PAR_F\"].buf,\n \n self.buffers[\"a_agents_i\"].buf,\n self.buffers[\"a_agents_f\"].buf,\n \n self.buffers[\"w_screen\"].buf,\n )\n \n self.buffers[\"w_screen\"].load()\n return self.buffers[\"w_screen\"].host\n \n def fetch_stats(self):\n self.buffers[\"a_agents_i\"].load()\n self.buffers[\"a_agents_f\"].load()\n self.avg_score = np.mean(self.buffers[\"a_agents_i\"].host[:,3])\n self.avg_varexp = np.mean(self.buffers[\"a_agents_f\"].host[:,0])\n \n self.buffers[\"w_object_i\"].load()\n self.plants_count = np.sum(self.buffers[\"w_object_i\"].host != 0)\n \n def dump_net(self, count):\n self.buffers[\"a_rnn_f\"].load()\n net = self.buffers[\"a_rnn_f\"].host\n idxs = np.random.randint(net.shape[0], size=(count,))\n return np.copy(net[idxs])",
"_____no_output_____"
],
[
"world_param = {\n \"world_step_period\": 100,\n \"plant_appear_prob\": 1e-7,\n \"animal_sensor_length\": 6,\n \"trace_animal_factor\": 0.5,\n \"trace_plant_factor\": 0.5,\n \"trace_fade_factor\": 0.0002,\n \"trace_diffusion_factor\": 0.002,\n \"selection_prob\": 0.5,\n \"selection_period\": 1000,\n \"agent_selection_size\": 16,\n \"weight_variation_factor\": 1e-1,\n \"softmax_temperature\": 1.0,\n \"selection_temperature\": 1.0,\n \"disaster_period\": 50000,\n}",
"_____no_output_____"
],
[
"world = World((4000, 4000), 1024, param=world_param) #, net=world.dump_net(1024))",
"_____no_output_____"
],
[
"stop_on_signal()\nlast = time.time()\ndraw = False\nwhile not signal_done:\n world.step()\n now = time.time()\n if now - last >= 10.0:\n clear_output(wait=True)\n if draw:\n img = Image.fromarray(world.draw())\n h = 600\n w = int((h/img.size[1])*img.size[0])\n img = img.resize((w, h), Image.ANTIALIAS)\n display(img)\n world.fetch_stats()\n print(\"steps elapsed: %s\" % world.step_count)\n print(\"average score: %s\" % world.avg_score)\n print(\"average varexp: %s\" % world.avg_varexp)\n print(\"plants count: %s\" % world.plants_count)\n last = now",
"steps elapsed: 3485435\naverage score: 0.748046875\naverage varexp: -1.638028\nplants count: 3899\n"
],
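[
"# Optional inline snapshot (an illustrative addition, not part of the original\n# run): render the current world state once, scaled down, without re-entering\n# the monitor loop above. PIL's Image is already in scope, since the loop uses it.\nsnapshot = Image.fromarray(world.draw())\nsnapshot.resize((snapshot.size[0] // 4, snapshot.size[1] // 4), Image.ANTIALIAS)",
"_____no_output_____"
],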
[
"np.save(\"simple-rnn-net\", world.dump_net(32))",
"_____no_output_____"
],
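[
"# Round-trip check (illustrative addition): np.save appends the .npy suffix,\n# and the dumped weights can seed a new population through the\n# World(..., net=...) argument used below.\nsaved_net = np.load(\"simple-rnn-net.npy\")\nprint(saved_net.shape)",
"_____no_output_____"
],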
[
"tiny_world_param = dict(world_param)\ntiny_world_param.update({\n #\"selection_period\": 0,\n \"disaster_period\": 0\n})",
"_____no_output_____"
],
[
"tiny_world = World((800, 600), 32, param=tiny_world_param, net=world.dump_net(256))",
"_____no_output_____"
],
[
"for j in range(5000):\n tiny_world.step()",
"_____no_output_____"
],
[
"params = {\n \"-vcodec\": \"libx264\",\n \"-pix_fmt\": \"yuv420p\",\n \"-profile:v\": \"baseline\",\n \"-level\": \"3\"\n}\nvideo = FFmpegWriter(\"tmp.mp4\", outputdict=params)\nstride = 10\nfor i in range(20*24):\n for j in range(stride):\n tiny_world.step()\n img = tiny_world.draw()\n video.writeFrame(img)\nvideo.close()",
"_____no_output_____"
],
[
"with open(\"tmp.mp4\", \"rb\") as f:\n vdata = f.read()\nvbase64 = base64.b64encode(vdata).decode(\"ascii\")\nHTML('<video controls src=\"data:video/mp4;base64,%s\" type=\"video/mp4\" >' % vbase64)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e71489499de6320ed13fe8f5ab8acb5fbd9ebd11 | 3,610 | ipynb | Jupyter Notebook | miricoord/mrs/makecrds/makecrds_mrs_cdp6.ipynb | mengesser/miricoord | e4f7bef16f8a2e6f1d46c97a2b3d78cd50de7bec | [
"BSD-3-Clause"
] | null | null | null | miricoord/mrs/makecrds/makecrds_mrs_cdp6.ipynb | mengesser/miricoord | e4f7bef16f8a2e6f1d46c97a2b3d78cd50de7bec | [
"BSD-3-Clause"
] | null | null | null | miricoord/mrs/makecrds/makecrds_mrs_cdp6.ipynb | mengesser/miricoord | e4f7bef16f8a2e6f1d46c97a2b3d78cd50de7bec | [
"BSD-3-Clause"
] | null | null | null | 21.878788 | 178 | 0.552632 | [
[
[
"## Create new MIRI MRS reference files for CDP6 ##",
"_____no_output_____"
],
[
"First import the things that we need from the pipeline code",
"_____no_output_____"
]
],
[
[
"import os as os\nimport numpy as np\nimport pdb as pdb\nfrom astropy.modeling import models\nfrom asdf import AsdfFile\nfrom jwst import datamodels\nfrom jwst.assign_wcs import miri",
"_____no_output_____"
]
],
[
[
"Import the MIRI coordinates code from https://github.com/STScI-MIRI/coordinates and ensure that it is on the PYTHONPATH. Also ensure that the output data directory is set:",
"_____no_output_____"
],
[
"setenv MIRICOORD_DATA_DIR /YourLocalPathToData/ (this is where output will happen)",
"_____no_output_____"
]
],
[
[
"data_dir=os.path.join(os.path.expandvars('$MIRICOORD_DATA_DIR'),'temp/')",
"_____no_output_____"
],
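[
"# Defensive check (an illustrative addition): fail early with a clear message\n# if MIRICOORD_DATA_DIR is unset, and make sure the output directory exists.\nassert 'MIRICOORD_DATA_DIR' in os.environ, 'Set MIRICOORD_DATA_DIR first'\nos.makedirs(data_dir, exist_ok=True)",
"_____no_output_____"
],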
[
"import miricoord.miricoord.mrs.mrs_tools as mrst",
"_____no_output_____"
],
[
"import miricoord.miricoord.mrs.mrs_pipetools as mrspt",
"_____no_output_____"
]
],
[
[
"Import the python scripts that do the heavy lifting for reference file creation:",
"_____no_output_____"
]
],
[
[
"import miricoord.miricoord.mrs.makecrds.makecrds_mrs_cdp6 as makecrds",
"_____no_output_____"
]
],
[
[
"Make new CDP-6 reference file for all channels and test them.",
"_____no_output_____"
]
],
[
[
"makecrds.create_cdp6_all(data_dir)",
"Working on: 12A\nTesting: 12A\nTesting channel 1A\nTesting channel 2A\nDone testing: 12A\nWorking on: 12B\nTesting: 12B\nTesting channel 1B\nTesting channel 2B\nDone testing: 12B\nWorking on: 12C\nTesting: 12C\nTesting channel 1C\nTesting channel 2C\nDone testing: 12C\nWorking on: 34A\nTesting: 34A\nTesting channel 3A\nTesting channel 4A\nDone testing: 34A\nWorking on: 34B\nTesting: 34B\nTesting channel 3B\nTesting channel 4B\nDone testing: 34B\nWorking on: 34C\nTesting: 34C\nTesting channel 3C\nTesting channel 4C\nDone testing: 34C\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e71490396b4ad18385fb45d2fa88223ba7e5e43b | 15,817 | ipynb | Jupyter Notebook | netcdf_to_parquet.ipynb | fbriol/pangeo-argo | 0c9b4521bda5c9e23f7bf9aa80c61d5ac6373175 | [
"MIT"
] | null | null | null | netcdf_to_parquet.ipynb | fbriol/pangeo-argo | 0c9b4521bda5c9e23f7bf9aa80c61d5ac6373175 | [
"MIT"
] | null | null | null | netcdf_to_parquet.ipynb | fbriol/pangeo-argo | 0c9b4521bda5c9e23f7bf9aa80c61d5ac6373175 | [
"MIT"
] | null | null | null | 39.941919 | 110 | 0.438832 | [
[
[
"# Converts ARGO/PR/PF to a Parquet dataset",
"_____no_output_____"
]
],
[
[
"import datetime\nimport os\nimport netCDF4\nimport numpy as np\nimport pandas as pd\nimport uuid",
"_____no_output_____"
],
[
"def array_to_list(array):\n \"\"\"Converts a numpy array into a Python list\"\"\"\n if array.dtype != np.dtype(\"S1\"):\n return [item for item in array[:, ]]\n else:\n return [item.tostring().decode() for item in array[:, ]]",
"_____no_output_____"
],
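[
"# Quick illustrative check of array_to_list (an addition, not in the original\n# notebook): byte-character matrices become strings, while numeric matrices\n# become lists of row arrays.\nchars = np.array([[b'a', b'b'], [b'c', b'd']], dtype='S1')\nprint(array_to_list(chars))  # ['ab', 'cd']\nnums = np.arange(6, dtype=np.float32).reshape(2, 3)\nprint(array_to_list(nums))  # two rows of three float32 values",
"_____no_output_____"
],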
[
"def load(path):\n \"\"\"Load a NetCDF file and transform it into a pandas DataFrame\"\"\"\n # Definitions of the dataframe schema\n cols = dict(datetime=None,\n config_mission_number=None,\n cycle_number=None,\n data_centre=None,\n data_mode=None,\n data_state_indicator=None,\n data_type=None,\n date_creation=None,\n date_qc=None,\n date_update=None,\n dc_reference=None,\n depth=None,\n depth_adjusted=None,\n depth_adjusted_error=None,\n depth_adjusted_qc=None,\n depth_qc=None,\n direction=None,\n firmware_version=None,\n float_serial_no=None,\n hdyn=None,\n latitude=None,\n longitude=None,\n original_file_name=None,\n pi_name=None,\n platform_number=None,\n platform_type=None,\n position_qc=None,\n positioning_system=None,\n pres=None,\n pres_adjusted=None,\n pres_adjusted_error=None,\n pres_adjusted_qc=None,\n pres_qc=None,\n project_name=None,\n psal=None,\n psal_adjusted=None,\n psal_adjusted_error=None,\n psal_adjusted_qc=None,\n psal_qc=None,\n station_parameters=None,\n sla=None,\n temp=None,\n temp_adjusted=None,\n temp_adjusted_error=None,\n temp_adjusted_qc=None,\n temp_qc=None,\n vertical_sampling_scheme=None,\n wmo_inst_type=None)\n\n dtypes = dict(datetime=np.datetime64,\n config_mission_number=np.int32,\n cycle_number=np.int32,\n data_centre=str,\n data_mode=str,\n data_state_indicator=str,\n data_type=str,\n date_creation=np.datetime64,\n date_qc=str,\n date_update=np.datetime64,\n dc_reference=np.int64,\n depth=[np.float32],\n depth_adjusted=[np.float32],\n depth_adjusted_error=[np.float32],\n depth_adjusted_qc=[str],\n depth_qc=[str],\n direction=str,\n firmware_version=str,\n float_serial_no=str,\n hdyn=np.float32,\n latitude=np.float64,\n longitude=np.float64,\n original_file_name=str,\n pi_name=str,\n platform_number=str,\n platform_type=str,\n position_qc=str,\n positioning_system=str,\n pres=[np.float32],\n pres_adjusted=[np.float32],\n pres_adjusted_error=[np.float32],\n pres_adjusted_qc=[str],\n pres_qc=[str],\n project_name=str,\n psal=[np.float32],\n psal_adjusted=[np.float32],\n psal_adjusted_error=[np.float32],\n psal_adjusted_qc=[str],\n psal_qc=[str],\n sla=np.float32,\n station_parameters=[str],\n temp=[np.float32],\n temp_adjusted=[np.float32],\n temp_adjusted_error=[np.float32],\n temp_adjusted_qc=[str],\n temp_qc=[str],\n vertical_sampling_scheme=str,\n wmo_inst_type=str)\n\n with netCDF4.Dataset(path, \"r\") as ds:\n # Axis of this dataset\n time, levels = len(ds.dimensions[\"N_PROF\"]), len(\n ds.dimensions[\"N_LEVELS\"])\n\n for name, item in ds.variables.items():\n values = item[:]\n\n # Axis : the axes must not contain undefined values\n if name in [\"JULD\", \"LATITUDE\", \"LONGITUDE\"]:\n if isinstance(values, np.ma.MaskedArray):\n if np.ma.is_masked(values) and name == \"JULD\":\n return None\n values = values.data\n if len(values) != time:\n assert len(values) == 1\n values = np.full((time, ), values[0], dtype=values.dtype)\n if name == 'JULD':\n cols[\"datetime\"] = pd.Series(\n netCDF4.num2date(values, item.units))\n cols[\"partition\"] = cols[\"datetime\"].apply(lambda x: x.date().replace(day=1))\n else:\n cols[name.lower()] = values\n continue\n\n # Process numpy MaskedArray\n if isinstance(\n values,\n np.ma.MaskedArray) and values.dtype != np.dtype(\"S1\"):\n values[values.mask] = netCDF4.default_fillvals[\n values.dtype.\n str[1:]] if values.dtype.kind != 'f' else np.nan\n values = values.data\n\n # Transform the data type into a vector\n # (new column of the DataFrame)\n if name == \"DATA_TYPE\":\n values = [values.tostring().decode()] * 
time\n # Transforms the matrix of char into an array of strings\n elif name == \"STATION_PARAMETERS\":\n values = values.data\n values = [[\n values[ix, jx, :].tostring().decode().strip()\n for jx in range(values.shape[1])\n ] for ix in range(values.shape[0])]\n # Transforms matrix into an array of Python lists\n elif (item.dimensions == (\"N_PROF\", \"N_LEVELS\")) or (len(\n values.shape) == 2 and item.dimensions[0] == \"N_PROF\"):\n values = array_to_list(values)\n # Converts arrays of chars into string\n elif values.dtype == np.dtype(\"S1\"):\n string = values.tostring().decode()\n values = list(string) if item.dimensions == (\n \"N_PROF\", ) else string\n # Converts a scalar into a new column\n elif item.dimensions[0] == \"N_LEVELS\" and levels == 1:\n values = [values] * time\n # Not handled\n elif item.dimensions[0] == \"N_LEVELS\":\n raise RuntimeError((path, name))\n # Transforms column name into lower case\n if name.lower() in cols:\n cols[name.lower()] = values\n\n # For all loaded data, the values are casted into the specified\n # dataframe type.\n for k, v in cols.items():\n if v is None:\n dtype = dtypes[k]\n if isinstance(dtype, list):\n dtype = dtype[0]\n if dtype != str:\n cols[k] = [\n item for item in np.full(\n (time, levels), np.nan, dtype=dtype)[:, ]\n ]\n else:\n cols[k] = [' ' * levels for _ in range(time)]\n elif dtype == np.dtype(\"float32\"):\n cols[k] = np.full((time,), np.nan, dtype=dtype)\n elif dtype == np.datetime64:\n cols[k] = np.datetime64()\n elif dtype == str:\n cols[k] = \"\"\n\n df = pd.DataFrame(cols)\n # Strip strings\n for key in [\n \"data_state_indicator\", \"data_type\", \"firmware_version\",\n \"float_serial_no\", \"pi_name\", \"platform_number\",\n \"platform_type\", \"positioning_system\",\n \"vertical_sampling_scheme\", \"wmo_inst_type\"\n ]:\n df.loc[:, key] = df.loc[:, key].apply(lambda x: x.strip())\n df[\"original_file_name\"] = os.path.basename(path)\n # Transformation of some types contained in the columns. It's faster\n # to do it here on the dataframe pandas.\n df.loc[:, \"dc_reference\"] = df.loc[:, \"dc_reference\"].apply(lambda x:\n int(x))\n df.loc[:, \"date_creation\"] = df.loc[:, \"date_creation\"].apply(\n lambda x: datetime.datetime.strptime(x, \"%Y%m%d%H%M%S\"))\n df.loc[:, \"date_update\"] = df.loc[:, \"date_update\"].apply(\n lambda x: datetime.datetime.strptime(x, \"%Y%m%d%H%M%S\"))\n return df",
"_____no_output_____"
],
[
"dirname = \"dataset\"",
"_____no_output_____"
],
[
"# Process one file to test the conversion\nload(os.path.join(dirname, \"CO_DMQCGL01_20000510_PR_PF.nc\")).iloc[0]",
"_____no_output_____"
],
[
"def write_db(dirname, df):\n \"\"\"Function to write or dataset\"\"\"\n # During the conversion, we defined a column labeled \"partition\" defining\n # the date of our data. This column will be used to group our dataset by\n # month. It's possible to do it differently, for example by days.\n partition_keys = [df[\"partition\"]]\n data_df = df.drop(\"partition\", axis='columns')\n for key, subgroup in data_df.groupby(partition_keys):\n outfile = None\n subdir = os.path.join(dirname, f'year={key.year}',\n f'month={key.month}')\n update = os.path.exists(subdir)\n # Handles of dataframe update.\n if update:\n files = list(os.listdir(subdir))\n if len(files) == 1:\n outfile = os.path.join(subdir, files.pop())\n subgroup = pd.concat([pd.read_parquet(outfile), subgroup])\n elif len(files) == 0:\n pass\n else:\n raise RuntimeError(files)\n else:\n os.makedirs(subdir)\n\n if outfile is None:\n outfile = f'{uuid.uuid4().hex}.parquet'\n # TODO: lock file before write\n subgroup.to_parquet(os.path.join(subdir, outfile),\n index=False,\n compression='snappy')",
"_____no_output_____"
],
[
"# A minimalist solution to avoid reprocessing files twice.\ndef write_buffer(dirname, buffer, files):\n write_db(dirname, pd.concat(buffer))\n for item in files:\n with open(f\"{item}.done\", \"w\") as stream:\n pass",
"_____no_output_____"
],
[
"root = \"argo\"",
"_____no_output_____"
],
[
"# We can now start the conversion. We'll process the data in blocks to avoid\n# doing too many OIs. We make blocks of 64 files, it is the computer RAM that\n# acts as a limit here.\nblocs = []\nfiles = []\n\ndef netcdf_2_parquet(dirname, blocs, files):\n write_buffer(dirname, blocs, files)\n blocs.clear()\n files.clear() \n\nfor item in sorted(os.listdir(dirname)):\n # Skip the file already processed\n if 'PR_PF' not in item or item.endswith(\".done\"):\n continue\n path = os.path.join(dirname, item)\n blocs.append(load(path))\n files.append(path)\n if len(blocs) > 64:\n netcdf_2_parquet(root, blocs, files)\nif len(blocs):\n netcdf_2_parquet(root, blocs, files)",
"_____no_output_____"
],
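[
"# Inspect the hive-style layout produced by write_db (illustrative addition):\n# one snappy-compressed parquet file per year=YYYY/month=M directory.\nfor dirpath, _, filenames in os.walk(root):\n    for name in filenames:\n        print(os.path.join(dirpath, name))",
"_____no_output_____"
],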
[
"# Reading our file\nimport pyarrow.parquet as pq\npq.read_table(root, filters=[('year', '==', '2000')]).to_pandas()",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7149a9590839f15f2d4a15c1f903d9b6f863fa9 | 60,621 | ipynb | Jupyter Notebook | community/awards/teach_me_quantum_2018/qml_mooc/05_Gate-Model Quantum Computing.ipynb | Chibikuri/qiskit-tutorials | 15c121b95249de17e311c869fbc455210b2fcf5e | [
"Apache-2.0"
] | 293 | 2020-05-29T17:03:04.000Z | 2022-03-31T07:09:50.000Z | community/awards/teach_me_quantum_2018/qml_mooc/05_Gate-Model Quantum Computing.ipynb | Chibikuri/qiskit-tutorials | 15c121b95249de17e311c869fbc455210b2fcf5e | [
"Apache-2.0"
] | 30 | 2020-06-23T19:11:32.000Z | 2021-12-20T22:25:54.000Z | community/awards/teach_me_quantum_2018/qml_mooc/05_Gate-Model Quantum Computing.ipynb | Chibikuri/qiskit-tutorials | 15c121b95249de17e311c869fbc455210b2fcf5e | [
"Apache-2.0"
] | 204 | 2020-06-08T12:55:52.000Z | 2022-03-31T08:37:14.000Z | 191.233438 | 18,184 | 0.881114 | [
[
[
"So far you mastered the notation of quantum mechanics and quantum computing, understood as much physics as needed to perform various operations on quantum states, and now you are ready to build quantum algorithms. In this notebook, we look at the basics of gate-model quantum computing, which is sometimes also referred to as universal quantum computing. Most academic and commercial efforts to build a quantum computer focus on this model: Alibaba, Baidu, Google, HP, IBM Q, Intel, IonQ, Microsoft, Rigetti Computing, and Tencent all aim at this, and the list keeps expanding. It remains unclear which implementation will prove scalable: superconducting chips, photonic systems, and ion traps are the most common types, each having its own advantages and disadvantages. We abstract away, we focus on the quantum algorithms irrespective of the physical implementation.\n\nTo get there, first we have to familiarize ourselves with some gates and what happens to those gates on quantum computers. The following diagram shows the software stack that bridges a problem we want to solve with the actual computational back-end [[1](#1)]:\n\n<img src=\"figures/universal_quantum_workflow.png\" alt=\"Software stack on a gate-model quantum computer\" style=\"width: 400px;\"/>\n\nFirst, we define the problem at a high-level and a suitable quantum algorithm is chosen. Then, we express the quantum algorithm as a quantum circuit composed of gates. This in turn has to be compiled to a specific quantum gate set available. The last step is to execute the final circuit either on a quantum processor or on a simulator.\n\nThe quantum algorithms we are interested in are about machine learning. In this notebook, we look at the levels below algorithms: the definition of circuits, their compilation, and the mapping to the hardware or a simulator.\n\n\n# Defining circuits\n\nCircuits are composed of qubit registers, gates acting on them, and measurements on the registers. To store the outcome of registers, many quantum computing libraries add classical registers to the circuits. Even by this language, you can tell that this is a very low level of programming a computer. It resembles the assembly language of digital computers, in which a program consists of machine code instructions.\n\nQubit registers are indexed from 0. We often just say qubit 0, qubit 1, and so on, to refer to the register containing a qubit. This is not to be confused with the actual state of the qubit, which can be $|0\\rangle$, $|1\\rangle$, or any superposition thereof. For instance, qubit 0 can be in the state $|1\\rangle$.\n\nLet's take a look at the gates. In digital computing, a processor transform bit strings to bit strings with logical gates. Any bit string can be achieved with just two gates, which makes universal computations possible with simple operations composed only of these two types of gates. It is remarkable and surprising that the same is also true for quantum computers: any unitary operation can be decomposed into elementary gates, and three types of gates are sufficient. This is remarkable since we are talking about transforming continuous-valued probability amplitudes, not just discrete elements. Yet, this result is what provides the high-level theoretical foundation for being able to build a universal quantum computer at all.\n\nLet's look at some common gates, some of which we have already seen. 
Naturally, all of these are unitary.\n\n| Gate |Name | Matrix |\n|------|--------------------|---------------------------------------------------------------------|\n| X | Pauli-X or NOT gate|$\\begin{bmatrix}0 & 1\\\\ 1& 0\\end{bmatrix}$|\n| Z | Pauli-Z gate |$\\begin{bmatrix}1 & 0\\\\ 0& -1\\end{bmatrix}$|\n| H | Hadamard gate |$\\frac{1}{\\sqrt{2}}\\begin{bmatrix}1 & 1\\\\ 1& -1\\end{bmatrix}$|\n| Rx($\\theta$)| Rotation around X|$\\begin{bmatrix}\\cos(\\theta/2) & -\\imath \\sin(\\theta/2)\\\\ -\\imath \\sin(\\theta / 2) & \\cos(\\theta / 2)\\end{bmatrix}$|\n| Ry($\\theta$)| Rotation around Y|$\\begin{bmatrix}\\cos(\\theta/2) & -\\sin(\\theta/2)\\\\ -\\sin(\\theta / 2) & \\cos(\\theta / 2)\\end{bmatrix}$|\n| CNOT, CX | Controlled-NOT | $\\begin{bmatrix}1 & 0 & 0 &0\\\\ 0 & 1 & 0 &0\\\\ 0 & 0 & 0 &1\\\\ 0 & 0 & 1 &0\\end{bmatrix}$|\n\nAs we have seen before, the rotations correspond to axis defined in the Bloch sphere. \n\nThere should be one thing immediately apparent from the table: there are many, in fact, infinitely many single-qubit operations. The rotations, for instance, are parametrized by a continuous value. This is in stark contrast with digital circuits, where the only non-trivial single-bit gate is the NOT gate.\n\nThe CNOT gate is the only two-qubit gate in this list. It has a special role: we need two-qubit interactions to create entanglement. Let's repeat the circuit for creating the $|\\phi^+\\rangle = \\frac{1}{\\sqrt{2}}(|00\\rangle+|11\\rangle)$. We will have two qubit registers and two classical registers for measurement output. First, let's define the circuit and plot it:",
"_____no_output_____"
]
],
[
[
"from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister\nfrom qiskit import execute\nfrom qiskit import BasicAer\nfrom qiskit.tools.visualization import circuit_drawer, plot_histogram\n\nq = QuantumRegister(2)\nc = ClassicalRegister(2)\ncircuit = QuantumCircuit(q, c)\ncircuit.h(q[0])\ncircuit.cx(q[0], q[1])\ncircuit_drawer(circuit)",
"/home/pwittek/.anaconda3/envs/qiskit/lib/python3.7/site-packages/marshmallow/schema.py:364: ChangedInMarshmallow3Warning: strict=False is not recommended. In marshmallow 3.0, schemas will always be strict. See https://marshmallow.readthedocs.io/en/latest/upgrading.html#schemas-are-always-strict\n ChangedInMarshmallow3Warning\n/home/pwittek/.anaconda3/envs/qiskit/lib/python3.7/site-packages/qiskit/tools/visualization/_circuit_visualization.py:206: DeprecationWarning: The current behavior for the default output will change in a future release. Instead of trying latex and falling back to mpl on failure it will just use \"text\" by default\n '\"text\" by default', DeprecationWarning)\n"
]
],
[
[
"Note that we can't just initialize the qubit registers in a state we fancy. All registers are initialized in $|0\\rangle$ and creating a desired state is **part** of the circuit. In a sense, arbitrary state preparation is the same as universal quantum computation: the end of the calculation is a state that we desired to prepare. Some states are easier to prepare than others. The above circuit has only two gates to prepare our target state, so it is considered very easy.\n\nLet us see what happens in this circuit. The Hadamard gate prepares an equal superposition $\\frac{1}{\\sqrt{2}}(|0\\rangle+|1\\rangle)$ in qubit 0. This qubit controls an X gate on qubit 1. Since qubit 0 is in the equal superposition after the Hadamard gate, it will not apply the X gate for the first part of the superposition ($|0\\rangle$) and it will apply the X gate for the second part of the superposition ($|1\\rangle$). Thus we create the final state $\\frac{1}{\\sqrt{2}}(|00\\rangle+|11\\rangle)$, and we entangle the two qubit registers.\n\nA digital computer's processing unit typically has 64-bit registers and it is able to perform universal calculations on bit strings. Any complex calculation is broken down into elementary 64-bit operations, either sequentially or in parallel execution. So you may wonder what is the deal with the thousands of qubits we expect from a quantum computer. Why can't a 64-qubit quantum computer be enough?\n\nEntanglement is the easiest way to understand why we need so many qubits. Entanglement is a key resource in quantum computing and we want to make use of it. If we have 64-qubits and we want to entangle another one outside these 64 registers, we would have get rid of the qubit in one of the registers, potentially destroying a superposition ad definitely destroying entanglement between that register and any other qubit on the chip. The only way to make use of superpositions and the strong correlations provided by entanglement is if the entire problem is on the quantum processing unit for the duration of the calculation.\n\nThis global nature of the calculation is also the reason why there is a focus on problems that are difficult to break down into elementary calculations. The travelling salesman problem is a great example: we need to consider all cities and all distances to minimize overall travel length.\n\nTo finish off the circuit, we could add a measurement to each qubit:",
"_____no_output_____"
]
],
[
[
"circuit.measure(q, c)\ncircuit_drawer(circuit)",
"/home/pwittek/.anaconda3/envs/qiskit/lib/python3.7/site-packages/qiskit/tools/visualization/_circuit_visualization.py:206: DeprecationWarning: The current behavior for the default output will change in a future release. Instead of trying latex and falling back to mpl on failure it will just use \"text\" by default\n '\"text\" by default', DeprecationWarning)\n"
]
],
[
[
"Finally, we can plot the statistics:",
"_____no_output_____"
]
],
[
[
"backend = BasicAer.get_backend('qasm_simulator')\njob = execute(circuit, backend, shots=100)\nplot_histogram(job.result().get_counts(circuit))",
"/home/pwittek/.anaconda3/envs/qiskit/lib/python3.7/site-packages/marshmallow/schema.py:364: ChangedInMarshmallow3Warning: strict=False is not recommended. In marshmallow 3.0, schemas will always be strict. See https://marshmallow.readthedocs.io/en/latest/upgrading.html#schemas-are-always-strict\n ChangedInMarshmallow3Warning\n/home/pwittek/.anaconda3/envs/qiskit/lib/python3.7/site-packages/marshmallow/schema.py:364: ChangedInMarshmallow3Warning: strict=False is not recommended. In marshmallow 3.0, schemas will always be strict. See https://marshmallow.readthedocs.io/en/latest/upgrading.html#schemas-are-always-strict\n ChangedInMarshmallow3Warning\n"
]
],
[
[
"As we have seen before, 01 and 10 never appear.",
"_____no_output_____"
],
[
"# Compilation\n\nThe circuit is the way to describe a quantum algorithm. It may also contain some arbitrary single or two-qubit unitary and controlled versions thereof. A quantum compiler should be able to decompose these into elementary gates.",
"_____no_output_____"
],
[
"For instance, in Qiskit, you can access to the general unitary using the $u3$ gate\n\n$$\nu3(\\theta, \\phi, \\lambda) = \\begin{pmatrix}\n\\cos(\\theta/2) & -e^{i\\lambda}\\sin(\\theta/2) \\\\\ne^{i\\phi}\\sin(\\theta/2) & e^{i\\lambda+i\\phi}\\cos(\\theta/2) \n\\end{pmatrix}.\n$$\n\nThe compiler decomposes it into an actual gate sequence.",
"_____no_output_____"
],
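[
"As a worked check (added for illustration): the Hadamard gate is recovered as $u3(\\pi/2, 0, \\pi)$, since\n\n$$\nu3(\\pi/2, 0, \\pi) = \\begin{pmatrix}\n\\cos(\\pi/4) & -e^{i\\pi}\\sin(\\pi/4) \\\\\ne^{i\\cdot 0}\\sin(\\pi/4) & e^{i\\pi}\\cos(\\pi/4)\n\\end{pmatrix} = \\frac{1}{\\sqrt{2}}\\begin{pmatrix}\n1 & 1 \\\\\n1 & -1\n\\end{pmatrix} = H.\n$$",
"_____no_output_____"
],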
[
"This is one task of a quantum compiler. The next one is to translate the gates given in the circuit to the gates implemented in the hardware or the simulator. In the table above, we defined many gates, but a well-chosen set of three is sufficient for universality. For engineering constraints, typically one minimal set of universal gates is implemented in the hardware. It depends on the physical architecture which three.\n\nAt this point, the number of gates applied is probably already increasing: the decomposition of unitary will create many gates and the translation of gates is also likely to add more gates. An additional problem is the topology of the qubits: in some implementations not all qubit registers are connected to each other. The most popular implementation is superconducting qubits, which are manufactured on silicon chips just like any digital device you have. Since this is a quintessentially two dimensional layout, most qubits on the chip will not be connected. Here is an example topology of eight qubits on a superconducting quantum computer:\n\n<img src=\"figures/eight_qubits.svg\" alt=\"8-qubit topology\" style=\"width: 200px;\"/>\n\nIf we want to perform a two-qubit operations between two qubits that are not neighbouring, we have to perform SWAP operations to switch the qubit states between registers. A SWAP consists of three CNOT gates in a sequence.\n\nThe total number of gates at the end of the compilation reflects the true requirement of the hardware. *Circuit depth* is the number of time steps required to execute the circuit, assuming that gates acting on distinct qubits can operate in parallel. On current and near-term quantum computers, we want circuits to be shallow, otherwise decoherence or other forms of noise destroy our calculations.\n\nWe have to emphasize that the compilation depends on the backend. On the simulator, physical constraints do not apply. If we compile the circuit above, its depth will not increase:",
"_____no_output_____"
]
],
[
[
"from qiskit import compile\ncompiled_circuit = compile(circuit, backend)\ncompiled_circuit.as_dict()['experiments'][0]['instructions']",
"_____no_output_____"
],
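[
"# Illustrative check of the SWAP decomposition mentioned above (an addition,\n# not part of the original text): a SWAP equals three alternating CNOTs.\nq = QuantumRegister(2)\nswap_circuit = QuantumCircuit(q)\nswap_circuit.cx(q[0], q[1])\nswap_circuit.cx(q[1], q[0])\nswap_circuit.cx(q[0], q[1])\nunitary_backend = BasicAer.get_backend('unitary_simulator')\nprint(execute(swap_circuit, unitary_backend).result().get_unitary())",
"_____no_output_____"
]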
],
[
[
"In this case, the only thing the compiler did was replacing the Hadamard gate with a parametrized unitary operation.",
"_____no_output_____"
],
[
"# References\n\n[1] M. Fingerhuth, T. Babej, P. Wittek. (2018). [Open source software in quantum computing](https://doi.org/10.1371/journal.pone.0208561). *PLOS ONE* 13(12):e0208561. <a id='1'></a>",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
e7149be2a01706233455d4519d4b9b0aef197874 | 14,058 | ipynb | Jupyter Notebook | section_3/03_exercise.ipynb | okada-tak/object_detection_azuma | ed3897ca52f250137eb83e92b1ccb07821c64b8d | [
"MIT"
] | null | null | null | section_3/03_exercise.ipynb | okada-tak/object_detection_azuma | ed3897ca52f250137eb83e92b1ccb07821c64b8d | [
"MIT"
] | null | null | null | section_3/03_exercise.ipynb | okada-tak/object_detection_azuma | ed3897ca52f250137eb83e92b1ccb07821c64b8d | [
"MIT"
] | null | null | null | 31.591011 | 243 | 0.464646 | [
[
[
"<a href=\"https://colab.research.google.com/github/yukinaga/object_detection/blob/main/section_3/03_exercise.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"# 演習\nRetinaNetで、物体の領域を出力する`regression_head`も訓練対象に加えてみましょう。 \nモデルを構築するコードに、追記を行なってください。",
"_____no_output_____"
],
[
"## 各設定",
"_____no_output_____"
]
],
[
[
"import torch\nfrom torch.utils.data import DataLoader\n\nimport torchvision\nimport torchvision.transforms as transforms\nfrom torchvision.utils import draw_bounding_boxes\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\n\n# インデックスを物体名に変換\nindex2name = [\n \"person\",\n \"bird\",\n \"cat\",\n \"cow\",\n \"dog\",\n \"horse\",\n \"sheep\",\n \"aeroplane\",\n \"bicycle\",\n \"boat\",\n \"bus\",\n \"car\",\n \"motorbike\",\n \"train\",\n \"bottle\",\n \"chair\",\n \"diningtable\",\n \"pottedplant\",\n \"sofa\",\n \"tvmonitor\",\n]\nprint(index2name)\n\n# 物体名をインデックスに変換\nname2index = {}\nfor i in range(len(index2name)):\n name2index[index2name[i]] = i\nprint(name2index)",
"_____no_output_____"
]
],
[
[
"## ターゲットを整える関数",
"_____no_output_____"
]
],
[
[
"def arrange_target(target):\n objects = target[\"annotation\"][\"object\"]\n box_dics = [obj[\"bndbox\"] for obj in objects]\n box_keys = [\"xmin\", \"ymin\", \"xmax\", \"ymax\"]\n\n # バウンディングボックス\n boxes = []\n for box_dic in box_dics:\n box = [int(box_dic[key]) for key in box_keys]\n boxes.append(box)\n boxes = torch.tensor(boxes)\n\n # 物体名\n labels = [name2index[obj[\"name\"]] for obj in objects] # 物体名はインデックスに変換\n labels = torch.tensor(labels)\n\n dic = {\"boxes\":boxes, \"labels\":labels}\n return dic",
"_____no_output_____"
]
],
[
[
"## データセットの読み込み",
"_____no_output_____"
]
],
[
[
"dataset_train=torchvision.datasets.VOCDetection(root=\"./VOCDetection/2012\",\n year=\"2012\",image_set=\"train\",\n download=True,\n transform=transforms.ToTensor(),\n target_transform=transforms.Lambda(arrange_target)\n )\n\ndataset_test=torchvision.datasets.VOCDetection(root=\"./VOCDetection/2012\",\n year=\"2012\",image_set=\"val\",\n download=True,\n transform=transforms.ToTensor(),\n target_transform=transforms.Lambda(arrange_target)\n )",
"_____no_output_____"
]
],
[
[
"## DataLoaderの設定",
"_____no_output_____"
]
],
[
[
"data_loader_train = DataLoader(dataset_train, batch_size=1, shuffle=True)\ndata_loader_test = DataLoader(dataset_test, batch_size=1, shuffle=True)",
"_____no_output_____"
]
],
[
[
"## ターゲットの表示",
"_____no_output_____"
]
],
[
[
"def show_boxes(image, boxes, names):\n drawn_boxes = draw_bounding_boxes(image, boxes, labels=names)\n\n plt.figure(figsize = (16,16))\n plt.imshow(np.transpose(drawn_boxes, (1, 2, 0))) # チャンネルを一番後ろに\n plt.tick_params(labelbottom=False, labelleft=False, bottom=False, left=False) # ラベルとメモリを非表示に\n plt.show()\n\ndataiter = iter(data_loader_train) # イテレータ\nimage, target = dataiter.next() # バッチを取り出す\nprint(target)\n\nimage = image[0]\nimage = (image*255).to(torch.uint8) # draw_bounding_boxes関数の入力は0-255\n\nboxes = target[\"boxes\"][0]\n\nlabels = target[\"labels\"][0]\nnames = [index2name[label.item()] for label in labels]\n\nshow_boxes(image, boxes, names)",
"_____no_output_____"
]
],
[
[
"# モデルの構築\n以下のセルのコードに追記を行い、物体領域の座標を出力する`regression_head`のパラメータも訓練可能にしましょう。 \nPyTorchの公式ドキュメントに記載されている、RetinaNetのコードを参考にしましょう。 \nhttps://pytorch.org/vision/stable/_modules/torchvision/models/detection/retinanet.html#retinanet_resnet50_fpn",
"_____no_output_____"
]
],
[
[
"model = torchvision.models.detection.retinanet_resnet50_fpn(pretrained=True)\n\nnum_classes=len(index2name)+1 # 分類数: 背景も含めて分類するため1を加える\nnum_anchors = model.head.classification_head.num_anchors # アンカーの数\n\n# 分類数を設定\nmodel.head.classification_head.num_classes = num_classes\n\n# 分類結果を出力する層の入れ替え\ncls_logits = torch.nn.Conv2d(256, num_anchors*num_classes, kernel_size=3, stride=1, padding=1)\ntorch.nn.init.normal_(cls_logits.weight, std=0.01) # RetinaNetClassificationHeadクラスより\ntorch.nn.init.constant_(cls_logits.bias, -math.log((1 - 0.01) / 0.01)) # RetinaNetClassificationHeadクラスより\nmodel.head.classification_head.cls_logits = cls_logits # 層の入れ替え\n\n# 全てのパラメータを更新不可に\nfor p in model.parameters():\n p.requires_grad = False\n\n# classification_headのパラメータを更新可能に\nfor p in model.head.classification_head.parameters():\n p.requires_grad = True\n\n# regression_headのパラメータを更新可能に\n# ------- 以下にコードを書く -------\n\n\n# ------- ここまで -------\n\nmodel.cuda() # GPU対応",
"_____no_output_____"
]
],
[
[
"## 訓練",
"_____no_output_____"
]
],
[
[
"# 最適化アルゴリズム\nparams = [p for p in model.parameters() if p.requires_grad]\noptimizer = torch.optim.SGD(params, lr=0.001, momentum=0.9)\n\nmodel.train() # 訓練モード\nepochs = 3\nfor epoch in range(epochs):\n for i, (image, target) in enumerate(data_loader_train):\n image = [img.cuda() for img in image] # GPU対応\n\n boxes = target[\"boxes\"][0].cuda()\n labels = target[\"labels\"][0].cuda()\n target = [{\"boxes\":boxes, \"labels\":labels}] # ターゲットは辞書を要素に持つリスト\n\n loss_dic = model(image, target)\n loss = sum(loss for loss in loss_dic.values()) # 誤差の合計を計算\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n if i%100 == 0: # 100回ごとに経過を表示\n print(\"epoch:\", epoch, \"iteration:\", i, \"loss:\", loss.item()) ",
"_____no_output_____"
]
],
[
[
"## 訓練したモデルの使用",
"_____no_output_____"
]
],
[
[
"dataiter = iter(data_loader_test) # イテレータ\nimage, target = dataiter.next() # バッチを取り出す\n\nimage = [img.cuda() for img in image] # GPU対応\n\nmodel.eval()\npredictions = model(image)\nprint(predictions)\n\nimage = (image[0]*255).to(torch.uint8).cpu() # draw_bounding_boxes関数の入力は0-255\nboxes = predictions[0][\"boxes\"].cpu()\nlabels = predictions[0][\"labels\"].cpu().detach().numpy()\nlabels = np.where(labels>=len(index2name), 0, labels) # ラベルが範囲外の場合は0に\nnames = [index2name[label.item()] for label in labels]\n\nprint(names)\nshow_boxes(image, boxes, names)",
"_____no_output_____"
]
],
[
[
"## スコアによる選別",
"_____no_output_____"
]
],
[
[
"boxes = []\nnames = []\nfor i, box in enumerate(predictions[0][\"boxes\"]):\n score = predictions[0][\"scores\"][i].cpu().detach().numpy()\n if score > 0.5: # スコアが0.5より大きいものを抜き出す\n boxes.append(box.cpu().tolist())\n label = predictions[0][\"labels\"][i].item()\n if label >= len(index2name): # ラベルが範囲外の場合は0に\n label = 0\n name = index2name[label]\n names.append(name)\nboxes = torch.tensor(boxes)\n\nshow_boxes(image, boxes, names)",
"_____no_output_____"
],
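[
"# Optional refinement (an addition, not part of the original exercise): apply\n# torchvision's non-maximum suppression to drop heavily overlapping boxes\n# before the score-based selection above.\nfrom torchvision.ops import nms\n\nkeep = nms(predictions[0][\"boxes\"].detach().cpu(),\n           predictions[0][\"scores\"].detach().cpu(),\n           iou_threshold=0.5)\nprint(\"kept\", len(keep), \"of\", len(predictions[0][\"boxes\"]), \"boxes\")",
"_____no_output_____"
]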
],
[
[
"# 解答例\n以下は、どうしても手がかりがないときのみ参考にしましょう。",
"_____no_output_____"
]
],
[
[
"model = torchvision.models.detection.retinanet_resnet50_fpn(pretrained=True)\n\nnum_classes=len(index2name)+1 # 分類数: 背景も含めて分類するため1を加える\nnum_anchors = model.head.classification_head.num_anchors # アンカーの数\n\n# 分類数を設定\nmodel.head.classification_head.num_classes = num_classes\n\n# 分類結果を出力する層の入れ替え\ncls_logits = torch.nn.Conv2d(256, num_anchors*num_classes, kernel_size=3, stride=1, padding=1)\ntorch.nn.init.normal_(cls_logits.weight, std=0.01) # RetinaNetClassificationHeadクラスより\ntorch.nn.init.constant_(cls_logits.bias, -math.log((1 - 0.01) / 0.01)) # RetinaNetClassificationHeadクラスより\nmodel.head.classification_head.cls_logits = cls_logits # 層の入れ替え\n\n# 全てのパラメータを更新不可に\nfor p in model.parameters():\n p.requires_grad = False\n\n# classification_headのパラメータを更新可能に\nfor p in model.head.classification_head.parameters():\n p.requires_grad = True\n\n# regression_headのパラメータを更新可能に\n# ------- 以下にコードを書く -------\nfor p in model.head.regression_head.parameters():\n p.requires_grad = True\n# ------- ここまで -------\n\nmodel.cuda() # GPU対応",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e714a56e2c088e8a1033d03b0ce0fbd9633d2a2c | 19,521 | ipynb | Jupyter Notebook | tests/ml-books/saving-and-reloading-models.ipynb | gopala-kr/ds-notebooks | bc35430ecdd851f2ceab8f2437eec4d77cb59423 | [
"MIT"
] | 1 | 2019-05-10T09:16:23.000Z | 2019-05-10T09:16:23.000Z | tests/ml-books/saving-and-reloading-models.ipynb | gopala-kr/ds-notebooks | bc35430ecdd851f2ceab8f2437eec4d77cb59423 | [
"MIT"
] | null | null | null | tests/ml-books/saving-and-reloading-models.ipynb | gopala-kr/ds-notebooks | bc35430ecdd851f2ceab8f2437eec4d77cb59423 | [
"MIT"
] | 1 | 2019-05-10T09:17:28.000Z | 2019-05-10T09:17:28.000Z | 36.083179 | 479 | 0.52651 | [
[
[
"*Accompanying code examples of the book \"Introduction to Artificial Neural Networks and Deep Learning: A Practical Guide with Applications in Python\" by [Sebastian Raschka](https://sebastianraschka.com). All code examples are released under the [MIT license](https://github.com/rasbt/deep-learning-book/blob/master/LICENSE). If you find this content useful, please consider supporting the work by buying a [copy of the book](https://leanpub.com/ann-and-deeplearning).*\n \nOther code examples and content are available on [GitHub](https://github.com/rasbt/deep-learning-book). The PDF and ebook versions of the book are available through [Leanpub](https://leanpub.com/ann-and-deeplearning).",
"_____no_output_____"
]
],
[
[
"#load watermark\n%load_ext watermark\n%watermark -a 'Gopala KR' -u -d -v -p watermark,numpy,matplotlib,nltk,sklearn,tensorflow,theano,mxnet,chainer,seaborn,keras,tflearn,bokeh,gensim",
"/srv/venv/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n from ._conv import register_converters as _register_converters\nWARNING (theano.tensor.blas): Using NumPy C-API based implementation for BLAS functions.\nUsing TensorFlow backend.\n/srv/venv/lib/python3.6/site-packages/tensorflow/python/util/tf_inspect.py:45: DeprecationWarning: inspect.getargspec() is deprecated, use inspect.signature() or inspect.getfullargspec()\n if d.decorator_argspec is not None), _inspect.getargspec(target))\n"
]
],
[
[
"# Model Zoo -- Saving and Loading Trained Models \n\n## from TensorFlow Checkpoint Files and NumPy NPZ Archives",
"_____no_output_____"
],
[
"This notebook demonstrates different strategies on how to export and import training TensorFlow models based on a a simple 2-hidden layer multilayer perceptron. These include\n\n- Using regular TensorFlow meta and checkpoint files\n- Loading variables from NumPy archives (.npz) files\n\nNote that the graph def is going set up in a way that it constructs \"rigid,\" not trainable TensorFlow classifier if .npz files are provided. This is on purpose, since it may come handy in certain use cases, but the code can be easily modified to make the model trainable if NumPy .npz files are provided -- for example, by wrapping the `tf.constant` calls in `fc_layer` in a `tf.Variable` constructor like so:\n\n```python\n...\nif weight_params is not None:\n weights = tf.Variable(tf.constant(weight_params, name='weights',\n dtype=tf.float32))\n...\n```\n\ninstead of \n\n```python\n...\nif weight_params is not None:\n weights = tf.constant(weight_params, name='weights',\n dtype=tf.float32)\n...\n```",
"_____no_output_____"
],
[
"## Define Multilayer Perceptron Graph",
"_____no_output_____"
],
[
"The following code cells defines wrapper functions for our convenience; it saves us some re-typing later when we set up the TensorFlow multilayer perceptron graphs for the trainable and non-trainable models.",
"_____no_output_____"
]
],
[
[
"import tensorflow as tf\n\n\n##########################\n### WRAPPER FUNCTIONS\n##########################\n\n\ndef fc_layer(input_tensor, n_output_units, name,\n activation_fn=None, seed=None,\n weight_params=None, bias_params=None):\n\n with tf.variable_scope(name):\n\n if weight_params is not None:\n weights = tf.constant(weight_params, name='weights',\n dtype=tf.float32)\n else:\n weights = tf.Variable(tf.truncated_normal(\n shape=[input_tensor.get_shape().as_list()[-1], n_output_units],\n mean=0.0,\n stddev=0.1,\n dtype=tf.float32,\n seed=seed),\n name='weights',)\n\n if bias_params is not None:\n biases = tf.constant(bias_params, name='biases', \n dtype=tf.float32)\n\n else:\n biases = tf.Variable(tf.zeros(shape=[n_output_units]),\n name='biases', \n dtype=tf.float32)\n\n act = tf.matmul(input_tensor, weights) + biases\n\n if activation_fn is not None:\n act = activation_fn(act)\n\n return act\n\n\ndef mlp_graph(n_input=784, n_classes=10, n_hidden_1=128, n_hidden_2=256,\n learning_rate=0.1,\n fixed_params=None):\n \n # fixed_params to allow loading weights & biases\n # from NumPy npz archives and defining a fixed, non-trainable\n # TensorFlow classifier\n if not fixed_params:\n var_names = ['fc1/weights:0', 'fc1/biases:0',\n 'fc2/weights:0', 'fc2/biases:0',\n 'logits/weights:0', 'logits/biases:0',]\n \n fixed_params = {v: None for v in var_names}\n found_params = False\n else:\n found_params = True\n \n # Input data\n tf_x = tf.placeholder(tf.float32, [None, n_input], name='features')\n tf_y = tf.placeholder(tf.int32, [None], name='targets')\n tf_y_onehot = tf.one_hot(tf_y, depth=n_classes, name='onehot_targets')\n\n # Multilayer perceptron\n fc1 = fc_layer(input_tensor=tf_x, \n n_output_units=n_hidden_1, \n name='fc1',\n weight_params=fixed_params['fc1/weights:0'], \n bias_params=fixed_params['fc1/biases:0'],\n activation_fn=tf.nn.relu)\n\n fc2 = fc_layer(input_tensor=fc1, \n n_output_units=n_hidden_2, \n name='fc2',\n weight_params=fixed_params['fc2/weights:0'], \n bias_params=fixed_params['fc2/biases:0'],\n activation_fn=tf.nn.relu)\n \n logits = fc_layer(input_tensor=fc2, \n n_output_units=n_classes, \n name='logits',\n weight_params=fixed_params['logits/weights:0'], \n bias_params=fixed_params['logits/biases:0'],\n activation_fn=tf.nn.relu)\n \n # Loss and optimizer\n ### Only necessary if no existing params are found\n ### and a trainable graph has to be initialized\n if not found_params:\n loss = tf.nn.softmax_cross_entropy_with_logits(\n logits=logits, labels=tf_y_onehot)\n cost = tf.reduce_mean(loss, name='cost')\n optimizer = tf.train.GradientDescentOptimizer(\n learning_rate=learning_rate)\n train = optimizer.minimize(cost, name='train')\n\n # Prediction\n probabilities = tf.nn.softmax(logits, name='probabilities')\n labels = tf.cast(tf.argmax(logits, 1), tf.int32, name='labels')\n \n correct_prediction = tf.equal(labels, \n tf_y, name='correct_predictions')\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32),\n name='accuracy')",
"_____no_output_____"
]
],
[
[
"## Train and Save Multilayer Perceptron",
"_____no_output_____"
]
],
[
[
"from tensorflow.examples.tutorials.mnist import input_data\n\n##########################\n### SETTINGS\n##########################\n\n# Hyperparameters\nlearning_rate = 0.1\ntraining_epochs = 10\nbatch_size = 64\n\n##########################\n### GRAPH DEFINITION\n##########################\n\ng = tf.Graph()\nwith g.as_default():\n mlp_graph()\n\n##########################\n### DATASET\n##########################\n\nmnist = input_data.read_data_sets(\"./\", one_hot=False)\n\n##########################\n### TRAINING & EVALUATION\n##########################\n\nwith tf.Session(graph=g) as sess:\n sess.run(tf.global_variables_initializer())\n saver0 = tf.train.Saver()\n \n for epoch in range(training_epochs):\n avg_cost = 0.\n total_batch = mnist.train.num_examples // batch_size\n\n for i in range(total_batch):\n batch_x, batch_y = mnist.train.next_batch(batch_size)\n _, c = sess.run(['train', 'cost:0'], feed_dict={'features:0': batch_x,\n 'targets:0': batch_y})\n avg_cost += c\n \n train_acc = sess.run('accuracy:0', feed_dict={'features:0': mnist.train.images,\n 'targets:0': mnist.train.labels})\n valid_acc = sess.run('accuracy:0', feed_dict={'features:0': mnist.validation.images,\n 'targets:0': mnist.validation.labels}) \n \n print(\"Epoch: %03d | AvgCost: %.3f\" % (epoch + 1, avg_cost / (i + 1)), end=\"\")\n print(\" | Train/Valid ACC: %.3f/%.3f\" % (train_acc, valid_acc))\n \n test_acc = sess.run('accuracy:0', feed_dict={'features:0': mnist.test.images,\n 'targets:0': mnist.test.labels})\n print('Test ACC: %.3f' % test_acc)\n \n ##########################\n ### SAVE TRAINED MODEL\n ##########################\n saver0.save(sess, save_path='./mlp')",
"/srv/venv/lib/python3.6/site-packages/tensorflow/python/framework/tensor_util.py:539: DeprecationWarning: The binary mode of fromstring is deprecated, as it behaves surprisingly on unicode inputs. Use frombuffer instead\n return np.fromstring(tensor.tensor_content, dtype=dtype).reshape(shape)\n"
]
],
[
[
"## Reload Model from Meta and Checkpoint Files",
"_____no_output_____"
],
[
"**You can restart and the notebook and the following code cells should execute without any additional code dependencies.**",
"_____no_output_____"
]
],
[
[
"import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nmnist = input_data.read_data_sets(\"./\", one_hot=False)\n\nwith tf.Session() as sess:\n \n saver1 = tf.train.import_meta_graph('./mlp.meta')\n saver1.restore(sess, save_path='./mlp')\n \n test_acc = sess.run('accuracy:0', feed_dict={'features:0': mnist.test.images,\n 'targets:0': mnist.test.labels})\n print('Test ACC: %.3f' % test_acc)",
"_____no_output_____"
]
],
[
[
"## Working with NumPy Archive Files and Creating Non-Trainable Graphs",
"_____no_output_____"
],
[
"### Export Model Parameters to NumPy NPZ files",
"_____no_output_____"
]
],
[
[
"import tensorflow as tf\nimport numpy as np\n\ntf.reset_default_graph()\nwith tf.Session() as sess:\n\n saver1 = tf.train.import_meta_graph('./mlp.meta')\n saver1.restore(sess, save_path='./mlp')\n \n var_names = [v.name for v in \n tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)]\n \n params = {}\n print('Found variables:')\n for v in var_names:\n print(v)\n \n ary = sess.run(v)\n params[v] = ary\n \n np.savez('mlp', **params)",
"_____no_output_____"
],
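[
"# Quick sanity check (an illustrative addition): confirm that the archive\n# holds the six expected parameter arrays before rebuilding the fixed graph.\narchive = np.load('mlp.npz')\nfor name in archive.files:\n    print(name, archive[name].shape)",
"_____no_output_____"
]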
],
[
[
"### Load NumPy .npz files into the `mlp_graph`",
"_____no_output_____"
],
[
"Note that the graph def was set up in a way that it constructs \"rigid,\" not trainable TensorFlow classifier if .npz files are provided. This is on purpose, since it may come handy in certain use cases, but the code can be easily modified to make the model trainable if NumPy .npz files are provided (e.g., by wrapping the `tf.constant` calls in `fc_layer` in a `tf.Variable` constructor.",
"_____no_output_____"
],
[
"**Note: If you defined the `fc_layer` and `mlp_graph` wrapper functions in *Define Multilayer Perceptron Graph*, the following code cell is otherwise independent and has no other code dependencies.**",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\n\n###########################\n### LOAD DATA AND PARAMS\n###########################\n\nmnist = input_data.read_data_sets(\"./\", one_hot=False)\nparam_dict = np.load('mlp.npz')\n\n##########################\n### GRAPH DEFINITION\n##########################\n\n\ng = tf.Graph()\nwith g.as_default():\n \n # here: constructs a non-trainable graph\n # due to the provided fixed_params argument\n mlp_graph(fixed_params=param_dict)\n\nwith tf.Session(graph=g) as sess:\n \n test_acc = sess.run('accuracy:0', feed_dict={'features:0': mnist.test.images,\n 'targets:0': mnist.test.labels})\n print('Test ACC: %.3f' % test_acc)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
]
] |
e714b98a0a68d2782ad1ff4546beea2dc910d167 | 60,655 | ipynb | Jupyter Notebook | LearningQuantumGates.ipynb | Alexfm101/QuantumHelloWorld | b67ac59c835dd82c4a05a915b9326231ea5b6cab | [
"MIT"
] | null | null | null | LearningQuantumGates.ipynb | Alexfm101/QuantumHelloWorld | b67ac59c835dd82c4a05a915b9326231ea5b6cab | [
"MIT"
] | null | null | null | LearningQuantumGates.ipynb | Alexfm101/QuantumHelloWorld | b67ac59c835dd82c4a05a915b9326231ea5b6cab | [
"MIT"
] | null | null | null | 367.606061 | 46,720 | 0.94444 | [
[
[
"from qiskit import *",
"_____no_output_____"
],
[
"from qiskit.tools.visualization import plot_bloch_multivector",
"_____no_output_____"
],
[
"#representacion bra-ket\n#crear circuito\ncircuit = QuantumCircuit(1,1)\n#compuerta x\ncircuit.x(0)\n#tipo de simulacion\nsimulator = Aer.get_backend('statevector_simulator')\n#resultado\nresult = execute(circuit,backend=simulator).result()\nstatevector = result.get_statevector()\nprint(statevector)\n\n%matplotlib inline\n#dibujar el circuito\ncircuit.draw(output='mpl')\n",
"[0.+0.j 1.+0.j]\n"
],
[
"plot_bloch_multivector(statevector)",
"_____no_output_____"
],
[
"#poniendo el valor de medida en un bit clasico\ncircuit.measure([0], [0])\n#creando otro backend\nbackend = Aer.get_backend('qasm_simulator')\n#resultado\nresult = execute(circuit,backend = backend,shots = 1024).result()\ncounts = result.get_counts()\nfrom qiskit.tools.visualization import plot_histogram\nplot_histogram(counts)",
"_____no_output_____"
],
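[
"# One more gate for comparison (an illustrative addition): the Hadamard puts\n# the qubit into an equal superposition, so the measurement counts split\n# roughly 50/50 between 0 and 1.\ncircuit = QuantumCircuit(1,1)\ncircuit.h(0)\ncircuit.measure([0], [0])\nresult = execute(circuit, backend=Aer.get_backend('qasm_simulator'), shots=1024).result()\nplot_histogram(result.get_counts())",
"_____no_output_____"
],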
[
"# representacion en matrices\n\ncircuit = QuantumCircuit(1,1)\n#compuerta x\ncircuit.x(0)\n#tipo de simulacion\nsimulator = Aer.get_backend('unitary_simulator')\n#resultado\nresult = execute(circuit,backend=simulator).result()\nunitary = result.get_unitary()\nprint(unitary)\n\n",
"[[0.+0.j 1.+0.j]\n [1.+0.j 0.+0.j]]\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e714cd8ca095dd74eb8ac3de27721742ea675a72 | 13,994 | ipynb | Jupyter Notebook | Improving Deep Neural Networks/Gradient Checking.ipynb | amankedia/DeepLearning-AI | c7bd191209383fc17b7cac2a4e254b48daeb6ceb | [
"Apache-2.0"
] | null | null | null | Improving Deep Neural Networks/Gradient Checking.ipynb | amankedia/DeepLearning-AI | c7bd191209383fc17b7cac2a4e254b48daeb6ceb | [
"Apache-2.0"
] | null | null | null | Improving Deep Neural Networks/Gradient Checking.ipynb | amankedia/DeepLearning-AI | c7bd191209383fc17b7cac2a4e254b48daeb6ceb | [
"Apache-2.0"
] | 1 | 2019-07-02T05:38:49.000Z | 2019-07-02T05:38:49.000Z | 34.048662 | 142 | 0.481206 | [
[
[
"# Packages\nimport numpy as np\nfrom testCases import *\nfrom gc_utils import sigmoid, relu, dictionary_to_vector, vector_to_dictionary, gradients_to_vector",
"_____no_output_____"
],
[
"# GRADED FUNCTION: forward_propagation\n\ndef forward_propagation(x, theta):\n \"\"\"\n Implement the linear forward propagation (compute J) presented in Figure 1 (J(theta) = theta * x)\n \n Arguments:\n x -- a real-valued input\n theta -- our parameter, a real number as well\n \n Returns:\n J -- the value of function J, computed using the formula J(theta) = theta * x\n \"\"\"\n \n ### START CODE HERE ### (approx. 1 line)\n J = np.dot(theta, x)\n ### END CODE HERE ###\n \n return J",
"_____no_output_____"
],
[
"x, theta = 2, 4\nJ = forward_propagation(x, theta)\nprint (\"J = \" + str(J))",
"J = 8\n"
],
[
"def backward_propagation(x, theta):\n \"\"\"\n Computes the derivative of J with respect to theta (see Figure 1).\n \n Arguments:\n x -- a real-valued input\n theta -- our parameter, a real number as well\n \n Returns:\n dtheta -- the gradient of the cost with respect to theta\n \"\"\"\n \n ### START CODE HERE ### (approx. 1 line)\n dtheta = x\n ### END CODE HERE ###\n \n return dtheta",
"_____no_output_____"
],
[
"x, theta = 2, 4\ndtheta = backward_propagation(x, theta)\nprint (\"dtheta = \" + str(dtheta))",
"dtheta = 2\n"
],
[
"def gradient_check(x, theta, epsilon = 1e-7):\n \"\"\"\n Implement the backward propagation presented in Figure 1.\n \n Arguments:\n x -- a real-valued input\n theta -- our parameter, a real number as well\n epsilon -- tiny shift to the input to compute approximated gradient with formula(1)\n \n Returns:\n difference -- difference (2) between the approximated gradient and the backward propagation gradient\n \"\"\"\n \n # Compute gradapprox using left side of formula (1). epsilon is small enough, you don't need to worry about the limit.\n ### START CODE HERE ### (approx. 5 lines)\n thetaplus = theta + epsilon # Step 1\n thetaminus = theta - epsilon # Step 2\n J_plus = forward_propagation(x, thetaplus) # Step 3\n J_minus = forward_propagation(x, thetaminus) # Step 4\n gradapprox = (J_plus - J_minus)/(2*epsilon) # Step 5\n ### END CODE HERE ###\n \n # Check if gradapprox is close enough to the output of backward_propagation()\n ### START CODE HERE ### (approx. 1 line)\n grad = backward_propagation(x, theta)\n ### END CODE HERE ###\n \n ### START CODE HERE ### (approx. 1 line)\n numerator = np.linalg.norm(grad - gradapprox) # Step 1'\n denominator = np.linalg.norm(grad) + np.linalg.norm(gradapprox) # Step 2'\n difference = numerator / denominator # Step 3'\n ### END CODE HERE ###\n \n if difference < 1e-7:\n print (\"The gradient is correct!\")\n else:\n print (\"The gradient is wrong!\")\n \n return difference",
"_____no_output_____"
],
[
"x, theta = 2, 4\ndifference = gradient_check(x, theta)\nprint(\"difference = \" + str(difference))",
"The gradient is correct!\ndifference = 2.91933588329e-10\n"
],
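[
"# Extra illustrative check (not part of the graded assignment): for this\n# linear J(theta) = theta * x the centered difference is exact up to\n# floating-point round-off, so the difference stays tiny for any reasonable\n# epsilon.\nfor eps in [1e-3, 1e-5, 1e-7]:\n    print(\"epsilon =\", eps, \"-> difference =\", gradient_check(x, theta, epsilon=eps))",
"_____no_output_____"
],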
[
"def forward_propagation_n(X, Y, parameters):\n \"\"\"\n Implements the forward propagation (and computes the cost) presented in Figure 3.\n \n Arguments:\n X -- training set for m examples\n Y -- labels for m examples \n parameters -- python dictionary containing your parameters \"W1\", \"b1\", \"W2\", \"b2\", \"W3\", \"b3\":\n W1 -- weight matrix of shape (5, 4)\n b1 -- bias vector of shape (5, 1)\n W2 -- weight matrix of shape (3, 5)\n b2 -- bias vector of shape (3, 1)\n W3 -- weight matrix of shape (1, 3)\n b3 -- bias vector of shape (1, 1)\n \n Returns:\n cost -- the cost function (logistic cost for one example)\n \"\"\"\n \n # retrieve parameters\n m = X.shape[1]\n W1 = parameters[\"W1\"]\n b1 = parameters[\"b1\"]\n W2 = parameters[\"W2\"]\n b2 = parameters[\"b2\"]\n W3 = parameters[\"W3\"]\n b3 = parameters[\"b3\"]\n\n # LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID\n Z1 = np.dot(W1, X) + b1\n A1 = relu(Z1)\n Z2 = np.dot(W2, A1) + b2\n A2 = relu(Z2)\n Z3 = np.dot(W3, A2) + b3\n A3 = sigmoid(Z3)\n\n # Cost\n logprobs = np.multiply(-np.log(A3),Y) + np.multiply(-np.log(1 - A3), 1 - Y)\n cost = 1./m * np.sum(logprobs)\n \n cache = (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3)\n \n return cost, cache",
"_____no_output_____"
],
[
"def backward_propagation_n(X, Y, cache):\n \"\"\"\n Implement the backward propagation presented in figure 2.\n \n Arguments:\n X -- input datapoint, of shape (input size, 1)\n Y -- true \"label\"\n cache -- cache output from forward_propagation_n()\n \n Returns:\n gradients -- A dictionary with the gradients of the cost with respect to each parameter, activation and pre-activation variables.\n \"\"\"\n \n m = X.shape[1]\n (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3) = cache\n \n dZ3 = A3 - Y\n dW3 = 1./m * np.dot(dZ3, A2.T)\n db3 = 1./m * np.sum(dZ3, axis=1, keepdims = True)\n \n dA2 = np.dot(W3.T, dZ3)\n dZ2 = np.multiply(dA2, np.int64(A2 > 0))\n dW2 = 1./m * np.dot(dZ2, A1.T) * 2\n db2 = 1./m * np.sum(dZ2, axis=1, keepdims = True)\n \n dA1 = np.dot(W2.T, dZ2)\n dZ1 = np.multiply(dA1, np.int64(A1 > 0))\n dW1 = 1./m * np.dot(dZ1, X.T)\n db1 = 4./m * np.sum(dZ1, axis=1, keepdims = True)\n \n gradients = {\"dZ3\": dZ3, \"dW3\": dW3, \"db3\": db3,\n \"dA2\": dA2, \"dZ2\": dZ2, \"dW2\": dW2, \"db2\": db2,\n \"dA1\": dA1, \"dZ1\": dZ1, \"dW1\": dW1, \"db1\": db1}\n \n return gradients",
"_____no_output_____"
],
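[
"# Note: the trailing \"* 2\" in dW2 and the \"4./m\" in db1 above look like the\n# planted bugs this exercise asks you to find (an assumption, based on the\n# failing gradient check below). The debugged lines would read:\n#\n#     dW2 = 1./m * np.dot(dZ2, A1.T)\n#     db1 = 1./m * np.sum(dZ1, axis=1, keepdims=True)\n#\n# With these fixes, gradient_check_n should report a difference below 2e-7.",
"_____no_output_____"
],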
[
"# GRADED FUNCTION: gradient_check_n\n\ndef gradient_check_n(parameters, gradients, X, Y, epsilon = 1e-7):\n \"\"\"\n Checks if backward_propagation_n computes correctly the gradient of the cost output by forward_propagation_n\n \n Arguments:\n parameters -- python dictionary containing your parameters \"W1\", \"b1\", \"W2\", \"b2\", \"W3\", \"b3\":\n grad -- output of backward_propagation_n, contains gradients of the cost with respect to the parameters. \n x -- input datapoint, of shape (input size, 1)\n y -- true \"label\"\n epsilon -- tiny shift to the input to compute approximated gradient with formula(1)\n \n Returns:\n difference -- difference (2) between the approximated gradient and the backward propagation gradient\n \"\"\"\n \n # Set-up variables\n parameters_values, _ = dictionary_to_vector(parameters)\n grad = gradients_to_vector(gradients)\n num_parameters = parameters_values.shape[0]\n J_plus = np.zeros((num_parameters, 1))\n J_minus = np.zeros((num_parameters, 1))\n gradapprox = np.zeros((num_parameters, 1))\n \n # Compute gradapprox\n for i in range(num_parameters):\n \n # Compute J_plus[i]. Inputs: \"parameters_values, epsilon\". Output = \"J_plus[i]\".\n # \"_\" is used because the function you have to outputs two parameters but we only care about the first one\n ### START CODE HERE ### (approx. 3 lines)\n thetaplus = np.copy(parameters_values) # Step 1\n thetaplus[i][0] = thetaplus[i][0] + epsilon # Step 2\n J_plus[i], _ = forward_propagation_n(X, Y, vector_to_dictionary(thetaplus)) # Step 3\n ### END CODE HERE ###\n \n # Compute J_minus[i]. Inputs: \"parameters_values, epsilon\". Output = \"J_minus[i]\".\n ### START CODE HERE ### (approx. 3 lines)\n thetaminus = np.copy(parameters_values) # Step 1\n thetaminus[i][0] = thetaminus[i][0] - epsilon # Step 2 \n J_minus[i], _ = forward_propagation_n(X, Y, vector_to_dictionary(thetaminus)) # Step 3\n ### END CODE HERE ###\n \n # Compute gradapprox[i]\n ### START CODE HERE ### (approx. 1 line)\n gradapprox[i] = (J_plus[i] - J_minus[i])/(2*epsilon)\n ### END CODE HERE ###\n \n # Compare gradapprox to backward propagation gradients by computing difference.\n ### START CODE HERE ### (approx. 1 line)\n numerator = np.linalg.norm(grad - gradapprox) # Step 1'\n denominator = np.linalg.norm(grad) + np.linalg.norm(gradapprox) # Step 2'\n difference = numerator/ denominator # Step 3'\n ### END CODE HERE ###\n\n if difference > 2e-7:\n print (\"\\033[93m\" + \"There is a mistake in the backward propagation! difference = \" + str(difference) + \"\\033[0m\")\n else:\n print (\"\\033[92m\" + \"Your backward propagation works perfectly fine! difference = \" + str(difference) + \"\\033[0m\")\n \n return difference",
"_____no_output_____"
],
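[
"# Assumption: dictionary_to_vector, vector_to_dictionary and gradients_to_vector\n# are imported from the course's utility module. One possible implementation,\n# hard-coding the parameter shapes used by forward_propagation_n:\nimport numpy as np\n\nPARAM_SHAPES = [(\"W1\", (5, 4)), (\"b1\", (5, 1)), (\"W2\", (3, 5)),\n                (\"b2\", (3, 1)), (\"W3\", (1, 3)), (\"b3\", (1, 1))]\n\ndef dictionary_to_vector(parameters):\n    \"\"\"Stack all parameters into one column vector, plus one key per row.\"\"\"\n    chunks, keys = [], []\n    for name, shape in PARAM_SHAPES:\n        vec = parameters[name].reshape(-1, 1)\n        chunks.append(vec)\n        keys += [name] * vec.shape[0]\n    return np.concatenate(chunks, axis=0), keys\n\ndef vector_to_dictionary(theta):\n    \"\"\"Inverse of dictionary_to_vector: rebuild the parameter dictionary.\"\"\"\n    parameters, i = {}, 0\n    for name, shape in PARAM_SHAPES:\n        size = shape[0] * shape[1]\n        parameters[name] = theta[i:i + size].reshape(shape)\n        i += size\n    return parameters\n\ndef gradients_to_vector(gradients):\n    \"\"\"Stack the dW*/db* gradients in the same order as the parameters.\"\"\"\n    return np.concatenate([gradients[\"d\" + name].reshape(-1, 1)\n                           for name, _ in PARAM_SHAPES], axis=0)",
"_____no_output_____"
],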
[
"X, Y, parameters = gradient_check_n_test_case()\n\ncost, cache = forward_propagation_n(X, Y, parameters)\ngradients = backward_propagation_n(X, Y, cache)\ndifference = gradient_check_n(parameters, gradients, X, Y)",
"\u001b[93mThere is a mistake in the backward propagation! difference = 0.285093156654\u001b[0m\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e714d34d3646925a6f5f94a1fdf3340f2c96297b | 129,812 | ipynb | Jupyter Notebook | pymaceuticals_starter.ipynb | Mark-Liang-2000/Matplotlib-Challenge | c6277256c01270b42204b7b64ddc2e27180e7a67 | [
"MIT"
] | null | null | null | pymaceuticals_starter.ipynb | Mark-Liang-2000/Matplotlib-Challenge | c6277256c01270b42204b7b64ddc2e27180e7a67 | [
"MIT"
] | null | null | null | pymaceuticals_starter.ipynb | Mark-Liang-2000/Matplotlib-Challenge | c6277256c01270b42204b7b64ddc2e27180e7a67 | [
"MIT"
] | null | null | null | 121.319626 | 18,676 | 0.825109 | [
[
[
"## Observations and Insights ",
"_____no_output_____"
],
[
"1. In the data used, 248 unique mice was analyzed with different treatment options with a 51% to 49% male to female ratio.\n\n2. Mouse weight and tumor volume were closely related, showing that weight is important to analyzing drug effectiveness.\n\n3. There was one outlier data point with the drug Infubinol.",
"_____no_output_____"
]
],
[
[
"\n\n# Dependencies and Setup\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport scipy.stats as st\n\n# Study data files\nmouse_metadata_path = \"data/Mouse_metadata.csv\"\nstudy_results_path = \"data/Study_results.csv\"\n\n# Read the mouse data and the study results\nmouse_metadata = pd.read_csv(mouse_metadata_path)\nstudy_results = pd.read_csv(study_results_path)\n\n# Combine the data into a single dataset\ndataComplete = pd.merge(study_results, mouse_metadata, how = \"left\", on =[\"Mouse ID\"])\n\n\n# Display the data table for preview\ndataComplete.head()",
"_____no_output_____"
],
[
"# Checking the number of mice.\nnumMice = len(dataComplete[\"Mouse ID\"].unique())\nnumMice",
"_____no_output_____"
],
[
"# Getting the duplicate mice by ID number that shows up for Mouse ID and Timepoint. \nsameMiceID = dataComplete.loc[dataComplete.duplicated(subset = [\"Mouse ID\", \"Timepoint\"]), \n \"Mouse ID\"].unique()\nsameMiceID",
"_____no_output_____"
],
[
"# Optional: Get all the data for the duplicate mouse ID. \n\n",
"_____no_output_____"
],
[
"# Create a clean DataFrame by dropping the duplicate mouse by its ID.\ndataClean = dataComplete[dataComplete[\"Mouse ID\"].isin(sameMiceID) == False]\ndataClean.head()",
"_____no_output_____"
],
[
"# Checking the number of mice in the clean DataFrame.\nnumNoDupMice = len(dataClean[\"Mouse ID\"].unique())\nnumNoDupMice",
"_____no_output_____"
]
],
[
[
"## Summary Statistics",
"_____no_output_____"
]
],
[
[
"# Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen\n\n# This method is the most straighforward, creating multiple series and putting them all together at the end.\nmean = dataClean.groupby(\"Drug Regimen\").mean()[\"Tumor Volume (mm3)\"]\nmedian = dataClean.groupby(\"Drug Regimen\").median()[\"Tumor Volume (mm3)\"]\nvar = dataClean.groupby(\"Drug Regimen\").var()[\"Tumor Volume (mm3)\"]\nstd = dataClean.groupby(\"Drug Regimen\").std()[\"Tumor Volume (mm3)\"]\nsem = dataClean.groupby(\"Drug Regimen\").sem()[\"Tumor Volume (mm3)\"]\ndataTable = pd.DataFrame({\"Mean Tumor Volume\" : mean, \n \"Median Tumor Volume\" : median,\n \"Tumor Volume Variance\" : var,\n \"Tumor Volume Std. Dev.\" : std,\n \"Tumor Volume Std. Err.\" : sem})\ndataTable\n",
"_____no_output_____"
],
[
"# Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen\n\n# This method produces everything in a single groupby function\ndataTable = dataClean.groupby(\"Drug Regimen\").agg({\"Tumor Volume (mm3)\":\n [\"mean\",\"median\",\"var\",\"std\",\"sem\"]})\ndataTable",
"_____no_output_____"
]
],
[
[
"## Bar and Pie Charts",
"_____no_output_____"
]
],
[
[
"# Generate a bar plot showing the total number of mice for each treatment throughout the course of the study using pandas. \n\nnum = dataClean[\"Drug Regimen\"].value_counts()\nnum.plot(kind = \"bar\")\nplt.xlabel(\"Drug Regimen\")\nplt.xticks(rotation = 90)\nplt.ylabel(\"Number of Data Points\")\nplt.show()",
"_____no_output_____"
],
[
"# Generate a bar plot showing the total number of mice for each treatment throughout the course of the study using pyplot.\n\nnum = dataClean[\"Drug Regimen\"].value_counts()\nplt.bar(num.index.values, num.values)\nplt.xlabel(\"Drug Regimen\")\nplt.xticks(rotation = 90)\nplt.ylabel(\"Number of Data Points\")\nplt.show()",
"_____no_output_____"
],
[
"# Generate a pie plot showing the distribution of female versus male mice using pandas\n\nnum = dataClean.Sex.value_counts()\nnum.plot(kind = \"pie\", autopct = '%2.2f%%')\nplt.show()",
"_____no_output_____"
],
[
"# Generate a pie plot showing the distribution of female versus male mice using pyplot\n\nnum = dataClean.Sex.value_counts()\nplt.pie(num.values, labels = num.index.values, autopct = '%2.2f%%')\nplt.ylabel(\"Sex\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Quartiles, Outliers and Boxplots",
"_____no_output_____"
]
],
[
[
"# Calculate the final tumor volume of each mouse across four of the treatment regimens: \n# Capomulin, Ramicane, Infubinol, and Ceftamin\n\n# Start by getting the last (greatest) timepoint for each mouse\n\nmaxPoint = dataClean.groupby([\"Mouse ID\"])[\"Timepoint\"].max()\nmaxPoint = maxPoint.reset_index()\n\n# Merge this group df with the original dataframe to get the tumor volume at the last timepoint\nmergeDataComplete = maxPoint.merge(dataClean, on = [\"Mouse ID\", \"Timepoint\"], how = \"left\")",
"_____no_output_____"
],
[
"# Put treatments into a list for for loop (and later for plot labels)\ntreatment = [\"Capomulin\", \"Ramicane\", \"Infubinol\", \"Ceftamin\"]\n\n# Create empty list to fill with tumor vol data (for plotting)\ntumorList = []\n\n# Calculate the IQR and quantitatively determine if there are any potential outliers. \nfor drugs in treatment:\n \n # Locate the rows which contain mice on each drug and get the tumor volumes\n tumor = mergeDataComplete.loc[mergeDataComplete[\"Drug Regimen\"] == drugs, \"Tumor Volume (mm3)\"]\n \n # add subset \n tumorList.append(tumor)\n \n # Determine outliers using upper and lower bounds\n quart = tumor.quantile([.25, .5, .75])\n lowerQuart = quart[.25]\n upperQuart = quart[.75]\n midQuart = upperQuart - lowerQuart\n lowerBound = lowerQuart - (1.5 * midQuart)\n upperBound = upperQuart + (1.5 * midQuart)\n outliers = tumor.loc[(tumor < lowerBound) | (tumor > upperBound)]\n print(f\"{drugs}'s potential outliers: {outliers}\")",
"Capomulin's potential outliers: Series([], Name: Tumor Volume (mm3), dtype: float64)\nRamicane's potential outliers: Series([], Name: Tumor Volume (mm3), dtype: float64)\nInfubinol's potential outliers: 31 36.321346\nName: Tumor Volume (mm3), dtype: float64\nCeftamin's potential outliers: Series([], Name: Tumor Volume (mm3), dtype: float64)\n"
],
[
"# Generate a box plot of the final tumor volume of each mouse across four regimens of interest\nboxPlot = dict(markerfacecolor = \"blue\", markersize = 12)\nplt.boxplot(tumorList, labels = treatment, flierprops = boxPlot)\nplt.ylabel(\"Final Tumor Volume (mm3)\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Line and Scatter Plots",
"_____no_output_____"
]
],
[
[
"# Generate a line plot of time point versus tumor volume for a mouse treated with Capomulin\ncapomulData = dataClean.loc[dataClean[\"Drug Regimen\"] == \"Capomulin\"]\nmouseData = capomulData.loc[capomulData[\"Mouse ID\"] == \"x401\"]\nplt.plot(mouseData[\"Timepoint\"], mouseData[\"Tumor Volume (mm3)\"])\nplt.xlabel(\"Timepoint (days)\")\nplt.ylabel(\"Tumor Volume (mm3)\")\nplt.title(\"Mouse x401 Capomulin Data\")\nplt.show()",
"_____no_output_____"
],
[
"# Generate a scatter plot of mouse weight versus average tumor volume for the Capomulin regimen\ncapomulData = dataClean.loc[dataClean[\"Drug Regimen\"] == \"Capomulin\"]\ncapomulMean = capomulData.groupby([\"Mouse ID\"]).mean()\nplt.scatter(capomulMean[\"Weight (g)\"], capomulMean[\"Tumor Volume (mm3)\"])\nplt.xlabel(\"Weight (g)\")\nplt.ylabel(\"Average Tumor Volume (mm3)\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Correlation and Regression",
"_____no_output_____"
]
],
[
[
"# Calculate the correlation coefficient and linear regression model \n# for mouse weight and average tumor volume for the Capomulin regimen\n",
"_____no_output_____"
],
[
"correlation = round(st.pearsonr(capomulMean[\"Weight (g)\"], capomulMean[\"Tumor Volume (mm3)\"])[0],2)\n\nprint(f\"The correlation between mouse weight and the average tumor volume is {correlation}\")\n\nmodel = st.linregress(capomulMean[\"Weight (g)\"], capomulMean[\"Tumor Volume (mm3)\"])\n\ny_values = capomulMean[\"Weight (g)\"] * model[0] + model[1]\nplt.scatter(capomulMean[\"Weight (g)\"], capomulMean[\"Tumor Volume (mm3)\"])\nplt.plot(capomulMean[\"Weight (g)\"], y_values, color = \"blue\")\nplt.xlabel(\"Weight (g)\")\nplt.ylabel(\"Average Tumor Volume (mm3)\")\nplt.show()",
"The correlation between mouse weight and the average tumor volume is 0.84\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e714d361ef79549f034e1bf1338d82688ec75b48 | 37,722 | ipynb | Jupyter Notebook | tasks/task2.ipynb | kingstdio/DMLF | 37035517ed53dc4d907828a43c3538123a11282d | [
"MIT"
] | 8 | 2021-11-25T01:36:44.000Z | 2022-03-03T05:59:11.000Z | tasks/task2.ipynb | kingstdio/DMLF | 37035517ed53dc4d907828a43c3538123a11282d | [
"MIT"
] | null | null | null | tasks/task2.ipynb | kingstdio/DMLF | 37035517ed53dc4d907828a43c3538123a11282d | [
"MIT"
] | null | null | null | 30.970443 | 165 | 0.414241 | [
[
[
"# Task2. Enzyme Catalytic Function Quantity Annotation\n\n> author: Shizhenkun \n> email: [email protected] \n> date: 2021-09-28 \n\n\n",
"_____no_output_____"
],
[
"## 1. Import packages",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\n\n\nimport sys\nimport os\nfrom tqdm import tqdm\nfrom functools import reduce\n\nsys.path.append(\"../tools/\")\nimport funclib\n\nsys.path.append(\"../\")\nimport benchmark_common as bcommon\nimport benchmark_train as btrain\nimport benchmark_test as btest\nimport config as cfg\nimport benchmark_evaluation as eva\nimport joblib\n\n\n\nfrom sklearn import metrics\n\nfrom pandarallel import pandarallel # 导入pandaralle\npandarallel.initialize() # 初始化该这个b...并行库\n\n%load_ext autoreload\n%autoreload 2",
"INFO: Pandarallel will run on 80 workers.\nINFO: Pandarallel will use Memory file system to transfer data between the main process and workers.\n"
]
],
[
[
"## 2. Load data",
"_____no_output_____"
]
],
[
[
"#read train test data\ntrain = pd.read_feather(cfg.DATADIR+'task2/train.feather')\ntest = pd.read_feather(cfg.DATADIR+'task2/test.feather')\nprint('train size: {0}\\ntest size: {1}'.format(len(train), len(test)))",
"train size: 222567\ntest size: 3304\n"
]
],
[
[
"## 3. Gather features",
"_____no_output_____"
]
],
[
[
"trainf=pd.read_feather(cfg.DATADIR+'train_rep32.feather')\ntestf=pd.read_feather(cfg.DATADIR+'test_rep32.feather')\n\ntrain = train.merge(trainf, on='id', how='left')\ntest = test.merge(testf, on='id', how='left')",
"_____no_output_____"
]
],
[
[
"## 4. Prediction\n",
"_____no_output_____"
],
[
"### 4.1 singel-to-multi",
"_____no_output_____"
]
],
[
[
"train_s1=train.copy()\ntest_s1=test.copy()\n\ntrain_s1['lb'] = train_s1.functionCounts.apply(lambda x : 0 if x==1 else 1)\ntest_s1['lb'] = test_s1.functionCounts.apply(lambda x : 0 if x==1 else 1)\n\nX_train = np.array(train.iloc[:,np.r_[3:1283]])\nY_train = np.array(train_s1.lb.astype('int')).flatten()\n\nX_test = np.array(test.iloc[:,np.r_[3:1283]])\nY_test = np.array(test_s1.lb.astype('int')).flatten()",
"_____no_output_____"
],
[
"funclib.run_baseline(X_train, Y_train, X_test, Y_test)",
"baslineName \t accuracy \t precision(PPV) \t NPV \t\t recall \t f1 \t\t \t\t confusion Matrix\nknn \t\t0.925545 \t0.517857 \t\t0.947385 \t0.345238 \t0.414286 \t tp: 87 fp: 81 fn: 165 tn: 2971\n"
],
[
"groundtruth, predict, predictprob, model = funclib.xgmain(X_train, Y_train, X_test, Y_test, type='binary')\njoblib.dump(model, cfg.MODELDIR+'/single_multi.model')",
"_____no_output_____"
],
[
"pd.DataFrame(X_train)",
"_____no_output_____"
]
],
[
[
"### 4.2 2-8 functions",
"_____no_output_____"
]
],
[
[
"#gather >2 data\ntrain_s2=train.copy()\ntest_s2=test.copy()\n\ntrain_s2=train_s2[train_s2.functionCounts>=2]\ntest_s2=test_s2[test_s2.functionCounts>=2]\n\ntrain_s2.reset_index(drop=True, inplace=True)\ntest_s2.reset_index(drop=True, inplace=True)\n\n#define X,Y\nX_train = np.array(train_s2.iloc[:,np.r_[3:1283]])\nY_train = np.array(train_s2.functionCounts.astype('int')-2).flatten()\n\nX_test = np.array(test_s2.iloc[:,np.r_[3:1283]])\nY_test = np.array(test_s2.functionCounts.astype('int')-2).flatten()",
"_____no_output_____"
],
[
"funclib.run_baseline(X_train, Y_train, X_test, Y_test, type='multi')",
" baslineName \t\t accuracy \t precision-macro \t recall-macro \t f1-macro\n knn \t\t0.833333 \t0.678550 \t\t0.623593 \t0.646456\n lr \t\t0.761905 \t0.707956 \t\t0.637942 \t0.521033\n xg \t\t0.849206 \t0.854167 \t\t0.630704 \t0.623925\n dt \t\t0.702381 \t0.479052 \t\t0.482337 \t0.479975\n rf \t\t0.853175 \t0.864239 \t\t0.546546 \t0.594133\n gbdt \t\t0.853175 \t0.856578 \t\t0.573413 \t0.602268\n"
],
[
"groundtruth, predict, predictprob, model = funclib.xgmain(X_train, Y_train, X_test, Y_test, type='multi')\njoblib.dump(model, cfg.MODELDIR+'/multi_many.model')",
"_____no_output_____"
]
],
[
[
"## 5. Integration",
"_____no_output_____"
],
[
"### 5.1 sequence aligment",
"_____no_output_____"
]
],
[
[
"res_data=funclib.getblast(train,test)\nres_data = res_data[['id', 'sseqid']].merge(train, left_on='sseqid',right_on='id', how='left')[['id_x','sseqid','functionCounts']]\nres_data =res_data.rename(columns={'id_x':'id','sseqid':'id_ref', 'functionCounts':'functionCounts_pred'})\nres_data = res_data.merge(test, on='id', how='left')[['id','functionCounts_pred','functionCounts']]",
"Write finished\nWrite finished\ndiamond makedb --in /tmp/train.fasta -d /tmp/train.dmnd\n"
],
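[
"# Rough sketch (assumption): the real funclib.getblast lives in ../tools/funclib.py;\n# a diamond-based best-hit lookup roughly does the following. The sequence column\n# name 'seq' below is hypothetical.\nimport subprocess\n\ndef getblast_sketch(train, test):\n    # 1. dump train/test sequences to FASTA files\n    for df, path in [(train, '/tmp/train.fasta'), (test, '/tmp/test.fasta')]:\n        with open(path, 'w') as f:\n            for _, row in df.iterrows():\n                f.write('>{}\\n{}\\n'.format(row['id'], row['seq']))\n    # 2. build a diamond database from the training sequences\n    subprocess.run(['diamond', 'makedb', '--in', '/tmp/train.fasta',\n                    '-d', '/tmp/train.dmnd'], check=True)\n    # 3. align test sequences against it, keeping the single best hit each\n    subprocess.run(['diamond', 'blastp', '-d', '/tmp/train.dmnd',\n                    '-q', '/tmp/test.fasta', '-o', '/tmp/hits.tsv', '-k', '1'],\n                   check=True)\n    # 4. parse diamond's default 12-column tabular output\n    cols = ['id', 'sseqid', 'pident', 'length', 'mismatch', 'gapopen',\n            'qstart', 'qend', 'sstart', 'send', 'evalue', 'bitscore']\n    return pd.read_csv('/tmp/hits.tsv', sep='\\t', names=cols)",
"_____no_output_____"
],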
[
"res_data",
"_____no_output_____"
],
[
"aa=test.iloc[:,np.r_[0,2]].merge(res_data.iloc[:,np.r_[0,1]], on='id', how='left').fillna(0)",
"_____no_output_____"
],
[
"eva.caculateMetrix(groundtruth=res_data.functionCounts, predict=res_data.functionCounts_pred, baselineName='diamond', type='multi')",
" ours \t\t0.905469 \t0.582284 \t\t0.553278 \t0.560797\n"
],
[
"eva.caculateMetrix(groundtruth=aa.functionCounts, predict=aa.functionCounts_pred, baselineName='diamond', type='multi')",
" diamond \t\t0.756659 \t0.509499 \t\t0.585681 \t0.477800\n"
]
],
[
[
"### 5.2 Xgboost",
"_____no_output_____"
]
],
[
[
"X_test = np.array(test.iloc[:,3:])\nY_test = np.array(test.functionCounts.astype('int')).flatten()\n\nmodel_s = joblib.load(cfg.MODELDIR+'/single_multi.model')\nmodel_m = joblib.load(cfg.MODELDIR+'/multi_many.model')",
"_____no_output_____"
],
[
"pred_s=model_s.predict(X_test)\npred_m=model_m.predict(X_test)",
"_____no_output_____"
]
],
[
[
"### 5.3 Results integration",
"_____no_output_____"
]
],
[
[
"pred_final = test.iloc[:,np.r_[0,2]]\npred_final = pred_final.merge(res_data, on='id', how='left')\npred_final['pred_s']=1-pred_s\npred_final['pred_m']=pred_m+2\npred_final = pred_final.iloc[:,np.r_[0,1,2,4,5]]\n\ncolnames=[ 'id', 'functionCounts_groundtruth', 'functionCounts_blast', 'functionCounts_s', 'functionCounts_m' ]\npred_final.columns = colnames\n\ndef choose_functioncounts(blast, s,m):\n if str(blast)!='nan':\n return blast\n if s ==1:\n return 1\n return m\n\npred_final['functionCounts_dmlf'] = pred_final.apply(lambda x:choose_functioncounts(x.functionCounts_blast, x.functionCounts_s, x.functionCounts_m), axis=1)\npred_final=pred_final.iloc[:,np.r_[0,1,5]]",
"_____no_output_____"
],
[
"for i in range(1,9):\n right= len(pred_final[(pred_final.functionCounts_groundtruth==pred_final.functionCounts_dmlf) & (pred_final.functionCounts_groundtruth==i)])\n total= len(pred_final[(pred_final.functionCounts_groundtruth==i)])\n print(str(i)+'\\t'+str(right)+'/'+str(total))",
"1\t2942/3052\n2\t48/183\n3\t31/53\n4\t2/6\n5\t0/2\n6\t5/7\n7\t1/1\n8\t0/0\n"
],
[
"pred_final",
"_____no_output_____"
],
[
"eva.caculateMetrix(groundtruth=pred_final.functionCounts_groundtruth, predict=pred_final.functionCounts_dmlf, baselineName='ours', type='multi')",
" ours \t\t0.917070 \t0.583702 \t\t0.552035 \t0.560549\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
e714e6da9626a04e8db542caef23ec6851c8f574 | 34,689 | ipynb | Jupyter Notebook | notebook/Prototype with FastAPI in Jupyter.ipynb | Build-week-track-team-38/fastapi-jupytercon-2020 | c0280e65aecc4121bf7d2b0c7571857687305955 | [
"MIT"
] | 2 | 2021-03-05T17:27:12.000Z | 2021-07-03T04:57:54.000Z | notebook/Prototype with FastAPI in Jupyter.ipynb | Build-week-track-team-38/fastapi-jupytercon-2020 | c0280e65aecc4121bf7d2b0c7571857687305955 | [
"MIT"
] | null | null | null | notebook/Prototype with FastAPI in Jupyter.ipynb | Build-week-track-team-38/fastapi-jupytercon-2020 | c0280e65aecc4121bf7d2b0c7571857687305955 | [
"MIT"
] | 6 | 2020-10-19T15:52:48.000Z | 2021-07-26T22:40:55.000Z | 28.503698 | 486 | 0.574995 | [
[
[
"# After model.fit, before you deploy: Prototype with FastAPI in Jupyter!\n\n_By Ryan Herr, for JupyterCon 2020_\n\nYou want to deploy your scikit-learn model. Now what? You can make an API for your model in Jupyter!\n\nYou’ll learn [FastAPI](https://fastapi.tiangolo.com/), a Python web framework with automatic interactive docs. We’ll validate inputs with type hints, and convert to a dataframe, to make new predictions with your model. You’ll have a working API prototype, running from a notebook and ready to deploy! \n\nThis talk is for people who feel comfortable in notebooks and can fit scikit-learn models. It’s about the technical process in-between developing your model and deploying it. Maybe you’ve never deployed an API before, or maybe you’ve tried Flask but you’re curious about FastAPI.",
"_____no_output_____"
],
[
"## Part 0, model.fit",
"_____no_output_____"
],
[
" We'll use the [Palmer Penguins](https://github.com/allisonhorst/palmerpenguins) dataset. It's an alternative to [Iris](https://en.wikipedia.org/wiki/Iris_flower_data_set). Instead of using Iris flower measurements to predict one of three species, we'll use penguin measurements to predict one of three species.",
"_____no_output_____"
],
[
"<img src=\"https://raw.githubusercontent.com/allisonhorst/palmerpenguins/master/man/figures/lter_penguins.png\" width=\"50%\" />\n\nArtwork by [@allison_horst](https://twitter.com/allison_horst)",
"_____no_output_____"
],
[
"First, load and explore the data:",
"_____no_output_____"
]
],
[
[
"import seaborn as sns\npenguins = sns.load_dataset('penguins')\nsns.pairplot(data=penguins, hue='species')",
"_____no_output_____"
]
],
[
[
"Looks like Adelie penguins have less bill length. Gentoo penguins have less bill depth, more flipper length, and more body mass.\n\nSo we can classify the three species using two features: bill length and another numeric feature, such as bill depth.",
"_____no_output_____"
],
[
"<img src=\"https://raw.githubusercontent.com/allisonhorst/palmerpenguins/master/man/figures/culmen_depth.png\" width=\"50%\" />\n\nArtwork by [@allison_horst](https://twitter.com/allison_horst)",
"_____no_output_____"
],
[
"We'll select `bill_length_mm` and `bill_depth_mm` for our features, and `species` is our target. We'll use scikit-learn to fit a Logistic Regression model. \n\nScikit-learn's implementation of Logistic Regression is regularized. We'll use cross-validation to automate the amount of regularization, after scaling the features. We can combine the scaler transformation and the model into a scikit-learn pipeline. \n\nWe'll also use cross-validation to estimate how accurately the model generalizes.",
"_____no_output_____"
]
],
[
[
"from sklearn.linear_model import LogisticRegressionCV\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.pipeline import make_pipeline\n\nfeatures = ['bill_length_mm', 'bill_depth_mm']\ntarget = 'species'\n\npenguins.dropna(subset=features, inplace=True)\nX = penguins[features]\ny = penguins[target]\n\nclassifier = make_pipeline(\n StandardScaler(), \n LogisticRegressionCV()\n)\n\nclassifier.fit(X, y)\n\nscores = cross_val_score(classifier, X, y)\navg_acc = scores.mean() * 100\nstd_acc = scores.std() * 100\nprint(f'Cross-Validation Accuracy: {avg_acc:.0f}% +/- {2*std_acc:.0f}%')",
"_____no_output_____"
]
],
[
[
"So, our model seems to classify penguins nearly perfectly.\n\nNext, we'll deploy this model in a FastAPI app. ",
"_____no_output_____"
],
[
"Web apps aren't usually served from notebooks, especially temporary cloud notebooks like Binder. But it can be useful for rapid prototyping. Here's a helper function to make it possible:",
"_____no_output_____"
]
],
[
[
"def enable_cloud_notebook(port=8000):\n \"\"\"\n Enables you to run a FastAPI app from a cloud notebook.\n Useful for rapid prototyping if you like notebooks!\n Not needed when you develop in a local IDE or deploy \"for real.\"\n \"\"\"\n\n # Prevent \"RuntimeError: This event loop is already running\"\n import nest_asyncio\n nest_asyncio.apply()\n\n # Get a public URL to the localhost server \n from pyngrok import ngrok\n print('Public URL:', ngrok.connect(port=port))",
"_____no_output_____"
]
],
[
[
"## Part 1, random penguins, GET request",
"_____no_output_____"
],
[
"Let's back up and begin with something like \"Hello World.\" Before we make real predictions, we’ll make random guesses.",
"_____no_output_____"
]
],
[
[
"import random\n\ndef random_penguin():\n \"\"\"Return a random penguin species\"\"\"\n return random.choice(['Adelie', 'Chinstrap', 'Gentoo'])",
"_____no_output_____"
]
],
[
[
"Run this function and you'll get random penguin species.",
"_____no_output_____"
]
],
[
[
"random_penguin()",
"_____no_output_____"
]
],
[
[
"In the next cell, you'll see that we add a half-dozen lines of code to turn this function into a FastAPI app.\n\nThese lines create a FastAPI app instance:\n\n```python\nfrom fastapi import FastAPI\napp = FastAPI()\n```\n\nThis decorator tells FastAPI to call the function whenever the app receives a request to the `/` path using the HTTP GET method.\n\n```python\[email protected]('/')\ndef random_penguin():\n ...\n```\n\nThis line enables running FastAPI from a cloud notebook:\n\n```python\nenable_cloud_notebook()\n```\n\nThese lines run the app with Uvicorn, the recommended web server for FastAPI:\n\n```python\nimport uvicorn\nuvicorn.run(app)\n```\n\nThe code below puts it all together. Run the cell. You'll see a \"Public URL\" that ends in \"ngrok.io\". Click the link to open it in a new tab. You'll see a random penguin species. Refresh the tab to get another random penguin.",
"_____no_output_____"
]
],
[
[
"import random\n\nfrom fastapi import FastAPI\nimport uvicorn\n\napp = FastAPI()\n\[email protected]('/')\ndef random_penguin():\n \"\"\"Return a random penguin species\"\"\"\n species = random.choice(['Adelie', 'Chinstrap', 'Gentoo'])\n return species\n\nenable_cloud_notebook()\nuvicorn.run(app)",
"_____no_output_____"
]
],
[
[
"Every time you refresh you see it in the web logs above. The app is up on the public internet for anyone to access, but only while this cell in this notebook is running.",
"_____no_output_____"
],
[
"In this notebook, stop the cell from running now.",
"_____no_output_____"
],
[
"Next we'll add an app `title`, change the `docs_url` parameter, and change the path to `/random` for the `random_penguin` function.\n\nRun the cell and click the new Public URL.",
"_____no_output_____"
]
],
[
[
"import random\n\nfrom fastapi import FastAPI\nimport uvicorn\n\napp = FastAPI(\n title='🐧 Penguin predictor API',\n docs_url='/'\n)\n\[email protected]('/random')\ndef random_penguin():\n \"\"\"Return a random penguin species\"\"\"\n species = random.choice(['Adelie', 'Chinstrap', 'Gentoo'])\n return species\n\nenable_cloud_notebook()\nuvicorn.run(app)",
"_____no_output_____"
]
],
[
[
"Now you'll see automatically generated documentation. It's interactive too! \n\nClick on \"/random\", then the \"Try It Out\" button, then the \"Execute\" button. Scroll down to the \"Server response.\" You'll see \"Response body\" with a penguin species, and \"Code\" with 200 which is a successful [status code](https://developer.mozilla.org/en-US/docs/Web/HTTP/Status).",
"_____no_output_____"
],
[
"Or change the end of the URL to `/random` and you can use the API directly.",
"_____no_output_____"
],
[
"You access your API with code like this, from another notebook or Python shell. Replace the url with your own dynamically generated ngrok URL.\n\n```python\nimport requests\nurl = 'http://9571e5899f73.ngrok.io/random'\nresponse = requests.get(url)\nprint(response.status_code, response.text)\n```",
"_____no_output_____"
],
[
"Then stop the cell above from running like before.",
"_____no_output_____"
],
[
"## Part 2, real predictions, POST request",
"_____no_output_____"
],
[
"Okay, now let's work on adding our model to make real predictions.",
"_____no_output_____"
],
[
"To make a prediction, we need penguin measurements, which we'll receive as [JSON](https://developer.mozilla.org/en-US/docs/Learn/JavaScript/Objects/JSON): \n\n> JavaScript Object Notation (JSON) is a standard text-based format for representing structured data based on JavaScript object syntax. It is commonly used for transmitting data in web applications (e.g., sending some data from the server to the client, so it can be displayed on a web page, or vice versa). You'll come across it quite often ... it can be used independently from JavaScript, and many programming environments feature the ability to read (parse) and generate JSON.",
"_____no_output_____"
],
[
"JSON looks a lot like a Python dictionary, like this example:",
"_____no_output_____"
]
],
[
[
"gary_gentoo = {\"bill_length_mm\": 45, \"bill_depth_mm\": 15}",
"_____no_output_____"
]
],
[
[
"How do we go from JSON / dictionary format to something our model can use?\n\nWe need a Numpy array or a Pandas dataframe, with two columns (for our two features) and one row (for our one observation that we want to predict). We can make a dataframe from a list of dicts, like this:",
"_____no_output_____"
]
],
[
[
"import pandas as pd\npd.DataFrame([gary_gentoo])",
"_____no_output_____"
]
],
[
[
"When we use this dataframe with our classifier's predict method, we get the correct result.",
"_____no_output_____"
]
],
[
[
"import pandas as pd\ndf = pd.DataFrame([gary_gentoo])\nclassifier.predict(df)",
"_____no_output_____"
]
],
[
[
"The predict method returns a Numpy array with all our predictions. But we're just making a single prediction, so we want the \"zeroeth\" item from the array. Putting it all together, we could write a function like this:",
"_____no_output_____"
]
],
[
[
"def predict_species(penguin: dict):\n \"\"\"Predict penguin species\"\"\"\n df = pd.DataFrame([penguin])\n species = classifier.predict(df)\n return species[0]",
"_____no_output_____"
],
[
"predict_species(gary_gentoo)",
"_____no_output_____"
]
],
[
[
"Here's another example.",
"_____no_output_____"
]
],
[
[
"amy_adelie = {\"bill_length_mm\": 35, \"bill_depth_mm\": 18}\npredict_species(amy_adelie)",
"_____no_output_____"
]
],
[
[
"We'll add the function to our FastAPI app using a decorator. The decorator tells FastAPI to call the function whenever the app receives a request to the `/predict` path using the [HTTP POST method](https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods/POST). FastAPI will automatically parse the request body's JSON to a Python dict named `penguin`.\n\n```python\[email protected]('/predict')\ndef predict_species(penguin: dict):\n ...\n```\n\nWe'll also add a more descriptive `description` parameter to the app. Putting it all together:",
"_____no_output_____"
]
],
[
[
"import random\n\nfrom fastapi import FastAPI\nimport pandas as pd\nimport uvicorn\n\napp = FastAPI(\n title='🐧 Penguin predictor API', \n description='Deploys a logistic regression model fit on the [Palmer Penguins](https://github.com/allisonhorst/palmerpenguins) dataset.', \n docs_url='/'\n)\n\n\[email protected]('/predict')\ndef predict_species(penguin: dict):\n \"\"\"Predict penguin species\"\"\"\n df = pd.DataFrame([penguin])\n species = classifier.predict(df)\n return species[0]\n\n\[email protected]('/random')\ndef random_penguin():\n \"\"\"Return a random penguin species\"\"\"\n species = random.choice(['Adelie', 'Chinstrap', 'Gentoo'])\n return species\n\n\nenable_cloud_notebook()\nuvicorn.run(app)",
"_____no_output_____"
]
],
[
[
"\nRun the cell above, then try an example:\n\n- Click the \"Try it out\" button.\n- The \"Request body\" text field becomes editable. Copy-paste Gary Gentoo's measurements into the field: `{\"bill_length_mm\": 45, \"bill_depth_mm\": 15}`\n- Click the \"Execute\" button, then scroll down to the \"Server response.\" You should see the species \"Gentoo\" correctly classified.\n\nTry another example:\n\n- Copy-paste Amy Adelie's measurements into the \"Request body\" text field: `{\"bill_length_mm\": 35, \"bill_depth_mm\": 18}`\n- Click the \"Execute\" button. You should see the species \"Adelie\" correctly classified.\n\nBut what happens if you change your \"Request body\" to something unexpected?\n- What if your input doesn't have exactly two keys, `bill_length_mm` and `bill_depth_mm`, in that order?\n- What if your input values are zero? Huge numbers? Negative numbers? Not a number?\n\nWe aren't validating input yet. We just assume the API users give valid input. That's a dangerous assumption. When the inputs aren't valid, the app may respond with a Server Error instead of helpful warnings. Or worse, the app seems to work and returns a response, but because the inputs were flawed, the output is flawed too. \"Garbage in, garbage out.\"",
"_____no_output_____"
],
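[
"You can also probe this from code (a sketch; replace the URL with your own dynamically generated ngrok URL):\n\n```python\nimport requests\n\nurl = 'http://9571e5899f73.ngrok.io/predict'\nbad_penguin = {\"bill_length\": 45}  # misnamed key; bill_depth_mm is missing\nresponse = requests.post(url, json=bad_penguin)\n# without validation, the server may answer with a 500 Internal Server Error\nprint(response.status_code, response.text)\n```",
"_____no_output_____"
],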
[
"Stop the cell above from running. Next we'll add data validation.",
"_____no_output_____"
],
[
"## Part 3, Data validation",
"_____no_output_____"
],
[
"Look at the type annotation for the `predict_species` function's argument. The function accepts any `dict`. \n\n```python\[email protected]('/predict')\ndef predict_species(penguin: dict):\n ...\n```\n\nWe'll change this so the function expects an argument of type `Penguin`.\n\n```python\nclass Penguin:\n \"\"\"Parse & validate penguin measurements\"\"\"\n ...\n\[email protected]('/predict')\ndef predict_species(penguin: Penguin):\n ...\n```",
"_____no_output_____"
],
[
"We'll create a `Penguin` [data class](https://docs.python.org/3/library/dataclasses.html) with [type annotations](https://docs.python.org/3/library/typing.html) to define what attributes we expect our input to have. We'll use [Pydantic](https://pydantic-docs.helpmanual.io/), a data validation library integrated with FastAPI. It sounds complex, but it's just a few lines of code! ",
"_____no_output_____"
]
],
[
[
"from pydantic import BaseModel\n\nclass Penguin(BaseModel):\n \"\"\"Parse & validate penguin measurements\"\"\"\n bill_length_mm: float\n bill_depth_mm: float",
"_____no_output_____"
]
],
[
[
"We can instantiate a penguin object like this:",
"_____no_output_____"
]
],
[
[
"Penguin(bill_length_mm=45, bill_depth_mm=15)",
"_____no_output_____"
]
],
[
[
"Or like this, by unpacking our dictionary into parameters:",
"_____no_output_____"
]
],
[
[
"Penguin(**gary_gentoo)",
"_____no_output_____"
]
],
[
[
"Now let's see what happens with missing input:",
"_____no_output_____"
]
],
[
[
"missing_input = {\"bill_length_mm\": 45}\nPenguin(**missing_input)",
"_____no_output_____"
]
],
[
[
"We automatically get a `ValidationError` with a helpful, descriptive error message. That's what we want in this situation!",
"_____no_output_____"
],
[
"Next we'll try a misnamed input (`bill_depth` instead of `bill_depth_mm`)",
"_____no_output_____"
]
],
[
[
"wrong_name = {\"bill_length_mm\": 45, \"bill_depth\": 15}\nPenguin(**wrong_name)",
"_____no_output_____"
]
],
[
[
"Again, we get a `ValidationError`, which is want we want here.",
"_____no_output_____"
],
[
"Let's try an input with the wrong type, such as a string instead of a number.",
"_____no_output_____"
]
],
[
[
"wrong_type = {\"bill_length_mm\": 45, \"bill_depth_mm\": \"Hello Penguins!\"}\nPenguin(**wrong_type)",
"_____no_output_____"
]
],
[
[
"We get a different `ValidationError` because the value is not a valid float.",
"_____no_output_____"
],
[
"Let's try a different string:",
"_____no_output_____"
]
],
[
[
"convertable_type = {\"bill_length_mm\": 45, \"bill_depth_mm\": \"15\"}\nPenguin(**convertable_type)",
"_____no_output_____"
]
],
[
[
"This works because the string can be converted to a float. ",
"_____no_output_____"
],
[
"If we add an extra input ...",
"_____no_output_____"
]
],
[
[
"extra_input = {\"bill_length_mm\": 45, \"bill_depth_mm\": 15, \"extra_feature\": \"will be ignored\"}\nPenguin(**extra_input)",
"_____no_output_____"
]
],
[
[
"... it will be ignored.",
"_____no_output_____"
],
[
"If we flip the order of inputs ...",
"_____no_output_____"
]
],
[
[
"flipped_order = {\"bill_depth_mm\": 15, \"bill_length_mm\": 45}\nPenguin(**flipped_order)",
"_____no_output_____"
]
],
[
[
"... they'll be flipped back.",
"_____no_output_____"
],
[
"What about penguin measurements that are implausibly large or small? We can use \"constrained floats\" to catch this.\n\nWe'll set constraints that each input must be greater than (`gt`) some minimum and less than (`lt`) some maximum.",
"_____no_output_____"
]
],
[
[
"from pydantic import confloat\nhelp(confloat)",
"_____no_output_____"
]
],
[
[
"First, let's look at the minimum and maximum measurements from our training data:",
"_____no_output_____"
]
],
[
[
"X.describe()",
"_____no_output_____"
]
],
[
[
"Then, set some reasonable constraints:",
"_____no_output_____"
]
],
[
[
"from pydantic import BaseModel, confloat\n\nclass Penguin(BaseModel):\n \"\"\"Parse & validate penguin measurements\"\"\"\n bill_length_mm: confloat(gt=32, lt=60)\n bill_depth_mm: confloat(gt=13, lt=22)",
"_____no_output_____"
]
],
[
[
"Now when inputs are too large or small, we get a `ValidationError` with descriptive messages.",
"_____no_output_____"
]
],
[
[
"huge_penguin = {\"bill_depth_mm\": 1500, \"bill_length_mm\": 4500}\nPenguin(**huge_penguin)",
"_____no_output_____"
],
[
"zero_penguin = {\"bill_depth_mm\": 0, \"bill_length_mm\": 0}\nPenguin(**zero_penguin)",
"_____no_output_____"
],
[
"negative_penguin = {\"bill_depth_mm\": -45, \"bill_length_mm\": -15}\nPenguin(**negative_penguin)",
"_____no_output_____"
]
],
[
[
"One more thing. Let's add a helpful method to our class:",
"_____no_output_____"
]
],
[
[
"from pydantic import BaseModel, confloat\n\nclass Penguin(BaseModel):\n \"\"\"Parse & validate penguin measurements\"\"\"\n bill_length_mm: confloat(gt=32, lt=60)\n bill_depth_mm: confloat(gt=13, lt=22)\n\n def to_df(self):\n \"\"\"Convert to pandas dataframe with 1 row.\"\"\"\n return pd.DataFrame([dict(self)])",
"_____no_output_____"
]
],
[
[
"Now we can validate JSON input and convert it to a pandas dataframe with one line of code.",
"_____no_output_____"
]
],
[
[
"Penguin(**gary_gentoo).to_df()",
"_____no_output_____"
]
],
[
[
"Let's put this all together in our FastAPI code.\n\n- Add the `Penguin` class.\n- Change the type annotation for the `predict_species` function argument. Instead of `dict`, the type is now `Penguin`.\n- When a POST request is made to the `/predict` path, then FastAPI will automatically validate and parse the request body's JSON into a `Penguin` object.\n- Use the penguin's `to_df` method to convert into a dataframe for our model.",
"_____no_output_____"
]
],
[
[
"import random\n\nfrom fastapi import FastAPI\nimport pandas as pd\nfrom pydantic import BaseModel, confloat\nimport uvicorn\n\napp = FastAPI(\n title='🐧 Penguin predictor API', \n description='Deploys a logistic regression model fit on the [Palmer Penguins](https://github.com/allisonhorst/palmerpenguins) dataset.', \n docs_url='/'\n)\n\n\nclass Penguin(BaseModel):\n \"\"\"Parse & validate penguin measurements\"\"\"\n bill_length_mm: confloat(gt=32, lt=60)\n bill_depth_mm: confloat(gt=13, lt=22)\n\n def to_df(self):\n \"\"\"Convert to pandas dataframe with 1 row.\"\"\"\n return pd.DataFrame([dict(self)])\n\n\[email protected]('/predict')\ndef predict_species(penguin: Penguin):\n \"\"\"Predict penguin species from bill length & depth\n \n Parameters\n ----------\n bill_length_mm : float, greater than 32, less than 60 \n bill_depth_mm : float, greater than 13, less than 22 \n\n Returns\n -------\n str \"Adelie\", \"Chinstrap\", or \"Gentoo\" \n \"\"\"\n species = classifier.predict(penguin.to_df())\n return species[0]\n\n\[email protected]('/random')\ndef random_penguin():\n \"\"\"Return a random penguin species\"\"\"\n species = random.choice(['Adelie', 'Chinstrap', 'Gentoo'])\n return species\n\n\nenable_cloud_notebook()\nuvicorn.run(app)",
"_____no_output_____"
]
],
[
[
"Test the app, then stop the cell from running.",
"_____no_output_____"
],
[
"## Part -1, Deploy",
"_____no_output_____"
],
[
"Let's save the model so you can use it without retraining. This is sometimes called \"pickling.\" See [scikit-learn docs on \"model persistence.\"](https://scikit-learn.org/stable/modules/model_persistence.html)",
"_____no_output_____"
]
],
[
[
"from joblib import dump\ndump(classifier, 'classifier.joblib', compress=True)",
"_____no_output_____"
]
],
[
[
"Now even if we delete the object from memory ...",
"_____no_output_____"
]
],
[
[
"del classifier",
"_____no_output_____"
]
],
[
[
"We can reload from our file ...",
"_____no_output_____"
]
],
[
[
"from joblib import load\nclassifier = load('classifier.joblib')",
"_____no_output_____"
]
],
[
[
"... and it's back, ready to use:",
"_____no_output_____"
]
],
[
[
"from sklearn import set_config\nset_config(display='diagram')\nclassifier",
"_____no_output_____"
]
],
[
[
"If you're using a cloud notebook, you can get a link to download the file using code like this:",
"_____no_output_____"
]
],
[
[
"from IPython.display import FileLink\nFileLink('classifier.joblib')",
"_____no_output_____"
]
],
[
[
"This last code cell has 3 changes from the previous iteration:\n\n- Loads the model with joblib\n- Adds image HTML tags in the app's description\n- Configures [CORS (Cross-Origin Resource Sharing)](https://fastapi.tiangolo.com/tutorial/cors/) so your API could be called by apps on different domains.",
"_____no_output_____"
]
],
[
[
"import random\n\nfrom fastapi import FastAPI\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom joblib import load\nimport pandas as pd\nfrom pydantic import BaseModel, confloat\nimport uvicorn\n\ndescription = \"\"\"\nDeploys a logistic regression model fit on the [Palmer Penguins](https://github.com/allisonhorst/palmerpenguins) dataset.\n\n<img src=\"https://raw.githubusercontent.com/allisonhorst/palmerpenguins/master/man/figures/lter_penguins.png\" width=\"40%\" /> <img src=\"https://raw.githubusercontent.com/allisonhorst/palmerpenguins/master/man/figures/culmen_depth.png\" width=\"30%\" />\n\nArtwork by [@allison_horst](https://twitter.com/allison_horst)\n\"\"\"\n\napp = FastAPI(\n title='🐧 Penguin predictor API',\n description=description, \n docs_url='/'\n)\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=['*'],\n allow_methods=['*']\n)\n\nclassifier = load('classifier.joblib')\n\n\nclass Penguin(BaseModel):\n \"\"\"Parse & validate penguin measurements\"\"\"\n bill_length_mm: confloat(gt=32, lt=60)\n bill_depth_mm: confloat(gt=13, lt=22)\n\n def to_df(self):\n \"\"\"Convert to pandas dataframe with 1 row.\"\"\"\n return pd.DataFrame([dict(self)])\n\n\[email protected]('/predict')\ndef predict_species(penguin: Penguin):\n \"\"\"Predict penguin species from bill length & depth\n \n Parameters\n ----------\n bill_length_mm : float, greater than 32, less than 60 \n bill_depth_mm : float, greater than 13, less than 22 \n\n Returns\n -------\n str \"Adelie\", \"Chinstrap\", or \"Gentoo\" \n \"\"\"\n species = classifier.predict(penguin.to_df())\n return species[0]\n\n\[email protected]('/random')\ndef random_penguin():\n \"\"\"Return a random penguin species\"\"\"\n species = random.choice(['Adelie', 'Chinstrap', 'Gentoo'])\n return species\n\n\nenable_cloud_notebook()\nuvicorn.run(app)",
"_____no_output_____"
]
],
[
[
"We've prototyped a complete working web app, running from a notebook. We're ready to deploy!\n\nDo you want to take this last step and go beyond the notebook? See the README in this repo for instructions how to deploy to Heroku, a popular cloud platform.",
"_____no_output_____"
],
[
"## Learn more\n\nWant to learn more about FastAPI? I recommend these links:\n\n- [Build a machine learning API from scratch](https://youtu.be/1zMQBe0l1bM) by Sebastián Ramírez, FastAPI's creator\n- [calmcode.io — FastAPI videos](https://calmcode.io/fastapi/hello-world.html) by Vincent D. Warmerdam\n- [FastAPI for Flask Users](https://amitness.com/2020/06/fastapi-vs-flask/) by Amit Chaudhary\n- [FastAPI official docs](https://fastapi.tiangolo.com/)\n- [testdriven.io — FastAPI blog posts](https://testdriven.io/blog/topics/fastapi/)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
e714e976e6af9704c157537fa358ff51ac4bd0d8 | 7,471 | ipynb | Jupyter Notebook | tantum/libs/fmix/notebooks/grad_cam.ipynb | dmitryshendryk/tantum | afd07e7a52d65338297a4f46d26e5241d3e756dc | [
"MIT"
] | 320 | 2020-02-17T11:22:58.000Z | 2022-03-09T09:51:57.000Z | image-fmix/FMix-master/notebooks/grad_cam.ipynb | chandlerbing65nm/Cassava-Leaf-Disease-Classification | c359a076193a7768687e8eb7a2d49c3ee97a104c | [
"MIT"
] | 9 | 2020-03-02T20:04:00.000Z | 2022-02-09T11:30:38.000Z | image-fmix/FMix-master/notebooks/grad_cam.ipynb | chandlerbing65nm/Cassava-Leaf-Disease-Classification | c359a076193a7768687e8eb7a2d49c3ee97a104c | [
"MIT"
] | 42 | 2020-02-28T12:26:53.000Z | 2022-03-03T19:19:34.000Z | 32.341991 | 203 | 0.56552 | [
[
[
"# Example Masks\n\nIn this notebook, we plot the [Grad-CAM](https://arxiv.org/abs/1610.02391) figures from the paper. For more info and other examples, have a look at [our README](https://github.com/ecs-vlc/fmix).\n\n**Note**: The easiest way to use this is as a colab notebook, which allows you to dive in with no setup.\n\nFirst, we load dependencies and some data from CIFAR-10:",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torchvision\nfrom torchvision import transforms\nimport models\nfrom torchbearer import Trial\nimport cv2\nimport torch.nn.functional as F\n\ninv_norm = transforms.Normalize((-0.4914/0.2023, -0.4822/0.1994, -0.4465/0.2010), (1/0.2023, 1/0.1994, 1/0.2010))\nvalset = torchvision.datasets.CIFAR10(root='./data/cifar', train=False, download=True,\n transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465),\n (0.2023, 0.1994, 0.2010)),]))\n\nvalloader = torch.utils.data.DataLoader(valset, batch_size=1, shuffle=True, num_workers=8)",
"Files already downloaded and verified\n"
]
],
[
[
"## Model Wrapper\n\nNext, we define a `ResNet` wrapper that will iterate up to some given block `k`:",
"_____no_output_____"
]
],
[
[
"class ResNet_CAM(nn.Module):\n def __init__(self, net, layer_k):\n super(ResNet_CAM, self).__init__()\n self.resnet = net\n convs = nn.Sequential(*list(net.children())[:-1])\n self.first_part_conv = convs[:layer_k]\n self.second_part_conv = convs[layer_k:]\n self.linear = nn.Sequential(*list(net.children())[-1:])\n \n def forward(self, x):\n x = self.first_part_conv(x)\n x.register_hook(self.activations_hook)\n x = self.second_part_conv(x)\n x = F.adaptive_avg_pool2d(x, (1,1))\n x = x.view((1, -1))\n x = self.linear(x)\n return x\n \n def activations_hook(self, grad):\n self.gradients = grad\n \n def get_activations_gradient(self):\n return self.gradients\n \n def get_activations(self, x):\n return self.first_part_conv(x)",
"_____no_output_____"
]
],
[
[
"## Grad-CAM\n\nNow for the Grad-CAM code, adapted from [implementing-grad-cam-in-pytorch](https://medium.com/@stepanulyanin/implementing-grad-cam-in-pytorch-ea0937c31e82):",
"_____no_output_____"
]
],
[
[
"def superimpose_heatmap(heatmap, img):\n resized_heatmap = cv2.resize(heatmap.numpy(), (img.shape[2], img.shape[3]))\n resized_heatmap = np.uint8(255 * resized_heatmap)\n resized_heatmap = cv2.applyColorMap(resized_heatmap, cv2.COLORMAP_JET)\n superimposed_img = torch.Tensor(cv2.cvtColor(resized_heatmap, cv2.COLOR_BGR2RGB)) * 0.006 + inv_norm(img[0]).permute(1,2,0)\n \n return superimposed_img\n\ndef get_grad_cam(net, img):\n net.eval()\n pred = net(img)\n pred[:,pred.argmax(dim=1)].backward()\n gradients = net.get_activations_gradient()\n pooled_gradients = torch.mean(gradients, dim=[0, 2, 3])\n activations = net.get_activations(img).detach()\n for i in range(activations.size(1)):\n activations[:, i, :, :] *= pooled_gradients[i]\n heatmap = torch.mean(activations, dim=1).squeeze()\n heatmap = np.maximum(heatmap, 0)\n heatmap /= torch.max(heatmap)\n \n return torch.Tensor(superimpose_heatmap(heatmap, img).permute(2,0,1))",
"_____no_output_____"
]
],
[
[
"## Models From `torch.hub`\n\nNext, we load in the models from `torch.hub`",
"_____no_output_____"
]
],
[
[
"baseline_net = torch.hub.load('ecs-vlc/FMix:master', 'preact_resnet18_cifar10_baseline', pretrained=True)\n\nfmix_net = torch.hub.load('ecs-vlc/FMix:master', 'preact_resnet18_cifar10_fmix', pretrained=True)\n\nmixup_net = torch.hub.load('ecs-vlc/FMix:master', 'preact_resnet18_cifar10_mixup', pretrained=True)\n\nfmix_plus_net = torch.hub.load('ecs-vlc/FMix:master', 'preact_resnet18_cifar10_fmixplusmixup', pretrained=True)",
"Downloading: \"https://github.com/ecs-vlc/FMix/archive/master.zip\" to /home/ethan/.cache/torch/hub/master.zip\nUsing cache found in /home/ethan/.cache/torch/hub/ecs-vlc_FMix_master\nUsing cache found in /home/ethan/.cache/torch/hub/ecs-vlc_FMix_master\nUsing cache found in /home/ethan/.cache/torch/hub/ecs-vlc_FMix_master\n"
]
],
[
[
"## Plots\n\nFinally, generate and save the Grad-CAM plots:",
"_____no_output_____"
]
],
[
[
"layer_k = 4\nn_imgs = 10\n\nbaseline_cam_net = ResNet_CAM(baseline_net, layer_k)\nfmix_cam_net = ResNet_CAM(fmix_net, layer_k)\nmixup_cam_net = ResNet_CAM(mixup_net, layer_k)\nfmix_plus_cam_net = ResNet_CAM(fmix_plus_net, layer_k)\n\nimgs = torch.Tensor(5, n_imgs, 3, 32, 32)\nit = iter(valloader)\nfor i in range(0,n_imgs):\n img, _ = next(it)\n imgs[0][i] = inv_norm(img[0])\n imgs[1][i] = get_grad_cam(baseline_cam_net, img)\n imgs[2][i] = get_grad_cam(mixup_cam_net, img)\n imgs[3][i] = get_grad_cam(fmix_cam_net, img)\n imgs[4][i] = get_grad_cam(fmix_plus_cam_net, img)\n\ntorchvision.utils.save_image(imgs.view(-1, 3, 32, 32), \"gradcam_at_layer\" + str(layer_k) + \".png\",nrow=n_imgs, pad_value=1)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e714f6e1950bdc29fc17b4a1ba0f77da340b88d5 | 37,695 | ipynb | Jupyter Notebook | lectures/01_Python_introduction.ipynb | ladahkk/python_nlp_2021_spring | b40666e688bb60eec6aee076225eb65cd1659512 | [
"MIT"
] | 14 | 2021-02-09T09:35:18.000Z | 2022-02-23T08:54:39.000Z | lectures/01_Python_introduction.ipynb | ladahkk/python_nlp_2021_spring | b40666e688bb60eec6aee076225eb65cd1659512 | [
"MIT"
] | null | null | null | lectures/01_Python_introduction.ipynb | ladahkk/python_nlp_2021_spring | b40666e688bb60eec6aee076225eb65cd1659512 | [
"MIT"
] | 13 | 2021-02-09T11:00:38.000Z | 2022-02-21T16:18:13.000Z | 21.71371 | 867 | 0.509298 | [
[
[
"# Introduction to Python and Natural Language Technologies\n\n__Lecture 01-2, Introduction to Python__\n\n__Feb 09, 2021__\n\n__Judit Ács__",
"_____no_output_____"
],
[
"# About this part of the course\n\n## Goal\n\n- upper intermediate level Python\n- will cover some advanced concepts\n- focus on string manipulation",
"_____no_output_____"
],
[
"## Prerequisites\n\n- intermediate level in at least one object oriented programming language\n- must know: _class, instance, method, operator overloading, basic IO handling_\n- good to know: _static method, property, mutability, garbage collection_",
"_____no_output_____"
],
[
"## Links to course material\n\n[Official Github repository](https://github.com/bmeaut/python_nlp_2020_fall)",
"_____no_output_____"
],
[
"# Jupyter\n\n- Jupyter - formally known as IPython Notebook is a web application that allows you to create and share documents with live code, equations, visualizations etc.\n- Jupyter notebooks are JSON files with the extension `.ipynb`\n- Can be converted to HTML, PDF, LateX etc.\n- Can render images, tables, graphs, LateX equations\n- Large number of extensions called 'nbextensions'\n - `jupyter-vim-binding` is used in this lecture\n - Table of Contents (main/toc2)\n- Content is organized into cells",
"_____no_output_____"
],
[
"## Cell types\n\n1. code cell: Python/R/Lua/etc. code\n2. raw cell: raw text\n3. markdown cell: formatted text using Markdown",
"_____no_output_____"
],
[
"## Code cell",
"_____no_output_____"
]
],
[
[
"print(\"Hello world\")",
"_____no_output_____"
]
],
[
[
"The last command's output is displayed",
"_____no_output_____"
]
],
[
[
"2 + 3\n3 + 4",
"_____no_output_____"
]
],
[
[
"This can be a tuple of multiple values",
"_____no_output_____"
]
],
[
[
"2 + 3, 3 + 4, \"hello \" + \"world\"",
"_____no_output_____"
]
],
[
[
"## Markdown cell\n\n**This is in bold**\n\n*This is in italics*\n\n| This | is |\n| --- | --- |\n| a | table |\n\nand this is a pretty LateX equation:\n\n$$\n\\mathbf{E}\\cdot\\mathrm{d}\\mathbf{S} = \\frac{1}{\\varepsilon_0} \\iiint_\\Omega \\rho \\,\\mathrm{d}V\n$$",
"_____no_output_____"
],
[
"## Using Jupyter\n\n### Command mode and edit mode\n\nJupyter has two modes: command mode and edit mode\n\n1. Command mode: perform non-edit operations on selected cells (can select more than one cell)\n - Selected cells are marked blue\n2. Edit mode: edit a single cell\n - The cell being edited is marked green",
"_____no_output_____"
],
[
"### Switching between modes\n\n1. Esc: Edit mode -> Command mode\n2. Enter or double click: Command mode -> Edit mode",
"_____no_output_____"
],
[
"### Running cells\n\n1. Ctrl + Enter: run cell\n2. Shift + Enter: run cell and select next cell\n3. Alt + Enter: run cell and insert new cell below",
"_____no_output_____"
]
],
[
[
"3 + 4",
"_____no_output_____"
]
],
[
[
"## User input\n\nJupyter has a widget for the built-in `input` function. This __halts__ the execution until some input is provided. Note the * in place of the execution counter:",
"_____no_output_____"
]
],
[
[
"input(\"Please input something: \")",
"_____no_output_____"
]
],
[
[
"## Cell magic\n\nSpecial commands can modify a single cell's behavior, for example",
"_____no_output_____"
]
],
[
[
"%%time\n\nfor x in range(1000000):\n pass",
"_____no_output_____"
],
[
"%%timeit\n\nx = 2",
"_____no_output_____"
],
[
"%%writefile hello.py\n\nprint(\"Hello world from BME\")",
"_____no_output_____"
]
],
[
[
"For a complete list of magic commands:",
"_____no_output_____"
]
],
[
[
"%lsmagic",
"_____no_output_____"
]
],
[
[
"## Under the hood\n\n- Each notebook is run by its own _Kernel_ (Python interpreter)\n - The kernel can interrupted or restarted through the Kernel menu\n - **Always** run `Kernel -> Restart & Run All` before submitting homework to make sure that your notebook behaves as expected\n- All cells share a single namespace",
"_____no_output_____"
]
],
[
[
"my_name = 12",
"_____no_output_____"
],
[
"my_name + 1",
"_____no_output_____"
]
],
[
[
"Cells can be run in arbitrary order, execution count is helpful",
"_____no_output_____"
]
],
[
[
"print(\"this is run first\")",
"_____no_output_____"
],
[
"print(\"this is run afterwords. Note the execution count on the left.\")",
"_____no_output_____"
]
],
[
[
"# The Python programming language",
"_____no_output_____"
],
[
"## History of Python\n\n\n- Python started as a hobby project of Dutch programmer, Guido van Rossum in 1989.\n- Python 1.0 in 1994\n- Python 2.0 in 2000\n - Cycle-detecting garbage collector\n - Unicode support\n- Python 3.0 in 2008\n - Backward incompatible\n- Python2 End-of-Life (EOL) date was postponed from 2015 to 2020",
"_____no_output_____"
],
[
"## Guido van Rossum, <s>Benevolent Dictator for Life</s> Stepped down in 2018\n \nGuido van Rossum at OSCON 2006. by\n[Doc Searls](https://www.flickr.com/photos/docsearls/)\nlicensed under [CC BY 2.0](https://creativecommons.org/licenses/by/2.0/)\n <img width=\"400\" alt=\"portfolio_view\" src=\"https://upload.wikimedia.org/wikipedia/commons/6/66/Guido_van_Rossum_OSCON_2006.jpg\">",
"_____no_output_____"
],
[
"## Python community and development\n\n- Python Software Foundation nonprofit organization based in Delaware, US\n- Managed through PEPs (Python Enhancement Proposal)\n - Public discussion for example [PEP 3000 about Python 3.0](https://www.python.org/dev/peps/pep-3000/)\n- Strong community inclusion\n- Large standard library\n- Very large third-party module repository called PyPI (Python Package Index)\n- pip installer",
"_____no_output_____"
]
],
[
[
"import antigravity",
"_____no_output_____"
]
],
[
[
"## Python neologisms\n\n- the Python community has a number of made-up expressions\n- _Pythonic_: following Python's conventions, Python-like\n- _Pythonist_ or _Pythonista_: good Python programmer",
"_____no_output_____"
],
[
"# Developing in Python\n\n## Notebooks\n\n- Jupyter\n- [JupyterLab](https://jupyterlab.readthedocs.io/en/stable/): Jupyter + IDE-like features\n- [Google Colab](https://colab.research.google.com): online notebook with GPU access\n\n## IDEs\n\n- [VSCode](https://code.visualstudio.com/): free, cross-platform, Python plugin, command line support\n- [PyCharm](https://www.jetbrains.com/pycharm/): free Community edition, cross-platform\n\n## Command line tools\n\n- [VIM](https://www.vim.org/) or [neovim](https://neovim.io/) + [tmux](https://github.com/tmux/tmux/wiki): small, runs everywhere, modal editing, steep learning curve, Python plugins, very mature\n - VSCode and PyCharm have VIM editing mode\n- [Emacs](https://www.emacswiki.org/emacs/PythonProgrammingInEmacs): another CLI editor, built-in Python support",
"_____no_output_____"
],
[
"## PEP8, the Python style guide\n\n- widely accepted style guide for Python\n- [PEP8](https://www.python.org/dev/peps/pep-0008/) by Guido himself, 2001\n\nSpecifies:\n\n- indentation\n- line length\n- module imports\n- class names, function names etc.\n\nWe shall use PEP8 throughout this course. You are expected to follow it in the homeworks.",
"_____no_output_____"
],
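[
"PEP8 at a glance: a minimal sketch of conformant code (the names here are made up for illustration):\n\n~~~python\nclass TextCleaner: # class names use CapWords\n MAX_LENGTH = 79 # constants use UPPER_CASE; lines stay under 79 characters\n\n def clean_text(self, raw_text): # functions and variables use snake_case\n stripped = raw_text.strip() # bodies are indented with 4 spaces\n return stripped.lower()\n~~~",
"_____no_output_____"
],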
[
"# General properties of Python",
"_____no_output_____"
],
[
"## Whitespaces\n\nWhitespace indentation instead of curly braces, no need for semicolons:",
"_____no_output_____"
]
],
[
[
"n = 12\nif n % 2 == 0:\n print(\"n is even\")\nelse:\n print(\"n is odd\")",
"_____no_output_____"
]
],
[
[
"## Dynamic typing\n\nType checking is performed at run-time as opposed to compile-time (C++):",
"_____no_output_____"
]
],
[
[
"n = 2\nprint(type(n))\n\nn = 2.1\nprint(type(n))\n\nn = \"foo\"\nprint(type(n))",
"_____no_output_____"
]
],
[
[
"## Assignment\n\nAssignment differs from other imperative languages:\n\n- in C++ `i = 2` translates to _typed variable named i receives a copy of numeric value 2_\n- in Python `i = 2` translates to _name i receives a reference to object of numeric type of value 2_\n\nThe built-in function `id` returns the object's id",
"_____no_output_____"
]
],
[
[
"i = 2\nprint(id(i))\n\ni = 3\nprint(id(i))",
"_____no_output_____"
]
],
[
[
"The `is` operator compares two objects' identities:",
"_____no_output_____"
]
],
[
[
"a = 2\nb = a\nprint(a is b) # same as print(id(a) == id(b))",
"_____no_output_____"
]
],
[
[
"String concatenation results in a new object:",
"_____no_output_____"
]
],
[
[
"s = \"foo\"\nold_id = id(s)\n\ns += \"bar\"\nprint(old_id == id(s))",
"_____no_output_____"
]
],
[
[
"Numerical operations also result in new objects. We will talk about this in detail next week.",
"_____no_output_____"
]
],
[
[
"a = 2\nb = a\nprint(id(a) == id(b))\na += 1\nprint(a is b)",
"_____no_output_____"
]
],
[
[
"Integers from -5 to 256 are preallocated since these numbers are used frequently.\n\nMore information [here](https://github.com/satwikkansal/wtfPython#-is-is-not-what-it-is).\n\nMore crazy stuff in [WTFPython](https://github.com/satwikkansal/wtfpython)",
"_____no_output_____"
]
],
[
[
"for n in range(-8, 0):\n print(n, n is n + 1 - 1)\n \nfor n in range(253, 260):\n print(n, n is n + 1 - 1)",
"_____no_output_____"
]
],
[
[
"# Simple statements",
"_____no_output_____"
],
[
"## Conditional expressions\n\n### if, elif, else",
"_____no_output_____"
]
],
[
[
"n = int(input())\n#n = 12\n\nif n < 0:\n print(\"N is negative\")\nelif n > 0:\n print(\"N is positive\")\nelse:\n print(\"N is neither positive nor negative\")",
"_____no_output_____"
]
],
[
[
"### Ternary conditional operator\n\n- one-line `if` statements\n- the order of operands is different from C's `?:` operator, the C version of abs would look like this\n\n~~~C\nint x = -2;\nint abs_x = x>=0 ? x : -x;\n~~~\n- should only be used for very short statements\n\n\n`<expr1> if <condition> else <expr2>`",
"_____no_output_____"
]
],
[
[
"n = -2\nabs_n = n if n >= 0 else -n\nabs_n",
"_____no_output_____"
]
],
[
[
"## Lists\n\n- lists are the most frequently used built-in containers\n- basic operations: indexing, length, append, extend\n- lists will be covered in detail next week",
"_____no_output_____"
]
],
[
[
"l = [] # empty list\nl.append(2)\nl.append(2)\nl.append(\"foo\")\n# l = [2, 2, \"foo\"]\n\nlen(l), l",
"_____no_output_____"
]
],
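[
[
"A short sketch of the remaining basic operations, indexing and `extend` (it reuses the list `l` built above, which persists in the shared namespace):\n\n~~~python\nprint(l[0]) # indexing starts at 0 -> 2\nprint(l[-1]) # negative indices count from the end -> foo\nl.extend([\"bar\", 3]) # extend appends every element of another iterable\nprint(len(l)) # 5\n~~~",
"_____no_output_____"
]
],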
[
[
"## Iteration\n\n### Iterating a list",
"_____no_output_____"
]
],
[
[
"for e in [\"foo\", \"bar\"]:\n print(e)",
"_____no_output_____"
]
],
[
[
"## `enumerate`: iterating with an index",
"_____no_output_____"
]
],
[
[
"for idx, element in enumerate([\"foo\", \"bar\"]):\n print(idx, element)",
"_____no_output_____"
]
],
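[
[
"`enumerate` also accepts a `start` argument if counting should begin at a value other than 0 (a small sketch reusing the list above):\n\n~~~python\nfor idx, element in enumerate([\"foo\", \"bar\"], start=1):\n print(idx, element) # prints 1 foo, then 2 bar\n~~~",
"_____no_output_____"
]
],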
[
[
"## `range`: Iterating over a range of integers\n\nThe same in C++:\n~~~C++\nfor (int i=0; i<5; i++)\n cout << i << endl;\n~~~\n\nBy default `range` starts from 0.",
"_____no_output_____"
]
],
[
[
"for i in range(5):\n print(i)",
"_____no_output_____"
]
],
[
[
"Specifying the start of the range:",
"_____no_output_____"
]
],
[
[
"for i in range(2, 5):\n print(i)",
"_____no_output_____"
]
],
[
[
"Specifying the step. Note that in this case we need to specify all three positional arguments.",
"_____no_output_____"
]
],
[
[
"for i in range(0, 10, 2):\n print(i)",
"_____no_output_____"
]
],
[
[
"Negative values:",
"_____no_output_____"
]
],
[
[
"for i in range(-3, 0):\n print(i)",
"_____no_output_____"
],
[
"for i in range(-3, 0, -1):\n print(i)",
"_____no_output_____"
],
[
"for i in range(0, -3, -1):\n print(i)",
"_____no_output_____"
]
],
[
[
"## `break` and `continue`\n\n- `break`: allows early exit from a loop\n- `continue`: allows early jump to next iteration",
"_____no_output_____"
]
],
[
[
"for i in range(10):\n if i % 2 == 0:\n continue\n print(i)",
"_____no_output_____"
],
[
"for i in range(10):\n if i > 4:\n break\n print(i)",
"_____no_output_____"
]
],
[
[
"## `else`\n\n__`else`__ can be used with `for`:",
"_____no_output_____"
]
],
[
[
"numbers = [3, -1, 0, 5, 3, 7]\n\nfor n in numbers:\n if n % 2 == 0:\n break\nelse:\n print(\"Found no even numbers.\")",
"_____no_output_____"
]
],
[
[
"## while",
"_____no_output_____"
]
],
[
[
"i = 0\nwhile i < 5:\n print(i)\n i += 1\ni",
"_____no_output_____"
]
],
[
[
"There is no `do...while` loop in Python.",
"_____no_output_____"
],
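[
"A common idiom to emulate `do...while` is a `while True` loop with a `break` at the end (a minimal sketch; the counter and limit are made up):\n\n~~~python\ni = 0\nwhile True:\n print(i) # the body runs at least once\n i += 1\n if i >= 5: # the loop condition, checked at the end\n break\n~~~",
"_____no_output_____"
],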
[
"# Functions",
"_____no_output_____"
],
[
"Functions can be defined using the `def` keyword:",
"_____no_output_____"
]
],
[
[
"def foo():\n print(\"this is a function\")\n \nfoo()",
"_____no_output_____"
]
],
[
[
"## Function arguments\n\n1. positional\n2. named or keyword arguments\n\nKeyword arguments must follow positional arguments:",
"_____no_output_____"
]
],
[
[
"def foo(arg1, arg2, arg3):\n print(\"arg1 \", arg1)\n print(\"arg2 \", arg2)\n print(\"arg3 \", arg3)\n \nfoo(1, 2, arg3=\"asdfs\")\n# foo(1, arg3=\"asdfs\", 2) # raises SyntaxError",
"_____no_output_____"
],
[
"foo(1, arg3=2, arg2=29)",
"_____no_output_____"
]
],
[
[
"## Default arguments\n\n- arguments can have default values\n- default arguments must follow non-default arguments",
"_____no_output_____"
]
],
[
[
"def foo(arg1, arg2, arg3=3):\n# def foo(arg1, arg2=2, arg3): # raises SyntaxError\n print(\"arg1 \", arg1)\n print(\"arg2 \", arg2)\n print(\"arg3 \", arg3)\nfoo(1, 2)",
"_____no_output_____"
]
],
[
[
"Default arguments need not be specified when calling the function",
"_____no_output_____"
]
],
[
[
"foo(1, 2)",
"_____no_output_____"
]
],
[
[
"They can be specified in any order:",
"_____no_output_____"
]
],
[
[
"foo(arg1=1, arg3=33, arg2=222)",
"_____no_output_____"
]
],
[
[
"If more than one value has default arguments, either can be skipped:",
"_____no_output_____"
]
],
[
[
"def foo(arg1, arg2=2, arg3=3):\n print(\"arg1 \", arg1)\n print(\"arg2 \", arg2)\n print(\"arg3 \", arg3)\n \nfoo(11, 33)\nprint(\"\")\nfoo(11, arg3=33)",
"_____no_output_____"
]
],
[
[
"This mechanism allows having a very large number of arguments.\nMany libraries have functions with dozens of arguments.",
"_____no_output_____"
],
[
"The popular data analysis library `pandas` has functions with dozens of arguments, for example:\n\n~~~python\npandas.read_csv(filepath_or_buffer, sep=',', delimiter=None, header='infer', names=None, index_col=None, usecols=None, squeeze=False, prefix=None, mangle_dupe_cols=True, dtype=None, engine=None, converters=None, true_values=None, false_values=None, skipinitialspace=False, skiprows=None, skipfooter=0, nrows=None, na_values=None, keep_default_na=True, na_filter=True, verbose=False, skip_blank_lines=True, parse_dates=False, infer_datetime_format=False, keep_date_col=False, date_parser=None, dayfirst=False, cache_dates=True, iterator=False, chunksize=None, compression='infer', thousands=None, decimal='.', lineterminator=None, quotechar='\"', quoting=0, doublequote=True, escapechar=None, comment=None, encoding=None, dialect=None, error_bad_lines=True, warn_bad_lines=True, delim_whitespace=False, low_memory=True, memory_map=False, float_precision=None)\n ~~~",
"_____no_output_____"
],
[
"## Variable number of arguments\n\n__`args` and `kwargs`__\n\n- both positional and keyword arguments can be captured in arbitrary numbers using the `*` and `**` operators\n- positional arguments are captured in a tuple",
"_____no_output_____"
]
],
[
[
"def arbitrary_positional_f(*args):\n print(type(args))\n for arg in args:\n print(arg)\n \narbitrary_positional_f(1, 2, -1, \"aaa\", \"444\")\narbitrary_positional_f()\n# arbitrary_positional_f(1, 2, arg=-1) # raises TypeError",
"_____no_output_____"
]
],
[
[
"Keyword arguments are captured in a dictionary:",
"_____no_output_____"
]
],
[
[
"def arbitrary_keyword_f(**kwargs):\n print(type(kwargs))\n for argname, value in kwargs.items():\n print(argname, value)\n \narbitrary_keyword_f(arg1=1, arg2=12)\n# arbitrary_keyword_f(12, arg=12) # TypeError",
"_____no_output_____"
]
],
[
[
"We usually capture both:",
"_____no_output_____"
]
],
[
[
"def arbitrary_arg_f(*args, **kwargs):\n if args:\n print(\"Positional arguments\")\n for arg in args:\n print(arg)\n else:\n print(\"No positional arguments\")\n if kwargs:\n print(\"Keyword arguments\")\n for argname, value in kwargs.items():\n print(argname, value)\n else:\n print(\"No keyword arguments\")\n \narbitrary_arg_f()\narbitrary_arg_f(12, -2, param1=\"foo\")",
"_____no_output_____"
]
],
[
[
"## The return statement\n\n- functions may return more than one value\n - a tuple of the values is returned\n- without an explicit return statement `None` is returned\n- an empty return statement returns `None`",
"_____no_output_____"
]
],
[
[
"def foo(n):\n if n < 0:\n return \"negative\"\n if 0 <= n < 10:\n return \"positive\", n\n # return None\n # return\n\nprint(foo(-2))\nprint(foo(3), type(foo(3)))\nprint(foo(12))",
"_____no_output_____"
]
],
[
[
"# Exception handling\n\nFully typed exception handling:",
"_____no_output_____"
]
],
[
[
"try:\n int(\"abc\")\nexcept ValueError as e:\n print(type(e), e)\n print(e)",
"_____no_output_____"
]
],
[
[
"More than one except clauses may be defined ordered from more specific to least specific:",
"_____no_output_____"
]
],
[
[
"try:\n age = int(input())\n if age < 0:\n raise Exception(\"Age cannot be negative\")\nexcept ValueError as e:\n print(\"ValueError caught\")\nexcept Exception as e:\n print(\"Other exception caught: {}\".format(type(e)))",
"_____no_output_____"
]
],
[
[
"## More than one type of exception can be handled in the same except clause",
"_____no_output_____"
]
],
[
[
"def age_printer(age):\n next_age = age + 1\n print(\"Next year your age will be \" + next_age)\n \ntry:\n your_age = input()\n your_age = int(your_age)\n age_printer(your_age)\nexcept ValueError:\n print(\"ValueError caught\")\nexcept TypeError:\n print(\"TypeError caught\")\n raise",
"_____no_output_____"
],
[
"def age_printer(age):\n next_age = age + 1\n print(\"Next year your age will be \" + next_age)\n \ntry:\n your_age = input()\n your_age = int(your_age)\n age_printer(your_age)\nexcept (ValueError, TypeError) as e:\n print(\"{} caught\".format(type(e).__name__))",
"_____no_output_____"
]
],
[
[
"## `except` without an exception type\n\n- without specifying a type, `except` catches everything but all information about the exception is lost",
"_____no_output_____"
]
],
[
[
"try:\n age = int(input())\n if age < 0:\n raise Exception(\"Age cannot be negative\")\nexcept ValueError:\n print(\"ValueError caught\")\nexcept:\n#except Exception as e:\n print(\"Something else caught\")",
"_____no_output_____"
]
],
[
[
"- the empty `except` must be the last except block since it blocks all others\n- `SyntaxError` otherwise",
"_____no_output_____"
]
],
[
[
"try:\n age = int(input())\n if age < 0:\n raise Exception(\"Age cannot be negative\")\n#except:\n #print(\"Something else caught\")\nexcept ValueError:\n print(\"ValueError caught\")",
"_____no_output_____"
]
],
[
[
"## Base class' except clauses catch derived classes too\n\n`ValueError` subclasses `Exception` so the second `except` never runs:",
"_____no_output_____"
]
],
[
[
"try:\n age = int(input())\n if age < 0:\n raise Exception(\"Age cannot be negative\")\nexcept Exception as e:\n print(\"Exception caught: {}\".format(type(e)))\nexcept ValueError:\n print(\"ValueError caught\")",
"_____no_output_____"
]
],
[
[
"## `finally`\n\nThe `finally` block is guaranteed to run regardless an exception was raised or not",
"_____no_output_____"
]
],
[
[
"try:\n age = int(input())\nexcept Exception as e:\n print(type(e), e)\nfinally:\n print(\"this always runs\")",
"_____no_output_____"
]
],
[
[
"## `else`\n\nTry-except blocks may have an `else` clause that **only** runs if no exception was raised",
"_____no_output_____"
]
],
[
[
"try:\n age = int(input())\nexcept ValueError as e:\n print(\"Exception\", e)\nelse:\n print(\"No exception was raised\")\n # raise Exception(\"Raising an exception in else\")\nfinally:\n print(\"this always runs\")",
"_____no_output_____"
]
],
[
[
"### `raise` keyword\n\n- `raise` throws/raises an exception\n- an empty `raise` in an `except` block reraises the exception",
"_____no_output_____"
]
],
[
[
"try:\n int(\"not a number\")\nexcept Exception:\n # important log message\n print(\"Caught an exception\")\n # raise",
"_____no_output_____"
]
],
[
[
"### Defining exceptions\n\nAny type that subclasses `Exception` (`BaseException` to be exact) can be used as an exception object:",
"_____no_output_____"
]
],
[
[
"class NegativeAgeError(Exception):\n pass\n\ntry:\n age = int(input())\n if age < 0:\n raise NegativeAgeError(\"Age cannot be negative. Invalid age: {}\".format(age))\nexcept NegativeAgeError as e:\n print(e)\nexcept Exception as e:\n print(\"Something else happened. Caught {}, with message {}\".format(type(e), e))",
"_____no_output_____"
]
],
[
[
"Using exception for trial-and-error is considered _Pythonic_:",
"_____no_output_____"
]
],
[
[
"try:\n v = input()\n int(v)\nexcept ValueError:\n print(\"not an int\")\nelse:\n print(\"looks like an int\")",
"_____no_output_____"
]
],
[
[
"# Zen of Python",
"_____no_output_____"
]
],
[
[
"import this",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e714fe8f4ce3ee9a667d57e2323d2717c8797c65 | 57,462 | ipynb | Jupyter Notebook | MachineLearning/Pandas.ipynb | Abhishek1103/PythonScripts | 48b9e2739510efe09aa4cc95b059546b983e8429 | [
"MIT"
] | 1 | 2018-02-09T22:14:13.000Z | 2018-02-09T22:14:13.000Z | MachineLearning/Pandas.ipynb | Abhishek1103/PythonScripts | 48b9e2739510efe09aa4cc95b059546b983e8429 | [
"MIT"
] | null | null | null | MachineLearning/Pandas.ipynb | Abhishek1103/PythonScripts | 48b9e2739510efe09aa4cc95b059546b983e8429 | [
"MIT"
] | 1 | 2018-12-19T17:08:41.000Z | 2018-12-19T17:08:41.000Z | 24.929284 | 114 | 0.369288 | [
[
[
"## Lesson 9 - Pandas Basics\n\nOutline:\n\n* Series\n* DataFrame\n* index, columns\n* dtypes, info, describe\n* read_csv\n* head, tail\n* loc, iloc, ix\n* to_datetime",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np",
"_____no_output_____"
]
],
[
[
"### Series",
"_____no_output_____"
]
],
[
[
"# a list of strings\nmy_list = ['cubs', 'pirates', 'giants', 'yankees', 'donkeys']\nmy_list",
"_____no_output_____"
],
[
"# pandas Series from list\nseries_from_list = pd.Series(my_list)\nseries_from_list",
"_____no_output_____"
],
[
"# indexing a Series is similar to lists and arrays\nseries_from_list[3]",
"_____no_output_____"
],
[
"# a numpy array\nmy_array = np.random.rand(5)\nmy_array",
"_____no_output_____"
],
[
"# pandas Series from array\nseries_from_array = pd.Series(my_array)\nseries_from_array",
"_____no_output_____"
],
[
"# indexing also supports slices\nseries_from_array[3:]",
"_____no_output_____"
]
],
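[
[
"A Series can also be built from a dict, in which case the keys become the index (a small sketch with made-up values):\n\n~~~python\nseries_from_dict = pd.Series({'cubs': 0.1, 'pirates': 0.5})\nseries_from_dict['cubs'] # label-based lookup -> 0.1\n~~~",
"_____no_output_____"
]
],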
[
[
"### DataFrame",
"_____no_output_____"
],
[
"#### 2D array to DataFrame",
"_____no_output_____"
]
],
[
[
"# create a 2D numpy array\nmy_2d_array = np.random.randn(5,5)\nmy_2d_array",
"_____no_output_____"
],
[
"# make a DataFrame from the 2D numpy array\npd.DataFrame(my_2d_array)",
"_____no_output_____"
],
[
"# we can set the index and column labels when we create the DataFrame\ndf_from_2d_array = pd.DataFrame(my_2d_array, \n index=['row1', 'row2', 'row3', 'row4', 'row5'], \n columns=['col1', 'col2', 'col3', 'col4', 'col5'])\ndf_from_2d_array",
"_____no_output_____"
]
],
[
[
"#### List or Series to DataFrame",
"_____no_output_____"
]
],
[
[
"# method 1: getting data as a list of series will orient them as rows\nx = pd.DataFrame(data=[series_from_list, series_from_array])\nx",
"_____no_output_____"
],
[
"# we can transpose a DataFrame using T or transpose\nx.T",
"_____no_output_____"
],
[
"x.transpose()",
"_____no_output_____"
],
[
"# method 2: pass list/Series as value of dictionary\ny = pd.DataFrame({'a': series_from_list, 'b': series_from_array}, dtype=str)\ny",
"_____no_output_____"
],
[
"# method 3: use pd.concat to combine series in column orientation\ndf = pd.concat([series_from_list, series_from_array], axis=1)\ndf",
"_____no_output_____"
]
],
[
[
"### index, columns",
"_____no_output_____"
]
],
[
[
"# set the index and column names to an existing DataFrame\ndf.index = ['a', 'b', 'c', 'd', 'e']\ndf.columns = ['team', 'random']\ndf",
"_____no_output_____"
],
[
"# add a new column to the DataFrame\ndf['integers'] = [2, 3, 5, 8, 13]\ndf",
"_____no_output_____"
]
],
[
[
"### dtypes, info, describe",
"_____no_output_____"
]
],
[
[
"# gives the datatype of each column\ndf.dtypes",
"_____no_output_____"
],
[
"df.info()",
"<class 'pandas.core.frame.DataFrame'>\nIndex: 5 entries, a to e\nData columns (total 3 columns):\nteam 5 non-null object\nrandom 5 non-null float64\nintegers 5 non-null int64\ndtypes: float64(1), int64(1), object(1)\nmemory usage: 160.0+ bytes\n"
],
[
"df.describe()",
"_____no_output_____"
]
],
[
[
"### read_csv",
"_____no_output_____"
]
],
[
[
"# by default column headers are the first row and row indexes are integers starting from zero\ndf_sio = pd.read_csv('scripps_pier_20151110.csv')",
"_____no_output_____"
],
[
"# by default, read_csv will infer the object types\ndf_sio.dtypes",
"_____no_output_____"
],
[
"df_sio.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 66 entries, 0 to 65\nData columns (total 5 columns):\nDate 66 non-null object\nchl (ug/L) 66 non-null float64\npres (dbar) 66 non-null float64\nsal (PSU) 66 non-null float64\ntemp (C) 66 non-null float64\ndtypes: float64(4), object(1)\nmemory usage: 2.7+ KB\n"
],
[
"df_sio.describe()",
"_____no_output_____"
],
[
"# we can also specify the dtype (and specify index and header to defaults)\n# sometimes it's better to specify the dtype as object and convert to int, float, etc. later\ndf_sio = pd.read_csv('scripps_pier_20151110.csv', dtype=object, index_col=None, header=0)",
"_____no_output_____"
],
[
"df_sio.dtypes",
"_____no_output_____"
]
],
[
[
"#### Changing dtype of columns",
"_____no_output_____"
]
],
[
[
"# method 1: list comprehension (one column)\ndf_sio['chl (ug/L)'] = [float(x) for x in df_sio['chl (ug/L)']]",
"_____no_output_____"
],
[
"# method 2: pd.to_numeric (one column)\ndf_sio['pres (dbar)'] = pd.to_numeric(df_sio['pres (dbar)'])",
"_____no_output_____"
],
[
"# method 3: apply(pd.to_numeric) (multiple columns)\ndf_sio[['sal (PSU)','temp (C)']] = df_sio[['sal (PSU)','temp (C)']].apply(pd.to_numeric)",
"_____no_output_____"
],
[
"df_sio.dtypes",
"_____no_output_____"
]
],
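[
[
"Another common option, not used above, is `astype`, which can convert several columns in a single call (a sketch that assumes the columns still hold strings):\n\n~~~python\ndf_sio = df_sio.astype({'sal (PSU)': float, 'temp (C)': float})\n~~~",
"_____no_output_____"
]
],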
[
[
"### head, tail",
"_____no_output_____"
]
],
[
[
"# add a number to change the number of rows printed\ndf_sio.head(7)",
"_____no_output_____"
],
[
"# tail works the same way\ndf_sio.tail(3)",
"_____no_output_____"
]
],
[
[
"### loc, iloc, ix",
"_____no_output_____"
],
[
"Pandas's three indexing methods defined:\n\n* loc works on labels in the index.\n* iloc works on the positions in the index (so it only takes integers).\n* ix usually tries to behave like loc but falls back to behaving like iloc if the label is not in the index.",
"_____no_output_____"
]
],
[
[
"df",
"_____no_output_____"
]
],
[
[
"#### brackets only -- column by header",
"_____no_output_____"
]
],
[
[
"# to get a column (Series), use the column header (don't need .loc, .iloc, or .ix)\ndf['team']",
"_____no_output_____"
],
[
"# for multiple columns, put a list inside the brackets (so two sets of brackets)\ndf[['team', 'random']]",
"_____no_output_____"
]
],
[
[
"#### loc -- row by index",
"_____no_output_____"
]
],
[
[
"# to get a row by name, use .loc with the row index\ndf.loc['a']",
"_____no_output_____"
],
[
"# for multiple rows, put a list inside the brackets (so two sets of brackets)\ndf.loc[['a', 'd']]",
"_____no_output_____"
]
],
[
[
"#### iloc -- row (or column) by position",
"_____no_output_____"
]
],
[
[
"# to get a row by position, use .iloc with the row number\ndf.iloc[0]",
"_____no_output_____"
],
[
"# for multiple rows, put a list inside the brackets (so two sets of brackets)\ndf.iloc[[0, 3]]",
"_____no_output_____"
],
[
"# or pass a slice\ndf.iloc[2:]",
"_____no_output_____"
],
[
"# iloc also works with columns\ndf.iloc[:,[0, 2]]",
"_____no_output_____"
]
],
[
[
"#### ix -- row (or column) by index or position",
"_____no_output_____"
]
],
[
[
"# ix supports both index labels and numbers\ndf.ix['d']",
"_____no_output_____"
],
[
"df.ix[3]",
"_____no_output_____"
],
[
"# ix also works with column labels and numbers\ndf.ix[:, 1]",
"_____no_output_____"
],
[
"df.ix[:, 'random']",
"_____no_output_____"
]
],
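[
[
"Note: `.ix` was deprecated in pandas 0.20 and removed in pandas 1.0, precisely because mixing label- and position-based lookups is ambiguous. On modern pandas the calls above can be rewritten with `loc`/`iloc` (a sketch using the same `df`):\n\n~~~python\ndf.loc['d'] # label-based row lookup\ndf.iloc[3] # position-based row lookup\ndf.iloc[:, 1] # position-based column lookup\ndf.loc[:, 'random'] # label-based column lookup\n~~~",
"_____no_output_____"
]
],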
[
[
"### to_datetime\n\nWe will cover time series in greater detail in a future lesson.",
"_____no_output_____"
]
],
[
[
"df_sio.head()",
"_____no_output_____"
],
[
"time = pd.to_datetime(df_sio['Date'])\ntime.head()",
"_____no_output_____"
],
[
"df_sio['Date'] = time",
"_____no_output_____"
],
[
"df_sio.head()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
e7150608bc8e8b1b43432572057eb9ba3790c1f4 | 13,811 | ipynb | Jupyter Notebook | analysis_notebooks/sigmaSecondCorrection.ipynb | villano-lab/nrFano_paper2019 | f44565bfb3e45b2dfbe2a73cba9f620a7120abd7 | [
"MIT"
] | 2 | 2020-04-06T17:27:33.000Z | 2022-03-30T20:38:54.000Z | analysis_notebooks/sigmaSecondCorrection.ipynb | villano-lab/nrFano_paper2019 | f44565bfb3e45b2dfbe2a73cba9f620a7120abd7 | [
"MIT"
] | null | null | null | analysis_notebooks/sigmaSecondCorrection.ipynb | villano-lab/nrFano_paper2019 | f44565bfb3e45b2dfbe2a73cba9f620a7120abd7 | [
"MIT"
] | null | null | null | 29.893939 | 310 | 0.551082 | [
[
[
"# The difference between the Edelweiss resolution function and the true Yield variance\n\n## The problem\nWe perform the fit to the data using an approximation to the yield variance, not the true yield variance. This begs the question, \"does this impact our answer significantly?\"\n\n## What this notebook investigates\nThis notebook samples the posterior distribution and, for each parameter set sampled, stores the difference between the true yield and the estimate used by the fit.\n\nThis notebook focuses on a single energy.v",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\n%matplotlib inline\nimport h5py\nimport numpy as np\nimport pandas as pd\nfrom astropy.table import Table, Column, vstack\nfrom astropy.io.misc.hdf5 import read_table_hdf5, write_table_hdf5\n\nimport sys\nsys.path.append('../python/')\nfrom EdwRes import *\nfrom prob_dist import *\nfrom checkDifference_yieldVariance import *",
"GGA3/NR/4.0/5.556E-02/0.0380/000/0.1493/0.1782/0.9975/\nGGA3/NR/4.0/5.556E-02/0.0381/000/0.1537/0.1703/0.9948/\n"
],
[
"# We'll look at the Er values of the data points\n# import data from Edelweiss\nresNR_data = pd.read_csv(\"data/edelweiss_NRwidth_GGA3_data.txt\", skiprows=1, \\\n names=['E_recoil', 'sig_NR', 'E_recoil_err', 'sig_NR_err'], \\\n delim_whitespace=True)\n\n# the sorting is necessary!\n# otherwise the mask defined below will select the wrong data\nresNR_data = resNR_data.sort_values(by='E_recoil')\nNR_data = {'Erecoil': resNR_data[\"E_recoil\"][2::], 'sigma': resNR_data[\"sig_NR\"][2::], 'sigma_err': resNR_data[\"sig_NR_err\"][2::]}\nEr = np.sort(NR_data['Erecoil'])\nErecoil = Er[0]\n#print (NR_data['Erecoil'])\n#print (NR_data['sigma'])\n#print (NR_data['sigma_err'])\n#print (len(samples))\n#print(np.random.randint(len(samples), size=10))\n\nfilenames = []\nfor Erecoil in NR_data['Erecoil']:\n filename = 'data/yield_accuracy_Erecoil_%.2f_keV_all_corrAB_Aug2.h5' % Erecoil\n filenames.append(filename)\n \nprint (filenames)",
"['data/yield_accuracy_Erecoil_24.50_keV_all_corrAB_Aug2.h5', 'data/yield_accuracy_Erecoil_34.22_keV_all_corrAB_Aug2.h5', 'data/yield_accuracy_Erecoil_44.26_keV_all_corrAB_Aug2.h5', 'data/yield_accuracy_Erecoil_58.40_keV_all_corrAB_Aug2.h5', 'data/yield_accuracy_Erecoil_97.72_keV_all_corrAB_Aug2.h5']\n"
],
[
"# read the data into a pandas dataframe\ndf_24keV = pd.read_hdf(filenames[0], key='table')\ndf_34keV = pd.read_hdf(filenames[1], key='table')\ndf_44keV = pd.read_hdf(filenames[2], key='table')\ndf_58keV = pd.read_hdf(filenames[3], key='table')\ndf_97keV = pd.read_hdf(filenames[4], key='table')\n#df_97keV.drop_duplicates()\n\ndf_24keV['Yield'] = df_24keV['A']*np.power(df_24keV['energy_recoil_keV'], df_24keV['B'])\ndf_34keV['Yield'] = df_34keV['A']*np.power(df_34keV['energy_recoil_keV'], df_34keV['B'])\ndf_44keV['Yield'] = df_44keV['A']*np.power(df_44keV['energy_recoil_keV'], df_44keV['B'])\ndf_58keV['Yield'] = df_58keV['A']*np.power(df_58keV['energy_recoil_keV'], df_58keV['B'])\ndf_97keV['Yield'] = df_97keV['A']*np.power(df_97keV['energy_recoil_keV'], df_97keV['B'])",
"_____no_output_____"
],
[
"from sklearn.linear_model import LinearRegression\n\nmask = df_24keV['true_yield_sig'].notnull()\ny = df_24keV[mask]['true_yield_sig'] - df_24keV[mask]['cor1_yield_sig']\nX = df_24keV[mask][['aH', 'scale', 'A', 'B']]\n\nreg = LinearRegression().fit(X, y)\n\nprint(reg.score(X, y))\n\nprint (\"coefficents: \", reg.coef_)\nprint (\"intercept: \", reg.intercept_)\n\naH, scale, A, B = 0.0381134613, 0.994778557, 0.153737587, 0.170327657\nX0 = np.array([[aH, scale, A, B]])\nprint(reg.predict(X0))\n\n\npredicted=0\nprint('CALCULATION')\nprint('intercept: {}'.format(reg.intercept_))\nfor i,coef in enumerate(reg.coef_):\n #print(i)\n print('coef X X0 = {:01.7f} X {:01.7f} = {:01.7f}'.format(coef,X0[0,i],coef*X0[0,i]))\n #print(X0[0,i])\n predicted+=coef*X0[0,i]\n \npredicted+=reg.intercept_\nprint(predicted)",
"0.9881916830492861\ncoefficents: [0.00552431 0.00133703 0.01633244 0.02117115]\nintercept: -0.007662520580006483\n[-4.98203281e-06]\nCALCULATION\nintercept: -0.007662520580006483\ncoef X X0 = 0.0055243 X 0.0381135 = 0.0002106\ncoef X X0 = 0.0013370 X 0.9947786 = 0.0013300\ncoef X X0 = 0.0163324 X 0.1537376 = 0.0025109\ncoef X X0 = 0.0211711 X 0.1703277 = 0.0036060\n-4.982032810407766e-06\n"
],
[
"mask = df_34keV['true_yield_sig'].notnull()\ny = df_34keV[mask]['true_yield_sig'] - df_34keV[mask]['cor1_yield_sig']\nX = df_34keV[mask][['aH', 'scale', 'A', 'B']]\n\nreg = LinearRegression().fit(X, y)\n\nprint(reg.score(X, y))\n\nprint (\"coefficents: \", reg.coef_)\nprint (\"intercept: \", reg.intercept_)\n\naH, scale, A, B = 0.0381134613, 0.994778557, 0.153737587, 0.170327657\nX0 = np.array([[aH, scale, A, B]])\nprint(reg.predict(X0))\n\n\npredicted=0\nfor i,coef in enumerate(reg.coef_):\n predicted+=coef*X0[0,i]\n \npredicted+=reg.intercept_\nprint(predicted)",
"0.9883018033806901\ncoefficents: [0.00891606 0.00139528 0.013981 0.01819466]\nintercept: -0.006976876162577476\n[-5.97786977e-07]\n-5.977869772712946e-07\n"
],
[
"mask = df_44keV['true_yield_sig'].notnull()\ny = df_44keV[mask]['true_yield_sig'] - df_44keV[mask]['cor1_yield_sig']\nX = df_44keV[mask][['aH', 'scale', 'A', 'B']]\n\nreg = LinearRegression().fit(X, y)\n\nprint(reg.score(X, y))\n\nprint (\"coefficents: \", reg.coef_)\nprint (\"intercept: \", reg.intercept_)\n\naH, scale, A, B = 0.0381134613, 0.994778557, 0.153737587, 0.170327657\nX0 = np.array([[aH, scale, A, B]])\nprint(reg.predict(X0))\n\n\npredicted=0\nfor i,coef in enumerate(reg.coef_):\n predicted+=coef*X0[0,i]\n \npredicted+=reg.intercept_\nprint(predicted)",
"0.9870510077511788\ncoefficents: [0.01268575 0.00128421 0.01304974 0.01676499]\nintercept: -0.006621463867453043\n[1.31282427e-06]\n1.312824273716831e-06\n"
],
[
"mask = df_58keV['true_yield_sig'].notnull()\ny = df_58keV[mask]['true_yield_sig'] - df_58keV[mask]['cor1_yield_sig']\nX = df_58keV[mask][['aH', 'scale', 'A', 'B']]\n\nreg = LinearRegression().fit(X, y)\n\nprint(reg.score(X, y))\n\nprint (\"coefficents: \", reg.coef_)\nprint (\"intercept: \", reg.intercept_)\n\naH, scale, A, B = 0.0381134613, 0.994778557, 0.153737587, 0.170327657\nX0 = np.array([[aH, scale, A, B]])\nprint(reg.predict(X0))\n\n\npredicted=0\nfor i,coef in enumerate(reg.coef_):\n predicted+=coef*X0[0,i]\n \npredicted+=reg.intercept_\nprint(predicted)",
"0.9848943746599956\ncoefficents: [0.0195214 0.00114802 0.01256059 0.01603726]\nintercept: -0.00654459822082259\n[4.08033284e-06]\n4.080332840841749e-06\n"
],
[
"mask = df_97keV['true_yield_sig'].notnull()\ny = df_97keV[mask]['true_yield_sig'] - df_97keV[mask]['cor1_yield_sig']\nX = df_97keV[mask][['aH', 'scale', 'A', 'B']]\n\nreg = LinearRegression().fit(X, y)\n\nprint(reg.score(X, y))\n\nprint (\"coefficents: \", reg.coef_)\nprint (\"intercept: \", reg.intercept_)\n\naH, scale, A, B = 0.0381134613, 0.994778557, 0.153737587, 0.170327657\nX0 = np.array([[aH, scale, A, B]])\nprint(reg.predict(X0))\n\n\npredicted=0\nfor i,coef in enumerate(reg.coef_):\n predicted+=coef*X0[0,i]\n \npredicted+=reg.intercept_\nprint(predicted)",
"0.9820387798382159\ncoefficents: [0.02973745 0.00088643 0.01310807 0.01668303]\nintercept: -0.006859726936860776\n[1.22579782e-05]\n1.2257978188948138e-05\n"
],
[
"from edw_data_util import *\nER_data, NR_data = getERNR()",
"_____no_output_____"
],
[
"print(NR_data['Erecoil'])",
"3 24.5012\n6 34.2156\n2 44.2627\n5 58.4014\n4 97.7172\nName: E_recoil, dtype: float64\n"
],
[
"import prob_dist as pd\nimport imp\nimp.reload(pd)\n\nEn = 24.5\n\naH, scale, A, B = 0.0381134613, 0.994778557, 0.153737587, 0.170327657\nVmod = 4.0*scale\ncorr2 = pd.series_NRQ_sig_c2(Er=En,F=0.0,V=Vmod,aH=aH,alpha=(1/18.0),A=A,B=B,label='GGA3',verbose=True)\n\nprint(corr2)",
"[0.00552431 0.00133703 0.01633244 0.02117115]\n-0.007662520580006483\nintercept: -0.007662520580006483\ncoef X X0 = 0.0055243 X 0.0381135 = 0.0002106\ncoef X X0 = 0.0013370 X 0.9947786 = 0.0013300\ncoef X X0 = 0.0163324 X 0.1537376 = 0.0025109\ncoef X X0 = 0.0211711 X 0.1703277 = 0.0036060\n-4.978939628740367e-06\n"
],
[
"var0 = pd.series_NRQ_var(Er=En,F=0.0,V=Vmod,aH=aH,alpha=(1/18.0),A=A,B=B,label='GGA3')\nvar1 = pd.series_NRQ_var_corr1(Er=En,F=0.0,V=Vmod,aH=aH,alpha=(1/18.0),A=A,B=B,label='GGA3')\nvar2 = pd.series_NRQ_var_corr2(Er=En,F=0.0,V=Vmod,aH=aH,alpha=(1/18.0),A=A,B=B,label='GGA3',verbose=True)\n\nprint(var0)\nprint(var1)\nprint(var2)\nprint(np.sqrt(var1)-np.sqrt(var2))",
"[0.00552431 0.00133703 0.01633244 0.02117115]\n-0.007662520580006483\nintercept: -0.007662520580006483\ncoef X X0 = 0.0055243 X 0.0381135 = 0.0002106\ncoef X X0 = 0.0013370 X 0.9947786 = 0.0013300\ncoef X X0 = 0.0163324 X 0.1537376 = 0.0025109\ncoef X X0 = 0.0211711 X 0.1703277 = 0.0036060\n0.002268664529354329\n0.0024378325770850356\n0.0024373409374400827\n4.978939628738632e-06\n"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e71520bb00a464e0b4eca0b22c2bad423ef656f4 | 784,983 | ipynb | Jupyter Notebook | notebook.ipynb | ralphxiu/machine-learning-nanodegree-capstone | 5268c7d1c1aa5770808ecbf39ceec874370c1e3c | [
"MIT"
] | null | null | null | notebook.ipynb | ralphxiu/machine-learning-nanodegree-capstone | 5268c7d1c1aa5770808ecbf39ceec874370c1e3c | [
"MIT"
] | null | null | null | notebook.ipynb | ralphxiu/machine-learning-nanodegree-capstone | 5268c7d1c1aa5770808ecbf39ceec874370c1e3c | [
"MIT"
] | null | null | null | 402.761929 | 251,172 | 0.909553 | [
[
[
"<div style=\"text-align:center; font-size:28px; font-weight: bold; line-height: 200%\" markdown=\"1\">Machine Learning Nanodegree Capstone Project</div>\n<div style=\"text-align:center; font-size:22px; font-weight: bold; line-height: 200%\" markdown=\"1\">Stock Price Prediction with SVR and LSTM RNN -- A Comparative Approach</div>",
"_____no_output_____"
],
[
"# Table of contents\n1. [Import Modules and Data](##1)\n2. [Data Exploration and Preprocessing](##2)\n3. [Implement and Evaluate The SVR Model](##3)\n3. [Implement and Evaluate the LSTM Model](##4)",
"_____no_output_____"
],
[
"# 1. Import Modules and Data <a class=\"anchor\" id=\"#1\"></a>",
"_____no_output_____"
],
[
"## 1) Import required modules\nmain modules include numpy, sklearn, pandas, matplotlib",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom pandas import datetime\nimport math, time\nimport itertools\nfrom sklearn.preprocessing import MinMaxScaler, StandardScaler\nfrom sklearn.svm import SVR\nimport datetime\nfrom operator import itemgetter\nfrom sklearn.metrics import mean_squared_error\nfrom math import sqrt\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Dropout, Activation\nfrom keras.layers.recurrent import LSTM\nfrom keras.models import load_model\nfrom keras.utils.vis_utils import plot_model\nimport keras\nimport h5py\nimport requests\nimport os",
"_____no_output_____"
]
],
[
[
"## 2) Read data and transform them to pandas dataframe",
"_____no_output_____"
]
],
[
[
"df_origin = pd.read_csv(\"./input/GOOGL.csv\", index_col = 0, parse_dates=['Date'])\ndf_origin = df_origin.rename(str.lower, axis='columns')\ndf_origin.head()",
"_____no_output_____"
]
],
[
[
"## 3) Data cleasing\nFrom empirical experience, we know that this dataset should not contain any NaN values. Checking to make sure that it's the case.",
"_____no_output_____"
]
],
[
[
"df_origin.isnull().values.any()",
"_____no_output_____"
]
],
[
[
"## 4) Reverse the order in chronological order",
"_____no_output_____"
]
],
[
[
"df_sorted = df_origin.sort_index(axis=0 ,ascending=True)\ndf_sorted.head()",
"_____no_output_____"
]
],
[
[
"# 2. Data Exploration and Preprocessing <a class=\"anchor\" id=\"#2\"></a>",
"_____no_output_____"
],
[
"## 1) Statistical traits of the dataset",
"_____no_output_____"
]
],
[
[
"print(df_sorted.mean(), df_sorted.std())",
"open 6.689058e+02\nhigh 6.741916e+02\nlow 6.630564e+02\nclose 6.688434e+02\nvolume 4.088972e+06\nadj close 4.661172e+02\ndtype: float64 open 1.652566e+02\nhigh 1.658896e+02\nlow 1.644025e+02\nclose 1.649341e+02\nvolume 2.840959e+06\nadj close 1.972226e+02\ndtype: float64\n"
]
],
[
[
"From the traits we can tell the volume is on a much larger scale than the rest of the features.",
"_____no_output_____"
],
[
"## 1) Visualize the stock price change since inception",
"_____no_output_____"
]
],
[
[
"df_sorted[['adj close', 'high', 'low']].plot(figsize=(20, 10))\nplt.xlabel('timeline')\nplt.ylabel('stock price')\nplt.show()",
"_____no_output_____"
]
],
[
[
"## 2) Normalize the data",
"_____no_output_____"
]
],
[
[
"# global scaler to denormalize the data for better visualization after evaluating the model\n# standard_scaler for SVR model\nstandard_scaler = StandardScaler()\n# min_max_scaler for RNN model\nmin_max_scaler = MinMaxScaler()\n\ndef normalize_data(scaler, df):\n df_new = df.copy()\n for symbol in df_new.columns:\n df_new[symbol] = scaler.fit_transform(df_new[symbol].values.reshape(-1,1))\n print(df_new.head())\n return df_new\n\ndf_normalized_std = normalize_data(standard_scaler, df_sorted)\ndf_normalized_minmax = normalize_data(min_max_scaler, df_sorted)",
" open high low close volume adj close\nDate \n2009-05-22 -2.847057 -2.862058 -2.840449 -1.669832 -0.230709 -1.365145\n2009-05-26 -1.676333 -1.623121 -1.661317 -1.603971 0.744205 -1.337579\n2009-05-27 -1.593471 -1.581758 -1.571210 -1.596694 0.694843 -1.334532\n2009-05-28 -1.575071 -1.583205 -1.572427 -1.567342 0.437718 -1.322247\n2009-05-29 -1.554310 -1.549379 -1.535375 -1.525921 0.423247 -1.304910\n open high low close volume adj close\nDate \n2009-05-22 0.000000 0.000000 0.000000 0.000000 0.100109 0.000000\n2009-05-26 0.188104 0.199616 0.189557 0.013137 0.195266 0.007233\n2009-05-27 0.201417 0.206280 0.204042 0.014589 0.190448 0.008032\n2009-05-28 0.204374 0.206047 0.203847 0.020443 0.165351 0.011255\n2009-05-29 0.207709 0.211497 0.209803 0.028706 0.163939 0.015804\n"
],
[
"def visualize_df(df_input, features=None):\n df_input[features or df_input.columns].plot(figsize=(20, 10))\n plt.xlabel('timeline')\n plt.ylabel('stock price')\n plt.show()\n#visualize_df(df_normalized_std, features=['adj close', 'high', 'low'])\nvisualize_df(df_normalized_std)\nvisualize_df(df_normalized_minmax)",
"_____no_output_____"
]
],
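[
[
"One caveat worth flagging: because `normalize_data` calls `fit_transform` column by column in a loop, each global scaler ends up fitted to the last column only ('adj close' here). The `inverse_transform` calls later in this notebook therefore only denormalize correctly because the prediction target happens to be 'adj close'. A more robust sketch keeps a dedicated scaler for the target column (`target_scaler` and `predictions` are placeholder names):\n\n~~~python\ntarget_scaler = MinMaxScaler()\ntarget_scaler.fit(df_sorted[['adj close']].values)\n# later: target_scaler.inverse_transform(predictions.reshape(-1, 1))\n~~~",
"_____no_output_____"
]
],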
[
[
"# 3. Implement and Evaluate The SVR Model <a class=\"anchor\" id=\"#3\"></a>",
"_____no_output_____"
],
[
"## 1) Choose technical indicators\nthe selected technical indicators include\n",
"_____no_output_____"
],
[
"| technical indicator | description |formula |\n|---------------------|--------------|------------------------|\n| momentum | general trend over a certain period | price(t) - price(t-n) |\n| rolling average | mean of prices during a certain period| sum(price(t-n),...,price(n))/n |\n| rolling standard deviation | std of prices during a certain period| std(price(t-n),...,price(t))|\n| average true range | volitility of the market|ATR(t)=((n-1) * ATR(t-1)+Tr(t))/n |\n| triple exponential moving average | smooth the insignificant movements | TR(t)/TR(t-1) where TR(t)=EMA(EMA(EMA(Price(t)))) over n days period |\nnote: \n+ rolling average and rolling std are all critical part in calculating the bollinger band which provides good prediction on the range the stock price would oscillate\n+ Tr(t)=Max(Abs(High-Low), Abs(Hight-Close(t-1)), Abs(Low-Close(t-1)) \nSince the purpose of this project is for demonstrating the predictive power of a traditional regression model and the deep learning model in a comparative manner rather than to squeeze every bit of performance out of the models, only a selected few of technical indicators were utilized in training the model. Provided that adding more indicators could potentially improve the performance.",
"_____no_output_____"
]
],
[
[
"# moving average\ndef calc_technical_indicators(df_input, window_size=10):\n df = df_input.copy()\n df['momentum'] = df['adj close'] - df['adj close'].shift(window_size)\n df['rolling average'] = df['adj close'].rolling(window_size).mean()\n df['rolling std'] = df['adj close'].rolling(window_size).std()\n df['tr1'] = abs (df['high'] - df['low'])\n df['tr2'] = abs (df['high'] - df['close'].shift())\n df['tr3'] = abs (df['low'] - df['close'].shift())\n df['true range'] = df[['tr1', 'tr2', 'tr3']].max(axis=1)\n df['average true range'] = df['true range'].ewm(span=window_size).mean()\n# df['tema'] = df['close'].ewm(span=10).mean()\n df = df.fillna(method='backfill')\n df = df.drop(columns=['tr1', 'tr2', 'tr3', 'true range'])\n return df\ndf_svr = calc_technical_indicators(df_normalized_std, 8)\ndf_svr.head(30)",
"_____no_output_____"
]
],
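[
[
"Note that `ewm(span=window_size).mean()` above gives an exponentially weighted ATR, a smoother stand-in for Wilder's recursion from the table. For comparison, a sketch of the textbook recursion (it assumes the `true range` series is still available, whereas the function above drops that column):\n\n~~~python\ndef wilder_atr(true_range, n=10):\n atr = true_range.copy()\n atr.iloc[:n] = true_range.iloc[:n].mean() # seed with a simple average\n for t in range(n, len(true_range)):\n atr.iloc[t] = ((n - 1) * atr.iloc[t - 1] + true_range.iloc[t]) / n\n return atr\n~~~",
"_____no_output_____"
]
],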
[
[
"## 2) Create training set and testing set",
"_____no_output_____"
]
],
[
[
"def create_train_test_datasets(df_input, window_size, features=[]):\n if features:\n number_of_features = len(features)\n dataset = df_input.copy()[features].values\n else:\n number_of_features = len(df_input.columns)\n dataset = df_input.copy().values\n window_size = window_size + 1\n result = []\n for index in range(len(dataset) - window_size): # maxmimum date = lastest date - sequence length\n result.append(dataset[index: index + window_size]) # index : index + 22days\n result = np.array(result)\n\n row = round(0.9 * result.shape[0]) # 90% split\n data_train = result[:int(row), :] # 90% date, all features\n\n X_train = data_train[:, :-1] \n y_train = data_train[:, -1][:,-1]\n print('Shapes: \\n processed data : {}\\n data_train - {}\\n X_train - {}\\n y_train : {}\\n'.format(result.shape, data_train.shape, X_train.shape, y_train.shape))\n\n X_test = result[int(row):, :-1] \n y_test = result[int(row):, -1][:,-1]\n print('Shapes: \\n X_test - {}\\n y_test : {}\\n'.format(X_test.shape, y_test.shape))\n\n X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], number_of_features))\n X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], number_of_features))\n print('X_train has a shape of {} | X_train sample at 1: \\n{}'.format(X_train.shape, X_train[1]))\n print('y_train has a shape of {} | y_train sample at 1: \\n{}'.format(y_train.shape, y_train[1]))\n \n return [X_train, y_train, X_test, y_test]\n",
"_____no_output_____"
]
],
[
[
"## 3) Implement the SVR Model",
"_____no_output_____"
]
],
[
[
"# create the SVR model\nsvr = SVR(kernel='rbf', C=1, gamma=0.1, cache_size=200)\ndef transform_3d_to_2d(data_3d):\n dimensions = data_3d.shape\n data_2d = np.reshape(data_3d, (dimensions[0], dimensions[1] * dimensions[2]))\n return data_2d\nX_train, y_train, X_test, y_test = create_train_test_datasets(df_svr.iloc[10:], 1, features=['momentum', 'rolling average', 'rolling std', 'average true range', 'adj close'])\nX_train_2d = transform_3d_to_2d(X_train)\nprint(X_train_2d[1])\nsvr.fit(X_train_2d, y_train)\nsvr.get_params()",
"Shapes: \n processed data : (1989, 2, 5)\n data_train - (1790, 2, 5)\n X_train - (1790, 1, 5)\n y_train : (1790,)\n\nShapes: \n X_test - (199, 1, 5)\n y_test : (199,)\n\nX_train has a shape of (1790, 1, 5) | X_train sample at 1: \n[[ 0.06401788 -1.26525065 0.02217636 0.02825994 -1.25822886]]\ny_train has a shape of (1790,) | y_train sample at 1: \n-1.2658948026248575\n[ 0.06401788 -1.26525065 0.02217636 0.02825994 -1.25822886]\n"
]
],
[
[
"## 4) Calculate the performance score and visualize the results",
"_____no_output_____"
]
],
[
[
"X_test_2d = transform_3d_to_2d(X_test)\ntrain_predict = svr.predict(X_train_2d)\ntest_predict = svr.predict(X_test_2d)\n\ny_train_denom = standard_scaler.inverse_transform(y_train)\ny_test_denorm = standard_scaler.inverse_transform(y_test)\ntrain_predict_denom = standard_scaler.inverse_transform(train_predict)\ntest_predict_denorm = standard_scaler.inverse_transform(test_predict)\n\nrmse_train = sqrt(mean_squared_error(y_train_denom, train_predict_denom))\nprint('the root mean squared error for training set is {}'.format(rmse_train))\nrmse_test = sqrt(mean_squared_error(y_test_denorm, test_predict_denorm))\nprint('the root mean squared error for test set is {}'.format(rmse_test))\n\nplt.figure(figsize=(20, 10))\nplt.plot(y_test_denorm, color='C1', label='test')\nplt.plot(test_predict_denorm, color='C2', label='predicted')\nplt.xlabel('timeline')\nplt.ylabel('stock price')\nplt.legend(loc='upper left')\nplt.show()",
"the root mean squared error for training set is 9.207596297502324\nthe root mean squared error for test set is 33.59647918777672\n"
]
],
[
[
"# 4. Implement and Evaluate the LSTM Model <a class=\"anchor\" id=\"#4\"></a>\n## 1) Build the structure of model\n\n+ dropout = 0.3\n+ epochs = 100\n+ LSTM 256 > LSTM 256 > Relu 32 > Linear 1",
"_____no_output_____"
]
],
[
[
"window_size_lstm = 1\nnum_of_features = len(df_normalized_minmax.columns)\n\ndef build_model(layers):\n d = 0.3\n model = Sequential()\n \n model.add(LSTM(256, input_shape=(layers[1], layers[0]), return_sequences=True))\n model.add(Dropout(d))\n \n model.add(LSTM(256, input_shape=(layers[1], layers[0]), return_sequences=False))\n model.add(Dropout(d))\n \n model.add(Dense(32,kernel_initializer=\"uniform\",activation='relu')) \n model.add(Dense(1,kernel_initializer=\"uniform\",activation='linear'))\n \n # adam = keras.optimizers.Adam(decay=0.2)\n \n start = time.time()\n model.compile(loss='mse',optimizer='adam', metrics=['accuracy'])\n print(\"Compilation Time : \", time.time() - start)\n return model\n\nmodel = build_model([num_of_features, window_size_lstm])\nmodel.summary()\nplot_model(model)",
"Compilation Time : 0.017756223678588867\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nlstm_15 (LSTM) (None, 1, 256) 269312 \n_________________________________________________________________\ndropout_15 (Dropout) (None, 1, 256) 0 \n_________________________________________________________________\nlstm_16 (LSTM) (None, 256) 525312 \n_________________________________________________________________\ndropout_16 (Dropout) (None, 256) 0 \n_________________________________________________________________\ndense_15 (Dense) (None, 32) 8224 \n_________________________________________________________________\ndense_16 (Dense) (None, 1) 33 \n=================================================================\nTotal params: 802,881\nTrainable params: 802,881\nNon-trainable params: 0\n_________________________________________________________________\n"
]
],
[
[
"## 2) Train the LSTM model",
"_____no_output_____"
]
],
[
[
"X_train, y_train, X_test, y_test = create_train_test_datasets(df_normalized_minmax, window_size_lstm)\nmodel.fit(X_train, y_train, batch_size=410, epochs=200, validation_split=0.1, verbose=1)",
"Shapes: \n processed data : (1999, 2, 6)\n data_train - (1799, 2, 6)\n X_train - (1799, 1, 6)\n y_train : (1799,)\n\nShapes: \n X_test - (200, 1, 6)\n y_test : (200,)\n\nX_train has a shape of (1799, 1, 6) | X_train sample at 1: \n[[0.1881035 0.19961566 0.189557 0.01313706 0.19526587 0.00723276]]\ny_train has a shape of (1799,) | y_train sample at 1: \n0.008031962025756512\nTrain on 1619 samples, validate on 180 samples\nEpoch 1/200\n1619/1619 [==============================] - 2s 1ms/step - loss: 0.0984 - acc: 0.0000e+00 - val_loss: 0.5136 - val_acc: 0.0000e+00\nEpoch 2/200\n1619/1619 [==============================] - 0s 205us/step - loss: 0.0937 - acc: 0.0000e+00 - val_loss: 0.4954 - val_acc: 0.0000e+00\nEpoch 3/200\n1619/1619 [==============================] - 0s 217us/step - loss: 0.0867 - acc: 0.0000e+00 - val_loss: 0.4651 - val_acc: 0.0000e+00\nEpoch 4/200\n1619/1619 [==============================] - 0s 206us/step - loss: 0.0757 - acc: 0.0000e+00 - val_loss: 0.4137 - val_acc: 0.0000e+00\nEpoch 5/200\n1619/1619 [==============================] - 0s 203us/step - loss: 0.0594 - acc: 0.0000e+00 - val_loss: 0.3303 - val_acc: 0.0000e+00\nEpoch 6/200\n1619/1619 [==============================] - 0s 206us/step - loss: 0.0384 - acc: 0.0000e+00 - val_loss: 0.2101 - val_acc: 0.0000e+00\nEpoch 7/200\n1619/1619 [==============================] - 0s 206us/step - loss: 0.0251 - acc: 0.0000e+00 - val_loss: 0.0959 - val_acc: 0.0000e+00\nEpoch 8/200\n1619/1619 [==============================] - 0s 206us/step - loss: 0.0300 - acc: 0.0000e+00 - val_loss: 0.0843 - val_acc: 0.0000e+00\nEpoch 9/200\n1619/1619 [==============================] - 0s 210us/step - loss: 0.0259 - acc: 0.0000e+00 - val_loss: 0.1311 - val_acc: 0.0000e+00\nEpoch 10/200\n1619/1619 [==============================] - 0s 225us/step - loss: 0.0225 - acc: 0.0000e+00 - val_loss: 0.1716 - val_acc: 0.0000e+00\nEpoch 11/200\n1619/1619 [==============================] - 0s 218us/step - loss: 0.0229 - acc: 0.0000e+00 - val_loss: 0.1756 - val_acc: 0.0000e+00\nEpoch 12/200\n1619/1619 [==============================] - 0s 214us/step - loss: 0.0222 - acc: 0.0000e+00 - val_loss: 0.1513 - val_acc: 0.0000e+00\nEpoch 13/200\n1619/1619 [==============================] - 0s 210us/step - loss: 0.0198 - acc: 0.0000e+00 - val_loss: 0.1170 - val_acc: 0.0000e+00\nEpoch 14/200\n1619/1619 [==============================] - 0s 211us/step - loss: 0.0191 - acc: 0.0000e+00 - val_loss: 0.0924 - val_acc: 0.0000e+00\nEpoch 15/200\n1619/1619 [==============================] - 0s 214us/step - loss: 0.0184 - acc: 0.0000e+00 - val_loss: 0.0903 - val_acc: 0.0000e+00\nEpoch 16/200\n1619/1619 [==============================] - 0s 211us/step - loss: 0.0168 - acc: 0.0000e+00 - val_loss: 0.1004 - val_acc: 0.0000e+00\nEpoch 17/200\n1619/1619 [==============================] - 0s 212us/step - loss: 0.0157 - acc: 0.0000e+00 - val_loss: 0.1017 - val_acc: 0.0000e+00\nEpoch 18/200\n1619/1619 [==============================] - 0s 216us/step - loss: 0.0146 - acc: 0.0000e+00 - val_loss: 0.0874 - val_acc: 0.0000e+00\nEpoch 19/200\n1619/1619 [==============================] - 0s 212us/step - loss: 0.0130 - acc: 0.0000e+00 - val_loss: 0.0666 - val_acc: 0.0000e+00\nEpoch 20/200\n1619/1619 [==============================] - 0s 214us/step - loss: 0.0115 - acc: 0.0000e+00 - val_loss: 0.0539 - val_acc: 0.0000e+00\nEpoch 21/200\n1619/1619 [==============================] - 0s 217us/step - loss: 0.0100 - acc: 0.0000e+00 - val_loss: 0.0485 - val_acc: 0.0000e+00\nEpoch 22/200\n1619/1619 
[==============================] - 0s 207us/step - loss: 0.0080 - acc: 0.0000e+00 - val_loss: 0.0405 - val_acc: 0.0000e+00\nEpoch 23/200\n1619/1619 [==============================] - 0s 208us/step - loss: 0.0066 - acc: 0.0000e+00 - val_loss: 0.0275 - val_acc: 0.0000e+00\nEpoch 24/200\n1619/1619 [==============================] - 0s 216us/step - loss: 0.0047 - acc: 0.0000e+00 - val_loss: 0.0152 - val_acc: 0.0000e+00\nEpoch 25/200\n1619/1619 [==============================] - 0s 218us/step - loss: 0.0034 - acc: 0.0000e+00 - val_loss: 0.0067 - val_acc: 0.0000e+00\nEpoch 26/200\n1619/1619 [==============================] - 0s 226us/step - loss: 0.0021 - acc: 0.0000e+00 - val_loss: 0.0024 - val_acc: 0.0000e+00\nEpoch 27/200\n1619/1619 [==============================] - 0s 219us/step - loss: 0.0016 - acc: 0.0000e+00 - val_loss: 3.9342e-04 - val_acc: 0.0000e+00\nEpoch 28/200\n1619/1619 [==============================] - 0s 208us/step - loss: 0.0013 - acc: 0.0000e+00 - val_loss: 8.6486e-04 - val_acc: 0.0000e+00\nEpoch 29/200\n1619/1619 [==============================] - 0s 214us/step - loss: 0.0014 - acc: 0.0000e+00 - val_loss: 0.0014 - val_acc: 0.0000e+00\nEpoch 30/200\n1619/1619 [==============================] - 0s 218us/step - loss: 0.0013 - acc: 0.0000e+00 - val_loss: 0.0015 - val_acc: 0.0000e+00\nEpoch 31/200\n1619/1619 [==============================] - 0s 224us/step - loss: 0.0012 - acc: 0.0000e+00 - val_loss: 0.0011 - val_acc: 0.0000e+00\nEpoch 32/200\n1619/1619 [==============================] - 0s 207us/step - loss: 0.0011 - acc: 0.0000e+00 - val_loss: 5.1601e-04 - val_acc: 0.0000e+00\nEpoch 33/200\n1619/1619 [==============================] - 0s 209us/step - loss: 0.0010 - acc: 0.0000e+00 - val_loss: 3.6967e-04 - val_acc: 0.0000e+00\nEpoch 34/200\n1619/1619 [==============================] - 0s 222us/step - loss: 0.0010 - acc: 0.0000e+00 - val_loss: 2.8236e-04 - val_acc: 0.0000e+00\nEpoch 35/200\n1619/1619 [==============================] - 0s 210us/step - loss: 8.9477e-04 - acc: 0.0000e+00 - val_loss: 3.7346e-04 - val_acc: 0.0000e+00\nEpoch 36/200\n1619/1619 [==============================] - 0s 223us/step - loss: 8.8772e-04 - acc: 0.0000e+00 - val_loss: 2.7404e-04 - val_acc: 0.0000e+00\nEpoch 37/200\n1619/1619 [==============================] - 0s 229us/step - loss: 9.2260e-04 - acc: 0.0000e+00 - val_loss: 2.8305e-04 - val_acc: 0.0000e+00\nEpoch 38/200\n1619/1619 [==============================] - 0s 252us/step - loss: 8.1166e-04 - acc: 0.0000e+00 - val_loss: 2.7101e-04 - val_acc: 0.0000e+00\nEpoch 39/200\n1619/1619 [==============================] - 0s 267us/step - loss: 7.8194e-04 - acc: 0.0000e+00 - val_loss: 3.0359e-04 - val_acc: 0.0000e+00\nEpoch 40/200\n1619/1619 [==============================] - 0s 271us/step - loss: 7.4621e-04 - acc: 0.0000e+00 - val_loss: 3.2337e-04 - val_acc: 0.0000e+00\nEpoch 41/200\n1619/1619 [==============================] - 0s 285us/step - loss: 7.7717e-04 - acc: 0.0000e+00 - val_loss: 3.3338e-04 - val_acc: 0.0000e+00\nEpoch 42/200\n1619/1619 [==============================] - 0s 259us/step - loss: 7.4892e-04 - acc: 0.0000e+00 - val_loss: 3.5945e-04 - val_acc: 0.0000e+00\nEpoch 43/200\n1619/1619 [==============================] - 0s 214us/step - loss: 6.9273e-04 - acc: 0.0000e+00 - val_loss: 3.1283e-04 - val_acc: 0.0000e+00\nEpoch 44/200\n1619/1619 [==============================] - 0s 206us/step - loss: 7.0225e-04 - acc: 0.0000e+00 - val_loss: 4.2388e-04 - val_acc: 0.0000e+00\nEpoch 45/200\n1619/1619 [==============================] - 0s 259us/step - 
loss: 6.7689e-04 - acc: 0.0000e+00 - val_loss: 3.7270e-04 - val_acc: 0.0000e+00\nEpoch 46/200\n1619/1619 [==============================] - 0s 259us/step - loss: 6.5629e-04 - acc: 0.0000e+00 - val_loss: 3.6965e-04 - val_acc: 0.0000e+00\nEpoch 47/200\n1619/1619 [==============================] - 0s 229us/step - loss: 6.3784e-04 - acc: 0.0000e+00 - val_loss: 3.7581e-04 - val_acc: 0.0000e+00\nEpoch 48/200\n1619/1619 [==============================] - 0s 229us/step - loss: 6.2399e-04 - acc: 0.0000e+00 - val_loss: 3.6044e-04 - val_acc: 0.0000e+00\nEpoch 49/200\n1619/1619 [==============================] - 0s 222us/step - loss: 5.9189e-04 - acc: 0.0000e+00 - val_loss: 4.0516e-04 - val_acc: 0.0000e+00\nEpoch 50/200\n1619/1619 [==============================] - 0s 202us/step - loss: 6.1209e-04 - acc: 0.0000e+00 - val_loss: 5.2509e-04 - val_acc: 0.0000e+00\nEpoch 51/200\n1619/1619 [==============================] - 0s 212us/step - loss: 5.5009e-04 - acc: 0.0000e+00 - val_loss: 3.7063e-04 - val_acc: 0.0000e+00\nEpoch 52/200\n1619/1619 [==============================] - 0s 204us/step - loss: 5.5376e-04 - acc: 0.0000e+00 - val_loss: 4.4546e-04 - val_acc: 0.0000e+00\nEpoch 53/200\n"
]
],
[
[
"## 3) Denormalize the data, calculate the score and visualize the results",
"_____no_output_____"
]
],
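[
[
"Before touching the model output, a minimal sketch of the denormalization idea on toy numbers. This is an illustration only: `toy_prices` and `toy_scaler` are hypothetical names, not objects from this notebook (the real cell below reuses the already-fitted `min_max_scaler`). The point is that `inverse_transform` undoes the `[0, 1]` scaling exactly, which is why the RMSE is computed on denormalized prices.",
"_____no_output_____"
]
],
[
[
"# Illustration only: inverse_transform recovers the original units, so the RMSE below\n# is measured in price units rather than on [0, 1]-scaled values.\n# toy_prices / toy_scaler are hypothetical, not part of this notebook's pipeline.\nimport numpy as np\nfrom sklearn.preprocessing import MinMaxScaler\n\ntoy_prices = np.array([[100.0], [150.0], [200.0]])\ntoy_scaler = MinMaxScaler()\ntoy_scaled = toy_scaler.fit_transform(toy_prices)        # -> [[0.0], [0.5], [1.0]]\ntoy_restored = toy_scaler.inverse_transform(toy_scaled)  # back to price units\nprint(np.allclose(toy_prices, toy_restored))             # expected: True",
"_____no_output_____"
]
],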
[
[
"\ntrain_predict = model.predict(X_train)\ntest_predict = model.predict(X_test)\n\ny_train_denorm = min_max_scaler.inverse_transform(y_train.reshape(-1, 1))\ntrain_predict_denorm = min_max_scaler.inverse_transform(train_predict)\ny_test_denorm = min_max_scaler.inverse_transform(y_test.reshape(-1, 1))\ntest_predict_denorm = min_max_scaler.inverse_transform(test_predict)\n\n# print(y_predict_denormalized[:5], '\\n\\n', y_test_denormalized[:5])\ntrain_score = sqrt(mean_squared_error(y_train_denorm[:,0], train_predict_denorm[:,0]))\ntest_score = sqrt(mean_squared_error(y_test_denorm[:,0], test_predict_denorm[:,0]))\nprint('train score and test score are respectively {} and {}'.format(train_score, test_score))\n\nplt.figure(figsize=(20, 10))\nplt.plot(y_test_denorm, color='C1', label='test')\nplt.plot(test_predict_denorm, color='C2', label='predicted')\nplt.xlabel('timeline')\nplt.ylabel('stock price')\nplt.legend(loc='upper left')\nplt.show()\n ",
"train score and test score are respectively 7.711845916677005 and 9.004305540494096\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e715501c156ceb1dcef0377a8ff79c0b5b7dfe1a | 23,189 | ipynb | Jupyter Notebook | ml-models.ipynb | rksleung/dataworks_gtm_dashboard | 333dc34a198f6d0306c42eeb20abeb2b3dc256a6 | [
"MIT"
] | 7 | 2020-10-16T11:45:14.000Z | 2022-03-30T06:53:19.000Z | ml-models.ipynb | rksleung/dataworks_gtm_dashboard | 333dc34a198f6d0306c42eeb20abeb2b3dc256a6 | [
"MIT"
] | null | null | null | ml-models.ipynb | rksleung/dataworks_gtm_dashboard | 333dc34a198f6d0306c42eeb20abeb2b3dc256a6 | [
"MIT"
] | 2 | 2020-10-18T15:22:33.000Z | 2022-03-30T09:09:10.000Z | 30.312418 | 131 | 0.385873 | [
[
[
"### Data Preprocessing",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\nimport pickle\nfrom sklearn.metrics import accuracy_score",
"_____no_output_____"
],
[
"df = pd.read_csv('data/Telco-Customer-Churn.csv')\ndf['TotalCharges'] = df['TotalCharges'].replace(\" \", 0).astype('float32')",
"_____no_output_____"
],
[
"cat_features = df.drop(['customerID','TotalCharges', 'MonthlyCharges', 'SeniorCitizen', 'tenure', 'Churn'],axis=1).columns\ncat_features",
"_____no_output_____"
],
[
"df[cat_features].head()",
"_____no_output_____"
],
[
"from sklearn.preprocessing import OneHotEncoder\nohe = OneHotEncoder(sparse=False)\nohe.fit(df[cat_features])",
"_____no_output_____"
],
[
"dff = ohe.transform(df[cat_features])\ndff = pd.DataFrame(dff, columns=ohe.get_feature_names())\ndff = pd.concat([dff, df[['SeniorCitizen', 'MonthlyCharges', 'TotalCharges', 'tenure']]], axis=1)",
"_____no_output_____"
],
[
"dff.head()",
"_____no_output_____"
],
[
"bin_dict = {'No':0, 'Yes':1}\ndf.Churn = df.Churn.map(bin_dict)",
"_____no_output_____"
]
],
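[
[
"A small compatibility note on the encoder above: depending on the installed scikit-learn version (an assumption, since no version is pinned here), `OneHotEncoder.get_feature_names` was renamed to `get_feature_names_out` in newer releases. A guarded lookup, as sketched below, works either way.",
"_____no_output_____"
]
],
[
[
"# Version-compatibility sketch (the installed sklearn version is an assumption):\n# newer releases renamed get_feature_names to get_feature_names_out.\nfeature_names = (ohe.get_feature_names_out() if hasattr(ohe, 'get_feature_names_out')\n                 else ohe.get_feature_names())\nprint(len(feature_names))",
"_____no_output_____"
]
],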
[
[
"### Modeling",
"_____no_output_____"
]
],
[
[
"from sklearn.model_selection import train_test_split\n\nX = dff.values\ny = df.Churn\n\nX_train, X_test, y_train, y_test = train_test_split(X,y)",
"_____no_output_____"
],
[
"print(df.shape)\nprint(\"\\n\")\nprint(\"X_train: \", X_train.shape)\nprint(\"y_train: \", y_train.shape)\n#print(\"\\n\")\nprint(\"X_test: \", X_test.shape)\nprint(\"y_test: \", y_test.shape)",
"(7043, 21)\n\n\nX_train: (5282, 45)\ny_train: (5282,)\nX_test: (1761, 45)\ny_test: (1761,)\n"
]
],
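[
[
"Roughly a quarter of the customers churn, so the class balance of the split above matters. As an optional variant (not used by the cells below, and swapping it in would change the reported accuracies), a stratified split with a fixed seed keeps the churn ratio identical in both folds and makes runs repeatable:",
"_____no_output_____"
]
],
[
[
"# Optional variant, shown for illustration only: stratify preserves the churn rate\n# in both folds and random_state makes the split reproducible.\nX_tr, X_te, y_tr, y_te = train_test_split(X, y, stratify=y, random_state=42)\nprint(y_tr.mean(), y_te.mean())  # churn fractions should match closely",
"_____no_output_____"
]
],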
[
[
"### SVM",
"_____no_output_____"
]
],
[
[
"# Fitting classifier to the Training set\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.svm import SVC\n\nclassifier_svm = SVC()\nsteps = [\n ('scalar', StandardScaler()),\n ('model', SVC())\n]\nsvm_pipe = Pipeline(steps)",
"_____no_output_____"
],
[
"%%time\nparameters = { 'model__kernel' : ['poly'],\n 'model__C' : [10],\n 'model__gamma' : ['scale'],\n 'model__random_state' : [42],\n 'model__degree' : [1]\n}\nclassifier_svm = GridSearchCV(svm_pipe, parameters, scoring='accuracy', verbose=10, cv=6, n_jobs=-1)\nclassifier_svm = classifier_svm.fit(X_train, y_train.ravel())",
"Fitting 6 folds for each of 1 candidates, totalling 6 fits\n"
],
[
"y_pred_svm_train = classifier_svm.predict(X_train)\naccuracy_svm_train = accuracy_score(y_train, y_pred_svm_train)\nprint(\"Training set: \", accuracy_svm_train)\n\ny_pred_svm_test = classifier_svm.predict(X_test)\naccuracy_svm_test = accuracy_score(y_test, y_pred_svm_test)\nprint(\"Test set: \", accuracy_svm_test)",
"Training set: 0.8031048845134419\nTest set: 0.787052810902896\n"
],
[
"filename = 'data/svm_model.sav'\npickle.dump(classifier_svm, open(filename, 'wb'))",
"_____no_output_____"
]
],
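[
[
"For completeness, a short sketch of reading the pickled model back, since the notebook only writes it. This assumes the cell above has already created `data/svm_model.sav`; the reloaded `GridSearchCV` object exposes the same `predict` interface as `classifier_svm`.",
"_____no_output_____"
]
],
[
[
"# Reload sketch (assumes data/svm_model.sav was written by the cell above).\nwith open('data/svm_model.sav', 'rb') as f:\n    loaded_svm = pickle.load(f)\n\ny_pred_loaded = loaded_svm.predict(X_test)\nprint((y_pred_loaded == y_pred_svm_test).all())  # expected: True",
"_____no_output_____"
]
],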
[
[
"### XGBoost",
"_____no_output_____"
]
],
[
[
"import xgboost as xgb\n\nsteps = [\n ('scalar', StandardScaler()),\n ('model', xgb.XGBClassifier())\n]\nxgb_pipe = Pipeline(steps)",
"_____no_output_____"
],
[
"%%time\nparameters = { 'model__min_child_weight': [10],\n 'model__gamma': [5],\n 'model__subsample': [0.6],\n 'model__colsample_bytree': [0.6],\n 'model__max_depth': [3],\n 'model__random_state' : [42]\n}\nclassifier_xgb = GridSearchCV(xgb_pipe, parameters, scoring='accuracy', verbose=10, cv=6, n_jobs=-1)\nclassifier_xgb = classifier_xgb.fit(X_train, y_train.ravel())",
"Fitting 6 folds for each of 1 candidates, totalling 6 fits\n"
],
[
"y_pred_xgb_train = classifier_xgb.predict(X_train)\naccuracy_xgb_train = accuracy_score(y_train, y_pred_xgb_train)\nprint(\"Training set: \", accuracy_xgb_train)\n\ny_pred_xgb_test = classifier_xgb.predict(X_test)\naccuracy_xgb_test = accuracy_score(y_test, y_pred_xgb_test)\nprint(\"Test set: \", accuracy_xgb_test)",
"Training set: 0.8290420295342673\nTest set: 0.8029528676888131\n"
],
[
"filename = 'data/xgb_model.sav'\npickle.dump(classifier_xgb, open(filename, 'wb'))",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
e715536a07f4d96605d4f909b0de876290a6c69a | 71,457 | ipynb | Jupyter Notebook | Tutorial-Start_to_Finish-ScalarWaveCurvilinear.ipynb | KAClough/nrpytutorial | 2cc3b22cb1092bc10890237dd8ee3b6881c36b52 | [
"BSD-2-Clause"
] | null | null | null | Tutorial-Start_to_Finish-ScalarWaveCurvilinear.ipynb | KAClough/nrpytutorial | 2cc3b22cb1092bc10890237dd8ee3b6881c36b52 | [
"BSD-2-Clause"
] | null | null | null | Tutorial-Start_to_Finish-ScalarWaveCurvilinear.ipynb | KAClough/nrpytutorial | 2cc3b22cb1092bc10890237dd8ee3b6881c36b52 | [
"BSD-2-Clause"
] | null | null | null | 80.469595 | 24,228 | 0.731433 | [
[
[
"<script async src=\"https://www.googletagmanager.com/gtag/js?id=UA-59152712-8\"></script>\n<script>\n window.dataLayer = window.dataLayer || [];\n function gtag(){dataLayer.push(arguments);}\n gtag('js', new Date());\n\n gtag('config', 'UA-59152712-8');\n</script>\n\n# Start-to-Finish Example: Numerical Solution of the Scalar Wave Equation, in Curvilinear Coordinates\n\n## Author: Zach Etienne\n### Formatting improvements courtesy Brandon Clark\n\n## This module solves the scalar wave equation for a plane wave in *spherical coordinates* (though other coordinates, including Cartesian, may be chosen).\n\n**Notebook Status:** <font color =\"green\"><b> Validated </b></font>\n\n**Validation Notes:** This module has been validated to converge at the expected order to the exact solution (see [plot](#convergence) at bottom).\n\n### NRPy+ Source Code for this module: \n* [ScalarWaveCurvilinear/ScalarWaveCurvilinear_RHSs.py](../edit/ScalarWaveCurvilinear/ScalarWaveCurvilinear_RHSs.py) [\\[**tutorial**\\]](Tutorial-ScalarWaveCurvilinear.ipynb) Generates the right-hand side for the Scalar Wave Equation in curvilinear coordinates\n* [ScalarWave/InitialData_PlaneWave.py](../edit/ScalarWave/InitialData_PlaneWave.py) [\\[**tutorial**\\]](Tutorial-ScalarWave.ipynb) Generating C code for plane wave initial data for the scalar wave equation \n\n## Introduction:\nAs outlined in the [previous NRPy+ tutorial notebook](Tutorial-ScalarWaveCurvilinear.ipynb), we first use NRPy+ to generate initial data for the scalar wave equation, and then we use it to generate the RHS expressions for [Method of Lines](https://reference.wolfram.com/language/tutorial/NDSolveMethodOfLines.html) time integration based on the [explicit Runge-Kutta fourth-order scheme](https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods) (RK4).\n\nThe entire algorithm is outlined below, with NRPy+-based components highlighted in <font color='green'>green</font>.\n\n1. Allocate memory for gridfunctions, including temporary storage for the RK4 time integration.\n1. <font color='green'>Set gridfunction values to initial data.</font>\n1. Evolve the system forward in time using RK4 time integration. At each RK4 substep, do the following:\n 1. <font color='green'>Evaluate scalar wave RHS expressions.</font>\n 1. Apply boundary conditions.\n1. At the end of each iteration in time, output the relative error between numerical and exact solutions.",
"_____no_output_____"
],
[
"<a id='toc'></a>\n\n# Table of Contents\n$$\\label{toc}$$\n\nThis notebook is organized as follows\n\n1. [Step 1](#writec): Generate C code to solve the scalar wave equation in curvilinear coordinates\n 1. [Step 1.a](#id_rhss): C code generation: Initial data and scalar wave right-hand-sides\n 1. [Step 1.b](#boundaryconditions): C code generation: Boundary condition driver\n 1. [Step 1.c](#cparams_rfm_and_domainsize): Generate Cparameters files; set reference metric parameters, including `domain_size`\n 1. [Step 1.d](#cfl): C code generation: Finding the minimum proper distance between grid points, needed for [CFL](https://en.wikipedia.org/w/index.php?title=Courant%E2%80%93Friedrichs%E2%80%93Lewy_condition&oldid=806430673)-limited timestep\n1. [Step 2](#mainc): `ScalarWaveCurvilinear_Playground.c`: The Main C Code\n1. [Step 3](#compileexec): Compile generated C codes & solve the scalar wave equation\n1. [Step 4](#convergence): Code validation: Plot the numerical error, and confirm that it converges to zero at expected rate with increasing numerical resolution (sampling)\n1. [Step 5](#latex_pdf_output): Output this notebook to $\\LaTeX$-formatted PDF file",
"_____no_output_____"
],
[
"<a id='writec'></a>\n\n# Step 1: Using NRPy+ to generate necessary C code to solve the scalar wave equation in curvilinear, singular coordinates \\[Back to [top](#toc)\\]\n$$\\label{writec}$$",
"_____no_output_____"
],
[
"<a id='id_rhss'></a>\n\n## Step 1.a: C code generation: Initial data and scalar wave RHSs \\[Back to [top](#toc)\\]\n$$\\label{id_rhss}$$\n\n\nWe choose simple plane wave initial data, which is documented in the [Cartesian scalar wave module](Tutorial-ScalarWave.ipynb). Specifically, we implement monochromatic (single-wavelength) wave traveling in the $\\hat{k}$ direction with speed $c$\n$$u(\\vec{x},t) = f(\\hat{k}\\cdot\\vec{x} - c t),$$\nwhere $\\hat{k}$ is a unit vector.\n\nThe scalar wave RHSs in curvilinear coordinates (documented [in the previous module](Tutorial-ScalarWaveCurvilinear.ipynb)) are simply the right-hand sides of the scalar wave equation written in curvilinear coordinates\n\\begin{align}\n\\partial_t u &= v \\\\\n\\partial_t v &= c^2 \\left(\\hat{g}^{ij} \\partial_{i} \\partial_{j} u - \\hat{\\Gamma}^i \\partial_i u\\right),\n\\end{align}\nwhere $\\hat{g}^{ij}$ is the inverse reference 3-metric (i.e., the metric corresponding to the underlying coordinate system we choose$-$spherical coordinates in our example below), and $\\hat{\\Gamma}^i$ is the contracted Christoffel symbol $\\hat{\\Gamma}^\\tau = \\hat{g}^{\\mu\\nu} \\hat{\\Gamma}^\\tau_{\\mu\\nu}$.\n\nBelow we generate \n+ the initial data by calling `InitialData_PlaneWave()` inside the NRPy+ [ScalarWave/InitialData_PlaneWave.py](../edit/ScalarWave/InitialData_PlaneWave.py) module (documented in [this NRPy+ Jupyter notebook](Tutorial-ScalarWave.ipynb)), and \n+ the RHS expressions by calling `ScalarWaveCurvilinear_RHSs()` inside the NRPy+ [ScalarWaveCurvilinear/ScalarWaveCurvilinear_RHSs.py](../edit/ScalarWaveCurvilinear/ScalarWaveCurvilinear_RHSs.py) module (documented in [this NRPy+ Jupyter notebook](Tutorial-ScalarWaveCurvilinear.ipynb)).",
"_____no_output_____"
]
],
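[
[
"To make the plane-wave structure above concrete before any C code is generated, here is a standalone NumPy sketch. The profile $f(s) = \\sin(2\\pi s)$ is an illustrative assumption, not necessarily the profile hard-coded in `ScalarWave.InitialData_PlaneWave`; only the structure $u = f(\\hat{k}\\cdot\\vec{x} - c t)$ with $v = \\partial_t u$ is the point.",
"_____no_output_____"
]
],
[
[
"# Standalone illustration, independent of the NRPy+-generated code: a monochromatic\n# plane wave u = f(khat.x - c*t) with the *assumed* profile f(s) = sin(2*pi*s).\nimport numpy as np\n\nc = 1.0\nkhat = np.array([1.0, 1.0, 1.0])/np.sqrt(3.0)  # unit propagation direction\n\ndef u_exact(x, t):\n    return np.sin(2.0*np.pi*(x.dot(khat) - c*t))\n\ndef v_exact(x, t):  # v = du/dt\n    return -2.0*np.pi*c*np.cos(2.0*np.pi*(x.dot(khat) - c*t))\n\nx = np.array([0.3, -0.2, 0.5])\nprint(u_exact(x, 0.0), v_exact(x, 0.0))",
"_____no_output_____"
]
],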
[
[
"# Step P1: Import needed NRPy+ core modules:\nfrom outputC import * # NRPy+: Core C code output module\nimport finite_difference as fin # NRPy+: Finite difference C code generation module\nimport NRPy_param_funcs as par # NRPy+: Parameter interface\nimport grid as gri # NRPy+: Functions having to do with numerical grids\nimport indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support\nimport reference_metric as rfm # NRPy+: Reference metric support\nimport cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface\nimport shutil, os, sys # Standard Python modules for multiplatform OS-level functions\n\n# Step P2: Create C code output directory:\nCcodesdir = os.path.join(\"ScalarWaveCurvilinear_Playground_Ccodes/\")\n# First remove C code output directory if it exists\n# Courtesy https://stackoverflow.com/questions/303200/how-do-i-remove-delete-a-folder-that-is-not-empty\nshutil.rmtree(Ccodesdir, ignore_errors=True)\n# Then create a fresh directory\ncmd.mkdir(Ccodesdir)\n\n# Step P3: Create executable output directory:\noutdir = os.path.join(Ccodesdir,\"output/\")\ncmd.mkdir(outdir)\n\n# Step 1: Set the spatial dimension parameter \n# to three this time, and then read\n# the parameter as DIM.\npar.set_parval_from_str(\"grid::DIM\",3)\nDIM = par.parval_from_str(\"grid::DIM\")\n\n# Step 2: Set some core parameters, including CoordSystem MoL timestepping algorithm,\n# FD order, floating point precision, and CFL factor:\n# Choices are: Spherical, SinhSpherical, SinhSphericalv2, Cylindrical, SinhCylindrical, \n# SymTP, SinhSymTP\nCoordSystem = \"SinhSpherical\"\n\n# Step 2.a: Set defaults for Coordinate system parameters.\n# These are perhaps the most commonly adjusted parameters,\n# so we enable modifications at this high level.\n\n# domain_size sets the default value for:\n# * Spherical's params.RMAX\n# * SinhSpherical*'s params.AMAX\n# * Cartesians*'s -params.{x,y,z}min & .{x,y,z}max\n# * Cylindrical's -params.ZMIN & .{Z,RHO}MAX\n# * SinhCylindrical's params.AMPL{RHO,Z}\n# * *SymTP's params.AMAX\ndomain_size = 10.0 # Needed for all coordinate systems.\n\n# sinh_width sets the default value for:\n# * SinhSpherical's params.SINHW\n# * SinhCylindrical's params.SINHW{RHO,Z}\n# * SinhSymTP's params.SINHWAA\nsinh_width = 0.4 # If Sinh* coordinates chosen\n\n# sinhv2_const_dr sets the default value for:\n# * SinhSphericalv2's params.const_dr\n# * SinhCylindricalv2's params.const_d{rho,z}\nsinhv2_const_dr = 0.05# If Sinh*v2 coordinates chosen\n\n# SymTP_bScale sets the default value for:\n# * SinhSymTP's params.bScale\nSymTP_bScale = 1.0 # If SymTP chosen\n\n# Step 2.b: Set the order of spatial and temporal derivatives;\n# the core data type, and the CFL factor.\n# RK_method choices include: Euler, \"RK2 Heun\", \"RK2 MP\", \"RK2 Ralston\", RK3, \"RK3 Heun\", \"RK3 Ralston\",\n# SSPRK3, RK4, DP5, DP5alt, CK5, DP6, L6, DP8\nRK_method = \"RK4\"\nFD_order = 4 # Finite difference order: even numbers only, starting with 2. 12 is generally unstable\nREAL = \"double\" # Best to use double here.\nCFL_FACTOR= 1.0\n\n# Step 3: Generate Runge-Kutta-based (RK-based) timestepping code. 
\n# Each RK substep involves two function calls:\n# 3.A: Evaluate RHSs (RHS_string)\n# 3.B: Apply boundary conditions (post_RHS_string)\nimport MoLtimestepping.C_Code_Generation as MoL\nfrom MoLtimestepping.RK_Butcher_Table_Dictionary import Butcher_dict\nRK_order = Butcher_dict[RK_method][1]\ncmd.mkdir(os.path.join(Ccodesdir,\"MoLtimestepping/\"))\nMoL.MoL_C_Code_Generation(RK_method, \n RHS_string = \"rhs_eval(&rfmstruct, ¶ms, RK_INPUT_GFS, RK_OUTPUT_GFS);\",\n post_RHS_string = \"apply_bcs_curvilinear(¶ms, &bcstruct, NUM_EVOL_GFS, evol_gf_parity, RK_OUTPUT_GFS);\",\n outdir = os.path.join(Ccodesdir,\"MoLtimestepping/\"))\n\n# Step 4: Set the coordinate system for the numerical grid\npar.set_parval_from_str(\"reference_metric::CoordSystem\",CoordSystem)\n\n# Step 5: Import the ScalarWave.InitialData module. \n# This command only declares ScalarWave initial data \n# parameters and the InitialData_PlaneWave() function.\nimport ScalarWave.InitialData_PlaneWave as swid\n\n# Step 6: Import ScalarWave_RHSs module. \n# This command only declares ScalarWave RHS parameters\n# and the ScalarWave_RHSs function (called later)\nimport ScalarWaveCurvilinear.ScalarWaveCurvilinear_RHSs as swrhs\n\n# Step 7: Set the finite differencing order to FD_order (set above).\npar.set_parval_from_str(\"finite_difference::FD_CENTDERIVS_ORDER\",FD_order)\n\n# Step 8: Generate SymPy symbolic expressions for\n# monochromatic (single frequency/wavelength)\n# plane wave initial data.\nswid.InitialData_PlaneWave()\n\n# Step 9: Generate SymPy symbolic expressions for\n# uu_rhs and vv_rhs; the ScalarWave RHSs.\n# This function also declares the uu and vv\n# gridfunctions, which need to be declared\n# to output even the initial data to C file.\ncmd.mkdir(os.path.join(Ccodesdir,\"rfm_files/\"))\npar.set_parval_from_str(\"reference_metric::enable_rfm_precompute\",\"True\")\npar.set_parval_from_str(\"reference_metric::rfm_precompute_Ccode_outdir\",os.path.join(Ccodesdir,\"rfm_files/\"))\nswrhs.ScalarWaveCurvilinear_RHSs()\n# Step 9.a: Now that we are finished with all the rfm hatted\n# quantities, let's restore them to their closed-\n# form expressions.\npar.set_parval_from_str(\"reference_metric::enable_rfm_precompute\",\"False\") # Reset to False to disable rfm_precompute.\nrfm.ref_metric__hatted_quantities()\n\n# Step 10: Copy SIMD/SIMD_intrinsics.h to $Ccodesdir/SIMD/SIMD_intrinsics.h\ncmd.mkdir(os.path.join(Ccodesdir,\"SIMD\"))\nshutil.copy(os.path.join(\"SIMD/\")+\"SIMD_intrinsics.h\",os.path.join(Ccodesdir,\"SIMD/\"))\n\n# Step 11: Generate all needed C functions\ndesc=\"Part P3: Declare the function for the exact solution at a single point. time==0 corresponds to the initial data.\"\nname=\"exact_solution_single_point\"\noutCfunction(\n outfile = os.path.join(Ccodesdir,name+\".h\"), desc=desc, name=name,\n params =\"const REAL xx0,const REAL xx1,const REAL xx2,const paramstruct *restrict params,REAL *uu_exact,REAL *vv_exact\",\n body = fin.FD_outputC(\"returnstring\",[lhrh(lhs=\"*uu_exact\",rhs=swid.uu_ID),\n lhrh(lhs=\"*vv_exact\",rhs=swid.vv_ID)]),\n loopopts = \"\")\n\ndesc=\"Part P4: Declare the function for the exact solution at all points. 
time==0 corresponds to the initial data.\"\nname=\"exact_solution_all_points\"\noutCfunction(\n outfile = os.path.join(Ccodesdir,name+\".h\"), desc=desc, name=name,\n params =\"const paramstruct *restrict params,REAL *restrict xx[3], REAL *restrict in_gfs\",\n body =\"\"\"\nREAL xCart[3]; xxCart(params,xx, i0,i1,i2, xCart);\nREAL xx0 = xCart[0]; REAL xx1 = xCart[1]; REAL xx2 = xCart[2];\nexact_solution_single_point(xx0,xx1,xx2,params,&in_gfs[IDX4S(UUGF,i0,i1,i2)],&in_gfs[IDX4S(VVGF,i0,i1,i2)]);\"\"\",\n loopopts = \"AllPoints\")\n\ndesc=\"Part P5: Declare the function to evaluate the scalar wave RHSs\"\nname=\"rhs_eval\"\noutCfunction(\n outfile = os.path.join(Ccodesdir,name+\".h\"), desc=desc, name=name,\n params =\"\"\"rfm_struct *restrict rfmstruct,const paramstruct *restrict params, \n const REAL *restrict in_gfs, REAL *restrict rhs_gfs\"\"\",\n body =fin.FD_outputC(\"returnstring\",[lhrh(lhs=gri.gfaccess(\"rhs_gfs\",\"uu\"),rhs=swrhs.uu_rhs),\n lhrh(lhs=gri.gfaccess(\"rhs_gfs\",\"vv\"),rhs=swrhs.vv_rhs)],\n params=\"SIMD_enable=True\").replace(\"IDX4\",\"IDX4S\"),\n loopopts = \"InteriorPoints,EnableSIMD,Enable_rfm_precompute\")",
"Output C function exact_solution_single_point() to file ScalarWaveCurvilinear_Playground_Ccodes/exact_solution_single_point.h\nOutput C function exact_solution_all_points() to file ScalarWaveCurvilinear_Playground_Ccodes/exact_solution_all_points.h\nOutput C function rhs_eval() to file ScalarWaveCurvilinear_Playground_Ccodes/rhs_eval.h\n"
]
],
[
[
"<a id='boundaryconditions'></a>\n\n## Step 1.b: Output needed C code for boundary condition driver \\[Back to [top](#toc)\\]\n$$\\label{boundaryconditions}$$",
"_____no_output_____"
]
],
[
[
"import CurviBoundaryConditions.CurviBoundaryConditions as cbcs\ncbcs.Set_up_CurviBoundaryConditions(os.path.join(Ccodesdir,\"boundary_conditions/\"),Cparamspath=os.path.join(\"../\"))",
"Wrote to file \"ScalarWaveCurvilinear_Playground_Ccodes/boundary_conditions/parity_conditions_symbolic_dot_products.h\"\nEvolved gridfunction \"uu\" has parity type 0.\nEvolved gridfunction \"vv\" has parity type 0.\nWrote to file \"ScalarWaveCurvilinear_Playground_Ccodes/boundary_conditions/EigenCoord_Cart_to_xx.h\"\n"
]
],
[
[
"<a id='cparams_rfm_and_domainsize'></a>\n\n## Step 1.c: Output C codes needed for declaring and setting Cparameters; also set `free_parameters.h` \\[Back to [top](#toc)\\]\n$$\\label{cparams_rfm_and_domainsize}$$\n\nBased on declared NRPy+ Cparameters, first we generate `declare_Cparameters_struct.h`, `set_Cparameters_default.h`, and `set_Cparameters[-SIMD].h`.\n\nThen we output `free_parameters.h`, which sets initial data parameters, as well as grid domain & reference metric parameters, applying `domain_size` and `sinh_width`/`SymTP_bScale` (if applicable) as set above",
"_____no_output_____"
]
],
[
[
"# Step 1.c.i: Set free_parameters.h\nwith open(os.path.join(Ccodesdir,\"free_parameters.h\"),\"w\") as file:\n file.write(\"\"\"\n// Set free-parameter values.\nparams.time = 0.0; // Initial simulation time time corresponds to exact solution at time=0.\nparams.wavespeed = 1.0;\nparams.kk0 = 1.0;\nparams.kk1 = 1.0;\nparams.kk2 = 1.0;\"\"\")\n \n# Append to $Ccodesdir/free_parameters.h reference metric parameters based on generic\n# domain_size,sinh_width,sinhv2_const_dr,SymTP_bScale,\n# parameters set above. \nrfm.out_default_free_parameters_for_rfm(os.path.join(Ccodesdir,\"free_parameters.h\"),\n domain_size,sinh_width,sinhv2_const_dr,SymTP_bScale)\n\n# Step 1.c.ii: Generate set_Nxx_dxx_invdx_params__and__xx.h:\nrfm.set_Nxx_dxx_invdx_params__and__xx_h(os.path.join(Ccodesdir))\n\n# Step 1.c.iii: Generate xxCart.h, which contains xxCart() for\n# (the mapping from xx->Cartesian) for the chosen\n# CoordSystem:\nrfm.xxCart_h(\"xxCart\",\"./set_Cparameters.h\",os.path.join(Ccodesdir,\"xxCart.h\"))\n\n# Step 1.c.iv: Generate declare_Cparameters_struct.h, set_Cparameters_default.h, and set_Cparameters[].h\npar.generate_Cparameters_Ccodes(os.path.join(Ccodesdir))",
"_____no_output_____"
]
],
[
[
"<a id='cfl'></a>\n\n## Step 1.d: Output needed C code for finding the minimum proper distance between grid points, needed for [CFL](https://en.wikipedia.org/w/index.php?title=Courant%E2%80%93Friedrichs%E2%80%93Lewy_condition&oldid=806430673)-limited timestep \\[Back to [top](#toc)\\]\n$$\\label{cfl}$$\n\nIn order for our explicit-timestepping numerical solution to the scalar wave equation to be stable, it must satisfy the [CFL](https://en.wikipedia.org/w/index.php?title=Courant%E2%80%93Friedrichs%E2%80%93Lewy_condition&oldid=806430673) condition:\n$$\n\\Delta t \\le \\frac{\\min(ds_i)}{c},\n$$\nwhere $c$ is the wavespeed, and\n$$ds_i = h_i \\Delta x^i$$ \nis the proper distance between neighboring gridpoints in the $i$th direction (in 3D, there are 3 directions), $h_i$ is the $i$th reference metric scale factor, and $\\Delta x^i$ is the uniform grid spacing in the $i$th direction:",
"_____no_output_____"
]
],
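[
[
"Before generating the C function, a quick NumPy sketch of this bound for ordinary spherical coordinates, whose scale factors are $h_r = 1$, $h_\\theta = r$, and $h_\\phi = r\\sin\\theta$. The grid sizes and extent below are illustrative choices, not the values the generated `find_timestep()` will use.",
"_____no_output_____"
]
],
[
[
"# Illustrative CFL estimate for plain spherical coordinates; grid values are examples.\nimport numpy as np\n\nwavespeed, CFL_FACTOR = 1.0, 1.0\nRMAX, Nr, Nth, Nph = 10.0, 16, 8, 16\ndr, dth, dph = RMAX/Nr, np.pi/Nth, 2.0*np.pi/Nph\n# cell-centered (staggered) points, mirroring the grids used in the main code\nr  = (np.arange(Nr)  + 0.5)*dr\nth = (np.arange(Nth) + 0.5)*dth\nR, TH = np.meshgrid(r, th, indexing='ij')\nds_min = min(dr, (R*dth).min(), (R*np.sin(TH)*dph).min())\nprint(CFL_FACTOR*ds_min/wavespeed)  # the CFL-limited timestep",
"_____no_output_____"
]
],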
[
[
"# Output the find_timestep() function to a C file.\nrfm.out_timestep_func_to_file(os.path.join(Ccodesdir,\"find_timestep.h\"))",
"_____no_output_____"
]
],
[
[
"<a id='mainc'></a>\n\n# Step 2: `ScalarWaveCurvilinear_Playground.c`: The Main C Code \\[Back to [top](#toc)\\]\n$$\\label{mainc}$$\n\nJust as in [the start-to-finish, solving the scalar wave equation in Cartesian coordinates module](Tutorial-Start_to_Finish-ScalarWave.ipynb), we will implement the scalar wave equation via the Method of Lines. As discussed above, the critical differences between this code and the Cartesian version are as follows:\n1. The CFL-constrained timestep depends on the proper distance between neighboring gridpoints\n1. The boundary conditions must account for the fact that ghost zone points lying in the domain exterior can map either to the interior of the domain, or lie on the outer boundary. In the former case, we simply copy the data from the interior. In the latter case, we apply the usual outer boundary conditions.\n1. The numerical grids must be staggered to avoid direct evaluation of the equations on coordinate singularities.",
"_____no_output_____"
]
],
[
[
"# Part P0: Define REAL, set the number of ghost cells NGHOSTS (from NRPy+'s FD_CENTDERIVS_ORDER),\n# and set the CFL_FACTOR (which can be overwritten at the command line)\n\nwith open(os.path.join(Ccodesdir,\"ScalarWaveCurvilinear_Playground_REAL__NGHOSTS__CFL_FACTOR.h\"), \"w\") as file:\n file.write(\"\"\"\n// Part P0.a: Set the number of ghost cells, from NRPy+'s FD_CENTDERIVS_ORDER\n#define NGHOSTS \"\"\"+str(int(FD_order/2))+\"\"\"\n// Part P0.b: Set the numerical precision (REAL) to double, ensuring all floating point\n// numbers are stored to at least ~16 significant digits\n#define REAL \"\"\"+REAL+\"\"\"\n// Part P0.c: Set the number of ghost cells, from NRPy+'s FD_CENTDERIVS_ORDER\nREAL CFL_FACTOR = \"\"\"+str(CFL_FACTOR)+\";\\n\")",
"_____no_output_____"
],
[
"%%writefile $Ccodesdir/ScalarWaveCurvilinear_Playground.c\n\n// Step P0: Define REAL and NGHOSTS; and declare CFL_FACTOR. This header is generated in NRPy+.\n#include \"ScalarWaveCurvilinear_Playground_REAL__NGHOSTS__CFL_FACTOR.h\"\n\n#include \"rfm_files/rfm_struct__declare.h\"\n\n#include \"declare_Cparameters_struct.h\"\n\n// All SIMD intrinsics used in SIMD-enabled C code loops are defined here:\n#include \"SIMD/SIMD_intrinsics.h\"\n\n// Step P1: Import needed header files\n#include \"stdio.h\"\n#include \"stdlib.h\"\n#include \"math.h\"\n#include \"stdint.h\" // Needed for Windows GCC 6.x compatibility\n#ifndef M_PI\n#define M_PI 3.141592653589793238462643383279502884L\n#endif\n#ifndef M_SQRT1_2\n#define M_SQRT1_2 0.707106781186547524400844362104849039L\n#endif\n\n// Step P2: Declare the IDX4S(gf,i,j,k) macro, which enables us to store 4-dimensions of\n// data in a 1D array. In this case, consecutive values of \"i\" \n// (all other indices held to a fixed value) are consecutive in memory, where \n// consecutive values of \"j\" (fixing all other indices) are separated by \n// Nxx_plus_2NGHOSTS0 elements in memory. Similarly, consecutive values of\n// \"k\" are separated by Nxx_plus_2NGHOSTS0*Nxx_plus_2NGHOSTS1 in memory, etc.\n#define IDX4S(g,i,j,k) \\\n( (i) + Nxx_plus_2NGHOSTS0 * ( (j) + Nxx_plus_2NGHOSTS1 * ( (k) + Nxx_plus_2NGHOSTS2 * (g) ) ) )\n#define IDX3S(i,j,k) ( (i) + Nxx_plus_2NGHOSTS0 * ( (j) + Nxx_plus_2NGHOSTS1 * ( (k) ) ) )\n#define LOOP_REGION(i0min,i0max, i1min,i1max, i2min,i2max) \\\n for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++)\n#define LOOP_ALL_GFS_GPS(ii) _Pragma(\"omp parallel for\") \\\n for(int (ii)=0;(ii)<Nxx_plus_2NGHOSTS_tot*NUM_EVOL_GFS;(ii)++)\n\n// Step P3: Set UUGF and VVGF macros, as well as xxCart()\n#include \"boundary_conditions/gridfunction_defines.h\"\n\n// Step P4: Set xxCart(const paramstruct *restrict params, \n// REAL *restrict xx[3],\n// const int i0,const int i1,const int i2, \n// REAL xCart[3]),\n// which maps xx->Cartesian via\n// {xx[0][i0],xx[1][i1],xx[2][i2]}->{xCart[0],xCart[1],xCart[2]}\n#include \"xxCart.h\"\n\n// Step P5: Defines set_Nxx_dxx_invdx_params__and__xx(const int EigenCoord, const int Nxx[3], \n// paramstruct *restrict params, REAL *restrict xx[3]),\n// which sets params Nxx,Nxx_plus_2NGHOSTS,dxx,invdx, and xx[] for\n// the chosen Eigen-CoordSystem if EigenCoord==1, or\n// CoordSystem if EigenCoord==0.\n#include \"set_Nxx_dxx_invdx_params__and__xx.h\"\n\n// Step P6: Include basic functions needed to impose curvilinear\n// parity and boundary conditions.\n#include \"boundary_conditions/CurviBC_include_Cfunctions.h\"\n\n// Step P7: Find the CFL-constrained timestep\n#include \"find_timestep.h\"\n\n// Part P8: Declare the function for the exact solution at a single point. time==0 corresponds to the initial data.\n#include \"exact_solution_single_point.h\"\n\n// Part P9: Declare the function for the exact solution at all points. 
time==0 corresponds to the initial data.\n#include \"exact_solution_all_points.h\"\n\n// Part P10: Declare the function to evaluate the scalar wave RHSs\n#include \"rhs_eval.h\"\n\n// main() function:\n// Step 0: Read command-line input, set up grid structure, allocate memory for gridfunctions, set up coordinates\n// Step 1: Set up scalar wave initial data\n// Step 2: Output relative error between numerical and exact solution.\n// Step 3: Evolve scalar wave initial data forward in time using Method of Lines with chosen RK-like algorithm,\n// applying quadratic extrapolation outer boundary conditions.\n// Step 4: Free all allocated memory\nint main(int argc, const char *argv[]) {\n paramstruct params;\n#include \"set_Cparameters_default.h\"\n \n // Step 0a: Read command-line input, error out if nonconformant\n if(argc != 4 || atoi(argv[1]) < NGHOSTS || atoi(argv[2]) < NGHOSTS || atoi(argv[3]) < NGHOSTS) {\n printf(\"Error: Expected one command-line argument: ./ScalarWaveCurvilinear_Playground Nx0 Nx1 Nx2,\\n\");\n printf(\"where Nx[0,1,2] is the number of grid points in the 0, 1, and 2 directions.\\n\");\n printf(\"Nx[] MUST BE larger than NGHOSTS (= %d)\\n\",NGHOSTS);\n exit(1);\n }\n // Step 0b: Set up numerical grid structure, first in space...\n const int Nxx[3] = { atoi(argv[1]), atoi(argv[2]), atoi(argv[3]) };\n if(Nxx[0]%2 != 0 || Nxx[1]%2 != 0 || Nxx[2]%2 != 0) {\n printf(\"Error: Cannot guarantee a proper cell-centered grid if number of grid cells not set to even number.\\n\");\n printf(\" For example, in case of angular directions, proper symmetry zones will not exist.\\n\");\n exit(1);\n }\n\n // Step 0c: Set free parameters, overwriting Cparameters defaults \n // by hand or with command-line input, as desired.\n#include \"free_parameters.h\"\n\n // Step 0d: Uniform coordinate grids are stored to *xx[3]\n REAL *xx[3];\n // Step 0d.i: Set bcstruct\n bc_struct bcstruct;\n {\n int EigenCoord = 1;\n // Step 0d.ii: Call set_Nxx_dxx_invdx_params__and__xx(), which sets\n // params Nxx,Nxx_plus_2NGHOSTS,dxx,invdx, and xx[] for the\n // chosen Eigen-CoordSystem.\n set_Nxx_dxx_invdx_params__and__xx(EigenCoord, Nxx, ¶ms, xx);\n // Step 0d.iii: Set Nxx_plus_2NGHOSTS_tot\n#include \"set_Cparameters-nopointer.h\"\n const int Nxx_plus_2NGHOSTS_tot = Nxx_plus_2NGHOSTS0*Nxx_plus_2NGHOSTS1*Nxx_plus_2NGHOSTS2;\n // Step 0e: Find ghostzone mappings; set up bcstruct\n#include \"boundary_conditions/driver_bcstruct.h\"\n // Step 0e.i: Free allocated space for xx[][] array\n for(int i=0;i<3;i++) free(xx[i]);\n }\n \n // Step 0f: Call set_Nxx_dxx_invdx_params__and__xx(), which sets\n // params Nxx,Nxx_plus_2NGHOSTS,dxx,invdx, and xx[] for the\n // chosen (non-Eigen) CoordSystem.\n int EigenCoord = 0;\n set_Nxx_dxx_invdx_params__and__xx(EigenCoord, Nxx, ¶ms, xx);\n\n // Step 0g: Set all C parameters \"blah\" for params.blah, including\n // Nxx_plus_2NGHOSTS0 = params.Nxx_plus_2NGHOSTS0, etc.\n#include \"set_Cparameters-nopointer.h\"\n const int Nxx_plus_2NGHOSTS_tot = Nxx_plus_2NGHOSTS0*Nxx_plus_2NGHOSTS1*Nxx_plus_2NGHOSTS2;\n\n // Step 0h: Time coordinate parameters\n const REAL t_final = 0.7*domain_size; /* Final time is set so that at t=t_final, \n * data at the origin have not been corrupted \n * by the approximate outer boundary condition */\n \n // Step 0i: Set timestep based on smallest proper distance between gridpoints and CFL factor \n REAL dt = find_timestep(¶ms, xx);\n //printf(\"# Timestep set to = %e\\n\",(double)dt);\n int N_final = (int)(t_final / dt + 0.5); // The number of points in 
time. \n // Add 0.5 to account for C rounding down \n // typecasts to integers.\n int output_every_N = (int)((REAL)N_final/800.0);\n if(output_every_N == 0) output_every_N = 1;\n\n // Step 0j: Error out if the number of auxiliary gridfunctions outnumber evolved gridfunctions.\n // This is a limitation of the RK method. You are always welcome to declare & allocate \n // additional gridfunctions by hand.\n if(NUM_AUX_GFS > NUM_EVOL_GFS) {\n printf(\"Error: NUM_AUX_GFS > NUM_EVOL_GFS. Either reduce the number of auxiliary gridfunctions,\\n\");\n printf(\" or allocate (malloc) by hand storage for *diagnostic_output_gfs. \\n\");\n exit(1);\n }\n \n // Step 0k: Allocate memory for gridfunctions\n#include \"MoLtimestepping/RK_Allocate_Memory.h\"\n \n // Step 0l: Set up precomputed reference metric arrays\n // Step 0l.i: Allocate space for precomputed reference metric arrays.\n#include \"rfm_files/rfm_struct__malloc.h\"\n\n // Step 0l.ii: Define precomputed reference metric arrays.\n {\n#include \"set_Cparameters-nopointer.h\"\n#include \"rfm_files/rfm_struct__define.h\"\n }\n\n // Step 1: Set up initial data to be exact solution at time=0:\n params.time = 0.0; exact_solution_all_points(¶ms, xx, y_n_gfs);\n\n for(int n=0;n<=N_final;n++) \n { // Main loop to progress forward in time. \n\n // Step 1a: Set current time to correct value & compute exact solution\n params.time = ((REAL)n)*dt;\n\n // Step 2: Code validation: Compute log of L2 norm of difference\n // between numerical and exact solutions:\n // log_L2_Norm = log10( sqrt[Integral( [numerical - exact]^2 * dV)] ),\n // where integral is within 30% of the grid outer boundary (domain_size)\n if(n%output_every_N == 0) {\n REAL integral = 0.0;\n REAL numpts = 0.0;\n#pragma omp parallel for reduction(+:integral,numpts)\n LOOP_REGION(NGHOSTS,Nxx_plus_2NGHOSTS0-NGHOSTS, \n NGHOSTS,Nxx_plus_2NGHOSTS1-NGHOSTS, \n NGHOSTS,Nxx_plus_2NGHOSTS2-NGHOSTS) {\n REAL xCart[3]; xxCart(¶ms,xx,i0,i1,i2, xCart);\n if(sqrt(xCart[0]*xCart[0] + xCart[1]*xCart[1] + xCart[2]*xCart[2]) < domain_size*0.3) {\n REAL uu_exact,vv_exact; exact_solution_single_point(xCart[0],xCart[1],xCart[2],¶ms, \n &uu_exact,&vv_exact);\n double num = (double)y_n_gfs[IDX4S(UUGF,i0,i1,i2)];\n double exact = (double)uu_exact;\n integral += (num - exact)*(num - exact);\n numpts += 1.0;\n }\n }\n // Compute and output the log of the L2 norm.\n REAL log_L2_Norm = log10(sqrt(integral/numpts));\n printf(\"%e %e\\n\",(double)params.time,log_L2_Norm);\n }\n \n // Step 3: Step forward one timestep (t -> t+dt) in time using \n // chosen RK-like MoL timestepping algorithm\n#include \"MoLtimestepping/RK_MoL.h\"\n\n } // End main loop to progress forward in time.\n\n // Step 4: Free all allocated memory\n#include \"rfm_files/rfm_struct__freemem.h\"\n#include \"boundary_conditions/bcstruct_freemem.h\"\n#include \"MoLtimestepping/RK_Free_Memory.h\"\n for(int i=0;i<3;i++) free(xx[i]);\n return 0;\n}",
"Writing ScalarWaveCurvilinear_Playground_Ccodes//ScalarWaveCurvilinear_Playground.c\n"
]
],
[
[
"<a id='compileexec'></a>\n\n# Step 3: Compile generated C codes & solve the scalar wave equation \\[Back to [top](#toc)\\]\n$$\\label{compileexec}$$\n\nTo aid in the cross-platform-compatible (with Windows, MacOS, & Linux) compilation and execution, we make use of `cmdline_helper` [(**Tutorial**)](Tutorial-cmdline_helper.ipynb).",
"_____no_output_____"
]
],
[
[
"import cmdline_helper as cmd\n\ncmd.C_compile(os.path.join(Ccodesdir,\"ScalarWaveCurvilinear_Playground.c\"), \n os.path.join(outdir,\"ScalarWaveCurvilinear_Playground\"),compile_mode=\"optimized\")\n# !clang -Ofast -fopenmp -mavx2 -mfma ScalarWaveCurvilinear/ScalarWaveCurvilinear_Playground.c -o ScalarWaveCurvilinear_Playground -lm\n# !icc -align -qopenmp -xHost -O2 -qopt-report=5 -qopt-report-phase ipo -qopt-report-phase vec -vec-threshold1 -qopt-prefetch=4 ScalarWaveCurvilinear/ScalarWaveCurvilinear_Playground.c -o ScalarWaveCurvilinear_Playground\n# !gcc-7 -Ofast -fopenmp -march=native ScalarWaveCurvilinear/ScalarWaveCurvilinear_Playground.c -o ScalarWaveCurvilinear_Playground -lm\n\n# Change to output directory\nos.chdir(outdir)\n# Clean up existing output files\ncmd.delete_existing_files(\"out-*resolution.txt\")\n# Run executable\nif par.parval_from_str(\"reference_metric::CoordSystem\") == \"Cartesian\":\n cmd.Execute(\"ScalarWaveCurvilinear_Playground\", \"16 16 16\", \"out-lowresolution.txt\")\n cmd.Execute(\"ScalarWaveCurvilinear_Playground\", \"24 24 24\", \"out-medresolution.txt\")\nelse:\n cmd.Execute(\"ScalarWaveCurvilinear_Playground\", \"16 8 16\", \"out-lowresolution.txt\")\n # 4.28s with icc and FD order = 10.\n cmd.Execute(\"ScalarWaveCurvilinear_Playground\", \"24 12 24\", \"out-medresolution.txt\")\n # For benchmarking purposes, FD order = 4. desktop: 17.33s\n # laptop: 51.82s on icc. 45.02s on GCC 9, 45.03s on GCC 7, 51.67s on clang\n cmd.Execute(\"ScalarWaveCurvilinear_Playground\", \"48 24 48\", \"out-hghresolution.txt\")\n# Return to root directory\nos.chdir(os.path.join(\"../../\"))",
"Compiling executable...\nExecuting `gcc -Ofast -fopenmp -march=native -funroll-loops ScalarWaveCurvilinear_Playground_Ccodes/ScalarWaveCurvilinear_Playground.c -o ScalarWaveCurvilinear_Playground_Ccodes/output/ScalarWaveCurvilinear_Playground -lm`...\nFinished executing in 1.6170401573181152 seconds.\nFinished compilation.\nExecuting `taskset -c 0,1,2,3,4,5 ./ScalarWaveCurvilinear_Playground 16 8 16`...\nFinished executing in 0.2134850025177002 seconds.\nExecuting `taskset -c 0,1,2,3,4,5 ./ScalarWaveCurvilinear_Playground 24 12 24`...\nFinished executing in 0.6151793003082275 seconds.\nExecuting `taskset -c 0,1,2,3,4,5 ./ScalarWaveCurvilinear_Playground 48 24 48`...\nFinished executing in 18.449751377105713 seconds.\n"
]
],
[
[
"<a id='convergence'></a>\n\n# Step 4: Code validation: Plot the numerical error, and confirm that it converges to zero at expected rate with increasing numerical resolution (sampling) \\[Back to [top](#toc)\\]\n$$\\label{convergence}$$\nThe numerical solution $u_{\\rm num}(x0,x1,x2,t)$ should converge to the exact solution $u_{\\rm exact}(x0,x1,x2,t)$ at fourth order, which means that\n$$\nu_{\\rm num}(x0,x1,x2,t) = u_{\\rm exact}(x0,x1,x2,t) + \\mathcal{O}\\left((\\Delta x0)^4\\right)+ \\mathcal{O}\\left((\\Delta x1)^4\\right)+ \\mathcal{O}\\left((\\Delta x2)^4\\right)+ \\mathcal{O}\\left((\\Delta t)^4\\right).\n$$\n\nThus the relative error $E_{\\rm rel}$ should satisfy:\n$$\n\\left|\\frac{u_{\\rm num}(x0,x1,x2,t) - u_{\\rm exact}(x0,x1,x2,t)}{u_{\\rm exact}(x0,x1,x2,t)}\\right| + \\mathcal{O}\\left((\\Delta x0)^4\\right)+ \\mathcal{O}\\left((\\Delta x1)^4\\right)+ \\mathcal{O}\\left((\\Delta x2)^4\\right)+ \\mathcal{O}\\left((\\Delta t)^4\\right).\n$$\n\nWe confirm this convergence behavior by first solving the scalar wave equation at two resolutions: $16\\times 8\\times 16$ (or $16^3$ if `reference_metric::CoordSystem` is set to `Cartesian`), and $24\\times 12\\times 24$ (or $24^3$ if `reference_metric::CoordSystem` is set to `Cartesian`) and evaluating the maximum logarithmic relative error $\\log_{10} E_{\\rm rel,max}$ between numerical and exact solutions within a region $R < 0.1 {\\rm RMAX}$ at all iterations. \n\nSince we increase the resolution uniformly over all four coordinates $(x0,x1,x2,t)$, $E_{\\rm rel}$ should drop uniformly as $(\\Delta x0)^4$:\n$$\nE_{\\rm rel} \\propto (\\Delta x0)^4.\n$$\n\nSo at the two resolutions, we should find that\n$$\n\\frac{E_{\\rm rel}(16\\times 8\\times 16)}{E_{\\rm rel}(24\\times 12\\times 24)} = \\frac{E_{\\rm rel}(16^3)}{E_{\\rm rel}(24^3)} \\approx \\left(\\frac{(\\Delta x0)_{16}}{(\\Delta x0)_{24}}\\right)^{4} = \\left(\\frac{24}{16}\\right)^4 \\approx 5.\n$$\n\nSince we're measuring logarithmic relative error, this should be\n$$\n\\log_{10}\\left(\\frac{E_{\\rm rel}(16\\times 8\\times 16)}{E_{\\rm rel}(24\\times 12\\times 24)}\\right) = \\log_{10}\\left(\\frac{E_{\\rm rel}(16^3)}{E_{\\rm rel}(24^3)}\\right) \\approx \\log_{10}(5).\n$$",
"_____no_output_____"
]
],
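[
[
"A quick arithmetic check of the expected offset, mirroring the rescaling applied in the plotting cell below:",
"_____no_output_____"
]
],
[
[
"# The low-resolution error should exceed the medium-resolution error by (24/16)^4;\n# equivalently, shifting the N0=16 curve by 4*log10(16/24) should overlay the curves.\nimport math\nratio = (24.0/16.0)**4\nprint(ratio)                     # ~5.06, the 'approximately 5' quoted above\nprint(math.log10(ratio))        # ~0.70\nprint(4*math.log10(16.0/24.0))  # ~-0.70, the shift used in the next cell",
"_____no_output_____"
]
],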
[
[
"%matplotlib inline\nimport matplotlib.pyplot as plt\nimport mpmath as mp\nimport csv\n\ndef file_reader(filename):\n with open(filename) as file:\n reader = csv.reader(file, delimiter=\" \")\n data = list(zip(*reader))\n # data is a tuple of strings. Tuples are immutable, and we need to perform math on\n # the data, so here we convert tuple to lists of floats:\n data0 = []\n data1 = []\n for i in range(len(data[0])):\n data0.append(float(data[0][i]))\n data1.append(float(data[1][i]))\n return data0,data1\n\nfirst_col16,second_col16 = file_reader(os.path.join(outdir,'out-lowresolution.txt'))\nfirst_col24,second_col24 = file_reader(os.path.join(outdir,'out-medresolution.txt'))\n\nsecond_col16_rescaled4o = []\nsecond_col16_rescaled5o = []\nfor i in range(len(second_col16)):\n # data16 = data24*(16/24)**4\n # -> log10(data24) = log10(data24) + 4*log10(16/24)\n second_col16_rescaled4o.append(second_col16[i] + 4*mp.log10(16./24.))\n second_col16_rescaled5o.append(second_col16[i] + 5*mp.log10(16./24.))\n\n# https://matplotlib.org/gallery/text_labels_and_annotations/legend.html#sphx-glr-gallery-text-labels-and-annotations-legend-py \nfig, ax = plt.subplots()\n\nplt.title(\"Demonstrating 4th-order Convergence: \"+par.parval_from_str(\"reference_metric::CoordSystem\")+\" Coordinates\")\nplt.xlabel(\"time\")\nplt.ylabel(\"log10(Max relative error)\")\n\nax.plot(first_col24, second_col24, 'k-', label='logErel(N0=24)')\nax.plot(first_col16, second_col16_rescaled4o, 'k--', label='logErel(N0=16) + log((16/24)^4)')\nax.set_ylim([-8.05,-1.7]) # Manually set the y-axis range case, since the log10\n # relative error at t=0 could be -inf or about -16, \n # resulting in very different-looking plots \n # despite the data being the same to roundoff.\nif par.parval_from_str(\"reference_metric::CoordSystem\") == \"Cartesian\":\n ax.set_ylim([-2.68,-1.62])\nif par.parval_from_str(\"reference_metric::CoordSystem\") == \"Cylindrical\":\n ax.plot(first_col16, second_col16_rescaled5o, 'k.', label='(Assuming 5th-order convergence)')\nlegend = ax.legend(loc='lower right', shadow=True, fontsize='large')\nlegend.get_frame().set_facecolor('C1')\nplt.show()",
"_____no_output_____"
]
],
[
[
"<a id='latex_pdf_output'></a>\n\n# Step 5: Output this notebook to $\\LaTeX$-formatted PDF file \\[Back to [top](#toc)\\]\n$$\\label{latex_pdf_output}$$\n\nThe following code cell converts this Jupyter notebook into a proper, clickable $\\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename\n[Tutorial-Start_to_Finish-ScalarWaveCurvilinear.pdf](Tutorial-Start_to_Finish-ScalarWaveCurvilinear.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)",
"_____no_output_____"
]
],
[
[
"!jupyter nbconvert --to latex --template latex_nrpy_style.tplx --log-level='WARN' Tutorial-Start_to_Finish-ScalarWaveCurvilinear.ipynb\n!pdflatex -interaction=batchmode Tutorial-Start_to_Finish-ScalarWaveCurvilinear.tex\n!pdflatex -interaction=batchmode Tutorial-Start_to_Finish-ScalarWaveCurvilinear.tex\n!pdflatex -interaction=batchmode Tutorial-Start_to_Finish-ScalarWaveCurvilinear.tex\n!rm -f Tut*.out Tut*.aux Tut*.log",
"This is pdfTeX, Version 3.14159265-2.6-1.40.18 (TeX Live 2017/Debian) (preloaded format=pdflatex)\n restricted \\write18 enabled.\nentering extended mode\nThis is pdfTeX, Version 3.14159265-2.6-1.40.18 (TeX Live 2017/Debian) (preloaded format=pdflatex)\n restricted \\write18 enabled.\nentering extended mode\nThis is pdfTeX, Version 3.14159265-2.6-1.40.18 (TeX Live 2017/Debian) (preloaded format=pdflatex)\n restricted \\write18 enabled.\nentering extended mode\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e71558a54685a5df9416f6f9899f6986994882f1 | 200,102 | ipynb | Jupyter Notebook | Project/finding_donors/finding_donors.ipynb | kundan7kumar/Machine-Learning | 8b62b68324713007c967a6120a0f48498992ce2f | [
"MIT"
] | null | null | null | Project/finding_donors/finding_donors.ipynb | kundan7kumar/Machine-Learning | 8b62b68324713007c967a6120a0f48498992ce2f | [
"MIT"
] | null | null | null | Project/finding_donors/finding_donors.ipynb | kundan7kumar/Machine-Learning | 8b62b68324713007c967a6120a0f48498992ce2f | [
"MIT"
] | null | null | null | 123.519753 | 54,852 | 0.818657 | [
[
[
"# Machine Learning Engineer Nanodegree\n## Supervised Learning\n## Project: Finding Donors for *CharityML*",
"_____no_output_____"
],
[
"Welcome to the second project of the Machine Learning Engineer Nanodegree! In this notebook, some template code has already been provided for you, and it will be your job to implement the additional functionality necessary to successfully complete this project. Sections that begin with **'Implementation'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section and the specifics of the implementation are marked in the code block with a `'TODO'` statement. Please be sure to read the instructions carefully!\n\nIn addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide. \n\n>**Note:** Please specify WHICH VERSION OF PYTHON you are using when submitting this notebook. Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. In addition, Markdown cells can be edited by typically double-clicking the cell to enter edit mode.",
"_____no_output_____"
],
[
"## Getting Started\n\nIn this project, you will employ several supervised algorithms of your choice to accurately model individuals' income using data collected from the 1994 U.S. Census. You will then choose the best candidate algorithm from preliminary results and further optimize this algorithm to best model the data. Your goal with this implementation is to construct a model that accurately predicts whether an individual makes more than $50,000. This sort of task can arise in a non-profit setting, where organizations survive on donations. Understanding an individual's income can help a non-profit better understand how large of a donation to request, or whether or not they should reach out to begin with. While it can be difficult to determine an individual's general income bracket directly from public sources, we can (as we will see) infer this value from other publically available features. \n\nThe dataset for this project originates from the [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets/Census+Income). The datset was donated by Ron Kohavi and Barry Becker, after being published in the article _\"Scaling Up the Accuracy of Naive-Bayes Classifiers: A Decision-Tree Hybrid\"_. You can find the article by Ron Kohavi [online](https://www.aaai.org/Papers/KDD/1996/KDD96-033.pdf). The data we investigate here consists of small changes to the original dataset, such as removing the `'fnlwgt'` feature and records with missing or ill-formatted entries.",
"_____no_output_____"
],
[
"----\n## Exploring the Data\nRun the code cell below to load necessary Python libraries and load the census data. Note that the last column from this dataset, `'income'`, will be our target label (whether an individual makes more than, or at most, $50,000 annually). All other columns are features about each individual in the census database.",
"_____no_output_____"
]
],
[
[
"# Import libraries necessary for this project\nimport numpy as np\nimport pandas as pd\nfrom time import time\nfrom IPython.display import display # Allows the use of display() for DataFrames\n\n# Import supplementary visualization code visuals.py\nimport visuals as vs\n\n# Pretty display for notebooks\n%matplotlib inline\n\n# Load the Census dataset\ndata = pd.read_csv(\"census.csv\")\n\n# Success - Display the first record\ndisplay(data.head(n=2))",
"_____no_output_____"
]
],
[
[
"### Implementation: Data Exploration\nA cursory investigation of the dataset will determine how many individuals fit into either group, and will tell us about the percentage of these individuals making more than \\$50,000. In the code cell below, you will need to compute the following:\n- The total number of records, `'n_records'`\n- The number of individuals making more than \\$50,000 annually, `'n_greater_50k'`.\n- The number of individuals making at most \\$50,000 annually, `'n_at_most_50k'`.\n- The percentage of individuals making more than \\$50,000 annually, `'greater_percent'`.\n\n** HINT: ** You may need to look at the table above to understand how the `'income'` entries are formatted. ",
"_____no_output_____"
]
],
[
[
"print(\"Income types: \", data['income'].unique() )\n# TODO: Total number of records\nn_records = len(data.index)\n\n# TODO: Number of records where individual's income is more than $50,000\nn_greater_50k = data[data.income==\">50K\"].income.count()\n#print(n_greater_50k)\n\n# TODO: Number of records where individual's income is at most $50,000\nn_at_most_50k = data[data.income==\"<=50K\"].income.count()\n\n# TODO: Percentage of individuals whose income is more than $50,000\ngreater_percent =float (n_greater_50k)*100/n_records\n\n\n# Print the results\nprint \"Total number of records: {}\".format(n_records)\nprint \"Individuals making more than $50,000: {}\".format(n_greater_50k)\nprint \"Individuals making at most $50,000: {}\".format(n_at_most_50k)\nprint \"Percentage of individuals making more than $50,000: {:.2f}%\".format(greater_percent)",
"('Income types: ', array(['<=50K', '>50K'], dtype=object))\nTotal number of records: 45222\nIndividuals making more than $50,000: 11208\nIndividuals making at most $50,000: 34014\nPercentage of individuals making more than $50,000: 24.78%\n"
]
],
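[
[
"As a one-line cross-check of the counts above (illustrative, not required by the project), `value_counts` reproduces the same 24.78% share directly:",
"_____no_output_____"
]
],
[
[
"# Cross-check (illustrative): value_counts(normalize=True) yields the class shares directly.\nprint data['income'].value_counts()\nprint data['income'].value_counts(normalize=True) * 100",
"_____no_output_____"
]
],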
[
[
"** Featureset Exploration **\n\n* **age**: continuous. \n* **workclass**: Private, Self-emp-not-inc, Self-emp-inc, Federal-gov, Local-gov, State-gov, Without-pay, Never-worked. \n* **education**: Bachelors, Some-college, 11th, HS-grad, Prof-school, Assoc-acdm, Assoc-voc, 9th, 7th-8th, 12th, Masters, 1st-4th, 10th, Doctorate, 5th-6th, Preschool. \n* **education-num**: continuous. \n* **marital-status**: Married-civ-spouse, Divorced, Never-married, Separated, Widowed, Married-spouse-absent, Married-AF-spouse. \n* **occupation**: Tech-support, Craft-repair, Other-service, Sales, Exec-managerial, Prof-specialty, Handlers-cleaners, Machine-op-inspct, Adm-clerical, Farming-fishing, Transport-moving, Priv-house-serv, Protective-serv, Armed-Forces. \n* **relationship**: Wife, Own-child, Husband, Not-in-family, Other-relative, Unmarried. \n* **race**: Black, White, Asian-Pac-Islander, Amer-Indian-Eskimo, Other. \n* **sex**: Female, Male. \n* **capital-gain**: continuous. \n* **capital-loss**: continuous. \n* **hours-per-week**: continuous. \n* **native-country**: United-States, Cambodia, England, Puerto-Rico, Canada, Germany, Outlying-US(Guam-USVI-etc), India, Japan, Greece, South, China, Cuba, Iran, Honduras, Philippines, Italy, Poland, Jamaica, Vietnam, Mexico, Portugal, Ireland, France, Dominican-Republic, Laos, Ecuador, Taiwan, Haiti, Columbia, Hungary, Guatemala, Nicaragua, Scotland, Thailand, Yugoslavia, El-Salvador, Trinadad&Tobago, Peru, Hong, Holand-Netherlands.",
"_____no_output_____"
],
[
"----\n## Preparing the Data\nBefore data can be used as input for machine learning algorithms, it often must be cleaned, formatted, and restructured — this is typically known as **preprocessing**. Fortunately, for this dataset, there are no invalid or missing entries we must deal with, however, there are some qualities about certain features that must be adjusted. This preprocessing can help tremendously with the outcome and predictive power of nearly all learning algorithms.",
"_____no_output_____"
],
[
"### Transforming Skewed Continuous Features\nA dataset may sometimes contain at least one feature whose values tend to lie near a single number, but will also have a non-trivial number of vastly larger or smaller values than that single number. Algorithms can be sensitive to such distributions of values and can underperform if the range is not properly normalized. With the census dataset two features fit this description: '`capital-gain'` and `'capital-loss'`. \n\nRun the code cell below to plot a histogram of these two features. Note the range of the values present and how they are distributed.",
"_____no_output_____"
]
],
[
[
"# Split the data into features and target label\nincome_raw = data['income']\nfeatures_raw = data.drop('income', axis = 1)\n\n# Visualize skewed continuous features of original data\nvs.distribution(data)",
"_____no_output_____"
]
],
[
[
"For highly-skewed feature distributions such as `'capital-gain'` and `'capital-loss'`, it is common practice to apply a <a href=\"https://en.wikipedia.org/wiki/Data_transformation_(statistics)\">logarithmic transformation</a> on the data so that the very large and very small values do not negatively affect the performance of a learning algorithm. Using a logarithmic transformation significantly reduces the range of values caused by outliers. Care must be taken when applying this transformation however: The logarithm of `0` is undefined, so we must translate the values by a small amount above `0` to apply the the logarithm successfully.\n\nRun the code cell below to perform a transformation on the data and visualize the results. Again, note the range of values and how they are distributed. ",
"_____no_output_____"
]
],
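[
[
"A tiny numeric illustration (toy values, not census data) of what the transformation does: a five-orders-of-magnitude spread collapses to about one order of magnitude, and the `+ 1` shift keeps the logarithm defined at zero.",
"_____no_output_____"
]
],
[
[
"# Toy illustration of log(x + 1): large outliers are compressed, and x = 0 stays finite.\nimport numpy as np\nvals = np.array([0.0, 10.0, 1000.0, 99999.0])\nprint np.log(vals + 1)  # [  0.     2.398  6.909 11.513]",
"_____no_output_____"
]
],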
[
[
"# Log-transform the skewed features\nskewed = ['capital-gain', 'capital-loss']\nfeatures_log_transformed = pd.DataFrame(data = features_raw)\nfeatures_log_transformed[skewed] = features_raw[skewed].apply(lambda x: np.log(x + 1))\n\n# Visualize the new log distributions\nvs.distribution(features_log_transformed, transformed = True)",
"_____no_output_____"
]
],
[
[
"### Normalizing Numerical Features\nIn addition to performing transformations on features that are highly skewed, it is often good practice to perform some type of scaling on numerical features. Applying a scaling to the data does not change the shape of each feature's distribution (such as `'capital-gain'` or `'capital-loss'` above); however, normalization ensures that each feature is treated equally when applying supervised learners. Note that once scaling is applied, observing the data in its raw form will no longer have the same original meaning, as exampled below.\n\nRun the code cell below to normalize each numerical feature. We will use [`sklearn.preprocessing.MinMaxScaler`](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MinMaxScaler.html) for this.",
"_____no_output_____"
]
],
[
[
"# Import sklearn.preprocessing.StandardScaler\nfrom sklearn.preprocessing import MinMaxScaler\n\n# Initialize a scaler, then apply it to the features\nscaler = MinMaxScaler() # default=(0, 1)\nnumerical = ['age', 'education-num', 'capital-gain', 'capital-loss', 'hours-per-week']\n\nfeatures_log_minmax_transform = pd.DataFrame(data = features_log_transformed)\nfeatures_log_minmax_transform[numerical] = scaler.fit_transform(features_log_transformed[numerical])\n\n# Show an example of a record with scaling applied\ndisplay(features_log_minmax_transform.head(n = 5))",
"_____no_output_____"
]
],
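[
[
"# Sanity check (illustrative only): MinMaxScaler implements x' = (x - min) / (max - min).\n# We verify this by scaling the 'age' column manually and comparing with the scaler output.\nage = features_log_transformed['age']\nmanual = (age - age.min()) / (age.max() - age.min())\nprint np.allclose(manual, features_log_minmax_transform['age'])",
"_____no_output_____"
]
],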
[
[
"### Implementation: Data Preprocessing\n\nFrom the table in **Exploring the Data** above, we can see there are several features for each record that are non-numeric. Typically, learning algorithms expect input to be numeric, which requires that non-numeric features (called *categorical variables*) be converted. One popular way to convert categorical variables is by using the **one-hot encoding** scheme. One-hot encoding creates a _\"dummy\"_ variable for each possible category of each non-numeric feature. For example, assume `someFeature` has three possible entries: `A`, `B`, or `C`. We then encode this feature into `someFeature_A`, `someFeature_B` and `someFeature_C`.\n\n| | someFeature | | someFeature_A | someFeature_B | someFeature_C |\n| :-: | :-: | | :-: | :-: | :-: |\n| 0 | B | | 0 | 1 | 0 |\n| 1 | C | ----> one-hot encode ----> | 0 | 0 | 1 |\n| 2 | A | | 1 | 0 | 0 |\n\nAdditionally, as with the non-numeric features, we need to convert the non-numeric target label, `'income'` to numerical values for the learning algorithm to work. Since there are only two possible categories for this label (\"<=50K\" and \">50K\"), we can avoid using one-hot encoding and simply encode these two categories as `0` and `1`, respectively. In code cell below, you will need to implement the following:\n - Use [`pandas.get_dummies()`](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.get_dummies.html?highlight=get_dummies#pandas.get_dummies) to perform one-hot encoding on the `'features_log_minmax_transform'` data.\n - Convert the target label `'income_raw'` to numerical entries.\n - Set records with \"<=50K\" to `0` and records with \">50K\" to `1`.",
"_____no_output_____"
]
],
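[
[
"# Toy demonstration (illustrative only) reproducing the 'someFeature' table above;\n# the actual encoding of the census features is performed in the next cell.\nexample = pd.Series(['B', 'C', 'A'], name='someFeature')\ndisplay(pd.get_dummies(example))",
"_____no_output_____"
]
],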
[
[
"# TODO: One-hot encode the 'features_log_minmax_transform' data using pandas.get_dummies()\nfeatures_final = pd.get_dummies(features_raw)\n\n# TODO: Encode the 'income_raw' data to numerical values\nincome = income_raw.apply(lambda x: 0 if x == \"<=50K\" else 1)\n\n# Print the number of features after one-hot encoding\nencoded = list(features_final.columns)\nprint \"{} total features after one-hot encoding.\".format(len(encoded))\n\n# Uncomment the following line to see the encoded feature names\nprint encoded",
"103 total features after one-hot encoding.\n['age', 'education-num', 'capital-gain', 'capital-loss', 'hours-per-week', 'workclass_ Federal-gov', 'workclass_ Local-gov', 'workclass_ Private', 'workclass_ Self-emp-inc', 'workclass_ Self-emp-not-inc', 'workclass_ State-gov', 'workclass_ Without-pay', 'education_level_ 10th', 'education_level_ 11th', 'education_level_ 12th', 'education_level_ 1st-4th', 'education_level_ 5th-6th', 'education_level_ 7th-8th', 'education_level_ 9th', 'education_level_ Assoc-acdm', 'education_level_ Assoc-voc', 'education_level_ Bachelors', 'education_level_ Doctorate', 'education_level_ HS-grad', 'education_level_ Masters', 'education_level_ Preschool', 'education_level_ Prof-school', 'education_level_ Some-college', 'marital-status_ Divorced', 'marital-status_ Married-AF-spouse', 'marital-status_ Married-civ-spouse', 'marital-status_ Married-spouse-absent', 'marital-status_ Never-married', 'marital-status_ Separated', 'marital-status_ Widowed', 'occupation_ Adm-clerical', 'occupation_ Armed-Forces', 'occupation_ Craft-repair', 'occupation_ Exec-managerial', 'occupation_ Farming-fishing', 'occupation_ Handlers-cleaners', 'occupation_ Machine-op-inspct', 'occupation_ Other-service', 'occupation_ Priv-house-serv', 'occupation_ Prof-specialty', 'occupation_ Protective-serv', 'occupation_ Sales', 'occupation_ Tech-support', 'occupation_ Transport-moving', 'relationship_ Husband', 'relationship_ Not-in-family', 'relationship_ Other-relative', 'relationship_ Own-child', 'relationship_ Unmarried', 'relationship_ Wife', 'race_ Amer-Indian-Eskimo', 'race_ Asian-Pac-Islander', 'race_ Black', 'race_ Other', 'race_ White', 'sex_ Female', 'sex_ Male', 'native-country_ Cambodia', 'native-country_ Canada', 'native-country_ China', 'native-country_ Columbia', 'native-country_ Cuba', 'native-country_ Dominican-Republic', 'native-country_ Ecuador', 'native-country_ El-Salvador', 'native-country_ England', 'native-country_ France', 'native-country_ Germany', 'native-country_ Greece', 'native-country_ Guatemala', 'native-country_ Haiti', 'native-country_ Holand-Netherlands', 'native-country_ Honduras', 'native-country_ Hong', 'native-country_ Hungary', 'native-country_ India', 'native-country_ Iran', 'native-country_ Ireland', 'native-country_ Italy', 'native-country_ Jamaica', 'native-country_ Japan', 'native-country_ Laos', 'native-country_ Mexico', 'native-country_ Nicaragua', 'native-country_ Outlying-US(Guam-USVI-etc)', 'native-country_ Peru', 'native-country_ Philippines', 'native-country_ Poland', 'native-country_ Portugal', 'native-country_ Puerto-Rico', 'native-country_ Scotland', 'native-country_ South', 'native-country_ Taiwan', 'native-country_ Thailand', 'native-country_ Trinadad&Tobago', 'native-country_ United-States', 'native-country_ Vietnam', 'native-country_ Yugoslavia']\n"
]
],
[
[
"### Shuffle and Split Data\nNow all _categorical variables_ have been converted into numerical features, and all numerical features have been normalized. As always, we will now split the data (both features and their labels) into training and test sets. 80% of the data will be used for training and 20% for testing.\n\nRun the code cell below to perform this split.",
"_____no_output_____"
]
],
[
[
"# Import train_test_split\nfrom sklearn.cross_validation import train_test_split\n\n# Split the 'features' and 'income' data into training and testing sets\nX_train, X_test, y_train, y_test = train_test_split(features_final, \n income, \n test_size = 0.2, \n random_state = 0)\n\n# Show the results of the split\nprint \"Training set has {} samples.\".format(X_train.shape[0])\nprint \"Testing set has {} samples.\".format(X_test.shape[0])",
"Training set has 36177 samples.\nTesting set has 9045 samples.\n"
]
],
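[
[
"# Optional check (illustrative only): because train_test_split samples uniformly at\n# random here, the fraction of individuals earning more than $50,000 should be\n# similar in the training and testing sets.\nprint \"Fraction earning >50K -- train: {:.4f}, test: {:.4f}\".format(y_train.mean(), y_test.mean())",
"_____no_output_____"
]
],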
[
[
"----\n## Evaluating Model Performance\nIn this section, we will investigate four different algorithms, and determine which is best at modeling the data. Three of these algorithms will be supervised learners of your choice, and the fourth algorithm is known as a *naive predictor*.",
"_____no_output_____"
],
[
"### Metrics and the Naive Predictor\n*CharityML*, equipped with their research, knows individuals that make more than \\$50,000 are most likely to donate to their charity. Because of this, *CharityML* is particularly interested in predicting who makes more than \\$50,000 accurately. It would seem that using **accuracy** as a metric for evaluating a particular model's performace would be appropriate. Additionally, identifying someone that *does not* make more than \\$50,000 as someone who does would be detrimental to *CharityML*, since they are looking to find individuals willing to donate. Therefore, a model's ability to precisely predict those that make more than \\$50,000 is *more important* than the model's ability to **recall** those individuals. We can use **F-beta score** as a metric that considers both precision and recall:\n\n$$ F_{\\beta} = (1 + \\beta^2) \\cdot \\frac{precision \\cdot recall}{\\left( \\beta^2 \\cdot precision \\right) + recall} $$\n\nIn particular, when $\\beta = 0.5$, more emphasis is placed on precision. This is called the **F$_{0.5}$ score** (or F-score for simplicity).\n\nLooking at the distribution of classes (those who make at most \\$50,000, and those who make more), it's clear most individuals do not make more than \\$50,000. This can greatly affect **accuracy**, since we could simply say *\"this person does not make more than \\$50,000\"* and generally be right, without ever looking at the data! Making such a statement would be called **naive**, since we have not considered any information to substantiate the claim. It is always important to consider the *naive prediction* for your data, to help establish a benchmark for whether a model is performing well. That been said, using that prediction would be pointless: If we predicted all people made less than \\$50,000, *CharityML* would identify no one as donors. \n\n\n#### Note: Recap of accuracy, precision, recall\n\n** Accuracy ** measures how often the classifier makes the correct prediction. It’s the ratio of the number of correct predictions to the total number of predictions (the number of test data points).\n\n** Precision ** tells us what proportion of messages we classified as spam, actually were spam.\nIt is a ratio of true positives(words classified as spam, and which are actually spam) to all positives(all words classified as spam, irrespective of whether that was the correct classificatio), in other words it is the ratio of\n\n`[True Positives/(True Positives + False Positives)]`\n\n** Recall(sensitivity)** tells us what proportion of messages that actually were spam were classified by us as spam.\nIt is a ratio of true positives(words classified as spam, and which are actually spam) to all the words that were actually spam, in other words it is the ratio of\n\n`[True Positives/(True Positives + False Negatives)]`\n\nFor classification problems that are skewed in their classification distributions like in our case, for example if we had a 100 text messages and only 2 were spam and the rest 98 weren't, accuracy by itself is not a very good metric. We could classify 90 messages as not spam(including the 2 that were spam but we classify them as not spam, hence they would be false negatives) and 10 as spam(all 10 false positives) and still get a reasonably good accuracy score. For such cases, precision and recall come in very handy. These two metrics can be combined to get the F1 score, which is weighted average(harmonic mean) of the precision and recall scores. 
This score can range from 0 to 1, with 1 being the best possible F1 score(we take the harmonic mean as we are dealing with ratios).",
"_____no_output_____"
],
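[
"As a quick sanity check on the formula above, `sklearn.metrics.fbeta_score` reproduces it directly (the labels below are toy values, purely for illustration):\n\n```python\nfrom sklearn.metrics import fbeta_score\n\ny_true = [1, 1, 1, 0, 0]\ny_pred = [1, 1, 0, 1, 0]\n# TP = 2, FP = 1, FN = 1, so precision = recall = 2/3 and hence F_0.5 = 2/3\nprint fbeta_score(y_true, y_pred, beta=0.5)\n```",
"_____no_output_____"
],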
[
"### Question 1 - Naive Predictor Performace\n* If we chose a model that always predicted an individual made more than $50,000, what would that model's accuracy and F-score be on this dataset? You must use the code cell below and assign your results to `'accuracy'` and `'fscore'` to be used later.\n\n** Please note ** that the the purpose of generating a naive predictor is simply to show what a base model without any intelligence would look like. In the real world, ideally your base model would be either the results of a previous model or could be based on a research paper upon which you are looking to improve. When there is no benchmark model set, getting a result better than random choice is a place you could start from.\n\n** HINT: ** \n\n* When we have a model that always predicts '1' (i.e. the individual makes more than 50k) then our model will have no True Negatives(TN) or False Negatives(FN) as we are not making any negative('0' value) predictions. Therefore our Accuracy in this case becomes the same as our Precision(True Positives/(True Positives + False Positives)) as every prediction that we have made with value '1' that should have '0' becomes a False Positive; therefore our denominator in this case is the total number of records we have in total. \n* Our Recall score(True Positives/(True Positives + False Negatives)) in this setting becomes 1 as we have no False Negatives.",
"_____no_output_____"
]
],
[
[
"#need to check it\n\nTP =np.sum(income) # Counting the ones as this is the naive case. Note that 'income' is the 'income_raw' data \n#encoded to numerical values done in the data preprocessing step.\nFP = float(income.count()) - TP # Specific to the naive case\n\nTN = 0 # No predicted negatives in the naive case\nFN = 0 # No predicted negatives in the naive case\n#print(TP)\n#print(FP)\n\n# TODO: Calculate accuracy\naccuracy = float(TP)/(TP+FP)\n#print(accuracy)\n\n# TODO: Calculate F-score using the formula above for beta = 0.5\nbeta = 0.5\nprecision =TP/(TP+FP)\n#print(precision)\nrecall = TP/(TP+FN)\n#print(recalll)\n\n# TODO: Calculate F-score using the formula above for beta = 0.5 and correct values for precision and recall.\nfscore = (1 + beta**2) * (accuracy * recall) / ((beta**2 * precision) + recall)\n#print(fscore)\n\n# Print the results \nprint \"Naive Predictor: [Accuracy score: {:.4f}, F-score: {:.4f}]\".format(accuracy, fscore)\n\n",
"Naive Predictor: [Accuracy score: 0.2478, F-score: 0.2917]\n"
],
[
"#few Important points \n# accuracy = (n_greater_50k/n_records) \n#TP = n_greater_50k\n#TN = 0\n#FP = n_at_most_50k\n#FN=0",
"_____no_output_____"
]
],
[
[
"### Supervised Learning Models\n**The following are some of the supervised learning models that are currently available in** [`scikit-learn`](http://scikit-learn.org/stable/supervised_learning.html) **that you may choose from:**\n- Gaussian Naive Bayes (GaussianNB)\n- Decision Trees\n- Ensemble Methods (Bagging, AdaBoost, Random Forest, Gradient Boosting)\n- K-Nearest Neighbors (KNeighbors)\n- Stochastic Gradient Descent Classifier (SGDC)\n- Support Vector Machines (SVM)\n- Logistic Regression",
"_____no_output_____"
],
[
"### Question 2 - Model Application\nList three of the supervised learning models above that are appropriate for this problem that you will test on the census data. For each model chosen\n\n- Describe one real-world application in industry where the model can be applied. \n- What are the strengths of the model; when does it perform well?\n- What are the weaknesses of the model; when does it perform poorly?\n- What makes this model a good candidate for the problem, given what you know about the data?\n\n** HINT: **\n\nStructure your answer in the same format as above^, with 4 parts for each of the three models you pick. Please include references with your answer.\n\n",
"_____no_output_____"
],
[
"**Answer: **\nThe three supervised learning models which is appropriate for problem are :\n\n a. Logistic Regression\n b. Support Vector Machines\n c. Decision Tree\n \nLogistic Regression : It is used for classification problems and it is a binary classifier which \n gives takes the output 0 or 1.\n \n Real World application:\n a. Financial companies: to prediction to loan andd credit defaulters or not. \n b. Medical fields: e.g .Finding the person is obese or not depending upon body mass index.\n \n Strengths:\n a. Good for smaller datasets or limited features in the larger datasets.\n b. simple and fast algorithms for providing the probabilistic interpretation.\n \n Weakness:\n a. cannot find the complex faetures relations in the datasets.\n b. Need fine tuning to avoid overfitting or underfitting.\n \n What makes a model good for problem:\n a. It is used for binary classification problem.Our problem is also an example of binary classification \n problem to predict the income is greater or lesser than $50k.\n \nSupport Vector Machine : used for classifictaion as well as for regression problems.\n \n Real World application:There are various real time applications for e.g:\n a.Stock market predictions : Able to predict the price volatility and momentum of individual stocks.\n \n Strengths :\n a. Fast training Speed.\n b. Perform well when there is a definite distinction between two classes.\n c. Using kernel trick, it fit the data very well and give accurate results.\n d. Powerful algorithm to capture complex relationships automatically in datasets\n \n Weakness :\n a. Support Vector Machines perform badly when the classification problem is not binary.\n b. Slow while testing prediction.\n c. Difficult to interpret the boundary plane if there are complex data transformations.\n\n What makes a model good for problem :\n a. In our problem, We have to find the person above or below $50k earnings. It is binary classification.\n So it is good choice for model.\n\nDecision Tree :\n \n Real World application:\n a. Medical problems Diagnosis : Able to identify whether person has serious illness or not.\n \n Strengths :\n a. It is simple to understand and interpret even for complex datasets.\n b. It is very fast training speed and easy to visualize.\n c. Automatically selects the feature from the datasets by screening the variable.\n d. Performance will be higher if they have smaller decision trees rather than bigger trees.\n\n Weakness :\n a. It can cause overfit easily due to grow of the height of the tree.\n b. Need high memory and take computation time if many values are uncertain or many outcomes are linked.\n \n What makes a model good for problem :\n a. Decision tree will be suited for one-hot encoding of features in our dataset as able to handle lots of \n data easily and also easy to visualize.\n \n \nReferences:\n\n [1] https://machinelearningmastery.com/logistic-regression-for-machine-learning/\n [2] https://www.quora.com/What-are-the-advantages-of-support-vector-machines-SVM-compared-with-linear-regressionor-logistic-regression\n [3] http://scikit-learn.org/stable/modules/svm.html\n [4] https://data-flair.training/blogs/r-decision-trees/\n [5] https://en.wikipedia.org/wiki/Decision_tree\n [6] http://www.brighthubpm.com/project-planning/106005-disadvantages-to-using-decision-trees/\n \n\n",
"_____no_output_____"
],
[
"### Implementation - Creating a Training and Predicting Pipeline\nTo properly evaluate the performance of each model you've chosen, it's important that you create a training and predicting pipeline that allows you to quickly and effectively train models using various sizes of training data and perform predictions on the testing data. Your implementation here will be used in the following section.\nIn the code block below, you will need to implement the following:\n - Import `fbeta_score` and `accuracy_score` from [`sklearn.metrics`](http://scikit-learn.org/stable/modules/classes.html#sklearn-metrics-metrics).\n - Fit the learner to the sampled training data and record the training time.\n - Perform predictions on the test data `X_test`, and also on the first 300 training points `X_train[:300]`.\n - Record the total prediction time.\n - Calculate the accuracy score for both the training subset and testing set.\n - Calculate the F-score for both the training subset and testing set.\n - Make sure that you set the `beta` parameter!",
"_____no_output_____"
]
],
[
[
"# TODO: Import two metrics from sklearn - fbeta_score and accuracy_score\nfrom sklearn.metrics import accuracy_score, fbeta_score\ndef train_predict(learner, sample_size, X_train, y_train, X_test, y_test): \n '''\n inputs:\n - learner: the learning algorithm to be trained and predicted on\n - sample_size: the size of samples (number) to be drawn from training set\n - X_train: features training set\n - y_train: income training set\n - X_test: features testing set\n - y_test: income testing set\n '''\n \n results = {}\n beta=0.5\n X_train = X_train[:sample_size]\n y_train = y_train[:sample_size]\n \n # TODO: Fit the learner to the training data using slicing with 'sample_size' using .fit(training_features[:], training_labels[:])\n start = time() # Get start time\n learner =learner.fit(X_train,y_train)\n end = time() # Get end time\n \n # TODO: Calculate the training time\n results['train_time'] = end-start\n \n # TODO: Get the predictions on the test set(X_test),\n # then get predictions on the first 300 training samples(X_train) using .predict()\n start = time() # Get start time\n \n predictions_test = learner.predict(X_test)\n predictions_train = learner.predict(X_train[:300])\n end = time() # Get end time\n \n # TODO: Calculate the total prediction time\n results['pred_time'] = end - start\n \n # TODO: Compute accuracy on the first 300 training samples which is y_train[:300]\n results['acc_train'] = accuracy_score(y_train[:300], predictions_train[:300])\n \n # TODO: Compute accuracy on test set using accuracy_score()\n results['acc_test'] = accuracy_score(y_test, predictions_test)\n \n # TODO: Compute F-score on the the first 300 training samples using fbeta_score()\n results['f_train'] = fbeta_score(y_train[:300], predictions_train[:300], beta)\n \n # TODO: Compute F-score on the test set which is y_test\n results['f_test'] = fbeta_score(y_test, predictions_test, beta)\n \n # Success\n print \"{} trained on {} samples.\".format(learner.__class__.__name__, sample_size)\n \n # Return the results\n return results",
"_____no_output_____"
]
],
[
[
"### Implementation: Initial Model Evaluation\nIn the code cell, you will need to implement the following:\n- Import the three supervised learning models you've discussed in the previous section.\n- Initialize the three models and store them in `'clf_A'`, `'clf_B'`, and `'clf_C'`.\n - Use a `'random_state'` for each model you use, if provided.\n - **Note:** Use the default settings for each model — you will tune one specific model in a later section.\n- Calculate the number of records equal to 1%, 10%, and 100% of the training data.\n - Store those values in `'samples_1'`, `'samples_10'`, and `'samples_100'` respectively.\n\n**Note:** Depending on which algorithms you chose, the following implementation may take some time to run!",
"_____no_output_____"
]
],
[
[
"# TODO: Import the three supervised learning models from sklearn\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.svm import SVC\n\n# TODO: Initialize the three models\nclf_A = LogisticRegression(random_state=0)\nclf_B = DecisionTreeClassifier(random_state=0)\nclf_C = SVC(random_state=0)\n\n# TODO: Calculate the number of samples for 1%, 10%, and 100% of the training data\n# HINT: samples_100 is the entire training set i.e. len(y_train)\n# HINT: samples_10 is 10% of samples_100\n# HINT: samples_1 is 1% of samples_100\nsamples_100 = int(len(X_train))\nsamples_10 = int(.1*len(X_train))\nsamples_1 = int(.01*len(X_train))\n\n# Collect results on the learners\nresults = {}\nfor clf in [clf_A, clf_B, clf_C]:\n clf_name = clf.__class__.__name__\n results[clf_name] = {}\n for i, samples in enumerate([samples_1, samples_10, samples_100]):\n results[clf_name][i] = \\\n train_predict(clf, samples, X_train, y_train, X_test, y_test)\n\n# Run metrics visualization for the three supervised learning models chosen\nvs.evaluate(results, accuracy, fscore)",
"LogisticRegression trained on 361 samples.\nLogisticRegression trained on 3617 samples.\nLogisticRegression trained on 36177 samples.\nDecisionTreeClassifier trained on 361 samples.\nDecisionTreeClassifier trained on 3617 samples.\nDecisionTreeClassifier trained on 36177 samples.\nSVC trained on 361 samples.\nSVC trained on 3617 samples.\nSVC trained on 36177 samples.\n"
]
],
[
[
"----\n## Improving Results\nIn this final section, you will choose from the three supervised learning models the *best* model to use on the student data. You will then perform a grid search optimization for the model over the entire training set (`X_train` and `y_train`) by tuning at least one parameter to improve upon the untuned model's F-score. ",
"_____no_output_____"
],
[
"### Question 3 - Choosing the Best Model\n\n* Based on the evaluation you performed earlier, in one to two paragraphs, explain to *CharityML* which of the three models you believe to be most appropriate for the task of identifying individuals that make more than \\$50,000. \n\n** HINT: ** \nLook at the graph at the bottom left from the cell above(the visualization created by `vs.evaluate(results, accuracy, fscore)`) and check the F score for the testing set when 100% of the training set is used. Which model has the highest score? Your answer should include discussion of the:\n* metrics - F score on the testing when 100% of the training data is used, \n* prediction/training time\n* the algorithm's suitability for the data.",
"_____no_output_____"
]
],
[
[
"for i in results.items():\n print(i[0])\n display(pd.DataFrame(i[1]).rename(columns={0:'1%', 1:'10%', 2:'100%'}))",
"LogisticRegression\n"
]
],
[
[
"**Answer: **\nThe most approximate model from the above analysis is Logistic Regression.The training/testing speed as well as accuracy/F1 scores of test data with different volumes are more balanced in case of Logistic regression.Also It gives slightly better Accuracy and F1 score than others.It gives low variance but higher accuracy/F1 score.Moreover,it is simple and very fast algorithm.\n\nFor Support vector Machine(SVC) ,it has slightly lower accuracy and F1 Score and also tuning the hyperparameter is difficult because of high training time.\n\nFor Decision Tree ,it has very high variance than SVC and logistic regression can cause overfitting.Moreover, it has lower accuracy and F1 score than logistic regression.",
"_____no_output_____"
],
[
"### Question 4 - Describing the Model in Layman's Terms\n\n* In one to two paragraphs, explain to *CharityML*, in layman's terms, how the final model chosen is supposed to work. Be sure that you are describing the major qualities of the model, such as how the model is trained and how the model makes a prediction. Avoid using advanced mathematical jargon, such as describing equations.\n\n** HINT: **\n\nWhen explaining your model, if using external resources please include all citations.",
"_____no_output_____"
],
[
"**Answer: ** \n\nThe final model which i think is Logistic Regression. From the given problem we have predict the person has income greater than 50k or less than 50k based on various features. It is a classification problem and as we know that logistic regression is binary classifier, so it is best for such problems. \n\nIt is trained on the datasets, having various features club together with the class label of each person for a group of people.It learns from the various features and then pass through probability function which just maps the features of a person between 0 and 1 belonging to one class. \n\nFor making prediction ,we just put the features of a person with unknown class to the model. depending upon the probabilities whether it is greater or less than 0.5, it assign that person has income greater or lesser than 50k.\n\nReferences:\n1. http://scikit-learn.org/stable/auto_examples/linear_model/plot_iris_logistic.html\n2. https://machinelearningmastery.com/logistic-regression-for-machine-learning/",
"_____no_output_____"
],
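[
"As a small numeric sketch of this idea (illustrative only, not the actual fitted model), the combined score is squashed into a probability between 0 and 1, and the 0.5 threshold then decides the predicted class:\n\n```python\nimport numpy as np\n\ndef sigmoid(z):\n    return 1.0 / (1.0 + np.exp(-z))\n\nfor score in [-2.0, 0.0, 2.0]:\n    p = sigmoid(score)\n    print score, p, '>50K' if p > 0.5 else '<=50K'\n```",
"_____no_output_____"
],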
[
"### Implementation: Model Tuning\nFine tune the chosen model. Use grid search (`GridSearchCV`) with at least one important parameter tuned with at least 3 different values. You will need to use the entire training set for this. In the code cell below, you will need to implement the following:\n- Import [`sklearn.grid_search.GridSearchCV`](http://scikit-learn.org/0.17/modules/generated/sklearn.grid_search.GridSearchCV.html) and [`sklearn.metrics.make_scorer`](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html).\n- Initialize the classifier you've chosen and store it in `clf`.\n - Set a `random_state` if one is available to the same state you set before.\n- Create a dictionary of parameters you wish to tune for the chosen model.\n - Example: `parameters = {'parameter' : [list of values]}`.\n - **Note:** Avoid tuning the `max_features` parameter of your learner if that parameter is available!\n- Use `make_scorer` to create an `fbeta_score` scoring object (with $\\beta = 0.5$).\n- Perform grid search on the classifier `clf` using the `'scorer'`, and store it in `grid_obj`.\n- Fit the grid search object to the training data (`X_train`, `y_train`), and store it in `grid_fit`.\n\n**Note:** Depending on the algorithm chosen and the parameter list, the following implementation may take some time to run!",
"_____no_output_____"
]
],
[
[
"# TODO: Import 'GridSearchCV', 'make_scorer', and any other necessary libraries\nfrom sklearn.metrics import make_scorer\nfrom sklearn.model_selection import GridSearchCV\n\n# TODO: Initialize the classifier\nclf = LogisticRegression()\n\n# TODO: Create the parameters list you wish to tune, using a dictionary if needed.\n# HINT: parameters = {'parameter_1': [value1, value2], 'parameter_2': [value1, value2]}\nparameters = {'C': [1.0, 10.0, 100.0, 1000.0],'solver': ['newton-cg', 'lbfgs'],'multi_class': ['ovr', 'multinomial']}\n\n# TODO: Make an fbeta_score scoring object using make_scorer()\nscorer = make_scorer(fbeta_score, beta=0.5)\n\n# TODO: Perform grid search on the classifier using 'scorer' as the scoring method using GridSearchCV()\ngrid_obj = GridSearchCV(clf, parameters, scoring=scorer)\n\n# TODO: Fit the grid search object to the training data and find the optimal parameters using fit()\ngrid_fit = grid_obj.fit(X_train, y_train)\n\n# Get the estimator\nbest_clf = grid_fit.best_estimator_\n\n# Make predictions using the unoptimized and model\npredictions = (clf.fit(X_train, y_train)).predict(X_test)\nbest_predictions = best_clf.predict(X_test)\n\n# Report the before-and-afterscores\nprint \"Unoptimized model\\n------\"\nprint \"Accuracy score on testing data: {:.4f}\".format(accuracy_score(y_test, predictions))\nprint \"F-score on testing data: {:.4f}\".format(fbeta_score(y_test, predictions, beta = 0.5))\nprint \"\\nOptimized Model\\n------\"\nprint \"Final accuracy score on the testing data: {:.4f}\".format(accuracy_score(y_test, best_predictions))\nprint \"Final F-score on the testing data: {:.4f}\".format(fbeta_score(y_test, best_predictions, beta = 0.5))\nprint \"--Best Estimator\\n--\"\nprint best_clf",
"Unoptimized model\n------\nAccuracy score on testing data: 0.8419\nF-score on testing data: 0.6832\n\nOptimized Model\n------\nFinal accuracy score on the testing data: 0.8418\nFinal F-score on the testing data: 0.6829\n--Best Estimator\n--\nLogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True,\n intercept_scaling=1, max_iter=100, multi_class='multinomial',\n n_jobs=1, penalty='l2', random_state=None, solver='newton-cg',\n tol=0.0001, verbose=0, warm_start=False)\n"
]
],
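[
[
"# Optional inspection (illustrative only): a fitted GridSearchCV object exposes the\n# winning parameter combination and its mean cross-validated F-score.\nprint grid_fit.best_params_\nprint grid_fit.best_score_",
"_____no_output_____"
]
],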
[
[
"### Question 5 - Final Model Evaluation\n\n* What is your optimized model's accuracy and F-score on the testing data? \n* Are these scores better or worse than the unoptimized model? \n* How do the results from your optimized model compare to the naive predictor benchmarks you found earlier in **Question 1**?_ \n\n**Note:** Fill in the table below with your results, and then provide discussion in the **Answer** box.",
"_____no_output_____"
],
[
"#### Results:\n\n| Metric | Unoptimized Model | Optimized Model |\n| :------------: | :---------------: | :-------------: | \n| Accuracy Score | 0.8419 | 0.8418 |\n| F-score | 0.6832 | 0.6829 |\n",
"_____no_output_____"
],
[
"**Answer: **\n\nBoth optimized and unoptimized models perform better than the naive predictor benchmarks. But both optimized and unoptimized models perform similarly in terms of accuracy score and F-Score.",
"_____no_output_____"
],
[
"----\n## Feature Importance\n\nAn important task when performing supervised learning on a dataset like the census data we study here is determining which features provide the most predictive power. By focusing on the relationship between only a few crucial features and the target label we simplify our understanding of the phenomenon, which is most always a useful thing to do. In the case of this project, that means we wish to identify a small number of features that most strongly predict whether an individual makes at most or more than \\$50,000.\n\nChoose a scikit-learn classifier (e.g., adaboost, random forests) that has a `feature_importance_` attribute, which is a function that ranks the importance of features according to the chosen classifier. In the next python cell fit this classifier to training set and use this attribute to determine the top 5 most important features for the census dataset.",
"_____no_output_____"
],
[
"### Question 6 - Feature Relevance Observation\nWhen **Exploring the Data**, it was shown there are thirteen available features for each individual on record in the census data. Of these thirteen records, which five features do you believe to be most important for prediction, and in what order would you rank them and why?",
"_____no_output_____"
],
[
"**Answer:**\n\nThe most important features i think are :\n\na. education_num : Better education can lead to a better jobs and it has direct impact to their income.E.g. Chances are higher to get better paying jobs if a candidate holds master's or PHD degree.\n\nb. Age : It is very important factor because younger people has less experience than older people so they get less salary than older people.\n\nc. Capital gain : It is investment or profit from the sale of property, a person can invest more if they have good earning for investment.\n\nd. hours_per_week :pay is directly proportional to number of hour somebody worked. If a person work more hour will earn more.\n\ne. Occupation: It represent what type of jobs they are doing. E.g a managerial position guy will earn more than clerical guys. Chances are less for clerical guys to earn more than $50,000.\n",
"_____no_output_____"
],
[
"### Implementation - Extracting Feature Importance\nChoose a `scikit-learn` supervised learning algorithm that has a `feature_importance_` attribute availble for it. This attribute is a function that ranks the importance of each feature when making predictions based on the chosen algorithm.\n\nIn the code cell below, you will need to implement the following:\n - Import a supervised learning model from sklearn if it is different from the three used earlier.\n - Train the supervised model on the entire training set.\n - Extract the feature importances using `'.feature_importances_'`.",
"_____no_output_____"
]
],
[
[
"# TODO: Import a supervised learning model that has 'feature_importances_'\nfrom sklearn.ensemble import GradientBoostingClassifier\n\n# TODO: Train the supervised model on the training set using .fit(X_train, y_train)\nmodel = GradientBoostingClassifier(random_state=0)\nmodel.fit(X_train, y_train)\n\n# TODO: Extract the feature importances using .feature_importances_ \nimportances = model.feature_importances_\n\n# Plot\nvs.feature_plot(importances, X_train, y_train)",
"_____no_output_____"
]
],
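[
[
"# Illustrative follow-up: recover the names of the five most important features from\n# the plot above by sorting the importances in descending order.\ntop5 = np.argsort(importances)[::-1][:5]\nfor name, weight in zip(X_train.columns.values[top5], importances[top5]):\n    print \"{:<40} {:.4f}\".format(name, weight)",
"_____no_output_____"
]
],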
[
[
"### Question 7 - Extracting Feature Importance\n\nObserve the visualization created above which displays the five most relevant features for predicting if an individual makes at most or above \\$50,000. \n* How do these five features compare to the five features you discussed in **Question 6**?\n* If you were close to the same answer, how does this visualization confirm your thoughts? \n* If you were not close, why do you think these features are more relevant?",
"_____no_output_____"
],
[
"**Answer:**\n\nOut of five features, I was not expecting two features capital loss and marital status as revelant features.It was my intuition that Capital loss means loss of earnings but now understood that it implies they have more wealth for investment but causes loss on investments.Also, i was thinking that marital status was not a revelant features but it shows that they are more financial stable and healthy income.\n\nAlso, the features like Hours per week, Occupation not in the top five features make sense that lower paid jobs will not earn more if they work more hours also than high paid jobs.\n\nHere, only top five features has not too much importance for defining the good model. So, it is important to use multiple features for better predictions while making a Machine Learning model.\n",
"_____no_output_____"
],
[
"### Feature Selection\nHow does a model perform if we only use a subset of all the available features in the data? With less features required to train, the expectation is that training and prediction time is much lower — at the cost of performance metrics. From the visualization above, we see that the top five most important features contribute more than half of the importance of **all** features present in the data. This hints that we can attempt to *reduce the feature space* and simplify the information required for the model to learn. The code cell below will use the same optimized model you found earlier, and train it on the same training set *with only the top five important features*. ",
"_____no_output_____"
]
],
[
[
"# Import functionality for cloning a model\nfrom sklearn.base import clone\n\n# Reduce the feature space\nX_train_reduced = X_train[X_train.columns.values[(np.argsort(importances)[::-1])[:5]]]\nX_test_reduced = X_test[X_test.columns.values[(np.argsort(importances)[::-1])[:5]]]\n\n# Train on the \"best\" model found from grid search earlier\nclf = (clone(best_clf)).fit(X_train_reduced, y_train)\n\n# Make new predictions\nreduced_predictions = clf.predict(X_test_reduced)\n\n# Report scores from the final model using both versions of data\nprint \"Final Model trained on full data\\n------\"\nprint \"Accuracy on testing data: {:.4f}\".format(accuracy_score(y_test, best_predictions))\nprint \"F-score on testing data: {:.4f}\".format(fbeta_score(y_test, best_predictions, beta = 0.5))\nprint \"\\nFinal Model trained on reduced data\\n------\"\nprint \"Accuracy on testing data: {:.4f}\".format(accuracy_score(y_test, reduced_predictions))\nprint \"F-score on testing data: {:.4f}\".format(fbeta_score(y_test, reduced_predictions, beta = 0.5))",
"Final Model trained on full data\n------\nAccuracy on testing data: 0.8418\nF-score on testing data: 0.6829\n\nFinal Model trained on reduced data\n------\nAccuracy on testing data: 0.8284\nF-score on testing data: 0.6520\n"
]
],
[
[
"### Question 8 - Effects of Feature Selection\n\n* How does the final model's F-score and accuracy score on the reduced data using only five features compare to those same scores when all features are used?\n* If training time was a factor, would you consider using the reduced data as your training set?",
"_____no_output_____"
],
[
"**Answer:**\n\n| Metric |Model on Full data |Model on reduced data |\n| :------------: | :---------------: | :-------------: | \n| Accuracy Score | 0.8418 | 0.8284 |\n| F-score | 0.6829 | 0.6520 |\n\nFrom the above we have seen that if number of features will reduce to five features, the accuracy score and F-Score also reduced.If number of features has reduced then its training time has also reduced so classifier will unable to generalize the model properly.\n\nThere is a tradeoff here, If training time is factor then reducing the features of the data is not going to work at all.\n\nSecondly, If there is large datasets and computational cost will be considered as a factor then it is better to reduce the features of the datasets as we saw that there is slight change in the accuracy and F-score. Also, a faster algorithm can be a better choice.",
"_____no_output_____"
],
[
"> **Note**: Once you have completed all of the code implementations and successfully answered each question above, you may finalize your work by exporting the iPython Notebook as an HTML document. You can do this by using the menu above and navigating to \n**File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
]
] |
e715652425700791fc8d8f471a3a765df37ec3c6 | 5,379 | ipynb | Jupyter Notebook | examples/assemblies/6-nested-assemblies.ipynb | roipoussiere/jupyter-cadquery | 7ed5ff2ba484ea6cb96f5ea999a88484dfb6b1c1 | [
"Apache-2.0"
] | null | null | null | examples/assemblies/6-nested-assemblies.ipynb | roipoussiere/jupyter-cadquery | 7ed5ff2ba484ea6cb96f5ea999a88484dfb6b1c1 | [
"Apache-2.0"
] | null | null | null | examples/assemblies/6-nested-assemblies.ipynb | roipoussiere/jupyter-cadquery | 7ed5ff2ba484ea6cb96f5ea999a88484dfb6b1c1 | [
"Apache-2.0"
] | null | null | null | 28.764706 | 128 | 0.476111 | [
[
[
"import cadquery as cq\nfrom jupyter_cadquery import (\n open_viewer, show, web_color,\n set_defaults, get_defaults, \n PartGroup, Part, Faces, Edges\n)\nfrom cadquery_massembly import Mate, MAssembly, relocate\n\ncv = open_viewer(\"Test\")",
"_____no_output_____"
],
[
"set_defaults(axes=True, axes0=True, mate_scale=2.5)",
"_____no_output_____"
],
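[
"# Optional sanity check (assumption: get_defaults, imported above, returns the dict\n# of current rendering defaults when called with no arguments), to confirm the\n# values we just set with set_defaults.\nprint(get_defaults())",
"_____no_output_____"
],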
[
"box0 = cq.Workplane(\"XY\").box(10,20,10)\nbox1 = cq.Workplane(\"XZ\").box(10,20,10)\nbox2 = cq.Workplane(\"YX\").box(10,20,10)\nbox3 = cq.Workplane(\"YZ\").box(10,20,10)\n\nfor box, name, dirs in (\n (box0, \"box0\", (\"Y\", \"X\")),\n (box1, \"box1\", (\"Z\", \"X\")),\n (box2, \"box2\", (\"X\", \"Y\")),\n (box3, \"box3\", (\"Z\", \"Y\")),\n):\n for i, direction in enumerate(dirs):\n box.faces(f\">{direction}\").tag(f\"{name}_m{i}\")\n\nshow(box0, box1, box2, box3)",
"_____no_output_____"
],
[
"cyl1 = cq.Workplane(\"XY\").circle(2).extrude(10)\ncyl2 = cq.Workplane(\"XZ\").circle(2).extrude(10)\ncyl3 = cq.Workplane(\"YZ\").circle(2).extrude(10)\n\nfor cyl, name, ax in (\n (cyl1, \"cyl1\", \"Z\"), \n (cyl2, \"cyl2\", \"Y\"), \n (cyl3, \"cyl3\", \"X\"),\n):\n cyl.faces(f\">{ax}\").tag(f\"{name}_m0\")\n cyl.faces(f\"<{ax}\").tag(f\"{name}_m1\")\n \nshow(cyl1, cyl2, cyl3)",
"_____no_output_____"
],
[
"def create():\n L = lambda *args: cq.Location(cq.Vector(*args))\n C = lambda *args: cq.Color(*args)\n\n a = (MAssembly(cyl3, name=\"cyl3\", color=C(1,0,0), loc=L(-20, -10, 20))\n .add(box3, name=\"box3\", color=C(1,0,0), loc=L(20,10,0))\n )\n b = (MAssembly(cyl2, name=\"cyl2\", color=C(0,0.5, 0.25), loc=L(0, -20, 20))\n .add(box2, name=\"box2\", color=C(0,0.5,0.25), loc=L(0, 20, 20))\n .add(a, name=\"a\")\n )\n c = (MAssembly(cyl1, name=\"cyl1\", color=C(0,0,1), loc=L(10,0,-10))\n .add(box1, name=\"box1\", color=C(0,0,1), loc=L(10, 0,10))\n .add(b, name=\"b\")\n )\n d = (MAssembly(box0, name=\"box0\", color=C(0.5,0.5,0.5), loc=L(30,30,30))\n .add(c, name=\"c\")\n )\n return d\n\nassy = create()\nshow(assy)",
"_____no_output_____"
],
[
"from collections import OrderedDict as odict\n\nassy = create()\nfor obj, name in (\n (\"box0\", \"box0\"), (\"c/box1\", \"box1\"), (\"c/b/box2\", \"box2\"), (\"c/b/a/box3\", \"box3\"), \n (\"c\", \"cyl1\"), (\"c/b\", \"cyl2\"), (\"c/b/a\", \"cyl3\")\n):\n assy.mate(f\"{obj}?{name}_m0\", name=f\"{name}_m0\", transforms=odict(rx=180 if \"c\" in name else 0), origin=True)\n assy.mate(f\"{obj}?{name}_m1\", name=f\"{name}_m1\", transforms=odict(rx=0 if \"b\" in name else 180))\n\nshow(assy, render_mates=True)",
"_____no_output_____"
],
[
"relocate(assy)\nshow(assy, render_mates=True)",
"_____no_output_____"
],
[
"assy.assemble(\"cyl1_m0\", \"box0_m0\")\nassy.assemble(\"box1_m1\", \"cyl1_m1\")\nassy.assemble(\"cyl2_m0\", \"box1_m0\")\nassy.assemble(\"box2_m1\", \"cyl2_m1\")\nassy.assemble(\"cyl3_m0\", \"box2_m0\")\nassy.assemble(\"box3_m1\", \"cyl3_m1\")\n\nd = show(assy, render_mates=True)",
"_____no_output_____"
],
[
"import numpy as np\nfrom jupyter_cadquery.animation import Animation\n\nanimation = Animation(d)\nanimation.add_track(f\"/box0/c\", \"rz\", np.linspace(0,6,13), np.linspace(0, 360, 13))\nanimation.add_track(f\"/box0/c/b\", \"rz\", np.linspace(0,6,13), np.linspace(0, 360, 13))\nanimation.add_track(f\"/box0/c/b/a\", \"rz\", np.linspace(0,6,13), np.linspace(0, 360, 13))\n \nanimation.animate(speed=3)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7156fd0b7ffb74453ae359dd3daa47eb3c2a7be | 112,573 | ipynb | Jupyter Notebook | topic_modelling.ipynb | SANKET7738/random | 1e5267b520ec187187a3602e36c6d9daacbfabd6 | [
"MIT"
] | 2 | 2020-09-20T06:29:04.000Z | 2020-09-20T09:11:31.000Z | topic_modelling.ipynb | SANKET7738/random | 1e5267b520ec187187a3602e36c6d9daacbfabd6 | [
"MIT"
] | null | null | null | topic_modelling.ipynb | SANKET7738/random | 1e5267b520ec187187a3602e36c6d9daacbfabd6 | [
"MIT"
] | 2 | 2020-09-15T13:23:51.000Z | 2020-10-20T08:21:36.000Z | 191.125637 | 86,172 | 0.65792 | [
[
[
"<a href=\"https://colab.research.google.com/github/SANKET7738/random/blob/master/topic_modelling.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"from google.colab import drive\ndrive.mount('/content/drive')",
"Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount(\"/content/drive\", force_remount=True).\n"
],
[
"import pickle\nimport pandas as pd",
"_____no_output_____"
],
[
"file = open('drive/My Drive/gssoc/preprocessed/id2word.txt','rb')\nid2word = pickle.load(file)\nfile.close()",
"_____no_output_____"
],
[
"print(id2word)",
"Dictionary(55850 unique tokens: ['file', 'html', 'info', 'leader', 'learn']...)\n"
],
[
"file = open('drive/My Drive/gssoc/preprocessed/data_lemmatized.txt','rb')\ndata_lemmatized = pickle.load(file)\nfile.close()",
"_____no_output_____"
],
[
"print(data_lemmatized[:2])",
"[['info', 'page', 'file', 'wait', 'team', 'leader', 'process', 'learn', 'html'], ['team', 'member', 'draw', 'mail', 'mail']]\n"
],
[
"file = open('drive/My Drive/gssoc/preprocessed/corpus.txt','rb')\ncorpus = pickle.load(file)\nfile.close()",
"_____no_output_____"
],
[
"print(corpus[:1])",
"[[(0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1)]]\n"
],
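[
"# Each corpus entry is a bag of words stored as (token_id, count) pairs; mapping the\n# ids back through id2word makes the first document readable (illustrative check).\nprint([(id2word[token_id], count) for token_id, count in corpus[0]])",
"_____no_output_____"
],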
[
"pip uninstall gensim",
"Uninstalling gensim-3.8.3:\n Would remove:\n /usr/local/lib/python3.6/dist-packages/gensim-3.8.3.dist-info/*\n /usr/local/lib/python3.6/dist-packages/gensim/*\nProceed (y/n)? y\n Successfully uninstalled gensim-3.8.3\n"
],
[
"pip install gensim",
"Collecting gensim\n Using cached https://files.pythonhosted.org/packages/2b/e0/fa6326251692056dc880a64eb22117e03269906ba55a6864864d24ec8b4e/gensim-3.8.3-cp36-cp36m-manylinux1_x86_64.whl\nRequirement already satisfied: scipy>=0.18.1 in /usr/local/lib/python3.6/dist-packages (from gensim) (1.4.1)\nRequirement already satisfied: six>=1.5.0 in /usr/local/lib/python3.6/dist-packages (from gensim) (1.15.0)\nRequirement already satisfied: numpy>=1.11.3 in /usr/local/lib/python3.6/dist-packages (from gensim) (1.18.5)\nRequirement already satisfied: smart-open>=1.8.1 in /usr/local/lib/python3.6/dist-packages (from gensim) (2.2.0)\nRequirement already satisfied: requests in /usr/local/lib/python3.6/dist-packages (from smart-open>=1.8.1->gensim) (2.23.0)\nRequirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests->smart-open>=1.8.1->gensim) (3.0.4)\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests->smart-open>=1.8.1->gensim) (1.24.3)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests->smart-open>=1.8.1->gensim) (2020.6.20)\nRequirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests->smart-open>=1.8.1->gensim) (2.10)\nInstalling collected packages: gensim\nSuccessfully installed gensim-3.8.3\n"
],
[
"import gensim\nimport gensim.corpora as corpora\nfrom gensim.utils import simple_preprocess\nfrom gensim.models import CoherenceModel",
"_____no_output_____"
],
[
"lda_model = gensim.models.LdaModel(corpus=corpus,\n id2word=id2word,\n num_topics=20,\n random_state=0,\n update_every=1,\n chunksize=100,\n passes=1,\n alpha='auto',\n per_word_topics='True')",
"_____no_output_____"
],
[
"file = open('drive/My Drive/gssoc/pickle/lda_model_pkl','rb')\nlda_model_pkl = pickle.load(file)\nfile.close()",
"_____no_output_____"
],
[
"print(lda_model_pkl.print_topics())",
"[(0, '0.190*\"ask\" + 0.148*\"woman\" + 0.127*\"question\" + 0.115*\"moment\" + 0.098*\"answer\" + 0.076*\"young\" + 0.051*\"mother\" + 0.037*\"wife\" + 0.035*\"husband\" + 0.016*\"repeat\"'), (1, '0.066*\"away\" + 0.045*\"walk\" + 0.039*\"hand\" + 0.039*\"heart\" + 0.038*\"fall\" + 0.037*\"face\" + 0.036*\"stand\" + 0.035*\"hold\" + 0.033*\"open\" + 0.031*\"close\"'), (2, '0.110*\"water\" + 0.106*\"run\" + 0.105*\"link\" + 0.068*\"add\" + 0.054*\"trust\" + 0.044*\"chat\" + 0.044*\"video\" + 0.041*\"photo\" + 0.031*\"cream\" + 0.028*\"danger\"'), (3, '0.082*\"tri\" + 0.079*\"suggest\" + 0.066*\"blood\" + 0.046*\"smell\" + 0.044*\"ride\" + 0.041*\"wind\" + 0.038*\"hit\" + 0.037*\"round\" + 0.036*\"intern\" + 0.035*\"bless\"'), (4, '0.155*\"hate\" + 0.120*\"child\" + 0.052*\"form\" + 0.052*\"exist\" + 0.052*\"student\" + 0.046*\"public\" + 0.043*\"nation\" + 0.043*\"program\" + 0.042*\"state\" + 0.038*\"folk\"'), (5, '0.134*\"food\" + 0.112*\"eat\" + 0.100*\"pay\" + 0.077*\"vote\" + 0.076*\"put\" + 0.054*\"dear\" + 0.046*\"cost\" + 0.043*\"coffe\" + 0.041*\"fish\" + 0.036*\"meal\"'), (6, '0.494*\"school\" + 0.235*\"drive\" + 0.093*\"white\" + 0.039*\"hole\" + 0.026*\"chicken\" + 0.024*\"kitchen\" + 0.020*\"chines\" + 0.014*\"salad\" + 0.012*\"high\" + 0.008*\"sidewalk\"'), (7, '0.164*\"night\" + 0.155*\"home\" + 0.099*\"hour\" + 0.091*\"sleep\" + 0.078*\"tomorrow\" + 0.070*\"stay\" + 0.065*\"tonight\" + 0.061*\"wake\" + 0.061*\"drink\" + 0.027*\"even\"'), (8, '0.163*\"music\" + 0.132*\"wear\" + 0.098*\"black\" + 0.073*\"birthday\" + 0.069*\"rain\" + 0.050*\"record\" + 0.044*\"shirt\" + 0.043*\"shoe\" + 0.041*\"ahead\" + 0.030*\"season\"'), (9, '0.076*\"strong\" + 0.067*\"support\" + 0.060*\"burn\" + 0.046*\"push\" + 0.043*\"clear\" + 0.043*\"leader\" + 0.041*\"project\" + 0.038*\"race\" + 0.037*\"stage\" + 0.034*\"american\"'), (10, '0.113*\"teach\" + 0.112*\"death\" + 0.100*\"name\" + 0.094*\"teacher\" + 0.079*\"rule\" + 0.072*\"claim\" + 0.063*\"tast\" + 0.061*\"fill\" + 0.047*\"address\" + 0.045*\"medium\"'), (11, '0.187*\"write\" + 0.172*\"read\" + 0.123*\"blog\" + 0.079*\"post\" + 0.069*\"book\" + 0.054*\"site\" + 0.044*\"list\" + 0.037*\"page\" + 0.034*\"email\" + 0.017*\"space\"'), (12, '0.145*\"stuff\" + 0.070*\"comment\" + 0.066*\"test\" + 0.065*\"current\" + 0.049*\"team\" + 0.037*\"huge\" + 0.036*\"fail\" + 0.035*\"bear\" + 0.031*\"perform\" + 0.031*\"urllink\"'), (13, '0.066*\"creat\" + 0.065*\"allow\" + 0.050*\"level\" + 0.049*\"develop\" + 0.048*\"human\" + 0.044*\"high\" + 0.038*\"effect\" + 0.032*\"provid\" + 0.028*\"view\" + 0.027*\"similar\"'), (14, '0.034*\"think\" + 0.034*\"know\" + 0.032*\"go\" + 0.027*\"time\" + 0.023*\"want\" + 0.022*\"feel\" + 0.021*\"good\" + 0.021*\"thing\" + 0.018*\"come\" + 0.016*\"work\"'), (15, '0.097*\"probabl\" + 0.067*\"posit\" + 0.063*\"self\" + 0.063*\"result\" + 0.062*\"power\" + 0.054*\"truth\" + 0.053*\"websit\" + 0.040*\"news\" + 0.040*\"direct\" + 0.039*\"opinion\"'), (16, '0.409*\"love\" + 0.122*\"girl\" + 0.080*\"listen\" + 0.055*\"song\" + 0.047*\"phone\" + 0.046*\"smile\" + 0.031*\"cold\" + 0.029*\"stick\" + 0.022*\"bodi\" + 0.021*\"warm\"'), (17, '0.072*\"govern\" + 0.046*\"report\" + 0.044*\"process\" + 0.042*\"suffer\" + 0.041*\"mail\" + 0.041*\"refer\" + 0.040*\"concern\" + 0.036*\"action\" + 0.035*\"lack\" + 0.032*\"base\"'), (18, '0.041*\"room\" + 0.036*\"small\" + 0.030*\"buy\" + 0.028*\"build\" + 0.025*\"visit\" + 0.024*\"train\" + 0.022*\"event\" + 0.020*\"offer\" + 0.020*\"sell\" + 0.020*\"pull\"'), (19, '0.139*\"class\" 
+ 0.096*\"sit\" + 0.088*\"move\" + 0.072*\"share\" + 0.070*\"total\" + 0.065*\"brother\" + 0.051*\"clean\" + 0.043*\"weight\" + 0.033*\"lesson\" + 0.029*\"player\"')]\n"
],
[
"print('\\nPerplexity: ', lda_model_pkl.log_perplexity(corpus)) ",
"\nPerplexity: -11.496303696483185\n"
],
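[
"# Note: gensim's log_perplexity returns a per-word likelihood bound, so the\n# conventional perplexity is 2 ** (-bound); lower perplexity is better.\nbound = lda_model_pkl.log_perplexity(corpus)\nprint(2 ** (-bound))",
"_____no_output_____"
],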
[
"coherence_model_lda = CoherenceModel(model=lda_model_pkl, texts=data_lemmatized, dictionary=id2word, coherence='c_v')\ncoherence_lda = coherence_model_lda.get_coherence()",
"_____no_output_____"
],
[
"print(coherence_lda)",
"0.35765087513967575\n"
],
[
"pip install pyLDavis",
"Collecting pyLDavis\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/a5/3a/af82e070a8a96e13217c8f362f9a73e82d61ac8fff3a2561946a97f96266/pyLDAvis-2.1.2.tar.gz (1.6MB)\n\u001b[K |████████████████████████████████| 1.6MB 2.8MB/s \n\u001b[?25hRequirement already satisfied: wheel>=0.23.0 in /usr/local/lib/python3.6/dist-packages (from pyLDavis) (0.35.1)\nRequirement already satisfied: numpy>=1.9.2 in /usr/local/lib/python3.6/dist-packages (from pyLDavis) (1.18.5)\nRequirement already satisfied: scipy>=0.18.0 in /usr/local/lib/python3.6/dist-packages (from pyLDavis) (1.4.1)\nRequirement already satisfied: pandas>=0.17.0 in /usr/local/lib/python3.6/dist-packages (from pyLDavis) (1.1.2)\nRequirement already satisfied: joblib>=0.8.4 in /usr/local/lib/python3.6/dist-packages (from pyLDavis) (0.16.0)\nRequirement already satisfied: jinja2>=2.7.2 in /usr/local/lib/python3.6/dist-packages (from pyLDavis) (2.11.2)\nRequirement already satisfied: numexpr in /usr/local/lib/python3.6/dist-packages (from pyLDavis) (2.7.1)\nRequirement already satisfied: pytest in /usr/local/lib/python3.6/dist-packages (from pyLDavis) (3.6.4)\nRequirement already satisfied: future in /usr/local/lib/python3.6/dist-packages (from pyLDavis) (0.16.0)\nCollecting funcy\n Downloading https://files.pythonhosted.org/packages/66/89/479de0afbbfb98d1c4b887936808764627300208bb771fcd823403645a36/funcy-1.15-py2.py3-none-any.whl\nRequirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.6/dist-packages (from pandas>=0.17.0->pyLDavis) (2018.9)\nRequirement already satisfied: python-dateutil>=2.7.3 in /usr/local/lib/python3.6/dist-packages (from pandas>=0.17.0->pyLDavis) (2.8.1)\nRequirement already satisfied: MarkupSafe>=0.23 in /usr/local/lib/python3.6/dist-packages (from jinja2>=2.7.2->pyLDavis) (1.1.1)\nRequirement already satisfied: attrs>=17.4.0 in /usr/local/lib/python3.6/dist-packages (from pytest->pyLDavis) (20.2.0)\nRequirement already satisfied: setuptools in /usr/local/lib/python3.6/dist-packages (from pytest->pyLDavis) (50.3.0)\nRequirement already satisfied: py>=1.5.0 in /usr/local/lib/python3.6/dist-packages (from pytest->pyLDavis) (1.9.0)\nRequirement already satisfied: pluggy<0.8,>=0.5 in /usr/local/lib/python3.6/dist-packages (from pytest->pyLDavis) (0.7.1)\nRequirement already satisfied: six>=1.10.0 in /usr/local/lib/python3.6/dist-packages (from pytest->pyLDavis) (1.15.0)\nRequirement already satisfied: atomicwrites>=1.0 in /usr/local/lib/python3.6/dist-packages (from pytest->pyLDavis) (1.4.0)\nRequirement already satisfied: more-itertools>=4.0.0 in /usr/local/lib/python3.6/dist-packages (from pytest->pyLDavis) (8.5.0)\nBuilding wheels for collected packages: pyLDavis\n Building wheel for pyLDavis (setup.py) ... \u001b[?25l\u001b[?25hdone\n Created wheel for pyLDavis: filename=pyLDAvis-2.1.2-py2.py3-none-any.whl size=97712 sha256=970e6e77cf158a6e7cf850a70c8cac684c620a7a1b5efd616aabacc863fa260c\n Stored in directory: /root/.cache/pip/wheels/98/71/24/513a99e58bb6b8465bae4d2d5e9dba8f0bef8179e3051ac414\nSuccessfully built pyLDavis\nInstalling collected packages: funcy, pyLDavis\nSuccessfully installed funcy-1.15 pyLDavis-2.1.2\n"
],
[
"import pyLDAvis\nimport pyLDAvis.gensim \nimport matplotlib.pyplot as plt\n%matplotlib inline",
"_____no_output_____"
],
[
"pyLDAvis.enable_notebook()\nvis = pyLDAvis.gensim.prepare(lda_model_pkl, corpus, id2word)\nvis",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7157c375b44a3d63a498d3993be37a74c6eb79b | 572,668 | ipynb | Jupyter Notebook | load_hcp_task_with_behaviour.ipynb | anubratabhowmick/NeuroMatch_WorkingMemoryTask | 6ae9f6c34afbb3eebefd46ae7b19d22d969f9c36 | [
"MIT"
] | null | null | null | load_hcp_task_with_behaviour.ipynb | anubratabhowmick/NeuroMatch_WorkingMemoryTask | 6ae9f6c34afbb3eebefd46ae7b19d22d969f9c36 | [
"MIT"
] | null | null | null | load_hcp_task_with_behaviour.ipynb | anubratabhowmick/NeuroMatch_WorkingMemoryTask | 6ae9f6c34afbb3eebefd46ae7b19d22d969f9c36 | [
"MIT"
] | null | null | null | 767.651475 | 255,464 | 0.950605 | [
[
[
"# Load HCP parcellated task data \n# **(VERSION WITH BEHAVIOURAL DATA)**\n\nThe HCP dataset comprises task-based fMRI from a large sample of human subjects. The NMA-curated dataset includes time series data that has been preprocessed and spatially-downsampled by aggregating within 360 regions of interest.\n\nIn order to use this dataset, please electronically sign the HCP data use terms at [ConnectomeDB](https://db.humanconnectome.org). Instructions for this are on pp. 24-25 of the [HCP Reference Manual](https://www.humanconnectome.org/storage/app/media/documentation/s1200/HCP_S1200_Release_Reference_Manual.pdf).\n\nIn this notebook, NMA provides code for downloading the data and doing some basic visualisation and processing.",
"_____no_output_____"
]
],
[
[
"import os\nimport numpy as np\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"#@title Figure settings\n%matplotlib inline\n%config InlineBackend.figure_format = 'retina'\nplt.style.use(\"https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/nma.mplstyle\")",
"_____no_output_____"
],
[
"# The download cells will store the data in nested directories starting here:\nHCP_DIR = \"./hcp\"\nif not os.path.isdir(HCP_DIR):\n os.mkdir(HCP_DIR)\n\n# The data shared for NMA projects is a subset of the full HCP dataset\nN_SUBJECTS = 100\n\n# The data have already been aggregated into ROIs from the Glasser parcellation\nN_PARCELS = 360\n\n# The acquisition parameters for all tasks were identical\nTR = 0.72 # Time resolution, in seconds\n\n# The parcels are matched across hemispheres with the same order\nHEMIS = [\"Right\", \"Left\"]\n\n# Each experiment was repeated twice in each subject\nRUNS = ['LR','RL']\nN_RUNS = 2\n\n# There are 7 tasks. Each has a number of 'conditions'\n# TIP: look inside the data folders for more fine-graned conditions\n\nEXPERIMENTS = {\n 'MOTOR' : {'cond':['lf','rf','lh','rh','t','cue']},\n 'WM' : {'cond':['0bk_body','0bk_faces','0bk_places','0bk_tools','2bk_body','2bk_faces','2bk_places','2bk_tools']},\n 'EMOTION' : {'cond':['fear','neut']},\n 'GAMBLING' : {'cond':['loss','win']},\n 'LANGUAGE' : {'cond':['math','story']},\n 'RELATIONAL' : {'cond':['match','relation']},\n 'SOCIAL' : {'cond':['ment','rnd']}\n}\n\n",
"_____no_output_____"
]
],
[
[
"> For a detailed description of the tasks have a look pages 45-54 of the [HCP reference manual](https://www.humanconnectome.org/storage/app/media/documentation/s1200/HCP_S1200_Release_Reference_Manual.pdf).",
"_____no_output_____"
],
[
"# Downloading data\n\nThe task data are shared in different files, but they will unpack into the same directory structure.\n",
"_____no_output_____"
]
],
[
[
"fname = \"hcp_task.tgz\"\nif not os.path.exists(fname):\n !wget -qO $fname https://osf.io/2y3fw/download\n !tar -xzf $fname -C $HCP_DIR --strip-components=1\n\n\nsubjects = np.loadtxt(os.path.join(HCP_DIR,'subjects_list.txt'),dtype='str')\n",
"_____no_output_____"
]
],
[
[
"## Understanding the folder organisation\n\nThe data folder has the following organisation:\n\n- hcp\n - regions.npy (information on the brain parcellation)\n - subjects_list.txt (list of subject IDs)\n - subjects (main data folder)\n - [subjectID] (subject-specific subfolder)\n - EXPERIMENT (one folder per experiment)\n - RUN (one folder per run)\n - data.npy (the parcellated time series data)\n - EVs (EVs folder)\n - [ev1.txt] (one file per condition)\n - [ev2.txt]\n - Stats.txt (behavioural data [where available] - averaged per run)\n - Sync.txt (ignore this file)\n\n",
"_____no_output_____"
],
[
"## Loading region information\n\nDownloading this dataset will create the `regions.npy` file, which contains the region name and network assignment for each parcel.\n\nDetailed information about the name used for each region is provided [in the Supplement](https://static-content.springer.com/esm/art%3A10.1038%2Fnature18933/MediaObjects/41586_2016_BFnature18933_MOESM330_ESM.pdf) to [Glasser et al. 2016](https://www.nature.com/articles/nature18933).\n\nInformation about the network parcellation is provided in [Ji et al, 2019](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6289683/).\n\n",
"_____no_output_____"
]
],
[
[
"regions = np.load(f\"{HCP_DIR}/regions.npy\").T\nregion_info = dict(\n name=regions[0].tolist(),\n network=regions[1],\n hemi=['Right']*int(N_PARCELS/2) + ['Left']*int(N_PARCELS/2),\n)",
"_____no_output_____"
]
],
[
[
"# Help functions\n\nWe provide two helper functions: one for loading the time series from a single suject and a single run, and one for loading an EV file for each task. \n\nAn EV file (EV:Explanatory Variable) describes the task experiment in terms of stimulus onset, duration, and amplitude. These can be used to model the task time series data.",
"_____no_output_____"
]
],
[
[
"def load_single_timeseries(subject, experiment, run, remove_mean=True):\n \"\"\"Load timeseries data for a single subject and single run.\n \n Args:\n subject (str): subject ID to load\n experiment (str): Name of experiment \n run (int): (0 or 1)\n remove_mean (bool): If True, subtract the parcel-wise mean (typically the mean BOLD signal is not of interest)\n\n Returns\n ts (n_parcel x n_timepoint array): Array of BOLD data values\n\n \"\"\"\n bold_run = RUNS[run]\n bold_path = f\"{HCP_DIR}/subjects/{subject}/{experiment}/tfMRI_{experiment}_{bold_run}\"\n bold_file = \"data.npy\"\n ts = np.load(f\"{bold_path}/{bold_file}\")\n if remove_mean:\n ts -= ts.mean(axis=1, keepdims=True)\n return ts\n\n\ndef load_evs(subject, experiment, run):\n \"\"\"Load EVs (explanatory variables) data for one task experiment.\n\n Args:\n subject (str): subject ID to load\n experiment (str) : Name of experiment\n run (int): 0 or 1\n\n Returns\n evs (list of lists): A list of frames associated with each condition\n\n \"\"\"\n frames_list = []\n cond_list = []\n task_key = f'tfMRI_{experiment}_{RUNS[run]}'\n for cond in EXPERIMENTS[experiment]['cond']: \n ev_file = f\"{HCP_DIR}/subjects/{subject}/{experiment}/{task_key}/EVs/{cond}.txt\"\n ev_array = np.loadtxt(ev_file, ndmin=2, unpack=True)\n ev = dict(zip([\"onset\", \"duration\", \"amplitude\"], ev_array))\n # Determine when trial starts, rounded down\n start = np.floor(ev[\"onset\"] / TR).astype(int)\n # Use trial duration to determine how many frames to include for trial\n duration = np.ceil(ev[\"duration\"] / TR).astype(int)\n # Take the range of frames that correspond to this specific trial\n frames = [s + np.arange(0, d) for s, d in zip(start, duration)]\n frames_list.append(frames)\n cond_list.append(cond)\n\n return frames_list, cond_list",
"_____no_output_____"
]
],
[
[
"# Example run\n\nLet's load the timeseries data for the MOTOR experiment from a single subject and a single run",
"_____no_output_____"
]
],
[
[
"my_exp = 'WM'\nmy_subj = subjects[2]\nmy_run = 1\n\ndata = load_single_timeseries(subject=my_subj,experiment=my_exp,run=my_run,remove_mean=True)\nprint(data.shape)\n\nplt.plot(data[17]) # 18 refers to the FFA/FFC\n\nplt.plot(range(53, 91), data[17][53:91], 'g-') # 53:91 refers to the 0bk_faces\nplt.plot(range(152, 190), data[17][152:190], 'r-') # 152:190 refers to the 0bk_body",
"(360, 405)\n"
]
],
[
[
"As you can see the time series data contains 284 time points in 360 regions of interest (ROIs).\n\n",
"_____no_output_____"
],
[
"Now in order to understand how to model these data, we need to relate the time series to the experimental manipulation. This is described by the EV files. Let us load the EVs for this experiment.",
"_____no_output_____"
]
],
[
[
"evs, cond = load_evs(subject=my_subj, experiment=my_exp,run=my_run)\n\nprint(cond, '\\n', evs, '\\n')\n\n# 14(2bk_body) 53(0bk_faces) 113(2bk_tools) 152(0bk_body) 212(0bk_places) 251(2bk_faces) 311(0bk_tools) 350(2bk_places)\n",
"['0bk_body', '0bk_faces', '0bk_places', '0bk_tools', '2bk_body', '2bk_faces', '2bk_places', '2bk_tools'] \n [[array([153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165,\n 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178,\n 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191])], [array([53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,\n 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86,\n 87, 88, 89, 90, 91])], [array([213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225,\n 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238,\n 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251])], [array([313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 325,\n 326, 327, 328, 329, 330, 331, 332, 333, 334, 335, 336, 337, 338,\n 339, 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, 350, 351])], [array([14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,\n 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,\n 48, 49, 50, 51, 52])], [array([252, 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, 264,\n 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277,\n 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290])], [array([352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362, 363, 364,\n 365, 366, 367, 368, 369, 370, 371, 372, 373, 374, 375, 376, 377,\n 378, 379, 380, 381, 382, 383, 384, 385, 386, 387, 388, 389, 390])], [array([114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126,\n 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139,\n 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152])]] \n\n"
],
[
"def average_frames_idx(data, evs, experiment, idx_cond): \n #idx = EXPERIMENTS[experiment]['cond'].index(cond)\n return np.mean(np.concatenate([np.mean(data[:,evs[idx_cond][i]],axis=1,keepdims=True) for i in range(len(evs[idx_cond]))],axis=-1),axis=1)\n\nactivity = np.zeros([360,len(evs)])\nfor iEV in range(len(evs)):\n activity[:,iEV] = average_frames_idx(data, evs, my_exp, iEV) ",
"_____no_output_____"
],
[
"plt.imshow(activity.T,aspect='auto')\nplt.colorbar()\nplt.xlabel('360 ROIs')\nplt.ylabel('8 WM conditions')\n#WMconds",
"_____no_output_____"
],
[
"set(region_info['network'])\n#print(region_info['name'])\n#print(region_info['network'])",
"_____no_output_____"
],
[
"t = region_info['network'] == 'Visual1' #False or True\n#print(t)\nind_ROIs = np.where(t)[0]\nind_ROIs",
"_____no_output_____"
],
[
"df = pd.DataFrame(activity[ind_ROIs,:])\ncorrM = df.corr()\ncmap = sns.diverging_palette(230, 20, as_cmap=True)\nmask = np.triu(np.ones_like(corrM, dtype=bool))\nsns.heatmap(corrM,mask=mask,cmap=cmap)",
"_____no_output_____"
]
],
[
[
"For the motor task, this evs variable contains a list of 5 arrays corresponding to the 5 conditions. \n\nNow let's use these evs to compare the average activity during the left foot ('lf') and right foot ('rf') conditions:",
"_____no_output_____"
]
],
[
[
"# we need a little function that averages all frames from any given condition\n\ndef average_frames(data, evs, experiment, cond): \n idx = EXPERIMENTS[experiment]['cond'].index(cond)\n return np.mean(np.concatenate([np.mean(data[:,evs[idx][i]],axis=1,keepdims=True) for i in range(len(evs[idx]))],axis=-1),axis=1)\n\ncond1 = '0bk_faces'\ncond2 = '0bk_places'\n\nlf_activity = average_frames(data, evs, my_exp, cond1)\nrf_activity = average_frames(data, evs, my_exp, cond2)\ncontrast = lf_activity-rf_activity # difference between left and right hand movement\n\n# Plot activity level in each ROI for both conditions\nplt.plot(lf_activity,label='cond1')\nplt.plot(rf_activity,label='cond2')\nplt.xlabel('ROI')\nplt.ylabel('activity')\nplt.legend()\nplt.xlim(0, 20)",
"_____no_output_____"
]
],
[
[
"Now let's plot these activity vectors. We will also make use of the ROI names to find out which brain areas show highest activity in these conditions. But since there are so many areas, we will group them by network.\n\nA powerful tool for organising and plotting this data is the combination of pandas and seaborn. Below is an example where we use pandas to create a table for the activity data and we use seaborn oto visualise it.\n\n\n\n",
"_____no_output_____"
]
],
[
[
"print(region_info['network'][344])\nprint(region_info['hemi'][344])\nprint(region_info['name'][344])",
"Posterior-Mu\nLeft\nL_s32\n"
],
[
"import pandas as pd\nimport seaborn as sns\n\ndf = pd.DataFrame({'cond1' : lf_activity,\n 'cond2' : rf_activity,\n 'network' : region_info['network'],\n 'hemi' : region_info['hemi']})\n\nfig,(ax1,ax2) = plt.subplots(1,2)\n\nprint(df)\nsns.barplot(y='network', x='cond1', data=df, hue='hemi',ax=ax1)\nsns.barplot(y='network', x='cond2', data=df, hue='hemi',ax=ax2)\n",
" cond1 cond2 network hemi\n0 30.262659 40.228761 Visual1 Right\n1 -23.173884 14.976772 Visual2 Right\n2 -32.867064 -8.497185 Visual2 Right\n3 33.045244 37.106598 Visual2 Right\n4 10.746488 47.104466 Visual2 Right\n.. ... ... ... ...\n355 80.691940 -12.352714 Posterior-Mu Left\n356 -8.329930 -19.543815 Frontopariet Left\n357 26.542068 4.012363 Cingulo-Oper Left\n358 16.739952 -13.932081 Cingulo-Oper Left\n359 26.276002 -23.027267 Cingulo-Oper Left\n\n[360 rows x 4 columns]\n"
]
],
[
[
"You should be able to notice that for the somatosensory network, brain activity in the right hemi is higher for the left foot movement and vice versa for the left hemi and right foot. But this may be subtle at the single subject/session level (these are quick 3-4min scans). \n\n\nLet us boost thee stats by averaging across all subjects and runs. ",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
e7158b5e7226406d09cd73ffc744b4ed911343ec | 88,569 | ipynb | Jupyter Notebook | [ARE 212] Discussion Section - Python 02.ipynb | bk-econ/share | d49d6e63c76e4753b7180f4f784b9bb88c839261 | [
"CC0-1.0"
] | null | null | null | [ARE 212] Discussion Section - Python 02.ipynb | bk-econ/share | d49d6e63c76e4753b7180f4f784b9bb88c839261 | [
"CC0-1.0"
] | null | null | null | [ARE 212] Discussion Section - Python 02.ipynb | bk-econ/share | d49d6e63c76e4753b7180f4f784b9bb88c839261 | [
"CC0-1.0"
] | null | null | null | 51.493605 | 748 | 0.614165 | [
[
[
"# [ARE 212] Discussion Section - Python 02\n",
"_____no_output_____"
],
[
"- [Ethan's materials](https://github.com/ligonteaching/ARE212_Materials)\n- [My github](https://github.com/bk-econ/share)",
"_____no_output_____"
],
[
"### Source Material",
"_____no_output_____"
],
[
"These notes are the fruits of arduous labor of others. My contributions are minimual, but please expect to find some code (and even explanation) errors (and assume them all to be mine). If you find any mistakes or have questions, please [let me know](mailto:[email protected]).\n \nThe primary sources of these notes are:\n- Ethan and in particular his EEP 153 Notes\n- [Computational and Inferential Thinking: The Foundations of Data Science](https://www.inferentialthinking.com/chapters/intro.html) which is the textbook for UC Berkeley's [Data 8: The Foundations of Data Science](http://data8.org/) course. All of the notes, readings, labs, and assignments are fully available online as well. For instance, here is [Spring 2020](http://data8.org/sp20/).\n- [Python Data Science Handbook](https://jakevdp.github.io/PythonDataScienceHandbook/)",
"_____no_output_____"
],
[
"### Follow-up from questions posed last week",
"_____no_output_____"
],
[
"- Discussions: See [Ethan's full response](https://bcourses.berkeley.edu/courses/1487913/discussion_topics/5737368)\n - *When is discussion participation \"due\"?* \n - Initial response by Wednesday\n - Further contributiioins can be later in the week (including over the weekend)\n- Final format: See [Ethan's full response](https://bcourses.berkeley.edu/courses/1487913/discussion_topics/5737448) \n - *Do we need to be able to code in Python?* \n - \"My intention, rather, is to provide you with some simple working notebooks you can play with, and follow along with to achieve instructional ends which aren't principally focused on on new language acquisition\"\n - \"I'll make sure the final doesn't require python mastery.\"\n - *What should we expect in the final?* \n - \"My intention is for the topics raised in the discussions to serve as likely jumping off points for the final. \n - \". . . students are responsible for the content of the lecture, and should be familiar with the specific readings I reference (not necessarily all the papers mentioned in the syllabus).\"",
"_____no_output_____"
],
[
"## Review Lecture Jupyter Notebooks 1",
"_____no_output_____"
],
[
"### Learning Goals Today",
"_____no_output_____"
],
[
"1. Review [classical_regression](https://datahub.berkeley.edu/user/benjaminkrause/notebooks/ARE212_Materials/classical_regression.ipynb)\n2. Review [weighted_regression](https://datahub.berkeley.edu/user/benjaminkrause/notebooks/ARE212_Materials/weighted_regression.ipynb) \n \n\nNB: If needed, review the introduction from last week at [ARE 212 Discussion Section - Python 01](https://datahub.berkeley.edu/user/benjaminkrause/notebooks/ARE212_Discussion_Section/%5BARE%20212%5D%20Discussion%20Section%20-%20Python%2001.ipynb) or from [my github](https://github.com/bk-econ/share).",
"_____no_output_____"
],
[
"##### Open a `jupyter` notebook on `datahub.berkeley.edu`",
"_____no_output_____"
],
[
"1. Open the website [URL to Interact](https://url-to-interact.herokuapp.com/)\n2. For `1. Choose your desired hub:` select `datahub.berkeley.edu`\n3. Either navigate to [my github](https://github.com/bk-econ/share) page and select the `URL` of the correct section or simply copy it from right here: https://github.com/bk-econ/share/blob/master/%5BARE%20212%5D%20Discussion%20Section%20-%20Python%2001.ipynb\n4. Paste the `URL` you copied in the previous step into `2. Paste the GitHub URL for your file or folder in the box below.`\n5. Click `Convert to interact link!`\n6. Copy the new `URL` generated in field `4. Your interact link URL will appear in the box below.`\n7. Paste the new `URL` in any web browser and get to it!",
"_____no_output_____"
],
[
"### 1. [classical_regression](https://datahub.berkeley.edu/user/benjaminkrause/notebooks/ARE212_Materials/classical_regression.ipynb)",
"_____no_output_____"
],
[
"or see [classical regression on github](https://github.com/ligonteaching/ARE212_Materials/blob/master/classical_regression.ipynb)",
"_____no_output_____"
],
[
"#### Classical regression in =python=\n\n",
"_____no_output_____"
],
[
"The fact that $X$ and $u$ are “independent” variables means that\nif we want to compute a “classical” regression we’d do it\nsomething like this:\n\n",
"_____no_output_____"
],
[
"##### Define independent random variables",
"_____no_output_____"
]
],
[
[
"# 1.1. ORIGINAL CODE\n%matplotlib inline\nimport numpy as np\nfrom scipy.stats import multivariate_normal\n\nk = 2 # Number of observables\n\nmu = [0]*k\nSigma=[[1,0.5],\n [0.5,2]]\n\nX = multivariate_normal(mu,Sigma)\n\nu = multivariate_normal(cov=0.2)",
"_____no_output_____"
]
],
[
[
"<font color='red'> *notes*: </font>\n- `%matplotlib inline`\n - `%` is used to call [magic functions](https://ipython.readthedocs.io/en/stable/interactive/tutorial.html#magics-explained) in `Python`.\n - [`matplotlib`](https://matplotlib.org/) - \"comprehensive library for creating static, animated, and interactive visualizations in Python\"\n - Here is a more comprehensive treatment of [Using matplotlib in jupyter notebooks](https://medium.com/@1522933668924/using-matplotlib-in-jupyter-notebooks-comparing-methods-and-some-tips-python-c38e85b40ba1)\n - Originally designed to enable MATLAB-style plotting via gnuplot from the [IPython](https://plotly.com/python/ipython-vs-python/) command line\n - Noteable because it plays well with many operating systems and graphics packages\n - `inline` simply means that any plots we create are going to appear in our notebook inline \n",
"_____no_output_____"
]
],
[
[
"# check the available backends for matplotlib with --list\n%matplotlib --list",
"Available matplotlib backends: ['tk', 'gtk', 'gtk3', 'wx', 'qt4', 'qt5', 'qt', 'osx', 'nbagg', 'notebook', 'agg', 'svg', 'pdf', 'ps', 'inline', 'ipympl', 'widget']\n"
]
],
[
[
"<font color='red'> *notes*: </font>\n- `import numpy as np` \n - [numpy](https://numpy.org/): mathematical functions, generally used to create arrays\n- `from scipy.stats import multivariate_normal`\n - `import X` vs `from X import x` (Explanation from this [Stack Exchange](https://stackoverflow.com/questions/9439480/from-import-vs-import))\n - `import X`: \"Imports the module X, and creates a reference to that module in the current namespace. Then you need to define completed module path to access a particular attribute or method from inside the module (e.g.: X.name or X.attribute)\"\n - `from X import x`: \"Imports the module X, and creates references to all public objects defined by that module in the current namespace . . . after you've run this statement, you can simply use a plain (unqualified) name to refer to things defined in module X.\" \n - \"But X itself is not defined, so X.name doesn't work.\"\n - \"And if name was already defined, it is replaced by the new version.\" \n - \"And if name in X is changed to point to some other object, your module won’t notice.\"\n - `scipy.stats`\n - [scipy](https://www.scipy.org/): scientific functions\n - `scipy.stats` is the set of statistical functions within `scipy`",
"_____no_output_____"
]
],
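[
[
"A quick demo of the two import styles described in the notes above (the function used here, `numpy.sqrt`, is arbitrary -- any module attribute would illustrate the same point):",
"_____no_output_____"
]
],
[
[
"import numpy\nprint(numpy.sqrt(4.0)) # 'import numpy' -> must use the qualified name numpy.sqrt\n\nfrom numpy import sqrt\nprint(sqrt(4.0)) # 'from numpy import sqrt' -> the plain name sqrt now works",
"_____no_output_____"
]
],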
[
[
"import scipy\n# scipy?\n# A scientific computing package for Python . . . \n## imports all the functions from the NumPy namespace, and in addition provides:\n## Subpackages: Using any of these subpackages requires an explicit import. \n## For example,``import scipy.cluster``.",
"_____no_output_____"
],
[
"# scipy.stats?\n# This module contains a large number of probability distributions \n## as well as a growing library of statistical functions.",
"_____no_output_____"
],
[
"# scipy.stats.multivariate_normal?\n# A multivariate normal random variable.\n### multivariate_normal(mean=None,\n ### cov=1,\n ### allow_singular=False,\n ### seed=None,\n ### )",
"_____no_output_____"
],
[
"# 1.1. CODE ABOVE WITH COMMENTS AND PRINTING\n\n# run magic function matplotlib to have visualizations inline in your notebook\n%matplotlib inline \n\n# import the NumPy module and call it \"np\"\nimport numpy as np # see notes below\n\n# import just the multivariate_normal function from the \"stats\" functions of the SciPy module\nfrom scipy.stats import multivariate_normal \n\n# k gets 2\nk = 2 # Number of observables; R::k <- 2\nprint(\"k =\", k)\n\n# mu gets the 1x2 zero vector\nmu = [0]*k\nprint(\"mu =\", mu)\n\n# sigma gets the following matrix (note the use double brackets)\nSigma=[[1,0.5],\n [0.5,2]] # R::matrix(c(1, .5, .5, 2), nrow=2, byrow=TRUE)\nprint(\"Sigma =\", Sigma)\n\n# X gets a multivariate normal random variable with mean = mu and cov = Sigma \nX = multivariate_normal(mu,Sigma) \nprint(\"X =\", X)\n\n# u gets a multivariate normal random variable with mean = 0 and cov = .2 \nu = multivariate_normal(cov=0.2) # \nprint(\"u =\", u)",
"_____no_output_____"
]
],
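[
[
"A “frozen” random variable object bundles a distribution with fixed parameters; here is a small sketch (the parameters below are arbitrary, chosen only for illustration):",
"_____no_output_____"
]
],
[
[
"rv = multivariate_normal([0, 0], [[1, 0.5], [0.5, 2]])\nprint(rv.pdf([0, 0])) # evaluate the density at the mean\nprint(rv.rvs(2)) # draw 2 samples, each of dimension 2",
"_____no_output_____"
]
],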
[
[
"##### Construct Sample\n\n",
"_____no_output_____"
],
[
"To construct a sample of observables $(y,X)$ we just use the regression equation,\n plus an assumption about the value of $\\beta$:\n\n",
"_____no_output_____"
]
],
[
[
"# 1.2. ORIGINAL CODE\n\nbeta = [1/2,1]\n\nN=1000 # Sample size\n\n# Now: Transform r.v. X into a sample\nXrvs = X.rvs(N)\n\ny = Xrvs@beta + u.rvs(N) # Note use of @ operator for matrix multiplication",
"_____no_output_____"
]
],
[
[
"<font color='red'> *notes*: </font>\n- `X.rvs(size = n)` yields a random sample of size n from the object X \n- `@` represents matrix multiplication (as already stated above); R::%*%",
"_____no_output_____"
]
],
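[
[
"A small sketch of `.rvs()` and the `@` operator in action (the matrix `A` and vector `v` below are made up purely for illustration):",
"_____no_output_____"
]
],
[
[
"print(u.rvs(3)) # three draws from the normal r.v. u defined above\n\nA = np.array([[1, 2], [3, 4]])\nv = np.array([5, 6])\nprint(A @ v) # matrix-vector product; R::A %*% v",
"_____no_output_____"
]
],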
[
[
"# 1.2. CODE ABOVE WITH COMMENTS AND PRINTING\n\n# beta gets a list of 1/2 and 1\nbeta = [1/2,1] # R::list(1/2, 1)\nprint(\"beta =\", beta)\n\n# N gets 1000\nN=1000 # Sample size\nprint(\"N =\", N)\n\n# Now: Transform random variable X into a sample of N draws\nX = X.rvs(N)\nprint(\"X =\", X)\n\n# y gets (R::X %*% beta + U) where U is a sample of N draws from u\ny = X@beta + u.rvs(N) # Note use of @ operator for matrix multiplication\nprint(\"y =\", y)",
"beta = [0.5, 1]\nN = 1000\nX = [[ 1.83060078e+00 3.29108751e+00]\n [-3.73065819e-01 1.81987481e+00]\n [ 2.60583681e-03 -6.95979686e-02]\n ...\n [ 9.18830004e-01 -1.47532613e-01]\n [ 4.43816010e-01 -1.63953599e+00]\n [ 7.72931540e-01 3.25456680e-01]]\ny = [ 4.15323223e+00 1.57593293e+00 -8.92900708e-01 3.63055938e+00\n -9.42805732e-01 1.25090666e+00 1.33234155e+00 -3.44921295e-01\n 6.14277188e-01 1.06841910e-01 -1.16316395e-01 3.99417847e+00\n -9.54343171e-01 1.44271715e+00 -2.16166953e+00 -3.70770738e-01\n -9.22714305e-01 3.02260156e+00 1.10104795e+00 3.55070482e-01\n -3.08398704e+00 -5.84769127e-01 1.57342201e+00 -6.87316112e-01\n -8.56972052e-02 -3.72929216e-01 -1.91511948e+00 9.19301364e-01\n 1.68704106e+00 3.39941381e+00 -1.31357417e+00 -1.55955084e+00\n 5.26289421e-01 5.10066569e-01 9.21489466e-01 -5.29070614e-02\n 1.32846540e+00 3.66834438e+00 -1.21586370e+00 -3.17341804e-01\n 1.49462972e+00 -1.11932421e+00 1.60012728e+00 8.02210737e-02\n 1.91722116e+00 9.52929461e-01 -6.98807082e-01 6.09702838e-01\n 1.25401904e+00 -1.27318532e+00 1.72148260e+00 -2.22416075e+00\n 2.88226952e-01 9.41239950e-02 -2.09943979e+00 4.24220832e-01\n 3.80076469e-01 1.50227706e+00 -8.11796170e-01 -5.00199434e-01\n 1.72508121e+00 2.26245524e+00 9.94502934e-01 9.26583304e-01\n -2.68735996e+00 1.36073019e-01 8.80937530e-01 -2.23976497e+00\n -2.49610922e+00 1.29346295e+00 -6.23152449e+00 2.01803748e-01\n 1.18777525e+00 -1.44614540e+00 1.85189915e+00 1.11109711e+00\n 3.88209336e+00 7.16630879e-01 -2.72739869e-01 -1.31526066e+00\n -1.66522763e-02 -6.15006052e-02 -1.37258636e-01 7.49941741e-01\n 4.66893051e-01 -1.42366201e+00 3.16826009e+00 -2.18102519e+00\n -6.57086156e-01 1.05757379e+00 2.93219459e+00 1.53640565e+00\n -8.91509478e-01 -1.92384206e+00 -1.01210020e+00 -4.78728847e-01\n 6.81570268e-01 8.27913935e-01 -1.27557424e+00 2.18745756e+00\n -2.93228002e+00 -3.96474153e+00 9.51449708e-02 -8.71822070e-01\n -2.56943262e+00 1.18422485e+00 -1.39663251e+00 1.68833723e-01\n 1.60212879e+00 3.79850353e-01 2.59771339e+00 2.67976768e+00\n -6.84836384e-01 -2.01128190e+00 1.20594706e+00 2.14013195e+00\n -3.04820867e-01 -2.97211028e+00 1.08842894e+00 1.25015057e+00\n 2.35344144e+00 -9.29122866e-01 -3.34830070e+00 5.98014487e-02\n -2.16184179e+00 2.11188132e+00 -1.14197291e+00 -1.53532784e+00\n 1.34677393e+00 -2.61233286e+00 -5.30058763e-01 -1.23500559e+00\n -1.07668598e+00 -2.81417545e-01 3.10559249e+00 2.24720850e+00\n 1.90541372e+00 -1.73988990e+00 -1.97519414e+00 -1.09630020e+00\n 5.07308596e-01 4.13707208e+00 -8.72875464e-01 -8.78143834e-01\n 1.05453734e+00 3.04723451e+00 1.07349154e+00 1.31768511e+00\n -5.36072449e-01 -8.43536210e-01 6.87462070e-01 1.64173278e+00\n 1.77253764e+00 4.60210172e-01 -2.47397658e+00 -2.81560097e+00\n -6.21814539e-01 8.58819681e-01 3.29162697e+00 7.24438783e-02\n 1.77702522e-01 2.07447218e+00 2.65312136e-01 -3.60776512e+00\n -1.77376355e+00 1.52107338e+00 -3.12153803e-02 -7.50809904e-02\n 2.66844109e+00 -3.44006031e+00 -5.12362439e-01 -1.76989178e+00\n -3.24066575e+00 -2.95252379e+00 -4.00247919e-01 -1.74745535e-01\n -2.72076457e+00 7.09184410e-01 9.09736044e-01 1.00941737e+00\n 1.62829049e+00 1.06803461e+00 -3.54250720e-01 3.90991023e-01\n -9.28856922e-01 9.91303433e-01 -2.11586979e-01 -3.04560399e+00\n 3.43968849e+00 9.80254235e-01 -2.08749537e+00 -7.62857551e-01\n -2.20810109e-01 1.91387706e+00 -1.42815738e+00 4.63599552e-03\n 1.60340424e+00 -2.67810377e+00 2.88317304e-01 -6.79481623e-01\n 2.81084233e+00 2.07008229e+00 7.37193116e-01 6.39400970e-01\n 2.29360370e+00 3.08109078e-01 
1.33390505e-02 -9.81837570e-01\n -1.49542593e+00 -1.75723910e+00 1.67129659e+00 2.09529552e+00\n 9.75286762e-01 6.03948808e-01 -6.64510488e-01 -1.87984125e+00\n 2.63607146e-01 4.22762512e+00 9.59532241e-02 5.13096568e-01\n -1.24935483e+00 -1.94478447e+00 4.06821132e+00 -2.23973252e-01\n -2.72090102e+00 1.19140461e-02 -1.71874418e+00 1.49731743e+00\n -4.52807951e-01 -3.18891912e+00 -8.56761061e-01 -1.59594590e+00\n 1.19607905e+00 1.30491658e+00 2.91146943e+00 -1.99735456e+00\n -6.62939468e-01 -1.20382706e+00 4.78332958e-02 -5.18382470e-01\n -5.15394757e-01 -3.12601582e+00 -8.68606161e-02 -3.57153716e+00\n -1.22843223e+00 -1.46907680e-01 5.24672929e-01 -1.36496050e+00\n -7.65128618e-01 -2.93153271e-02 1.60108560e+00 -3.21126953e-01\n -4.95581656e-01 -1.43198252e+00 -5.74946455e-01 5.00395783e-01\n -1.19712683e+00 -1.40789407e-01 9.57098288e-01 8.03659052e-02\n 6.34378259e-01 -3.74213387e-01 -1.38542849e+00 3.68131511e-01\n -1.53954720e+00 2.71702619e+00 -4.55147155e-01 4.01574895e-01\n 3.02774373e-02 -1.16620008e+00 -5.72799939e-01 -1.83710454e+00\n -3.46936476e-01 1.05837404e-01 1.94929323e+00 1.54215455e+00\n 1.94288163e+00 -1.15801515e+00 -7.79888412e-01 1.85934696e+00\n -8.88017516e-02 -3.81890153e+00 -1.03430424e+00 3.21705773e-01\n 8.68471489e-01 1.45628980e+00 -1.04061008e+00 -3.20488235e+00\n 1.02751536e+00 -1.82288820e+00 -1.88050310e+00 -4.30639715e+00\n 3.02203281e-01 2.90831320e-01 3.10632591e+00 -2.25491870e+00\n 3.97609901e-01 -8.11357666e-01 1.15742932e+00 -1.72011162e+00\n 1.97523511e+00 3.95411744e+00 -3.34919114e-01 -1.66445743e+00\n -1.45631141e+00 -2.92559992e-01 -9.36836572e-01 1.74620343e+00\n 2.67814050e+00 -1.35203967e+00 -2.38077023e+00 -1.48172053e+00\n -2.92324081e+00 -5.98754819e-01 1.13143248e+00 -1.97618602e+00\n -1.49365322e+00 -1.07231800e+00 1.14873695e+00 1.68860853e+00\n 5.28824246e-02 -1.97573871e+00 -1.54447813e+00 -1.35052435e+00\n 1.38440199e+00 -1.79777774e+00 -3.64446614e-02 1.31669727e+00\n 2.10502444e+00 -5.54353299e-02 1.94292787e+00 -8.81661278e-02\n 1.02100126e+00 -5.15129073e-01 2.14340865e-01 4.78204486e-01\n -2.19070670e+00 -4.35532935e-01 2.61937758e+00 -2.34332092e-01\n 1.64426053e+00 -2.58597872e-01 2.79134158e+00 1.48172676e+00\n 2.21193060e+00 -9.49933139e-01 -4.32637457e-01 4.93480648e-01\n -2.12034345e+00 4.01880249e-01 -5.74629663e-01 1.67874164e-01\n 1.69863939e+00 -5.75072850e-02 -8.07964821e-01 1.50343319e+00\n -9.33105323e-01 -1.20870296e+00 2.14171997e-01 -6.44156839e+00\n 1.35057926e+00 1.20125664e+00 -2.36209629e+00 2.34365310e+00\n -3.69557604e-01 2.68869694e+00 2.27829965e+00 2.81878540e-02\n -2.52198251e-01 5.79765368e-02 -1.28012767e+00 -2.38200566e+00\n 3.04217930e-01 -2.04264669e+00 3.42404728e+00 -7.54868074e-01\n -1.78795173e+00 -5.40741723e-01 -7.27420804e-01 6.50926409e-01\n -2.58988045e-01 -2.36898660e-01 -1.89637194e+00 8.66961999e-01\n -1.24148314e+00 1.63695050e+00 -1.71965937e+00 -5.71371794e-01\n -6.85719010e-01 -1.77881066e-01 -3.08865183e+00 2.20560844e+00\n 3.98019380e-01 5.71952774e-01 2.43705626e+00 -2.58935501e+00\n -2.68372552e-01 3.84879214e-01 -2.36447123e+00 2.01183276e+00\n 2.36135203e+00 -1.76121063e+00 1.26621117e+00 -1.37204405e+00\n 2.01930678e-01 1.13327153e+00 2.13679781e+00 1.98969869e+00\n 2.00597857e+00 -1.33808755e+00 1.95326575e+00 -2.03488691e+00\n 1.67116002e+00 8.41717665e-02 -2.71619010e+00 -9.24326837e-02\n -1.21908043e-01 3.94897558e-01 -1.85301397e+00 2.33502386e+00\n 1.75295879e+00 -3.38162335e-01 2.48966314e+00 -1.58716530e+00\n 4.31884687e+00 -4.23161052e-01 -1.14537087e+00 
2.40683019e+00\n 7.59447953e-01 -6.75282964e-01 1.59895136e-01 -2.69550091e+00\n -2.72018321e-01 2.74489105e+00 -2.34837567e+00 1.70137749e+00\n 2.11664114e+00 1.09945312e-01 4.99403385e-01 1.24923951e+00\n -5.73842718e-01 1.50885441e+00 1.63546540e+00 3.96286420e-01\n -9.10249169e-01 1.58113432e+00 2.53014970e+00 -1.03010713e+00\n -1.25977489e+00 -2.84884408e+00 -4.17612925e+00 2.61149703e+00\n 4.61047525e+00 -3.35522705e+00 -1.11482610e+00 -1.05620989e+00\n -8.61264343e-01 7.88063958e-01 -5.05637299e-01 -1.03313961e+00\n 2.24827673e+00 1.43199173e-01 2.62994725e+00 -1.75603076e-02\n -5.99037825e-01 -1.52353290e+00 -2.32959494e+00 6.71032233e-01\n -5.31639236e-01 -1.99785261e+00 -1.25109063e+00 -9.73499505e-01\n 9.06195578e-02 7.59396591e-01 7.84299738e-01 -1.45525901e+00\n -1.39423214e+00 -4.06696232e-01 6.65559206e-01 -2.26380619e+00\n -2.24063604e+00 -9.38967680e-01 5.13144675e-01 -7.34171104e-01\n 8.79716944e-01 2.13722486e+00 1.24404191e+00 1.64934430e+00\n 2.14375920e+00 1.90510789e+00 -1.04453625e+00 -1.80471490e+00\n 1.00215785e-01 -1.89928655e+00 2.42810369e+00 1.07960933e+00\n 2.71066420e+00 -1.22952777e+00 -1.91592762e+00 5.98427666e-01\n 5.40957716e-01 -8.91411811e-01 1.94283435e+00 2.70353966e+00\n 3.13136759e+00 5.77326896e-01 3.12565522e+00 -1.00225869e+00\n -1.34752390e+00 -1.22800017e+00 -1.69104798e+00 -1.49592494e+00\n -3.25032852e+00 2.93549669e+00 -5.13619951e+00 -1.87524577e+00\n 1.59666022e-01 -1.73386305e+00 -8.67697730e-01 6.74418751e-01\n 1.11713955e-01 6.39946219e-01 2.39996021e+00 2.43910637e+00\n -2.56659401e+00 9.45504497e-02 2.67426870e+00 -9.46050784e-01\n -2.58072017e+00 -5.25914190e-01 -2.22272814e+00 6.98668116e-01\n 3.19894699e-02 1.05215180e+00 -5.31642116e-01 8.71263330e-01\n -1.11706003e+00 -4.05156119e+00 1.07444660e+00 -2.06847782e+00\n -1.50506263e+00 -1.93132434e+00 5.26778644e-01 -4.24612128e-01\n 1.46737318e+00 5.15434875e-01 -1.81544375e+00 6.67726342e-01\n 1.70152259e+00 8.19145296e-02 -1.32541473e+00 9.45945563e-01\n 2.27066001e+00 -5.40619645e-01 9.02505173e-01 1.91221136e+00\n 4.90327497e+00 -2.12583779e+00 -1.50593493e+00 -2.38371761e+00\n 2.16666760e-01 2.02144117e+00 -1.24829846e+00 -2.86773115e-01\n 1.78021319e+00 -1.27263988e+00 -2.41011653e+00 -3.22747952e-01\n 3.62227183e-01 -1.25134570e+00 -1.15284294e+00 -1.06021142e+00\n 1.14021511e+00 -2.27365029e+00 5.74115966e-01 1.47401430e+00\n -7.92921720e-01 -2.60806779e+00 -2.05380146e+00 1.14666956e+00\n -1.38166001e-01 4.32962876e-01 -4.77762267e-01 9.34799896e-01\n 1.22388373e-01 1.23642027e-01 -2.44451475e+00 -7.57835967e-01\n 1.99487296e+00 -1.06936404e+00 1.11526937e+00 2.14455262e+00\n -1.71133325e+00 3.30122642e+00 3.18764599e+00 -9.57515832e-01\n -6.45839226e-01 2.13609578e+00 -1.95801508e+00 2.46704069e-02\n -2.40775828e-01 -1.72560039e+00 3.81314941e-01 -9.19686726e-01\n -2.28594029e+00 -5.09053890e-01 4.29762738e-01 -2.19812504e+00\n 7.06343160e-01 -3.74867423e-01 -5.91002167e-01 6.80147781e-01\n -1.07211611e+00 1.30058967e-01 1.05910246e+00 -2.48082636e-02\n 3.43865257e-01 5.14201540e-01 1.32979282e+00 -1.67879238e+00\n 2.33930539e+00 -3.82004381e-02 7.03312433e-01 -1.85212058e+00\n 3.12109845e+00 -3.71505391e-01 -1.40787561e+00 -1.90332827e+00\n 2.27084757e+00 -6.01724246e-01 1.61096406e+00 -2.32074858e-01\n 2.21946956e+00 2.47212882e+00 -2.20511191e+00 -2.76546178e+00\n -2.82347373e+00 -2.59146113e-01 -7.42476871e-01 6.14969857e-01\n -1.48482478e+00 4.17678211e-01 7.45577952e-01 -6.81525825e-01\n -1.22871948e+00 -2.48437728e-01 -1.68564093e+00 -1.80495764e+00\n 
-2.28849115e-01 -7.35934711e-01 2.43439463e+00 1.67598841e+00\n 1.02140920e+00 5.43691936e-01 -2.76776363e+00 1.74518026e-01\n -1.27693798e+00 -6.38721966e-01 2.09397104e-01 2.65714674e+00\n 1.36322556e+00 -1.93775067e+00 -1.43477464e+00 -9.62003139e-01\n -9.10314249e-01 3.98152398e-01 -1.57493697e+00 -9.12895668e-01\n 3.74488623e+00 3.78780558e+00 2.63469665e+00 4.20921158e-01\n 1.26581883e+00 -2.28618791e+00 1.08742927e+00 -3.10264912e+00\n 1.74947500e+00 1.73106153e+00 1.89755695e+00 -3.36861349e+00\n 3.83293813e-01 -2.01648933e+00 -1.70389933e+00 -1.90163018e+00\n -2.16846779e+00 -2.62988922e+00 -7.46886434e-01 -3.88278019e-01\n -2.48763692e+00 5.37852530e-01 -2.90465214e+00 -1.94604396e+00\n -2.36225056e+00 3.32887864e-01 -3.94276386e-01 -1.94313688e-01\n 2.39979541e+00 -1.07468641e+00 3.02707767e+00 -2.87251163e+00\n -7.32173314e-01 3.02341388e+00 9.15521638e-01 2.85407405e+00\n 1.80970115e+00 -2.40272404e-01 -2.72922982e+00 -3.33101099e-01\n 8.15017869e-01 2.07831562e+00 1.79049912e+00 3.22450599e-01\n -2.37151335e+00 6.08678295e-01 -4.53230637e-01 -1.02906180e+00\n -4.39327369e+00 1.06342695e-02 2.31788100e+00 -1.64058074e+00\n 3.60554757e+00 -1.29390637e+00 -1.05340167e+00 -2.25304051e+00\n 1.23780234e+00 9.98178497e-02 -1.88340851e+00 -1.68767976e+00\n -1.36497743e-02 -4.39064474e-01 -1.82835498e+00 -1.69684751e+00\n -1.83512439e+00 -3.81544556e+00 1.80371488e+00 2.31473771e-01\n 1.04233948e-01 -2.67116664e+00 4.39563793e-01 1.09483006e+00\n -1.31331547e+00 2.88863896e-01 -2.18269386e+00 1.97530141e+00\n -1.82913844e+00 -1.48552868e-01 -5.97714730e-01 1.42406350e+00\n 3.05260339e-01 3.52574041e+00 1.48278355e+00 2.11300101e+00\n 1.60923774e+00 -1.34171591e+00 1.86810630e-01 -4.92210633e-01\n 3.16372227e+00 -1.50347313e-03 2.15817810e+00 3.69001010e+00\n 1.11784010e+00 8.13942915e-01 3.47688290e+00 -2.12666175e+00\n -5.81953281e-01 1.42188496e-01 -1.73496769e+00 7.13980436e-01\n -2.20212180e+00 4.08295926e-01 3.20001195e+00 1.48306710e+00\n -8.67544292e-01 2.07315723e+00 3.71086464e+00 -2.58564806e+00\n 1.08142868e+00 2.14438804e+00 2.35182428e+00 -5.03169929e-01\n -6.79686339e-01 2.34337983e+00 4.87665316e-01 3.74359814e+00\n -5.53039720e-01 -1.29210308e+00 5.34826656e-01 -1.40655080e+00\n 1.16578862e+00 -2.50780479e+00 -2.00144812e+00 -4.36608544e-01\n -1.15549565e+00 -1.63692472e-01 1.06782171e+00 -1.60636298e+00\n 4.42826474e-01 5.98005096e-01 1.14225587e+00 -7.31872775e-01\n 1.75894778e+00 4.69387059e+00 3.33962469e+00 1.82282003e+00\n -1.29555767e+00 -2.97401128e-01 5.76647182e-01 -1.15585378e+00\n 7.51702238e-01 2.03649227e+00 -1.17573464e+00 1.56101211e+00\n 2.89336739e-01 1.01566805e+00 -3.05330606e+00 4.36836157e-02\n 1.23153945e+00 -9.41127943e-01 -3.14392878e-02 -7.69234739e-01\n 1.33453377e+00 2.86686586e+00 1.09945324e+00 1.03264152e+00\n 3.44151789e-01 -6.79162455e-01 1.37753817e+00 3.76350994e+00\n -4.95802633e-02 -1.57125357e-02 -1.27137957e+00 -3.87819365e-01\n 1.35101047e+00 -3.26819379e-01 -1.23349942e+00 4.92182533e-01\n -5.55544636e-01 5.78388218e-02 1.72647570e+00 6.07948507e-01\n -3.28124561e-01 4.07338149e-01 6.72604894e-01 -5.06704134e-01\n 2.79362436e+00 -2.31728157e+00 -9.70838458e-02 -1.07140205e+00\n 2.24692837e-01 -1.63914668e+00 -2.30986388e+00 3.76890173e+00\n 1.05947348e+00 6.70365718e-01 2.18176621e-01 9.44426775e-01\n 1.95349819e+00 6.01093861e-01 -1.41729996e+00 1.98771813e+00\n -3.07525373e+00 -2.43315606e-01 6.35790603e-01 2.99523277e+00\n -3.28391278e+00 -6.10780054e-01 -5.62604067e-01 -1.03320375e+00\n -7.33484844e-01 6.63165332e-01 
-1.46959873e+00 5.88408360e-01\n -1.14461399e-02 -6.06911411e-02 -2.30443472e+00 5.50836915e-01\n -2.87017891e+00 9.53115064e-01 -2.82791120e+00 -7.67932273e-01\n 1.08819234e+00 -2.31730816e+00 8.67327350e-01 -2.98490413e+00\n 1.02080849e+00 -3.37931255e-01 8.94593124e-01 1.93919292e+00\n 5.20930813e-02 2.00752199e+00 -2.60017970e+00 7.86210836e-03\n 2.10062899e+00 2.68197385e+00 -1.27298103e+00 4.24713423e-01\n 1.08962143e+00 -2.58355897e+00 1.19226253e+00 3.23572533e+00\n 2.10281249e+00 2.70345521e+00 1.99021712e+00 1.72877297e+00\n 3.03473179e+00 4.96185840e-01 2.59070151e-01 1.36770430e+00\n 3.84323633e+00 -3.92771788e-01 -1.92453623e+00 -2.69579221e+00\n 3.15447956e+00 -2.69561037e+00 2.91040107e+00 3.11135854e+00\n -1.40648304e+00 1.14451890e+00 -2.47804685e+00 2.69727302e+00\n -1.99286168e+00 -2.72310913e+00 1.75715037e+00 2.86654795e+00\n -3.93346011e-01 7.65002373e-01 1.97550697e+00 -3.09284956e+00\n -4.61555674e-01 1.00993055e-02 -2.84739669e-01 5.25363827e+00\n -1.53689864e+00 -1.23544080e+00 8.50184144e-01 7.24215866e-01\n -2.70296338e+00 7.01717102e-01 7.78212882e-01 -2.72582141e+00\n 5.15338795e-01 -1.65907249e+00 2.68472246e+00 2.05567636e+00\n -4.04660421e+00 -6.06604576e-01 -9.33737494e-01 -1.25825539e+00\n 2.53678367e+00 -1.56240085e+00 -1.09530794e-01 8.39573993e-01\n -3.08406257e+00 5.91339075e-01 -1.33722358e+00 5.72767156e-01\n -9.17593187e-01 8.98274144e-01 -3.02216029e+00 2.67256657e-01\n -9.17101475e-01 2.77249412e+00 -2.08031596e+00 1.57659502e-01\n 3.04095325e+00 8.02415238e-01 1.20863141e+00 1.89039124e+00\n -7.71942865e-01 1.80911826e-01 3.65273205e-01 -4.99061214e-01\n -1.87633963e+00 1.79284002e+00 1.76563126e+00 -4.38495341e-01\n -7.19613677e-01 1.92313240e+00 1.39672162e+00 1.67803882e-02\n -2.59284392e-01 2.12018270e+00 -6.82717515e-01 -1.76887704e+00\n 3.45687280e-02 1.89943052e+00 2.17753359e+00 1.52351965e-01\n -1.17594878e+00 -2.50002416e+00 -7.35543382e-01 1.23132823e+00\n 7.17801721e-01 -2.69958547e+00 -6.11555423e+00 -1.05814010e+00\n -2.47000262e-01 -1.70681054e-01 -9.63320416e-01 1.24154187e+00]\n"
]
],
[
[
"<font color='red'>NB: Once you redefine your random variables using X.rvs(N), you must rerun the earlier code to run this cell again. </font> ",
"_____no_output_____"
],
[
"##### Turn to estimation\n\n",
"_____no_output_____"
],
[
"So, we now have data on *realizations* $(y,X)$ which take the\n Now forget that we know $\\beta$ and let’s estimate it, using\n OLS. As a numerical matter it’s better to avoid explicitly\n inverting the $(X^T X)$ matrix; instead we can solve the “normal”\n equations.\n\n",
"_____no_output_____"
],
[
"##### Numerical solution\n\n",
"_____no_output_____"
]
],
[
[
"# 1.3. ORIGINAL CODE\n\nfrom scipy.linalg import inv, sqrtm\n\nb = np.linalg.solve(X.T@X,X.T@y)\n\ne = y - X@b\n\nvb = e.var()*inv(X.T@X)\n\nprint(b,end='\\n\\n')\nprint(sqrtm(vb))",
"[0.54382267 0.99563508]\n\n[[ 0.01469681 -0.00224287]\n [-0.00224287 0.01048138]]\n"
]
],
[
[
"<font color='red'> *notes*: </font>\n- `X.T` produces the transpose of X; R::t(X)\n- `np.linalg.solve(a, b)` \"Solve a linear matrix equation, or system of linear scalar equations. Computes the \"exact\" solution, `x`, of the well-determined, i.e., full rank, linear matrix equation `ax = b`.\"\n- `print(x, end = '\\n\\n')` by default `end = '\\n'` in the `print()` function, which starts a new line. However, when `'\\n\\n'` is instead entered, your notebook will also skip a line between the printed information and the next output. To avoid a new line being started, you can instead enter `' '`. \n- `inv( )` is a function that calculates the inverse of a matrix\n- `sqrtm( )` is the matrix square root function.",
"_____no_output_____"
]
],
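[
[
"As a quick sanity check (not a rigorous benchmark), the explicit-inverse formula and `np.linalg.solve` should agree on this well-conditioned problem, and `sqrtm(vb)` should square back to `vb`:",
"_____no_output_____"
]
],
[
[
"b_inv = inv(X.T@X) @ X.T @ y # explicit inverse (works here, but less stable in general)\nb_solve = np.linalg.solve(X.T@X, X.T@y) # solves the normal equations directly\nprint(np.allclose(b_inv, b_solve)) # should print True\n\nprint(np.allclose(sqrtm(vb) @ sqrtm(vb), vb)) # sqrtm(vb) squared recovers vb",
"_____no_output_____"
]
],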
[
[
"# 1.3. CODE ABOVE WITH COMMENTS AND PRINTING\n\n# import both the 'inv' and 'sqrtm' functions from the 'linalg' functions of the SciPy module\nfrom scipy.linalg import inv, sqrtm\n\n# solve for b in the equation X.T@X@b = X.T@y b = solve(t(X) %*% X) %*% t(X) %*% y\nb = np.linalg.solve(X.T@X,X.T@y)\nprint(\"b =\", b)\n\n# e gets R:: y - X %*% b \ne = y - X@b\nprint(\"e =\", e)\n\n# An additional line simply to show what e.var() generates\nevar = e.var()\nprint(\"evar =\", evar)\n\n# vb gets R:: var(e) * solve(t(X) %*% X)\nvb = e.var()*inv(X.T@X)\nprint(\"vb =\", vb)\n\n# print b with an additional line after the results\nprint(b,end='\\n\\n')\n\n# print the squareroot of the matrix vb\nprint(sqrtm(vb))",
"b = [0.48733973 1.02314017]\ne = [-1.06136115e-01 -1.04244294e-01 -8.22962158e-01 6.57504296e-01\n -5.67355626e-01 -4.62722401e-01 2.05093577e-02 -4.64945246e-01\n -6.11217441e-01 4.58831214e-01 1.90567791e-01 -6.22041208e-01\n -1.16449535e-01 3.45170038e-01 3.84591212e-01 1.54259448e-01\n -1.73285633e+00 -6.26650267e-02 1.13202485e-01 -2.55643315e-01\n -5.30006309e-01 5.59453518e-01 1.02996082e+00 -1.34219894e-01\n 1.33686592e-01 3.97901761e-01 9.79020187e-01 -7.00798286e-02\n -4.23323950e-01 1.48368520e-01 -3.11763267e-01 -3.28342031e-01\n -5.32310056e-01 5.93463516e-02 4.59391819e-01 -3.92615132e-01\n -3.74088948e-01 8.07584341e-01 3.07646186e-01 -4.31417749e-01\n 4.03795391e-01 1.51158413e-01 -6.29627863e-01 -2.90045814e-01\n 1.10314948e+00 1.78976864e-02 4.00047489e-01 -5.94328747e-02\n 3.03915261e-01 -8.48278140e-02 1.69690710e-01 -3.50864990e-01\n -3.13058835e-01 7.19253132e-01 2.76768462e-01 -4.38702552e-01\n -7.25856224e-01 -1.20984160e-02 6.56840849e-01 7.47482659e-02\n 2.30219138e-01 9.95199115e-01 9.12019288e-01 5.38383414e-01\n -3.41783213e-01 -3.64330688e-02 4.94144176e-01 6.35094609e-01\n 7.49688833e-02 5.84250682e-01 -2.79967699e-01 2.96639205e-01\n -2.03621016e-01 6.36323874e-01 2.90435336e-01 3.69626645e-01\n -1.18982126e-01 -1.27653682e-01 -8.24956767e-01 -7.80378507e-02\n 4.38256359e-01 -3.08762678e-01 5.05779969e-01 -5.99705354e-01\n 1.02969082e-01 -5.54767925e-01 -6.86276543e-02 2.65045212e-01\n 2.28420281e-01 6.28383807e-01 -2.01799960e-01 -3.18905941e-01\n -4.69615540e-01 -2.61463708e-01 8.28261826e-01 2.59079879e-01\n 4.38420658e-01 3.33507353e-01 -3.26797949e-01 -2.81956539e-01\n -5.81791452e-02 9.38327547e-02 -6.69664035e-01 3.29835456e-01\n -1.49431088e-01 4.59478274e-01 4.63975590e-01 9.79772065e-01\n -2.34247868e-01 5.12408611e-01 -2.48929366e-01 4.13369116e-01\n -2.31549258e-01 3.75699258e-01 3.48257102e-01 1.05020933e-01\n 8.04872035e-01 -9.67652722e-02 2.09204946e-01 4.75667685e-02\n 2.20399090e-01 2.70996780e-01 -6.57434901e-01 -5.89280078e-01\n 3.75092163e-01 5.64002904e-01 -4.12663942e-01 -4.16639098e-01\n 3.38347627e-01 -4.21417332e-01 3.12604135e-01 -4.30511825e-01\n -1.91196354e-01 3.93436760e-01 4.55426017e-01 1.93040732e-01\n 2.05471997e-01 2.72082617e-01 -3.26684463e-01 4.66304056e-02\n 4.92812123e-01 3.81268011e-02 -3.21740732e-01 -3.35372202e-01\n -2.10548467e-01 -2.92259547e-01 8.80536137e-01 8.92160348e-01\n -1.02549640e+00 4.69186559e-01 -2.01404776e-01 8.51042625e-01\n -5.89463007e-01 -6.58442445e-01 -7.08285068e-01 -1.22784468e+00\n -2.66088472e-02 -1.12492543e-01 1.19210672e+00 7.78325107e-01\n -5.33426618e-02 -2.96270540e-01 3.15215593e-01 -1.86680418e-01\n -5.69860098e-01 2.25883806e-01 5.45486696e-01 2.77843960e-01\n 1.24255071e+00 -2.29507538e-01 4.14170814e-01 1.38985370e-01\n -7.22351740e-01 2.05042360e-02 -1.39053876e-01 -1.98928298e-01\n 5.84315463e-02 3.88230266e-01 2.69612673e-01 -3.82640171e-01\n 4.74131723e-01 -2.96129968e-02 6.00234060e-01 3.57637134e-01\n 6.02768516e-01 -6.58934345e-01 4.82738483e-01 -3.45868010e-01\n -9.10075261e-02 2.45330563e-01 -2.95154917e-01 4.43757043e-01\n -8.88998596e-02 4.56317205e-01 3.07109671e-01 -1.42476750e-02\n -4.27660318e-02 3.03512075e-01 4.04129931e-01 1.22227940e-01\n 2.15767263e-01 3.81221290e-01 5.14126418e-01 -1.66758198e-01\n 2.94845903e-01 2.61852882e-01 -4.46334076e-01 8.56471025e-01\n -4.17482924e-01 -2.79241430e-02 2.53752200e-01 -6.57807621e-01\n 1.44919931e-01 -1.46905558e-01 2.11996210e-01 -2.66539653e-01\n -1.88283719e-01 4.37293192e-01 -4.05909971e-01 6.44952935e-02\n 
1.31036358e-01 4.12491252e-01 -3.70534079e-01 2.16436205e-01\n -8.90551205e-01 6.32642761e-01 -1.52701835e-01 -1.74894360e-02\n -5.22130498e-01 -4.59799408e-01 -8.56494532e-01 1.83669679e-01\n 8.69453669e-02 -8.28303497e-01 3.53245154e-01 -3.78576709e-01\n -7.39969162e-02 -4.87038992e-03 -1.42595183e-02 -4.40063250e-01\n 8.68416664e-02 1.76948310e-01 3.40866500e-01 -1.55389885e-02\n -8.31586782e-02 1.63366202e-03 2.89618447e-01 2.36651725e-01\n -4.05933162e-01 -1.73700919e-01 3.43675920e-01 -2.85871057e-01\n 1.99812104e-01 -4.18925937e-01 5.41192262e-01 5.69276999e-01\n 3.17794526e-01 -2.36776215e-01 3.05380601e-01 -1.08655735e-01\n -6.46377024e-01 7.96177293e-03 -2.35434364e-01 -3.50174259e-01\n 3.29959348e-01 6.78814041e-01 3.69322802e-03 -2.68135806e-02\n -7.17375768e-02 1.26655443e-01 -1.25981814e-01 6.36492784e-01\n 7.25078055e-01 -1.55751296e-01 -4.82930403e-02 3.91135870e-01\n -3.66313126e-01 -2.55987665e-01 4.19010432e-01 -1.17430346e-01\n -2.70030454e-02 4.00712237e-01 1.71083265e-02 3.57909571e-01\n 5.90274177e-01 -6.21715454e-02 -6.68858481e-01 -3.63889333e-01\n -3.43273220e-01 -6.75034068e-01 -5.76779721e-01 -2.86573621e-01\n 4.60753419e-01 1.04900976e-01 1.88022504e-01 -1.65371948e-01\n -5.16202972e-01 -9.55666455e-02 -2.27944435e-01 -1.08773838e-01\n -2.08272574e-01 1.06043507e+00 -2.00388995e-01 5.74314723e-01\n -6.34457693e-01 1.02264380e-01 -9.32117312e-02 -3.13905477e-01\n 4.67747378e-01 -1.79051006e-01 -2.27550981e-01 4.89318248e-02\n 7.80370663e-03 8.64837552e-01 -2.17852348e-01 -7.51871767e-01\n -6.98527467e-02 -8.65337274e-02 2.30536636e-01 7.59524598e-02\n 6.46209495e-01 -6.21902704e-01 3.29415414e-01 -6.80174382e-01\n -7.91254427e-01 -4.28637918e-01 -2.52331721e-01 -1.03832998e-01\n -7.50556159e-02 -4.95974817e-01 3.56071612e-01 1.46834940e-01\n -1.92704147e-01 3.91420876e-01 2.79154573e-01 5.45339041e-01\n -8.66741387e-01 -9.95681020e-02 4.68751525e-01 4.51058810e-01\n 2.23655035e-01 -5.65917868e-01 -9.84634315e-02 4.84738007e-01\n 4.28057316e-01 7.46960612e-01 5.76788970e-01 -3.15667169e-01\n 4.07978250e-01 -5.91601356e-01 -3.01459445e-01 3.11486915e-01\n 2.68529996e-01 -1.52335882e-02 -4.79135280e-01 6.51209029e-02\n -2.83140836e-01 -3.40167449e-01 -1.92306231e-01 1.96155574e-01\n -9.16788654e-02 -3.33233353e-02 -2.78987881e-01 2.14773361e-06\n 3.09105620e-01 3.61396035e-01 6.07880323e-03 2.12475757e-01\n 5.35066508e-02 6.15144428e-01 -7.60014332e-01 -9.88767447e-01\n 2.29147818e-02 -2.96610582e-01 5.08606596e-01 6.71346889e-01\n 2.99895000e-01 -4.68082346e-01 8.39322044e-02 -3.62474918e-01\n -1.01163326e-01 -2.10926255e-01 1.48302649e-01 -2.15655851e-01\n -5.88776027e-01 4.87841994e-01 -4.21010184e-01 2.49165577e-01\n 9.55889300e-02 -4.41344592e-01 -3.26256062e-01 3.68661904e-01\n -6.81491089e-01 2.52010363e-02 -2.70066248e-01 1.17438940e-01\n 5.43762904e-01 4.97531125e-01 2.73816522e-01 2.47043674e-02\n 1.74821484e-01 -3.82252346e-01 1.76735585e-01 -7.31525134e-01\n 5.24550220e-02 6.58353284e-01 -2.26108588e-01 3.27339987e-01\n 1.18468605e-02 -2.76701761e-01 6.47830895e-01 -6.38307638e-02\n 5.08812784e-01 -2.49869754e-01 -4.61745927e-01 3.44570137e-01\n 3.91348009e-01 1.60736984e-01 -1.17377975e+00 4.18110609e-01\n 2.94201157e-02 1.63036482e-01 3.03050704e-01 -4.66592254e-02\n -7.11805624e-01 2.11512449e-01 3.09442173e-01 8.87719339e-02\n 2.28894239e-01 -1.15626794e+00 -2.52197250e-01 -3.41806197e-01\n -1.96105243e-01 2.36083151e-01 1.63587364e-01 -1.16065700e-01\n 5.27371563e-01 -9.25314589e-02 1.14087811e+00 -1.67562798e-01\n -2.68632957e-01 -3.56144148e-01 
... (long numeric array output truncated) ...\nevar = 0.2003668688111608\nvb = [[ 2.25174815e-04 -5.38460162e-05]\n [-5.38460162e-05  1.10835407e-04]]\n[0.48733973 1.02314017]\n\n[[ 0.01485243 -0.0021401 ]\n [-0.0021401  0.01030802]]\n"
]
],
[
[
"### 2. [Weighted Regression in =python=](https://datahub.berkeley.edu/user/benjaminkrause/notebooks/ARE212_Materials/weighted_regression.ipynb)\n\n",
"_____no_output_____"
],
[
"or see [weighted_regressions on github](https://github.com/ligonteaching/ARE212_Materials/blob/master/weighted_regression.ipynb)",
"_____no_output_____"
],
[
"The fact that $T$ and $u$ are “independent” (or at least\northogonal) variables means that if we want to compute a\n“classical” regression we’d do it something like this:\n\n",
"_____no_output_____"
],
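[
"As a preview (added here; not in the original notes), that “classical” computation amounts to solving the square normal-equations system once a sample $(y,X)$ is in hand:\n\n```python\n# Hypothetical one-liner: classical OLS solves (X'X) b = X'y;\n# np.linalg.solve avoids forming an explicit inverse.\nb_ols = np.linalg.solve(X.T@X, X.T@y)\n```\n\nThe notebook first constructs such a sample from the random variables defined next.\n\n",
"_____no_output_____"
],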
[
"##### Define independent random variables\n\n",
"_____no_output_____"
]
],
[
[
"# 2.1. ORIGINAL CODE\n\n%matplotlib inline\nimport numpy as np\nfrom scipy.stats import multivariate_normal\n\nk = 3 # Number of observables in T\n\nmu = [0]*k\nSigma=[[1,0.5,0],\n [0.5,2,0],\n [0,0,3]]\n\nT = multivariate_normal(mu,Sigma)\n\nu = multivariate_normal(cov=0.2)",
"_____no_output_____"
]
],
[
[
"<font color='red'> *notes*: </font>\n- `len(x)` returns the length of an array; R::length(x)\n- `np.shape(x)` returns the dimensions of a matrix; R::dim(x)",
"_____no_output_____"
]
],
[
[
"# 2.1. CODE ABOVE WITH COMMENTS AND PRINTING\n\n%matplotlib inline\nimport numpy as np\nfrom scipy.stats import multivariate_normal\n\nk = 3 # Number of observables in T\nprint(\"k =\", k)\n\nmu = [0]*k\nprint(\"mu =\", mu)\nprint(\"Length mu =\", len(mu))\n\n\nSigma=[[1,0.5,0],\n [0.5,2,0],\n [0,0,3]]\nprint(\"Sigma =\", Sigma)\nprint(\"Shape Sigma =\", np.shape(Sigma))\n\nT = multivariate_normal(mu,Sigma)\nprint(\"T =\", T)\n\nu = multivariate_normal(cov=0.2)\nprint(\"u =\", u)",
"k = 3\nmu = [0, 0, 0]\nLength mu = 3\nSigma = [[1, 0.5, 0], [0.5, 2, 0], [0, 0, 3]]\nShape Sigma = (3, 3)\nT = <scipy.stats._multivariate.multivariate_normal_frozen object at 0x7f716d6ba2b0>\nu = <scipy.stats._multivariate.multivariate_normal_frozen object at 0x7f7121f5b278>\n"
]
],
[
[
"##### Define =X=\n\n",
"_____no_output_____"
],
[
"Recall that $X$ can depend on $T$ and $u$. This dependence needn’t be\nlinear! For example, suppose $X=T^3D + u$, where $D$ is an\n$\\ell\\times k$ matrix.\n\n",
"_____no_output_____"
],
[
"##### Construct Sample\n\n",
"_____no_output_____"
],
[
"To construct a sample of observables $(y,X,T)$ we just use the regression equation,\n plus an assumption about the value of $\\beta$:\n\n",
"_____no_output_____"
]
],
[
[
"# 2.2. ORIGINAL CODE\n\nbeta = [1/2,1]\n\nD = np.random.random(size=(3,2)) # Generate random 3x2 matrix\n\nN=1000 # Sample size\n\n# Now: Transform rvs into a sample\nT = T.rvs(N)\n\nu = u.rvs(N) # Replace u with a sample\n\nX = (T**3)@D # Note use of ** operator for exponentiation\n\ny = X@beta + u # Note use of @ operator for matrix multiplication",
"_____no_output_____"
]
],
[
[
"<font color='red'>NB: Once you redefine your random variables using X.rvs(N), you must rerun the earlier code to run this cell again. </font> ",
"_____no_output_____"
],
[
"<font color='red'> *notes*: </font>\n- `**` for exponents so that `3**2` = $3^2$; R::3^2 or 3**2 (i.e. the same)",
"_____no_output_____"
]
],
[
[
"# 2.2. CODE ABOVE WITH COMMENTS AND PRINTING\n\nbeta = [1/2,1]\nprint(\"beta =\", beta)\nprint(\"Length beta =\", len(beta))\n\nD = np.random.random(size=(3,2)) # Generate random 3x2 matrix\nprint(\"D =\", D)\nprint(\"Shape D =\", np.shape(D))\n\nN=1000 # Sample size\n\n# Now: Transform rvs into a sample\nT = T.rvs(N)\nprint(\"T =\", T)\nprint(\"Shape T =\", np.shape(T))\n\nu = u.rvs(N) # Replace u with a sample\nprint(\"u =\", u)\nprint(\"Shape u =\", np.shape(u))\nprint(\"Length u =\", len(u))\n\nX = (T**3)@D # Note use of ** operator for exponentiation\nprint(\"Shape X =\", np.shape(X))\n\ny = X@beta + u # Note use of @ operator for matrix multiplication\nprint(\"Shape y =\", np.shape(y))",
"beta = [0.5, 1]\nLength beta = 2\nD = [[0.9747384 0.00637538]\n [0.88560785 0.13854454]\n [0.94139493 0.28054936]]\nShape D = (3, 2)\nT = [[ 1.79044788 1.57618282 -0.62527475]\n [ 0.95042714 1.37823013 1.46242992]\n [-1.22589168 -0.09118055 -0.18603817]\n ...\n [ 1.73295916 -0.43395036 0.91862477]\n [-0.50308082 -0.42424176 1.46060027]\n [ 0.1561883 -0.04889125 -1.52390502]]\nShape T = (1000, 3)\nu = [ 4.10811167e-01 -4.71750893e-01 -3.81919493e-01 6.03697253e-02\n -4.13359733e-01 -2.42025650e-01 -8.70931439e-01 3.20978893e-01\n -1.76539975e-01 2.40444144e-01 7.33148208e-02 -4.06563731e-01\n 2.94791934e-02 1.76831242e-01 1.00869048e-03 -4.86030867e-02\n 2.02104019e-01 5.48479850e-01 1.19939424e-01 -2.61510235e-01\n -8.70381677e-02 -2.56647666e-01 -4.00420040e-01 -8.63562599e-02\n 4.05892979e-02 1.39330094e-01 2.92040681e-01 -4.47211305e-01\n 1.44864243e-01 2.51371139e-01 -4.07993098e-01 -6.55088144e-02\n 9.61217714e-02 3.68555876e-01 7.57784744e-01 -3.48685909e-01\n -2.68729141e-01 1.96453434e-01 1.33419929e-01 -6.25526183e-01\n 5.16540091e-02 -2.62430156e-02 -9.49409271e-01 -7.52394645e-02\n 3.93563510e-01 -3.10777847e-01 1.74771530e-01 2.80068428e-01\n -9.96165445e-01 -4.24281556e-03 -2.96517137e-01 5.38314337e-01\n -3.02732706e-01 -6.97684347e-01 -2.77942244e-01 -6.17426193e-02\n 9.48943234e-02 -1.51311998e-01 -2.91851312e-01 2.48435324e-02\n 5.10135322e-02 -1.08992199e+00 -1.23654071e-01 9.84979332e-02\n -5.60773416e-01 1.73271470e-01 4.85719527e-01 -3.55239875e-01\n 3.40548487e-02 4.24044729e-01 -1.72525259e-01 -7.86675567e-02\n 3.63809696e-02 1.84427015e-01 -1.09788461e-01 1.31489413e-01\n -5.67495756e-02 -4.66919029e-01 -2.88184703e-01 -5.89476167e-02\n -7.19533200e-01 4.97003280e-01 1.95135615e-01 -5.17296401e-02\n 5.25946693e-02 -7.94440619e-01 5.74318232e-02 2.82195604e-01\n 9.01856915e-02 2.65039132e-01 -2.73571852e-01 -5.87562769e-02\n -2.18662350e-01 1.46282917e-01 9.16838503e-03 -5.71646229e-01\n -4.08190923e-01 6.04479719e-01 2.86993862e-01 -8.05815717e-01\n 7.33310815e-01 -4.86873577e-01 -5.66877468e-01 2.26166926e-01\n -3.41060199e-01 -8.89182794e-01 -3.53289036e-01 5.00943132e-01\n 6.22161370e-01 7.08408581e-02 3.35774493e-01 9.94108590e-02\n -3.56888892e-01 -3.93038483e-01 -7.56435236e-02 6.49847223e-01\n -1.72502692e-01 -3.22571379e-01 -2.78639441e-01 -2.50247598e-01\n 2.90981617e-02 -5.51627462e-01 -9.44808352e-02 -3.39216948e-01\n -1.75358039e-01 5.79057499e-01 2.77267598e-01 -2.17978540e-01\n 9.10125860e-01 -2.33817537e-01 -1.06717783e+00 5.12586150e-01\n -5.68093836e-01 -8.09623502e-01 1.95416138e-01 -1.25973920e-02\n 7.93979568e-01 -7.33339181e-01 2.33192978e-01 -6.92912901e-01\n -1.25760657e-01 8.77573087e-01 2.54392772e-01 -1.06912039e+00\n -3.04789717e-01 -2.72865965e-01 1.27617456e-02 2.41117706e-01\n 4.25827180e-01 8.24372944e-01 -5.69618170e-02 4.60316489e-01\n -2.35896415e-01 -1.28176547e-01 4.80383255e-01 -9.00362381e-01\n 2.78831947e-01 -3.83859730e-01 -7.44967152e-01 -3.09254631e-01\n -1.34918400e-01 2.56527908e-01 4.50172479e-01 -5.84248774e-01\n 3.76716294e-01 -4.26008883e-01 -6.62524226e-01 9.96836817e-01\n 2.33104364e-01 -5.78099088e-01 2.61340851e-01 -5.14585820e-02\n -1.08026638e+00 2.48110183e-01 4.07670119e-01 5.13399954e-01\n 5.63375701e-01 4.09877952e-01 3.24854815e-01 -3.22961834e-01\n 1.67403767e-01 1.38559695e-01 1.00307028e-01 -2.82063950e-02\n -4.76715636e-01 -6.45783573e-01 -8.24586782e-02 7.99706902e-01\n 2.30653063e-01 -4.11247421e-01 3.63445943e-01 -5.36184323e-01\n 7.40730818e-01 3.74801428e-01 -3.79160766e-01 6.22155671e-01\n 
3.70943849e-03 1.53078970e-01 -5.09115876e-01 3.67717218e-01\n -9.27479789e-01 4.19973062e-01 1.36512176e-01 3.28964216e-01\n 3.36869665e-01 2.71089851e-02 -6.51596931e-02 3.15671563e-01\n 7.94954756e-01 -2.38874957e-01 -8.20358014e-01 8.27962375e-02\n 1.48877887e-01 -3.14730481e-01 7.27037349e-02 -8.72581213e-01\n 8.59856059e-02 3.87283953e-01 6.50343832e-01 4.15098751e-01\n -9.21894717e-01 5.33708584e-01 1.01705357e-01 1.24334355e-01\n -9.80551785e-01 2.75457627e-01 8.95392681e-01 -4.19484449e-02\n 7.13818371e-01 -4.56690353e-01 3.76772459e-01 6.29669542e-01\n -6.04471137e-01 3.08514939e-02 3.18190480e-01 -1.70149022e-01\n 3.31147201e-01 -3.49441579e-02 -6.63541438e-01 -7.44485473e-01\n 3.69292801e-01 -6.59974839e-01 -2.65108559e-01 7.53351647e-02\n -6.05840041e-03 1.70326433e-01 3.01074932e-01 1.43065790e+00\n -2.01414888e-01 -5.67661764e-01 6.17042286e-02 -2.68004291e-01\n 6.70948204e-02 -6.03702584e-01 3.03823074e-01 -7.72214988e-01\n -7.67933664e-01 5.32596897e-01 -7.22553376e-01 -1.01381202e-01\n -4.24149513e-01 -9.09723071e-02 -3.67146918e-01 -5.20147820e-01\n -9.44161062e-01 1.03587486e+00 -5.09447411e-02 -2.58094508e-01\n -4.30778722e-01 -4.70249466e-01 -1.12459624e-01 5.93545923e-01\n -1.67621568e-01 -4.20976116e-01 2.86294268e-01 -8.86204987e-01\n 3.87822395e-01 -4.49361877e-01 -1.57605928e-01 1.23983918e-01\n -3.44648138e-02 -1.04102860e-01 9.81974445e-01 -3.17892758e-01\n 1.05377589e+00 -3.16958747e-02 2.65310277e-01 3.51451446e-01\n 2.70637840e-01 2.47649775e-01 -7.24335600e-01 -3.24005068e-02\n -6.80340437e-01 -1.82380631e-01 5.55093146e-01 -4.45204780e-01\n 1.77364337e-01 6.82898147e-02 -7.02765494e-02 -5.40348121e-01\n -1.79533839e-01 8.12029032e-03 -7.99585089e-02 -2.20861160e-01\n -1.71383459e-01 -6.44036010e-02 -9.34928425e-02 -2.97675871e-02\n 3.95162872e-01 8.06142891e-01 -1.27252485e-01 -4.11614810e-02\n -1.39924372e-01 -5.84028805e-01 -2.14306529e-01 5.71435001e-02\n -3.22922515e-01 5.42090472e-02 3.02597237e-01 2.74148379e-02\n 4.34612146e-01 -7.62839648e-01 7.14805041e-02 -1.42156540e-01\n 3.80652358e-01 -5.74282540e-02 1.58530701e-01 5.18362448e-01\n -2.61568556e-01 -2.48860833e-01 1.44806143e-01 -2.83555679e-02\n -2.23025986e-01 2.00572737e-02 -4.18769638e-01 5.49936122e-01\n 4.30185554e-01 -1.67455979e-01 1.49500275e-01 -1.42960281e-01\n -8.61412826e-02 -6.84377349e-01 -1.35974158e-01 1.70003078e-01\n -5.66769308e-01 1.94067736e-01 3.23649697e-01 7.16302446e-02\n 1.86843386e-01 -9.42731861e-02 -2.66209541e-01 -1.23059722e+00\n -3.25279373e-01 5.43289802e-01 -6.42347713e-02 -6.13733390e-01\n -4.73192870e-01 -6.91585434e-01 1.83979140e-01 -3.02808550e-01\n -1.11473096e-01 3.99455327e-02 -2.18319657e-02 -1.83010632e-01\n 9.03993094e-01 -1.57377039e-01 -1.84498706e-01 -2.69256896e-01\n 1.51440494e-01 3.64234523e-01 -2.06734204e-01 1.11683812e-01\n 3.27372751e-01 6.59366938e-01 5.30161193e-01 -9.55059188e-02\n -3.64968638e-01 -3.49539016e-01 -2.36829874e-01 5.90511230e-02\n -6.17571941e-01 -1.01869392e-01 1.17815305e-01 -3.64039520e-01\n -2.12489037e-01 -4.44155409e-01 -2.33737316e-01 -3.09003748e-01\n -6.00091139e-01 3.08517119e-01 -8.83587922e-01 -3.42235178e-02\n -3.86196818e-01 7.50332333e-01 -1.06257891e+00 5.49936991e-01\n -3.70634627e-01 -4.02452827e-01 -1.95424385e-01 -1.07151153e-01\n 5.66020734e-01 2.64548809e-01 1.16752338e-01 4.40407183e-01\n 8.27473450e-01 1.43468257e-01 -9.18060201e-02 5.97293326e-01\n 5.48439944e-02 -2.17863905e-01 -3.03915433e-01 2.06863185e-02\n 8.52804225e-02 3.16706506e-01 -3.84762501e-01 -6.55702960e-01\n 1.38862378e-02 
-1.12451340e-01 2.48254299e-01 2.00277727e-03\n 6.06651013e-01 -1.03315499e-01 1.03982466e-01 -7.56699102e-01\n -4.00998418e-01 1.03004879e-01 -3.86272973e-02 5.17235088e-01\n 1.22503913e-01 -4.73752332e-01 2.67891391e-02 -7.93373920e-01\n 2.59865965e-02 -4.18536996e-01 6.49300132e-01 -2.65242113e-01\n 2.83538694e-01 2.18308258e-01 -1.77033727e-01 -2.48437185e-01\n 1.54398818e-01 2.39427974e-01 1.64481031e-01 3.24137018e-01\n -1.91120140e-01 6.48455450e-01 3.34284463e-01 4.19366718e-01\n 4.83842501e-02 4.31455407e-01 5.75717912e-01 2.26922149e-01\n 5.06265031e-03 1.44042511e-01 3.14408886e-01 1.78759062e-01\n 4.22800593e-01 -6.87153506e-01 -4.26930468e-01 4.91224168e-01\n -4.34246890e-01 5.50561554e-01 2.93126000e-01 -6.00846177e-02\n -6.85945424e-01 3.82716661e-01 1.61882189e-01 7.33132754e-01\n -2.92278835e-01 -1.91408433e-01 -2.93211960e-02 4.21770349e-01\n 2.98443516e-01 -8.65083337e-02 -3.73495575e-02 -3.71377492e-01\n 6.54719321e-01 -2.02675314e-01 1.82195264e-01 4.00697167e-01\n 5.30765795e-01 5.56787793e-01 -1.05783730e+00 4.75064868e-02\n 9.76713587e-01 3.42357281e-01 1.43705901e-01 -1.62196675e-02\n -1.70969074e-02 -2.51505846e-01 8.82044681e-01 -1.01883448e-01\n -1.94593271e-01 -4.81731859e-01 -3.73170572e-01 1.23189550e-01\n 6.50065367e-01 -6.59577277e-01 3.32419706e-02 2.17875942e-01\n 9.60696186e-03 2.47249144e-01 -5.23100942e-01 -5.94601466e-02\n 3.35662648e-01 -1.54206348e-01 -3.27109668e-01 6.51033760e-01\n -2.51480932e-01 -6.53891200e-01 -1.80057845e-01 2.86230711e-01\n -2.63448714e-01 -1.38937202e-01 3.20248622e-01 -5.64221087e-01\n 6.31387236e-02 2.60477864e-01 3.78447897e-01 -3.07731333e-01\n -8.25132000e-02 -7.40579494e-01 -2.63115891e-01 2.09142462e-01\n -7.56553960e-01 -1.20244835e-02 -3.47473555e-02 2.00672489e-01\n 8.88544047e-01 -1.70377537e-01 9.89793951e-01 -6.04924687e-01\n -3.58198450e-01 -9.59516438e-01 8.43699007e-02 -5.66210923e-01\n 3.66404738e-01 9.84587225e-03 -4.51961431e-01 7.17394214e-03\n 2.56538346e-01 -6.18353078e-02 -5.55459142e-01 -2.25629421e-01\n -6.63539862e-02 -4.46231293e-01 7.91049962e-01 7.23502780e-01\n -5.39776875e-01 2.42966451e-01 -4.28042049e-02 4.33240416e-01\n 3.68127226e-01 -6.56969528e-03 6.78332366e-01 1.96753493e-01\n -5.56898742e-03 -3.50949776e-01 8.42364125e-01 -4.50617748e-01\n -2.35486732e-01 -1.12508671e-01 9.76763523e-02 7.55785540e-01\n 6.82884397e-01 -2.25190221e-01 -6.57030616e-01 8.78603788e-02\n -6.27554695e-01 3.91218657e-01 -7.59900631e-02 1.56331159e-01\n -4.11108496e-01 -3.62504434e-01 3.21472595e-01 6.76605001e-01\n 5.20498994e-01 3.91215244e-01 -2.53196943e-03 2.83108932e-01\n -2.70749290e-01 1.44814402e-01 -5.76740763e-02 -1.58366553e-01\n -3.84762013e-01 -4.83717985e-01 8.90803350e-01 -1.86856219e-01\n -5.12697891e-01 3.72097652e-01 -6.57109704e-01 1.32748661e-01\n 9.83654856e-02 3.75132762e-01 1.52903999e-01 4.23027694e-01\n -1.38441667e-01 8.93518924e-02 7.46455510e-02 -6.34323765e-02\n 3.70956519e-01 1.11943780e+00 -5.49004195e-01 3.99984015e-01\n 2.32937524e-01 1.54190387e-01 6.07409486e-02 6.28798341e-01\n -1.26836991e-01 7.25291415e-01 -8.67245121e-02 1.98876975e-01\n -1.67643718e-01 6.64970426e-01 7.30354625e-01 -6.81324063e-02\n 1.04161115e-01 -4.80278702e-01 5.92208877e-01 -2.62511105e-01\n 3.37869606e-01 -4.03832620e-01 1.21947507e+00 -6.53569802e-01\n 1.93289037e-01 3.22624058e-01 -1.74196978e-02 8.85756267e-01\n 1.22640157e-01 9.35117771e-01 -3.27718395e-01 -7.24807456e-01\n 7.09632412e-01 -1.52888241e-02 -3.14296524e-01 -2.04040509e-01\n 2.52845536e-01 -2.39654038e-01 -3.60544253e-01 
2.33922294e-01\n -1.33829674e-03 -4.24466482e-01 -3.26437038e-01 2.86668214e-01\n 1.44346245e-01 -3.04284841e-01 6.04574089e-01 -6.69742036e-01\n 6.11950956e-01 1.22501307e-02 -6.16735461e-01 -8.05252091e-01\n -1.88505837e-01 -4.27753239e-01 4.74929816e-01 5.15252353e-03\n 1.63649747e-01 -2.99644216e-02 -2.96385766e-01 -4.30006275e-01\n -5.12063057e-01 3.84396682e-01 -6.29545484e-01 8.65107401e-01\n -1.52628428e-02 4.03735800e-01 3.97882154e-01 1.03053235e-01\n -4.26682868e-01 -7.07994388e-01 9.05088774e-01 -6.55640591e-01\n 1.16572780e-01 5.09396240e-01 2.28501375e-01 -4.83523171e-01\n 5.32768287e-01 5.40810612e-03 -1.64659333e-01 1.02485545e-01\n 3.43585230e-01 2.60806125e-01 -4.47696710e-02 -1.90668243e-02\n -4.79113735e-01 5.86121194e-01 -2.29910151e-01 5.53332566e-01\n -7.25259004e-01 -7.58321248e-01 2.63340746e-01 3.53978594e-01\n 1.95515269e-01 -3.84167048e-01 7.20350196e-01 5.15238729e-01\n -1.04149207e+00 -6.24217136e-02 -9.94106981e-01 -4.14642100e-01\n -1.58702830e-01 5.55649885e-01 -7.02812594e-01 2.06370068e-01\n 1.22764102e-01 3.20587998e-01 7.25158037e-02 -1.56076438e-01\n 5.15077900e-01 -1.42527298e-01 1.14332138e-01 6.86239706e-02\n -4.99171873e-01 -3.98471710e-02 9.93221609e-02 -6.37252244e-01\n 4.05110831e-01 3.32252706e-01 2.60419853e-01 -5.91981475e-01\n -5.78020592e-01 3.72172323e-01 -1.40267934e+00 -1.06847910e+00\n 7.33025668e-01 -4.58706136e-01 4.97883166e-02 2.74634672e-01\n 9.77208605e-02 2.42209855e-01 -6.57280937e-02 -1.21241391e-01\n -4.11351083e-03 2.95428665e-02 -8.97632182e-01 1.48930338e-01\n 5.71755777e-01 4.18974961e-01 -3.20848518e-01 6.92736456e-01\n 3.03975267e-01 -2.81308602e-01 -1.36784456e-01 1.23280511e-01\n -3.91180893e-01 6.78386445e-01 1.03983563e-01 -1.77527443e-01\n 1.58018075e-01 6.14151499e-01 -1.82373447e-01 1.23421588e-01\n -1.03387119e+00 6.20531465e-01 3.05097628e-01 -4.52493171e-01\n -6.38369566e-01 -2.54578653e-01 -2.24730280e-02 -5.93466214e-01\n 7.39642777e-01 -4.25828937e-03 4.90487226e-02 5.83542546e-01\n -3.30749279e-01 2.23033385e-01 -6.76285597e-01 -3.27403063e-01\n 1.00763629e+00 -5.10063740e-01 -1.08095561e-01 -6.56693222e-02\n -2.25003751e-01 9.51798302e-01 1.74743509e-01 3.81913031e-02\n -2.84128815e-02 -1.14399959e-01 -3.17784918e-01 7.81586567e-01\n -7.07695855e-01 -5.44119969e-02 2.84912170e-01 2.99901440e-02\n 8.55895566e-01 8.82379065e-01 7.27357549e-01 4.40682938e-01\n -5.71935135e-01 -1.56174099e-02 -1.07632419e+00 3.70025716e-01\n -3.74767555e-03 -4.99064520e-02 -6.97008353e-01 2.74795243e-01\n -1.83621411e-01 -9.58472548e-01 1.28817498e-02 1.14754079e+00\n 2.97308566e-01 2.74016255e-01 -6.93717689e-02 -1.37199274e-01\n -5.10996717e-01 -7.10168438e-01 7.84710322e-01 -6.06992337e-01\n -4.79913670e-01 -1.76727116e-01 -1.78096898e-02 1.64848379e-01\n -8.10393325e-01 2.50578533e-01 -6.46898387e-02 -2.78035722e-01\n -6.04085604e-01 -5.87059645e-02 1.94814528e-01 -1.63942624e-01\n 4.58708853e-01 -9.47132291e-03 -9.70674408e-02 2.69252028e-01\n 1.53525070e-01 -2.03672174e-02 1.73260266e-01 1.70530856e-01\n 1.86793907e-01 1.38306650e-02 9.91091981e-02 6.82947679e-03\n -7.18547052e-01 -3.46858728e-01 3.64199206e-02 6.87599378e-01\n 1.73634577e-02 -4.04752401e-01 4.03594290e-01 5.32777717e-01\n 7.13515334e-01 3.67798728e-01 -5.01971752e-02 -7.40807915e-01\n -5.56176164e-02 -2.94459802e-01 4.47567925e-01 -4.87682422e-01\n -2.32936444e-01 -1.32722718e-01 -4.25001572e-01 7.45203982e-01\n 4.66862126e-01 5.12594039e-01 4.73346213e-01 -6.63708273e-01\n -1.96498178e-01 5.29108607e-01 -2.86215713e-01 -1.13525968e-01\n -1.36199860e-02 
1.21047289e-01 1.45552248e-01 -2.24219796e-02\n 3.28169928e-01 4.71364553e-01 2.44566908e-01 -1.49829301e-02\n 2.46746349e-01 1.39135991e-01 4.13897366e-01 5.08212592e-01\n -1.75787778e-01 6.12532805e-01 2.34049030e-01 -5.76835180e-01\n -2.25019000e-01 -2.57998843e-01 2.32437582e-01 -3.51584590e-02\n -2.41496335e-03 -2.47331602e-01 4.21586783e-01 -2.63467616e-01\n -4.61965160e-01 2.42577111e-01 2.57984172e-01 -4.90132528e-01\n -5.92349958e-01 -1.54066638e-02 1.47802017e-01 -3.05608002e-02\n 1.83597012e-01 -5.18493517e-01 6.93372861e-01 -2.16795620e-01\n -4.49831816e-02 3.84421858e-01 3.47914301e-01 7.30785603e-01\n -4.87557516e-01 1.62629280e-01 8.02566723e-01 -3.38405445e-01\n -7.70754768e-03 -3.73972933e-01 3.20900669e-01 1.59337690e-01\n -2.23367187e-01 2.11206258e-01 1.86548251e-01 1.20918967e-01\n 4.49484484e-01 2.28648170e-01 -2.91101007e-01 -4.75107104e-01\n 1.62689475e-02 4.01263218e-01 -3.52073840e-01 -4.62493680e-01\n -2.93206662e-02 -1.86610294e-01 5.59841580e-02 3.21682393e-01\n 3.04243521e-01 -9.82905209e-01 -3.74358047e-01 1.90848349e-01\n -5.11899036e-01 -7.42107718e-01 -5.33251454e-02 2.48084237e-01\n -2.74743700e-01 1.01583305e-01 2.92546513e-01 1.45726328e-01\n 5.48724732e-01 7.25518592e-01 7.78009400e-01 1.40460777e-01\n 9.30688352e-01 -5.14368559e-01 -7.35385874e-01 8.80736619e-01\n 6.19708882e-01 -2.57186042e-01 5.80935188e-01 -2.11467709e-02\n -2.49362784e-01 9.39030034e-01 8.57675202e-01 6.59185496e-01\n -2.42539189e-01 7.21148255e-02 -2.81857947e-01 -1.64046338e-02\n -3.02907445e-01 1.59712739e-02 6.08706046e-01 4.21272752e-01\n 1.85019592e-01 -2.05423800e-01 -1.45721826e-01 -1.32819834e-02\n 3.38028518e-01 -1.04711629e-01 5.51624136e-02 7.81870805e-01\n 4.01941188e-01 -1.46264742e-01 -5.82416662e-02 1.13339910e-01\n -1.19357128e-01 6.06306521e-02 -3.57226974e-01 4.44868495e-01\n -6.01535493e-01 -2.39202171e-01 4.55032216e-01 -3.58583089e-01\n 1.43100195e-01 -7.11436710e-01 9.32242617e-01 -1.91815689e-01\n -1.78294923e-02 -4.48109619e-01 -7.99087525e-01 2.30340232e-01\n -2.48222256e-01 -1.56941986e-01 4.96115089e-01 -5.21722504e-01\n -1.20990824e-01 -7.27595138e-01 -1.56311276e-01 2.30527692e-01\n 2.33436604e-01 -6.11321230e-01 6.70147456e-02 -4.57963163e-01]\nShape u = (1000,)\nLength u = 1000\nShape X = (1000, 2)\nShape y = (1000,)\n"
]
],
[
[
"##### Turn to estimation\n\n",
"_____no_output_____"
],
[
"So, we now have data on *realizations* $(y,X,T)$. Now forget\n that we know $\\beta$ and let’s estimate it, using weighted least\n squares. As a numerical matter it’s better to avoid explicitly\n inverting the $(T^T X)$ matrix; instead we can solve the “normal”\n equations\n\n\\begin{align*}\n X'y &= X' X b + X' u\\\\\n \\mbox{E}(T'u) = 0\n\\end{align*}\n\n",
"_____no_output_____"
],
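[
"A short added step (not in the original notes) spelling out the estimator the code below computes: imposing the sample analogue of $\\mbox{E}(T'u)=0$ on the equation above gives\n\n\\begin{align*}\n   T'y &= T'Xb\\\\\n   b &= (T'X)^{-1}T'y \\qquad \\mbox{(when $T'X$ is square and invertible).}\n\\end{align*}\n\nWhen $T'X$ has more rows than columns, $b$ instead solves $T'Xb = T'y$ in the least-squares sense.\n\n",
"_____no_output_____"
],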
[
"##### Numerical solution\n\n",
"_____no_output_____"
],
[
"In the classical case we were trying to solve a linear system that\n took the form $Ab=0$, with $A$ a square matrix. In the present case\n we’re also trying to solve a linear system, but with a matrix $A$\n that may have more rows than columns. Provided the rows are linearly\n independent, this implies that we have an **overidentified** system of\n equations. We’ll return to the implications of this later, but for\n now this also calls for a different numerical approach, using\n `np.linalg.lstsq` instead of `np.linalg.solve`.\n\n",
"_____no_output_____"
]
],
[
[
"# 2.3. ORIGINAL CODE\n\nfrom scipy.linalg import inv, sqrtm\n\nb = np.linalg.lstsq(T.T@X,T.T@y)[0] # lstsqs returns several results\n\ne = y - X@b\n\nprint(b)\n\nTXplus = np.linalg.pinv(T.T@X) # Moore-Penrose pseudo-inverse\n\n# Covariance matrix of b\nvb = e.var()*[email protected]@[email protected] # u is known to be homoskedastic\n\nprint(vb)",
"[0.50661294 0.99862493]\n[[ 4.04318742e-05 -1.99102565e-05]\n [-1.99102565e-05 1.05915628e-05]]\n"
],
[
"# np.linalg.lstsq?\n# Return the least-squares solution to a linear matrix equation.\n## Solves the equation `a x = b` by computing a vector `x` that\n## minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may\n## be under-, well-, or over- determined (i.e., the number of\n## linearly independent rows of `a` can be less than, equal to, or\n## greater than its number of linearly independent columns). If `a`\n## is square and of full rank, then `x` (but for round-off error) is\n## the \"exact\" solution of the equation.",
"_____no_output_____"
],
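[
"# Hypothetical toy illustration (not part of the original notes): contrast\n# np.linalg.solve, which needs a square full-rank system, with\n# np.linalg.lstsq, which also handles overdetermined systems.\nA_sq = np.array([[2., 1.], [1., 3.]])   # 2 equations, 2 unknowns: solve works\nrhs = np.array([1., 2.])\nprint(np.linalg.solve(A_sq, rhs))\n\nA_tall = np.array([[2., 1.], [1., 3.], [1., 1.]])   # 3 equations, 2 unknowns\nrhs_tall = np.array([1., 2., 1.])\nprint(np.linalg.lstsq(A_tall, rhs_tall, rcond=None)[0])   # least-squares solution",
"_____no_output_____"
],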
[
"# np.linalg.pinv?\n# Compute the (Moore-Penrose) pseudo-inverse of a matrix.\n## Calculate the generalized inverse of a matrix using its\n## singular-value decomposition (SVD) and including all\n## *large* singular values.",
"_____no_output_____"
],
[
"# 2.3. CODE ABOVE WITH COMMENTS AND PRINTING\n\n# import both the 'inv' and 'sqrtm' functions from the 'linalg' functions of the SciPy module\nfrom scipy.linalg import inv, sqrtm\n\n# solve for b in the equation T.T@X@b = T.T@y\nb = np.linalg.lstsq(T.T@X,T.T@y, rcond=-1)[0] # lstsqs returns several results\n# note that I'm also passing it rcond=-1 to mute the error above and maintain the old parameter\n\n# e gets R:: y - X %*% b \ne = y - X@b\nprint\n\nprint(\"b =\", b)\n\n# calculate the Moore-Penrose pseudo-inverse\nTXplus = np.linalg.pinv(T.T@X) # Moore-Penrose pseudo-inverse\n\n# Covariance matrix of b\n## vb gets var(e) * TXplus %*% t(T) %*% T %*% t(TXplus)\nvb = e.var()*[email protected]@[email protected] # u is known to be homoskedastic\n\nprint(\"vb =\",vb)",
"b = [0.49651782 1.01160452]\nvb = [[ 6.94188425e-06 -2.61030984e-05]\n [-2.61030984e-05 1.08902749e-04]]\n"
]
],
[
[
"### Final Word",
"_____no_output_____"
],
[
"This discussion section originally covered [random_variables0](https://datahub.berkeley.edu/user/benjaminkrause/notebooks/ARE212_Materials/random_variables0.ipynb), but the related notes have been moved to and expanded in [\\[ARE 212\\] Discussion Section - Python 03](https://datahub.berkeley.edu/user/benjaminkrause/notebooks/ARE212_Discussion_Section/%5BARE%20212%5D%20Discussion%20Section%20-%20Python%2003.ipynb)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
]
] |
e7158e4fcc46c04bce0a6621839b02bab0638d0a | 217,086 | ipynb | Jupyter Notebook | code/explore_SSNA_data.ipynb | edgeryders/explore-ssna-data | 6e96363ee82267f7aabea187330f074f63c9f4b7 | [
"MIT"
] | null | null | null | code/explore_SSNA_data.ipynb | edgeryders/explore-ssna-data | 6e96363ee82267f7aabea187330f074f63c9f4b7 | [
"MIT"
] | null | null | null | code/explore_SSNA_data.ipynb | edgeryders/explore-ssna-data | 6e96363ee82267f7aabea187330f074f63c9f4b7 | [
"MIT"
] | null | null | null | 92.024587 | 12,728 | 0.758962 | [
[
[
"## Looking for patterns in SSNA data (using Pandas)\n\nThe plan: use SSNA data exports from various projects and explore the link between semantic density of posts and topics and the structure of the conversation they are part of. \n\n* Semantic density is measured by the number of annotations and codes. \n* Structure comes down to the interaction network. \n\n### Semantic density\n\nMy first goals are:\n\n* A frequency distribution of annotations per post (semantic density). How concentrated vs. dispersed is meaning?\n* Measures of correlation between a post's semantic density and: \n * the length of the topic the post is in;\n * the number of replies the post has;\n\n### High level\n\n* Read the data\n* Re-organize the data to create a table as follows: \n\n| post_id | topic_id | annotations | author |[quality_measures] |\n|---------|----------|-------------|-----------|------------------|\n| 123456 | 12345 | 5 | anon12345 | [12] |\n\n* compute correlation indices between columns 3 and 5. 4 can be used for correlating or regressing 3 on properties of the author, for example measures of network centrality.\n\n**Remember**. These data are re-exported with added fields for post quality. They no longer correspond to the datasets uploaded onto Zenodo. If we do a hackathon etc, not only we should refresh the data files, but also regenerate the `datapackage.json` files.",
"_____no_output_____"
],
[
"## Part 1: data wrangling",
"_____no_output_____"
]
],
[
[
"import pandas as pd # the customary imports\nimport numpy as np",
"_____no_output_____"
]
],
[
[
"Import the files and put them into pandas dataframes",
"_____no_output_____"
]
],
[
[
"dirPath = '~/all_projects/'\n # path to your data folderprojects = ['ngi', 'poprebel', 'opencare'] \n\nparticipants = pd.read_csv(dirPath + projects[0] + '/participants.csv')\nparticipants['project'] = projects[0]\nannotations = pd.read_csv(dirPath + projects[0] + '/annotations.csv')\nannotations['project'] = projects[0]\ncodes = pd.read_csv(dirPath + projects[0] + '/codes.csv')\ncodes['project'] = projects[0]\nposts = pd.read_csv(dirPath + projects[0] + '/posts.csv')\nposts['project'] = projects[0]\n \nfor project in projects[1:]:\n print('adding ' + project)\n participants1 = pd.read_csv(dirPath + project + '/participants.csv')\n participants1['project'] = project\n annotations1 = pd.read_csv(dirPath + project + '/annotations.csv')\n annotations1['project'] = project\n codes1 = pd.read_csv(dirPath + project + '/codes.csv')\n codes1['project'] = project\n posts1 = pd.read_csv(dirPath + project + '/posts.csv')\n posts1['project'] = project\n \n participants = participants.append(participants1, ignore_index = True)\n annotations = annotations.append(annotations1, ignore_index = True)\n codes = codes.append(codes1, ignore_index = True)\n posts = posts.append(posts1, ignore_index = True)\n \n# last step: eliminate duplicates (posts assigned to more than one project and their annotations)\n# except participants, who are pseudonymized anew every time, so we cannot do that\n\ndup_ann = annotations.duplicated(subset= 'id')\ndup= 0\nfor i in dup_ann:\n if i==True:\n dup +=1\nprint ('duplicated annotations: ' + str (dup))\n\ndup_posts = posts.duplicated(subset= 'post_id')\ndup= 0\nfor i in dup_posts:\n if i==True:\n dup +=1\nprint ('duplicated posts: ' + str (dup))\n\ndup_codes = codes.duplicated(subset= 'id')\ndup= 0\nfor i in dup_codes:\n if i==True:\n dup +=1\nprint ('duplicated codes: ' + str (dup))\n\nannotations = annotations.drop_duplicates(subset='id')\ncodes = codes.drop_duplicates(subset='id')\nposts = posts.drop_duplicates(subset= 'post_id')\n",
"adding poprebel\nadding opencare\nduplicated annotations: 121\nduplicated posts: 21\nduplicated codes: 180\n"
]
],
[
[
"Notice that the number of duplicated posts is not that high. I can keep on using the `project` variable. \n\nNext, add number of annotations per post. This is a `merge` operation in pandas. I don't need the `codes` dataframe for now, because the `annotations` dataframe already contains the `code_id` identifiers. Notice that in the merge I lose about 500 annotations, presumably related to posts that were dropped out of the dataset because of missing consent.",
"_____no_output_____"
]
],
[
[
"print(posts.shape)\nprint(posts.columns)\nprint(annotations.shape)\nprint(annotations.columns)\n\npostsAnno = posts.merge(annotations, how = 'left', on='post_id')\npostsAnno = postsAnno.drop(columns=['project_y', 'topic_id_y'])\nprint(postsAnno.shape)\nprint(postsAnno.columns)\nprint(postsAnno['id'].nunique())\nprint(annotations['id'].nunique())\nprint(postsAnno['post_id'].nunique())\n",
"(8512, 16)\nIndex(['post_number', 'post_id', 'created_at', 'topic_id', 'reply_count',\n 'reads', 'readers_count', 'incoming_link_count', 'quote_count',\n 'like_count', 'score', 'reply_to_post_id', 'source_username',\n 'target_username', 'text', 'project'],\n dtype='object')\n(10140, 21)\nIndex(['id', 'version', 'text', 'quote', 'uri', 'created_at', 'updated_at',\n 'post_id', 'creator_id', 'type', 'shape', 'units', 'geometry', 'src',\n 'ext', 'container', 'start', 'end', 'topic_id', 'code_id', 'project'],\n dtype='object')\n(16341, 34)\nIndex(['post_number', 'post_id', 'created_at_x', 'topic_id_x', 'reply_count',\n 'reads', 'readers_count', 'incoming_link_count', 'quote_count',\n 'like_count', 'score', 'reply_to_post_id', 'source_username',\n 'target_username', 'text_x', 'project_x', 'id', 'version', 'text_y',\n 'quote', 'uri', 'created_at_y', 'updated_at', 'creator_id', 'type',\n 'shape', 'units', 'geometry', 'src', 'ext', 'container', 'start', 'end',\n 'code_id'],\n dtype='object')\n9683\n10140\n8512\n"
]
],
[
[
"I use a filter mask to create a column in `postsAnno` that cointains the number of annotations. For now, I set it to `0` where the annotation `id` does not exist (is `NaN`). Elsewehere it is provisionally set to 1.",
"_____no_output_____"
]
],
[
[
"filt = postsAnno['id'].isna()\nprint(filt.describe)\npostsAnno['annoexists'] = ~ filt\npostsAnno['n_anns'] = postsAnno['annoexists'].astype(int)\npostsAnno.drop('annoexists', axis=1, inplace=True) # delete annoexists, I no longer need it\nprint(postsAnno[['post_id','id', 'n_anns']].tail)",
"<bound method NDFrame.describe of 0 True\n1 True\n2 True\n3 True\n4 True\n ... \n16336 False\n16337 True\n16338 True\n16339 True\n16340 True\nName: id, Length: 16341, dtype: bool>\n<bound method NDFrame.tail of post_id id n_anns\n0 55953 NaN 0\n1 55962 NaN 0\n2 55968 NaN 0\n3 55973 NaN 0\n4 55974 NaN 0\n... ... ... ...\n16336 19954 4716.0 1\n16337 8916 NaN 0\n16338 12509 NaN 0\n16339 15815 NaN 0\n16340 18436 NaN 0\n\n[16341 rows x 3 columns]>\n"
]
],
[
[
"Next step is to count the number of annotations for each value of `post_id`: goes in the `annotations` column. This is the number of rows of `postsAnno` that have the same value of `post_id`. This is our dependent variable.",
"_____no_output_____"
]
],
[
[
"mys=postsAnno.groupby('post_id').count().reset_index()\nmys.rename(columns={'post_number': 'annotations'}, inplace=True)\nmys =mys[['post_id', 'annotations']]\nmys",
"_____no_output_____"
]
],
[
[
"Merging the grouped dataframe with the original one adds to the latter the extra column I need, `annotations`. However, since the `count` method counts NaNs too, posts with no annotations show a count of 1, too. Fortunately, I have stored the zeros in the `n_ann` columns in `postsAnno`. ",
"_____no_output_____"
]
],
[
[
"postsAnno = postsAnno.merge(mys, how='left')\n# since the groupby.count() method also counts NaNs, it follows that even when 'n_anns' == 0 'annotations' == 1\npostsAnno.tail(10)",
"_____no_output_____"
]
],
[
[
"I replace the zeros from `n_ann` into the `annotations` column. This yields a correct situation: the values of `annotations` now range from `0` to `105`. When `n_anns == 0`, `annotations == 0`. When `n_anns == 1`, `annotations > 0`. ",
"_____no_output_____"
]
],
[
[
"postsAnno.loc[postsAnno['n_anns']==0, 'annotations']= 0\nprint(postsAnno['annotations'].describe())\npostsAnno[['n_anns', 'annotations']].tail(10)",
"count 16341.000000\nmean 9.480876\nstd 17.298335\nmin 0.000000\n25% 0.000000\n50% 3.000000\n75% 11.000000\nmax 105.000000\nName: annotations, dtype: float64\n"
]
],
[
[
"Now I can get rid of the duplicate posts. This should yield a dataframe with one row per post, i.e. `8512` rows.",
"_____no_output_____"
]
],
[
[
"the_table = postsAnno.drop_duplicates('post_id').drop(columns=['n_anns']).rename(columns={'topic_id_x': 'topic_id'})\nthe_table.tail(10)",
"_____no_output_____"
]
],
[
[
"Looks good. Now I need to compute the number of posts in each topic. I can do this from the `the_table`dataframe, as it has exactly one row per post in the original dataset.",
"_____no_output_____"
]
],
[
[
"myf = the_table.groupby('topic_id').count().reset_index()\nmyf.rename(columns={'post_number': 'posts_in_topic'}, inplace=True)\nmyf =myf[['topic_id', 'posts_in_topic']]\nmyf",
"_____no_output_____"
],
[
"the_table = the_table.merge(myf, how='left')\nthe_table['posts_in_topic'].describe()",
"_____no_output_____"
]
],
[
[
"As a final step, I compute the length of each post from the `text_x` column.",
"_____no_output_____"
]
],
[
[
"the_table['char_count'] = the_table['text_x'].str.len()\nthe_table['char_count'].head(5)",
"_____no_output_____"
]
],
[
[
"## Part 2. Statistical analysis",
"_____no_output_____"
],
[
"Start with histograms and simple frequency counts. The former show some kind of power-lawy behavior.",
"_____no_output_____"
]
],
[
[
"my_columns = ['annotations' , 'posts_in_topic', 'char_count', 'reply_count', 'reads', 'readers_count', 'incoming_link_count', 'quote_count', 'score']\nfor col in my_columns:\n the_table.hist(column = col, log = True)",
"_____no_output_____"
],
[
"for col in my_columns:\n print(the_table[col].describe())",
"count 8512.000000\nmean 1.137570\nstd 4.112047\nmin 0.000000\n25% 0.000000\n50% 0.000000\n75% 0.000000\nmax 105.000000\nName: annotations, dtype: float64\ncount 8512.000000\nmean 20.692434\nstd 27.375359\nmin 1.000000\n25% 5.000000\n50% 12.000000\n75% 23.000000\nmax 142.000000\nName: posts_in_topic, dtype: float64\ncount 8472.000000\nmean 1223.086992\nstd 2762.905139\nmin 3.000000\n25% 179.000000\n50% 483.500000\n75% 1178.250000\nmax 89013.000000\nName: char_count, dtype: float64\ncount 8512.000000\nmean 0.463111\nstd 0.647874\nmin 0.000000\n25% 0.000000\n50% 0.000000\n75% 1.000000\nmax 9.000000\nName: reply_count, dtype: float64\ncount 8512.000000\nmean 10.494948\nstd 8.713294\nmin 0.000000\n25% 4.000000\n50% 8.000000\n75% 14.000000\nmax 120.000000\nName: reads, dtype: float64\ncount 8512.000000\nmean 9.495066\nstd 8.713159\nmin 0.000000\n25% 3.000000\n50% 7.000000\n75% 13.000000\nmax 119.000000\nName: readers_count, dtype: float64\ncount 8512.000000\nmean 4.975094\nstd 39.771351\nmin 0.000000\n25% 0.000000\n50% 1.000000\n75% 1.000000\nmax 1846.000000\nName: incoming_link_count, dtype: float64\ncount 8512.000000\nmean 0.074248\nstd 0.279961\nmin 0.000000\n25% 0.000000\n50% 0.000000\n75% 0.000000\nmax 7.000000\nName: quote_count, dtype: float64\ncount 8512.000000\nmean 42.240531\nstd 207.707070\nmin 0.200000\n25% 5.600000\n50% 11.800000\n75% 31.600000\nmax 9375.800000\nName: score, dtype: float64\n"
]
],
[
[
"Correlation coefficients. Counterintuitively, the correlation between the number of annotations and quality indices is quite low. Only `char_count`, `incoming_link_counts` and the composite `score` (computed by Discourse on default parameters) show correlation over 0.1. ",
"_____no_output_____"
]
],
[
[
"reduced = the_table[my_columns]\nreduced.corr()",
"_____no_output_____"
]
],
[
[
"Try some scatterplots. They are very inconclusive. Only `char_count` offers any hope.",
"_____no_output_____"
]
],
[
[
"reduced.plot.scatter('char_count', 'annotations', c='red')\n",
"_____no_output_____"
]
],
[
[
"I tried eliminating the outliers with over 100 annotations and score over 9,000, but there was no significant change.\n\nHowever, much of the corpus belong to unfinished projects. Let me redo this selecting only the OpenCare corpus, the only one to be fully coded.",
"_____no_output_____"
]
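,
[
"That outlier filter is not shown above; a minimal sketch of what it looks like (hypothetical thresholds taken from the text):\n\n```python\n# Drop the extreme observations mentioned above, then re-check correlations.\ntrimmed = reduced[(reduced['annotations'] <= 100) & (reduced['score'] <= 9000)]\ntrimmed.corr()\n```",
"_____no_output_____"
]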
],
[
[
"oc = the_table[the_table['project_x']=='opencare']\noc = oc[my_columns]\nfor col in my_columns:\n oc.hist(column = col, log = True)",
"_____no_output_____"
]
],
[
[
"Indeed, in this corpus the correlations are stronger.",
"_____no_output_____"
]
],
[
[
"oc.corr()",
"_____no_output_____"
],
[
"oc.plot.scatter('char_count', 'annotations', c='red')",
"_____no_output_____"
]
],
[
[
"Now I regress `annotations` on the other variables, or a subset thereof. For now, I do not look at regressors based on identity – or maybe I can think about introducing a dummy, let's see. \n\nFor now I want to drop `score` (a combination of the other variables) and `readers_count` (almost equal to `reads`) on counts of multicollinearity.\n\nProvisionally, I export the data and treat them with Stata. In the future, I hope to teach myself how to use Python statistical libraries more efficienty.",
"_____no_output_____"
]
],
[
[
"columns_reg = my_columns\ncolumns_reg.append('source_username')\nprint(columns_reg)\nocreg = the_table[the_table['project_x']=='opencare'] # only posts with annotations from opencare \nocreg = ocreg[columns_reg] # drop useless columns\n\nprint(ocreg.head(10))\nocreg.to_csv(dirPath + 'ocreg.csv')",
"['annotations', 'posts_in_topic', 'char_count', 'reply_count', 'reads', 'readers_count', 'incoming_link_count', 'quote_count', 'score', 'source_username', 'source_username']\n annotations posts_in_topic char_count reply_count reads \\\n4812 0 6 15263.0 0 16 \n4813 0 6 668.0 0 12 \n4814 0 6 1551.0 0 11 \n4815 0 6 2914.0 0 9 \n4816 0 6 5.0 0 7 \n4817 0 6 4556.0 0 8 \n4818 5 17 3217.0 0 8 \n4819 2 17 502.0 1 7 \n4820 4 17 1482.0 0 5 \n4821 1 17 880.0 0 5 \n\n readers_count incoming_link_count quote_count score source_username \\\n4812 15 47 0 243.0 anon3579443982 \n4813 11 2 0 12.2 anon360117346 \n4814 10 0 0 2.0 anon5100568 \n4815 8 1 0 1.6 anon125867087 \n4816 6 1 0 6.2 anon797389185 \n4817 7 2 0 6.4 anon797389185 \n4818 7 15 0 76.6 anon2975761471 \n4819 6 0 0 6.4 anon360117346 \n4820 4 2 0 11.0 anon2975761471 \n4821 4 0 0 1.0 anon2975761471 \n\n source_username \n4812 anon3579443982 \n4813 anon360117346 \n4814 anon5100568 \n4815 anon125867087 \n4816 anon797389185 \n4817 anon797389185 \n4818 anon2975761471 \n4819 anon360117346 \n4820 anon2975761471 \n4821 anon2975761471 \n"
]
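,
[
"# Not in the original notebook: a sketch of the same regression done directly\n# in Python with statsmodels (assumes statsmodels is installed), instead of\n# exporting to Stata. 'score' and 'readers_count' are dropped as discussed.\nimport statsmodels.api as sm\n\nreg_df = ocreg.drop(columns=['score', 'readers_count', 'source_username']).dropna()\nX = sm.add_constant(reg_df.drop(columns=['annotations']))\nmodel = sm.OLS(reg_df['annotations'], X).fit()\nprint(model.summary())",
"_____no_output_____"
]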
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
e715923f1c53b8ac4e3fae84932cd190f94bc270 | 20,238 | ipynb | Jupyter Notebook | 00 Tutorial/03 Project Guide 3 MainActivity_java.ipynb | Coding-Forest/2021-Android-WeatherApp | 0a8f7b2d33da4524314904cb5dc0dd0a84dfaeb7 | [
"Apache-2.0"
] | null | null | null | 00 Tutorial/03 Project Guide 3 MainActivity_java.ipynb | Coding-Forest/2021-Android-WeatherApp | 0a8f7b2d33da4524314904cb5dc0dd0a84dfaeb7 | [
"Apache-2.0"
] | null | null | null | 00 Tutorial/03 Project Guide 3 MainActivity_java.ipynb | Coding-Forest/2021-Android-WeatherApp | 0a8f7b2d33da4524314904cb5dc0dd0a84dfaeb7 | [
"Apache-2.0"
] | null | null | null | 37.065934 | 267 | 0.471341 | [
[
[
"# Android WeatherApp Project Guide for absolute beginners - 3. MainActivity.java\n\nI created this guide as a reference for my personal learning on Android app development. The original tutorial available in link in the references below.\n\n\n**References** \n- GeeksforGeeks (2021) Making Weather App in Android Studio | Android Projects | GeeksforGeeks https://www.youtube.com/watch?v=q7NF-2gtfEU&t=864s",
"_____no_output_____"
],
[
"# MainActivity.java\n\n\n\n",
"_____no_output_____"
],
[
"## 1) Declare API variables\n",
"_____no_output_____"
],
[
"## 2) Initialise them in `onCreate`\ninside the **`protected void onCreate(Bundle savedInstanceState){}`** method",
"_____no_output_____"
],
[
"### `setAdapter()`\n\n- Excerpt from StackOverFlow:\n\n\n mDrawerList.setAdapter(new ArrayAdapter<String>\n (this,R.layout.drawer_list_item, mServices));\n\n\nA list adapter is an object that adapts a collection objects for display in a ListView. ArrayAdapter is one simple implementation that maps an array of objects.\n\nThis line is mapping an array of strings (mServices) for display in a ListView (mDrawerList). The second argument to the adapter's constructor is the layout that will be used to render each list item.\n\n- https://stackoverflow.com/questions/33703548/how-to-use-setadapter",
"_____no_output_____"
]
],
[
[
"# MainActivity.java\n\npublic class MainActivity extends AppCompatActivity {\n\n # Declare variables with there id's.\n private RelativeLayout homeRL;\n\n ...\n\n private ImageView backIV, iconIV, searchIV;\n\n # Add WeatherRVModal variable here.\n # then initialise these down below in onCreate too.\n private ArrayList<WeatherRVModal> weatherRVModalArrayList;\n private WeatherRVAdapter weatherRVAdapter; # adaptor class\n\n @Override\n protected void onCreate(Bundle savedInstanceState) {\n super.onCreate(savedInstanceState);\n setContentView(R.layout.activity_main);\n\n # initialise the variables here.\n homeRL = findViewById(R.id.idRLHome);\n\n ...\n\n searchIV = findViewById(R.id.idIVSearch);\n \n # API we will going to use.\n weatherRVModalArrayList = new ArrayList<>();\n # Init ialise the adapter.\n weatherRVAdapter = new WeatherRVAdapter(this, weatherRVModalArrayList);\n # Set this adapter to the RecyclerView.\n weatherRV.setAdapter(weatherRVAdapter);\n\n # Inisialise the LocationManager.\n locationManager = (LocationManager) getSystemService(Context.LOCATION_SERVICE);\n }\n\n...",
"_____no_output_____"
]
],
[
[
"## 3) Get weather info",
"_____no_output_____"
]
],
[
[
"...\n\n # Create a method for weather info,\n # and another for user location.\n private void getWeatherInfo(String cityName){\n\n # Parse the data from API.\n # so first, create a url.\n String url = \"http://api.weatherapi.com/v1/forecast.json?key=34a7fe8762854dd7b8765811212509&q=\" +cityName+ \"&days=1&aqi=yes&alerts=yes\";\n\n cityNameTV.setText(cityName);\n\n # Create a variabel RequestQueue\n RequestQueue requestQueue = Volley.newRequestQueue(MainActivity.this);\n\n # The weather API is a json object, so we have to make a json object request.\n JsonObjectRequest jsonObjectRequest = new JsonObjectRequest(Request.Method.GET, url, null, new Response.Listener<JSONObject>() {\n @Override\n public void onResponse(JSONObject response) {\n loadingPB.setVisibility(View.GONE);\n homeRL.setVisibility(View.VISIBLE);\n weatherRVModalArrayList.clear();\n\n # Take a close look at the syntax of accessing the data in Json format.\n try {\n String temperature = response.getJSONObject(\"current\").getString(\"temp_c\");\n temperatureTV.setText(temperature + \"°c\");\n int isDay = response.getJSONObject(\"current\").getInt(\"is_day\");\n String condition = response.getJSONObject(\"current\").getJSONObject(\"condition\").getString(\"text\");\n String conditionIcon = response.getJSONObject(\"current\").getJSONObject(\"condition\").getString(\"icon\");\n Picasso.get().load(\"http:\".concat(conditionIcon)).into(iconIV);\n conditionTV.setText(condition);\n\n # Switch background colour\n if (isDay==1) {\n # Morning\n Picasso.get().load(\"@assets/day light.jpg\").into(backIV)\n } else {\n # Night\n Picasso.get().load(\"@assets/night sky.jpg\").into(backIV);\n }\n\n JSONObject forecastObj = response.getJSONObject(\"forecast\");\n JSONObject forecastArr = forecastObj.getJSONArray(\"forecastday\").getJSONObject(0);\n JSONArray hourArray = forecastArr.getJSONArray(\"hour\");\n\n for(int i=0; i < hourArray.length(); i++) {\n JSONObject hourObj = hourArray.getJSONObject(i);\n\n # Create variables to store the fetched data.\n String time = hourObj.getString(\"time\");\n String temp_c = hourObj.getString(\"temp_c\");\n String icon = hourObj.getJSONObject(\"condition\").getString(\"icon\");\n String wind_kph = hourObj.getString(\"wind_kph\");\n weatherRVModalArrayList.add(new WeatherRVModal(time, temp_c, icon, wind_kph));\n }\n\n # Notify adaptor about these variables.\n weatherRVAdapter.notifyDataSetChanged();\n\n\n\n } catch (JSONException e) {\n e.printStackTrace();\n }\n }\n }, new Response.ErrorListener() {\n @Override\n public void onErrorResponse(VolleyError error) {\n Toast.makeText(MainActivity.this, \"Please enter valid city name\", Toast.LENGTH_SHORT).show();\n }\n });\n\n\n }\n",
"_____no_output_____"
]
],
[
[
"## 4) Get city name\n\n1) Create a method that gets the city name from the API latitude and longitude data. \n2) Initialise with \"Not found\" in case the method fails to get longitude & latitude values.",
"_____no_output_____"
]
],
[
[
" ...\n\n @Override\n protected void onCreate(Bundle savedInstanceState) {\n super.onCreate(savedInstanceState);\n\n ...\n\n # 1) Create a method\n private String getCityName(double longitude, double latitude){\n # 2) Initialise CityName.\n String cityName = \"Not found\";\n Geocoder gcd = new Geocoder(getBaseContext(), Locale.getDefault());\n try {\n List<Address> addresses = gcd.getFromLocation(latitude, longitude, 10);\n\n for (Address adr: addresses){\n if (adr!=null){ # if the locality data is there,\n String city = adr.getLocality(); # get that data.\n if (city!=null && !city.equals(\"\")){ # check if the locality data is correct.\n cityName = city; # update cityName with that screened data.\n } else {\n Log.d(\"TAG\", \"CITY NOT FOUND\");\n Toast.makeText(this, \"User City Not Found\", Toast.LENGTH_SHORT).show();\n }\n }\n }\n } catch (IOException e){\n e.printStackTrace();\n }\n\n return cityName;\n\n }\n\n ...",
"_____no_output_____"
]
],
[
[
"## 5) Get User Location",
"_____no_output_____"
],
[
"### 5-1) Configure Internet Permission in `AndroidManifest.xml`",
"_____no_output_____"
]
],
[
[
"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<manifest xmlns:android=\"http://schemas.android.com/apk/res/android\"\n package=\"com.example.weatherapp\" >\n\n # Configure Internet Permission to use the user's current location..\n <uses-permission android:name=\"android.permission.INTERNET\"/>\n <uses-permission android:name=\"android.permission.ACCESS_NETWORK_STATE\"/>\n <uses-permission android:name=\"android.permission.ACCESS_COARSE_LOCATION\"/>\n <uses-permission android:name=\"android.permission.ACCESS_FINE_LOCATION\"/>\n\n <application\n \n ...\n\n</manifest> ",
"_____no_output_____"
]
],
[
[
"### 5-2) Variables - `LocationManager` & `PERMISSION_CODE`",
"_____no_output_____"
]
],
[
[
"public class MainActivity extends AppCompatActivity {\n\n ...\n\n # User location-related variables\n # We need to ask for permission first when using user's current location.\n private LocationManager locationManager;\n private int PERMISSION_CODE = 1;\n\n ...",
"_____no_output_____"
]
],
[
[
"### 5-3) `LocationManager`\n",
"_____no_output_____"
],
[
"#### Inisialise and Check Permission\n- 1) Inisialise the LocationManager.\n- 2) Check whether the user has granted permission or not. If the permission is not granted, ask the user to grant the permission.",
"_____no_output_____"
]
],
[
[
" ...\n\n @Override\n protected void onCreate(Bundle savedInstanceState) {\n super.onCreate(savedInstanceState);\n\n ...\n\n # 1) Inisialise the LocationManager.\n locationManager = (LocationManager) getSystemService(Context.LOCATION_SERVICE);\n # 2) Permission check=\n if(ActivityCompat.checkSelfPermission(this, Manifest.permission.ACCESS_FINE_LOCATION)!= PackageManager.PERMISSION_GRANTED && ActivityCompat.checkSelfPermission(this,Manifest.permission.ACCESS_COARSE_LOCATION)!=PackageManager.PERMISSION_GRANTED) {\n ActivityCompat.requestPermissions(MainActivity.this, new String[]{Manifest.permission.ACCESS_FINE_LOCATION, Manifest.permission.ACCESS_COARSE_LOCATION}, PERMISSION_CODE);\n }\n\n ...",
"_____no_output_____"
]
],
[
[
"#### Get user's current location",
"_____no_output_____"
],
[
"#### Update city name and geolocation",
"_____no_output_____"
]
],
[
[
" ...\n\n # Once the permission is granted,\n Location location = locationManager.getLastKnownLocation(LocationManager.NETWORK_PROVIDER);\n # Get the user's city name using uer's current location obtained upon the granted permission.\n String cityName = getCityName(location.getLongitude(), location.getLatitude());\n # Pass the obtained cityName into getWeatherInfo to fetch the weather info.\n getWeatherInfo(cityName);\n\n searchIV.setOnClickListener(new View.OnClickListener(){\n @Override\n public void onClick(View v) {\n # Whatever user types on the city edit search bar (@id/idEditCity)\n # Turn the input into a string\n String city = cityEdt.getText().toString();\n\n # If no input, alert the user to enter something.\n if (city.isEmpty()){\n Toast.makeText(MainActivity.this, \"Please enter city name\", Toast.LENGTH_SHORT).show();\n } else {\n # if any input,\n cityNameTV.setText(cityName); # Update the city name display bar with the inputted text (city name)\n getWeatherInfo(city); # and fetch weather info.\n }\n }\n });\n }\n \n ...",
"_____no_output_____"
]
],
[
[
"#### `onRequestPermissionsResult`\n- Handle the user response: \n - if permissions granted: show granted status message. \n - if denied: show denied status message and close the app.",
"_____no_output_____"
]
],
[
[
" ...\n \n # Once the permission is granted by the user,\n # You can call this built-in method automatically.\n @Override\n public void onRequestPermissionsResult(int requestCode, @NonNull String[] permissions, @NonNull int[] grantResults) {\n super.onRequestPermissionsResult(requestCode, permissions, grantResults);\n if (requestCode==PERMISSION_CODE) {\n if (grantResults.length > 0 && grantResults[0] == PackageManager.PERMISSION_GRANTED) {\n Toast.makeText(this, \"Permissions granted\", Toast.LENGTH_SHORT).show();\n } else {\n Toast.makeText(this, \"Please provide permissions\", Toast.LENGTH_SHORT).show();\n finish();\n }\n }\n }\n\n ...",
"_____no_output_____"
]
],
[
[
"## 6) Set App Icon\n",
"_____no_output_____"
]
],
[
[
"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<manifest xmlns:android=\"http://schemas.android.com/apk/res/android\"\n package=\"com.example.weatherapp\" >\n\n <uses-permission android:name=\"android.permission.INTERNET\"/>\n ...\n\n <application\n android:allowBackup=\"true\"\n android:icon=\"@drawable/cloudy\" # set this to the icon of your choice.\n android:label=\"@string/app_name\"\n android:roundIcon=\"@drawable/cloudy\" # set this to the icon of your choice.\n android:supportsRtl=\"true\"\n android:theme=\"@style/Theme.WeatherApp\" >\n\n ...\n\n </application>\n\n</manifest>",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7159d68be61e769eddbfe80551e387ab9ce128b | 576,818 | ipynb | Jupyter Notebook | experiments/1a_data_wrangling.ipynb | eerichmond/ml-wildfire-prediction | 7b0fa66389f8429856896ccaf600c72c357272db | [
"CC0-1.0"
] | null | null | null | experiments/1a_data_wrangling.ipynb | eerichmond/ml-wildfire-prediction | 7b0fa66389f8429856896ccaf600c72c357272db | [
"CC0-1.0"
] | null | null | null | experiments/1a_data_wrangling.ipynb | eerichmond/ml-wildfire-prediction | 7b0fa66389f8429856896ccaf600c72c357272db | [
"CC0-1.0"
] | null | null | null | 45.187466 | 919 | 0.528014 | [
[
[
"# Wildfire and Drought Data Wrangling",
"_____no_output_____"
],
[
"A lot of the motivation behind my data wrangling came directly from the [weather, soil and drought Kaggle project](https://www.kaggle.com/datasets/cdminix/us-drought-meteorological-data). Christoph Minixhofer, the author of the Kaggle project, posted several [Jupyter notebooks on GitHub](https://github.com/MiniXC/droughted_scripts) on how he was able to gather the weather, soil and drought data from various United State goverment public APIs [<sup>2, 3, 4</sup>](#acknowledgements). I ended up reproducing a lot of his work because I wanted more fine grain weather, drought and soil data. His data coverered all of the United States and was only as narrow as each state's counties. I noticed a lot of the California counties were drastically different from each other in terms of terrain and weather patterns. I was worried that assuming each county was uniform was going to cause problems with the models.\n\nI also downloaded the [latest curated fire data from the United States Forest Service](https://www.fs.usda.gov/rds/archive/Catalog/RDS-2013-0009.5) by Karen Short as of 2021 [<sup>1</sup>](#acknowledgements). I wanted more data to overlap with the weather data since the weather API only started at 2000 and the original [wildfire Kaggle project](https://www.kaggle.com/datasets/cdminix/us-drought-meteorological-data) only went to 2015. The newest Forest Service data went out 2018 and contained corrections to the orginal data.",
"_____no_output_____"
],
[
"### Data Wrangling Steps\n\n - ☑ Download [wildfire Sqlite DB](https://www.fs.usda.gov/rds/archive/Catalog/RDS-2013-0009.5) [<sup>`</sup>](#acknowledgements)\n - ☑ Download [soil CSV from Kaggle](https://www.kaggle.com/cdminix/us-drought-meteorological-data) which was originally sourced form the Harmonized World Soil Database [<sup>4</sup>](#acknowledgements)\n - ☑ Import soil CSV into Sqlite\n - ☑ Remove non-California data to keep the dataset more focused\n - ☑ Remove wildfire and soil/weather data that does not overlap (pre 2000 wildfires)\n - ☑ Load county FIPS codes and geospatial latitude, longitude and polygon perimeter [<sup>5</sup>](#acknowledgements) into Sqlite\n - Add indexes/foreign keys to speed up Sqlite\n - ☑ year\n - ☑ FIPS\n - ☑ long/lat on fires and soil\n - ☑ Truncate latitude and longitude to 1/10th degree (~11.11 km)\n - ☑ Backfill fires data missing the county by cross referencing the long/lat with each county's geospatial perimeter\n - ☑ Query NASA's weather API by date and long/lat between 2000-01-01 and 2018-12-31 [<sup>2</sup>](#acknowledgements)\n - ☑ Query U.S. Drought Monitor API by date and FIPS county between 2000-01-01 and 2018-12-31 [<sup>3</sup>](#acknowledgements)\n - ☑ Incidate if there was a prior fire at the same geospatial location within the last 1, 2, 3, 4 and 5 years respectively\n\n#### Future Ideas\n - Running total of precipitation for past year (really slow)\n - Narrow down latitude and longitude to 1/10th degree (~11 km)\n - Download soil data by long/lat, instead of the current by county, [<sup>4</sup>](#acknowledgements)",
"_____no_output_____"
]
],
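[
[
"One degree of latitude spans roughly 111.1 km, so truncating coordinates to 1/10th degree buckets them into roughly 11.11 km cells. Here is a minimal sketch of the rounding used throughout this notebook (the sample coordinates are Alameda County's centroid):",
"_____no_output_____"
]
],
[
[
"# 1 degree of latitude ~ 111.1 km, so 1/10th of a degree ~ 11.11 km.\n# Round to the nearest 1/10th degree, as done for every lat/long in this notebook.\nlatitude, longitude = 37.648081, -121.913304\nprint(round(latitude * 10) / 10, round(longitude * 10) / 10)  # -> 37.6 -121.9",
"_____no_output_____"
]
],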
[
[
"%load_ext autoreload\n%autoreload 2\n\nfrom datetime import datetime, timedelta\nimport math\nimport numpy as np\nimport pandas as pd\nimport re\nimport requests\nimport shapely.wkt\nfrom shapely.geometry import Point, Polygon\nimport sqlite3\nimport time\nimport urllib.parse\n\nfrom all_col_queries import get_fires_df, get_no_fires_df",
"_____no_output_____"
]
],
[
[
"#### Download California Counties\n\n- Scrape Wikipedia for the Unites States counties.\n- Filter out non-California counties.\n- Truncate longitude and latitude to 1/10th degree or 6 minutes (~11.11 km). This should make the analysis go faster and also better generalize the location of predicted fires.\n- Join the Wikipedia county data with the geographic boundies data for each California county. The geographic boundaries data is in the form of `MULTIPOLYGON (((` tuples that can be interpreted by the shapely python package. Later I use the geographic ploygons to backfill missing wildfire county data.",
"_____no_output_____"
]
],
[
[
"df_county = pd.read_html('https://en.wikipedia.org/wiki/User:Michael_J/County_table')[0]\nfloat_degrees = lambda x: float(x.replace('°','').replace('–','-'))\ndf_county['latitude'] = df_county['Latitude'].apply(float_degrees)\ndf_county['longitude'] = df_county['Longitude'].apply(float_degrees)\ndf_county['lat'] = (round(df_county['latitude'] * 10) / 10)\ndf_county['long'] = (round(df_county['longitude'] * 10) / 10)\ndf_county['name'] = df_county['County [2]']\n\ndf_county = df_county[df_county['State'] == 'CA']\ndf_county = df_county.loc[:, df_county.columns.intersection(['FIPS', 'name', 'latitude', 'longitude', 'lat', 'long'])]\n\n# Downloaded from https://data.edd.ca.gov/api/views/bpwh-bcb3/rows.csv?accessType=DOWNLOAD\ncounty_geo_df = pd.read_csv('../data/county_geospatial.csv')\ncounty_geo_df = county_geo_df.loc[:, county_geo_df.columns.intersection(['name', 'geo_multipolygon'])]\n\ndf_county = pd.merge(df_county, county_geo_df, left_on='name', right_on='name')\ndf_county = df_county.set_index('FIPS')\n\nprint(df_county.head())",
" latitude longitude lat long name \\\nFIPS \n6001 37.648081 -121.913304 37.6 -121.9 Alameda \n6003 38.617610 -119.798999 38.6 -119.8 Alpine \n6005 38.443550 -120.653856 38.4 -120.7 Amador \n6007 39.665959 -121.601919 39.7 -121.6 Butte \n6009 38.187844 -120.555115 38.2 -120.6 Calaveras \n\n geo_multipolygon \nFIPS \n6001 MULTIPOLYGON (((-122.3110971410252 37.86340197... \n6003 MULTIPOLYGON (((-119.93538249202298 38.8084818... \n6005 MULTIPOLYGON (((-120.25874105290194 38.5799975... \n6007 MULTIPOLYGON (((-121.6354363647807 40.00088422... \n6009 MULTIPOLYGON (((-120.2108859831663 38.50000349... \n"
]
],
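[
[
"The `geo_multipolygon` column is plain WKT text, so shapely can parse it directly. A minimal sketch of the point-in-polygon check used later to backfill missing counties; the test point is a hypothetical location near Oakland, which should fall inside Alameda County (FIPS 6001):",
"_____no_output_____"
]
],
[
[
"# Parse a county's WKT multipolygon and test whether a point falls inside it.\nalameda = shapely.wkt.loads(df_county.loc[6001, 'geo_multipolygon'])\npoint = Point(-122.2, 37.8)  # hypothetical point near Oakland\nprint(alameda.contains(point))  # expected: True",
"_____no_output_____"
]
],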
[
[
"Load the counties DataFrame into Sqlite to make joins and analysis using SQL easier.",
"_____no_output_____"
]
],
[
[
"conn = sqlite3.connect('../data/fires.sqlite')\n\ncur = conn.cursor()\n\ncur.execute('DROP TABLE county')\ncur.execute('''CREATE TABLE county (\n\tfips\t \t\t\t\t\tINTEGER NOT NULL,\n\tname\t \t\t\t\t\tTEXT NOT NULL,\n\tlatitude \t\t\t\t\tREAL NOT NULL,\n\tlongitude\t\t\t\t\tREAL NOT NULL,\n\tlat\t \t\t\t\t\tREAL NOT NULL,\n\tlong\t \t\t\t\t\tREAL NOT NULL,\n\tgeo_multipolygon\tTEXT NOT NULL,\n\tPRIMARY KEY(fips)\n);''')\n\ndf_county.to_sql('county', conn, if_exists='append')\n\nconn.commit()\nconn.close()",
"_____no_output_____"
]
],
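[
[
"A quick read-back to confirm the load (just a sanity check; it selects a few rows from the new table):",
"_____no_output_____"
]
],
[
[
"# Verify the county table loaded by selecting a few rows back out of Sqlite.\nconn = sqlite3.connect('../data/fires.sqlite')\nprint(pd.read_sql('SELECT fips, name, lat, long FROM county LIMIT 3', conn))\nconn.close()",
"_____no_output_____"
]
],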
[
[
"Calculate the geographic boundaries for California as a reference.",
"_____no_output_____"
]
],
[
[
"ca_bounds = [-180, 90, 180, -90]\n\nfor i, county in df_county.iterrows():\n name = county['name']\n geo = shapely.wkt.loads(county['geo_multipolygon'])\n\n # East\n if (geo.bounds[0] > ca_bounds[0]):\n ca_bounds[0] = geo.bounds[0]\n\n # South\n if (geo.bounds[1] < ca_bounds[1]):\n ca_bounds[1] = geo.bounds[1]\n\n # West\n if (geo.bounds[2] < ca_bounds[2]):\n ca_bounds[2] = geo.bounds[2]\n\n # Norht\n if (geo.bounds[3] > ca_bounds[3]):\n ca_bounds[3] = geo.bounds[3]\n\nca_bounds = tuple(ca_bounds)\nprint(f'California bounds (east-south, west-north): {ca_bounds}')",
"California bounds (east-south, west-north): (-116.10618166434291, 32.53402817678555, -123.51814169611895, 42.009834867689875)\n"
]
],
[
[
"### Adjust Forest Service Data\n\nTo make analysis easier I fiddled with the Forest Service Sqlite table to make the schema more consistent with other data sources. This included:\n\n* Converting the discovery date `string` into the Sqlite ISO standard date `yyyy-mm-dd`\n* Renaming several of the columns to be shorter such as `FIRE_YEAR` to `year`, `FIPS_CODE` to `fips`, `NWCG_GENERAL_CAUSE` to `cause`, etc\n* Truncating `longitude` and `latitude` into 1/4th degree `long` and `lat` columns\n* Lowercasing the column names\n* Adding indexes on data and geospatial fields to help speed up the queries\n* Renaming the fire causes to be shorter\n* Adding a column for each fire's perimeter geospatial shape `geo_polygon` (only larger fires)\n* Adding 5 new columns to hold prior fire data",
"_____no_output_____"
]
],
[
[
"conn = sqlite3.connect('../data/fires.sqlite')\ncur = conn.cursor()\n\ncur.execute('ALTER TABLE fires ADD COLUMN year INTEGER NOT NULL DEFAULT 0')\ncur.execute('UPDATE fires SET year = FIRE_YEAR WHERE year = 0')\ncur.execute('ALTER TABLE fires DROP COLUMN FIRE_YEAR')\n\ncur.execute('ALTER TABLE fires ADD COLUMN month INTEGER NOT NULL DEFAULT 0')\ncur.execute(\"UPDATE fires set month = cast(substr(discovery_date, 1, instr(discovery_date,'/')-1) as 'INTEGER')\")\ncur.execute(\"ALTER TABLE fires ADD COLUMN day INTEGER NOT NULL DEFAULT 0\")\ncur.execute(\"UPDATE fires set day = cast(substr(substr(discovery_date, instr(discovery_date,'/')+1), 1, instr(substr(discovery_date, instr(discovery_date,'/')+1),'/')-1) as 'INTEGER')\")\ncur.execute(\"ALTER TABLE fires ADD COLUMN date TEXT NOT NULL DEFAULT ''\")\ncur.execute(\"UPDATE fires SET date = date(year|| '-' || substr('0' || month, -2, 2) || '-' || substr('0' || day, -2, 2))\")\n\ncur.execute('ALTER TABLE fires ADD COLUMN fips INTEGER NOT NULL DEFAULT 0')\ncur.execute('UPDATE fires SET fips = FIPS_CODE WHERE FIPS = 0 and FIPS_CODE is not null')\n\ncur.execute('ALTER TABLE fires ADD COLUMN long REAL NOT NULL DEFAULT 0')\ncur.execute('ALTER TABLE fires ADD COLUMN lat REAL NOT NULL DEFAULT 0')\ncur.execute('UPDATE fires SET long = round(LONGITUDE * 10) / 10, lat = round(LATITUDE * 10) / 10')\n\ncur.execute(\"ALTER TABLE fires ADD COLUMN date_1d_before TEXT NOT NULL DEFAULT ''\")\ncur.execute(\"UPDATE fires SET date_1d_before = date(date, '-1 days') where date_1d_before = ''\")\ncur.execute(\"ALTER TABLE fires ADD COLUMN date_2d_before TEXT NOT NULL DEFAULT ''\")\ncur.execute(\"UPDATE fires SET date_2d_before = date(date, '-2 days') where date_2d_before = ''\")\ncur.execute(\"ALTER TABLE fires ADD COLUMN date_3d_before TEXT NOT NULL DEFAULT ''\")\ncur.execute(\"UPDATE fires SET date_3d_before = date(date, '-3 days') where date_3d_before = ''\")\n\ncur.execute('ALTER TABLE fires ADD COLUMN cause TEXT')\ncur.execute(\"UPDATE fires SET cause = NWCG_GENERAL_CAUSE where cause is null\")\n\ncur.execute(\"UPDATE fires SET cause = 'Power' where cause = 'Power generation/transmission/distribution'\")\ncur.execute(\"UPDATE fires SET cause = 'Missing/Undefined' where cause = 'Missing data/not specified/undetermined'\")\ncur.execute(\"UPDATE fires SET cause = 'Equipment Use' where cause = 'Equipment and vehicle use'\")\ncur.execute(\"UPDATE fires SET cause = 'Arson' where cause = 'Arson/incendiarism'\")\ncur.execute(\"UPDATE fires SET cause = 'Children' where cause = 'Misuse of fire by a minor'\")\ncur.execute(\"UPDATE fires SET cause = 'Firearms' where cause = 'Firearms and explosives use'\")\ncur.execute(\"UPDATE fires SET cause = 'Railroad' where cause = 'Railroad operations and maintenance'\")\ncur.execute(\"UPDATE fires SET cause = 'Debris Burning' where cause = 'Debris and open burning'\")\ncur.execute(\"UPDATE fires SET cause = 'Recreation' where cause = 'Recreation and ceremony'\")\n\ncur.execute(\"ALTER TABLE fires ADD COLUMN geo_polygon TEXT\")\n\ncur.execute(\"ALTER TABLE fires ADD COLUMN prior_fire_0_1_year INTEGER NOT NULL DEFAULT 0\")\ncur.execute(\"ALTER TABLE fires ADD COLUMN prior_fire_1_2_year INTEGER NOT NULL DEFAULT 0\")\ncur.execute(\"ALTER TABLE fires ADD COLUMN prior_fire_2_3_year INTEGER NOT NULL DEFAULT 0\")\ncur.execute(\"ALTER TABLE fires ADD COLUMN prior_fire_3_4_year INTEGER NOT NULL DEFAULT 0\")\ncur.execute(\"ALTER TABLE fires ADD COLUMN prior_fire_4_5_year INTEGER NOT NULL DEFAULT 0\")\n\ncur.execute('DROP INDEX IF EXISTS 
idx_fires_fpa_id')\ncur.execute('CREATE INDEX idx_fires_fpa_id ON fires(fpa_id)')\n\ncur.execute('DROP INDEX IF EXISTS idx_fires_longitude_latitude')\ncur.execute('CREATE INDEX idx_fires_longitude_latitude ON fires(longitude, latitude)')\n\ncur.execute('DROP INDEX IF EXISTS idx_fires_date_long_lat')\ncur.execute('CREATE INDEX idx_fires_date_long_lat ON fires(date, long, lat)')\n\ncur.execute('DROP INDEX IF EXISTS idx_fires_date_range_long_lat')\ncur.execute('CREATE INDEX idx_fires_date_range_long_lat ON fires(date, date_1d_before, date_2d_before, date_3d_before, long, lat)')\n\ncur.execute('DROP INDEX IF EXISTS idx_fires_date_fips')\ncur.execute('CREATE INDEX idx_fires_date_fips ON fires(date, fips)')\n\ncur.execute('DROP INDEX IF EXISTS idx_fires_date_1d_before_fips')\ncur.execute('CREATE INDEX idx_fires_date_1d_before_fips ON fires(date_1d_before, fips)')\n\ncur.execute('DROP INDEX IF EXISTS idx_fires_date_2d_before_fips')\ncur.execute('CREATE INDEX idx_fires_date_2d_before_fips ON fires(date_2d_before, fips)')\n\ncur.execute('DROP INDEX IF EXISTS idx_fires_date_3d_before_fips')\ncur.execute('CREATE INDEX idx_fires_date_3d_before_fips ON fires(date_3d_before, fips)')\n\ncur.execute('DROP INDEX IF EXISTS idx_fires_date_1d_before_long_lat')\ncur.execute('CREATE INDEX idx_fires_date_1d_before_long_lat ON fires(date_1d_before, long, lat)')\n\ncur.execute('DROP INDEX IF EXISTS idx_fires_date_2d_before_long_lat')\ncur.execute('CREATE INDEX idx_fires_date_2d_before_long_lat ON fires(date_2d_before, long, lat)')\n\ncur.execute('DROP INDEX IF EXISTS idx_fires_date_3d_before_long_lat')\ncur.execute('CREATE INDEX idx_fires_date_3d_before_long_lat ON fires(date_3d_before, long, lat)')\n\nconn.commit()\nconn.close()",
"_____no_output_____"
]
],
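[
[
"The `prior_fire_*` flags are backfilled from the fires table itself. Below is a minimal sketch of how that could be done with correlated subqueries, assuming one-year sliding windows keyed on the truncated `long`/`lat` cell; the exact windows and matching rules used for the final dataset may differ:",
"_____no_output_____"
]
],
[
[
"# Sketch: flag whether another fire burned in the same 1/10th-degree cell within\n# each one-year window before this fire's discovery date (windows are assumed).\nconn = sqlite3.connect('../data/fires.sqlite')\ncur = conn.cursor()\n\nfor years_back in range(1, 6):\n    col = f'prior_fire_{years_back - 1}_{years_back}_year'\n    cur.execute(f'''\n        UPDATE fires SET {col} = EXISTS (\n            SELECT 1 FROM fires prior\n            WHERE prior.long = fires.long AND prior.lat = fires.lat\n              AND prior.date >= date(fires.date, '-{years_back} years')\n              AND prior.date < date(fires.date, '-{years_back - 1} years')\n        )\n    ''')\n\nconn.commit()\nconn.close()",
"_____no_output_____"
]
],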
[
[
"Create a `weater_geo` table that holds the daily weather details at 1/4th degree or 27.77km wide longitude/latitude points between 1 Jan 2000 and 21 Dec 2018.",
"_____no_output_____"
]
],
[
[
"conn = sqlite3.connect('../data/fires.sqlite')\ncur = conn.cursor()\n\ncur.execute('''CREATE TABLE weather_geo (\n\tdate\t\t\t\t\t\t\t TEXT NOT NULL,\n\tyear\t\t\t\t\t\t\t INTEGER NOT NULL,\n\tmonth\t\t\t\t\t\t\t INTEGER NOT NULL,\n\tday \t\t\t\t\t\t\t INTEGER NOT NULL,\n\tlong\t\t\t\t\t\t REAL NOT NULL,\n\tlat\t\t\t\t\t\t\t REAL NOT NULL,\n\tfips\t\t\t\t\t\t\t INTEGER NOT NULL,\n\tdrought_score\t\t\t INTEGER NOT NULL DEFAULT 0,\n\tprecipitation\t\t\t REAL NOT NULL,\n\tpressure\t\t\t\t\t REAL NOT NULL,\n\thumidity_2m\t\t\t\t REAL NOT NULL,\n\ttemp_2m\t\t\t\t\t\t REAL NOT NULL,\n\ttemp_dew_point_2m\t REAL NOT NULL,\n\ttemp_wet_bulb_2m\t REAL NOT NULL,\n\ttemp_max_2m\t\t\t\t REAL NOT NULL,\n\ttemp_min_2m\t\t\t\t REAL NOT NULL,\n\ttemp_range_2m\t\t\t REAL NOT NULL,\n\ttemp_0m\t\t\t\t\t\t REAL NOT NULL,\n\twind_10m\t\t\t\t\t REAL NOT NULL,\n\twind_max_10m\t\t\t REAL NOT NULL,\n\twind_min_10m\t\t\t REAL NOT NULL,\n\twind_range_10m\t\t REAL NOT NULL,\n\twind_50m\t\t\t\t\t REAL NOT NULL,\n\twind_max_50m\t\t\t REAL NOT NULL,\n\twind_min_50m\t\t\t REAL NOT NULL,\n\twind_range_50m\t\t REAL NOT NULL,\n\tPRIMARY KEY(date, long, lat)\n);''')\n\ncur.execute('DROP INDEX IF EXISTS idx_weather_geo_fips')\ncur.execute('CREATE INDEX idx_weather_geo_fips ON weather_geo (fips)')\n\ncur.execute('DROP INDEX IF EXISTS idx_weather_geo_long_lat')\ncur.execute('CREATE INDEX idx_weather_geo_long_lat ON weather_geo (long, lat)')\n\ncur.execute('DROP INDEX IF EXISTS idx_weather_geo_month_long_lat')\ncur.execute('CREATE INDEX idx_weather_geo_month_long_lat ON weather_geo (month, long, lat)')\n\ncur.execute('DROP INDEX IF EXISTS idx_weather_geo_day')\ncur.execute('CREATE INDEX idx_weather_geo_day ON weather_geo(day)')\n\nconn.commit()\nconn.close()",
"_____no_output_____"
]
],
[
[
"Defined the `fetch_weather` method for pulling various weather data points from NASA's POWER temperature and weather data API [<sup>2</sup>](#Acknowledgements).",
"_____no_output_____"
]
],
[
[
"weather_params = [p.strip() for p in re.findall(\n\"^\\w+\",\n\"\"\"\nWS10M_MIN MERRA2 1/2x1/2 Minimum Wind Speed at 10 Meters (m/s) \nQV2M MERRA2 1/2x1/2 Specific Humidity at 2 Meters (g/kg) \nT2M_RANGE MERRA2 1/2x1/2 Temperature Range at 2 Meters (C) \nWS10M MERRA2 1/2x1/2 Wind Speed at 10 Meters (m/s) \nT2M MERRA2 1/2x1/2 Temperature at 2 Meters (C) \nWS50M_MIN MERRA2 1/2x1/2 Minimum Wind Speed at 50 Meters (m/s) \nT2M_MAX MERRA2 1/2x1/2 Maximum Temperature at 2 Meters (C) \nWS50M MERRA2 1/2x1/2 Wind Speed at 50 Meters (m/s) \nTS MERRA2 1/2x1/2 Earth Skin Temperature (C) \nWS50M_RANGE MERRA2 1/2x1/2 Wind Speed Range at 50 Meters (m/s) \nWS50M_MAX MERRA2 1/2x1/2 Maximum Wind Speed at 50 Meters (m/s) \nWS10M_MAX MERRA2 1/2x1/2 Maximum Wind Speed at 10 Meters (m/s) \nWS10M_RANGE MERRA2 1/2x1/2 Wind Speed Range at 10 Meters (m/s) \nPS MERRA2 1/2x1/2 Surface Pressure (kPa) \nT2MDEW MERRA2 1/2x1/2 Dew/Frost Point at 2 Meters (C) \nT2M_MIN MERRA2 1/2x1/2 Minimum Temperature at 2 Meters (C) \nT2MWET MERRA2 1/2x1/2 Wet Bulb Temperature at 2 Meters (C) \nPRECTOT MERRA2 1/2x1/2 Precipitation (mm day-1) \n\"\"\",\nre.MULTILINE\n)]\n\nprint(weather_params)\n\ndef fetch_weather(long, lat, start, end):\n return requests.get(\n 'https://power.larc.nasa.gov/api/temporal/daily/point',\n {\n 'parameters': ','.join(weather_params),\n 'community': 'SB',\n 'longitude': long,\n 'latitude': lat,\n 'start': start,\n 'end': end,\n 'format': 'JSON',\n }\n ).json()['properties']['parameter']",
"['WS10M_MIN', 'QV2M', 'T2M_RANGE', 'WS10M', 'T2M', 'WS50M_MIN', 'T2M_MAX', 'WS50M', 'TS', 'WS50M_RANGE', 'WS50M_MAX', 'WS10M_MAX', 'WS10M_RANGE', 'PS', 'T2MDEW', 'T2M_MIN', 'T2MWET', 'PRECTOT']\n"
]
],
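[
[
"A quick smoke test of `fetch_weather` for a single grid point (this hits the live NASA POWER API, so it needs network access; the point is one of the Butte County cells fetched below):",
"_____no_output_____"
]
],
[
[
"# Fetch one week of weather for a single 1/10th-degree point and spot-check it.\nsample = fetch_weather(-121.8, 39.7, '20000101', '20000107')\nprint(sorted(sample.keys()))      # parameter names, each keyed by yyyymmdd dates\nprint(sample['T2M']['20000103'])  # temperature at 2 meters on 3 Jan 2000",
"_____no_output_____"
]
],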
[
[
"For each California county iterate over all the 11.11km (1/10th degree or 6 minutes) longitude and latitude points within that county's goegraphical boundary and fetch the weather data for those points between 1 Jan 2000 and 31 Dec 2018.",
"_____no_output_____"
]
],
[
[
"start_date = '20000101'\nend_date = '20181231'\n\nconn = sqlite3.connect('../data/fires.sqlite')\ncur = conn.cursor()\n\nfor fips, county in df_county.iterrows():\n name = county.name\n geo = shapely.wkt.loads(county.geo_multipolygon)\n\n long_min = round(geo.bounds[0], 1)\n long_max = round(geo.bounds[2], 1)\n\n lat_min = round(geo.bounds[1], 1)\n lat_max = round(geo.bounds[3], 1)\n\n print(f'{name} southwest to northeast: ({long_min}, {lat_min}) to ({long_max}, {lat_max})')\n\n for long in range(int(long_min * 10), int(long_max * 10) + 1):\n for lat in range(int(lat_min * 10), int(lat_max * 10) + 1):\n if ((long % 10 == 0 or long % 10 == 5) and (lat % 10 == 0 or lat % 10 == 5)):\n continue\n\n point = Point(long / 10, lat / 10)\n start = time.time()\n\n if geo.contains(point):\n # Only process lat/long that have not been precessed yet.\n cur.execute(\"\"\"SELECT 1 as found FROM weather_geo WHERE lat = :lat and long = :long\"\"\", { 'lat': point.y, 'long': point.x })\n found = cur.fetchall()\n\n if (len(found) == 0):\n print(f'lat: {point.x}, long: {point.y}')\n json = fetch_weather(point.x, point.y, start_date, end_date)\n\n for date in json['TS'].keys():\n cur.execute('''\n INSERT INTO weather_geo (\n date, year, month, day, long, lat, fips, precipitation, pressure, humidity_2m,\n temp_2m, temp_dew_point_2m, temp_wet_bulb_2m, temp_max_2m, temp_min_2m, temp_range_2m,\n temp_0m, wind_10m, wind_max_10m, wind_min_10m, wind_range_10m, wind_50m,\n wind_max_50m, wind_min_50m, wind_range_50m\n )\n VALUES (\n :date, :year, :month, :day, :long, :lat, :fips, :precipitation, :pressure, :humidity_2m,\n :temp_2m, :temp_dew_point_2m, :temp_wet_bulb_2m, :temp_max_2m, :temp_min_2m, :temp_range_2m,\n :temp_0m, :wind_10m, :wind_max_10m, :wind_min_10m, :wind_range_10m, :wind_50m,\n :wind_max_50m, :wind_min_50m, :wind_range_50m\n )\n ''', {\n 'date': f'{date[0:4]}-{date[4:6]}-{date[6:8]}',\n 'year': int(date[0:4]),\n 'month': int(date[4:6]),\n 'day': int(date[6:8]),\n 'long': point.x,\n 'lat': point.y,\n 'fips': fips,\n 'precipitation': json['PRECTOTCORR'][date],\n 'pressure': json['PS'][date],\n 'humidity_2m': json['QV2M'][date],\n 'temp_2m': json['T2M'][date],\n 'temp_dew_point_2m': json['T2MDEW'][date],\n 'temp_wet_bulb_2m': json['T2MWET'][date],\n 'temp_max_2m': json['T2M_MAX'][date],\n 'temp_min_2m': json['T2M_MIN'][date],\n 'temp_range_2m': json['T2M_RANGE'][date],\n 'temp_0m': json['TS'][date],\n 'wind_10m': json['WS10M'][date],\n 'wind_max_10m': json['WS10M_MAX'][date],\n 'wind_min_10m': json['WS10M_MIN'][date],\n 'wind_range_10m': json['WS10M_RANGE'][date],\n 'wind_50m': json['WS50M'][date],\n 'wind_max_50m': json['WS50M_MAX'][date],\n 'wind_min_50m': json['WS50M_MIN'][date],\n 'wind_range_50m': json['WS50M_RANGE'][date]\n })\n\n conn.commit()\n\n end = time.time()\n print(f'{name} at {point} took {round(end - start, 1)}s')\n\nconn.close()",
"6001 southwest to northeast: (-122.3, 37.5) to (-121.5, 37.9)\n6003 southwest to northeast: (-120.1, 38.3) to (-119.5, 38.9)\n6005 southwest to northeast: (-121.0, 38.2) to (-120.1, 38.7)\nlat: -120.8, long: 38.4\n6005 at POINT (-120.8 38.4) took 10.4s\nlat: -120.8, long: 38.5\n6005 at POINT (-120.8 38.5) took 10.2s\nlat: -120.7, long: 38.4\n6005 at POINT (-120.7 38.4) took 9.6s\nlat: -120.7, long: 38.5\n6005 at POINT (-120.7 38.5) took 10.0s\nlat: -120.6, long: 38.4\n6005 at POINT (-120.6 38.4) took 10.5s\nlat: -120.6, long: 38.5\n6005 at POINT (-120.6 38.5) took 10.7s\nlat: -120.4, long: 38.5\n6005 at POINT (-120.4 38.5) took 9.5s\nlat: -120.3, long: 38.5\n6005 at POINT (-120.3 38.5) took 10.2s\nlat: -120.2, long: 38.5\n6005 at POINT (-120.2 38.5) took 9.7s\nlat: -120.2, long: 38.6\n6005 at POINT (-120.2 38.6) took 69.7s\nlat: -120.1, long: 38.6\n6005 at POINT (-120.1 38.6) took 10.4s\nlat: -120.1, long: 38.7\n6005 at POINT (-120.1 38.7) took 10.0s\n6007 southwest to northeast: (-122.1, 39.3) to (-121.1, 40.2)\nlat: -122.0, long: 39.8\n6007 at POINT (-122 39.8) took 9.9s\nlat: -121.9, long: 39.6\n6007 at POINT (-121.9 39.6) took 10.4s\nlat: -121.9, long: 39.7\n6007 at POINT (-121.9 39.7) took 10.5s\nlat: -121.9, long: 39.8\n6007 at POINT (-121.9 39.8) took 10.4s\nlat: -121.8, long: 39.4\n6007 at POINT (-121.8 39.4) took 10.5s\nlat: -121.8, long: 39.5\n6007 at POINT (-121.8 39.5) took 9.9s\nlat: -121.8, long: 39.6\n6007 at POINT (-121.8 39.6) took 10.3s\nlat: -121.8, long: 39.7\n6007 at POINT (-121.8 39.7) took 9.8s\nlat: -121.8, long: 39.8\n6007 at POINT (-121.8 39.8) took 10.7s\nlat: -121.7, long: 39.4\n6007 at POINT (-121.7 39.4) took 10.6s\nlat: -121.7, long: 39.5\n6007 at POINT (-121.7 39.5) took 10.6s\nlat: -121.7, long: 39.6\n6007 at POINT (-121.7 39.6) took 10.5s\nlat: -121.7, long: 39.7\n6007 at POINT (-121.7 39.7) took 9.8s\nlat: -121.7, long: 39.8\n6007 at POINT (-121.7 39.8) took 10.2s\nlat: -121.7, long: 39.9\n6007 at POINT (-121.7 39.9) took 10.3s\nlat: -121.6, long: 39.4\n6007 at POINT (-121.6 39.4) took 71.0s\nlat: -121.6, long: 39.5\n6007 at POINT (-121.6 39.5) took 12.2s\nlat: -121.6, long: 39.6\n6007 at POINT (-121.6 39.6) took 11.3s\nlat: -121.6, long: 39.7\n6007 at POINT (-121.6 39.7) took 10.9s\nlat: -121.6, long: 39.8\n6007 at POINT (-121.6 39.8) took 11.0s\nlat: -121.6, long: 39.9\n6007 at POINT (-121.6 39.9) took 10.4s\nlat: -121.6, long: 40.0\n6007 at POINT (-121.6 40) took 10.8s\nlat: -121.5, long: 39.4\n6007 at POINT (-121.5 39.4) took 71.7s\nlat: -121.5, long: 39.6\n6007 at POINT (-121.5 39.6) took 11.0s\nlat: -121.5, long: 39.7\n6007 at POINT (-121.5 39.7) took 11.2s\nlat: -121.5, long: 39.8\n6007 at POINT (-121.5 39.8) took 11.2s\nlat: -121.5, long: 39.9\n6007 at POINT (-121.5 39.9) took 11.0s\nlat: -121.5, long: 40.1\n6007 at POINT (-121.5 40.1) took 11.6s\nlat: -121.4, long: 39.4\n6007 at POINT (-121.4 39.4) took 11.3s\nlat: -121.4, long: 39.5\n6007 at POINT (-121.4 39.5) took 11.4s\nlat: -121.4, long: 39.6\n6007 at POINT (-121.4 39.6) took 11.0s\nlat: -121.4, long: 39.7\n6007 at POINT (-121.4 39.7) took 11.2s\nlat: -121.4, long: 39.8\n6007 at POINT (-121.4 39.8) took 12.4s\nlat: -121.4, long: 40.1\n6007 at POINT (-121.4 40.1) took 10.9s\nlat: -121.3, long: 39.6\n6007 at POINT (-121.3 39.6) took 10.9s\nlat: -121.3, long: 39.7\n6007 at POINT (-121.3 39.7) took 11.0s\nlat: -121.2, long: 39.6\n6007 at POINT (-121.2 39.6) took 11.2s\nlat: -121.2, long: 39.7\n6007 at POINT (-121.2 39.7) took 10.3s\nlat: -121.1, long: 39.6\n6007 at POINT (-121.1 39.6) took 
10.7s\n6009 southwest to northeast: (-121.0, 37.8) to (-120.0, 38.5)\nlat: -120.9, long: 38.1\n6009 at POINT (-120.9 38.1) took 9.8s\nlat: -120.9, long: 38.2\n6009 at POINT (-120.9 38.2) took 9.9s\nlat: -120.8, long: 38.0\n6009 at POINT (-120.8 38) took 10.5s\nlat: -120.8, long: 38.1\n6009 at POINT (-120.8 38.1) took 9.9s\nlat: -120.8, long: 38.2\n6009 at POINT (-120.8 38.2) took 10.3s\nlat: -120.7, long: 37.9\n6009 at POINT (-120.7 37.9) took 10.5s\nlat: -120.7, long: 38.0\n6009 at POINT (-120.7 38) took 10.1s\nlat: -120.7, long: 38.1\n6009 at POINT (-120.7 38.1) took 11.4s\nlat: -120.7, long: 38.2\n6009 at POINT (-120.7 38.2) took 10.8s\nlat: -120.7, long: 38.3\n6009 at POINT (-120.7 38.3) took 11.0s\nlat: -120.6, long: 37.9\n6009 at POINT (-120.6 37.9) took 12.9s\nlat: -120.6, long: 38.0\n6009 at POINT (-120.6 38) took 11.0s\nlat: -120.6, long: 38.1\n6009 at POINT (-120.6 38.1) took 10.5s\nlat: -120.6, long: 38.2\n6009 at POINT (-120.6 38.2) took 10.8s\nlat: -120.6, long: 38.3\n6009 at POINT (-120.6 38.3) took 11.2s\nlat: -120.5, long: 38.1\n6009 at POINT (-120.5 38.1) took 11.2s\nlat: -120.5, long: 38.2\n6009 at POINT (-120.5 38.2) took 11.0s\nlat: -120.5, long: 38.3\n6009 at POINT (-120.5 38.3) took 12.4s\nlat: -120.5, long: 38.4\n6009 at POINT (-120.5 38.4) took 11.9s\nlat: -120.4, long: 38.2\n6009 at POINT (-120.4 38.2) took 11.8s\nlat: -120.4, long: 38.3\n6009 at POINT (-120.4 38.3) took 11.6s\nlat: -120.4, long: 38.4\n6009 at POINT (-120.4 38.4) took 13.5s\nlat: -120.3, long: 38.3\n6009 at POINT (-120.3 38.3) took 70.8s\nlat: -120.3, long: 38.4\n6009 at POINT (-120.3 38.4) took 11.7s\nlat: -120.2, long: 38.4\n6009 at POINT (-120.2 38.4) took 11.5s\nlat: -120.1, long: 38.5\n6009 at POINT (-120.1 38.5) took 12.2s\n6011 southwest to northeast: (-122.8, 38.9) to (-121.8, 39.4)\nlat: -122.7, long: 39.3\n6011 at POINT (-122.7 39.3) took 10.5s\nlat: -122.6, long: 39.3\n6011 at POINT (-122.6 39.3) took 10.0s\nlat: -122.5, long: 39.2\n6011 at POINT (-122.5 39.2) took 10.3s\nlat: -122.5, long: 39.3\n6011 at POINT (-122.5 39.3) took 9.9s\nlat: -122.4, long: 39.0\n6011 at POINT (-122.4 39) took 10.1s\nlat: -122.4, long: 39.1\n6011 at POINT (-122.4 39.1) took 10.1s\nlat: -122.4, long: 39.2\n6011 at POINT (-122.4 39.2) took 10.6s\nlat: -122.4, long: 39.3\n6011 at POINT (-122.4 39.3) took 10.2s\nlat: -122.3, long: 39.0\n6011 at POINT (-122.3 39) took 11.3s\nlat: -122.3, long: 39.1\n6011 at POINT (-122.3 39.1) took 11.1s\nlat: -122.3, long: 39.2\n6011 at POINT (-122.3 39.2) took 10.3s\nlat: -122.3, long: 39.3\n6011 at POINT (-122.3 39.3) took 11.0s\nlat: -122.2, long: 39.0\n6011 at POINT (-122.2 39) took 10.7s\nlat: -122.2, long: 39.1\n6011 at POINT (-122.2 39.1) took 11.0s\nlat: -122.2, long: 39.2\n6011 at POINT (-122.2 39.2) took 11.5s\nlat: -122.2, long: 39.3\n6011 at POINT (-122.2 39.3) took 11.3s\nlat: -122.1, long: 39.0\n6011 at POINT (-122.1 39) took 10.2s\nlat: -122.1, long: 39.1\n6011 at POINT (-122.1 39.1) took 10.7s\nlat: -122.1, long: 39.2\n6011 at POINT (-122.1 39.2) took 10.9s\nlat: -122.1, long: 39.3\n6011 at POINT (-122.1 39.3) took 10.6s\nlat: -122.1, long: 39.4\n6011 at POINT (-122.1 39.4) took 10.6s\nlat: -122.0, long: 39.1\n6011 at POINT (-122 39.1) took 11.4s\nlat: -122.0, long: 39.2\n6011 at POINT (-122 39.2) took 10.3s\nlat: -122.0, long: 39.3\n6011 at POINT (-122 39.3) took 11.2s\nlat: -121.9, long: 39.0\n6011 at POINT (-121.9 39) took 10.4s\nlat: -121.9, long: 39.1\n6011 at POINT (-121.9 39.1) took 11.7s\n6013 southwest to northeast: (-122.4, 37.7) to (-121.5, 38.1)\nlat: 
-122.3, long: 37.9\n6013 at POINT (-122.3 37.9) took 11.9s\nlat: -122.3, long: 38.0\n6013 at POINT (-122.3 38) took 10.2s\nlat: -122.2, long: 37.9\n6013 at POINT (-122.2 37.9) took 11.0s\nlat: -122.2, long: 38.0\n6013 at POINT (-122.2 38) took 11.4s\nlat: -122.1, long: 37.9\n6013 at POINT (-122.1 37.9) took 10.8s\nlat: -122.1, long: 38.0\n6013 at POINT (-122.1 38) took 10.2s\nlat: -122.0, long: 37.8\n6013 at POINT (-122 37.8) took 10.7s\nlat: -122.0, long: 37.9\n6013 at POINT (-122 37.9) took 10.2s\nlat: -121.9, long: 37.8\n6013 at POINT (-121.9 37.8) took 11.3s\nlat: -121.9, long: 37.9\n6013 at POINT (-121.9 37.9) took 10.8s\nlat: -121.9, long: 38.0\n6013 at POINT (-121.9 38) took 11.3s\nlat: -121.8, long: 37.8\n6013 at POINT (-121.8 37.8) took 9.8s\nlat: -121.8, long: 37.9\n6013 at POINT (-121.8 37.9) took 10.9s\nlat: -121.8, long: 38.0\n6013 at POINT (-121.8 38) took 9.9s\nlat: -121.7, long: 37.8\n6013 at POINT (-121.7 37.8) took 10.1s\nlat: -121.7, long: 37.9\n6013 at POINT (-121.7 37.9) took 9.9s\nlat: -121.7, long: 38.0\n6013 at POINT (-121.7 38) took 10.7s\nlat: -121.6, long: 37.9\n6013 at POINT (-121.6 37.9) took 11.0s\nlat: -121.6, long: 38.0\n6013 at POINT (-121.6 38) took 12.7s\n6015 southwest to northeast: (-124.3, 41.4) to (-123.5, 42.0)\nlat: -124.2, long: 41.8\n6015 at POINT (-124.2 41.8) took 11.4s\nlat: -124.2, long: 41.9\n6015 at POINT (-124.2 41.9) took 10.6s\nlat: -124.1, long: 41.6\n6015 at POINT (-124.1 41.6) took 10.0s\nlat: -124.1, long: 41.7\n6015 at POINT (-124.1 41.7) took 10.7s\nlat: -124.1, long: 41.8\n6015 at POINT (-124.1 41.8) took 10.4s\nlat: -124.1, long: 41.9\n6015 at POINT (-124.1 41.9) took 11.8s\nlat: -124.0, long: 41.6\n6015 at POINT (-124 41.6) took 11.7s\nlat: -124.0, long: 41.7\n6015 at POINT (-124 41.7) took 11.6s\nlat: -124.0, long: 41.8\n6015 at POINT (-124 41.8) took 11.2s\nlat: -124.0, long: 41.9\n6015 at POINT (-124 41.9) took 12.1s\nlat: -123.9, long: 41.5\n6015 at POINT (-123.9 41.5) took 71.8s\nlat: -123.9, long: 41.6\n6015 at POINT (-123.9 41.6) took 71.0s\nlat: -123.9, long: 41.7\n6015 at POINT (-123.9 41.7) took 11.1s\nlat: -123.9, long: 41.8\n6015 at POINT (-123.9 41.8) took 10.5s\nlat: -123.9, long: 41.9\n6015 at POINT (-123.9 41.9) took 10.8s\nlat: -123.8, long: 41.5\n6015 at POINT (-123.8 41.5) took 10.9s\nlat: -123.8, long: 41.6\n6015 at POINT (-123.8 41.6) took 11.0s\nlat: -123.8, long: 41.7\n6015 at POINT (-123.8 41.7) took 11.1s\nlat: -123.8, long: 41.8\n6015 at POINT (-123.8 41.8) took 11.8s\nlat: -123.8, long: 41.9\n6015 at POINT (-123.8 41.9) took 10.4s\nlat: -123.7, long: 41.4\n6015 at POINT (-123.7 41.4) took 10.4s\nlat: -123.7, long: 41.5\n6015 at POINT (-123.7 41.5) took 10.4s\nlat: -123.7, long: 41.7\n6015 at POINT (-123.7 41.7) took 11.3s\nlat: -123.7, long: 41.8\n6015 at POINT (-123.7 41.8) took 10.5s\nlat: -123.7, long: 41.9\n6015 at POINT (-123.7 41.9) took 10.2s\nlat: -123.6, long: 41.9\n6015 at POINT (-123.6 41.9) took 10.5s\nlat: -123.6, long: 42.0\n6015 at POINT (-123.6 42) took 10.8s\n6017 southwest to northeast: (-121.1, 38.5) to (-119.9, 39.1)\nlat: -121.1, long: 38.7\n6017 at POINT (-121.1 38.7) took 10.0s\nlat: -121.1, long: 38.8\n6017 at POINT (-121.1 38.8) took 10.4s\nlat: -121.0, long: 38.6\n6017 at POINT (-121 38.6) took 10.1s\nlat: -121.0, long: 38.7\n6017 at POINT (-121 38.7) took 10.3s\nlat: -121.0, long: 38.8\n6017 at POINT (-121 38.8) took 10.5s\nlat: -121.0, long: 38.9\n6017 at POINT (-121 38.9) took 10.0s\nlat: -120.9, long: 38.6\n6017 at POINT (-120.9 38.6) took 10.2s\nlat: -120.9, long: 
38.7\n6017 at POINT (-120.9 38.7) took 10.6s\nlat: -120.9, long: 38.8\n6017 at POINT (-120.9 38.8) took 11.7s\nlat: -120.9, long: 38.9\n6017 at POINT (-120.9 38.9) took 10.2s\nlat: -120.8, long: 38.6\n6017 at POINT (-120.8 38.6) took 10.2s\nlat: -120.8, long: 38.7\n6017 at POINT (-120.8 38.7) took 11.3s\nlat: -120.8, long: 38.8\n6017 at POINT (-120.8 38.8) took 11.5s\nlat: -120.8, long: 38.9\n6017 at POINT (-120.8 38.9) took 11.0s\nlat: -120.7, long: 38.6\n6017 at POINT (-120.7 38.6) took 10.3s\nlat: -120.7, long: 38.7\n6017 at POINT (-120.7 38.7) took 71.0s\nlat: -120.7, long: 38.8\n6017 at POINT (-120.7 38.8) took 10.6s\nlat: -120.7, long: 38.9\n6017 at POINT (-120.7 38.9) took 10.4s\nlat: -120.6, long: 38.6\n6017 at POINT (-120.6 38.6) took 10.1s\nlat: -120.6, long: 38.7\n6017 at POINT (-120.6 38.7) took 10.2s\nlat: -120.6, long: 38.8\n6017 at POINT (-120.6 38.8) took 10.7s\nlat: -120.6, long: 38.9\n6017 at POINT (-120.6 38.9) took 10.5s\nlat: -120.5, long: 38.6\n6017 at POINT (-120.5 38.6) took 10.9s\nlat: -120.5, long: 38.7\n6017 at POINT (-120.5 38.7) took 10.6s\nlat: -120.5, long: 38.8\n6017 at POINT (-120.5 38.8) took 10.8s\nlat: -120.5, long: 38.9\n6017 at POINT (-120.5 38.9) took 70.5s\nlat: -120.4, long: 38.6\n6017 at POINT (-120.4 38.6) took 11.6s\nlat: -120.4, long: 38.7\n6017 at POINT (-120.4 38.7) took 13.8s\nlat: -120.4, long: 38.8\n6017 at POINT (-120.4 38.8) took 11.9s\nlat: -120.4, long: 38.9\n6017 at POINT (-120.4 38.9) took 12.1s\nlat: -120.4, long: 39.0\n6017 at POINT (-120.4 39) took 10.8s\nlat: -120.3, long: 38.6\n6017 at POINT (-120.3 38.6) took 10.6s\nlat: -120.3, long: 38.7\n6017 at POINT (-120.3 38.7) took 11.2s\nlat: -120.3, long: 38.8\n6017 at POINT (-120.3 38.8) took 11.3s\nlat: -120.3, long: 38.9\n6017 at POINT (-120.3 38.9) took 10.6s\nlat: -120.3, long: 39.0\n6017 at POINT (-120.3 39) took 10.4s\nlat: -120.2, long: 38.7\n6017 at POINT (-120.2 38.7) took 12.1s\nlat: -120.2, long: 38.8\n6017 at POINT (-120.2 38.8) took 11.4s\nlat: -120.2, long: 38.9\n6017 at POINT (-120.2 38.9) took 10.9s\nlat: -120.2, long: 39.0\n6017 at POINT (-120.2 39) took 11.1s\nlat: -120.1, long: 38.8\n6017 at POINT (-120.1 38.8) took 11.6s\nlat: -120.1, long: 38.9\n6017 at POINT (-120.1 38.9) took 11.8s\nlat: -120.1, long: 39.0\n6017 at POINT (-120.1 39) took 11.3s\nlat: -120.0, long: 38.8\n6017 at POINT (-120 38.8) took 13.5s\nlat: -120.0, long: 38.9\n6017 at POINT (-120 38.9) took 11.9s\nlat: -119.9, long: 38.9\n6017 at POINT (-119.9 38.9) took 12.1s\n6019 southwest to northeast: (-120.9, 35.9) to (-118.4, 37.6)\nlat: -120.8, long: 36.7\n6019 at POINT (-120.8 36.7) took 10.9s\nlat: -120.8, long: 36.8\n6019 at POINT (-120.8 36.8) took 10.6s\nlat: -120.7, long: 36.6\n6019 at POINT (-120.7 36.6) took 11.5s\nlat: -120.7, long: 36.7\n6019 at POINT (-120.7 36.7) took 12.0s\nlat: -120.7, long: 36.8\n6019 at POINT (-120.7 36.8) took 11.6s\nlat: -120.7, long: 36.9\n6019 at POINT (-120.7 36.9) took 11.8s\nlat: -120.6, long: 36.2\n6019 at POINT (-120.6 36.2) took 12.5s\nlat: -120.6, long: 36.3\n6019 at POINT (-120.6 36.3) took 12.1s\nlat: -120.6, long: 36.5\n6019 at POINT (-120.6 36.5) took 14.1s\nlat: -120.6, long: 36.6\n6019 at POINT (-120.6 36.6) took 13.3s\nlat: -120.6, long: 36.7\n6019 at POINT (-120.6 36.7) took 73.0s\nlat: -120.6, long: 36.8\n6019 at POINT (-120.6 36.8) took 10.9s\nlat: -120.6, long: 36.9\n6019 at POINT (-120.6 36.9) took 11.5s\nlat: -120.5, long: 36.1\n6019 at POINT (-120.5 36.1) took 11.4s\nlat: -120.5, long: 36.2\n6019 at POINT (-120.5 36.2) took 12.8s\nlat: -120.5, 
long: 36.3\n6019 at POINT (-120.5 36.3) took 11.6s\nlat: -120.5, long: 36.4\n6019 at POINT (-120.5 36.4) took 11.3s\nlat: -120.5, long: 36.6\n6019 at POINT (-120.5 36.6) took 11.5s\nlat: -120.5, long: 36.7\n6019 at POINT (-120.5 36.7) took 12.3s\nlat: -120.5, long: 36.8\n6019 at POINT (-120.5 36.8) took 11.8s\nlat: -120.5, long: 36.9\n6019 at POINT (-120.5 36.9) took 11.8s\nlat: -120.4, long: 36.0\n6019 at POINT (-120.4 36) took 11.1s\nlat: -120.4, long: 36.1\n6019 at POINT (-120.4 36.1) took 11.7s\nlat: -120.4, long: 36.2\n6019 at POINT (-120.4 36.2) took 12.1s\nlat: -120.4, long: 36.3\n6019 at POINT (-120.4 36.3) took 12.8s\nlat: -120.4, long: 36.4\n6019 at POINT (-120.4 36.4) took 12.0s\nlat: -120.4, long: 36.5\n6019 at POINT (-120.4 36.5) took 11.4s\nlat: -120.4, long: 36.6\n6019 at POINT (-120.4 36.6) took 11.1s\nlat: -120.4, long: 36.7\n6019 at POINT (-120.4 36.7) took 12.4s\nlat: -120.4, long: 36.8\n6019 at POINT (-120.4 36.8) took 12.7s\nlat: -120.3, long: 36.0\n6019 at POINT (-120.3 36) took 10.4s\nlat: -120.3, long: 36.1\n6019 at POINT (-120.3 36.1) took 10.1s\nlat: -120.3, long: 36.2\n6019 at POINT (-120.3 36.2) took 70.8s\nlat: -120.3, long: 36.3\n6019 at POINT (-120.3 36.3) took 11.4s\nlat: -120.3, long: 36.4\n6019 at POINT (-120.3 36.4) took 11.0s\nlat: -120.3, long: 36.5\n6019 at POINT (-120.3 36.5) took 70.8s\nlat: -120.3, long: 36.6\n6019 at POINT (-120.3 36.6) took 11.4s\nlat: -120.3, long: 36.7\n6019 at POINT (-120.3 36.7) took 11.2s\nlat: -120.2, long: 36.0\n6019 at POINT (-120.2 36) took 11.2s\nlat: -120.2, long: 36.1\n6019 at POINT (-120.2 36.1) took 10.2s\nlat: -120.2, long: 36.2\n6019 at POINT (-120.2 36.2) took 10.6s\nlat: -120.2, long: 36.3\n6019 at POINT (-120.2 36.3) took 11.5s\nlat: -120.2, long: 36.4\n6019 at POINT (-120.2 36.4) took 71.6s\nlat: -120.2, long: 36.5\n6019 at POINT (-120.2 36.5) took 11.5s\nlat: -120.2, long: 36.6\n6019 at POINT (-120.2 36.6) took 11.4s\nlat: -120.2, long: 36.7\n6019 at POINT (-120.2 36.7) took 10.7s\nlat: -120.1, long: 36.1\n6019 at POINT (-120.1 36.1) took 11.4s\nlat: -120.1, long: 36.2\n6019 at POINT (-120.1 36.2) took 10.8s\nlat: -120.1, long: 36.3\n6019 at POINT (-120.1 36.3) took 11.0s\nlat: -120.1, long: 36.4\n6019 at POINT (-120.1 36.4) took 10.6s\nlat: -120.1, long: 36.5\n6019 at POINT (-120.1 36.5) took 10.3s\nlat: -120.1, long: 36.6\n6019 at POINT (-120.1 36.6) took 11.2s\nlat: -120.1, long: 36.7\n6019 at POINT (-120.1 36.7) took 11.4s\nlat: -120.1, long: 36.8\n6019 at POINT (-120.1 36.8) took 11.3s\nlat: -120.0, long: 36.2\n6019 at POINT (-120 36.2) took 10.9s\nlat: -120.0, long: 36.3\n6019 at POINT (-120 36.3) took 10.2s\nlat: -120.0, long: 36.4\n6019 at POINT (-120 36.4) took 10.3s\nlat: -120.0, long: 36.6\n6019 at POINT (-120 36.6) took 10.7s\nlat: -120.0, long: 36.7\n6019 at POINT (-120 36.7) took 10.1s\nlat: -120.0, long: 36.8\n6019 at POINT (-120 36.8) took 10.3s\nlat: -119.9, long: 36.5\n6019 at POINT (-119.9 36.5) took 10.8s\nlat: -119.9, long: 36.6\n6019 at POINT (-119.9 36.6) took 10.6s\nlat: -119.9, long: 36.7\n6019 at POINT (-119.9 36.7) took 10.1s\nlat: -119.9, long: 36.8\n6019 at POINT (-119.9 36.8) took 10.1s\nlat: -119.8, long: 36.5\n6019 at POINT (-119.8 36.5) took 9.9s\nlat: -119.8, long: 36.6\n6019 at POINT (-119.8 36.6) took 10.5s\nlat: -119.8, long: 36.7\n6019 at POINT (-119.8 36.7) took 10.0s\nlat: -119.8, long: 36.8\n6019 at POINT (-119.8 36.8) took 10.4s\nlat: -119.7, long: 36.5\n6019 at POINT (-119.7 36.5) took 10.5s\nlat: -119.7, long: 36.6\n6019 at POINT (-119.7 36.6) took 10.8s\nlat: 
-119.7, long: 36.7\n6019 at POINT (-119.7 36.7) took 10.4s\nlat: -119.7, long: 36.8\n6019 at POINT (-119.7 36.8) took 10.7s\nlat: -119.7, long: 36.9\n6019 at POINT (-119.7 36.9) took 10.7s\nlat: -119.7, long: 37.0\n6019 at POINT (-119.7 37) took 10.5s\nlat: -119.6, long: 36.5\n6019 at POINT (-119.6 36.5) took 10.7s\nlat: -119.6, long: 36.6\n6019 at POINT (-119.6 36.6) took 11.3s\nlat: -119.6, long: 36.7\n6019 at POINT (-119.6 36.7) took 13.0s\nlat: -119.6, long: 36.8\n6019 at POINT (-119.6 36.8) took 12.0s\nlat: -119.6, long: 36.9\n6019 at POINT (-119.6 36.9) took 11.2s\nlat: -119.6, long: 37.0\n6019 at POINT (-119.6 37) took 11.3s\nlat: -119.5, long: 36.6\n6019 at POINT (-119.5 36.6) took 10.4s\nlat: -119.5, long: 36.7\n6019 at POINT (-119.5 36.7) took 10.3s\nlat: -119.5, long: 36.8\n6019 at POINT (-119.5 36.8) took 10.2s\nlat: -119.5, long: 36.9\n6019 at POINT (-119.5 36.9) took 10.4s\nlat: -119.5, long: 37.1\n6019 at POINT (-119.5 37.1) took 10.1s\nlat: -119.4, long: 36.6\n6019 at POINT (-119.4 36.6) took 10.7s\nlat: -119.4, long: 36.7\n6019 at POINT (-119.4 36.7) took 11.8s\nlat: -119.4, long: 36.8\n6019 at POINT (-119.4 36.8) took 10.7s\nlat: -119.4, long: 36.9\n6019 at POINT (-119.4 36.9) took 10.5s\nlat: -119.4, long: 37.0\n6019 at POINT (-119.4 37) took 10.0s\nlat: -119.4, long: 37.1\n6019 at POINT (-119.4 37.1) took 10.6s\nlat: -119.3, long: 36.7\n6019 at POINT (-119.3 36.7) took 11.0s\nlat: -119.3, long: 36.8\n6019 at POINT (-119.3 36.8) took 11.6s\nlat: -119.3, long: 36.9\n6019 at POINT (-119.3 36.9) took 10.6s\nlat: -119.3, long: 37.0\n6019 at POINT (-119.3 37) took 10.4s\nlat: -119.3, long: 37.1\n6019 at POINT (-119.3 37.1) took 11.9s\nlat: -119.3, long: 37.2\n6019 at POINT (-119.3 37.2) took 11.9s\nlat: -119.3, long: 37.3\n6019 at POINT (-119.3 37.3) took 10.6s\nlat: -119.2, long: 36.7\n6019 at POINT (-119.2 36.7) took 10.9s\nlat: -119.2, long: 36.8\n6019 at POINT (-119.2 36.8) took 11.4s\nlat: -119.2, long: 36.9\n6019 at POINT (-119.2 36.9) took 11.5s\nlat: -119.2, long: 37.0\n6019 at POINT (-119.2 37) took 11.1s\nlat: -119.2, long: 37.1\n6019 at POINT (-119.2 37.1) took 11.1s\nlat: -119.2, long: 37.2\n6019 at POINT (-119.2 37.2) took 11.0s\nlat: -119.2, long: 37.3\n6019 at POINT (-119.2 37.3) took 11.2s\nlat: -119.2, long: 37.4\n6019 at POINT (-119.2 37.4) took 10.9s\nlat: -119.1, long: 36.7\n6019 at POINT (-119.1 36.7) took 11.1s\nlat: -119.1, long: 36.8\n6019 at POINT (-119.1 36.8) took 13.5s\nlat: -119.1, long: 36.9\n6019 at POINT (-119.1 36.9) took 12.4s\nlat: -119.1, long: 37.0\n6019 at POINT (-119.1 37) took 11.7s\nlat: -119.1, long: 37.1\n6019 at POINT (-119.1 37.1) took 11.1s\nlat: -119.1, long: 37.2\n6019 at POINT (-119.1 37.2) took 11.0s\nlat: -119.1, long: 37.3\n6019 at POINT (-119.1 37.3) took 11.0s\nlat: -119.1, long: 37.4\n6019 at POINT (-119.1 37.4) took 10.8s\nlat: -119.1, long: 37.5\n6019 at POINT (-119.1 37.5) took 11.6s\nlat: -119.0, long: 36.7\n6019 at POINT (-119 36.7) took 11.1s\nlat: -119.0, long: 36.8\n6019 at POINT (-119 36.8) took 10.1s\nlat: -119.0, long: 36.9\n6019 at POINT (-119 36.9) took 10.0s\nlat: -119.0, long: 37.1\n6019 at POINT (-119 37.1) took 10.9s\nlat: -119.0, long: 37.2\n6019 at POINT (-119 37.2) took 9.5s\nlat: -119.0, long: 37.3\n6019 at POINT (-119 37.3) took 9.9s\nlat: -119.0, long: 37.4\n6019 at POINT (-119 37.4) took 9.8s\nlat: -118.9, long: 36.8\n6019 at POINT (-118.9 36.8) took 10.3s\nlat: -118.9, long: 36.9\n6019 at POINT (-118.9 36.9) took 10.8s\nlat: -118.9, long: 37.0\n6019 at POINT (-118.9 37) took 10.4s\nlat: -118.9, 
long: 37.1\n6019 at POINT (-118.9 37.1) took 10.2s\nlat: -118.9, long: 37.2\n6019 at POINT (-118.9 37.2) took 10.3s\nlat: -118.9, long: 37.3\n6019 at POINT (-118.9 37.3) took 10.9s\nlat: -118.9, long: 37.4\n6019 at POINT (-118.9 37.4) took 10.2s\nlat: -118.9, long: 37.5\n6019 at POINT (-118.9 37.5) took 10.4s\nlat: -118.8, long: 36.8\n6019 at POINT (-118.8 36.8) took 10.5s\nlat: -118.8, long: 36.9\n6019 at POINT (-118.8 36.9) took 10.3s\nlat: -118.8, long: 37.0\n6019 at POINT (-118.8 37) took 10.1s\nlat: -118.8, long: 37.1\n6019 at POINT (-118.8 37.1) took 10.9s\nlat: -118.8, long: 37.2\n6019 at POINT (-118.8 37.2) took 10.2s\nlat: -118.8, long: 37.3\n6019 at POINT (-118.8 37.3) took 10.3s\nlat: -118.8, long: 37.4\n6019 at POINT (-118.8 37.4) took 10.5s\nlat: -118.7, long: 36.8\n6019 at POINT (-118.7 36.8) took 10.3s\nlat: -118.7, long: 36.9\n6019 at POINT (-118.7 36.9) took 10.6s\nlat: -118.7, long: 37.0\n6019 at POINT (-118.7 37) took 10.5s\nlat: -118.7, long: 37.1\n6019 at POINT (-118.7 37.1) took 10.4s\nlat: -118.7, long: 37.2\n6019 at POINT (-118.7 37.2) took 10.3s\nlat: -118.7, long: 37.3\n6019 at POINT (-118.7 37.3) took 10.5s\nlat: -118.6, long: 36.8\n6019 at POINT (-118.6 36.8) took 10.4s\nlat: -118.6, long: 36.9\n6019 at POINT (-118.6 36.9) took 10.1s\nlat: -118.6, long: 37.0\n6019 at POINT (-118.6 37) took 10.3s\nlat: -118.6, long: 37.1\n6019 at POINT (-118.6 37.1) took 10.8s\nlat: -118.5, long: 36.8\n6019 at POINT (-118.5 36.8) took 10.7s\nlat: -118.5, long: 36.9\n6019 at POINT (-118.5 36.9) took 10.5s\nlat: -118.4, long: 36.8\n6019 at POINT (-118.4 36.8) took 10.8s\nlat: -118.4, long: 36.9\n6019 at POINT (-118.4 36.9) took 11.1s\n6021 southwest to northeast: (-122.9, 39.4) to (-121.9, 39.8)\nlat: -122.8, long: 39.6\n6021 at POINT (-122.8 39.6) took 9.4s\nlat: -122.8, long: 39.7\n6021 at POINT (-122.8 39.7) took 9.4s\nlat: -122.8, long: 39.8\n6021 at POINT (-122.8 39.8) took 9.7s\nlat: -122.7, long: 39.4\n6021 at POINT (-122.7 39.4) took 9.9s\nlat: -122.7, long: 39.5\n6021 at POINT (-122.7 39.5) took 9.6s\nlat: -122.7, long: 39.6\n6021 at POINT (-122.7 39.6) took 9.5s\nlat: -122.7, long: 39.7\n6021 at POINT (-122.7 39.7) took 69.5s\nlat: -122.7, long: 39.8\n6021 at POINT (-122.7 39.8) took 9.7s\nlat: -122.6, long: 39.4\n6021 at POINT (-122.6 39.4) took 9.6s\nlat: -122.6, long: 39.5\n6021 at POINT (-122.6 39.5) took 69.7s\nlat: -122.6, long: 39.6\n6021 at POINT (-122.6 39.6) took 9.4s\nlat: -122.6, long: 39.7\n6021 at POINT (-122.6 39.7) took 69.9s\nlat: -122.5, long: 39.4\n6021 at POINT (-122.5 39.4) took 9.8s\nlat: -122.5, long: 39.6\n6021 at POINT (-122.5 39.6) took 9.6s\nlat: -122.5, long: 39.7\n6021 at POINT (-122.5 39.7) took 9.3s\nlat: -122.4, long: 39.4\n6021 at POINT (-122.4 39.4) took 10.0s\nlat: -122.4, long: 39.5\n6021 at POINT (-122.4 39.5) took 9.7s\nlat: -122.4, long: 39.6\n6021 at POINT (-122.4 39.6) took 9.7s\nlat: -122.4, long: 39.7\n6021 at POINT (-122.4 39.7) took 10.0s\nlat: -122.3, long: 39.4\n6021 at POINT (-122.3 39.4) took 10.0s\nlat: -122.3, long: 39.5\n6021 at POINT (-122.3 39.5) took 9.9s\nlat: -122.3, long: 39.6\n6021 at POINT (-122.3 39.6) took 9.9s\nlat: -122.3, long: 39.7\n6021 at POINT (-122.3 39.7) took 9.7s\nlat: -122.2, long: 39.4\n6021 at POINT (-122.2 39.4) took 9.9s\nlat: -122.2, long: 39.5\n6021 at POINT (-122.2 39.5) took 9.9s\nlat: -122.2, long: 39.6\n6021 at POINT (-122.2 39.6) took 9.8s\nlat: -122.2, long: 39.7\n6021 at POINT (-122.2 39.7) took 9.8s\nlat: -122.1, long: 39.5\n6021 at POINT (-122.1 39.5) took 10.2s\nlat: -122.1, long: 
39.6\n6021 at POINT (-122.1 39.6) took 10.3s\nlat: -122.1, long: 39.7\n6021 at POINT (-122.1 39.7) took 10.0s\nlat: -122.0, long: 39.4\n6021 at POINT (-122 39.4) took 9.8s\nlat: -122.0, long: 39.6\n6021 at POINT (-122 39.6) took 9.8s\nlat: -122.0, long: 39.7\n6021 at POINT (-122 39.7) took 9.7s\nlat: -121.9, long: 39.4\n6021 at POINT (-121.9 39.4) took 9.9s\nlat: -121.9, long: 39.5\n6021 at POINT (-121.9 39.5) took 9.8s\n6023 southwest to northeast: (-124.4, 40.0) to (-123.4, 41.5)\nlat: -124.3, long: 40.3\n6023 at POINT (-124.3 40.3) took 9.8s\nlat: -124.3, long: 40.4\n6023 at POINT (-124.3 40.4) took 9.4s\nlat: -124.3, long: 40.5\n6023 at POINT (-124.3 40.5) took 9.5s\nlat: -124.3, long: 40.6\n6023 at POINT (-124.3 40.6) took 9.7s\nlat: -124.2, long: 40.2\n6023 at POINT (-124.2 40.2) took 9.9s\nlat: -124.2, long: 40.3\n6023 at POINT (-124.2 40.3) took 9.6s\nlat: -124.2, long: 40.4\n6023 at POINT (-124.2 40.4) took 9.6s\nlat: -124.2, long: 40.5\n6023 at POINT (-124.2 40.5) took 9.8s\nlat: -124.2, long: 40.6\n6023 at POINT (-124.2 40.6) took 9.8s\nlat: -124.2, long: 40.7\n6023 at POINT (-124.2 40.7) took 9.7s\nlat: -124.2, long: 40.8\n6023 at POINT (-124.2 40.8) took 9.7s\nlat: -124.1, long: 40.1\n6023 at POINT (-124.1 40.1) took 10.0s\nlat: -124.1, long: 40.2\n6023 at POINT (-124.1 40.2) took 12.3s\nlat: -124.1, long: 40.3\n6023 at POINT (-124.1 40.3) took 10.0s\nlat: -124.1, long: 40.4\n6023 at POINT (-124.1 40.4) took 10.2s\nlat: -124.1, long: 40.5\n6023 at POINT (-124.1 40.5) took 10.0s\nlat: -124.1, long: 40.6\n6023 at POINT (-124.1 40.6) took 10.1s\nlat: -124.1, long: 40.7\n6023 at POINT (-124.1 40.7) took 9.9s\nlat: -124.1, long: 40.8\n6023 at POINT (-124.1 40.8) took 9.9s\nlat: -124.1, long: 40.9\n6023 at POINT (-124.1 40.9) took 10.1s\nlat: -124.1, long: 41.0\n6023 at POINT (-124.1 41) took 9.8s\nlat: -124.1, long: 41.1\n6023 at POINT (-124.1 41.1) took 10.2s\nlat: -124.1, long: 41.2\n6023 at POINT (-124.1 41.2) took 10.0s\nlat: -124.0, long: 40.1\n6023 at POINT (-124 40.1) took 10.6s\nlat: -124.0, long: 40.2\n6023 at POINT (-124 40.2) took 10.4s\nlat: -124.0, long: 40.3\n6023 at POINT (-124 40.3) took 10.2s\nlat: -124.0, long: 40.4\n6023 at POINT (-124 40.4) took 9.9s\nlat: -124.0, long: 40.6\n6023 at POINT (-124 40.6) took 10.0s\nlat: -124.0, long: 40.7\n6023 at POINT (-124 40.7) took 9.9s\nlat: -124.0, long: 40.8\n6023 at POINT (-124 40.8) took 9.7s\nlat: -124.0, long: 40.9\n6023 at POINT (-124 40.9) took 9.6s\nlat: -124.0, long: 41.1\n6023 at POINT (-124 41.1) took 9.8s\nlat: -124.0, long: 41.2\n6023 at POINT (-124 41.2) took 9.6s\nlat: -124.0, long: 41.3\n6023 at POINT (-124 41.3) took 10.1s\nlat: -124.0, long: 41.4\n6023 at POINT (-124 41.4) took 9.9s\nlat: -123.9, long: 40.1\n6023 at POINT (-123.9 40.1) took 10.4s\nlat: -123.9, long: 40.2\n6023 at POINT (-123.9 40.2) took 10.1s\nlat: -123.9, long: 40.3\n6023 at POINT (-123.9 40.3) took 10.0s\nlat: -123.9, long: 40.4\n6023 at POINT (-123.9 40.4) took 10.0s\nlat: -123.9, long: 40.5\n6023 at POINT (-123.9 40.5) took 10.1s\nlat: -123.9, long: 40.6\n6023 at POINT (-123.9 40.6) took 10.3s\nlat: -123.9, long: 40.7\n6023 at POINT (-123.9 40.7) took 10.4s\nlat: -123.9, long: 40.8\n6023 at POINT (-123.9 40.8) took 10.2s\nlat: -123.9, long: 40.9\n6023 at POINT (-123.9 40.9) took 10.3s\nlat: -123.9, long: 41.0\n6023 at POINT (-123.9 41) took 10.1s\nlat: -123.9, long: 41.1\n6023 at POINT (-123.9 41.1) took 10.3s\nlat: -123.9, long: 41.2\n6023 at POINT (-123.9 41.2) took 70.6s\nlat: -123.9, long: 41.3\n6023 at POINT (-123.9 41.3) took 
10.8s\nlat: -123.9, long: 41.4\n6023 at POINT (-123.9 41.4) took 70.8s\nlat: -123.8, long: 40.1\n6023 at POINT (-123.8 40.1) took 10.4s\nlat: -123.8, long: 40.2\n6023 at POINT (-123.8 40.2) took 10.7s\nlat: -123.8, long: 40.3\n6023 at POINT (-123.8 40.3) took 10.7s\nlat: -123.8, long: 40.4\n6023 at POINT (-123.8 40.4) took 11.1s\nlat: -123.8, long: 40.5\n6023 at POINT (-123.8 40.5) took 10.8s\nlat: -123.8, long: 40.6\n6023 at POINT (-123.8 40.6) took 10.6s\nlat: -123.8, long: 40.7\n6023 at POINT (-123.8 40.7) took 10.8s\nlat: -123.8, long: 40.8\n6023 at POINT (-123.8 40.8) took 11.0s\nlat: -123.8, long: 40.9\n6023 at POINT (-123.8 40.9) took 10.6s\nlat: -123.8, long: 41.0\n6023 at POINT (-123.8 41) took 10.8s\nlat: -123.8, long: 41.1\n6023 at POINT (-123.8 41.1) took 10.6s\nlat: -123.8, long: 41.2\n6023 at POINT (-123.8 41.2) took 10.8s\nlat: -123.8, long: 41.3\n6023 at POINT (-123.8 41.3) took 10.6s\nlat: -123.8, long: 41.4\n6023 at POINT (-123.8 41.4) took 10.6s\nlat: -123.7, long: 40.1\n6023 at POINT (-123.7 40.1) took 10.5s\nlat: -123.7, long: 40.2\n6023 at POINT (-123.7 40.2) took 10.5s\nlat: -123.7, long: 40.3\n6023 at POINT (-123.7 40.3) took 10.5s\nlat: -123.7, long: 40.4\n6023 at POINT (-123.7 40.4) took 10.4s\nlat: -123.7, long: 40.5\n6023 at POINT (-123.7 40.5) took 10.5s\nlat: -123.7, long: 40.6\n6023 at POINT (-123.7 40.6) took 10.2s\nlat: -123.7, long: 40.7\n6023 at POINT (-123.7 40.7) took 10.4s\nlat: -123.7, long: 40.8\n6023 at POINT (-123.7 40.8) took 10.3s\nlat: -123.7, long: 40.9\n6023 at POINT (-123.7 40.9) took 10.2s\nlat: -123.7, long: 41.0\n6023 at POINT (-123.7 41) took 10.1s\nlat: -123.7, long: 41.1\n6023 at POINT (-123.7 41.1) took 10.5s\nlat: -123.7, long: 41.2\n6023 at POINT (-123.7 41.2) took 10.4s\nlat: -123.7, long: 41.3\n6023 at POINT (-123.7 41.3) took 10.5s\nlat: -123.6, long: 40.1\n6023 at POINT (-123.6 40.1) took 10.2s\nlat: -123.6, long: 40.2\n6023 at POINT (-123.6 40.2) took 10.2s\nlat: -123.6, long: 40.3\n6023 at POINT (-123.6 40.3) took 10.4s\nlat: -123.6, long: 40.4\n6023 at POINT (-123.6 40.4) took 10.7s\nlat: -123.6, long: 40.5\n6023 at POINT (-123.6 40.5) took 10.2s\nlat: -123.6, long: 40.6\n6023 at POINT (-123.6 40.6) took 10.6s\nlat: -123.6, long: 40.7\n6023 at POINT (-123.6 40.7) took 10.3s\nlat: -123.6, long: 40.8\n6023 at POINT (-123.6 40.8) took 10.4s\nlat: -123.6, long: 41.0\n6023 at POINT (-123.6 41) took 10.2s\nlat: -123.6, long: 41.1\n6023 at POINT (-123.6 41.1) took 10.5s\nlat: -123.6, long: 41.2\n6023 at POINT (-123.6 41.2) took 10.5s\nlat: -123.6, long: 41.3\n6023 at POINT (-123.6 41.3) took 11.6s\nlat: -123.5, long: 41.1\n6023 at POINT (-123.5 41.1) took 10.4s\nlat: -123.5, long: 41.2\n6023 at POINT (-123.5 41.2) took 10.5s\nlat: -123.5, long: 41.3\n6023 at POINT (-123.5 41.3) took 10.6s\n6025 southwest to northeast: (-116.1, 32.6) to (-114.5, 33.4)\nlat: -116.1, long: 32.7\n6025 at POINT (-116.1 32.7) took 11.2s\nlat: -116.1, long: 32.8\n6025 at POINT (-116.1 32.8) took 10.3s\nlat: -116.1, long: 32.9\n6025 at POINT (-116.1 32.9) took 11.2s\nlat: -116.1, long: 33.0\n6025 at POINT (-116.1 33) took 10.3s\nlat: -116.0, long: 32.7\n6025 at POINT (-116 32.7) took 10.7s\nlat: -116.0, long: 32.8\n6025 at POINT (-116 32.8) took 10.0s\nlat: -116.0, long: 32.9\n6025 at POINT (-116 32.9) took 10.8s\nlat: -116.0, long: 33.1\n6025 at POINT (-116 33.1) took 10.7s\nlat: -116.0, long: 33.2\n6025 at POINT (-116 33.2) took 11.2s\nlat: -116.0, long: 33.3\n6025 at POINT (-116 33.3) took 12.0s\nlat: -116.0, long: 33.4\n6025 at POINT (-116 33.4) took 
12.6s\nlat: -115.9, long: 32.7\n6025 at POINT (-115.9 32.7) took 11.3s\nlat: -115.9, long: 32.8\n6025 at POINT (-115.9 32.8) took 11.8s\nlat: -115.9, long: 32.9\n6025 at POINT (-115.9 32.9) took 11.6s\nlat: -115.9, long: 33.0\n6025 at POINT (-115.9 33) took 11.3s\nlat: -115.9, long: 33.1\n6025 at POINT (-115.9 33.1) took 13.2s\nlat: -115.9, long: 33.2\n6025 at POINT (-115.9 33.2) took 12.0s\nlat: -115.9, long: 33.3\n6025 at POINT (-115.9 33.3) took 11.8s\nlat: -115.9, long: 33.4\n6025 at POINT (-115.9 33.4) took 12.9s\nlat: -115.8, long: 32.7\n6025 at POINT (-115.8 32.7) took 12.0s\nlat: -115.8, long: 32.8\n6025 at POINT (-115.8 32.8) took 11.8s\nlat: -115.8, long: 32.9\n6025 at POINT (-115.8 32.9) took 13.7s\nlat: -115.8, long: 33.0\n6025 at POINT (-115.8 33) took 12.7s\nlat: -115.8, long: 33.1\n6025 at POINT (-115.8 33.1) took 12.2s\nlat: -115.8, long: 33.2\n6025 at POINT (-115.8 33.2) took 12.7s\nlat: -115.8, long: 33.3\n6025 at POINT (-115.8 33.3) took 11.9s\nlat: -115.8, long: 33.4\n6025 at POINT (-115.8 33.4) took 11.7s\nlat: -115.7, long: 32.7\n6025 at POINT (-115.7 32.7) took 12.3s\nlat: -115.7, long: 32.8\n6025 at POINT (-115.7 32.8) took 12.1s\nlat: -115.7, long: 32.9\n6025 at POINT (-115.7 32.9) took 11.5s\nlat: -115.7, long: 33.0\n6025 at POINT (-115.7 33) took 11.1s\nlat: -115.7, long: 33.1\n6025 at POINT (-115.7 33.1) took 11.1s\nlat: -115.7, long: 33.2\n6025 at POINT (-115.7 33.2) took 11.2s\nlat: -115.7, long: 33.3\n6025 at POINT (-115.7 33.3) took 11.2s\nlat: -115.7, long: 33.4\n6025 at POINT (-115.7 33.4) took 11.0s\nlat: -115.6, long: 32.7\n6025 at POINT (-115.6 32.7) took 10.7s\nlat: -115.6, long: 32.8\n6025 at POINT (-115.6 32.8) took 11.0s\nlat: -115.6, long: 32.9\n6025 at POINT (-115.6 32.9) took 11.1s\nlat: -115.6, long: 33.0\n6025 at POINT (-115.6 33) took 10.2s\nlat: -115.6, long: 33.1\n6025 at POINT (-115.6 33.1) took 10.2s\nlat: -115.6, long: 33.2\n6025 at POINT (-115.6 33.2) took 10.3s\nlat: -115.6, long: 33.3\n6025 at POINT (-115.6 33.3) took 10.4s\nlat: -115.6, long: 33.4\n6025 at POINT (-115.6 33.4) took 10.3s\nlat: -115.5, long: 32.7\n6025 at POINT (-115.5 32.7) took 10.5s\nlat: -115.5, long: 32.8\n6025 at POINT (-115.5 32.8) took 10.9s\nlat: -115.5, long: 32.9\n6025 at POINT (-115.5 32.9) took 10.3s\nlat: -115.5, long: 33.1\n6025 at POINT (-115.5 33.1) took 9.9s\nlat: -115.5, long: 33.2\n6025 at POINT (-115.5 33.2) took 10.2s\nlat: -115.5, long: 33.3\n6025 at POINT (-115.5 33.3) took 9.7s\nlat: -115.5, long: 33.4\n6025 at POINT (-115.5 33.4) took 9.9s\nlat: -115.4, long: 32.7\n6025 at POINT (-115.4 32.7) took 12.8s\nlat: -115.4, long: 32.8\n6025 at POINT (-115.4 32.8) took 10.1s\nlat: -115.4, long: 32.9\n6025 at POINT (-115.4 32.9) took 9.8s\nlat: -115.4, long: 33.0\n6025 at POINT (-115.4 33) took 11.1s\nlat: -115.4, long: 33.1\n6025 at POINT (-115.4 33.1) took 10.9s\nlat: -115.4, long: 33.2\n6025 at POINT (-115.4 33.2) took 10.6s\nlat: -115.4, long: 33.3\n6025 at POINT (-115.4 33.3) took 10.6s\nlat: -115.4, long: 33.4\n6025 at POINT (-115.4 33.4) took 10.5s\nlat: -115.3, long: 32.7\n6025 at POINT (-115.3 32.7) took 10.5s\nlat: -115.3, long: 32.8\n6025 at POINT (-115.3 32.8) took 12.6s\nlat: -115.3, long: 32.9\n6025 at POINT (-115.3 32.9) took 11.0s\nlat: -115.3, long: 33.0\n6025 at POINT (-115.3 33) took 10.4s\nlat: -115.3, long: 33.1\n6025 at POINT (-115.3 33.1) took 10.5s\nlat: -115.3, long: 33.2\n6025 at POINT (-115.3 33.2) took 10.1s\nlat: -115.3, long: 33.3\n6025 at POINT (-115.3 33.3) took 10.2s\nlat: -115.3, long: 33.4\n6025 at POINT (-115.3 33.4) 
[verbose progress log condensed -- the original output repeats the same two-line pattern thousands of times]

For each California county FIPS code, the cell prints the county's bounding box ("southwest to northeast") and then, for every retained point on a 0.1-degree grid inside that box, a pair of lines such as:

    lat: -118.4, long: 37.0
    6027 at POINT (-118.4 37) took 9.7s

Note that the two labels are swapped in the log: the value after "lat:" is the longitude and the value after "long:" is the latitude. Most points take about 9-11s each, with occasional spikes to roughly 70s. Some grid points inside each bounding box are skipped, suggesting a containment test against the county polygon itself.

Bounding boxes reported in this stretch of the log:

    6025: (box printed earlier; points span -115.2..-114.5 by 32.7..33.4)
    6027: (-118.8, 35.8) to (-115.6, 37.5)
    6029: (-120.2, 34.8) to (-117.6, 35.8)
    6031: (-120.3, 35.8) to (-119.5, 36.5)
    6033: (-123.1, 38.7) to (-122.3, 39.6)
    6035: (-121.3, 39.7) to (-120.0, 41.2)
    6037: (-118.9, 33.7) to (-117.6, 34.8)
    6039: (-120.5, 36.8) to (-119.0, 37.8)
    6041: (-123.0, 37.8) to (-122.4, 38.3)
    6043: (-120.4, 37.2) to (-119.3, 37.9)
    6045: (-124.0, 38.8) to (-122.8, 40.0)
    6047: (-121.2, 36.7) to (-120.1, 37.6)
    6049: (-121.5, 41.2) to (-120.0, 42.0)
at POINT (-120.6 41.8) took 9.0s\nlat: -120.6, long: 41.9\n6049 at POINT (-120.6 41.9) took 9.1s\nlat: -120.5, long: 41.2\n6049 at POINT (-120.5 41.2) took 9.6s\nlat: -120.5, long: 41.3\n6049 at POINT (-120.5 41.3) took 9.3s\nlat: -120.5, long: 41.4\n6049 at POINT (-120.5 41.4) took 9.0s\nlat: -120.5, long: 41.6\n6049 at POINT (-120.5 41.6) took 9.0s\nlat: -120.5, long: 41.7\n6049 at POINT (-120.5 41.7) took 8.9s\nlat: -120.5, long: 41.8\n6049 at POINT (-120.5 41.8) took 9.1s\nlat: -120.5, long: 41.9\n6049 at POINT (-120.5 41.9) took 9.1s\nlat: -120.4, long: 41.2\n6049 at POINT (-120.4 41.2) took 9.3s\nlat: -120.4, long: 41.3\n6049 at POINT (-120.4 41.3) took 9.0s\nlat: -120.4, long: 41.4\n6049 at POINT (-120.4 41.4) took 9.1s\nlat: -120.4, long: 41.5\n6049 at POINT (-120.4 41.5) took 9.1s\nlat: -120.4, long: 41.6\n6049 at POINT (-120.4 41.6) took 9.2s\nlat: -120.4, long: 41.7\n6049 at POINT (-120.4 41.7) took 9.0s\nlat: -120.4, long: 41.8\n6049 at POINT (-120.4 41.8) took 9.3s\nlat: -120.4, long: 41.9\n6049 at POINT (-120.4 41.9) took 9.6s\nlat: -120.3, long: 41.2\n6049 at POINT (-120.3 41.2) took 9.4s\nlat: -120.3, long: 41.3\n6049 at POINT (-120.3 41.3) took 9.5s\nlat: -120.3, long: 41.4\n6049 at POINT (-120.3 41.4) took 9.2s\nlat: -120.3, long: 41.5\n6049 at POINT (-120.3 41.5) took 9.2s\nlat: -120.3, long: 41.6\n6049 at POINT (-120.3 41.6) took 9.3s\nlat: -120.3, long: 41.7\n6049 at POINT (-120.3 41.7) took 9.2s\nlat: -120.3, long: 41.8\n6049 at POINT (-120.3 41.8) took 9.5s\nlat: -120.3, long: 41.9\n6049 at POINT (-120.3 41.9) took 9.2s\nlat: -120.2, long: 41.2\n6049 at POINT (-120.2 41.2) took 9.2s\nlat: -120.2, long: 41.3\n6049 at POINT (-120.2 41.3) took 9.1s\nlat: -120.2, long: 41.4\n6049 at POINT (-120.2 41.4) took 9.0s\nlat: -120.2, long: 41.5\n6049 at POINT (-120.2 41.5) took 9.4s\nlat: -120.2, long: 41.6\n6049 at POINT (-120.2 41.6) took 9.5s\nlat: -120.2, long: 41.7\n6049 at POINT (-120.2 41.7) took 9.5s\nlat: -120.2, long: 41.8\n6049 at POINT (-120.2 41.8) took 9.2s\nlat: -120.2, long: 41.9\n6049 at POINT (-120.2 41.9) took 9.0s\nlat: -120.1, long: 41.2\n6049 at POINT (-120.1 41.2) took 9.3s\nlat: -120.1, long: 41.3\n6049 at POINT (-120.1 41.3) took 9.2s\nlat: -120.1, long: 41.4\n6049 at POINT (-120.1 41.4) took 9.0s\nlat: -120.1, long: 41.5\n6049 at POINT (-120.1 41.5) took 9.3s\nlat: -120.1, long: 41.6\n6049 at POINT (-120.1 41.6) took 9.0s\nlat: -120.1, long: 41.7\n6049 at POINT (-120.1 41.7) took 9.1s\nlat: -120.1, long: 41.8\n6049 at POINT (-120.1 41.8) took 9.0s\nlat: -120.1, long: 41.9\n6049 at POINT (-120.1 41.9) took 9.2s\nlat: -120.0, long: 41.6\n6049 at POINT (-120 41.6) took 9.1s\nlat: -120.0, long: 41.7\n6049 at POINT (-120 41.7) took 9.1s\nlat: -120.0, long: 41.8\n6049 at POINT (-120 41.8) took 8.9s\nlat: -120.0, long: 41.9\n6049 at POINT (-120 41.9) took 9.0s\n6051 southwest to northeast: (-119.7, 37.5) to (-117.8, 38.7)\nlat: -119.6, long: 38.2\n6051 at POINT (-119.6 38.2) took 9.2s\nlat: -119.6, long: 38.3\n6051 at POINT (-119.6 38.3) took 9.0s\nlat: -119.6, long: 38.4\n6051 at POINT (-119.6 38.4) took 9.1s\nlat: -119.6, long: 38.6\n6051 at POINT (-119.6 38.6) took 9.0s\nlat: -119.5, long: 38.2\n6051 at POINT (-119.5 38.2) took 9.2s\nlat: -119.5, long: 38.3\n6051 at POINT (-119.5 38.3) took 9.2s\nlat: -119.5, long: 38.4\n6051 at POINT (-119.5 38.4) took 9.1s\nlat: -119.5, long: 38.6\n6051 at POINT (-119.5 38.6) took 9.1s\nlat: -119.4, long: 38.2\n6051 at POINT (-119.4 38.2) took 9.4s\nlat: -119.4, long: 38.3\n6051 at POINT (-119.4 38.3) took 9.1s\nlat: 
-119.4, long: 38.4\n6051 at POINT (-119.4 38.4) took 9.2s\nlat: -119.4, long: 38.5\n6051 at POINT (-119.4 38.5) took 9.4s\nlat: -119.3, long: 38.0\n6051 at POINT (-119.3 38) took 9.2s\nlat: -119.3, long: 38.1\n6051 at POINT (-119.3 38.1) took 9.1s\nlat: -119.3, long: 38.2\n6051 at POINT (-119.3 38.2) took 9.1s\nlat: -119.3, long: 38.3\n6051 at POINT (-119.3 38.3) took 9.1s\nlat: -119.3, long: 38.4\n6051 at POINT (-119.3 38.4) took 9.0s\nlat: -119.3, long: 38.5\n6051 at POINT (-119.3 38.5) took 9.0s\nlat: -119.2, long: 37.8\n6051 at POINT (-119.2 37.8) took 9.1s\nlat: -119.2, long: 37.9\n6051 at POINT (-119.2 37.9) took 9.3s\nlat: -119.2, long: 38.0\n6051 at POINT (-119.2 38) took 9.2s\nlat: -119.2, long: 38.1\n6051 at POINT (-119.2 38.1) took 9.2s\nlat: -119.2, long: 38.2\n6051 at POINT (-119.2 38.2) took 9.1s\nlat: -119.2, long: 38.3\n6051 at POINT (-119.2 38.3) took 9.2s\nlat: -119.2, long: 38.4\n6051 at POINT (-119.2 38.4) took 9.3s\nlat: -119.1, long: 37.8\n6051 at POINT (-119.1 37.8) took 9.3s\nlat: -119.1, long: 37.9\n6051 at POINT (-119.1 37.9) took 9.2s\nlat: -119.1, long: 38.0\n6051 at POINT (-119.1 38) took 9.2s\nlat: -119.1, long: 38.1\n6051 at POINT (-119.1 38.1) took 9.1s\nlat: -119.1, long: 38.2\n6051 at POINT (-119.1 38.2) took 9.2s\nlat: -119.1, long: 38.3\n6051 at POINT (-119.1 38.3) took 9.3s\nlat: -119.0, long: 37.6\n6051 at POINT (-119 37.6) took 9.1s\nlat: -119.0, long: 37.7\n6051 at POINT (-119 37.7) took 9.0s\nlat: -119.0, long: 37.8\n6051 at POINT (-119 37.8) took 9.0s\nlat: -119.0, long: 37.9\n6051 at POINT (-119 37.9) took 9.0s\nlat: -119.0, long: 38.1\n6051 at POINT (-119 38.1) took 9.2s\nlat: -119.0, long: 38.2\n6051 at POINT (-119 38.2) took 9.4s\nlat: -119.0, long: 38.3\n6051 at POINT (-119 38.3) took 9.4s\nlat: -118.9, long: 37.6\n6051 at POINT (-118.9 37.6) took 9.2s\nlat: -118.9, long: 37.7\n6051 at POINT (-118.9 37.7) took 9.1s\nlat: -118.9, long: 37.8\n6051 at POINT (-118.9 37.8) took 9.1s\nlat: -118.9, long: 37.9\n6051 at POINT (-118.9 37.9) took 9.1s\nlat: -118.9, long: 38.0\n6051 at POINT (-118.9 38) took 9.1s\nlat: -118.9, long: 38.1\n6051 at POINT (-118.9 38.1) took 9.0s\nlat: -118.9, long: 38.2\n6051 at POINT (-118.9 38.2) took 9.3s\nlat: -118.8, long: 37.5\n6051 at POINT (-118.8 37.5) took 9.4s\nlat: -118.8, long: 37.6\n6051 at POINT (-118.8 37.6) took 9.1s\nlat: -118.8, long: 37.7\n6051 at POINT (-118.8 37.7) took 9.0s\nlat: -118.8, long: 37.8\n6051 at POINT (-118.8 37.8) took 9.2s\nlat: -118.8, long: 37.9\n6051 at POINT (-118.8 37.9) took 9.0s\nlat: -118.8, long: 38.0\n6051 at POINT (-118.8 38) took 9.3s\nlat: -118.8, long: 38.1\n6051 at POINT (-118.8 38.1) took 9.3s\nlat: -118.7, long: 37.5\n6051 at POINT (-118.7 37.5) took 9.1s\nlat: -118.7, long: 37.6\n6051 at POINT (-118.7 37.6) took 9.1s\nlat: -118.7, long: 37.7\n6051 at POINT (-118.7 37.7) took 9.1s\nlat: -118.7, long: 37.8\n6051 at POINT (-118.7 37.8) took 9.2s\nlat: -118.7, long: 37.9\n6051 at POINT (-118.7 37.9) took 9.3s\nlat: -118.7, long: 38.0\n6051 at POINT (-118.7 38) took 9.4s\nlat: -118.6, long: 37.5\n6051 at POINT (-118.6 37.5) took 9.1s\nlat: -118.6, long: 37.6\n6051 at POINT (-118.6 37.6) took 9.1s\nlat: -118.6, long: 37.7\n6051 at POINT (-118.6 37.7) took 9.0s\nlat: -118.6, long: 37.8\n6051 at POINT (-118.6 37.8) took 9.2s\nlat: -118.6, long: 37.9\n6051 at POINT (-118.6 37.9) took 9.1s\nlat: -118.6, long: 38.0\n6051 at POINT (-118.6 38) took 9.2s\nlat: -118.5, long: 37.6\n6051 at POINT (-118.5 37.6) took 9.2s\nlat: -118.5, long: 37.7\n6051 at POINT (-118.5 37.7) took 
9.2s\nlat: -118.5, long: 37.8\n6051 at POINT (-118.5 37.8) took 9.3s\nlat: -118.5, long: 37.9\n6051 at POINT (-118.5 37.9) took 9.1s\nlat: -118.4, long: 37.5\n6051 at POINT (-118.4 37.5) took 9.3s\nlat: -118.4, long: 37.6\n6051 at POINT (-118.4 37.6) took 9.2s\nlat: -118.4, long: 37.7\n6051 at POINT (-118.4 37.7) took 9.5s\nlat: -118.4, long: 37.8\n6051 at POINT (-118.4 37.8) took 9.1s\nlat: -118.3, long: 37.5\n6051 at POINT (-118.3 37.5) took 9.5s\nlat: -118.3, long: 37.6\n6051 at POINT (-118.3 37.6) took 9.4s\nlat: -118.3, long: 37.7\n6051 at POINT (-118.3 37.7) took 9.3s\nlat: -118.3, long: 37.8\n6051 at POINT (-118.3 37.8) took 9.3s\nlat: -118.2, long: 37.5\n6051 at POINT (-118.2 37.5) took 9.2s\nlat: -118.2, long: 37.6\n6051 at POINT (-118.2 37.6) took 9.1s\nlat: -118.2, long: 37.7\n6051 at POINT (-118.2 37.7) took 9.5s\nlat: -118.1, long: 37.5\n6051 at POINT (-118.1 37.5) took 9.4s\nlat: -118.1, long: 37.6\n6051 at POINT (-118.1 37.6) took 9.3s\nlat: -117.9, long: 37.5\n6051 at POINT (-117.9 37.5) took 9.2s\n6053 southwest to northeast: (-122.0, 35.8) to (-120.2, 36.9)\nlat: -121.9, long: 36.4\n6053 at POINT (-121.9 36.4) took 10.1s\nlat: -121.9, long: 36.5\n6053 at POINT (-121.9 36.5) took 9.2s\nlat: -121.9, long: 36.6\n6053 at POINT (-121.9 36.6) took 9.1s\nlat: -121.8, long: 36.3\n6053 at POINT (-121.8 36.3) took 9.2s\nlat: -121.8, long: 36.4\n6053 at POINT (-121.8 36.4) took 9.3s\nlat: -121.8, long: 36.5\n6053 at POINT (-121.8 36.5) took 9.2s\nlat: -121.8, long: 36.6\n6053 at POINT (-121.8 36.6) took 9.2s\nlat: -121.8, long: 36.7\n6053 at POINT (-121.8 36.7) took 9.3s\nlat: -121.7, long: 36.2\n6053 at POINT (-121.7 36.2) took 9.1s\nlat: -121.7, long: 36.3\n6053 at POINT (-121.7 36.3) took 9.3s\nlat: -121.7, long: 36.4\n6053 at POINT (-121.7 36.4) took 9.1s\nlat: -121.7, long: 36.5\n6053 at POINT (-121.7 36.5) took 9.0s\nlat: -121.7, long: 36.6\n6053 at POINT (-121.7 36.6) took 9.3s\nlat: -121.7, long: 36.7\n6053 at POINT (-121.7 36.7) took 9.7s\nlat: -121.7, long: 36.8\n6053 at POINT (-121.7 36.8) took 9.4s\nlat: -121.7, long: 36.9\n6053 at POINT (-121.7 36.9) took 9.2s\nlat: -121.6, long: 36.1\n6053 at POINT (-121.6 36.1) took 9.5s\nlat: -121.6, long: 36.2\n6053 at POINT (-121.6 36.2) took 9.4s\nlat: -121.6, long: 36.3\n6053 at POINT (-121.6 36.3) took 9.3s\nlat: -121.6, long: 36.4\n6053 at POINT (-121.6 36.4) took 9.3s\nlat: -121.6, long: 36.5\n6053 at POINT (-121.6 36.5) took 9.2s\nlat: -121.6, long: 36.6\n6053 at POINT (-121.6 36.6) took 9.4s\nlat: -121.6, long: 36.7\n6053 at POINT (-121.6 36.7) took 10.5s\nlat: -121.6, long: 36.8\n6053 at POINT (-121.6 36.8) took 9.6s\nlat: -121.5, long: 36.1\n6053 at POINT (-121.5 36.1) took 9.5s\nlat: -121.5, long: 36.2\n6053 at POINT (-121.5 36.2) took 9.5s\nlat: -121.5, long: 36.3\n6053 at POINT (-121.5 36.3) took 9.4s\nlat: -121.5, long: 36.4\n6053 at POINT (-121.5 36.4) took 9.6s\nlat: -121.5, long: 36.6\n6053 at POINT (-121.5 36.6) took 9.5s\nlat: -121.5, long: 36.7\n6053 at POINT (-121.5 36.7) took 9.4s\nlat: -121.4, long: 35.9\n6053 at POINT (-121.4 35.9) took 9.8s\nlat: -121.4, long: 36.0\n6053 at POINT (-121.4 36) took 9.3s\nlat: -121.4, long: 36.1\n6053 at POINT (-121.4 36.1) took 9.4s\nlat: -121.4, long: 36.2\n6053 at POINT (-121.4 36.2) took 9.3s\nlat: -121.4, long: 36.3\n6053 at POINT (-121.4 36.3) took 9.4s\nlat: -121.4, long: 36.4\n6053 at POINT (-121.4 36.4) took 9.6s\nlat: -121.4, long: 36.5\n6053 at POINT (-121.4 36.5) took 9.6s\nlat: -121.4, long: 36.6\n6053 at POINT (-121.4 36.6) took 9.2s\nlat: -121.3, long: 35.8\n6053 
at POINT (-121.3 35.8) took 9.4s\nlat: -121.3, long: 35.9\n6053 at POINT (-121.3 35.9) took 9.2s\nlat: -121.3, long: 36.0\n6053 at POINT (-121.3 36) took 9.3s\nlat: -121.3, long: 36.1\n6053 at POINT (-121.3 36.1) took 9.1s\nlat: -121.3, long: 36.2\n6053 at POINT (-121.3 36.2) took 9.0s\nlat: -121.3, long: 36.3\n6053 at POINT (-121.3 36.3) took 9.4s\nlat: -121.3, long: 36.4\n6053 at POINT (-121.3 36.4) took 9.3s\nlat: -121.3, long: 36.5\n6053 at POINT (-121.3 36.5) took 9.2s\nlat: -121.2, long: 35.8\n6053 at POINT (-121.2 35.8) took 9.2s\nlat: -121.2, long: 35.9\n6053 at POINT (-121.2 35.9) took 9.1s\nlat: -121.2, long: 36.0\n6053 at POINT (-121.2 36) took 9.1s\nlat: -121.2, long: 36.1\n6053 at POINT (-121.2 36.1) took 9.2s\nlat: -121.2, long: 36.2\n6053 at POINT (-121.2 36.2) took 9.1s\nlat: -121.2, long: 36.3\n6053 at POINT (-121.2 36.3) took 9.2s\nlat: -121.2, long: 36.4\n6053 at POINT (-121.2 36.4) took 9.2s\nlat: -121.1, long: 35.8\n6053 at POINT (-121.1 35.8) took 9.4s\nlat: -121.1, long: 35.9\n6053 at POINT (-121.1 35.9) took 9.1s\nlat: -121.1, long: 36.0\n6053 at POINT (-121.1 36) took 9.0s\nlat: -121.1, long: 36.1\n6053 at POINT (-121.1 36.1) took 9.1s\nlat: -121.1, long: 36.2\n6053 at POINT (-121.1 36.2) took 9.2s\nlat: -121.1, long: 36.3\n6053 at POINT (-121.1 36.3) took 9.2s\nlat: -121.0, long: 35.8\n6053 at POINT (-121 35.8) took 9.3s\nlat: -121.0, long: 35.9\n6053 at POINT (-121 35.9) took 9.0s\nlat: -121.0, long: 36.1\n6053 at POINT (-121 36.1) took 9.0s\nlat: -121.0, long: 36.2\n6053 at POINT (-121 36.2) took 9.0s\nlat: -120.9, long: 35.8\n6053 at POINT (-120.9 35.8) took 9.3s\nlat: -120.9, long: 35.9\n6053 at POINT (-120.9 35.9) took 9.4s\nlat: -120.9, long: 36.0\n6053 at POINT (-120.9 36) took 8.9s\nlat: -120.9, long: 36.1\n6053 at POINT (-120.9 36.1) took 9.2s\nlat: -120.9, long: 36.2\n6053 at POINT (-120.9 36.2) took 10.1s\nlat: -120.9, long: 36.3\n6053 at POINT (-120.9 36.3) took 9.1s\nlat: -120.8, long: 35.8\n6053 at POINT (-120.8 35.8) took 9.6s\nlat: -120.8, long: 35.9\n6053 at POINT (-120.8 35.9) took 9.2s\nlat: -120.8, long: 36.0\n6053 at POINT (-120.8 36) took 9.0s\nlat: -120.8, long: 36.1\n6053 at POINT (-120.8 36.1) took 9.1s\nlat: -120.8, long: 36.2\n6053 at POINT (-120.8 36.2) took 9.0s\nlat: -120.7, long: 35.8\n6053 at POINT (-120.7 35.8) took 9.6s\nlat: -120.7, long: 35.9\n6053 at POINT (-120.7 35.9) took 9.1s\nlat: -120.7, long: 36.0\n6053 at POINT (-120.7 36) took 9.0s\nlat: -120.7, long: 36.1\n6053 at POINT (-120.7 36.1) took 9.8s\nlat: -120.7, long: 36.2\n6053 at POINT (-120.7 36.2) took 9.1s\nlat: -120.6, long: 35.8\n6053 at POINT (-120.6 35.8) took 9.4s\nlat: -120.6, long: 35.9\n6053 at POINT (-120.6 35.9) took 9.0s\nlat: -120.6, long: 36.0\n6053 at POINT (-120.6 36) took 9.0s\nlat: -120.6, long: 36.1\n6053 at POINT (-120.6 36.1) took 9.1s\nlat: -120.5, long: 35.8\n6053 at POINT (-120.5 35.8) took 9.3s\nlat: -120.5, long: 35.9\n6053 at POINT (-120.5 35.9) took 9.1s\nlat: -120.4, long: 35.8\n6053 at POINT (-120.4 35.8) took 9.3s\nlat: -120.4, long: 35.9\n6053 at POINT (-120.4 35.9) took 9.3s\nlat: -120.3, long: 35.8\n6053 at POINT (-120.3 35.8) took 9.9s\nlat: -120.3, long: 35.9\n6053 at POINT (-120.3 35.9) took 9.0s\n6055 southwest to northeast: (-122.6, 38.2) to (-122.1, 38.9)\nlat: -122.6, long: 38.6\n6055 at POINT (-122.6 38.6) took 9.5s\nlat: -122.5, long: 38.6\n6055 at POINT (-122.5 38.6) took 9.4s\nlat: -122.4, long: 38.3\n6055 at POINT (-122.4 38.3) took 9.1s\nlat: -122.4, long: 38.4\n6055 at POINT (-122.4 38.4) took 9.2s\nlat: -122.4, long: 
38.5\n6055 at POINT (-122.4 38.5) took 9.1s\nlat: -122.4, long: 38.6\n6055 at POINT (-122.4 38.6) took 9.1s\nlat: -122.4, long: 38.7\n6055 at POINT (-122.4 38.7) took 9.1s\nlat: -122.4, long: 38.8\n6055 at POINT (-122.4 38.8) took 9.2s\nlat: -122.3, long: 38.2\n6055 at POINT (-122.3 38.2) took 9.0s\nlat: -122.3, long: 38.3\n6055 at POINT (-122.3 38.3) took 9.2s\nlat: -122.3, long: 38.4\n6055 at POINT (-122.3 38.4) took 9.5s\nlat: -122.3, long: 38.5\n6055 at POINT (-122.3 38.5) took 9.2s\nlat: -122.3, long: 38.6\n6055 at POINT (-122.3 38.6) took 9.3s\nlat: -122.3, long: 38.7\n6055 at POINT (-122.3 38.7) took 9.6s\nlat: -122.3, long: 38.8\n6055 at POINT (-122.3 38.8) took 9.2s\nlat: -122.2, long: 38.3\n6055 at POINT (-122.2 38.3) took 9.4s\nlat: -122.2, long: 38.4\n6055 at POINT (-122.2 38.4) took 9.5s\nlat: -122.2, long: 38.5\n6055 at POINT (-122.2 38.5) took 9.1s\nlat: -122.2, long: 38.6\n6055 at POINT (-122.2 38.6) took 9.3s\n6057 southwest to northeast: (-121.3, 39.0) to (-120.0, 39.5)\nlat: -121.2, long: 39.1\n6057 at POINT (-121.2 39.1) took 9.2s\nlat: -121.2, long: 39.2\n6057 at POINT (-121.2 39.2) took 9.5s\nlat: -121.2, long: 39.3\n6057 at POINT (-121.2 39.3) took 9.1s\nlat: -121.1, long: 39.1\n6057 at POINT (-121.1 39.1) took 9.4s\nlat: -121.1, long: 39.2\n6057 at POINT (-121.1 39.2) took 9.4s\nlat: -121.1, long: 39.3\n6057 at POINT (-121.1 39.3) took 9.1s\nlat: -121.0, long: 39.1\n6057 at POINT (-121 39.1) took 9.3s\nlat: -121.0, long: 39.2\n6057 at POINT (-121 39.2) took 9.0s\nlat: -121.0, long: 39.3\n6057 at POINT (-121 39.3) took 9.2s\nlat: -121.0, long: 39.4\n6057 at POINT (-121 39.4) took 9.0s\nlat: -120.9, long: 39.2\n6057 at POINT (-120.9 39.2) took 9.2s\nlat: -120.9, long: 39.3\n6057 at POINT (-120.9 39.3) took 9.1s\nlat: -120.9, long: 39.4\n6057 at POINT (-120.9 39.4) took 9.3s\nlat: -120.8, long: 39.3\n6057 at POINT (-120.8 39.3) took 9.7s\nlat: -120.8, long: 39.4\n6057 at POINT (-120.8 39.4) took 9.2s\nlat: -120.7, long: 39.4\n6057 at POINT (-120.7 39.4) took 9.1s\nlat: -120.7, long: 39.5\n6057 at POINT (-120.7 39.5) took 9.2s\nlat: -120.6, long: 39.4\n6057 at POINT (-120.6 39.4) took 9.2s\nlat: -120.6, long: 39.5\n6057 at POINT (-120.6 39.5) took 9.1s\nlat: -120.5, long: 39.4\n6057 at POINT (-120.5 39.4) took 8.9s\nlat: -120.4, long: 39.4\n6057 at POINT (-120.4 39.4) took 9.1s\nlat: -120.3, long: 39.4\n6057 at POINT (-120.3 39.4) took 9.6s\nlat: -120.2, long: 39.4\n6057 at POINT (-120.2 39.4) took 9.5s\nlat: -120.1, long: 39.4\n6057 at POINT (-120.1 39.4) took 9.2s\n6059 southwest to northeast: (-118.1, 33.4) to (-117.4, 33.9)\nlat: -118.0, long: 33.7\n6059 at POINT (-118 33.7) took 9.2s\nlat: -118.0, long: 33.8\n6059 at POINT (-118 33.8) took 9.2s\nlat: -117.9, long: 33.6\n6059 at POINT (-117.9 33.6) took 9.2s\nlat: -117.9, long: 33.7\n6059 at POINT (-117.9 33.7) took 9.3s\nlat: -117.9, long: 33.8\n6059 at POINT (-117.9 33.8) took 9.4s\nlat: -117.9, long: 33.9\n6059 at POINT (-117.9 33.9) took 9.5s\nlat: -117.8, long: 33.6\n6059 at POINT (-117.8 33.6) took 9.5s\nlat: -117.8, long: 33.7\n6059 at POINT (-117.8 33.7) took 9.1s\nlat: -117.8, long: 33.8\n6059 at POINT (-117.8 33.8) took 9.1s\nlat: -117.8, long: 33.9\n6059 at POINT (-117.8 33.9) took 9.3s\nlat: -117.7, long: 33.5\n6059 at POINT (-117.7 33.5) took 9.3s\nlat: -117.7, long: 33.6\n6059 at POINT (-117.7 33.6) took 9.2s\nlat: -117.7, long: 33.7\n6059 at POINT (-117.7 33.7) took 9.1s\nlat: -117.7, long: 33.8\n6059 at POINT (-117.7 33.8) took 9.0s\nlat: -117.6, long: 33.4\n6059 at POINT (-117.6 33.4) took 
9.3s\nlat: -117.6, long: 33.5\n6059 at POINT (-117.6 33.5) took 9.1s\nlat: -117.6, long: 33.6\n6059 at POINT (-117.6 33.6) took 9.1s\nlat: -117.6, long: 33.7\n6059 at POINT (-117.6 33.7) took 9.1s\nlat: -117.5, long: 33.6\n6059 at POINT (-117.5 33.6) took 9.2s\nlat: -117.5, long: 33.7\n6059 at POINT (-117.5 33.7) took 9.5s\n6061 southwest to northeast: (-121.5, 38.7) to (-120.0, 39.3)\nlat: -121.4, long: 38.8\n6061 at POINT (-121.4 38.8) took 9.5s\nlat: -121.4, long: 38.9\n6061 at POINT (-121.4 38.9) took 9.1s\nlat: -121.4, long: 39.0\n6061 at POINT (-121.4 39) took 9.1s\nlat: -121.3, long: 38.8\n6061 at POINT (-121.3 38.8) took 9.3s\nlat: -121.3, long: 38.9\n6061 at POINT (-121.3 38.9) took 9.5s\nlat: -121.3, long: 39.0\n6061 at POINT (-121.3 39) took 9.4s\nlat: -121.2, long: 38.8\n6061 at POINT (-121.2 38.8) took 9.7s\nlat: -121.2, long: 38.9\n6061 at POINT (-121.2 38.9) took 9.4s\nlat: -121.2, long: 39.0\n6061 at POINT (-121.2 39) took 9.3s\nlat: -121.1, long: 38.9\n6061 at POINT (-121.1 38.9) took 9.2s\nlat: -121.1, long: 39.0\n6061 at POINT (-121.1 39) took 9.4s\nlat: -120.9, long: 39.0\n6061 at POINT (-120.9 39) took 9.0s\nlat: -120.9, long: 39.1\n6061 at POINT (-120.9 39.1) took 9.5s\nlat: -120.8, long: 39.0\n6061 at POINT (-120.8 39) took 9.0s\nlat: -120.8, long: 39.1\n6061 at POINT (-120.8 39.1) took 9.0s\nlat: -120.8, long: 39.2\n6061 at POINT (-120.8 39.2) took 9.0s\nlat: -120.7, long: 39.0\n6061 at POINT (-120.7 39) took 9.0s\nlat: -120.7, long: 39.1\n6061 at POINT (-120.7 39.1) took 9.2s\nlat: -120.7, long: 39.2\n6061 at POINT (-120.7 39.2) took 9.3s\nlat: -120.7, long: 39.3\n6061 at POINT (-120.7 39.3) took 9.2s\nlat: -120.6, long: 39.0\n6061 at POINT (-120.6 39) took 9.1s\nlat: -120.6, long: 39.1\n6061 at POINT (-120.6 39.1) took 9.0s\nlat: -120.6, long: 39.2\n6061 at POINT (-120.6 39.2) took 9.1s\nlat: -120.6, long: 39.3\n6061 at POINT (-120.6 39.3) took 9.2s\nlat: -120.5, long: 39.1\n6061 at POINT (-120.5 39.1) took 9.3s\nlat: -120.5, long: 39.2\n6061 at POINT (-120.5 39.2) took 9.3s\nlat: -120.5, long: 39.3\n6061 at POINT (-120.5 39.3) took 9.4s\nlat: -120.4, long: 39.1\n6061 at POINT (-120.4 39.1) took 9.2s\nlat: -120.4, long: 39.2\n6061 at POINT (-120.4 39.2) took 9.2s\nlat: -120.4, long: 39.3\n6061 at POINT (-120.4 39.3) took 9.2s\nlat: -120.3, long: 39.1\n6061 at POINT (-120.3 39.1) took 9.1s\nlat: -120.3, long: 39.2\n6061 at POINT (-120.3 39.2) took 9.1s\nlat: -120.3, long: 39.3\n6061 at POINT (-120.3 39.3) took 9.4s\nlat: -120.2, long: 39.1\n6061 at POINT (-120.2 39.1) took 9.2s\nlat: -120.2, long: 39.2\n6061 at POINT (-120.2 39.2) took 9.2s\nlat: -120.2, long: 39.3\n6061 at POINT (-120.2 39.3) took 9.2s\nlat: -120.1, long: 39.1\n6061 at POINT (-120.1 39.1) took 9.0s\nlat: -120.1, long: 39.2\n6061 at POINT (-120.1 39.2) took 9.3s\nlat: -120.1, long: 39.3\n6061 at POINT (-120.1 39.3) took 9.1s\n6063 southwest to northeast: (-121.5, 39.6) to (-120.1, 40.4)\nlat: -121.4, long: 39.9\n6063 at POINT (-121.4 39.9) took 9.5s\nlat: -121.4, long: 40.0\n6063 at POINT (-121.4 40) took 9.4s\nlat: -121.4, long: 40.4\n6063 at POINT (-121.4 40.4) took 9.3s\nlat: -121.3, long: 39.8\n6063 at POINT (-121.3 39.8) took 9.5s\nlat: -121.3, long: 39.9\n6063 at POINT (-121.3 39.9) took 9.4s\nlat: -121.3, long: 40.0\n6063 at POINT (-121.3 40) took 9.2s\nlat: -121.3, long: 40.1\n6063 at POINT (-121.3 40.1) took 9.4s\nlat: -121.3, long: 40.2\n6063 at POINT (-121.3 40.2) took 9.4s\nlat: -121.3, long: 40.3\n6063 at POINT (-121.3 40.3) took 9.1s\nlat: -121.3, long: 40.4\n6063 at POINT (-121.3 
40.4) took 9.4s\nlat: -121.2, long: 39.8\n6063 at POINT (-121.2 39.8) took 9.3s\nlat: -121.2, long: 39.9\n6063 at POINT (-121.2 39.9) took 9.3s\nlat: -121.2, long: 40.0\n6063 at POINT (-121.2 40) took 9.2s\nlat: -121.2, long: 40.1\n6063 at POINT (-121.2 40.1) took 9.3s\nlat: -121.2, long: 40.2\n6063 at POINT (-121.2 40.2) took 9.2s\nlat: -121.2, long: 40.3\n6063 at POINT (-121.2 40.3) took 9.2s\nlat: -121.2, long: 40.4\n6063 at POINT (-121.2 40.4) took 9.1s\nlat: -121.1, long: 39.7\n6063 at POINT (-121.1 39.7) took 9.1s\nlat: -121.1, long: 39.8\n6063 at POINT (-121.1 39.8) took 9.1s\nlat: -121.1, long: 39.9\n6063 at POINT (-121.1 39.9) took 9.3s\nlat: -121.1, long: 40.0\n6063 at POINT (-121.1 40) took 8.9s\nlat: -121.1, long: 40.1\n6063 at POINT (-121.1 40.1) took 9.3s\nlat: -121.1, long: 40.2\n6063 at POINT (-121.1 40.2) took 9.1s\nlat: -121.1, long: 40.3\n6063 at POINT (-121.1 40.3) took 9.2s\nlat: -121.1, long: 40.4\n6063 at POINT (-121.1 40.4) took 9.5s\nlat: -121.0, long: 39.7\n6063 at POINT (-121 39.7) took 9.4s\nlat: -121.0, long: 39.8\n6063 at POINT (-121 39.8) took 9.3s\nlat: -121.0, long: 39.9\n6063 at POINT (-121 39.9) took 9.0s\nlat: -121.0, long: 40.1\n6063 at POINT (-121 40.1) took 8.8s\nlat: -121.0, long: 40.2\n6063 at POINT (-121 40.2) took 9.1s\nlat: -120.9, long: 39.8\n6063 at POINT (-120.9 39.8) took 9.4s\nlat: -120.9, long: 39.9\n6063 at POINT (-120.9 39.9) took 9.1s\nlat: -120.9, long: 40.0\n6063 at POINT (-120.9 40) took 9.1s\nlat: -120.9, long: 40.1\n6063 at POINT (-120.9 40.1) took 9.0s\nlat: -120.8, long: 39.8\n6063 at POINT (-120.8 39.8) took 9.4s\nlat: -120.8, long: 39.9\n6063 at POINT (-120.8 39.9) took 9.0s\nlat: -120.8, long: 40.0\n6063 at POINT (-120.8 40) took 9.2s\nlat: -120.8, long: 40.1\n6063 at POINT (-120.8 40.1) took 9.2s\nlat: -120.8, long: 40.2\n6063 at POINT (-120.8 40.2) took 9.0s\nlat: -120.7, long: 39.7\n6063 at POINT (-120.7 39.7) took 9.6s\nlat: -120.7, long: 39.8\n6063 at POINT (-120.7 39.8) took 9.5s\nlat: -120.7, long: 39.9\n6063 at POINT (-120.7 39.9) took 9.5s\nlat: -120.7, long: 40.0\n6063 at POINT (-120.7 40) took 9.7s\nlat: -120.7, long: 40.1\n6063 at POINT (-120.7 40.1) took 10.0s\nlat: -120.7, long: 40.2\n6063 at POINT (-120.7 40.2) took 9.9s\nlat: -120.7, long: 40.3\n6063 at POINT (-120.7 40.3) took 10.5s\nlat: -120.6, long: 39.8\n6063 at POINT (-120.6 39.8) took 10.5s\nlat: -120.6, long: 39.9\n6063 at POINT (-120.6 39.9) took 9.1s\nlat: -120.6, long: 40.0\n6063 at POINT (-120.6 40) took 11.6s\nlat: -120.6, long: 40.1\n6063 at POINT (-120.6 40.1) took 9.0s\nlat: -120.6, long: 40.2\n6063 at POINT (-120.6 40.2) took 10.5s\nlat: -120.5, long: 39.8\n6063 at POINT (-120.5 39.8) took 11.0s\nlat: -120.5, long: 39.9\n6063 at POINT (-120.5 39.9) took 9.0s\nlat: -120.5, long: 40.1\n6063 at POINT (-120.5 40.1) took 10.1s\nlat: -120.5, long: 40.2\n6063 at POINT (-120.5 40.2) took 10.6s\nlat: -120.4, long: 39.8\n6063 at POINT (-120.4 39.8) took 9.1s\nlat: -120.4, long: 39.9\n6063 at POINT (-120.4 39.9) took 9.6s\nlat: -120.4, long: 40.0\n6063 at POINT (-120.4 40) took 9.5s\nlat: -120.4, long: 40.1\n6063 at POINT (-120.4 40.1) took 9.2s\nlat: -120.3, long: 39.8\n6063 at POINT (-120.3 39.8) took 9.5s\nlat: -120.3, long: 39.9\n6063 at POINT (-120.3 39.9) took 9.3s\nlat: -120.3, long: 40.0\n6063 at POINT (-120.3 40) took 9.3s\nlat: -120.3, long: 40.1\n6063 at POINT (-120.3 40.1) took 9.7s\nlat: -120.2, long: 39.8\n6063 at POINT (-120.2 39.8) took 9.3s\nlat: -120.2, long: 39.9\n6063 at POINT (-120.2 39.9) took 9.3s\nlat: -120.2, long: 40.0\n6063 at 
POINT (-120.2 40) took 9.3s\nlat: -120.1, long: 39.9\n6063 at POINT (-120.1 39.9) took 9.1s\n6065 southwest to northeast: (-117.7, 33.4) to (-114.4, 34.1)\nlat: -117.6, long: 33.8\n6065 at POINT (-117.6 33.8) took 9.4s\nlat: -117.6, long: 33.9\n6065 at POINT (-117.6 33.9) took 9.4s\nlat: -117.5, long: 33.8\n6065 at POINT (-117.5 33.8) took 9.2s\nlat: -117.5, long: 33.9\n6065 at POINT (-117.5 33.9) took 9.8s\nlat: -117.4, long: 33.6\n6065 at POINT (-117.4 33.6) took 9.3s\nlat: -117.4, long: 33.7\n6065 at POINT (-117.4 33.7) took 9.1s\nlat: -117.4, long: 33.8\n6065 at POINT (-117.4 33.8) took 9.4s\nlat: -117.4, long: 33.9\n6065 at POINT (-117.4 33.9) took 9.2s\nlat: -117.4, long: 34.0\n6065 at POINT (-117.4 34) took 9.2s\nlat: -117.3, long: 33.5\n6065 at POINT (-117.3 33.5) took 9.4s\nlat: -117.3, long: 33.6\n6065 at POINT (-117.3 33.6) took 9.9s\nlat: -117.3, long: 33.7\n6065 at POINT (-117.3 33.7) took 9.5s\nlat: -117.3, long: 33.8\n6065 at POINT (-117.3 33.8) took 9.7s\nlat: -117.3, long: 33.9\n6065 at POINT (-117.3 33.9) took 9.2s\nlat: -117.3, long: 34.0\n6065 at POINT (-117.3 34) took 9.2s\nlat: -117.2, long: 33.5\n6065 at POINT (-117.2 33.5) took 9.5s\nlat: -117.2, long: 33.6\n6065 at POINT (-117.2 33.6) took 9.6s\nlat: -117.2, long: 33.7\n6065 at POINT (-117.2 33.7) took 9.0s\nlat: -117.2, long: 33.8\n6065 at POINT (-117.2 33.8) took 9.5s\nlat: -117.2, long: 33.9\n6065 at POINT (-117.2 33.9) took 9.2s\nlat: -117.2, long: 34.0\n6065 at POINT (-117.2 34) took 9.2s\nlat: -117.1, long: 33.5\n6065 at POINT (-117.1 33.5) took 9.4s\nlat: -117.1, long: 33.6\n6065 at POINT (-117.1 33.6) took 9.1s\nlat: -117.1, long: 33.7\n6065 at POINT (-117.1 33.7) took 9.5s\nlat: -117.1, long: 33.8\n6065 at POINT (-117.1 33.8) took 9.2s\nlat: -117.1, long: 33.9\n6065 at POINT (-117.1 33.9) took 9.1s\nlat: -117.1, long: 34.0\n6065 at POINT (-117.1 34) took 9.0s\nlat: -117.0, long: 33.6\n6065 at POINT (-117 33.6) took 9.9s\nlat: -117.0, long: 33.7\n6065 at POINT (-117 33.7) took 9.2s\nlat: -117.0, long: 33.8\n6065 at POINT (-117 33.8) took 9.5s\nlat: -117.0, long: 33.9\n6065 at POINT (-117 33.9) took 9.2s\nlat: -116.9, long: 33.5\n6065 at POINT (-116.9 33.5) took 9.1s\nlat: -116.9, long: 33.6\n6065 at POINT (-116.9 33.6) took 9.3s\nlat: -116.9, long: 33.7\n6065 at POINT (-116.9 33.7) took 9.4s\nlat: -116.9, long: 33.8\n6065 at POINT (-116.9 33.8) took 9.1s\nlat: -116.9, long: 33.9\n6065 at POINT (-116.9 33.9) took 9.2s\nlat: -116.9, long: 34.0\n6065 at POINT (-116.9 34) took 9.5s\nlat: -116.8, long: 33.5\n6065 at POINT (-116.8 33.5) took 9.3s\nlat: -116.8, long: 33.6\n6065 at POINT (-116.8 33.6) took 9.3s\nlat: -116.8, long: 33.7\n6065 at POINT (-116.8 33.7) took 9.2s\nlat: -116.8, long: 33.8\n6065 at POINT (-116.8 33.8) took 9.1s\nlat: -116.8, long: 33.9\n6065 at POINT (-116.8 33.9) took 8.9s\nlat: -116.8, long: 34.0\n6065 at POINT (-116.8 34) took 8.9s\nlat: -116.7, long: 33.5\n6065 at POINT (-116.7 33.5) took 9.2s\nlat: -116.7, long: 33.6\n6065 at POINT (-116.7 33.6) took 9.3s\nlat: -116.7, long: 33.7\n6065 at POINT (-116.7 33.7) took 9.4s\nlat: -116.7, long: 33.8\n6065 at POINT (-116.7 33.8) took 9.1s\nlat: -116.7, long: 33.9\n6065 at POINT (-116.7 33.9) took 9.2s\nlat: -116.7, long: 34.0\n6065 at POINT (-116.7 34) took 9.0s\nlat: -116.6, long: 33.5\n6065 at POINT (-116.6 33.5) took 9.5s\nlat: -116.6, long: 33.6\n6065 at POINT (-116.6 33.6) took 9.2s\nlat: -116.6, long: 33.7\n6065 at POINT (-116.6 33.7) took 9.3s\nlat: -116.6, long: 33.8\n6065 at POINT (-116.6 33.8) took 9.3s\nlat: -116.6, long: 
33.9\n6065 at POINT (-116.6 33.9) took 9.1s\nlat: -116.6, long: 34.0\n6065 at POINT (-116.6 34) took 9.1s\nlat: -116.5, long: 33.6\n6065 at POINT (-116.5 33.6) took 9.2s\nlat: -116.5, long: 33.7\n6065 at POINT (-116.5 33.7) took 9.3s\nlat: -116.5, long: 33.8\n6065 at POINT (-116.5 33.8) took 9.1s\nlat: -116.5, long: 33.9\n6065 at POINT (-116.5 33.9) took 9.3s\nlat: -116.4, long: 33.5\n6065 at POINT (-116.4 33.5) took 9.4s\nlat: -116.4, long: 33.6\n6065 at POINT (-116.4 33.6) took 9.3s\nlat: -116.4, long: 33.7\n6065 at POINT (-116.4 33.7) took 9.2s\nlat: -116.4, long: 33.8\n6065 at POINT (-116.4 33.8) took 9.3s\nlat: -116.4, long: 33.9\n6065 at POINT (-116.4 33.9) took 9.4s\nlat: -116.4, long: 34.0\n6065 at POINT (-116.4 34) took 9.0s\nlat: -116.3, long: 33.5\n6065 at POINT (-116.3 33.5) took 9.3s\nlat: -116.3, long: 33.6\n6065 at POINT (-116.3 33.6) took 9.1s\nlat: -116.3, long: 33.7\n6065 at POINT (-116.3 33.7) took 9.3s\nlat: -116.3, long: 33.8\n6065 at POINT (-116.3 33.8) took 9.2s\nlat: -116.3, long: 33.9\n6065 at POINT (-116.3 33.9) took 9.2s\nlat: -116.3, long: 34.0\n6065 at POINT (-116.3 34) took 9.5s\nlat: -116.2, long: 33.5\n6065 at POINT (-116.2 33.5) took 9.1s\nlat: -116.2, long: 33.6\n6065 at POINT (-116.2 33.6) took 9.2s\nlat: -116.2, long: 33.7\n6065 at POINT (-116.2 33.7) took 9.8s\nlat: -116.2, long: 33.8\n6065 at POINT (-116.2 33.8) took 9.5s\nlat: -116.2, long: 33.9\n6065 at POINT (-116.2 33.9) took 9.5s\nlat: -116.2, long: 34.0\n6065 at POINT (-116.2 34) took 9.1s\nlat: -116.1, long: 33.5\n6065 at POINT (-116.1 33.5) took 9.4s\nlat: -116.1, long: 33.6\n6065 at POINT (-116.1 33.6) took 9.2s\nlat: -116.1, long: 33.7\n6065 at POINT (-116.1 33.7) took 9.3s\nlat: -116.1, long: 33.8\n6065 at POINT (-116.1 33.8) took 9.5s\nlat: -116.1, long: 33.9\n6065 at POINT (-116.1 33.9) took 10.4s\nlat: -116.1, long: 34.0\n6065 at POINT (-116.1 34) took 9.3s\nlat: -116.0, long: 33.6\n6065 at POINT (-116 33.6) took 9.3s\nlat: -116.0, long: 33.7\n6065 at POINT (-116 33.7) took 9.3s\nlat: -116.0, long: 33.8\n6065 at POINT (-116 33.8) took 9.4s\nlat: -116.0, long: 33.9\n6065 at POINT (-116 33.9) took 9.4s\nlat: -115.9, long: 33.5\n6065 at POINT (-115.9 33.5) took 10.2s\nlat: -115.9, long: 33.6\n6065 at POINT (-115.9 33.6) took 9.6s\nlat: -115.9, long: 33.7\n6065 at POINT (-115.9 33.7) took 9.3s\nlat: -115.9, long: 33.8\n6065 at POINT (-115.9 33.8) took 9.4s\nlat: -115.9, long: 33.9\n6065 at POINT (-115.9 33.9) took 9.2s\nlat: -115.9, long: 34.0\n6065 at POINT (-115.9 34) took 9.2s\nlat: -115.8, long: 33.5\n6065 at POINT (-115.8 33.5) took 9.5s\nlat: -115.8, long: 33.6\n6065 at POINT (-115.8 33.6) took 9.5s\nlat: -115.8, long: 33.7\n6065 at POINT (-115.8 33.7) took 9.8s\nlat: -115.8, long: 33.8\n6065 at POINT (-115.8 33.8) took 9.5s\nlat: -115.8, long: 33.9\n6065 at POINT (-115.8 33.9) took 9.3s\nlat: -115.8, long: 34.0\n6065 at POINT (-115.8 34) took 9.4s\nlat: -115.7, long: 33.5\n6065 at POINT (-115.7 33.5) took 9.6s\nlat: -115.7, long: 33.6\n6065 at POINT (-115.7 33.6) took 9.0s\nlat: -115.7, long: 33.7\n6065 at POINT (-115.7 33.7) took 9.3s\nlat: -115.7, long: 33.8\n6065 at POINT (-115.7 33.8) took 9.6s\nlat: -115.7, long: 33.9\n6065 at POINT (-115.7 33.9) took 10.0s\nlat: -115.7, long: 34.0\n6065 at POINT (-115.7 34) took 9.0s\nlat: -115.6, long: 33.5\n6065 at POINT (-115.6 33.5) took 9.5s\nlat: -115.6, long: 33.6\n6065 at POINT (-115.6 33.6) took 10.4s\nlat: -115.6, long: 33.7\n6065 at POINT (-115.6 33.7) took 10.8s\nlat: -115.6, long: 33.8\n6065 at POINT (-115.6 33.8) took 9.2s\nlat: 
-115.6, long: 33.9\n6065 at POINT (-115.6 33.9) took 9.5s\nlat: -115.6, long: 34.0\n6065 at POINT (-115.6 34) took 9.1s\nlat: -115.5, long: 33.6\n6065 at POINT (-115.5 33.6) took 9.4s\nlat: -115.5, long: 33.7\n6065 at POINT (-115.5 33.7) took 9.3s\nlat: -115.5, long: 33.8\n6065 at POINT (-115.5 33.8) took 9.5s\nlat: -115.5, long: 33.9\n6065 at POINT (-115.5 33.9) took 9.2s\nlat: -115.4, long: 33.5\n6065 at POINT (-115.4 33.5) took 9.5s\nlat: -115.4, long: 33.6\n6065 at POINT (-115.4 33.6) took 9.6s\nlat: -115.4, long: 33.7\n6065 at POINT (-115.4 33.7) took 9.5s\nlat: -115.4, long: 33.8\n6065 at POINT (-115.4 33.8) took 9.2s\nlat: -115.4, long: 33.9\n6065 at POINT (-115.4 33.9) took 9.3s\nlat: -115.4, long: 34.0\n6065 at POINT (-115.4 34) took 9.2s\nlat: -115.3, long: 33.5\n6065 at POINT (-115.3 33.5) took 9.1s\nlat: -115.3, long: 33.6\n6065 at POINT (-115.3 33.6) took 9.1s\nlat: -115.3, long: 33.7\n6065 at POINT (-115.3 33.7) took 9.0s\nlat: -115.3, long: 33.8\n6065 at POINT (-115.3 33.8) took 9.4s\nlat: -115.3, long: 33.9\n6065 at POINT (-115.3 33.9) took 9.4s\nlat: -115.3, long: 34.0\n6065 at POINT (-115.3 34) took 9.2s\nlat: -115.2, long: 33.5\n6065 at POINT (-115.2 33.5) took 9.2s\nlat: -115.2, long: 33.6\n6065 at POINT (-115.2 33.6) took 9.3s\nlat: -115.2, long: 33.7\n6065 at POINT (-115.2 33.7) took 9.5s\nlat: -115.2, long: 33.8\n6065 at POINT (-115.2 33.8) took 9.2s\nlat: -115.2, long: 33.9\n6065 at POINT (-115.2 33.9) took 9.2s\nlat: -115.2, long: 34.0\n6065 at POINT (-115.2 34) took 9.3s\nlat: -115.1, long: 33.5\n6065 at POINT (-115.1 33.5) took 9.3s\nlat: -115.1, long: 33.6\n6065 at POINT (-115.1 33.6) took 9.2s\nlat: -115.1, long: 33.7\n6065 at POINT (-115.1 33.7) took 9.1s\nlat: -115.1, long: 33.8\n6065 at POINT (-115.1 33.8) took 9.3s\nlat: -115.1, long: 33.9\n6065 at POINT (-115.1 33.9) took 9.2s\nlat: -115.1, long: 34.0\n6065 at POINT (-115.1 34) took 9.2s\nlat: -115.0, long: 33.6\n6065 at POINT (-115 33.6) took 9.2s\nlat: -115.0, long: 33.7\n6065 at POINT (-115 33.7) took 9.3s\nlat: -115.0, long: 33.8\n6065 at POINT (-115 33.8) took 9.2s\nlat: -115.0, long: 33.9\n6065 at POINT (-115 33.9) took 9.2s\nlat: -114.9, long: 33.5\n6065 at POINT (-114.9 33.5) took 9.4s\nlat: -114.9, long: 33.6\n6065 at POINT (-114.9 33.6) took 9.2s\nlat: -114.9, long: 33.7\n6065 at POINT (-114.9 33.7) took 9.9s\nlat: -114.9, long: 33.8\n6065 at POINT (-114.9 33.8) took 9.5s\nlat: -114.9, long: 33.9\n6065 at POINT (-114.9 33.9) took 9.4s\nlat: -114.9, long: 34.0\n6065 at POINT (-114.9 34) took 9.3s\nlat: -114.8, long: 33.5\n6065 at POINT (-114.8 33.5) took 9.3s\nlat: -114.8, long: 33.6\n6065 at POINT (-114.8 33.6) took 9.3s\nlat: -114.8, long: 33.7\n6065 at POINT (-114.8 33.7) took 9.6s\nlat: -114.8, long: 33.8\n6065 at POINT (-114.8 33.8) took 9.5s\nlat: -114.8, long: 33.9\n6065 at POINT (-114.8 33.9) took 9.2s\nlat: -114.8, long: 34.0\n6065 at POINT (-114.8 34) took 9.2s\nlat: -114.7, long: 33.5\n6065 at POINT (-114.7 33.5) took 9.2s\nlat: -114.7, long: 33.6\n6065 at POINT (-114.7 33.6) took 9.3s\nlat: -114.7, long: 33.7\n6065 at POINT (-114.7 33.7) took 9.2s\nlat: -114.7, long: 33.8\n6065 at POINT (-114.7 33.8) took 9.5s\nlat: -114.7, long: 33.9\n6065 at POINT (-114.7 33.9) took 9.3s\nlat: -114.7, long: 34.0\n6065 at POINT (-114.7 34) took 9.5s\nlat: -114.6, long: 33.5\n6065 at POINT (-114.6 33.5) took 9.4s\nlat: -114.6, long: 33.6\n6065 at POINT (-114.6 33.6) took 9.3s\nlat: -114.6, long: 33.7\n6065 at POINT (-114.6 33.7) took 9.3s\nlat: -114.6, long: 33.8\n6065 at POINT (-114.6 33.8) took 
9.5s\nlat: -114.6, long: 33.9\n6065 at POINT (-114.6 33.9) took 9.3s\nlat: -114.6, long: 34.0\n6065 at POINT (-114.6 34) took 9.3s\nlat: -114.5, long: 33.7\n6065 at POINT (-114.5 33.7) took 9.4s\n6067 southwest to northeast: (-121.8, 38.0) to (-121.0, 38.7)\nlat: -121.7, long: 38.1\n6067 at POINT (-121.7 38.1) took 9.3s\nlat: -121.6, long: 38.2\n6067 at POINT (-121.6 38.2) took 9.5s\nlat: -121.6, long: 38.3\n6067 at POINT (-121.6 38.3) took 9.1s\nlat: -121.6, long: 38.7\n6067 at POINT (-121.6 38.7) took 9.0s\nlat: -121.5, long: 38.3\n6067 at POINT (-121.5 38.3) took 9.5s\nlat: -121.5, long: 38.4\n6067 at POINT (-121.5 38.4) took 10.3s\nlat: -121.5, long: 38.6\n6067 at POINT (-121.5 38.6) took 9.2s\nlat: -121.5, long: 38.7\n6067 at POINT (-121.5 38.7) took 9.8s\nlat: -121.4, long: 38.3\n6067 at POINT (-121.4 38.3) took 9.7s\nlat: -121.4, long: 38.4\n6067 at POINT (-121.4 38.4) took 9.6s\nlat: -121.4, long: 38.5\n6067 at POINT (-121.4 38.5) took 9.6s\nlat: -121.4, long: 38.6\n6067 at POINT (-121.4 38.6) took 9.4s\nlat: -121.4, long: 38.7\n6067 at POINT (-121.4 38.7) took 9.2s\nlat: -121.3, long: 38.3\n6067 at POINT (-121.3 38.3) took 9.3s\nlat: -121.3, long: 38.4\n6067 at POINT (-121.3 38.4) took 9.3s\nlat: -121.3, long: 38.5\n6067 at POINT (-121.3 38.5) took 10.3s\nlat: -121.3, long: 38.6\n6067 at POINT (-121.3 38.6) took 9.4s\nlat: -121.3, long: 38.7\n6067 at POINT (-121.3 38.7) took 9.7s\nlat: -121.2, long: 38.3\n6067 at POINT (-121.2 38.3) took 9.3s\nlat: -121.2, long: 38.4\n6067 at POINT (-121.2 38.4) took 9.2s\nlat: -121.2, long: 38.5\n6067 at POINT (-121.2 38.5) took 9.5s\nlat: -121.2, long: 38.6\n6067 at POINT (-121.2 38.6) took 9.3s\nlat: -121.2, long: 38.7\n6067 at POINT (-121.2 38.7) took 9.4s\nlat: -121.1, long: 38.3\n6067 at POINT (-121.1 38.3) took 9.3s\nlat: -121.1, long: 38.4\n6067 at POINT (-121.1 38.4) took 9.5s\nlat: -121.1, long: 38.5\n6067 at POINT (-121.1 38.5) took 9.4s\nlat: -121.1, long: 38.6\n6067 at POINT (-121.1 38.6) took 9.4s\n6069 southwest to northeast: (-121.6, 36.2) to (-120.6, 37.0)\nlat: -121.5, long: 36.8\n6069 at POINT (-121.5 36.8) took 9.4s\nlat: -121.5, long: 36.9\n6069 at POINT (-121.5 36.9) took 9.2s\nlat: -121.4, long: 36.7\n6069 at POINT (-121.4 36.7) took 9.2s\nlat: -121.4, long: 36.8\n6069 at POINT (-121.4 36.8) took 9.5s\nlat: -121.4, long: 36.9\n6069 at POINT (-121.4 36.9) took 9.4s\nlat: -121.3, long: 36.6\n6069 at POINT (-121.3 36.6) took 9.4s\nlat: -121.3, long: 36.7\n6069 at POINT (-121.3 36.7) took 9.4s\nlat: -121.3, long: 36.8\n6069 at POINT (-121.3 36.8) took 9.5s\nlat: -121.3, long: 36.9\n6069 at POINT (-121.3 36.9) took 9.4s\nlat: -121.2, long: 36.5\n6069 at POINT (-121.2 36.5) took 9.5s\nlat: -121.2, long: 36.6\n6069 at POINT (-121.2 36.6) took 9.3s\nlat: -121.2, long: 36.7\n6069 at POINT (-121.2 36.7) took 9.4s\nlat: -121.2, long: 36.8\n6069 at POINT (-121.2 36.8) took 9.2s\nlat: -121.2, long: 36.9\n6069 at POINT (-121.2 36.9) took 9.2s\nlat: -121.1, long: 36.4\n6069 at POINT (-121.1 36.4) took 9.6s\nlat: -121.1, long: 36.5\n6069 at POINT (-121.1 36.5) took 9.5s\nlat: -121.1, long: 36.6\n6069 at POINT (-121.1 36.6) took 9.8s\nlat: -121.1, long: 36.7\n6069 at POINT (-121.1 36.7) took 9.2s\nlat: -121.1, long: 36.8\n6069 at POINT (-121.1 36.8) took 9.4s\nlat: -121.0, long: 36.3\n6069 at POINT (-121 36.3) took 9.6s\nlat: -121.0, long: 36.4\n6069 at POINT (-121 36.4) took 11.4s\nlat: -121.0, long: 36.6\n6069 at POINT (-121 36.6) took 9.4s\nlat: -121.0, long: 36.7\n6069 at POINT (-121 36.7) took 9.1s\nlat: -120.9, long: 36.4\n6069 at 
POINT (-120.9 36.4) took 9.6s\nlat: -120.9, long: 36.5\n6069 at POINT (-120.9 36.5) took 9.1s\nlat: -120.9, long: 36.6\n6069 at POINT (-120.9 36.6) took 9.2s\nlat: -120.9, long: 36.7\n6069 at POINT (-120.9 36.7) took 9.1s\nlat: -120.8, long: 36.3\n6069 at POINT (-120.8 36.3) took 9.4s\nlat: -120.8, long: 36.4\n6069 at POINT (-120.8 36.4) took 9.3s\nlat: -120.8, long: 36.5\n6069 at POINT (-120.8 36.5) took 9.7s\nlat: -120.8, long: 36.6\n6069 at POINT (-120.8 36.6) took 9.1s\nlat: -120.7, long: 36.3\n6069 at POINT (-120.7 36.3) took 9.3s\nlat: -120.7, long: 36.4\n6069 at POINT (-120.7 36.4) took 9.4s\nlat: -120.7, long: 36.5\n6069 at POINT (-120.7 36.5) took 9.1s\nlat: -120.6, long: 36.4\n6069 at POINT (-120.6 36.4) took 9.3s\n6071 southwest to northeast: (-117.8, 33.9) to (-114.1, 35.8)\nlat: -117.7, long: 33.9\n6071 at POINT (-117.7 33.9) took 9.8s\nlat: -117.7, long: 34.0\n6071 at POINT (-117.7 34) took 9.0s\nlat: -117.7, long: 34.1\n6071 at POINT (-117.7 34.1) took 9.3s\nlat: -117.6, long: 34.0\n6071 at POINT (-117.6 34) took 9.8s\nlat: -117.6, long: 34.1\n6071 at POINT (-117.6 34.1) took 9.3s\nlat: -117.6, long: 34.2\n6071 at POINT (-117.6 34.2) took 9.1s\nlat: -117.6, long: 34.3\n6071 at POINT (-117.6 34.3) took 9.5s\nlat: -117.6, long: 34.4\n6071 at POINT (-117.6 34.4) took 9.0s\nlat: -117.6, long: 34.5\n6071 at POINT (-117.6 34.5) took 9.3s\nlat: -117.6, long: 34.6\n6071 at POINT (-117.6 34.6) took 9.2s\nlat: -117.6, long: 34.7\n6071 at POINT (-117.6 34.7) took 9.3s\nlat: -117.6, long: 34.8\n6071 at POINT (-117.6 34.8) took 9.8s\nlat: -117.6, long: 34.9\n6071 at POINT (-117.6 34.9) took 13.3s\nlat: -117.6, long: 35.0\n6071 at POINT (-117.6 35) took 9.2s\nlat: -117.6, long: 35.1\n6071 at POINT (-117.6 35.1) took 69.2s\nlat: -117.6, long: 35.2\n6071 at POINT (-117.6 35.2) took 9.6s\nlat: -117.6, long: 35.3\n6071 at POINT (-117.6 35.3) took 9.3s\nlat: -117.6, long: 35.4\n6071 at POINT (-117.6 35.4) took 9.4s\nlat: -117.6, long: 35.5\n6071 at POINT (-117.6 35.5) took 9.5s\nlat: -117.6, long: 35.6\n6071 at POINT (-117.6 35.6) took 9.4s\nlat: -117.6, long: 35.7\n6071 at POINT (-117.6 35.7) took 9.4s\nlat: -117.5, long: 34.1\n6071 at POINT (-117.5 34.1) took 10.8s\nlat: -117.5, long: 34.2\n6071 at POINT (-117.5 34.2) took 9.6s\nlat: -117.5, long: 34.3\n6071 at POINT (-117.5 34.3) took 9.4s\nlat: -117.5, long: 34.4\n6071 at POINT (-117.5 34.4) took 9.6s\nlat: -117.5, long: 34.6\n6071 at POINT (-117.5 34.6) took 9.4s\nlat: -117.5, long: 34.7\n6071 at POINT (-117.5 34.7) took 9.5s\nlat: -117.5, long: 34.8\n6071 at POINT (-117.5 34.8) took 9.9s\nlat: -117.5, long: 34.9\n6071 at POINT (-117.5 34.9) took 9.5s\nlat: -117.5, long: 35.1\n6071 at POINT (-117.5 35.1) took 9.5s\nlat: -117.5, long: 35.2\n6071 at POINT (-117.5 35.2) took 9.8s\nlat: -117.5, long: 35.3\n6071 at POINT (-117.5 35.3) took 10.1s\nlat: -117.5, long: 35.4\n6071 at POINT (-117.5 35.4) took 9.4s\nlat: -117.5, long: 35.6\n6071 at POINT (-117.5 35.6) took 9.5s\nlat: -117.5, long: 35.7\n6071 at POINT (-117.5 35.7) took 9.5s\nlat: -117.4, long: 34.1\n6071 at POINT (-117.4 34.1) took 9.2s\nlat: -117.4, long: 34.2\n6071 at POINT (-117.4 34.2) took 9.3s\nlat: -117.4, long: 34.3\n6071 at POINT (-117.4 34.3) took 9.7s\nlat: -117.4, long: 34.4\n6071 at POINT (-117.4 34.4) took 9.4s\nlat: -117.4, long: 34.5\n6071 at POINT (-117.4 34.5) took 9.4s\nlat: -117.4, long: 34.6\n6071 at POINT (-117.4 34.6) took 9.3s\nlat: -117.4, long: 34.7\n6071 at POINT (-117.4 34.7) took 9.4s\nlat: -117.4, long: 34.8\n6071 at POINT (-117.4 34.8) took 9.3s\nlat: 
-117.4, long: 34.9\n6071 at POINT (-117.4 34.9) took 9.7s\nlat: -117.4, long: 35.0\n6071 at POINT (-117.4 35) took 9.2s\nlat: -117.4, long: 35.1\n6071 at POINT (-117.4 35.1) took 9.2s\nlat: -117.4, long: 35.2\n6071 at POINT (-117.4 35.2) took 9.8s\nlat: -117.4, long: 35.3\n6071 at POINT (-117.4 35.3) took 69.5s\nlat: -117.4, long: 35.4\n6071 at POINT (-117.4 35.4) took 9.2s\nlat: -117.4, long: 35.5\n6071 at POINT (-117.4 35.5) took 9.5s\nlat: -117.4, long: 35.6\n6071 at POINT (-117.4 35.6) took 9.3s\nlat: -117.4, long: 35.7\n6071 at POINT (-117.4 35.7) took 9.2s\nlat: -117.3, long: 34.1\n6071 at POINT (-117.3 34.1) took 8.9s\nlat: -117.3, long: 34.2\n6071 at POINT (-117.3 34.2) took 9.1s\nlat: -117.3, long: 34.3\n6071 at POINT (-117.3 34.3) took 9.3s\nlat: -117.3, long: 34.4\n6071 at POINT (-117.3 34.4) took 12.8s\nlat: -117.3, long: 34.5\n6071 at POINT (-117.3 34.5) took 9.2s\nlat: -117.3, long: 34.6\n6071 at POINT (-117.3 34.6) took 9.5s\nlat: -117.3, long: 34.7\n6071 at POINT (-117.3 34.7) took 9.1s\nlat: -117.3, long: 34.8\n6071 at POINT (-117.3 34.8) took 10.0s\nlat: -117.3, long: 34.9\n6071 at POINT (-117.3 34.9) took 9.1s\nlat: -117.3, long: 35.0\n6071 at POINT (-117.3 35) took 9.0s\nlat: -117.3, long: 35.1\n6071 at POINT (-117.3 35.1) took 9.4s\nlat: -117.3, long: 35.2\n6071 at POINT (-117.3 35.2) took 9.6s\nlat: -117.3, long: 35.3\n6071 at POINT (-117.3 35.3) took 9.1s\nlat: -117.3, long: 35.4\n6071 at POINT (-117.3 35.4) took 9.3s\nlat: -117.3, long: 35.5\n6071 at POINT (-117.3 35.5) took 9.2s\nlat: -117.3, long: 35.6\n6071 at POINT (-117.3 35.6) took 9.4s\nlat: -117.3, long: 35.7\n6071 at POINT (-117.3 35.7) took 10.2s\nlat: -117.2, long: 34.1\n6071 at POINT (-117.2 34.1) took 9.6s\nlat: -117.2, long: 34.2\n6071 at POINT (-117.2 34.2) took 9.2s\nlat: -117.2, long: 34.3\n6071 at POINT (-117.2 34.3) took 9.3s\nlat: -117.2, long: 34.4\n6071 at POINT (-117.2 34.4) took 9.7s\nlat: -117.2, long: 34.5\n6071 at POINT (-117.2 34.5) took 9.2s\nlat: -117.2, long: 34.6\n6071 at POINT (-117.2 34.6) took 9.5s\nlat: -117.2, long: 34.7\n6071 at POINT (-117.2 34.7) took 9.2s\nlat: -117.2, long: 34.8\n6071 at POINT (-117.2 34.8) took 9.5s\nlat: -117.2, long: 34.9\n6071 at POINT (-117.2 34.9) took 9.2s\nlat: -117.2, long: 35.0\n6071 at POINT (-117.2 35) took 9.2s\nlat: -117.2, long: 35.1\n6071 at POINT (-117.2 35.1) took 9.5s\nlat: -117.2, long: 35.2\n6071 at POINT (-117.2 35.2) took 9.1s\nlat: -117.2, long: 35.3\n6071 at POINT (-117.2 35.3) took 9.1s\nlat: -117.2, long: 35.4\n6071 at POINT (-117.2 35.4) took 9.7s\nlat: -117.2, long: 35.5\n6071 at POINT (-117.2 35.5) took 9.3s\nlat: -117.2, long: 35.6\n6071 at POINT (-117.2 35.6) took 9.7s\nlat: -117.2, long: 35.7\n6071 at POINT (-117.2 35.7) took 9.3s\nlat: -117.1, long: 34.1\n6071 at POINT (-117.1 34.1) took 9.2s\nlat: -117.1, long: 34.2\n6071 at POINT (-117.1 34.2) took 9.2s\nlat: -117.1, long: 34.3\n6071 at POINT (-117.1 34.3) took 9.2s\nlat: -117.1, long: 34.4\n6071 at POINT (-117.1 34.4) took 9.8s\nlat: -117.1, long: 34.5\n6071 at POINT (-117.1 34.5) took 9.1s\nlat: -117.1, long: 34.6\n6071 at POINT (-117.1 34.6) took 9.0s\nlat: -117.1, long: 34.7\n6071 at POINT (-117.1 34.7) took 9.2s\nlat: -117.1, long: 34.8\n6071 at POINT (-117.1 34.8) took 9.2s\nlat: -117.1, long: 34.9\n6071 at POINT (-117.1 34.9) took 9.3s\nlat: -117.1, long: 35.0\n6071 at POINT (-117.1 35) took 9.1s\nlat: -117.1, long: 35.1\n6071 at POINT (-117.1 35.1) took 9.2s\nlat: -117.1, long: 35.2\n6071 at POINT (-117.1 35.2) took 9.1s\nlat: -117.1, long: 35.3\n6071 at POINT 
[output truncated — several hundred lines of per-point progress logging. For each county FIPS code from 6071 through 6099, the cell first prints the county's bounding box, e.g. "6073 southwest to northeast: (-117.6, 32.5) to (-116.1, 33.5)", then walks a 0.1-degree grid over that box and, for every grid point that falls inside the county, prints "lat: <x>, long: <y>" followed by "<fips> at POINT (<x> <y>) took <t>s". Most points take 9–10 s; a handful take ~70 s, consistent with a timeout plus one retry. Note that the "lat"/"long" labels are transposed relative to the values: the first number printed is the longitude.]
POINT (-121.2 37.4) took 10.5s\nlat: -121.2, long: 37.5\n6099 at POINT (-121.2 37.5) took 9.2s\nlat: -121.2, long: 37.6\n6099 at POINT (-121.2 37.6) took 9.1s\nlat: -121.1, long: 37.3\n6099 at POINT (-121.1 37.3) took 10.9s\nlat: -121.1, long: 37.4\n6099 at POINT (-121.1 37.4) took 9.8s\nlat: -121.1, long: 37.5\n6099 at POINT (-121.1 37.5) took 11.3s\nlat: -121.1, long: 37.6\n6099 at POINT (-121.1 37.6) took 9.4s\nlat: -121.1, long: 37.7\n6099 at POINT (-121.1 37.7) took 9.4s\nlat: -121.0, long: 37.4\n6099 at POINT (-121 37.4) took 12.3s\nlat: -121.0, long: 37.6\n6099 at POINT (-121 37.6) took 11.5s\nlat: -121.0, long: 37.7\n6099 at POINT (-121 37.7) took 12.1s\nlat: -120.9, long: 37.5\n6099 at POINT (-120.9 37.5) took 11.1s\nlat: -120.9, long: 37.6\n6099 at POINT (-120.9 37.6) took 15.5s\nlat: -120.9, long: 37.7\n6099 at POINT (-120.9 37.7) took 9.5s\nlat: -120.9, long: 37.8\n6099 at POINT (-120.9 37.8) took 11.1s\nlat: -120.9, long: 37.9\n6099 at POINT (-120.9 37.9) took 11.3s\nlat: -120.9, long: 38.0\n6099 at POINT (-120.9 38) took 11.6s\nlat: -120.8, long: 37.5\n6099 at POINT (-120.8 37.5) took 11.7s\nlat: -120.8, long: 37.6\n6099 at POINT (-120.8 37.6) took 10.3s\nlat: -120.8, long: 37.7\n6099 at POINT (-120.8 37.7) took 9.2s\nlat: -120.8, long: 37.8\n6099 at POINT (-120.8 37.8) took 11.0s\nlat: -120.8, long: 37.9\n6099 at POINT (-120.8 37.9) took 12.2s\nlat: -120.7, long: 37.6\n6099 at POINT (-120.7 37.6) took 10.4s\nlat: -120.7, long: 37.7\n6099 at POINT (-120.7 37.7) took 10.9s\nlat: -120.7, long: 37.8\n6099 at POINT (-120.7 37.8) took 12.4s\nlat: -120.6, long: 37.6\n6099 at POINT (-120.6 37.6) took 9.3s\nlat: -120.6, long: 37.7\n6099 at POINT (-120.6 37.7) took 13.4s\nlat: -120.5, long: 37.6\n6099 at POINT (-120.5 37.6) took 9.6s\nlat: -120.5, long: 37.7\n6099 at POINT (-120.5 37.7) took 9.6s\n6101 southwest to northeast: (-121.9, 38.7) to (-121.4, 39.3)\nlat: -121.9, long: 39.2\n6101 at POINT (-121.9 39.2) took 12.3s\nlat: -121.9, long: 39.3\n6101 at POINT (-121.9 39.3) took 9.8s\nlat: -121.8, long: 38.9\n6101 at POINT (-121.8 38.9) took 11.0s\nlat: -121.8, long: 39.0\n6101 at POINT (-121.8 39) took 9.9s\nlat: -121.8, long: 39.1\n6101 at POINT (-121.8 39.1) took 11.2s\nlat: -121.8, long: 39.2\n6101 at POINT (-121.8 39.2) took 11.9s\nlat: -121.8, long: 39.3\n6101 at POINT (-121.8 39.3) took 9.8s\nlat: -121.7, long: 38.9\n6101 at POINT (-121.7 38.9) took 10.8s\nlat: -121.7, long: 39.0\n6101 at POINT (-121.7 39) took 10.5s\nlat: -121.7, long: 39.1\n6101 at POINT (-121.7 39.1) took 9.6s\nlat: -121.7, long: 39.2\n6101 at POINT (-121.7 39.2) took 10.8s\nlat: -121.7, long: 39.3\n6101 at POINT (-121.7 39.3) took 9.3s\nlat: -121.6, long: 38.8\n6101 at POINT (-121.6 38.8) took 9.5s\nlat: -121.6, long: 38.9\n6101 at POINT (-121.6 38.9) took 11.5s\nlat: -121.6, long: 39.0\n6101 at POINT (-121.6 39) took 10.9s\nlat: -121.5, long: 38.8\n6101 at POINT (-121.5 38.8) took 9.7s\nlat: -121.5, long: 38.9\n6101 at POINT (-121.5 38.9) took 9.1s\n6103 southwest to northeast: (-123.1, 39.8) to (-121.3, 40.5)\nlat: -123.0, long: 40.3\n6103 at POINT (-123 40.3) took 10.0s\nlat: -122.9, long: 39.8\n6103 at POINT (-122.9 39.8) took 9.3s\nlat: -122.9, long: 39.9\n6103 at POINT (-122.9 39.9) took 9.3s\nlat: -122.9, long: 40.0\n6103 at POINT (-122.9 40) took 9.7s\nlat: -122.9, long: 40.1\n6103 at POINT (-122.9 40.1) took 10.0s\nlat: -122.9, long: 40.2\n6103 at POINT (-122.9 40.2) took 9.6s\nlat: -122.9, long: 40.3\n6103 at POINT (-122.9 40.3) took 9.4s\nlat: -122.8, long: 39.9\n6103 at POINT (-122.8 39.9) took 
9.3s\nlat: -122.8, long: 40.0\n6103 at POINT (-122.8 40) took 9.4s\nlat: -122.8, long: 40.1\n6103 at POINT (-122.8 40.1) took 10.9s\nlat: -122.8, long: 40.2\n6103 at POINT (-122.8 40.2) took 9.4s\nlat: -122.8, long: 40.3\n6103 at POINT (-122.8 40.3) took 9.4s\nlat: -122.7, long: 39.9\n6103 at POINT (-122.7 39.9) took 9.3s\nlat: -122.7, long: 40.0\n6103 at POINT (-122.7 40) took 9.0s\nlat: -122.7, long: 40.1\n6103 at POINT (-122.7 40.1) took 9.1s\nlat: -122.7, long: 40.2\n6103 at POINT (-122.7 40.2) took 9.5s\nlat: -122.7, long: 40.3\n6103 at POINT (-122.7 40.3) took 9.2s\nlat: -122.6, long: 39.8\n6103 at POINT (-122.6 39.8) took 9.7s\nlat: -122.6, long: 39.9\n6103 at POINT (-122.6 39.9) took 9.4s\nlat: -122.6, long: 40.0\n6103 at POINT (-122.6 40) took 9.2s\nlat: -122.6, long: 40.1\n6103 at POINT (-122.6 40.1) took 9.4s\nlat: -122.6, long: 40.2\n6103 at POINT (-122.6 40.2) took 9.5s\nlat: -122.6, long: 40.3\n6103 at POINT (-122.6 40.3) took 9.3s\nlat: -122.5, long: 39.8\n6103 at POINT (-122.5 39.8) took 9.5s\nlat: -122.5, long: 39.9\n6103 at POINT (-122.5 39.9) took 9.2s\nlat: -122.5, long: 40.1\n6103 at POINT (-122.5 40.1) took 9.4s\nlat: -122.5, long: 40.2\n6103 at POINT (-122.5 40.2) took 9.2s\nlat: -122.5, long: 40.3\n6103 at POINT (-122.5 40.3) took 9.7s\nlat: -122.4, long: 39.8\n6103 at POINT (-122.4 39.8) took 9.7s\nlat: -122.4, long: 39.9\n6103 at POINT (-122.4 39.9) took 9.3s\nlat: -122.4, long: 40.0\n6103 at POINT (-122.4 40) took 9.9s\nlat: -122.4, long: 40.1\n6103 at POINT (-122.4 40.1) took 9.2s\nlat: -122.4, long: 40.2\n6103 at POINT (-122.4 40.2) took 69.4s\nlat: -122.4, long: 40.3\n6103 at POINT (-122.4 40.3) took 9.6s\nlat: -122.3, long: 39.8\n6103 at POINT (-122.3 39.8) took 10.6s\nlat: -122.3, long: 39.9\n6103 at POINT (-122.3 39.9) took 9.1s\nlat: -122.3, long: 40.0\n6103 at POINT (-122.3 40) took 9.2s\nlat: -122.3, long: 40.1\n6103 at POINT (-122.3 40.1) took 9.2s\nlat: -122.3, long: 40.2\n6103 at POINT (-122.3 40.2) took 9.1s\nlat: -122.3, long: 40.3\n6103 at POINT (-122.3 40.3) took 9.3s\nlat: -122.2, long: 39.8\n6103 at POINT (-122.2 39.8) took 9.7s\nlat: -122.2, long: 39.9\n6103 at POINT (-122.2 39.9) took 9.2s\nlat: -122.2, long: 40.0\n6103 at POINT (-122.2 40) took 9.1s\nlat: -122.2, long: 40.1\n6103 at POINT (-122.2 40.1) took 9.3s\nlat: -122.2, long: 40.2\n6103 at POINT (-122.2 40.2) took 9.4s\nlat: -122.2, long: 40.3\n6103 at POINT (-122.2 40.3) took 9.0s\nlat: -122.1, long: 39.8\n6103 at POINT (-122.1 39.8) took 9.4s\nlat: -122.1, long: 39.9\n6103 at POINT (-122.1 39.9) took 70.0s\nlat: -122.1, long: 40.0\n6103 at POINT (-122.1 40) took 9.2s\nlat: -122.1, long: 40.1\n6103 at POINT (-122.1 40.1) took 9.4s\nlat: -122.1, long: 40.2\n6103 at POINT (-122.1 40.2) took 9.1s\nlat: -122.1, long: 40.3\n6103 at POINT (-122.1 40.3) took 9.3s\nlat: -122.1, long: 40.4\n6103 at POINT (-122.1 40.4) took 9.1s\nlat: -122.0, long: 39.9\n6103 at POINT (-122 39.9) took 9.5s\nlat: -122.0, long: 40.1\n6103 at POINT (-122 40.1) took 9.2s\nlat: -122.0, long: 40.2\n6103 at POINT (-122 40.2) took 9.1s\nlat: -122.0, long: 40.3\n6103 at POINT (-122 40.3) took 9.6s\nlat: -122.0, long: 40.4\n6103 at POINT (-122 40.4) took 9.2s\nlat: -121.9, long: 39.9\n6103 at POINT (-121.9 39.9) took 9.6s\nlat: -121.9, long: 40.0\n6103 at POINT (-121.9 40) took 10.5s\nlat: -121.9, long: 40.1\n6103 at POINT (-121.9 40.1) took 9.3s\nlat: -121.9, long: 40.2\n6103 at POINT (-121.9 40.2) took 9.3s\nlat: -121.9, long: 40.3\n6103 at POINT (-121.9 40.3) took 9.3s\nlat: -121.9, long: 40.4\n6103 at POINT (-121.9 
40.4) took 9.1s\nlat: -121.8, long: 39.9\n6103 at POINT (-121.8 39.9) took 9.2s\nlat: -121.8, long: 40.0\n6103 at POINT (-121.8 40) took 9.5s\nlat: -121.8, long: 40.1\n6103 at POINT (-121.8 40.1) took 9.1s\nlat: -121.8, long: 40.2\n6103 at POINT (-121.8 40.2) took 9.2s\nlat: -121.8, long: 40.3\n6103 at POINT (-121.8 40.3) took 9.2s\nlat: -121.8, long: 40.4\n6103 at POINT (-121.8 40.4) took 9.8s\nlat: -121.7, long: 40.0\n6103 at POINT (-121.7 40) took 9.1s\nlat: -121.7, long: 40.1\n6103 at POINT (-121.7 40.1) took 9.4s\nlat: -121.7, long: 40.2\n6103 at POINT (-121.7 40.2) took 69.2s\nlat: -121.7, long: 40.3\n6103 at POINT (-121.7 40.3) took 9.3s\nlat: -121.7, long: 40.4\n6103 at POINT (-121.7 40.4) took 9.0s\nlat: -121.6, long: 40.1\n6103 at POINT (-121.6 40.1) took 9.5s\nlat: -121.6, long: 40.2\n6103 at POINT (-121.6 40.2) took 9.4s\nlat: -121.6, long: 40.3\n6103 at POINT (-121.6 40.3) took 9.2s\nlat: -121.6, long: 40.4\n6103 at POINT (-121.6 40.4) took 9.1s\nlat: -121.5, long: 40.2\n6103 at POINT (-121.5 40.2) took 9.3s\nlat: -121.5, long: 40.3\n6103 at POINT (-121.5 40.3) took 9.3s\nlat: -121.5, long: 40.4\n6103 at POINT (-121.5 40.4) took 9.7s\nlat: -121.4, long: 40.2\n6103 at POINT (-121.4 40.2) took 9.6s\nlat: -121.4, long: 40.3\n6103 at POINT (-121.4 40.3) took 9.2s\n6105 southwest to northeast: (-123.6, 40.0) to (-122.4, 41.4)\nlat: -123.6, long: 40.9\n6105 at POINT (-123.6 40.9) took 9.8s\nlat: -123.5, long: 40.1\n6105 at POINT (-123.5 40.1) took 9.4s\nlat: -123.5, long: 40.2\n6105 at POINT (-123.5 40.2) took 9.2s\nlat: -123.5, long: 40.3\n6105 at POINT (-123.5 40.3) took 9.5s\nlat: -123.5, long: 40.4\n6105 at POINT (-123.5 40.4) took 9.4s\nlat: -123.5, long: 40.6\n6105 at POINT (-123.5 40.6) took 9.8s\nlat: -123.5, long: 40.7\n6105 at POINT (-123.5 40.7) took 70.8s\nlat: -123.5, long: 40.8\n6105 at POINT (-123.5 40.8) took 9.7s\nlat: -123.5, long: 40.9\n6105 at POINT (-123.5 40.9) took 9.4s\nlat: -123.4, long: 40.0\n6105 at POINT (-123.4 40) took 11.2s\nlat: -123.4, long: 40.1\n6105 at POINT (-123.4 40.1) took 9.9s\nlat: -123.4, long: 40.2\n6105 at POINT (-123.4 40.2) took 10.5s\nlat: -123.4, long: 40.3\n6105 at POINT (-123.4 40.3) took 9.5s\nlat: -123.4, long: 40.4\n6105 at POINT (-123.4 40.4) took 10.7s\nlat: -123.4, long: 40.5\n6105 at POINT (-123.4 40.5) took 10.4s\nlat: -123.4, long: 40.6\n6105 at POINT (-123.4 40.6) took 10.5s\nlat: -123.4, long: 40.7\n6105 at POINT (-123.4 40.7) took 9.8s\nlat: -123.4, long: 40.8\n6105 at POINT (-123.4 40.8) took 10.5s\nlat: -123.4, long: 40.9\n6105 at POINT (-123.4 40.9) took 11.4s\nlat: -123.4, long: 41.0\n6105 at POINT (-123.4 41) took 11.3s\nlat: -123.4, long: 41.1\n6105 at POINT (-123.4 41.1) took 10.9s\nlat: -123.3, long: 40.0\n6105 at POINT (-123.3 40) took 11.5s\nlat: -123.3, long: 40.1\n6105 at POINT (-123.3 40.1) took 10.5s\nlat: -123.3, long: 40.2\n6105 at POINT (-123.3 40.2) took 9.8s\nlat: -123.3, long: 40.3\n6105 at POINT (-123.3 40.3) took 10.4s\nlat: -123.3, long: 40.4\n6105 at POINT (-123.3 40.4) took 10.8s\nlat: -123.3, long: 40.5\n6105 at POINT (-123.3 40.5) took 11.1s\nlat: -123.3, long: 40.6\n6105 at POINT (-123.3 40.6) took 10.2s\nlat: -123.3, long: 40.7\n6105 at POINT (-123.3 40.7) took 10.3s\nlat: -123.3, long: 40.8\n6105 at POINT (-123.3 40.8) took 10.2s\nlat: -123.3, long: 40.9\n6105 at POINT (-123.3 40.9) took 10.0s\nlat: -123.3, long: 41.0\n6105 at POINT (-123.3 41) took 69.5s\nlat: -123.3, long: 41.1\n6105 at POINT (-123.3 41.1) took 9.5s\nlat: -123.2, long: 40.0\n6105 at POINT (-123.2 40) took 9.5s\nlat: 
-123.2, long: 40.1\n6105 at POINT (-123.2 40.1) took 9.8s\nlat: -123.2, long: 40.2\n6105 at POINT (-123.2 40.2) took 9.8s\nlat: -123.2, long: 40.3\n6105 at POINT (-123.2 40.3) took 9.8s\nlat: -123.2, long: 40.4\n6105 at POINT (-123.2 40.4) took 9.6s\nlat: -123.2, long: 40.5\n6105 at POINT (-123.2 40.5) took 9.6s\nlat: -123.2, long: 40.6\n6105 at POINT (-123.2 40.6) took 9.4s\nlat: -123.2, long: 40.7\n6105 at POINT (-123.2 40.7) took 10.5s\nlat: -123.2, long: 40.8\n6105 at POINT (-123.2 40.8) took 9.7s\nlat: -123.2, long: 40.9\n6105 at POINT (-123.2 40.9) took 10.0s\nlat: -123.2, long: 41.0\n6105 at POINT (-123.2 41) took 9.8s\nlat: -123.1, long: 40.0\n6105 at POINT (-123.1 40) took 9.5s\nlat: -123.1, long: 40.1\n6105 at POINT (-123.1 40.1) took 9.8s\nlat: -123.1, long: 40.2\n6105 at POINT (-123.1 40.2) took 9.5s\nlat: -123.1, long: 40.3\n6105 at POINT (-123.1 40.3) took 10.0s\nlat: -123.1, long: 40.4\n6105 at POINT (-123.1 40.4) took 9.9s\nlat: -123.1, long: 40.5\n6105 at POINT (-123.1 40.5) took 10.3s\nlat: -123.1, long: 40.6\n6105 at POINT (-123.1 40.6) took 10.8s\nlat: -123.1, long: 40.7\n6105 at POINT (-123.1 40.7) took 11.3s\nlat: -123.1, long: 40.8\n6105 at POINT (-123.1 40.8) took 12.1s\nlat: -123.1, long: 40.9\n6105 at POINT (-123.1 40.9) took 12.4s\nlat: -123.1, long: 41.0\n6105 at POINT (-123.1 41) took 11.5s\nlat: -123.0, long: 40.1\n6105 at POINT (-123 40.1) took 10.9s\nlat: -123.0, long: 40.2\n6105 at POINT (-123 40.2) took 11.4s\nlat: -123.0, long: 40.4\n6105 at POINT (-123 40.4) took 11.1s\nlat: -123.0, long: 40.6\n6105 at POINT (-123 40.6) took 10.5s\nlat: -123.0, long: 40.7\n6105 at POINT (-123 40.7) took 10.2s\nlat: -123.0, long: 40.8\n6105 at POINT (-123 40.8) took 10.9s\nlat: -123.0, long: 40.9\n6105 at POINT (-123 40.9) took 11.1s\nlat: -122.9, long: 40.5\n6105 at POINT (-122.9 40.5) took 11.2s\nlat: -122.9, long: 40.6\n6105 at POINT (-122.9 40.6) took 11.3s\nlat: -122.9, long: 40.7\n6105 at POINT (-122.9 40.7) took 10.5s\nlat: -122.9, long: 40.8\n6105 at POINT (-122.9 40.8) took 11.5s\nlat: -122.9, long: 40.9\n6105 at POINT (-122.9 40.9) took 11.5s\nlat: -122.9, long: 41.0\n6105 at POINT (-122.9 41) took 10.4s\nlat: -122.9, long: 41.1\n6105 at POINT (-122.9 41.1) took 10.8s\nlat: -122.8, long: 40.6\n6105 at POINT (-122.8 40.6) took 10.9s\nlat: -122.8, long: 40.7\n6105 at POINT (-122.8 40.7) took 10.2s\nlat: -122.8, long: 40.8\n6105 at POINT (-122.8 40.8) took 10.6s\nlat: -122.8, long: 40.9\n6105 at POINT (-122.8 40.9) took 9.4s\nlat: -122.8, long: 41.0\n6105 at POINT (-122.8 41) took 10.4s\nlat: -122.8, long: 41.1\n6105 at POINT (-122.8 41.1) took 10.2s\nlat: -122.8, long: 41.2\n6105 at POINT (-122.8 41.2) took 10.4s\nlat: -122.7, long: 40.8\n6105 at POINT (-122.7 40.8) took 10.8s\nlat: -122.7, long: 40.9\n6105 at POINT (-122.7 40.9) took 10.5s\nlat: -122.7, long: 41.0\n6105 at POINT (-122.7 41) took 11.2s\nlat: -122.7, long: 41.1\n6105 at POINT (-122.7 41.1) took 11.1s\nlat: -122.7, long: 41.2\n6105 at POINT (-122.7 41.2) took 9.9s\nlat: -122.6, long: 40.9\n6105 at POINT (-122.6 40.9) took 9.9s\nlat: -122.6, long: 41.0\n6105 at POINT (-122.6 41) took 9.5s\nlat: -122.6, long: 41.1\n6105 at POINT (-122.6 41.1) took 10.5s\nlat: -122.6, long: 41.2\n6105 at POINT (-122.6 41.2) took 10.6s\nlat: -122.6, long: 41.3\n6105 at POINT (-122.6 41.3) took 10.6s\nlat: -122.5, long: 41.1\n6105 at POINT (-122.5 41.1) took 10.3s\nlat: -122.5, long: 41.3\n6105 at POINT (-122.5 41.3) took 11.4s\n6107 southwest to northeast: (-119.6, 35.8) to (-118.0, 36.8)\nlat: -119.5, long: 35.8\n6107 
at POINT (-119.5 35.8) took 13.0s\nlat: -119.5, long: 35.9\n6107 at POINT (-119.5 35.9) took 13.7s\nlat: -119.5, long: 36.1\n6107 at POINT (-119.5 36.1) took 12.2s\nlat: -119.5, long: 36.2\n6107 at POINT (-119.5 36.2) took 14.0s\nlat: -119.4, long: 35.8\n6107 at POINT (-119.4 35.8) took 12.1s\nlat: -119.4, long: 35.9\n6107 at POINT (-119.4 35.9) took 131.0s\nlat: -119.4, long: 36.0\n6107 at POINT (-119.4 36) took 10.5s\nlat: -119.4, long: 36.1\n6107 at POINT (-119.4 36.1) took 11.4s\nlat: -119.4, long: 36.2\n6107 at POINT (-119.4 36.2) took 11.2s\nlat: -119.4, long: 36.3\n6107 at POINT (-119.4 36.3) took 11.0s\nlat: -119.4, long: 36.4\n6107 at POINT (-119.4 36.4) took 12.6s\nlat: -119.4, long: 36.5\n6107 at POINT (-119.4 36.5) took 13.9s\nlat: -119.3, long: 35.8\n6107 at POINT (-119.3 35.8) took 13.3s\nlat: -119.3, long: 35.9\n6107 at POINT (-119.3 35.9) took 11.1s\nlat: -119.3, long: 36.0\n6107 at POINT (-119.3 36) took 10.3s\nlat: -119.3, long: 36.1\n6107 at POINT (-119.3 36.1) took 10.6s\nlat: -119.3, long: 36.2\n6107 at POINT (-119.3 36.2) took 11.1s\nlat: -119.3, long: 36.3\n6107 at POINT (-119.3 36.3) took 11.4s\nlat: -119.3, long: 36.4\n6107 at POINT (-119.3 36.4) took 11.5s\nlat: -119.3, long: 36.5\n6107 at POINT (-119.3 36.5) took 11.0s\nlat: -119.3, long: 36.6\n6107 at POINT (-119.3 36.6) took 10.9s\nlat: -119.2, long: 35.8\n6107 at POINT (-119.2 35.8) took 11.9s\nlat: -119.2, long: 35.9\n6107 at POINT (-119.2 35.9) took 10.6s\nlat: -119.2, long: 36.0\n6107 at POINT (-119.2 36) took 11.9s\nlat: -119.2, long: 36.1\n6107 at POINT (-119.2 36.1) took 11.6s\nlat: -119.2, long: 36.2\n6107 at POINT (-119.2 36.2) took 13.4s\nlat: -119.2, long: 36.3\n6107 at POINT (-119.2 36.3) took 11.1s\nlat: -119.2, long: 36.4\n6107 at POINT (-119.2 36.4) took 11.2s\nlat: -119.2, long: 36.5\n6107 at POINT (-119.2 36.5) took 10.8s\nlat: -119.2, long: 36.6\n6107 at POINT (-119.2 36.6) took 10.8s\nlat: -119.1, long: 35.8\n6107 at POINT (-119.1 35.8) took 11.0s\nlat: -119.1, long: 35.9\n6107 at POINT (-119.1 35.9) took 11.0s\nlat: -119.1, long: 36.0\n6107 at POINT (-119.1 36) took 11.3s\nlat: -119.1, long: 36.1\n6107 at POINT (-119.1 36.1) took 11.2s\nlat: -119.1, long: 36.2\n6107 at POINT (-119.1 36.2) took 11.1s\nlat: -119.1, long: 36.3\n6107 at POINT (-119.1 36.3) took 12.0s\nlat: -119.1, long: 36.4\n6107 at POINT (-119.1 36.4) took 10.7s\nlat: -119.1, long: 36.5\n6107 at POINT (-119.1 36.5) took 10.5s\nlat: -119.1, long: 36.6\n6107 at POINT (-119.1 36.6) took 10.5s\nlat: -119.0, long: 35.8\n6107 at POINT (-119 35.8) took 10.8s\nlat: -119.0, long: 35.9\n6107 at POINT (-119 35.9) took 11.4s\nlat: -119.0, long: 36.1\n6107 at POINT (-119 36.1) took 11.1s\nlat: -119.0, long: 36.2\n6107 at POINT (-119 36.2) took 10.3s\nlat: -119.0, long: 36.3\n6107 at POINT (-119 36.3) took 9.9s\nlat: -119.0, long: 36.4\n6107 at POINT (-119 36.4) took 10.3s\nlat: -119.0, long: 36.6\n6107 at POINT (-119 36.6) took 71.8s\nlat: -118.9, long: 35.8\n6107 at POINT (-118.9 35.8) took 11.3s\nlat: -118.9, long: 35.9\n6107 at POINT (-118.9 35.9) took 10.5s\nlat: -118.9, long: 36.0\n6107 at POINT (-118.9 36) took 10.4s\nlat: -118.9, long: 36.1\n6107 at POINT (-118.9 36.1) took 10.6s\nlat: -118.9, long: 36.2\n6107 at POINT (-118.9 36.2) took 11.1s\nlat: -118.9, long: 36.3\n6107 at POINT (-118.9 36.3) took 11.3s\nlat: -118.9, long: 36.4\n6107 at POINT (-118.9 36.4) took 11.1s\nlat: -118.9, long: 36.5\n6107 at POINT (-118.9 36.5) took 12.4s\nlat: -118.9, long: 36.6\n6107 at POINT (-118.9 36.6) took 10.4s\nlat: -118.9, long: 36.7\n6107 at 
POINT (-118.9 36.7) took 10.6s\nlat: -118.8, long: 35.8\n6107 at POINT (-118.8 35.8) took 11.2s\nlat: -118.8, long: 35.9\n6107 at POINT (-118.8 35.9) took 11.6s\nlat: -118.8, long: 36.0\n6107 at POINT (-118.8 36) took 10.3s\nlat: -118.8, long: 36.1\n6107 at POINT (-118.8 36.1) took 10.4s\nlat: -118.8, long: 36.2\n6107 at POINT (-118.8 36.2) took 10.3s\nlat: -118.8, long: 36.3\n6107 at POINT (-118.8 36.3) took 11.0s\nlat: -118.8, long: 36.4\n6107 at POINT (-118.8 36.4) took 12.5s\nlat: -118.8, long: 36.5\n6107 at POINT (-118.8 36.5) took 10.8s\nlat: -118.8, long: 36.6\n6107 at POINT (-118.8 36.6) took 10.9s\nlat: -118.8, long: 36.7\n6107 at POINT (-118.8 36.7) took 11.0s\nlat: -118.7, long: 35.8\n6107 at POINT (-118.7 35.8) took 11.3s\nlat: -118.7, long: 35.9\n6107 at POINT (-118.7 35.9) took 11.6s\nlat: -118.7, long: 36.0\n6107 at POINT (-118.7 36) took 11.0s\nlat: -118.7, long: 36.1\n6107 at POINT (-118.7 36.1) took 10.9s\nlat: -118.7, long: 36.2\n6107 at POINT (-118.7 36.2) took 10.5s\nlat: -118.7, long: 36.3\n6107 at POINT (-118.7 36.3) took 10.9s\nlat: -118.7, long: 36.4\n6107 at POINT (-118.7 36.4) took 10.5s\nlat: -118.7, long: 36.5\n6107 at POINT (-118.7 36.5) took 10.2s\nlat: -118.7, long: 36.6\n6107 at POINT (-118.7 36.6) took 11.7s\nlat: -118.7, long: 36.7\n6107 at POINT (-118.7 36.7) took 10.6s\nlat: -118.6, long: 35.8\n6107 at POINT (-118.6 35.8) took 11.6s\nlat: -118.6, long: 35.9\n6107 at POINT (-118.6 35.9) took 12.4s\nlat: -118.6, long: 36.0\n6107 at POINT (-118.6 36) took 10.7s\nlat: -118.6, long: 36.1\n6107 at POINT (-118.6 36.1) took 70.7s\nlat: -118.6, long: 36.2\n6107 at POINT (-118.6 36.2) took 10.4s\nlat: -118.6, long: 36.3\n6107 at POINT (-118.6 36.3) took 10.7s\nlat: -118.6, long: 36.4\n6107 at POINT (-118.6 36.4) took 10.3s\nlat: -118.6, long: 36.5\n6107 at POINT (-118.6 36.5) took 10.8s\nlat: -118.6, long: 36.6\n6107 at POINT (-118.6 36.6) took 11.0s\nlat: -118.6, long: 36.7\n6107 at POINT (-118.6 36.7) took 11.1s\nlat: -118.5, long: 35.8\n6107 at POINT (-118.5 35.8) took 11.0s\nlat: -118.5, long: 35.9\n6107 at POINT (-118.5 35.9) took 10.1s\nlat: -118.5, long: 36.1\n6107 at POINT (-118.5 36.1) took 11.8s\nlat: -118.5, long: 36.2\n6107 at POINT (-118.5 36.2) took 11.1s\nlat: -118.5, long: 36.3\n6107 at POINT (-118.5 36.3) took 10.8s\nlat: -118.5, long: 36.4\n6107 at POINT (-118.5 36.4) took 10.5s\nlat: -118.5, long: 36.6\n6107 at POINT (-118.5 36.6) took 12.2s\nlat: -118.5, long: 36.7\n6107 at POINT (-118.5 36.7) took 11.2s\nlat: -118.4, long: 35.8\n6107 at POINT (-118.4 35.8) took 72.2s\nlat: -118.4, long: 35.9\n6107 at POINT (-118.4 35.9) took 11.1s\nlat: -118.4, long: 36.0\n6107 at POINT (-118.4 36) took 11.2s\nlat: -118.4, long: 36.1\n6107 at POINT (-118.4 36.1) took 10.3s\nlat: -118.4, long: 36.2\n6107 at POINT (-118.4 36.2) took 11.0s\nlat: -118.4, long: 36.3\n6107 at POINT (-118.4 36.3) took 10.7s\nlat: -118.4, long: 36.4\n6107 at POINT (-118.4 36.4) took 11.0s\nlat: -118.4, long: 36.5\n6107 at POINT (-118.4 36.5) took 10.3s\nlat: -118.4, long: 36.6\n6107 at POINT (-118.4 36.6) took 70.2s\nlat: -118.4, long: 36.7\n6107 at POINT (-118.4 36.7) took 10.4s\nlat: -118.3, long: 35.8\n6107 at POINT (-118.3 35.8) took 70.8s\nlat: -118.3, long: 35.9\n6107 at POINT (-118.3 35.9) took 10.8s\nlat: -118.3, long: 36.0\n6107 at POINT (-118.3 36) took 10.6s\nlat: -118.3, long: 36.1\n6107 at POINT (-118.3 36.1) took 13.2s\nlat: -118.3, long: 36.2\n6107 at POINT (-118.3 36.2) took 10.9s\nlat: -118.3, long: 36.3\n6107 at POINT (-118.3 36.3) took 10.6s\nlat: -118.3, long: 
36.4\n6107 at POINT (-118.3 36.4) took 11.5s\nlat: -118.3, long: 36.5\n6107 at POINT (-118.3 36.5) took 12.2s\nlat: -118.3, long: 36.6\n6107 at POINT (-118.3 36.6) took 11.1s\nlat: -118.2, long: 35.8\n6107 at POINT (-118.2 35.8) took 11.2s\nlat: -118.2, long: 35.9\n6107 at POINT (-118.2 35.9) took 10.6s\nlat: -118.2, long: 36.0\n6107 at POINT (-118.2 36) took 10.7s\nlat: -118.2, long: 36.1\n6107 at POINT (-118.2 36.1) took 14.5s\nlat: -118.2, long: 36.2\n6107 at POINT (-118.2 36.2) took 11.3s\nlat: -118.2, long: 36.3\n6107 at POINT (-118.2 36.3) took 10.6s\nlat: -118.2, long: 36.4\n6107 at POINT (-118.2 36.4) took 10.7s\nlat: -118.1, long: 35.8\n6107 at POINT (-118.1 35.8) took 12.1s\nlat: -118.1, long: 35.9\n6107 at POINT (-118.1 35.9) took 10.6s\nlat: -118.1, long: 36.0\n6107 at POINT (-118.1 36) took 10.7s\nlat: -118.1, long: 36.1\n6107 at POINT (-118.1 36.1) took 10.8s\nlat: -118.1, long: 36.2\n6107 at POINT (-118.1 36.2) took 11.7s\nlat: -118.0, long: 35.9\n6107 at POINT (-118 35.9) took 11.2s\n6109 southwest to northeast: (-120.7, 37.6) to (-119.2, 38.4)\nlat: -120.6, long: 37.8\n6109 at POINT (-120.6 37.8) took 11.5s\nlat: -120.5, long: 37.8\n6109 at POINT (-120.5 37.8) took 11.6s\nlat: -120.5, long: 37.9\n6109 at POINT (-120.5 37.9) took 11.1s\nlat: -120.4, long: 37.7\n6109 at POINT (-120.4 37.7) took 11.1s\nlat: -120.4, long: 37.8\n6109 at POINT (-120.4 37.8) took 11.3s\nlat: -120.4, long: 37.9\n6109 at POINT (-120.4 37.9) took 10.5s\nlat: -120.4, long: 38.0\n6109 at POINT (-120.4 38) took 10.8s\nlat: -120.4, long: 38.1\n6109 at POINT (-120.4 38.1) took 10.7s\nlat: -120.3, long: 37.8\n6109 at POINT (-120.3 37.8) took 10.7s\nlat: -120.3, long: 37.9\n6109 at POINT (-120.3 37.9) took 11.6s\nlat: -120.3, long: 38.0\n6109 at POINT (-120.3 38) took 11.7s\nlat: -120.3, long: 38.1\n6109 at POINT (-120.3 38.1) took 10.7s\nlat: -120.3, long: 38.2\n6109 at POINT (-120.3 38.2) took 11.6s\nlat: -120.2, long: 37.8\n6109 at POINT (-120.2 37.8) took 11.2s\nlat: -120.2, long: 37.9\n6109 at POINT (-120.2 37.9) took 10.8s\nlat: -120.2, long: 38.0\n6109 at POINT (-120.2 38) took 11.8s\nlat: -120.2, long: 38.1\n6109 at POINT (-120.2 38.1) took 10.3s\nlat: -120.2, long: 38.2\n6109 at POINT (-120.2 38.2) took 11.9s\nlat: -120.2, long: 38.3\n6109 at POINT (-120.2 38.3) took 11.9s\nlat: -120.1, long: 37.9\n6109 at POINT (-120.1 37.9) took 12.8s\nlat: -120.1, long: 38.0\n6109 at POINT (-120.1 38) took 11.5s\nlat: -120.1, long: 38.1\n6109 at POINT (-120.1 38.1) took 11.8s\nlat: -120.1, long: 38.2\n6109 at POINT (-120.1 38.2) took 11.5s\nlat: -120.1, long: 38.3\n6109 at POINT (-120.1 38.3) took 12.5s\nlat: -120.1, long: 38.4\n6109 at POINT (-120.1 38.4) took 11.5s\nlat: -120.0, long: 37.8\n6109 at POINT (-120 37.8) took 11.8s\nlat: -120.0, long: 37.9\n6109 at POINT (-120 37.9) took 11.2s\nlat: -120.0, long: 38.1\n6109 at POINT (-120 38.1) took 10.9s\nlat: -120.0, long: 38.2\n6109 at POINT (-120 38.2) took 11.0s\nlat: -120.0, long: 38.3\n6109 at POINT (-120 38.3) took 11.7s\nlat: -120.0, long: 38.4\n6109 at POINT (-120 38.4) took 10.3s\nlat: -119.9, long: 37.8\n6109 at POINT (-119.9 37.8) took 11.5s\nlat: -119.9, long: 37.9\n6109 at POINT (-119.9 37.9) took 11.6s\nlat: -119.9, long: 38.0\n6109 at POINT (-119.9 38) took 11.2s\nlat: -119.9, long: 38.1\n6109 at POINT (-119.9 38.1) took 70.8s\nlat: -119.9, long: 38.2\n6109 at POINT (-119.9 38.2) took 10.3s\nlat: -119.9, long: 38.3\n6109 at POINT (-119.9 38.3) took 10.5s\nlat: -119.8, long: 37.8\n6109 at POINT (-119.8 37.8) took 10.4s\nlat: -119.8, long: 
37.9\n6109 at POINT (-119.8 37.9) took 11.7s\nlat: -119.8, long: 38.0\n6109 at POINT (-119.8 38) took 10.7s\nlat: -119.8, long: 38.1\n6109 at POINT (-119.8 38.1) took 10.6s\nlat: -119.8, long: 38.2\n6109 at POINT (-119.8 38.2) took 10.2s\nlat: -119.8, long: 38.3\n6109 at POINT (-119.8 38.3) took 10.9s\nlat: -119.7, long: 37.8\n6109 at POINT (-119.7 37.8) took 10.3s\nlat: -119.7, long: 37.9\n6109 at POINT (-119.7 37.9) took 10.6s\nlat: -119.7, long: 38.0\n6109 at POINT (-119.7 38) took 10.5s\nlat: -119.7, long: 38.1\n6109 at POINT (-119.7 38.1) took 11.0s\nlat: -119.7, long: 38.2\n6109 at POINT (-119.7 38.2) took 11.5s\nlat: -119.7, long: 38.3\n6109 at POINT (-119.7 38.3) took 11.8s\nlat: -119.7, long: 38.4\n6109 at POINT (-119.7 38.4) took 71.7s\nlat: -119.6, long: 37.9\n6109 at POINT (-119.6 37.9) took 11.3s\nlat: -119.6, long: 38.0\n6109 at POINT (-119.6 38) took 10.5s\nlat: -119.6, long: 38.1\n6109 at POINT (-119.6 38.1) took 10.3s\nlat: -119.5, long: 37.9\n6109 at POINT (-119.5 37.9) took 10.4s\nlat: -119.5, long: 38.1\n6109 at POINT (-119.5 38.1) took 11.5s\nlat: -119.4, long: 37.9\n6109 at POINT (-119.4 37.9) took 10.3s\nlat: -119.4, long: 38.0\n6109 at POINT (-119.4 38) took 11.8s\nlat: -119.4, long: 38.1\n6109 at POINT (-119.4 38.1) took 10.7s\nlat: -119.3, long: 37.8\n6109 at POINT (-119.3 37.8) took 11.3s\nlat: -119.3, long: 37.9\n6109 at POINT (-119.3 37.9) took 11.2s\n6111 southwest to northeast: (-119.5, 34.0) to (-118.6, 34.9)\nlat: -119.4, long: 34.4\n6111 at POINT (-119.4 34.4) took 10.8s\nlat: -119.4, long: 34.5\n6111 at POINT (-119.4 34.5) took 11.1s\nlat: -119.4, long: 34.6\n6111 at POINT (-119.4 34.6) took 10.9s\nlat: -119.4, long: 34.7\n6111 at POINT (-119.4 34.7) took 10.6s\nlat: -119.4, long: 34.8\n6111 at POINT (-119.4 34.8) took 11.2s\nlat: -119.4, long: 34.9\n6111 at POINT (-119.4 34.9) took 10.6s\nlat: -119.3, long: 34.3\n6111 at POINT (-119.3 34.3) took 10.8s\nlat: -119.3, long: 34.4\n6111 at POINT (-119.3 34.4) took 10.1s\nlat: -119.3, long: 34.5\n6111 at POINT (-119.3 34.5) took 10.4s\nlat: -119.3, long: 34.6\n6111 at POINT (-119.3 34.6) took 14.1s\nlat: -119.3, long: 34.7\n6111 at POINT (-119.3 34.7) took 11.5s\nlat: -119.3, long: 34.8\n6111 at POINT (-119.3 34.8) took 11.2s\nlat: -119.2, long: 34.2\n6111 at POINT (-119.2 34.2) took 10.9s\nlat: -119.2, long: 34.3\n6111 at POINT (-119.2 34.3) took 10.9s\nlat: -119.2, long: 34.4\n6111 at POINT (-119.2 34.4) took 10.5s\nlat: -119.2, long: 34.5\n6111 at POINT (-119.2 34.5) took 11.1s\nlat: -119.2, long: 34.6\n6111 at POINT (-119.2 34.6) took 10.4s\nlat: -119.2, long: 34.7\n6111 at POINT (-119.2 34.7) took 12.0s\nlat: -119.2, long: 34.8\n6111 at POINT (-119.2 34.8) took 10.3s\nlat: -119.1, long: 34.1\n6111 at POINT (-119.1 34.1) took 10.7s\nlat: -119.1, long: 34.2\n6111 at POINT (-119.1 34.2) took 11.6s\nlat: -119.1, long: 34.3\n6111 at POINT (-119.1 34.3) took 11.8s\nlat: -119.1, long: 34.4\n6111 at POINT (-119.1 34.4) took 12.4s\nlat: -119.1, long: 34.5\n6111 at POINT (-119.1 34.5) took 10.9s\nlat: -119.1, long: 34.6\n6111 at POINT (-119.1 34.6) took 10.5s\nlat: -119.1, long: 34.7\n6111 at POINT (-119.1 34.7) took 14.2s\nlat: -119.1, long: 34.8\n6111 at POINT (-119.1 34.8) took 14.1s\nlat: -119.0, long: 34.1\n6111 at POINT (-119 34.1) took 11.0s\nlat: -119.0, long: 34.2\n6111 at POINT (-119 34.2) took 11.1s\nlat: -119.0, long: 34.3\n6111 at POINT (-119 34.3) took 10.8s\nlat: -119.0, long: 34.4\n6111 at POINT (-119 34.4) took 11.1s\nlat: -119.0, long: 34.6\n6111 at POINT (-119 34.6) took 12.4s\nlat: -119.0, long: 
34.7\n6111 at POINT (-119 34.7) took 10.6s\nlat: -119.0, long: 34.8\n6111 at POINT (-119 34.8) took 10.5s\nlat: -118.9, long: 34.1\n6111 at POINT (-118.9 34.1) took 11.4s\nlat: -118.9, long: 34.2\n6111 at POINT (-118.9 34.2) took 11.0s\nlat: -118.9, long: 34.3\n6111 at POINT (-118.9 34.3) took 10.1s\nlat: -118.9, long: 34.4\n6111 at POINT (-118.9 34.4) took 10.7s\nlat: -118.9, long: 34.5\n6111 at POINT (-118.9 34.5) took 10.8s\nlat: -118.9, long: 34.6\n6111 at POINT (-118.9 34.6) took 11.5s\nlat: -118.9, long: 34.7\n6111 at POINT (-118.9 34.7) took 10.1s\nlat: -118.8, long: 34.2\n6111 at POINT (-118.8 34.2) took 10.5s\nlat: -118.8, long: 34.3\n6111 at POINT (-118.8 34.3) took 11.5s\nlat: -118.8, long: 34.4\n6111 at POINT (-118.8 34.4) took 11.1s\nlat: -118.8, long: 34.5\n6111 at POINT (-118.8 34.5) took 11.3s\nlat: -118.8, long: 34.6\n6111 at POINT (-118.8 34.6) took 12.1s\nlat: -118.7, long: 34.2\n6111 at POINT (-118.7 34.2) took 10.4s\nlat: -118.7, long: 34.3\n6111 at POINT (-118.7 34.3) took 10.9s\nlat: -118.7, long: 34.4\n6111 at POINT (-118.7 34.4) took 10.5s\n6113 southwest to northeast: (-122.4, 38.3) to (-121.5, 38.9)\nlat: -122.4, long: 38.9\n6113 at POINT (-122.4 38.9) took 11.9s\nlat: -122.3, long: 38.9\n6113 at POINT (-122.3 38.9) took 10.6s\nlat: -122.2, long: 38.7\n6113 at POINT (-122.2 38.7) took 11.0s\nlat: -122.2, long: 38.8\n6113 at POINT (-122.2 38.8) took 18.4s\nlat: -122.2, long: 38.9\n6113 at POINT (-122.2 38.9) took 10.4s\nlat: -122.1, long: 38.6\n6113 at POINT (-122.1 38.6) took 11.0s\nlat: -122.1, long: 38.7\n6113 at POINT (-122.1 38.7) took 10.4s\nlat: -122.1, long: 38.8\n6113 at POINT (-122.1 38.8) took 11.7s\nlat: -122.1, long: 38.9\n6113 at POINT (-122.1 38.9) took 10.2s\nlat: -122.0, long: 38.6\n6113 at POINT (-122 38.6) took 10.4s\nlat: -122.0, long: 38.7\n6113 at POINT (-122 38.7) took 10.8s\nlat: -122.0, long: 38.8\n6113 at POINT (-122 38.8) took 10.2s\nlat: -122.0, long: 38.9\n6113 at POINT (-122 38.9) took 9.7s\nlat: -121.9, long: 38.6\n6113 at POINT (-121.9 38.6) took 10.9s\nlat: -121.9, long: 38.7\n6113 at POINT (-121.9 38.7) took 10.6s\nlat: -121.9, long: 38.8\n6113 at POINT (-121.9 38.8) took 10.3s\nlat: -121.9, long: 38.9\n6113 at POINT (-121.9 38.9) took 10.6s\nlat: -121.8, long: 38.6\n6113 at POINT (-121.8 38.6) took 11.3s\nlat: -121.8, long: 38.7\n6113 at POINT (-121.8 38.7) took 11.8s\nlat: -121.8, long: 38.8\n6113 at POINT (-121.8 38.8) took 10.2s\nlat: -121.7, long: 38.6\n6113 at POINT (-121.7 38.6) took 11.1s\nlat: -121.7, long: 38.7\n6113 at POINT (-121.7 38.7) took 10.2s\nlat: -121.7, long: 38.8\n6113 at POINT (-121.7 38.8) took 10.4s\nlat: -121.6, long: 38.4\n6113 at POINT (-121.6 38.4) took 11.6s\nlat: -121.6, long: 38.5\n6113 at POINT (-121.6 38.5) took 10.5s\nlat: -121.6, long: 38.6\n6113 at POINT (-121.6 38.6) took 11.3s\n6115 southwest to northeast: (-121.6, 38.9) to (-121.0, 39.6)\nlat: -121.6, long: 39.1\n6115 at POINT (-121.6 39.1) took 10.2s\nlat: -121.6, long: 39.2\n6115 at POINT (-121.6 39.2) took 11.3s\nlat: -121.6, long: 39.3\n6115 at POINT (-121.6 39.3) took 11.5s\nlat: -121.5, long: 39.1\n6115 at POINT (-121.5 39.1) took 11.1s\nlat: -121.5, long: 39.2\n6115 at POINT (-121.5 39.2) took 12.7s\nlat: -121.5, long: 39.3\n6115 at POINT (-121.5 39.3) took 11.0s\nlat: -121.4, long: 39.1\n6115 at POINT (-121.4 39.1) took 11.7s\nlat: -121.4, long: 39.2\n6115 at POINT (-121.4 39.2) took 10.8s\nlat: -121.4, long: 39.3\n6115 at POINT (-121.4 39.3) took 10.7s\nlat: -121.3, long: 39.1\n6115 at POINT (-121.3 39.1) took 11.4s\nlat: -121.3, 
long: 39.2\n6115 at POINT (-121.3 39.2) took 11.0s\nlat: -121.3, long: 39.3\n6115 at POINT (-121.3 39.3) took 10.5s\nlat: -121.3, long: 39.4\n6115 at POINT (-121.3 39.4) took 10.7s\nlat: -121.3, long: 39.5\n6115 at POINT (-121.3 39.5) took 11.4s\nlat: -121.2, long: 39.4\n6115 at POINT (-121.2 39.4) took 11.6s\nlat: -121.2, long: 39.5\n6115 at POINT (-121.2 39.5) took 10.4s\nlat: -121.1, long: 39.4\n6115 at POINT (-121.1 39.4) took 10.8s\nlat: -121.1, long: 39.5\n6115 at POINT (-121.1 39.5) took 10.9s\n"
]
],
[
[
"Create a `drought` table for holding the drought score for all California counties between 1 Jan 2000 and 21 Dec 2018.",
"_____no_output_____"
]
],
[
[
"conn = sqlite3.connect('../data/fires.sqlite')\ncur = conn.cursor()\n\ncur.execute('DROP TABLE IF EXISTS drought')\ncur.execute('''CREATE TABLE drought (\n date TEXT NOT NULL,\n fips INTEGER NOT NULL,\n drought_score REAL,\n PRIMARY KEY(date, fips)\n)''')\n\nconn.commit()\nconn.close()",
"_____no_output_____"
]
],
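[
[
"The update-based flow below assumes the `drought` table already holds one NULL-score row per (date, fips) pair. A minimal sketch of one way to seed those rows (the daily fan-out and the FIPS range are assumptions for illustration, not necessarily how the table was originally populated):",
"_____no_output_____"
]
],
[
[
"import pandas as pd\n\n# Hypothetical seeding step: one NULL-score row per day per California county.\nconn = sqlite3.connect('../data/fires.sqlite')\ncur = conn.cursor()\n\ndates = pd.date_range('2000-01-01', '2018-12-31').strftime('%Y-%m-%d')\nfips_codes = range(6001, 6116, 2)  # California county FIPS codes are odd: 6001, 6003, ..., 6115\n\ncur.executemany(\n    'INSERT OR IGNORE INTO drought (date, fips) VALUES (?, ?)',\n    [(d, f) for d in dates for f in fips_codes]\n)\n\nconn.commit()\nconn.close()",
"_____no_output_____"
]
],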
[
[
"Pull the drought scores from the [US Drought Monitor website](https://droughtmonitor.unl.edu/).",
"_____no_output_____"
]
],
[
[
"import requests\n\ndef fetch_drought(fips):\n return requests.get(\n 'https://usdmdataservices.unl.edu/api/CountyStatistics/GetDroughtSeverityStatisticsByAreaPercent',\n {\n 'aoi': fips,\n 'startdate': '10/1/1999',\n 'enddate': '12/31/2018',\n 'statisticsType': 1,\n }\n ).json()",
"_____no_output_____"
]
],
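[
[
"A quick spot check of the helper, assuming the response shape the next cell relies on: each weekly record carries `ValidStart`/`ValidEnd` dates plus `D0`-`D4` area percentages.",
"_____no_output_____"
]
],
[
[
"# Spot check against one county (06001 = Alameda); prints the number of weekly\n# records and the fields consumed downstream.\nrecords = fetch_drought('06001')\nprint(len(records))\nprint(records[0]['ValidStart'], records[0]['ValidEnd'], records[0]['D0'])",
"_____no_output_____"
]
],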
[
[
"For each county that doesn't have a drought score pull the drought score from US Drought Monintor.",
"_____no_output_____"
]
],
[
[
"conn = sqlite3.connect('../data/fires.sqlite')\ncur = conn.cursor()\n\ncur.execute('SELECT DISTINCT fips FROM drought WHERE drought_score IS NULL')\n\nfor row in cur.fetchall():\n fips = row[0]\n fips_5_char = f'0{str(fips)}' if fips < 10000 else str(fips)\n\n print(f'Fetch drought score for {fips_5_char}')\n json = fetch_drought(fips_5_char)\n\n for item in json:\n drought_score = float(item['D0'])/100 + float(item['D1'])/100 + float(item['D2'])/100 + float(item['D3'])/100 + float(item['D4'])/100\n\n # Backfill Jan 4 score to Jan 1-3 of 2000 as it seems to be missing\n start = '2000-01-01' if item['ValidStart'] <= '2000-01-04' else item['ValidStart']\n\n drought_params = { 'fips': fips, 'drought_score': drought_score, 'start': start, 'end': item['ValidEnd'] }\n \n cur.execute('''\n UPDATE drought SET\n drought_score = :drought_score\n WHERE\n fips = :fips AND date >= :start AND date <= :end\n ''', drought_params)\n\n conn.commit()\n \nconn.close()\n",
"Fetch drought score for 06001\nFetch drought score for 06003\nFetch drought score for 06005\nFetch drought score for 06007\nFetch drought score for 06009\nFetch drought score for 06011\nFetch drought score for 06013\nFetch drought score for 06015\nFetch drought score for 06017\nFetch drought score for 06019\nFetch drought score for 06021\nFetch drought score for 06023\nFetch drought score for 06025\nFetch drought score for 06027\nFetch drought score for 06029\nFetch drought score for 06031\nFetch drought score for 06033\nFetch drought score for 06035\nFetch drought score for 06037\nFetch drought score for 06039\nFetch drought score for 06041\nFetch drought score for 06043\nFetch drought score for 06045\nFetch drought score for 06047\nFetch drought score for 06049\nFetch drought score for 06051\nFetch drought score for 06053\nFetch drought score for 06055\nFetch drought score for 06057\nFetch drought score for 06059\nFetch drought score for 06061\nFetch drought score for 06063\nFetch drought score for 06065\nFetch drought score for 06067\nFetch drought score for 06069\nFetch drought score for 06071\nFetch drought score for 06073\nFetch drought score for 06075\nFetch drought score for 06077\nFetch drought score for 06079\nFetch drought score for 06081\nFetch drought score for 06083\nFetch drought score for 06085\nFetch drought score for 06087\nFetch drought score for 06089\nFetch drought score for 06091\nFetch drought score for 06093\nFetch drought score for 06095\nFetch drought score for 06097\nFetch drought score for 06099\nFetch drought score for 06101\nFetch drought score for 06103\nFetch drought score for 06105\nFetch drought score for 06107\nFetch drought score for 06109\nFetch drought score for 06111\nFetch drought score for 06113\nFetch drought score for 06115\n"
]
],
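[
[
"The score above is simply `(D0 + D1 + D2 + D3 + D4) / 100`, summing the five area percentages so that more severe drought pushes the score higher (up to 5.0 when the whole county is in D4, assuming the percentages are cumulative across categories). A worked example with made-up numbers:",
"_____no_output_____"
]
],
[
[
"# Made-up weekly record, for illustration only.\nsample = {'D0': '80.0', 'D1': '55.0', 'D2': '20.0', 'D3': '5.0', 'D4': '0.0'}\n\nscore = sum(float(sample[f'D{i}']) / 100 for i in range(5))\nprint(round(score, 2))  # 0.8 + 0.55 + 0.2 + 0.05 + 0.0 = 1.6",
"_____no_output_____"
]
],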
[
[
"Backfill any missing county identifiers (FIPS codes) for California fires.",
"_____no_output_____"
]
],
[
[
"conn = sqlite3.connect('../data/fires.sqlite')\ncur = conn.cursor()\n\ncur.execute('SELECT longitude, latitude FROM fires WHERE fips = 0 order by longitude, latitude')\n\nfor row in cur.fetchall():\n long = row[0]\n lat = row[1]\n found = False\n min_dist = 180\n closest_fips = 0\n\n for fips, county in df_county.iterrows():\n region = shapely.wkt.loads(county['geo_multipolygon'])\n point = Point(long, lat)\n\n if region.contains(point):\n print(f'{point} is in {fips}')\n cur.execute('''\n UPDATE fires SET fips = :fips\n WHERE longitude = :longitude AND latitude = :latitude\n ''', { 'fips': fips, 'longitude': long, 'latitude': lat })\n conn.commit()\n found = True\n break\n\n dist = region.boundary.distance(point)\n\n if min_dist > dist:\n min_dist = dist\n closest_fips = fips\n\n if not found:\n print(f'{point} not found. Closest county, by {round(min_dist, 3)}, is {closest_fips}')\n cur.execute('''\n UPDATE fires SET fips = :fips\n WHERE longitude = :longitude AND latitude = :latitude\n ''', { 'fips': closest_fips, 'longitude': long, 'latitude': lat })\n conn.commit()\n \nconn.close()",
"_____no_output_____"
],
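[
"# A toy illustration of the matching logic above (shapes are made up): a point\n# inside a county polygon is claimed via `contains`; a point outside every\n# polygon falls back to the county whose boundary is nearest.\nfrom shapely.geometry import Point, Polygon\n\ncounty_a = Polygon([(0, 0), (2, 0), (2, 2), (0, 2)])\ncounty_b = Polygon([(3, 0), (5, 0), (5, 2), (3, 2)])\n\ninside = Point(1, 1)\nbetween = Point(2.4, 1)\n\nprint(county_a.contains(inside))            # True\nprint(county_a.boundary.distance(between))  # 0.4 -> closest county is A\nprint(county_b.boundary.distance(between))  # 0.6",
"_____no_output_____"
],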
[
"conn = sqlite3.connect('../data/fires.sqlite')\ncur = conn.cursor()\n\ndf = pd.read_sql_query('select * from fires', conn)\n\nfinal = df.isna().sum()\ncols = []\nfor count, col in zip(final,list(df.columns)):\n if count > 0:\n cols.append(col)\n\nprint(f'Columns with null: {cols}')\n\nconn.close()",
"_____no_output_____"
],
[
"conn = sqlite3.connect('../data/fires.sqlite')\ncur = conn.cursor()\n\ncur.execute(\"\"\"UPDATE weather_county SET drought_score = (\n select drought_score\n from drought\n where\n drought.date = weather_county.date\n and drought.fips = weather_county.fips\n)\nwhere\n weather_county.drought_score is null\n\"\"\")\n\ncur.execute(\"\"\"UPDATE weather_geo SET drought_score = (\n select drought_score\n from drought\n where\n drought.date = weather_geo.date\n and drought.fips = weather_geo.fips\n)\nwhere\n weather_geo.drought_score = 0\n\"\"\")\n\nconn.commit()\nconn.close()",
"_____no_output_____"
],
[
"conn = sqlite3.connect('../data/fires.sqlite')\ncur = conn.cursor()\n\ncur.execute('ALTER TABLE weather_county ADD COLUMN month INTEGER NOT NULL DEFAULT 0')\ncur.execute(\"\"\"UPDATE weather_county SET month = CAST(strftime('%m', date) as 'INTEGER') WHERE month = 0\"\"\")\n\ncur.execute('DROP INDEX IF EXISTS idx_weather_county_fips_date')\ncur.execute('CREATE INDEX idx_weather_county_fips_date ON weather_county(date, fips)')\n\nconn.commit()\nconn.close()",
"_____no_output_____"
]
],
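[
[
"A quick sanity check of the month extraction used above, run against an in-memory SQLite database:",
"_____no_output_____"
]
],
[
[
"# strftime('%m', ...) yields a zero-padded string; the CAST turns it into an integer.\nmem = sqlite3.connect(':memory:')\nprint(mem.execute(\"select cast(strftime('%m', '2018-12-21') as 'INTEGER')\").fetchone())  # (12,)\nprint(mem.execute(\"select cast(strftime('%m', '2000-01-03') as 'INTEGER')\").fetchone())  # (1,)\nmem.close()",
"_____no_output_____"
]
],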
[
[
"Load the geospatial perimeter from the California State GeoPortal API[<sup>6</sup>](#acknowledgements) For large fires (class `D` of 100+ acres)",
"_____no_output_____"
]
],
[
[
"import urllib.parse\nimport re\nfrom datetime import datetime, timedelta\nfrom shapely.geometry import Polygon\nfrom shapely import wkt\n\nconn = sqlite3.connect('../data/fires.sqlite')\ncur = conn.cursor()\n\ndf_fires = pd.read_sql_query(\"\"\"\nselect\n fpa_id, longitude, latitude, year, date, fire_name, fire_size,\n ics_209_plus_incident_join_id, local_incident_id, local_fire_report_id, nwcg_reporting_unit_id\nfrom fires\nwhere\n geo_polygon is null\n and fire_size_class >= 'D'\n and (\n fire_name is not null\n or local_incident_id is not null\n or ics_209_plus_incident_join_id like '%_%'\n )\norder by date\n\"\"\", conn)\n\nurl = 'https://egis.fire.ca.gov/arcgis/rest/services/FRAP/FirePerimeters_FS/FeatureServer/0/query?outFields=*&outSR=4326&f=json'\n\nfor i, fire in df_fires.iterrows():\n fire_name = (fire.fire_name or 'N/A').replace(\"'\", \"''\").replace(\" COMPLEX\", \"\").replace(\" LIGHTNING\", \"\")\n fire_name_alt = 'N/A'\n report_id = str(fire.local_fire_report_id or 'N/A')\n\n regex_match = re.search(r'_(CA|NV)-([^-]+)-([^-]+)_([^_]+)', fire.ics_209_plus_incident_join_id or '')\n if (regex_match):\n unit_id = regex_match.group(2)\n incident_id = regex_match.group(3) or 'N/A'\n fire_name_alt = (regex_match.group(4) or 'N/A').replace(\"'\", \"''\").replace(\" COMPLEX\", \"\").replace(\" LIGHTNING\", \"\")\n else:\n regex_match = re.search(r'^([A-Z]{3})(\\d+)', fire.local_incident_id or '')\n if (regex_match):\n unit_id = regex_match.group(1)\n incident_id = regex_match.group(2) or 'N/A'\n else:\n unit_id = (fire.nwcg_reporting_unit_id or '').replace('USCA', '')\n incident_id = str(fire.ics_209_plus_incident_join_id or fire.local_incident_id or fire.local_fire_report_id or 'N/A').replace(unit_id, '')\n\n date = datetime.strptime(fire.date, '%Y-%m-%d')\n date_before = date + timedelta(days=-1)\n date_after = date + timedelta(days=1)\n date_range = f\"ALARM_DATE>=DATE '{datetime.strftime(date_before, '%Y-%m-%d')}' and ALARM_DATE<=DATE '{datetime.strftime(date_after, '%Y-%m-%d')}'\"\n\n geo_query = f\"geometryType=esriGeometryPoint&geometry={fire.longitude},{fire.latitude}&spatialRel=esriSpatialRelIntersects&inSR=4326\"\n\n where = urllib.parse.quote(f\"{date_range} \\\nand STATE='CA' \\\nand ( \\\n UNIT_ID='{unit_id}' or REPORT_AC={fire.fire_size or -1} or FIRE_NAME like '{fire_name}%' or FIRE_NAME like '{fire_name_alt}%' \\\n or INC_NUM='{incident_id.zfill(8)}' or INC_NUM='{incident_id.zfill(6)}' or INC_NUM='{report_id.zfill(8)}' or FIRE_NUM='{report_id.zfill(6)}' )\")\n result = requests.get(f'{url}&where={where}&{geo_query}').json()['features']\n\n if (len(result) == 0):\n where = urllib.parse.quote(f\"{date_range} \\\n and STATE='CA' \\\n and ( UNIT_ID='{unit_id}' or REPORT_AC={fire.fire_size or -1} ) \\\n and ( FIRE_NAME='{fire_name}' or FIRE_NAME='{fire_name_alt}' or INC_NUM='{incident_id.zfill(8)}' or INC_NUM='{incident_id.zfill(6)}' or INC_NUM='{report_id.zfill(8)}' or FIRE_NUM='{report_id.zfill(6)}' )\")\n result = requests.get(f'{url}&where={where}').json()['features']\n\n if (len(result) == 0):\n where = urllib.parse.quote(f\"{date_range} \\\nand STATE='CA' \\\nand ( INC_NUM='{incident_id.zfill(8)}' or INC_NUM='{incident_id.zfill(6)}' or INC_NUM='{report_id.zfill(8)}' or FIRE_NUM='{report_id.zfill(6)}' or REPORT_AC={fire.fire_size or -1} ) \\\nand ( FIRE_NAME like '{fire_name}%' or FIRE_NAME like '{fire_name_alt}%' )\")\n result = requests.get(f'{url}&where={where}').json()['features']\n\n if (len(result) == 0):\n where = urllib.parse.quote(f\"YEAR_={fire.year} 
\\\nand STATE='CA' \\\nand UNIT_ID='{unit_id}' \\\nand ( FIRE_NAME='{fire_name}' or FIRE_NAME='{fire_name_alt}' ) \\\nand ( REPORT_AC={fire.fire_size or -1} or INC_NUM='{incident_id.zfill(8)}' or INC_NUM='{incident_id.zfill(6)}' or INC_NUM='{report_id.zfill(8)}' or FIRE_NUM='{report_id.zfill(6)}' )\")\n result = requests.get(f'{url}&where={where}').json()['features']\n\n if (len(result) >= 1):\n geo_json = result[0]['geometry']['rings'][0]\n geo = Polygon(geo_json)\n\n print(f'Saved {len(result)} match(es) for {fire.fpa_id if fire_name == \"N/A\" else fire_name} --- {where} {geo_query}')\n cur.execute(\"\"\"\n update fires set geo_polygon = :polygon where fpa_id = :fpa_id\n \"\"\", { 'polygon': wkt.dumps(geo), 'fpa_id': fire.fpa_id })\n\n conn.commit()\n # else:\n # print(f'MISSING: {fire_name} --- {geo_query}')\n # print(f'Could not find {fire.fire_name} on {date_before} - {date_after} ({incident_id}, size: {fire.fire_size})')\n\nconn.close()",
"Saved HILL --- ALARM_DATE%3E%3DDATE%20%272012-06-22%27%20and%20ALARM_DATE%3C%3DDATE%20%272012-06-24%27%20%20%20and%20STATE%3D%27CA%27%20%20%20and%20%28%20UNIT_ID%3D%27LPF%27%20or%20REPORT_AC%3D689.0%20%29%20%20%20and%20%28%20FIRE_NAME%3D%27HILL%27%20or%20FIRE_NAME%3D%27HILL%27%20or%20INC_NUM%3D%2700001505%27%20or%20INC_NUM%3D%27001505%27%20or%20INC_NUM%3D%27000021.0%27%20or%20FIRE_NUM%3D%270021.0%27%20%29 geometryType=esriGeometryPoint&geometry=-118.8683333,34.73111111&spatialRel=esriSpatialRelIntersects&inSR=4326\n"
]
],
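[
[
"The matching relies on parsing the unit id, local incident number and fire name out of the ICS-209-plus join id. An illustration of the regular expression used above (the sample id is made up, but shaped like the one behind the `HILL` match logged above):",
"_____no_output_____"
]
],
[
[
"import re\n\n# Group 2 = unit id, group 3 = local incident number, group 4 = fire name.\nsample_id = '2012_CA-LPF-001505_HILL'  # hypothetical join id\nm = re.search(r'_(CA|NV)-([^-]+)-([^-]+)_([^_]+)', sample_id)\nprint(m.group(2), m.group(3), m.group(4))  # LPF 001505 HILL",
"_____no_output_____"
]
],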
[
[
"Backfill prior fires at the same longitude and latitude for 1, 2, 3, 4 and 5 years back respectively.",
"_____no_output_____"
]
],
[
[
"conn = sqlite3.connect('../data/fires.sqlite')\ncur = conn.cursor()\n\ncur.execute(\"\"\"UPDATE fires SET prior_fire_0_1_year = 1\nwhere\n exists(\n select 1\n from fires prior\n where\n prior.longitude = fires.longitude\n and prior.latitude = fires.latitude\n and julianday(fires.date) - julianday(prior.date) > 30\n\t and julianday(fires.date) - julianday(prior.date) <= 365\n )\n\"\"\")\n\ncur.execute(\"\"\"UPDATE fires SET prior_fire_1_2_year = 1\nwhere\n exists(\n select 1\n from fires prior\n where\n prior.longitude = fires.longitude\n and prior.latitude = fires.latitude\n and julianday(fires.date) - julianday(prior.date) > 365\n\t and julianday(fires.date) - julianday(prior.date) <= (365 * 2)\n )\n\"\"\")\n\ncur.execute(\"\"\"UPDATE fires SET prior_fire_2_3_year = 1\nwhere\n exists(\n select 1\n from fires prior\n where\n prior.longitude = fires.longitude\n and prior.latitude = fires.latitude\n and julianday(fires.date) - julianday(prior.date) > (365 * 2)\n\t and julianday(fires.date) - julianday(prior.date) <= (365 * 3)\n )\n\"\"\")\n\ncur.execute(\"\"\"UPDATE fires SET prior_fire_3_4_year = 1\nwhere\n exists(\n select 1\n from fires prior\n where\n prior.longitude = fires.longitude\n and prior.latitude = fires.latitude\n and julianday(fires.date) - julianday(prior.date) > (365 * 3)\n\t and julianday(fires.date) - julianday(prior.date) <= (365 * 4)\n )\n\"\"\")\n\ncur.execute(\"\"\"UPDATE fires SET prior_fire_4_5_year = 1\nwhere\n exists(\n select 1\n from fires prior\n where\n prior.longitude = fires.longitude\n and prior.latitude = fires.latitude\n and julianday(fires.date) - julianday(prior.date) > (365 * 4)\n\t and julianday(fires.date) - julianday(prior.date) <= (365 * 5)\n )\n\"\"\")\n\nconn.commit()\nconn.close()",
"_____no_output_____"
]
],
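[
[
"`julianday` turns an ISO date into a day count, so the differences above are whole-day spans; a one-line check:",
"_____no_output_____"
]
],
[
[
"mem = sqlite3.connect(':memory:')\nprint(mem.execute(\"select julianday('2011-01-01') - julianday('2010-01-01')\").fetchone())  # (365.0,)\nmem.close()",
"_____no_output_____"
]
],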
[
[
"Backfill fires if the fire's longitude and latitude origin was within the perimeter of the prior fire for the last 1, 2, 3, 4 and 5 years respectively.",
"_____no_output_____"
]
],
[
[
"import time\nimport numpy as np\n\nconn = sqlite3.connect('../data/fires.sqlite')\ncur = conn.cursor()\n\ndf_prior_all = pd.read_sql_query(\"\"\"\n select date, geo_polygon from fires\n where geo_polygon is not null\n order by date\n\"\"\", conn, parse_dates={'date': {'format': '%Y-%m-%d'}})\n\ndef to_geo(pair):\n return (pair.date, shapely.wkt.loads(pair.geo_polygon))\n\nprior_all = tuple(map(to_geo, df_prior_all.itertuples()))\n\ndf_fires = pd.read_sql_query(\"\"\"\n select fpa_id, fire_name, date, longitude, latitude from fires\n order by date\n\"\"\", conn, index_col=\"fpa_id\", parse_dates={'date': {'format': '%Y-%m-%d'}})\n\nfor fire_id, fire in df_fires.iterrows():\n start = time.time()\n found = False\n fire_pt = Point(fire.longitude, fire.latitude)\n\n for year_back in range(0, 5):\n max_date = fire.date - pd.DateOffset(days=(365 * year_back))\n min_date = fire.date - pd.DateOffset(days=(365 * (year_back + 1)))\n\n for prior in prior_all:\n prior_date = prior[0]\n \n if (prior_date < max_date and prior_date >= min_date):\n prior_perimeter = prior[1]\n\n if prior_perimeter.contains(fire_pt):\n print(f'{fire.fire_name or fire_id} on {fire.date.strftime(\"%Y-%m-%d\")} was within prior fire perimeter {year_back}-{year_back+1} years before')\n cur.execute(f\"UPDATE fires SET prior_fire_{year_back}_{year_back+1}_year = 1 WHERE fpa_id = :fpa_id\", { 'fpa_id': fire_id })\n conn.commit()\n found = True\n break\n \n if (prior_date >= fire.date):\n break\n\n if (found):\n end = time.time()\n print(f'{fire.fire_name or fire_id} took {round(end - start, 3)}s')\n\nconn.close()",
"DEL LOMA on 2010-08-25 was within prior fire perimeter 2-3 years before\nDEL LOMA took 0.028s\nGOBBLER on 2010-08-25 was within prior fire perimeter 0-1 years before\nGOBBLER took 0.04s\nWEBER on 2010-08-25 was within prior fire perimeter 2-3 years before\nWEBER took 0.028s\nMISSION on 2010-08-25 was within prior fire perimeter 2-3 years before\nMISSION took 0.028s\nMESA on 2010-08-25 was within prior fire perimeter 2-3 years before\nMESA took 0.028s\nCASTILLE on 2010-08-26 was within prior fire perimeter 3-4 years before\nCASTILLE took 0.028s\nCASTILLE CPLX POPPET on 2010-08-26 was within prior fire perimeter 3-4 years before\nCASTILLE CPLX POPPET took 0.028s\nDEVIL on 2010-09-02 was within prior fire perimeter 1-2 years before\nDEVIL took 0.029s\nKINGSFORD on 2010-09-06 was within prior fire perimeter 2-3 years before\nKINGSFORD took 0.028s\nSFO-2010-CACDFTCU020705 on 2010-09-13 was within prior fire perimeter 1-2 years before\nSFO-2010-CACDFTCU020705 took 1.499s\nHWY 243 on 2010-09-20 was within prior fire perimeter 3-4 years before\nHWY 243 took 0.027s\nPINES on 2010-09-20 was within prior fire perimeter 3-4 years before\nPINES took 0.027s\nLOOKOUT on 2010-09-21 was within prior fire perimeter 4-5 years before\nLOOKOUT took 0.026s\nCREEK on 2010-09-22 was within prior fire perimeter 1-2 years before\nCREEK took 0.028s\nSAILOR on 2010-09-27 was within prior fire perimeter 2-3 years before\nSAILOR took 0.028s\nCOVINGTON on 2010-09-29 was within prior fire perimeter 4-5 years before\nCOVINGTON took 0.025s\nSLAUGHTERHOUSE RD / PRIVA on 2010-09-30 was within prior fire perimeter 2-3 years before\nSLAUGHTERHOUSE RD / PRIVA took 0.029s\nBAKER on 2010-10-02 was within prior fire perimeter 0-1 years before\nBAKER took 0.029s\nHILDRETH on 2010-10-14 was within prior fire perimeter 3-4 years before\nHILDRETH took 0.035s\nBORDER 15 on 2010-10-15 was within prior fire perimeter 2-3 years before\nBORDER 15 took 0.028s\nSANDY on 2010-10-17 was within prior fire perimeter 2-3 years before\nSANDY took 0.027s\nDON on 2010-10-17 was within prior fire perimeter 2-3 years before\nDON took 0.027s\nKESSLER on 2010-10-24 was within prior fire perimeter 3-4 years before\nKESSLER took 0.027s\nWHITE WING DR DEERHORN_V on 2010-10-25 was within prior fire perimeter 3-4 years before\nWHITE WING DR DEERHORN_V took 0.026s\nBLACK on 2010-11-02 was within prior fire perimeter 3-4 years before\nBLACK took 0.028s\nDIP on 2010-11-15 was within prior fire perimeter 3-4 years before\nDIP took 0.034s\nREDWOOD on 2010-11-22 was within prior fire perimeter 2-3 years before\nREDWOOD took 0.028s\nTWIN on 2010-12-13 was within prior fire perimeter 4-5 years before\nTWIN took 0.027s\nBORDER on 2011-01-29 was within prior fire perimeter 3-4 years before\nBORDER took 0.034s\nGRASS on 2011-02-09 was within prior fire perimeter 3-4 years before\nGRASS took 0.033s\nNEWHALL on 2011-02-10 was within prior fire perimeter 2-3 years before\nNEWHALL took 0.034s\n2011CAIRS17491570 on 2011-02-26 was within prior fire perimeter 4-5 years before\n2011CAIRS17491570 took 0.035s\nCOLBY on 2011-02-26 was within prior fire perimeter 1-2 years before\nCOLBY took 0.038s\nHWY 94 DULZURA 5 on 2011-03-18 was within prior fire perimeter 3-4 years before\nHWY 94 DULZURA 5 took 0.035s\nSTATE on 2011-03-29 was within prior fire perimeter 4-5 years before\nSTATE took 0.033s\nSKYWAY on 2011-04-02 was within prior fire perimeter 2-3 years before\nSKYWAY took 0.035s\nTOYOTA on 2011-04-03 was within prior fire perimeter 4-5 years before\nTOYOTA took 
0.037s\nCADILLAC on 2011-04-03 was within prior fire perimeter 2-3 years before\nCADILLAC took 0.036s\nSFO-2011CACDFLAC008009 on 2011-04-08 was within prior fire perimeter 2-3 years before\nSFO-2011CACDFLAC008009 took 0.036s\nSUZUKI on 2011-04-12 was within prior fire perimeter 1-2 years before\nSUZUKI took 0.035s\nMAXIMA on 2011-04-15 was within prior fire perimeter 2-3 years before\nMAXIMA took 0.034s\nGRASS VALLEY on 2011-04-16 was within prior fire perimeter 3-4 years before\nGRASS VALLEY took 0.038s\nI 15 S/ MISSION RD on 2011-04-20 was within prior fire perimeter 3-4 years before\nI 15 S/ MISSION RD took 0.037s\nLONE TREE RD PALERMO on 2011-04-23 was within prior fire perimeter 2-3 years before\nLONE TREE RD PALERMO took 0.036s\n2011CAIRS17962857 on 2011-04-24 was within prior fire perimeter 4-5 years before\n2011CAIRS17962857 took 0.034s\nRABBIT on 2011-04-29 was within prior fire perimeter 0-1 years before\nRABBIT took 0.036s\nSFO-2011CACDFLAC009746 on 2011-04-30 was within prior fire perimeter 3-4 years before\nSFO-2011CACDFLAC009746 took 0.037s\nLITTLE on 2011-05-02 was within prior fire perimeter 1-2 years before\nLITTLE took 0.037s\nBING on 2011-05-06 was within prior fire perimeter 3-4 years before\nBING took 0.034s\nSTAGE on 2011-05-08 was within prior fire perimeter 3-4 years before\nSTAGE took 0.037s\nGARCIA on 2011-05-12 was within prior fire perimeter 4-5 years before\nGARCIA took 0.036s\nRUBBISH on 2011-05-16 was within prior fire perimeter 3-4 years before\nRUBBISH took 0.034s\nROUND on 2011-05-26 was within prior fire perimeter 3-4 years before\nROUND took 0.034s\nTRASH on 2011-05-29 was within prior fire perimeter 4-5 years before\nTRASH took 0.033s\nSFO-2011CACDFLAC012145 on 2011-05-30 was within prior fire perimeter 3-4 years before\nSFO-2011CACDFLAC012145 took 0.036s\nSIXTH on 2011-05-30 was within prior fire perimeter 4-5 years before\nSIXTH took 0.034s\nBEE on 2011-06-05 was within prior fire perimeter 3-4 years before\nBEE took 0.035s\nLIVE on 2011-06-11 was within prior fire perimeter 4-5 years before\nLIVE took 0.039s\n2011CAIRS18003170 on 2011-06-14 was within prior fire perimeter 4-5 years before\n2011CAIRS18003170 took 0.035s\nHONEY on 2011-06-17 was within prior fire perimeter 3-4 years before\nHONEY took 0.035s\nLAKE on 2011-06-17 was within prior fire perimeter 3-4 years before\nLAKE took 0.035s\nCALAC on 2011-06-18 was within prior fire perimeter 3-4 years before\nCALAC took 0.034s\nTADPOLE on 2011-06-19 was within prior fire perimeter 2-3 years before\nTADPOLE took 0.035s\nY3 on 2011-06-19 was within prior fire perimeter 3-4 years before\nY3 took 0.036s\nOREGON on 2011-06-20 was within prior fire perimeter 4-5 years before\nOREGON took 0.034s\nBORDER 9 on 2011-06-20 was within prior fire perimeter 4-5 years before\nBORDER 9 took 0.034s\nTOYON HEIGHTS DR FALLBRO on 2011-06-24 was within prior fire perimeter 3-4 years before\nTOYON HEIGHTS DR FALLBRO took 0.034s\nFARRELL on 2011-06-27 was within prior fire perimeter 2-3 years before\nFARRELL took 0.036s\nGIBBON on 2011-06-27 was within prior fire perimeter 3-4 years before\nGIBBON took 0.034s\nSKYWAY on 2011-07-02 was within prior fire perimeter 3-4 years before\nSKYWAY took 0.034s\nFORD on 2011-07-02 was within prior fire perimeter 2-3 years before\nFORD took 0.035s\nWILLOW on 2011-07-03 was within prior fire perimeter 3-4 years before\nWILLOW took 0.035s\n2011CAIRS19462156 on 2011-07-06 was within prior fire perimeter 4-5 years before\n2011CAIRS19462156 took 0.033s\nGREEN VALLEY on 2011-07-07 was 
within prior fire perimeter 3-4 years before\nGREEN VALLEY took 0.037s\nCRANSTON on 2011-07-07 was within prior fire perimeter 4-5 years before\nCRANSTON took 0.037s\nLONG on 2011-07-07 was within prior fire perimeter 3-4 years before\nLONG took 0.036s\nCOLGATE on 2011-07-09 was within prior fire perimeter 1-2 years before\nCOLGATE took 0.037s\nRIVER on 2011-07-10 was within prior fire perimeter 0-1 years before\nRIVER took 0.038s\nEUCALYPTUS on 2011-07-10 was within prior fire perimeter 0-1 years before\nEUCALYPTUS on 2011-07-10 was within prior fire perimeter 3-4 years before\nEUCALYPTUS took 0.036s\nLA_JOLLA on 2011-07-10 was within prior fire perimeter 3-4 years before\nLA_JOLLA took 0.037s\nMACK on 2011-07-11 was within prior fire perimeter 3-4 years before\nMACK took 0.039s\nPIPE on 2011-07-11 was within prior fire perimeter 1-2 years before\nPIPE took 0.039s\nSPUR on 2011-07-16 was within prior fire perimeter 3-4 years before\nSPUR took 0.034s\nHOBO on 2011-07-17 was within prior fire perimeter 0-1 years before\nHOBO took 0.037s\nDYER on 2011-07-21 was within prior fire perimeter 2-3 years before\nDYER took 0.036s\nFEATHERSTONE CANYON RD / on 2011-07-23 was within prior fire perimeter 3-4 years before\nFEATHERSTONE CANYON RD / took 0.036s\nHARRISON on 2011-07-24 was within prior fire perimeter 3-4 years before\nHARRISON took 0.035s\nBAN13 on 2011-07-30 was within prior fire perimeter 4-5 years before\nBAN13 took 0.034s\nCRYSTAL CAVE on 2011-08-02 was within prior fire perimeter 2-3 years before\nCRYSTAL CAVE took 0.037s\nDIOS on 2011-08-03 was within prior fire perimeter 3-4 years before\nDIOS took 0.034s\nVALLEY on 2011-08-03 was within prior fire perimeter 4-5 years before\nVALLEY took 0.036s\nHYAMPOM on 2011-08-04 was within prior fire perimeter 3-4 years before\nHYAMPOM took 0.036s\nARROWHEAD on 2011-08-05 was within prior fire perimeter 4-5 years before\nARROWHEAD took 0.033s\nSFO-2011CACDFLAC017753 on 2011-08-06 was within prior fire perimeter 3-4 years before\nSFO-2011CACDFLAC017753 took 0.035s\nCOPE on 2011-08-07 was within prior fire perimeter 4-5 years before\nCOPE took 0.036s\nBANNING on 2011-08-07 was within prior fire perimeter 4-5 years before\nBANNING took 0.039s\nBARONA # 2 on 2011-08-07 was within prior fire perimeter 3-4 years before\nBARONA # 2 took 0.035s\nLOOKOUT on 2011-08-08 was within prior fire perimeter 3-4 years before\nLOOKOUT took 0.042s\nLANE on 2011-08-12 was within prior fire perimeter 1-2 years before\nLANE took 0.036s\n2011CAIRS19493098 on 2011-08-13 was within prior fire perimeter 4-5 years before\n2011CAIRS19493098 took 0.034s\nPOTRERO on 2011-08-16 was within prior fire perimeter 3-4 years before\nPOTRERO took 0.034s\nIRON 2 on 2011-08-17 was within prior fire perimeter 3-4 years before\nIRON 2 took 0.038s\n2011CAIRS18132746 on 2011-08-18 was within prior fire perimeter 3-4 years before\n2011CAIRS18132746 took 0.034s\nDEL on 2011-08-23 was within prior fire perimeter 4-5 years before\nDEL took 0.036s\nSERRANO on 2011-08-27 was within prior fire perimeter 4-5 years before\nSERRANO took 0.034s\nBARRETT on 2011-08-27 was within prior fire perimeter 3-4 years before\nBARRETT took 0.035s\nFOOTHILL on 2011-08-30 was within prior fire perimeter 2-3 years before\nFOOTHILL took 0.039s\nPALA on 2011-08-30 was within prior fire perimeter 0-1 years before\nPALA took 0.037s\nBAR on 2011-09-01 was within prior fire perimeter 3-4 years before\nBAR took 0.036s\nFIGUEROA on 2011-09-03 was within prior fire perimeter 2-3 years before\nFIGUEROA took 0.045s\nTWIN on 
2011-09-06 was within prior fire perimeter 4-5 years before\nTWIN took 0.037s\nWATT on 2011-09-07 was within prior fire perimeter 2-3 years before\nWATT took 0.039s\nQUARRY on 2011-09-10 was within prior fire perimeter 2-3 years before\nQUARRY took 0.037s\nARNOLD COMPLEX on 2011-09-10 was within prior fire perimeter 2-3 years before\nARNOLD COMPLEX took 0.042s\nMARE on 2011-09-10 was within prior fire perimeter 3-4 years before\nMARE took 0.037s\nTENNESSEE on 2011-09-11 was within prior fire perimeter 4-5 years before\nTENNESSEE took 0.033s\nDEVORE on 2011-09-11 was within prior fire perimeter 3-4 years before\nDEVORE took 0.037s\nSOUTH on 2011-09-13 was within prior fire perimeter 3-4 years before\nSOUTH took 0.035s\nHOWARD on 2011-09-14 was within prior fire perimeter 4-5 years before\nHOWARD took 0.084s\nCONTRERAS on 2011-09-14 was within prior fire perimeter 2-3 years before\nCONTRERAS took 0.038s\nROUND on 2011-09-14 was within prior fire perimeter 2-3 years before\nROUND took 0.039s\nEAGLE on 2011-09-15 was within prior fire perimeter 3-4 years before\nEAGLE took 0.034s\nTECATE RD / TECATE MISS 2 on 2011-09-22 was within prior fire perimeter 3-4 years before\nTECATE RD / TECATE MISS 2 took 0.037s\nLIBRARY on 2011-09-23 was within prior fire perimeter 3-4 years before\nLIBRARY took 0.039s\nWILBUR on 2011-09-30 was within prior fire perimeter 3-4 years before\nWILBUR took 0.037s\nGREY on 2011-10-06 was within prior fire perimeter 2-3 years before\nGREY took 0.038s\nHONEY SPRINGS RD LAWSO 7 on 2011-10-10 was within prior fire perimeter 3-4 years before\nHONEY SPRINGS RD LAWSO 7 took 0.035s\nWILD on 2011-10-12 was within prior fire perimeter 3-4 years before\nWILD took 0.037s\nCHECK POINT on 2011-10-12 was within prior fire perimeter 3-4 years before\nCHECK POINT took 0.039s\nCIGG on 2011-10-13 was within prior fire perimeter 3-4 years before\nCIGG took 0.042s\nFREE on 2011-10-22 was within prior fire perimeter 4-5 years before\nFREE took 0.036s\nCHRISTIAN on 2011-10-26 was within prior fire perimeter 2-3 years before\nCHRISTIAN took 0.04s\n2E on 2011-10-27 was within prior fire perimeter 0-1 years before\n2E took 0.039s\nPELONA on 2011-10-29 was within prior fire perimeter 4-5 years before\nPELONA took 0.039s\nI 15 N/ MISSION RD 5 on 2011-10-31 was within prior fire perimeter 4-5 years before\nI 15 N/ MISSION RD 5 took 0.036s\nBORDER 25 on 2011-10-31 was within prior fire perimeter 4-5 years before\nBORDER 25 took 0.037s\nLOPEZ on 2011-11-01 was within prior fire perimeter 3-4 years before\nLOPEZ took 0.036s\nBORDER 26 on 2011-11-01 was within prior fire perimeter 4-5 years before\nBORDER 26 took 0.038s\nSFO-2011CACDFORC007315 on 2011-11-02 was within prior fire perimeter 2-3 years before\nSFO-2011CACDFORC007315 took 0.037s\nVALLEY FIRE on 2011-11-02 was within prior fire perimeter 4-5 years before\nVALLEY FIRE took 0.037s\nCAJON on 2011-11-10 was within prior fire perimeter 4-5 years before\nCAJON took 0.036s\nCHIP on 2011-11-20 was within prior fire perimeter 2-3 years before\nCHIP took 0.042s\nSIERRA CIELO LAWSON_V 2 on 2011-11-27 was within prior fire perimeter 4-5 years before\nSIERRA CIELO LAWSON_V 2 took 0.039s\nPOST on 2011-12-02 was within prior fire perimeter 3-4 years before\nPOST took 0.036s\nSTONE on 2011-12-06 was within prior fire perimeter 3-4 years before\nSTONE took 0.036s\nCHRISTMAS on 2011-12-25 was within prior fire perimeter 2-3 years before\nCHRISTMAS took 0.038s\nMURPHYS on 2012-01-15 was within prior fire perimeter 4-5 years before\nMURPHYS took 0.037s\nMARTINEZ 
on 2012-02-09 was within prior fire perimeter 4-5 years before\nMARTINEZ took 0.035s\nSFO-2012CACDFORC001034 on 2012-02-10 was within prior fire perimeter 4-5 years before\nSFO-2012CACDFORC001034 took 0.035s\nGLEN on 2012-02-13 was within prior fire perimeter 3-4 years before\nGLEN took 0.041s\nOTAY MTN TRL / MARRON VAL on 2012-02-21 was within prior fire perimeter 4-5 years before\nOTAY MTN TRL / MARRON VAL took 0.036s\nGIBRALTER on 2012-02-25 was within prior fire perimeter 2-3 years before\nGIBRALTER took 0.04s\nSHOOTING on 2012-02-26 was within prior fire perimeter 3-4 years before\nSHOOTING took 0.036s\nMOTHER GRUNDY TRUCK TRL on 2012-02-26 was within prior fire perimeter 4-5 years before\nMOTHER GRUNDY TRUCK TRL took 0.038s\nOROVISTA on 2012-03-03 was within prior fire perimeter 2-3 years before\nOROVISTA took 0.04s\nRUSH on 2012-03-04 was within prior fire perimeter 4-5 years before\nRUSH took 0.04s\nN OLD HIGHWAY 395 /W RAI on 2012-03-12 was within prior fire perimeter 4-5 years before\nN OLD HIGHWAY 395 /W RAI took 0.037s\nTURNOUT on 2012-03-13 was within prior fire perimeter 2-3 years before\nTURNOUT took 0.04s\nREDWOOD on 2012-03-21 was within prior fire perimeter 3-4 years before\nREDWOOD took 0.035s\nKUUPAT on 2012-03-30 was within prior fire perimeter 4-5 years before\nKUUPAT took 0.035s\nSTAGING on 2012-04-02 was within prior fire perimeter 4-5 years before\nSTAGING took 0.036s\nBONFIRE on 2012-04-04 was within prior fire perimeter 2-3 years before\nBONFIRE took 0.037s\nCOLBY on 2012-04-22 was within prior fire perimeter 2-3 years before\nCOLBY took 0.041s\nCABIN on 2012-04-24 was within prior fire perimeter 2-3 years before\nCABIN took 0.036s\nTUJUNGA on 2012-04-28 was within prior fire perimeter 2-3 years before\nTUJUNGA took 0.037s\nYSABEL on 2012-04-28 was within prior fire perimeter 4-5 years before\nYSABEL took 0.037s\nHIGHWAY on 2012-05-03 was within prior fire perimeter 2-3 years before\nHIGHWAY took 0.036s\nSTA 64 - RINCON RES FI 29 on 2012-05-10 was within prior fire perimeter 4-5 years before\nSTA 64 - RINCON RES FI 29 took 0.036s\nYSABEL_2 on 2012-05-14 was within prior fire perimeter 4-5 years before\nYSABEL_2 took 0.036s\nBORDER 7 on 2012-05-18 was within prior fire perimeter 4-5 years before\nBORDER 7 took 0.038s\nRIPPLE on 2012-05-23 was within prior fire perimeter 3-4 years before\nRIPPLE took 0.037s\n2012CAIRS19847451 on 2012-05-23 was within prior fire perimeter 4-5 years before\n2012CAIRS19847451 took 0.036s\nMOTHER on 2012-05-23 was within prior fire perimeter 4-5 years before\nMOTHER took 0.035s\nGRUNDY on 2012-05-23 was within prior fire perimeter 4-5 years before\nGRUNDY took 0.037s\nBANNER 4 on 2012-05-24 was within prior fire perimeter 4-5 years before\nBANNER 4 took 0.036s\nPINK on 2012-05-25 was within prior fire perimeter 4-5 years before\nPINK took 0.035s\n2012CAIRS19847452 on 2012-05-26 was within prior fire perimeter 4-5 years before\n2012CAIRS19847452 took 0.038s\nLONE on 2012-05-27 was within prior fire perimeter 4-5 years before\nLONE took 0.038s\nPOWERLINE on 2012-05-28 was within prior fire perimeter 4-5 years before\nPOWERLINE took 0.038s\nCAMPGROUND on 2012-05-29 was within prior fire perimeter 4-5 years before\nCAMPGROUND took 0.036s\nNEAL RD / WAYLAND RD on 2012-06-02 was within prior fire perimeter 3-4 years before\nNEAL RD / WAYLAND RD took 0.036s\nTINTA on 2012-06-03 was within prior fire perimeter 4-5 years before\nTINTA took 0.036s\nROLLOVER on 2012-06-06 was within prior fire perimeter 2-3 years before\nROLLOVER took 
0.036s\nGOULD on 2012-06-11 was within prior fire perimeter 2-3 years before\nGOULD took 0.043s\nSUTHERLAND on 2012-06-16 was within prior fire perimeter 4-5 years before\nSUTHERLAND took 0.036s\nINDIAN OAKS RD RAMONA on 2012-06-18 was within prior fire perimeter 4-5 years before\nINDIAN OAKS RD RAMONA took 0.03s\nOTAY 3 on 2012-06-20 was within prior fire perimeter 4-5 years before\nOTAY 3 took 0.035s\nPIT FIRE on 2012-06-24 was within prior fire perimeter 4-5 years before\nPIT FIRE took 0.036s\nBORDER 11 on 2012-06-24 was within prior fire perimeter 4-5 years before\nBORDER 11 took 0.035s\nHY 91 E/ GREEN RIVER RD on 2012-06-25 was within prior fire perimeter 3-4 years before\nHY 91 E/ GREEN RIVER RD took 0.052s\nSTEFFY RD RAMONA 10 on 2012-06-30 was within prior fire perimeter 4-5 years before\nSTEFFY RD RAMONA 10 took 0.037s\nMARRON on 2012-07-01 was within prior fire perimeter 4-5 years before\nMARRON took 0.036s\nOTAY LAKES RD OTAY_LA 14 on 2012-07-01 was within prior fire perimeter 4-5 years before\nOTAY LAKES RD OTAY_LA 14 took 0.036s\nSFO-2012CACDFORC004888 on 2012-07-02 was within prior fire perimeter 3-4 years before\nSFO-2012CACDFORC004888 took 0.036s\nHALFWAY on 2012-07-03 was within prior fire perimeter 1-2 years before\nHALFWAY took 0.038s\nHILLSIDE on 2012-07-03 was within prior fire perimeter 1-2 years before\nHILLSIDE took 0.037s\nWOOD on 2012-07-04 was within prior fire perimeter 2-3 years before\nWOOD took 0.036s\nLIME on 2012-07-06 was within prior fire perimeter 3-4 years before\nLIME took 0.038s\nSEMI on 2012-07-06 was within prior fire perimeter 4-5 years before\nSEMI took 0.049s\nARRON on 2012-07-08 was within prior fire perimeter 4-5 years before\nARRON took 0.039s\nMOTORCYCLE on 2012-07-10 was within prior fire perimeter 2-3 years before\nMOTORCYCLE took 0.041s\nMAGNOLIA 2 on 2012-07-11 was within prior fire perimeter 4-5 years before\nMAGNOLIA 2 took 0.04s\nBORDER 13 on 2012-07-11 was within prior fire perimeter 4-5 years before\nBORDER 13 took 0.039s\nSKYWAY on 2012-07-12 was within prior fire perimeter 4-5 years before\nSKYWAY took 0.042s\nDULZURA on 2012-07-12 was within prior fire perimeter 4-5 years before\nDULZURA took 0.038s\nPICO on 2012-07-16 was within prior fire perimeter 4-5 years before\nPICO took 0.036s\nFALLS on 2012-07-18 was within prior fire perimeter 4-5 years before\nFALLS took 0.036s\nTAYLOR on 2012-07-19 was within prior fire perimeter 4-5 years before\nTAYLOR took 0.036s\nHAWKINSVILLE on 2012-07-20 was within prior fire perimeter 0-1 years before\nHAWKINSVILLE took 0.04s\nCDF_2012_21249689 on 2012-07-21 was within prior fire perimeter 1-2 years before\nCDF_2012_21249689 took 0.04s\n2012CAIRS20806193 on 2012-07-21 was within prior fire perimeter 2-3 years before\n2012CAIRS20806193 took 0.039s\nSUGARLOAF on 2012-07-23 was within prior fire perimeter 2-3 years before\nSUGARLOAF took 0.041s\nOCEAN VIEW on 2012-07-24 was within prior fire perimeter 4-5 years before\nOCEAN VIEW took 0.036s\nOLIVEVIEW on 2012-07-24 was within prior fire perimeter 3-4 years before\nOLIVEVIEW took 0.04s\nPROCTOR on 2012-07-25 was within prior fire perimeter 4-5 years before\nPROCTOR took 0.037s\nPOTRERO VALLEY RD POT 13 on 2012-07-25 was within prior fire perimeter 4-5 years before\nPOTRERO VALLEY RD POT 13 took 0.036s\nZUMAQUE RANCHO_SANTA_FE on 2012-07-26 was within prior fire perimeter 4-5 years before\nZUMAQUE RANCHO_SANTA_FE took 0.038s\nBEE on 2012-07-26 was within prior fire perimeter 2-3 years before\nBEE took 0.039s\nYELLOW on 2012-07-28 was within prior 
fire perimeter 4-5 years before\nYELLOW took 0.036s\nTECATE RD / HWY 94 on 2012-07-30 was within prior fire perimeter 4-5 years before\nTECATE RD / HWY 94 took 0.036s\nBROWN on 2012-08-04 was within prior fire perimeter 4-5 years before\nBROWN took 0.039s\nLIEBEL on 2012-08-04 was within prior fire perimeter 4-5 years before\nLIEBEL took 0.04s\nTWIN on 2012-08-05 was within prior fire perimeter 0-1 years before\nTWIN took 0.04s\nBLUE on 2012-08-05 was within prior fire perimeter 4-5 years before\nBLUE took 0.04s\n2012CAIRS20806222 on 2012-08-05 was within prior fire perimeter 2-3 years before\n2012CAIRS20806222 took 0.042s\nCEDAR BASIN on 2012-08-06 was within prior fire perimeter 0-1 years before\nCEDAR BASIN took 0.042s\nGOFF BUTTE on 2012-08-06 was within prior fire perimeter 0-1 years before\nGOFF BUTTE took 0.041s\nDARK on 2012-08-08 was within prior fire perimeter 2-3 years before\nDARK took 0.042s\nBUTTE on 2012-08-09 was within prior fire perimeter 1-2 years before\nBUTTE took 0.041s\nWOOLSTAFF on 2012-08-10 was within prior fire perimeter 4-5 years before\nWOOLSTAFF took 0.037s\nGASOLINE 2 on 2012-08-11 was within prior fire perimeter 4-5 years before\nGASOLINE 2 took 0.036s\nOLD JULIAN HWY WITCH_C 3 on 2012-08-12 was within prior fire perimeter 4-5 years before\nOLD JULIAN HWY WITCH_C 3 took 0.037s\nWRIGHTS on 2012-08-13 was within prior fire perimeter 4-5 years before\nWRIGHTS took 0.038s\nLAKE on 2012-08-13 was within prior fire perimeter 2-3 years before\nLAKE took 0.038s\nGOODENOUGH 2 on 2012-08-15 was within prior fire perimeter 4-5 years before\nGOODENOUGH 2 took 0.038s\nSFO-2012CACDFORC006288 on 2012-08-17 was within prior fire perimeter 4-5 years before\nSFO-2012CACDFORC006288 took 0.039s\nWATERSPOUT on 2012-08-18 was within prior fire perimeter 4-5 years before\nWATERSPOUT took 0.037s\nRAILROAD on 2012-08-19 was within prior fire perimeter 4-5 years before\nRAILROAD took 0.037s\nCLEAR on 2012-08-23 was within prior fire perimeter 2-3 years before\nCLEAR took 0.039s\nLADYBUG on 2012-08-26 was within prior fire perimeter 3-4 years before\nLADYBUG took 0.039s\nCIRCLE 2 on 2012-08-31 was within prior fire perimeter 2-3 years before\nCIRCLE 2 took 0.039s\nCAR ASSIST on 2012-09-01 was within prior fire perimeter 4-5 years before\nCAR ASSIST took 0.041s\nNEW LONG VALLEY RD CLEAR on 2012-09-02 was within prior fire perimeter 0-1 years before\nNEW LONG VALLEY RD CLEAR took 0.045s\nNAVIGATOR on 2012-09-02 was within prior fire perimeter 3-4 years before\nNAVIGATOR took 0.038s\nCOCHERA VIA MARRON_VA 4 on 2012-09-02 was within prior fire perimeter 4-5 years before\nCOCHERA VIA MARRON_VA 4 took 0.038s\nSHU DUTCH on 2012-09-03 was within prior fire perimeter 4-5 years before\nSHU DUTCH took 0.042s\nCDF_2012_21245072 on 2012-09-03 was within prior fire perimeter 2-3 years before\nCDF_2012_21245072 took 0.042s\nCDF_2012_21245182 on 2012-09-04 was within prior fire perimeter 2-3 years before\nCDF_2012_21245182 took 0.037s\nJULIAN 3 on 2012-09-06 was within prior fire perimeter 4-5 years before\nJULIAN 3 took 0.037s\nNEAL on 2012-09-09 was within prior fire perimeter 4-5 years before\nNEAL took 0.036s\nCDF_2012_21246508 on 2012-09-09 was within prior fire perimeter 1-2 years before\nCDF_2012_21246508 took 0.039s\nDEEP on 2012-09-09 was within prior fire perimeter 4-5 years before\nDEEP took 0.035s\nVETERAN on 2012-09-11 was within prior fire perimeter 3-4 years before\nVETERAN took 0.037s\nLOOKOUT POINT PP 109 PD 2 on 2012-09-27 was within prior fire perimeter 4-5 years before\nLOOKOUT 
POINT PP 109 PD 2 took 0.039s\nLOOKOUT POINT PP 109 PDSE on 2012-09-27 was within prior fire perimeter 4-5 years before\nLOOKOUT POINT PP 109 PDSE took 0.035s\nLOOKOUT POINT PP 109 PD 3 on 2012-09-27 was within prior fire perimeter 4-5 years before\nLOOKOUT POINT PP 109 PD 3 took 0.037s\nGEMMIL on 2012-09-28 was within prior fire perimeter 4-5 years before\nGEMMIL took 0.043s\n2012CAIRS21436291 on 2012-10-01 was within prior fire perimeter 3-4 years before\n2012CAIRS21436291 took 0.041s\nLOPEZ on 2012-10-02 was within prior fire perimeter 3-4 years before\nLOPEZ took 0.039s\nSHEEP on 2012-10-07 was within prior fire perimeter 0-1 years before\nSHEEP took 0.04s\nHILLS on 2012-10-09 was within prior fire perimeter 1-2 years before\nHILLS took 0.039s\nDELTA on 2012-10-11 was within prior fire perimeter 3-4 years before\nDELTA took 0.038s\nVOGEL on 2012-10-25 was within prior fire perimeter 3-4 years before\nVOGEL took 0.04s\nSPUR on 2012-10-29 was within prior fire perimeter 3-4 years before\nSPUR took 0.04s\nLONE on 2012-10-31 was within prior fire perimeter 3-4 years before\nLONE took 0.038s\nMORRIS on 2012-11-04 was within prior fire perimeter 3-4 years before\nMORRIS took 0.039s\nW WHITLOCK RD BEAR_VALLE on 2012-11-15 was within prior fire perimeter 4-5 years before\nW WHITLOCK RD BEAR_VALLE took 0.042s\nKAGEL on 2012-11-24 was within prior fire perimeter 4-5 years before\nKAGEL took 0.038s\n2012CAIRS21404195 on 2012-12-15 was within prior fire perimeter 3-4 years before\n2012CAIRS21404195 took 0.04s\nPICKUP on 2012-12-20 was within prior fire perimeter 3-4 years before\nPICKUP took 0.04s\nWILDWOOD on 2012-12-25 was within prior fire perimeter 3-4 years before\nWILDWOOD took 0.039s\nGRILLS on 2013-02-10 was within prior fire perimeter 3-4 years before\nGRILLS took 0.038s\n2013CAIRS21735731 on 2013-02-24 was within prior fire perimeter 4-5 years before\n2013CAIRS21735731 took 0.04s\nWARMING on 2013-03-02 was within prior fire perimeter 3-4 years before\nWARMING took 0.039s\nSFO-2013CACDFLNU001894 on 2013-03-22 was within prior fire perimeter 4-5 years before\nSFO-2013CACDFLNU001894 took 0.038s\nSFO-2013CACDFORC003222 on 2013-04-06 was within prior fire perimeter 4-5 years before\nSFO-2013CACDFORC003222 took 0.039s\n2013CAIRS22390176 on 2013-04-14 was within prior fire perimeter 3-4 years before\n2013CAIRS22390176 took 0.039s\nBUELL on 2013-04-15 was within prior fire perimeter 4-5 years before\nBUELL took 0.037s\nSWITZERS on 2013-04-21 was within prior fire perimeter 3-4 years before\nSWITZERS took 0.039s\nJADESTONE on 2013-04-26 was within prior fire perimeter 3-4 years before\nJADESTONE took 0.041s\nFORD on 2013-04-28 was within prior fire perimeter 4-5 years before\nFORD took 0.039s\nMELVINA on 2013-04-30 was within prior fire perimeter 4-5 years before\nMELVINA took 0.041s\nCARR on 2013-05-01 was within prior fire perimeter 4-5 years before\nCARR took 0.038s\nGAINOR on 2013-05-02 was within prior fire perimeter 4-5 years before\nGAINOR took 0.036s\nMESSENGER on 2013-05-03 was within prior fire perimeter 3-4 years before\nMESSENGER took 0.041s\nSUNSET AV / MESA ST 2 on 2013-05-08 was within prior fire perimeter 0-1 years before\nSUNSET AV / MESA ST 2 took 0.04s\nHAPPY on 2013-05-11 was within prior fire perimeter 0-1 years before\nHAPPY took 0.044s\n2013CAIRS22395555 on 2013-05-11 was within prior fire perimeter 1-2 years before\n2013CAIRS22395555 took 0.043s\nCARIBOU on 2013-05-17 was within prior fire perimeter 4-5 years before\nCARIBOU took 0.041s\nCHERRY on 2013-05-20 was within 
prior fire perimeter 0-1 years before\nCHERRY took 0.043s\nTWIN on 2013-05-26 was within prior fire perimeter 4-5 years before\nTWIN took 0.044s\n2013CAIRS22397443 on 2013-05-26 was within prior fire perimeter 2-3 years before\n2013CAIRS22397443 took 0.042s\nVAIL on 2013-05-26 was within prior fire perimeter 4-5 years before\nVAIL took 0.044s\nBLACKBURN on 2013-05-31 was within prior fire perimeter 2-3 years before\nBLACKBURN took 0.039s\nSFO-2013CACDFBTU007220 on 2013-06-02 was within prior fire perimeter 4-5 years before\nSFO-2013CACDFBTU007220 took 0.038s\nHONEY RUN RD STH_CHICO on 2013-06-09 was within prior fire perimeter 4-5 years before\nHONEY RUN RD STH_CHICO took 0.041s\nDODGE on 2013-06-09 was within prior fire perimeter 1-2 years before\nDODGE took 0.041s\nMINE on 2013-06-10 was within prior fire perimeter 0-1 years before\nMINE took 0.046s\nCUBBY on 2013-06-12 was within prior fire perimeter 4-5 years before\nCUBBY took 0.04s\nMESA on 2013-06-26 was within prior fire perimeter 0-1 years before\nMESA took 0.041s\nFRUITVALE on 2013-06-29 was within prior fire perimeter 4-5 years before\nFRUITVALE took 0.039s\nROCKY on 2013-07-03 was within prior fire perimeter 3-4 years before\nROCKY took 0.04s\nSFO-2013CACDFORC006090 on 2013-07-04 was within prior fire perimeter 4-5 years before\nSFO-2013CACDFORC006090 took 0.048s\nGLENHAVEN on 2013-07-05 was within prior fire perimeter 4-5 years before\nGLENHAVEN took 0.04s\nWILD on 2013-07-06 was within prior fire perimeter 3-4 years before\nWILD took 0.393s\nINDIAN on 2013-07-07 was within prior fire perimeter 4-5 years before\nINDIAN took 0.039s\nEAGLE on 2013-07-08 was within prior fire perimeter 4-5 years before\nEAGLE took 0.49s\nKAGEL on 2013-07-21 was within prior fire perimeter 4-5 years before\nKAGEL took 0.369s\nRAYWOOD on 2013-07-21 was within prior fire perimeter 0-1 years before\nRAYWOOD took 0.043s\nCANYON on 2013-07-23 was within prior fire perimeter 3-4 years before\nCANYON took 0.039s\nSPANISH on 2013-07-25 was within prior fire perimeter 0-1 years before\nSPANISH took 0.042s\nLONE on 2013-07-26 was within prior fire perimeter 3-4 years before\nLONE took 0.254s\nMONTE on 2013-08-01 was within prior fire perimeter 3-4 years before\nMONTE took 0.044s\nSENECA on 2013-08-03 was within prior fire perimeter 1-2 years before\nSENECA took 0.916s\nSALVAGE on 2013-08-06 was within prior fire perimeter 1-2 years before\nSALVAGE took 0.044s\nBRANCH on 2013-08-09 was within prior fire perimeter 0-1 years before\nBRANCH took 0.293s\nSOAP FIRE on 2013-08-17 was within prior fire perimeter 1-2 years before\nSOAP FIRE took 0.04s\nHY 243 / TWIN PINES RD on 2013-08-20 was within prior fire perimeter 0-1 years before\nHY 243 / TWIN PINES RD took 0.043s\nBUTTON R 2 on 2013-08-22 was within prior fire perimeter 1-2 years before\nBUTTON R 2 took 0.042s\nPAINTER R 4 on 2013-08-22 was within prior fire perimeter 1-2 years before\nPAINTER R 4 took 0.043s\nLOG on 2013-09-01 was within prior fire perimeter 4-5 years before\nLOG took 0.04s\nSTONEY on 2013-09-01 was within prior fire perimeter 4-5 years before\nSTONEY took 0.04s\nPATROL on 2013-09-09 was within prior fire perimeter 3-4 years before\nPATROL took 0.048s\nINDIAN on 2013-09-21 was within prior fire perimeter 2-3 years before\nINDIAN took 0.155s\nROUNDUP on 2013-09-28 was within prior fire perimeter 2-3 years before\nROUNDUP took 0.04s\nWILSON on 2013-10-01 was within prior fire perimeter 4-5 years before\nWILSON took 0.478s\nNOBELS FIRE on 2013-10-02 was within prior fire perimeter 1-2 years 
before\nNOBELS FIRE took 0.046s\nSEDAN on 2013-10-05 was within prior fire perimeter 4-5 years before\nSEDAN took 0.049s\nSFO-2013CACDFORC130326 on 2013-10-05 was within prior fire perimeter 4-5 years before\nSFO-2013CACDFORC130326 took 0.051s\nRAM on 2013-10-13 was within prior fire perimeter 0-1 years before\nRAM took 0.073s\n2013CAIRS23797530 on 2013-10-31 was within prior fire perimeter 2-3 years before\n2013CAIRS23797530 took 0.042s\nFOREST on 2013-11-02 was within prior fire perimeter 4-5 years before\nFOREST took 0.042s\nBRIDGE on 2013-11-12 was within prior fire perimeter 4-5 years before\nBRIDGE took 0.151s\nDAVID on 2013-11-25 was within prior fire perimeter 4-5 years before\nDAVID took 0.039s\nSFO-2014CACDFTCU001064 on 2014-01-18 was within prior fire perimeter 0-1 years before\nSFO-2014CACDFTCU001064 took 0.497s\nHIDDEN on 2014-01-18 was within prior fire perimeter 4-5 years before\nHIDDEN took 0.043s\nFALLS on 2014-01-18 was within prior fire perimeter 4-5 years before\nFALLS took 0.042s\nSFO-2014CACDFTCU001167 on 2014-01-19 was within prior fire perimeter 0-1 years before\nSFO-2014CACDFTCU001167 took 0.044s\nBUICK on 2014-02-10 was within prior fire perimeter 4-5 years before\nBUICK took 0.04s\nLOWE on 2014-02-14 was within prior fire perimeter 4-5 years before\nLOWE took 0.041s\nWARMING on 2014-03-13 was within prior fire perimeter 4-5 years before\nWARMING took 0.18s\nCREEK on 2014-04-14 was within prior fire perimeter 0-1 years before\nCREEK took 0.049s\nALRAY on 2014-04-16 was within prior fire perimeter 2-3 years before\nALRAY took 0.049s\nOPDYKE on 2014-04-21 was within prior fire perimeter 4-5 years before\nOPDYKE took 0.041s\nHUGHES on 2014-04-30 was within prior fire perimeter 0-1 years before\nHUGHES took 0.045s\nSTONEY on 2014-05-12 was within prior fire perimeter 4-5 years before\nSTONEY took 0.158s\nSIGNAL on 2014-05-21 was within prior fire perimeter 4-5 years before\nSIGNAL took 0.041s\nDUCATI on 2014-05-25 was within prior fire perimeter 4-5 years before\nDUCATI took 0.483s\nFOBES on 2014-05-25 was within prior fire perimeter 0-1 years before\nFOBES took 0.044s\nBEE on 2014-05-26 was within prior fire perimeter 4-5 years before\nBEE took 0.689s\nDIABLO on 2014-05-29 was within prior fire perimeter 3-4 years before\nDIABLO took 0.166s\nCRANSTON on 2014-06-19 was within prior fire perimeter 4-5 years before\nCRANSTON took 0.04s\nSFO-2014CACDFLNU004959 on 2014-06-21 was within prior fire perimeter 1-2 years before\nSFO-2014CACDFLNU004959 took 0.042s\nSFO-2014CACDFLNU004972 on 2014-06-21 was within prior fire perimeter 1-2 years before\nSFO-2014CACDFLNU004972 took 0.041s\nSFO-2014CACDFMVU013659 on 2014-06-28 was within prior fire perimeter 1-2 years before\nSFO-2014CACDFMVU013659 took 0.914s\nSFO-2014CACDFSCU003886 on 2014-06-30 was within prior fire perimeter 4-5 years before\nSFO-2014CACDFSCU003886 took 0.232s\nMDF DH SCORPION on 2014-07-01 was within prior fire perimeter 2-3 years before\nMDF DH SCORPION took 0.074s\nDYER on 2014-07-09 was within prior fire perimeter 4-5 years before\nDYER took 0.04s\nPIONEER on 2014-07-12 was within prior fire perimeter 1-2 years before\nPIONEER took 0.044s\nASPEN on 2014-07-15 was within prior fire perimeter 0-1 years before\nASPEN took 0.043s\nLOOKOUT on 2014-07-20 was within prior fire perimeter 0-1 years before\nLOOKOUT took 0.045s\nTULE on 2014-07-21 was within prior fire perimeter 1-2 years before\nTULE took 0.049s\nSMOKEY on 2014-07-24 was within prior fire perimeter 0-1 years before\nSMOKEY took 0.108s\nCRANE on 
2014-07-26 was within prior fire perimeter 0-1 years before\nCRANE took 0.192s\nTURNOUT on 2014-08-09 was within prior fire perimeter 4-5 years before\nTURNOUT took 0.049s\nHAZEL on 2014-08-10 was within prior fire perimeter 0-1 years before\nHAZEL took 0.693s\nANTHONY on 2014-08-12 was within prior fire perimeter 0-1 years before\nANTHONY took 0.297s\nWHISKEY on 2014-08-12 was within prior fire perimeter 0-1 years before\nWHISKEY took 0.608s\nCAMPBELL on 2014-08-12 was within prior fire perimeter 0-1 years before\nCAMPBELL took 0.052s\nMAN on 2014-08-12 was within prior fire perimeter 0-1 years before\nMAN took 0.052s\nHUCKLEBERRY on 2014-08-12 was within prior fire perimeter 0-1 years before\nHUCKLEBERRY took 0.053s\nSCOTT on 2014-08-12 was within prior fire perimeter 0-1 years before\nSCOTT took 0.054s\nGUFFY on 2014-08-15 was within prior fire perimeter 0-1 years before\nGUFFY took 0.048s\nCORRAL on 2014-08-16 was within prior fire perimeter 3-4 years before\nCORRAL took 0.047s\nAZUSA on 2014-08-17 was within prior fire perimeter 0-1 years before\nAZUSA took 0.077s\nMILLARD on 2014-08-19 was within prior fire perimeter 4-5 years before\nMILLARD took 0.049s\nSUNRISE on 2014-08-21 was within prior fire perimeter 1-2 years before\nSUNRISE took 0.038s\nELEANOR on 2014-09-09 was within prior fire perimeter 1-2 years before\nELEANOR took 0.592s\nCONSTANTIA on 2014-09-24 was within prior fire perimeter 4-5 years before\nCONSTANTIA took 0.048s\nFISH GULCH on 2014-10-09 was within prior fire perimeter 0-1 years before\nFISH GULCH took 0.05s\nHOLLYHOCK on 2014-10-26 was within prior fire perimeter 1-2 years before\nHOLLYHOCK took 1.21s\nO'NEIL on 2014-10-27 was within prior fire perimeter 4-5 years before\nO'NEIL took 0.049s\nSADDLE on 2014-10-28 was within prior fire perimeter 1-2 years before\nSADDLE took 0.049s\nSYCAMORE on 2015-01-22 was within prior fire perimeter 1-2 years before\nSYCAMORE took 0.042s\nGREEN on 2015-03-11 was within prior fire perimeter 1-2 years before\nGREEN took 0.052s\nSFO-2015CACDFSHU003002 on 2015-04-06 was within prior fire perimeter 0-1 years before\nSFO-2015CACDFSHU003002 took 0.048s\nTWO MILE on 2015-04-21 was within prior fire perimeter 1-2 years before\nTWO MILE took 0.049s\nSFO-2015CACDFFKU007097 on 2015-05-05 was within prior fire perimeter 1-2 years before\nSFO-2015CACDFFKU007097 took 0.047s\nHONN on 2015-05-08 was within prior fire perimeter 0-1 years before\nHONN took 0.037s\nLUCY on 2015-05-13 was within prior fire perimeter 1-2 years before\nLUCY took 0.038s\nRED ROCK on 2015-05-22 was within prior fire perimeter 0-1 years before\nRED ROCK took 0.793s\nRED on 2015-05-25 was within prior fire perimeter 0-1 years before\nRED took 0.049s\nSUGER on 2015-05-27 was within prior fire perimeter 0-1 years before\nSUGER took 0.051s\nHELLHOLE on 2015-06-05 was within prior fire perimeter 0-1 years before\nHELLHOLE took 0.051s\nSECRET on 2015-06-05 was within prior fire perimeter 1-2 years before\nSECRET took 0.049s\nSFO-2015CACDFSHU005315 on 2015-06-10 was within prior fire perimeter 1-2 years before\nSFO-2015CACDFSHU005315 took 0.048s\nSHINN on 2015-06-30 was within prior fire perimeter 2-3 years before\nSHINN took 0.049s\nSAGE HEN on 2015-06-30 was within prior fire perimeter 2-3 years before\nSAGE HEN took 0.049s\nLUMGREY on 2015-07-02 was within prior fire perimeter 0-1 years before\nLUMGREY took 0.236s\nPROSPECT on 2015-07-02 was within prior fire perimeter 2-3 years before\nPROSPECT took 0.047s\nMOSQUITO on 2015-07-03 was within prior fire perimeter 2-3 years 
before\nMOSQUITO took 0.043s\nLITTLE on 2015-07-03 was within prior fire perimeter 0-1 years before\nLITTLE took 0.495s\nMUD on 2015-07-07 was within prior fire perimeter 0-1 years before\nMUD took 0.048s\nRUSH on 2015-07-08 was within prior fire perimeter 1-2 years before\nRUSH took 1.193s\nHARDEN on 2015-07-09 was within prior fire perimeter 1-2 years before\nHARDEN took 0.049s\nGROVE on 2015-07-09 was within prior fire perimeter 1-2 years before\nGROVE took 0.052s\nGIN on 2015-07-09 was within prior fire perimeter 1-2 years before\nGIN took 0.051s\nEAST GIN on 2015-07-09 was within prior fire perimeter 1-2 years before\nEAST GIN took 0.05s\nSPANISH on 2015-07-18 was within prior fire perimeter 2-3 years before\nSPANISH took 0.051s\nSANDUNES on 2015-07-18 was within prior fire perimeter 2-3 years before\nSANDUNES took 0.046s\nTHOMAS on 2015-07-18 was within prior fire perimeter 2-3 years before\nTHOMAS took 0.189s\nPAROLE on 2015-07-19 was within prior fire perimeter 1-2 years before\nPAROLE took 0.083s\nOAK on 2015-07-21 was within prior fire perimeter 1-2 years before\nOAK took 0.043s\nCARLON on 2015-07-21 was within prior fire perimeter 1-2 years before\nCARLON took 0.04s\nSTUMP on 2015-07-21 was within prior fire perimeter 1-2 years before\nSTUMP took 0.047s\nROSASCO on 2015-07-28 was within prior fire perimeter 1-2 years before\nROSASCO took 0.049s\nLONG on 2015-07-29 was within prior fire perimeter 2-3 years before\nLONG took 1.204s\nFORBES on 2015-07-31 was within prior fire perimeter 0-1 years before\nFORBES took 0.049s\nPILOT on 2015-07-31 was within prior fire perimeter 0-1 years before\nPILOT took 0.049s\nTHOMPKINS on 2015-07-31 was within prior fire perimeter 0-1 years before\nTHOMPKINS took 0.063s\nBARKER on 2015-07-31 was within prior fire perimeter 0-1 years before\nBARKER took 1.095s\nSODA 2 on 2015-07-31 was within prior fire perimeter 0-1 years before\nSODA 2 took 0.693s\nBLACK on 2015-07-31 was within prior fire perimeter 1-2 years before\nBLACK took 0.29s\n1-45 on 2015-08-01 was within prior fire perimeter 0-1 years before\n1-45 took 0.19s\n1-56 15AC on 2015-08-01 was within prior fire perimeter 0-1 years before\n1-56 15AC took 0.898s\n1-54 HAMMER on 2015-08-01 was within prior fire perimeter 0-1 years before\n1-54 HAMMER took 0.05s\n1-52 on 2015-08-01 was within prior fire perimeter 0-1 years before\n1-52 took 0.051s\nELK on 2015-08-01 was within prior fire perimeter 0-1 years before\nELK took 0.051s\nEAST on 2015-08-01 was within prior fire perimeter 1-2 years before\nEAST took 0.046s\nCRANE on 2015-08-01 was within prior fire perimeter 1-2 years before\nCRANE took 0.05s\nLONG on 2015-08-01 was within prior fire perimeter 1-2 years before\nLONG took 0.049s\nISLAND on 2015-08-02 was within prior fire perimeter 0-1 years before\nISLAND took 0.587s\nFISH on 2015-08-02 was within prior fire perimeter 0-1 years before\nFISH took 0.052s\nWALKER 2 on 2015-08-02 was within prior fire perimeter 0-1 years before\nWALKER 2 took 0.055s\nSTONES on 2015-08-02 was within prior fire perimeter 0-1 years before\nSTONES took 0.079s\nFOURTY THREE on 2015-08-02 was within prior fire perimeter 1-2 years before\nFOURTY THREE took 0.105s\nWEST on 2015-08-02 was within prior fire perimeter 2-3 years before\nWEST took 0.04s\nSODA on 2015-08-03 was within prior fire perimeter 3-4 years before\nSODA took 0.11s\nBELDEN on 2015-08-04 was within prior fire perimeter 3-4 years before\nBELDEN took 0.053s\nBUFFALO on 2015-08-04 was within prior fire perimeter 1-2 years before\nBUFFALO took 
0.057s\nRANCHO 4 on 2015-08-04 was within prior fire perimeter 1-2 years before\nRANCHO 4 took 0.054s\nW2 SMITH on 2015-08-07 was within prior fire perimeter 2-3 years before\nW2 SMITH took 0.993s\nGAS on 2015-08-08 was within prior fire perimeter 1-2 years before\nGAS took 0.082s\nDEER on 2015-08-18 was within prior fire perimeter 3-4 years before\nDEER took 0.049s\nHAKWINSVILLE on 2015-08-19 was within prior fire perimeter 3-4 years before\nHAKWINSVILLE took 0.043s\nCAMP on 2015-09-10 was within prior fire perimeter 2-3 years before\nCAMP took 0.05s\nWALKER on 2015-09-11 was within prior fire perimeter 2-3 years before\nWALKER took 0.047s\nBREAK CHECK on 2015-09-17 was within prior fire perimeter 4-5 years before\nBREAK CHECK took 0.045s\nLODGE on 2015-09-22 was within prior fire perimeter 2-3 years before\nLODGE took 0.052s\nCOYOTE on 2015-09-26 was within prior fire perimeter 0-1 years before\nCOYOTE took 0.053s\nSEVEN on 2015-10-08 was within prior fire perimeter 0-1 years before\nSEVEN took 0.115s\nRECHE CANYON RD MOVA on 2015-10-17 was within prior fire perimeter 2-3 years before\nRECHE CANYON RD MOVA took 0.048s\nCOBBLESTONE LN IGO on 2015-10-18 was within prior fire perimeter 2-3 years before\nCOBBLESTONE LN IGO took 0.053s\nSTONEY on 2015-10-29 was within prior fire perimeter 0-1 years before\nSTONEY took 0.5s\nCEMETERY on 2015-11-01 was within prior fire perimeter 1-2 years before\nCEMETERY took 0.6s\nELLSTREE on 2016-01-04 was within prior fire perimeter 2-3 years before\nELLSTREE took 0.05s\nANGELLY WY / HOBERG DR S on 2016-02-29 was within prior fire perimeter 0-1 years before\nANGELLY WY / HOBERG DR S took 0.105s\nSKY on 2016-03-21 was within prior fire perimeter 1-2 years before\nSKY took 0.05s\nGRADE on 2016-03-30 was within prior fire perimeter 0-1 years before\nGRADE took 0.072s\nSEIGLER on 2016-04-17 was within prior fire perimeter 0-1 years before\nSEIGLER took 0.041s\nSFO-2016CACDFLNU517028 on 2016-05-17 was within prior fire perimeter 0-1 years before\nSFO-2016CACDFLNU517028 took 0.051s\nCARNEGIE on 2016-05-20 was within prior fire perimeter 0-1 years before\nCARNEGIE took 0.051s\nROSASCO on 2016-05-22 was within prior fire perimeter 2-3 years before\nROSASCO took 0.054s\nSFO-2016CACDFLAC015630 on 2016-05-31 was within prior fire perimeter 3-4 years before\nSFO-2016CACDFLAC015630 took 0.053s\nCEDAR SPRINGS RD MT_RANC on 2016-06-02 was within prior fire perimeter 0-1 years before\nCEDAR SPRINGS RD MT_RANC took 0.312s\nSFO-2016CACDFKRN161993 on 2016-06-02 was within prior fire perimeter 4-5 years before\nSFO-2016CACDFKRN161993 took 0.192s\nSPORTSMAN on 2016-06-04 was within prior fire perimeter 3-4 years before\nSPORTSMAN took 0.398s\nWASH on 2016-06-04 was within prior fire perimeter 2-3 years before\nWASH took 0.056s\nBUTTS on 2016-06-05 was within prior fire perimeter 0-1 years before\nBUTTS took 0.059s\nLITTLE GRIDER on 2016-06-06 was within prior fire perimeter 1-2 years before\nLITTLE GRIDER took 0.047s\nHORSE on 2016-06-07 was within prior fire perimeter 1-2 years before\nHORSE took 0.046s\nGIN on 2016-06-12 was within prior fire perimeter 2-3 years before\nGIN took 0.399s\nHAZEL on 2016-06-13 was within prior fire perimeter 2-3 years before\nHAZEL took 0.051s\nSTAND BY on 2016-06-13 was within prior fire perimeter 2-3 years before\nSTAND BY took 0.053s\nCREEK on 2016-06-13 was within prior fire perimeter 2-3 years before\nCREEK took 0.054s\nSCOTT on 2016-06-16 was within prior fire perimeter 1-2 years before\nSCOTT took 0.7s\nDAILEY on 2016-06-20 was within 
prior fire perimeter 0-1 years before\nDAILEY took 0.054s\nPIONEER on 2016-06-23 was within prior fire perimeter 2-3 years before\nPIONEER took 0.313s\nCALPINE on 2016-06-26 was within prior fire perimeter 2-3 years before\nCALPINE took 0.058s\nCUTOFF on 2016-06-28 was within prior fire perimeter 0-1 years before\nCUTOFF took 0.043s\nLOCK on 2016-06-30 was within prior fire perimeter 0-1 years before\nLOCK took 0.698s\nBRUSHY on 2016-07-02 was within prior fire perimeter 1-2 years before\nBRUSHY took 0.241s\nFOOTHILL on 2016-07-03 was within prior fire perimeter 1-2 years before\nFOOTHILL took 0.06s\nEUCALYPTUS on 2016-07-07 was within prior fire perimeter 1-2 years before\nEUCALYPTUS took 0.052s\nSLAB on 2016-07-08 was within prior fire perimeter 1-2 years before\nSLAB took 0.162s\nSFO-2016CACDFLNU710003 on 2016-07-10 was within prior fire perimeter 0-1 years before\nSFO-2016CACDFLNU710003 took 0.053s\nWHISKEY on 2016-07-10 was within prior fire perimeter 0-1 years before\nWHISKEY took 0.052s\nSFO-2016CACDFLNU711025 on 2016-07-11 was within prior fire perimeter 0-1 years before\nSFO-2016CACDFLNU711025 took 0.051s\nPOINT on 2016-07-12 was within prior fire perimeter 1-2 years before\nPOINT took 0.106s\n1S62 on 2016-07-13 was within prior fire perimeter 2-3 years before\n1S62 took 0.702s\nROAD on 2016-07-16 was within prior fire perimeter 0-1 years before\nROAD took 0.051s\nNORTH FORK on 2016-07-17 was within prior fire perimeter 2-3 years before\nNORTH FORK took 0.248s\nBAILY on 2016-07-20 was within prior fire perimeter 0-1 years before\nBAILY took 0.055s\nGOTT on 2016-07-21 was within prior fire perimeter 1-2 years before\nGOTT took 0.157s\nSFO-2016CACDFVNC021371 on 2016-07-24 was within prior fire perimeter 3-4 years before\nSFO-2016CACDFVNC021371 took 0.062s\nSFO-2016CACDFLNU006830 on 2016-07-26 was within prior fire perimeter 0-1 years before\nSFO-2016CACDFLNU006830 took 0.052s\nBOURLAND on 2016-07-26 was within prior fire perimeter 2-3 years before\nBOURLAND took 0.994s\nRAILROAD on 2016-07-27 was within prior fire perimeter 0-1 years before\nRAILROAD took 0.072s\nSFO-2016CACDFLAC021732 on 2016-07-27 was within prior fire perimeter 0-1 years before\nSFO-2016CACDFLAC021732 took 0.044s\nSTRUCKMAN on 2016-07-28 was within prior fire perimeter 0-1 years before\nSTRUCKMAN took 0.699s\nMURKEN on 2016-07-31 was within prior fire perimeter 2-3 years before\nMURKEN took 0.169s\nTESLA on 2016-08-01 was within prior fire perimeter 0-1 years before\nTESLA took 0.052s\nSHAKE on 2016-08-03 was within prior fire perimeter 2-3 years before\nSHAKE took 0.044s\nSFO-2016CACDFMVU009658 on 2016-08-04 was within prior fire perimeter 2-3 years before\nSFO-2016CACDFMVU009658 took 0.044s\nSFO-2016CACDFKRN162948 on 2016-08-06 was within prior fire perimeter 0-1 years before\nSFO-2016CACDFKRN162948 took 0.051s\nHWY 70 / CARIBOU RD on 2016-08-07 was within prior fire perimeter 4-5 years before\nHWY 70 / CARIBOU RD took 0.054s\n120 on 2016-08-12 was within prior fire perimeter 2-3 years before\n120 took 0.052s\nBORDER 5 on 2016-08-13 was within prior fire perimeter 0-1 years before\nBORDER 5 took 0.053s\nBLAKE on 2016-08-16 was within prior fire perimeter 1-2 years before\nBLAKE took 0.186s\nEVANS on 2016-08-19 was within prior fire perimeter 1-2 years before\nEVANS took 0.056s\nCALISTOGA ST MIDDLETOWN on 2016-08-20 was within prior fire perimeter 0-1 years before\nCALISTOGA ST MIDDLETOWN took 0.049s\nMOUNTAINEER on 2016-08-22 was within prior fire perimeter 1-2 years before\nMOUNTAINEER took 
0.235s\nSFO-2016CACDFMVU010568 on 2016-08-24 was within prior fire perimeter 2-3 years before\nSFO-2016CACDFMVU010568 took 0.296s\nUPPER on 2016-08-25 was within prior fire perimeter 1-2 years before\nUPPER took 0.042s\nFOSTER on 2016-08-26 was within prior fire perimeter 4-5 years before\nFOSTER took 0.055s\nHWY 173 HESPERIA_CITY on 2016-08-26 was within prior fire perimeter 0-1 years before\nHWY 173 HESPERIA_CITY took 0.049s\nRESORT on 2016-08-28 was within prior fire perimeter 2-3 years before\nRESORT took 0.054s\nEARLY on 2016-08-29 was within prior fire perimeter 3-4 years before\nEARLY took 0.054s\nPINE on 2016-09-11 was within prior fire perimeter 0-1 years before\nPINE took 0.06s\nSFO-2016CACDFMVU011370 on 2016-09-11 was within prior fire perimeter 2-3 years before\nSFO-2016CACDFMVU011370 took 0.053s\nPONDEROSA on 2016-09-12 was within prior fire perimeter 4-5 years before\nPONDEROSA took 0.058s\nOWENS RIVER on 2016-09-17 was within prior fire perimeter 0-1 years before\nOWENS RIVER took 0.047s\nSAWMILL on 2016-09-25 was within prior fire perimeter 2-3 years before\nSAWMILL took 0.043s\nVISTA on 2016-09-25 was within prior fire perimeter 2-3 years before\nVISTA took 0.486s\nSFO-2016CACDFLAC028058 on 2016-09-26 was within prior fire perimeter 0-1 years before\nSFO-2016CACDFLAC028058 took 0.055s\nCHERRY on 2016-09-28 was within prior fire perimeter 3-4 years before\nCHERRY took 0.051s\nAPPLE on 2016-09-29 was within prior fire perimeter 0-1 years before\nAPPLE took 0.162s\nHORSETHIEF on 2016-09-30 was within prior fire perimeter 2-3 years before\nHORSETHIEF took 0.056s\nHORSETHIEF 2 on 2016-09-30 was within prior fire perimeter 2-3 years before\nHORSETHIEF 2 took 0.055s\nSAND on 2016-10-03 was within prior fire perimeter 4-5 years before\nSAND took 0.159s\nCUESTA on 2016-10-07 was within prior fire perimeter 1-2 years before\nCUESTA took 0.045s\nTWIN PINE CASINO & HOTEL on 2016-10-08 was within prior fire perimeter 1-2 years before\nTWIN PINE CASINO & HOTEL took 0.057s\nE HWY 20 CLEARLAKE_OAKS on 2016-10-08 was within prior fire perimeter 4-5 years before\nE HWY 20 CLEARLAKE_OAKS took 0.051s\nHILL on 2016-10-10 was within prior fire perimeter 0-1 years before\nHILL took 0.165s\nHWY 173 / ARROWHEAD LA 2 on 2016-10-10 was within prior fire perimeter 0-1 years before\nHWY 173 / ARROWHEAD LA 2 took 0.055s\nSTOCKTON on 2016-10-11 was within prior fire perimeter 0-1 years before\nSTOCKTON took 0.055s\nFORKS on 2016-10-14 was within prior fire perimeter 2-3 years before\nFORKS took 0.052s\nLONE on 2016-10-21 was within prior fire perimeter 0-1 years before\nLONE took 0.058s\nWELDON on 2016-11-16 was within prior fire perimeter 2-3 years before\nWELDON took 0.054s\nHWY 198 / FRAME LN on 2017-03-13 was within prior fire perimeter 0-1 years before\nHWY 198 / FRAME LN took 0.053s\nGOLDEN ARROW on 2017-03-17 was within prior fire perimeter 3-4 years before\nGOLDEN ARROW took 0.596s\nSFO-2017CACDFLAC009120 on 2017-03-27 was within prior fire perimeter 0-1 years before\nSFO-2017CACDFLAC009120 took 0.054s\nWEST on 2017-04-01 was within prior fire perimeter 4-5 years before\nWEST took 0.052s\nARROW on 2017-04-04 was within prior fire perimeter 3-4 years before\nARROW took 0.054s\nTOWER on 2017-04-30 was within prior fire perimeter 0-1 years before\nTOWER took 0.047s\nSFO-2017CACDFLAC012811 on 2017-05-01 was within prior fire perimeter 0-1 years before\nSFO-2017CACDFLAC012811 took 0.29s\nDELUZ on 2017-05-19 was within prior fire perimeter 3-4 years before\nDELUZ took 0.054s\nCALISTOGA ST / YOUNG ST 
on 2017-05-20 was within prior fire perimeter 1-2 years before\nCALISTOGA ST / YOUNG ST took 0.065s\nSFO-2017CACDFLNU522015 on 2017-05-22 was within prior fire perimeter 1-2 years before\nSFO-2017CACDFLNU522015 took 0.171s\nSARAH on 2017-05-23 was within prior fire perimeter 0-1 years before\nSARAH took 0.093s\nOAKHILL on 2017-05-24 was within prior fire perimeter 0-1 years before\nOAKHILL took 0.054s\nGUENOC on 2017-05-27 was within prior fire perimeter 1-2 years before\nGUENOC took 0.064s\nGUENOC 2 on 2017-05-27 was within prior fire perimeter 1-2 years before\nGUENOC 2 took 0.059s\nWOLF on 2017-05-28 was within prior fire perimeter 3-4 years before\nWOLF took 0.07s\nMICHEL on 2017-05-29 was within prior fire perimeter 1-2 years before\nMICHEL took 0.05s\nLOADER on 2017-05-30 was within prior fire perimeter 2-3 years before\nLOADER took 0.808s\nINVESTIGATION - FD/NON FI on 2017-06-02 was within prior fire perimeter 3-4 years before\nINVESTIGATION - FD/NON FI took 0.052s\nOAK on 2017-06-03 was within prior fire perimeter 0-1 years before\nOAK took 0.054s\nOAKWOOD on 2017-06-03 was within prior fire perimeter 0-1 years before\nOAKWOOD took 0.055s\nSHIRELION on 2017-06-04 was within prior fire perimeter 3-4 years before\nSHIRELION took 0.054s\nBIG CANYON RD / SEIGLER C on 2017-06-08 was within prior fire perimeter 1-2 years before\nBIG CANYON RD / SEIGLER C took 0.048s\nSOUTH on 2017-06-13 was within prior fire perimeter 4-5 years before\nSOUTH took 0.108s\nBONHAM on 2017-06-15 was within prior fire perimeter 0-1 years before\nBONHAM took 0.054s\nBEAR on 2017-06-16 was within prior fire perimeter 0-1 years before\nBEAR took 0.598s\nMUSTANG on 2017-06-16 was within prior fire perimeter 0-1 years before\nMUSTANG took 0.054s\nPIONEER on 2017-06-18 was within prior fire perimeter 3-4 years before\nPIONEER took 0.111s\nROAD 19 on 2017-06-25 was within prior fire perimeter 2-3 years before\nROAD 19 took 0.053s\nCOUGAR on 2017-06-25 was within prior fire perimeter 2-3 years before\nCOUGAR took 0.11s\nSFO-2017CACDFLAC004483 on 2017-07-01 was within prior fire perimeter 2-3 years before\nSFO-2017CACDFLAC004483 took 0.063s\nGEOFFREY on 2017-07-02 was within prior fire perimeter 3-4 years before\nGEOFFREY took 0.052s\nNISSAN on 2017-07-03 was within prior fire perimeter 0-1 years before\nNISSAN took 0.065s\nGROVE on 2017-07-04 was within prior fire perimeter 1-2 years before\nGROVE took 0.056s\nSHORTY on 2017-07-07 was within prior fire perimeter 2-3 years before\nSHORTY took 0.049s\nWALL on 2017-07-07 was within prior fire perimeter 3-4 years before\nWALL took 0.05s\nTOWER on 2017-07-07 was within prior fire perimeter 1-2 years before\nTOWER took 1.111s\nSTRIKE on 2017-07-09 was within prior fire perimeter 1-2 years before\nSTRIKE took 0.054s\nGRADE on 2017-07-15 was within prior fire perimeter 1-2 years before\nGRADE took 0.052s\nMARIA on 2017-07-17 was within prior fire perimeter 1-2 years before\nMARIA took 0.068s\nSANTA SUSANA on 2017-07-20 was within prior fire perimeter 0-1 years before\nSANTA SUSANA took 0.053s\nHWY 94 HWY POTRERO 5 on 2017-07-20 was within prior fire perimeter 1-2 years before\nHWY 94 HWY POTRERO 5 took 0.054s\nELSWOOD on 2017-07-21 was within prior fire perimeter 1-2 years before\nELSWOOD took 1.094s\nTRAIL on 2017-07-22 was within prior fire perimeter 1-2 years before\nTRAIL took 0.051s\nHORSE on 2017-07-24 was within prior fire perimeter 0-1 years before\nHORSE took 0.1s\nR9 SHINN on 2017-07-24 was within prior fire perimeter 4-5 years before\nR9 SHINN took 0.136s\nR10 
PAINTER on 2017-07-24 was within prior fire perimeter 4-5 years before\nR10 PAINTER took 0.169s\nR16 BIG SPRINGS on 2017-07-24 was within prior fire perimeter 4-5 years before\nR16 BIG SPRINGS took 0.712s\nR14 BUCKHORN on 2017-07-24 was within prior fire perimeter 1-2 years before\nR14 BUCKHORN took 0.054s\nBARK on 2017-07-25 was within prior fire perimeter 2-3 years before\nBARK took 0.055s\nTYLER on 2017-07-25 was within prior fire perimeter 2-3 years before\nTYLER took 0.057s\nLAKE MTN on 2017-07-25 was within prior fire perimeter 2-3 years before\nLAKE MTN took 0.057s\nPOWER POLE on 2017-07-25 was within prior fire perimeter 0-1 years before\nPOWER POLE took 0.054s\nFRANK on 2017-07-26 was within prior fire perimeter 2-3 years before\nFRANK took 0.051s\nMILLER on 2017-07-26 was within prior fire perimeter 0-1 years before\nMILLER took 0.055s\nBIRD on 2017-07-26 was within prior fire perimeter 0-1 years before\nBIRD took 0.095s\nSTEELE on 2017-07-26 was within prior fire perimeter 0-1 years before\nSTEELE took 0.07s\nROCK2 on 2017-07-28 was within prior fire perimeter 0-1 years before\nROCK2 took 0.902s\nDIAMOND II on 2017-07-28 was within prior fire perimeter 0-1 years before\nDIAMOND II took 0.055s\nDIAMOND 4 on 2017-07-29 was within prior fire perimeter 0-1 years before\nDIAMOND 4 took 0.054s\nJOSE on 2017-08-02 was within prior fire perimeter 3-4 years before\nJOSE took 0.054s\nLOST 2 on 2017-08-02 was within prior fire perimeter 0-1 years before\nLOST 2 took 0.601s\nMARKER on 2017-08-06 was within prior fire perimeter 0-1 years before\nMARKER took 0.061s\nLASSIC on 2017-08-07 was within prior fire perimeter 2-3 years before\nLASSIC took 0.055s\nKELLY on 2017-08-07 was within prior fire perimeter 4-5 years before\nKELLY took 0.164s\nW7 TULE MTN on 2017-08-07 was within prior fire perimeter 4-5 years before\nW7 TULE MTN took 0.043s\nSTAFFORD on 2017-08-08 was within prior fire perimeter 4-5 years before\nSTAFFORD took 0.053s\nROBINSON on 2017-08-08 was within prior fire perimeter 0-1 years before\nROBINSON took 0.042s\nLONG BARN on 2017-08-08 was within prior fire perimeter 3-4 years before\nLONG BARN took 0.057s\nHAYSHED on 2017-08-09 was within prior fire perimeter 2-3 years before\nHAYSHED took 0.058s\nR4 RANCH on 2017-08-09 was within prior fire perimeter 4-5 years before\nR4 RANCH took 0.053s\nZULU 7 on 2017-08-11 was within prior fire perimeter 0-1 years before\nZULU 7 took 0.047s\nLICK GULCH on 2017-08-11 was within prior fire perimeter 0-1 years before\nLICK GULCH took 1.239s\nMONUMENT on 2017-08-11 was within prior fire perimeter 0-1 years before\nMONUMENT took 0.051s\nVIEW on 2017-08-12 was within prior fire perimeter 0-1 years before\nVIEW took 0.049s\nPYRAMID on 2017-08-12 was within prior fire perimeter 0-1 years before\nPYRAMID took 0.058s\nEAST on 2017-08-14 was within prior fire perimeter 0-1 years before\nEAST took 0.072s\nCOOK on 2017-08-14 was within prior fire perimeter 0-1 years before\nCOOK took 0.057s\nSFO-2017CACDFLAC024860 on 2017-08-20 was within prior fire perimeter 1-2 years before\nSFO-2017CACDFLAC024860 took 0.701s\nBLUE on 2017-08-21 was within prior fire perimeter 4-5 years before\nBLUE took 0.053s\nPRESCOTT on 2017-08-22 was within prior fire perimeter 0-1 years before\nPRESCOTT took 0.705s\nHOSKINS on 2017-08-23 was within prior fire perimeter 0-1 years before\nHOSKINS took 0.045s\nSPRUCE on 2017-08-29 was within prior fire perimeter 1-2 years before\nSPRUCE took 0.058s\nPILOT on 2017-08-29 was within prior fire perimeter 4-5 years before\nPILOT took 
0.394s\nDEER on 2017-08-31 was within prior fire perimeter 4-5 years before\nDEER took 0.043s\nSFO-2017CACDFLAC026053 on 2017-08-31 was within prior fire perimeter 1-2 years before\nSFO-2017CACDFLAC026053 took 0.056s\nSIERRA OAKS DR RAILROAD_ on 2017-09-05 was within prior fire perimeter 1-2 years before\nSIERRA OAKS DR RAILROAD_ took 0.248s\nDOGGETT on 2017-09-06 was within prior fire perimeter 3-4 years before\nDOGGETT took 0.051s\nCEDAR WY / JESUS MARIA RD on 2017-09-08 was within prior fire perimeter 1-2 years before\nCEDAR WY / JESUS MARIA RD took 0.045s\nDAVIS on 2017-09-12 was within prior fire perimeter 2-3 years before\nDAVIS took 1.115s\nLOST on 2017-09-13 was within prior fire perimeter 4-5 years before\nLOST took 0.054s\nCOVE on 2017-09-23 was within prior fire perimeter 1-2 years before\nCOVE took 0.996s\nEMERFORD on 2017-10-09 was within prior fire perimeter 2-3 years before\nEMERFORD took 0.096s\nPARTRICK on 2017-10-09 was within prior fire perimeter 0-1 years before\nPARTRICK took 0.058s\nICE on 2017-10-10 was within prior fire perimeter 3-4 years before\nICE took 0.164s\nBONE on 2017-10-11 was within prior fire perimeter 2-3 years before\nBONE took 0.054s\nSFO-2017CACDFLAC030665 on 2017-10-13 was within prior fire perimeter 1-2 years before\nSFO-2017CACDFLAC030665 took 0.11s\nROUTE on 2017-10-13 was within prior fire perimeter 1-2 years before\nROUTE took 0.048s\nCAJON on 2017-10-18 was within prior fire perimeter 1-2 years before\nCAJON took 0.497s\nMORRIS on 2017-11-14 was within prior fire perimeter 1-2 years before\nMORRIS took 0.056s\nVINEYARD on 2017-11-23 was within prior fire perimeter 0-1 years before\nVINEYARD took 0.057s\nSWARTHOUT on 2017-11-29 was within prior fire perimeter 1-2 years before\nSWARTHOUT took 0.057s\nSFO-2017CACDFVNC010391 on 2017-12-05 was within prior fire perimeter 0-1 years before\nSFO-2017CACDFVNC010391 took 1.006s\nSFO-2017CACDFLAC036355 on 2017-12-06 was within prior fire perimeter 0-1 years before\nSFO-2017CACDFLAC036355 took 0.066s\nSFO-2017CACDFSBC010806 on 2017-12-09 was within prior fire perimeter 0-1 years before\nSFO-2017CACDFSBC010806 took 0.25s\nDEER CREEK on 2017-12-10 was within prior fire perimeter 4-5 years before\nDEER CREEK took 0.055s\nVAULT on 2017-12-10 was within prior fire perimeter 1-2 years before\nVAULT took 0.056s\nLONGHORN on 2017-12-13 was within prior fire perimeter 4-5 years before\nLONGHORN took 0.055s\nSFO-2017CACDFVNC011095 on 2017-12-15 was within prior fire perimeter 0-1 years before\nSFO-2017CACDFVNC011095 took 0.057s\nSFO-2017CACDFVNC011299 on 2017-12-20 was within prior fire perimeter 0-1 years before\nSFO-2017CACDFVNC011299 took 0.059s\nSFO-2017CACDFVNC011421 on 2017-12-24 was within prior fire perimeter 0-1 years before\nSFO-2017CACDFVNC011421 took 0.056s\nSFO-2017CACDFSBC011491 on 2017-12-26 was within prior fire perimeter 0-1 years before\nSFO-2017CACDFSBC011491 took 0.066s\nSFO-2017CACDFSBC011651 on 2017-12-31 was within prior fire perimeter 0-1 years before\nSFO-2017CACDFSBC011651 took 0.184s\nRUBY on 2018-01-03 was within prior fire perimeter 4-5 years before\nRUBY took 0.054s\nBOGGS MOUNTAIN STATE FORE on 2018-02-08 was within prior fire perimeter 2-3 years before\nBOGGS MOUNTAIN STATE FORE took 0.046s\nPILOT on 2018-02-24 was within prior fire perimeter 3-4 years before\nPILOT took 0.053s\nLUMPKIN on 2018-02-27 was within prior fire perimeter 2-3 years before\nLUMPKIN took 0.053s\nEMBLEM on 2018-03-02 was within prior fire perimeter 1-2 years before\nEMBLEM took 0.054s\nBRIDGE on 2018-04-21 was 
within prior fire perimeter 0-1 years before\nBRIDGE took 0.057s\nGRAND on 2018-04-27 was within prior fire perimeter 4-5 years before\nGRAND took 0.057s\nPOINT on 2018-04-30 was within prior fire perimeter 4-5 years before\nPOINT took 0.056s\nSUN RD MT_RANCH on 2018-05-05 was within prior fire perimeter 2-3 years before\nSUN RD MT_RANCH took 0.05s\nSFO-2018CACDFLAC013630 on 2018-05-06 was within prior fire perimeter 0-1 years before\nSFO-2018CACDFLAC013630 took 0.056s\nPOST on 2018-05-08 was within prior fire perimeter 2-3 years before\nPOST took 0.051s\nSTRUCKMAN RD / RAILROAD F on 2018-05-10 was within prior fire perimeter 2-3 years before\nSTRUCKMAN RD / RAILROAD F took 0.051s\nSFO-2018CACDFLNU513010 on 2018-05-13 was within prior fire perimeter 2-3 years before\nSFO-2018CACDFLNU513010 took 0.042s\nDEN on 2018-05-15 was within prior fire perimeter 2-3 years before\nDEN took 0.057s\nCOBBLESTONE on 2018-05-16 was within prior fire perimeter 4-5 years before\nCOBBLESTONE took 0.055s\nFARMS on 2018-05-28 was within prior fire perimeter 4-5 years before\nFARMS took 0.055s\nMY WAY on 2018-05-30 was within prior fire perimeter 2-3 years before\nMY WAY took 0.06s\nVERJELES on 2018-06-01 was within prior fire perimeter 0-1 years before\nVERJELES took 0.043s\nGORGE on 2018-06-04 was within prior fire perimeter 1-2 years before\nGORGE took 0.054s\nBONANZA WY / LOMA RICA RD on 2018-06-06 was within prior fire perimeter 0-1 years before\nBONANZA WY / LOMA RICA RD took 0.059s\nORTEGA on 2018-06-08 was within prior fire perimeter 4-5 years before\nORTEGA took 0.05s\nSFO-2018CACDFRRU000266 on 2018-06-11 was within prior fire perimeter 4-5 years before\nSFO-2018CACDFRRU000266 took 0.055s\nBEAVER on 2018-06-12 was within prior fire perimeter 0-1 years before\nBEAVER took 0.056s\nMCNELL on 2018-06-14 was within prior fire perimeter 0-1 years before\nMCNELL took 0.05s\nGRAND on 2018-06-14 was within prior fire perimeter 4-5 years before\nGRAND took 0.057s\nTUNNEL on 2018-06-15 was within prior fire perimeter 0-1 years before\nTUNNEL took 0.055s\nWOOD on 2018-06-15 was within prior fire perimeter 0-1 years before\nWOOD took 0.047s\nCHINA on 2018-06-17 was within prior fire perimeter 1-2 years before\nCHINA took 0.056s\nRV on 2018-06-19 was within prior fire perimeter 0-1 years before\nRV took 0.06s\nOAK on 2018-06-19 was within prior fire perimeter 1-2 years before\nOAK took 0.058s\nSFO-2018CACDFLNU622024 on 2018-06-22 was within prior fire perimeter 2-3 years before\nSFO-2018CACDFLNU622024 took 0.044s\nOAKHAVEN on 2018-06-23 was within prior fire perimeter 0-1 years before\nOAKHAVEN took 0.054s\nMOUNTAIN on 2018-06-25 was within prior fire perimeter 0-1 years before\nMOUNTAIN took 0.056s\nMICHEL RD MT_RANCH on 2018-06-28 was within prior fire perimeter 2-3 years before\nMICHEL RD MT_RANCH took 0.051s\nTESLA on 2018-06-29 was within prior fire perimeter 2-3 years before\nTESLA took 0.049s\nSFO-2018CACDFLNU630016 on 2018-06-30 was within prior fire perimeter 0-1 years before\nSFO-2018CACDFLNU630016 took 0.056s\nSFO-2018CACDFLNU630018 on 2018-06-30 was within prior fire perimeter 2-3 years before\nSFO-2018CACDFLNU630018 took 0.056s\nHWY 175 / SAN DIEGO AV on 2018-07-01 was within prior fire perimeter 2-3 years before\nHWY 175 / SAN DIEGO AV took 0.06s\nWOODEN on 2018-07-01 was within prior fire perimeter 0-1 years before\nWOODEN took 0.047s\nSFO-2018CACDFTGU000202 on 2018-07-05 was within prior fire perimeter 0-1 years before\nSFO-2018CACDFTGU000202 took 0.06s\nSOLEDAD on 2018-07-06 was within prior fire 
perimeter 1-2 years before\nSOLEDAD took 0.058s\nPIGEON on 2018-07-07 was within prior fire perimeter 0-1 years before\nPIGEON took 0.055s\nLOMA on 2018-07-07 was within prior fire perimeter 0-1 years before\nLOMA took 0.055s\nMURRY on 2018-07-07 was within prior fire perimeter 2-3 years before\nMURRY took 0.056s\nPEACH on 2018-07-07 was within prior fire perimeter 4-5 years before\nPEACH took 0.046s\nTUJUNGA on 2018-07-07 was within prior fire perimeter 0-1 years before\nTUJUNGA took 0.056s\nHORSE on 2018-07-07 was within prior fire perimeter 4-5 years before\nHORSE took 0.043s\nLOPEZ on 2018-07-08 was within prior fire perimeter 0-1 years before\nLOPEZ took 0.056s\nWALLACE on 2018-07-12 was within prior fire perimeter 0-1 years before\nWALLACE took 0.059s\nHARBIN on 2018-07-12 was within prior fire perimeter 2-3 years before\nHARBIN took 0.059s\nLOST on 2018-07-12 was within prior fire perimeter 4-5 years before\nLOST took 0.056s\nNORTHBOUND on 2018-07-13 was within prior fire perimeter 1-2 years before\nNORTHBOUND took 0.056s\nHORSE on 2018-07-15 was within prior fire perimeter 3-4 years before\nHORSE took 0.059s\nGUPPY on 2018-07-15 was within prior fire perimeter 3-4 years before\nGUPPY took 0.058s\nFISH on 2018-07-15 was within prior fire perimeter 3-4 years before\nFISH took 0.058s\nH-2 CAMP on 2018-07-15 was within prior fire perimeter 3-4 years before\nH-2 CAMP took 0.055s\n3-14 TAYLOR on 2018-07-15 was within prior fire perimeter 3-4 years before\n3-14 TAYLOR took 0.056s\nBRIGGS on 2018-07-16 was within prior fire perimeter 1-2 years before\nBRIGGS took 0.052s\nWEST on 2018-07-18 was within prior fire perimeter 0-1 years before\nWEST took 0.055s\nRAILROAD on 2018-07-21 was within prior fire perimeter 2-3 years before\nRAILROAD took 0.045s\nTESLA on 2018-07-25 was within prior fire perimeter 2-3 years before\nTESLA took 0.054s\nRAILROAD on 2018-07-25 was within prior fire perimeter 2-3 years before\nRAILROAD took 0.055s\nPACIFIC on 2018-07-26 was within prior fire perimeter 4-5 years before\nPACIFIC took 0.055s\nGRANGE on 2018-07-29 was within prior fire perimeter 2-3 years before\nGRANGE took 0.049s\nCREST on 2018-07-31 was within prior fire perimeter 3-4 years before\nCREST took 0.057s\nVERJELLES on 2018-08-01 was within prior fire perimeter 0-1 years before\nVERJELLES took 0.061s\nGRAND on 2018-08-02 was within prior fire perimeter 1-2 years before\nGRAND took 0.059s\nCREEK on 2018-08-03 was within prior fire perimeter 1-2 years before\nCREEK took 0.056s\nCONRAD on 2018-08-04 was within prior fire perimeter 0-1 years before\nCONRAD took 0.062s\nSFO-2018CACDFLNU805002 on 2018-08-05 was within prior fire perimeter 0-1 years before\nSFO-2018CACDFLNU805002 took 0.06s\nHYUNDI on 2018-08-06 was within prior fire perimeter 1-2 years before\nHYUNDI took 0.057s\nPINE on 2018-08-07 was within prior fire perimeter 1-2 years before\nPINE took 0.056s\nETTAWA on 2018-08-09 was within prior fire perimeter 2-3 years before\nETTAWA took 0.057s\nSFO-2018CACDFLNU811018 on 2018-08-11 was within prior fire perimeter 3-4 years before\nSFO-2018CACDFLNU811018 took 0.054s\nSFO-2018CACDFLNU813019 on 2018-08-13 was within prior fire perimeter 3-4 years before\nSFO-2018CACDFLNU813019 took 0.056s\nRUNNING on 2018-08-15 was within prior fire perimeter 2-3 years before\nRUNNING took 0.054s\nORO on 2018-08-18 was within prior fire perimeter 0-1 years before\nORO took 0.059s\nWATERSHED on 2018-08-20 was within prior fire perimeter 1-2 years before\nWATERSHED took 0.057s\nDIABLO on 2018-08-23 was within prior 
fire perimeter 4-5 years before\nDIABLO took 0.054s\nENTERPRISE on 2018-08-28 was within prior fire perimeter 2-3 years before\nENTERPRISE took 0.055s\nMCCABE on 2018-08-28 was within prior fire perimeter 2-3 years before\nMCCABE took 0.058s\nBULLION on 2018-09-06 was within prior fire perimeter 1-2 years before\nBULLION took 0.059s\nSPRINGS on 2018-09-20 was within prior fire perimeter 2-3 years before\nSPRINGS took 0.043s\nSCOTT on 2018-09-21 was within prior fire perimeter 0-1 years before\nSCOTT took 0.044s\nMC CALL PARK RD / HY 74 on 2018-09-21 was within prior fire perimeter 0-1 years before\nMC CALL PARK RD / HY 74 took 0.067s\nCRAZY on 2018-09-23 was within prior fire perimeter 3-4 years before\nCRAZY took 0.045s\nEAGLE on 2018-10-02 was within prior fire perimeter 1-2 years before\nEAGLE took 0.044s\nSHADOW on 2018-10-05 was within prior fire perimeter 1-2 years before\nSHADOW took 0.042s\nJOURNEY on 2018-10-10 was within prior fire perimeter 3-4 years before\nJOURNEY took 0.042s\nSFO-2018CACDFLNU014021 on 2018-10-14 was within prior fire perimeter 3-4 years before\nSFO-2018CACDFLNU014021 took 0.04s\nCONOVER on 2018-10-15 was within prior fire perimeter 0-1 years before\nCONOVER took 0.044s\nCOYOTE on 2018-10-16 was within prior fire perimeter 1-2 years before\nCOYOTE took 0.043s\nCASTLE 2 on 2018-10-16 was within prior fire perimeter 1-2 years before\nCASTLE 2 took 0.046s\nTRIMMER on 2018-10-17 was within prior fire perimeter 2-3 years before\nTRIMMER took 0.043s\nHUMMINGBIRD on 2018-10-19 was within prior fire perimeter 1-2 years before\nHUMMINGBIRD took 0.042s\nW HWY 20 UPPER_LAKE on 2018-10-27 was within prior fire perimeter 0-1 years before\nW HWY 20 UPPER_LAKE took 0.047s\nCANYON on 2018-10-28 was within prior fire perimeter 4-5 years before\nCANYON took 0.04s\nCOY on 2018-10-29 was within prior fire perimeter 1-2 years before\nCOY took 0.043s\nBARTON FLATS on 2018-11-10 was within prior fire perimeter 3-4 years before\nBARTON FLATS took 0.042s\nADOBE CANYON RD KENWOOD on 2018-11-11 was within prior fire perimeter 1-2 years before\nADOBE CANYON RD KENWOOD took 0.045s\nCAMP B on 2018-11-12 was within prior fire perimeter 0-1 years before\nCAMP B took 0.047s\nROCK on 2018-11-13 was within prior fire perimeter 0-1 years before\nROCK took 0.045s\nPARK on 2018-11-13 was within prior fire perimeter 0-1 years before\nPARK took 0.045s\nMC CALL PARK RD MOUN on 2018-11-13 was within prior fire perimeter 0-1 years before\nMC CALL PARK RD MOUN took 0.043s\nPOWERHOUSE on 2018-11-19 was within prior fire perimeter 1-2 years before\nPOWERHOUSE took 0.049s\nMORGAN VALLEY RD / BONHAM on 2018-11-20 was within prior fire perimeter 2-3 years before\nMORGAN VALLEY RD / BONHAM took 0.042s\nSFO-2018CACDFVNC009802 on 2018-11-21 was within prior fire perimeter 0-1 years before\nSFO-2018CACDFVNC009802 took 0.045s\nHOOD MOUNTAIN REGIONAL PA on 2018-11-22 was within prior fire perimeter 1-2 years before\nHOOD MOUNTAIN REGIONAL PA took 0.044s\nCALISTOGA RD SANTA_ROSA on 2018-11-23 was within prior fire perimeter 1-2 years before\nCALISTOGA RD SANTA_ROSA took 0.044s\nRAILROAD FLAT RD S MT_RAN on 2018-11-28 was within prior fire perimeter 3-4 years before\nRAILROAD FLAT RD S MT_RAN took 0.041s\nMELROSE LN LOMA_RICA_FD on 2018-12-03 was within prior fire perimeter 1-2 years before\nMELROSE LN LOMA_RICA_FD took 0.042s\nSFO-2018CACDFLAC039207 on 2018-12-27 was within prior fire perimeter 0-1 years before\nSFO-2018CACDFLAC039207 took 0.045s\n"
]
],
[
[
"Add a rank for each fire cause so that natural/accidental causes are prioritized as the model only deals with fires that can be predicted based on natural weather and soil events.",
"_____no_output_____"
]
],
[
[
"conn = sqlite3.connect('../data/fires.sqlite')\ncur = conn.cursor()\n\ncur.execute('DROP TABLE IF EXISTS cause_rank')\ncur.execute(\"\"\"\n\tCREATE TABLE cause_rank (\n\t\trank\t\tINTEGER NOT NULL DEFAULT 0,\n\t\tcause\t\tTEXT NOT NULL,\n\t\tPRIMARY KEY (cause)\n\t)\n\"\"\")\n\n# Rank by natural or made more likely by natural conditions (ie drought)\ncur.execute(\"\"\"\nINSERT INTO cause_rank (cause, rank)\nselect 'Natural', 1 union\nselect 'Power', 2 union\nselect 'Recreation', 3 union\nselect 'Other causes', 4 union\nselect 'Smoking', 5 union\nselect 'Railroad', 6 union\nselect 'Equipment Use', 7 union\nselect 'Missing/Undefined', 8 union\nselect 'Children', 9 union\nselect 'Debris Burning', 10 union\nselect 'Firearms', 11 union\nselect 'Fireworks', 12 union\nselect 'Arson', 13\n\"\"\")\n\ncur.execute('DROP TABLE fires_rollup')\ncur.execute(\"\"\"\n\tCREATE TABLE fires_rollup AS\n\t\tselect\n\t\t\tdate,\n\t\t\tlong,\n\t\t\tlat,\n\t\t\tcause,\n\t\t\tfire_size_class,\n\t\t\tprior_fire_0_1_year,\n\t\t\tprior_fire_1_2_year,\n\t\t\tprior_fire_2_3_year,\n\t\t\tprior_fire_3_4_year,\n\t\t\tprior_fire_4_5_year\n\t\tfrom (\n\t\t\tselect\n\t\t\t\tdate,\n\t\t\t\tlong,\n\t\t\t\tlat,\n\t\t\t\tmin(rank) as cause_rank,\n\t\t\t\tmax(fire_size_class) as fire_size_class,\n\t\t\t\tmax(prior_fire_0_1_year) as prior_fire_0_1_year,\n\t\t\t\tmax(prior_fire_1_2_year) as prior_fire_1_2_year,\n\t\t\t\tmax(prior_fire_2_3_year) as prior_fire_2_3_year,\n\t\t\t\tmax(prior_fire_3_4_year) as prior_fire_3_4_year,\n\t\t\t\tmax(prior_fire_4_5_year) as prior_fire_4_5_year\n\t\t\tfrom (\n\t\t\t\t-- Join date and 1, 2 and 3 days earlier in case the discovery date is delayed\n\t\t\t\tselect date, long, lat, cause, fire_size_class, prior_fire_0_1_year, prior_fire_1_2_year, prior_fire_2_3_year, prior_fire_3_4_year, prior_fire_4_5_year\n\t\t\t\tfrom fires\n\t\t\t\tunion\n\t\t\t\tselect date_1d_before as date, long, lat, cause, fire_size_class, prior_fire_0_1_year, prior_fire_1_2_year, prior_fire_2_3_year, prior_fire_3_4_year, prior_fire_4_5_year\n\t\t\t\tfrom fires\n\t\t\t\tunion\n\t\t\t\tselect date_2d_before as date, long, lat, cause, fire_size_class, prior_fire_0_1_year, prior_fire_1_2_year, prior_fire_2_3_year, prior_fire_3_4_year, prior_fire_4_5_year\n\t\t\t\tfrom fires\n\t\t\t\tunion\n\t\t\t\tselect date_3d_before as date, long, lat, cause, fire_size_class, prior_fire_0_1_year, prior_fire_1_2_year, prior_fire_2_3_year, prior_fire_3_4_year, prior_fire_4_5_year\n\t\t\t\tfrom fires\n\t\t\t) as fires\n\t\t\tinner join cause_rank\n\t\t\t\ton cause_rank.cause = fires.cause\n\t\t\tgroup by date, long, lat\n\t\t) as fires\n\t\tleft join cause_rank\n\t\t\ton cause_rank.rank = fires.cause_rank\n\"\"\")\n\ncur.execute('DROP INDEX IF EXISTS idx_fires_rollup_date_long_lat')\ncur.execute('CREATE INDEX idx_fires_rollup_date_long_lat ON fires_rollup(date, long, lat)')\n\nconn.commit()\nconn.close()",
"_____no_output_____"
],
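[
"# Added sanity-check sketch (not part of the original pipeline): peek at the\n# tables created above. Assumes sqlite3 is already imported earlier in this\n# notebook and ../data/fires.sqlite exists.\nconn = sqlite3.connect('../data/fires.sqlite')\ncur = conn.cursor()\nfor table in ('cause_rank', 'fires_rollup'):\n\tn_rows = cur.execute(f'SELECT COUNT(*) FROM {table}').fetchone()[0]\n\tprint(f'{table}: {n_rows:,} rows')\nconn.close()",
"_____no_output_____"
],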
[
"conn = sqlite3.connect('../data/fires.sqlite')\ncur = conn.cursor()\n\ncur.execute('DROP TABLE IF EXISTS weather_geo_no_fire')\ncur.execute(\"\"\"\ncreate table weather_geo_no_fire as\n\tselect *, 0 as prior_fire_0_1_year, 0 as prior_fire_1_2_year, 0 as prior_fire_2_3_year, 0 as prior_fire_3_4_year, 0 as prior_fire_4_5_year\n\tfrom weather_geo\n\twhere not exists(\n\t\tselect 1\n\t\tfrom fires_rollup\n\t\twhere\n\t\t\tfires_rollup.date = weather_geo.date\n\t\t\tand fires_rollup.long = weather_geo.long\n\t\t\tand fires_rollup.lat = weather_geo.lat\n\t)\n\"\"\")\n\ncur.execute('DROP INDEX IF EXISTS idx_weather_geo_no_fire_fips')\ncur.execute('CREATE INDEX idx_weather_geo_no_fire_fips ON weather_geo_no_fire(fips)')\n\ncur.execute('DROP INDEX IF EXISTS idx_weather_geo_no_fire_date_long_lat')\ncur.execute('CREATE INDEX idx_weather_geo_no_fire_date_long_lat ON weather_geo_no_fire(date, long, lat)')\n\ncur.execute('DROP INDEX IF EXISTS idx_weather_geo_no_fire_long_lat')\ncur.execute('CREATE INDEX idx_weather_geo_no_fire_long_lat ON weather_geo_no_fire(long, lat)')\n\nconn.close()",
"_____no_output_____"
],
[
"conn = sqlite3.connect('../data/fires.sqlite')\ncur = conn.cursor()\n\ncur.execute('DROP TABLE IF EXISTS weather_geo_no_fire_100k')\ncur.execute(\"\"\"\ncreate table weather_geo_no_fire_100k as\nselect *\nfrom weather_geo_no_fire\norder by random()\nlimit 100000\n\"\"\")\n\ncur.execute('DROP INDEX IF EXISTS idx_weather_geo_no_fire_100k_date_long_lat')\ncur.execute('CREATE INDEX idx_weather_geo_no_fire_100k_date_long_lat ON weather_geo_no_fire_100k(date, long, lat)')\n\ncur.execute('DROP INDEX IF EXISTS idx_weather_geo_no_fire_100k_long_lat')\ncur.execute('CREATE INDEX idx_weather_geo_no_fire_100k_long_lat ON weather_geo_no_fire_100k(long, lat)')\n\nconn.close()",
"_____no_output_____"
],
[
"conn = sqlite3.connect('../data/fires.sqlite')\ncur = conn.cursor()\n\none_year_in_sec = 365 * 24 * 60 * 60\n\ndef diff_years(date1, date2):\n\treturn math.floor((date1 - date2) / one_year_in_sec)\n\nfor fire_year in range(2000, 2019):\n\tprint(f'Fire year {fire_year}')\n\tdf_fires_large = pd.read_sql_query(\"\"\"\n\tselect long, lat, strftime('%s', date) as date_in_sec\n\tfrom fires_rollup\n\twhere\n\t\tfire_size_class >= 'D'\n\t\tand date <= :max_date\n\t\tand date >= :min_date\n\torder by long, lat, date\n\t\"\"\", conn, params = {'max_date': f'{fire_year}-12-31', 'min_date': f'{fire_year-5}-01-01'})\n\n\tdf_fires_large['date_in_sec'] = df_fires_large.date_in_sec.astype(int)\n\n\tfire_dict = df_fires_large.groupby(['long', 'lat'])['date_in_sec'].apply(np.array).to_dict()\n\n\tdf_weather = pd.read_sql_query(\"\"\"\n\tselect rowid, long, lat, strftime('%s', date) as date_in_sec\n\tfrom weather_geo_no_fire\n\twhere\n\t\tyear = :fire_year\n\t\"\"\", conn, index_col=['rowid'], params = {'fire_year': fire_year})\n\n\ti = 0\n\n\tfor rowid, row in df_weather.iterrows():\n\t\tpt = (row.long, row.lat)\n\t\tfire_dates = fire_dict.get(pt)\n\n\t\tif fire_dates is not None:\n\t\t\tdate_in_sec = int(row.date_in_sec)\n\t\t\tfive_years_before_in_sec = date_in_sec - (one_year_in_sec * 5)\n\t\t\tmatches = fire_dates[(fire_dates <= date_in_sec) & (fire_dates >= five_years_before_in_sec)]\t\n\t\t\tyears_back = np.unique([diff_years(date_in_sec, match) for match in matches])\n\n\t\t\tsql = \"\"\n\t\t\tfor year in years_back:\n\t\t\t\tif year >=0 and year < 5:\n\t\t\t\t\tsql += f\" prior_fire_{year}_{year+1}_year = 1,\"\n\n\t\t\tif sql != \"\":\n\t\t\t\tif i % 10000 == 0:\n\t\t\t\t\tprint(f\"UPDATE weather_geo_no_fire SET {sql[:-1]} WHERE rowid = {rowid}\")\n\t\t\t\tcur.execute(f\"UPDATE weather_geo_no_fire SET {sql[:-1]} WHERE rowid = {rowid}\")\n\t\t\t\tconn.commit()\n\t\t\t\ti += 1\n\nconn.close()",
"Fire year 2003\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1 WHERE rowid = 3\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 1825186\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1 WHERE rowid = 3641313\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1, prior_fire_2_3_year = 1 WHERE rowid = 5455520\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 7274630\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 9123828\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 10955960\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1 WHERE rowid = 12775037\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 14601463\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1, prior_fire_3_4_year = 1 WHERE rowid = 16426606\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1 WHERE rowid = 18249826\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 20123128\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 21968079\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1 WHERE rowid = 23820255\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 25669137\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 27514838\nFire year 2004\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1 WHERE rowid = 258\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1, prior_fire_1_2_year = 1 WHERE rowid = 1403586\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1, prior_fire_3_4_year = 1 WHERE rowid = 2802747\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 4188656\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1 WHERE rowid = 5577823\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1 WHERE rowid = 6982327\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1 WHERE rowid = 8368386\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 9767774\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1, prior_fire_2_3_year = 1, prior_fire_3_4_year = 1 WHERE rowid = 11149694\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 12521455\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 13886263\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1 WHERE rowid = 15262437\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 16628473\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1 WHERE rowid = 18022202\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1, prior_fire_1_2_year = 1, prior_fire_4_5_year = 1 WHERE rowid = 19389826\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 20755795\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1 WHERE rowid = 22144177\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 23501913\nUPDATE weather_geo_no_fire SET prior_fire_4_5_year = 1 WHERE rowid = 24886273\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1 WHERE rowid = 26278450\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 27664848\nFire year 2005\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1, prior_fire_1_2_year = 1 WHERE rowid = 215\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 1251413\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 2470063\nUPDATE weather_geo_no_fire SET prior_fire_4_5_year = 1 WHERE rowid = 3734177\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1 WHERE rowid = 
4960539\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1 WHERE rowid = 6191892\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1, prior_fire_3_4_year = 1, prior_fire_4_5_year = 1 WHERE rowid = 7423513\nUPDATE weather_geo_no_fire SET prior_fire_4_5_year = 1 WHERE rowid = 8663147\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1, prior_fire_4_5_year = 1 WHERE rowid = 9879445\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 11113726\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1, prior_fire_3_4_year = 1 WHERE rowid = 12352839\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 13601481\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 14848075\nUPDATE weather_geo_no_fire SET prior_fire_4_5_year = 1 WHERE rowid = 16085236\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 17314129\nUPDATE weather_geo_no_fire SET prior_fire_4_5_year = 1 WHERE rowid = 18527249\nUPDATE weather_geo_no_fire SET prior_fire_4_5_year = 1 WHERE rowid = 19763907\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1 WHERE rowid = 20999452\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1 WHERE rowid = 22227331\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 23485087\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1 WHERE rowid = 24713016\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1 WHERE rowid = 25948278\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 27173477\nFire year 2006\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1, prior_fire_4_5_year = 1 WHERE rowid = 91\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1 WHERE rowid = 1157291\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1, prior_fire_2_3_year = 1 WHERE rowid = 2307498\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 3489280\nUPDATE weather_geo_no_fire SET prior_fire_4_5_year = 1 WHERE rowid = 4647263\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1 WHERE rowid = 5824848\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1 WHERE rowid = 6991024\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1 WHERE rowid = 8129718\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 9283629\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1 WHERE rowid = 10449395\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1 WHERE rowid = 11627143\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 12800861\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1 WHERE rowid = 13960719\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1, prior_fire_4_5_year = 1 WHERE rowid = 15131477\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1 WHERE rowid = 16290549\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 17423666\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 18583221\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 19748522\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 20892828\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 22049861\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 23202135\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 24365128\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1, prior_fire_4_5_year = 1 WHERE rowid = 25515698\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 26669785\nUPDATE weather_geo_no_fire 
SET prior_fire_2_3_year = 1 WHERE rowid = 27820288\nFire year 2007\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 52\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 1091542\nUPDATE weather_geo_no_fire SET prior_fire_4_5_year = 1 WHERE rowid = 2207803\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 3318285\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1, prior_fire_3_4_year = 1 WHERE rowid = 4429798\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 5550121\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1 WHERE rowid = 6663060\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1, prior_fire_4_5_year = 1 WHERE rowid = 7785601\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 8892768\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1 WHERE rowid = 9985761\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 11102884\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1, prior_fire_3_4_year = 1 WHERE rowid = 12208804\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 13306508\nUPDATE weather_geo_no_fire SET prior_fire_4_5_year = 1 WHERE rowid = 14418806\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1, prior_fire_4_5_year = 1 WHERE rowid = 15547656\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 16655784\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1 WHERE rowid = 17796677\nUPDATE weather_geo_no_fire SET prior_fire_4_5_year = 1 WHERE rowid = 18911014\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 20035100\nUPDATE weather_geo_no_fire SET prior_fire_4_5_year = 1 WHERE rowid = 21153403\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 22267143\nUPDATE weather_geo_no_fire SET prior_fire_4_5_year = 1 WHERE rowid = 23371547\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1 WHERE rowid = 24498657\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 25618913\nUPDATE weather_geo_no_fire SET prior_fire_4_5_year = 1 WHERE rowid = 26736795\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 27858610\nFire year 2008\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1 WHERE rowid = 327\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1, prior_fire_4_5_year = 1 WHERE rowid = 1074294\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1 WHERE rowid = 2135162\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1 WHERE rowid = 3174184\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1, prior_fire_4_5_year = 1 WHERE rowid = 4217793\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1, prior_fire_4_5_year = 1 WHERE rowid = 5276615\nUPDATE weather_geo_no_fire SET prior_fire_4_5_year = 1 WHERE rowid = 6319746\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 7365861\nUPDATE weather_geo_no_fire SET prior_fire_4_5_year = 1 WHERE rowid = 8411170\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 9476323\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 10527252\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 11570825\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1 WHERE rowid = 12622745\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1, prior_fire_2_3_year = 1 WHERE rowid = 13663997\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 14704409\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1 WHERE 
rowid = 15771149\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1 WHERE rowid = 16832283\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 17876312\nUPDATE weather_geo_no_fire SET prior_fire_4_5_year = 1 WHERE rowid = 18920982\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1 WHERE rowid = 19969458\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1 WHERE rowid = 21019261\nUPDATE weather_geo_no_fire SET prior_fire_4_5_year = 1 WHERE rowid = 22075349\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 23127947\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1 WHERE rowid = 24188725\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1 WHERE rowid = 25228502\nUPDATE weather_geo_no_fire SET prior_fire_4_5_year = 1 WHERE rowid = 26273155\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1, prior_fire_4_5_year = 1 WHERE rowid = 27313076\nFire year 2009\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1 WHERE rowid = 35\nUPDATE weather_geo_no_fire SET prior_fire_4_5_year = 1 WHERE rowid = 1045517\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1 WHERE rowid = 2115841\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1 WHERE rowid = 3177235\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1, prior_fire_3_4_year = 1 WHERE rowid = 4232785\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1 WHERE rowid = 5294242\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 6361535\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1, prior_fire_2_3_year = 1 WHERE rowid = 7413601\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 8467822\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 9514270\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1 WHERE rowid = 10591126\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1 WHERE rowid = 11646916\nUPDATE weather_geo_no_fire SET prior_fire_4_5_year = 1 WHERE rowid = 12708162\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 13759457\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1 WHERE rowid = 14826838\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1 WHERE rowid = 15874404\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 16942989\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 17992289\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1, prior_fire_2_3_year = 1 WHERE rowid = 19040067\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 20107197\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1, prior_fire_4_5_year = 1 WHERE rowid = 21167986\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1 WHERE rowid = 22222082\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 23264762\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1 WHERE rowid = 24320146\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 25389160\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1, prior_fire_3_4_year = 1 WHERE rowid = 26449083\nUPDATE weather_geo_no_fire SET prior_fire_4_5_year = 1 WHERE rowid = 27519939\nFire year 2010\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1 WHERE rowid = 1\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 1096733\nUPDATE weather_geo_no_fire SET prior_fire_4_5_year = 1 WHERE rowid = 2192880\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 3296240\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1, 
prior_fire_4_5_year = 1 WHERE rowid = 4378849\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1, prior_fire_1_2_year = 1 WHERE rowid = 5491555\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1 WHERE rowid = 6576187\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1 WHERE rowid = 7684638\nUPDATE weather_geo_no_fire SET prior_fire_4_5_year = 1 WHERE rowid = 8784639\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1 WHERE rowid = 9883137\nUPDATE weather_geo_no_fire SET prior_fire_4_5_year = 1 WHERE rowid = 10978552\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1, prior_fire_3_4_year = 1 WHERE rowid = 12081280\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1 WHERE rowid = 13173731\nUPDATE weather_geo_no_fire SET prior_fire_4_5_year = 1 WHERE rowid = 14274989\nUPDATE weather_geo_no_fire SET prior_fire_4_5_year = 1 WHERE rowid = 15379757\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1, prior_fire_4_5_year = 1 WHERE rowid = 16475423\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 17567544\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 18651720\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 19743587\nUPDATE weather_geo_no_fire SET prior_fire_4_5_year = 1 WHERE rowid = 20826203\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 21912441\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 23004366\nUPDATE weather_geo_no_fire SET prior_fire_4_5_year = 1 WHERE rowid = 24095521\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1, prior_fire_4_5_year = 1 WHERE rowid = 25185885\nUPDATE weather_geo_no_fire SET prior_fire_4_5_year = 1 WHERE rowid = 26270118\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1 WHERE rowid = 27350376\nFire year 2011\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1 WHERE rowid = 62\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 1190400\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 2390830\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1, prior_fire_4_5_year = 1 WHERE rowid = 3580734\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 4794792\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1 WHERE rowid = 5989033\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1, prior_fire_4_5_year = 1 WHERE rowid = 7193244\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 8380473\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1, prior_fire_2_3_year = 1 WHERE rowid = 9591349\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 10772958\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1 WHERE rowid = 11971402\nUPDATE weather_geo_no_fire SET prior_fire_4_5_year = 1 WHERE rowid = 13159857\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1, prior_fire_1_2_year = 1 WHERE rowid = 14349299\nUPDATE weather_geo_no_fire SET prior_fire_4_5_year = 1 WHERE rowid = 15551802\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1 WHERE rowid = 16745849\nUPDATE weather_geo_no_fire SET prior_fire_4_5_year = 1 WHERE rowid = 17956567\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 19141839\nUPDATE weather_geo_no_fire SET prior_fire_4_5_year = 1 WHERE rowid = 20340559\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 21546467\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1, prior_fire_4_5_year = 1 WHERE rowid = 22761827\nUPDATE weather_geo_no_fire SET 
prior_fire_3_4_year = 1 WHERE rowid = 23964273\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 25127803\nUPDATE weather_geo_no_fire SET prior_fire_4_5_year = 1 WHERE rowid = 26294733\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 27483674\nFire year 2012\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1 WHERE rowid = 6\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1 WHERE rowid = 1302840\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 2596126\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 3892118\nUPDATE weather_geo_no_fire SET prior_fire_4_5_year = 1 WHERE rowid = 5177812\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1 WHERE rowid = 6464302\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1 WHERE rowid = 7737783\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1, prior_fire_4_5_year = 1 WHERE rowid = 9024473\nUPDATE weather_geo_no_fire SET prior_fire_4_5_year = 1 WHERE rowid = 10311785\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 11602888\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 12899811\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 14186092\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1, prior_fire_3_4_year = 1 WHERE rowid = 15462743\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 16744960\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1, prior_fire_3_4_year = 1, prior_fire_4_5_year = 1 WHERE rowid = 18010033\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 19286056\nUPDATE weather_geo_no_fire SET prior_fire_4_5_year = 1 WHERE rowid = 20576281\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1 WHERE rowid = 21880609\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 23170708\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 24464318\nUPDATE weather_geo_no_fire SET prior_fire_4_5_year = 1 WHERE rowid = 25753558\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1 WHERE rowid = 27044897\nFire year 2013\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 202\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1 WHERE rowid = 1439534\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1 WHERE rowid = 2858255\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 4274779\nUPDATE weather_geo_no_fire SET prior_fire_4_5_year = 1 WHERE rowid = 5682592\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1 WHERE rowid = 7111341\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 8508758\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1 WHERE rowid = 9928683\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 11353684\nUPDATE weather_geo_no_fire SET prior_fire_4_5_year = 1 WHERE rowid = 12758133\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 14195059\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 15621236\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 17044831\nUPDATE weather_geo_no_fire SET prior_fire_4_5_year = 1 WHERE rowid = 18445260\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1, prior_fire_4_5_year = 1 WHERE rowid = 19861704\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 21254285\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 22687842\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1 
WHERE rowid = 24125835\nUPDATE weather_geo_no_fire SET prior_fire_4_5_year = 1 WHERE rowid = 25559658\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 26985423\nFire year 2014\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 208\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1 WHERE rowid = 1573968\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1 WHERE rowid = 3116427\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 4658943\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1 WHERE rowid = 6217648\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1, prior_fire_2_3_year = 1, prior_fire_3_4_year = 1 WHERE rowid = 7757877\nUPDATE weather_geo_no_fire SET prior_fire_4_5_year = 1 WHERE rowid = 9327446\nUPDATE weather_geo_no_fire SET prior_fire_4_5_year = 1 WHERE rowid = 10894646\nUPDATE weather_geo_no_fire SET prior_fire_4_5_year = 1 WHERE rowid = 12440977\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1, prior_fire_3_4_year = 1 WHERE rowid = 14014268\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 15542701\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 17089210\nUPDATE weather_geo_no_fire SET prior_fire_4_5_year = 1 WHERE rowid = 18641027\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1 WHERE rowid = 20168538\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 21678546\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 23234523\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1 WHERE rowid = 24780386\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1, prior_fire_4_5_year = 1 WHERE rowid = 26354930\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 27885142\nFire year 2015\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 459\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 1544937\nUPDATE weather_geo_no_fire SET prior_fire_4_5_year = 1 WHERE rowid = 3108837\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1 WHERE rowid = 4665321\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 6186252\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1 WHERE rowid = 7744831\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1, prior_fire_4_5_year = 1 WHERE rowid = 9318737\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1 WHERE rowid = 10869067\nUPDATE weather_geo_no_fire SET prior_fire_4_5_year = 1 WHERE rowid = 12422649\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 13970329\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1 WHERE rowid = 15530127\nUPDATE weather_geo_no_fire SET prior_fire_4_5_year = 1 WHERE rowid = 17108997\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 18673598\nUPDATE weather_geo_no_fire SET prior_fire_4_5_year = 1 WHERE rowid = 20245057\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 21809025\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 23354274\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 24910567\nUPDATE weather_geo_no_fire SET prior_fire_2_3_year = 1 WHERE rowid = 26456417\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 28012930\nFire year 2016\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 8\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1, prior_fire_2_3_year = 1 WHERE rowid = 1515300\nUPDATE weather_geo_no_fire SET 
prior_fire_2_3_year = 1, prior_fire_4_5_year = 1 WHERE rowid = 2997485\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1 WHERE rowid = 4505010\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 6023350\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1 WHERE rowid = 7546192\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1 WHERE rowid = 9076597\nUPDATE weather_geo_no_fire SET prior_fire_4_5_year = 1 WHERE rowid = 10593510\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 12127762\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1 WHERE rowid = 13649602\nUPDATE weather_geo_no_fire SET prior_fire_1_2_year = 1 WHERE rowid = 15155162\nUPDATE weather_geo_no_fire SET prior_fire_4_5_year = 1 WHERE rowid = 16636303\nUPDATE weather_geo_no_fire SET prior_fire_4_5_year = 1 WHERE rowid = 18150796\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 19626144\nUPDATE weather_geo_no_fire SET prior_fire_3_4_year = 1 WHERE rowid = 21148223\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1, prior_fire_1_2_year = 1 WHERE rowid = 22648749\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1 WHERE rowid = 24190730\nUPDATE weather_geo_no_fire SET prior_fire_4_5_year = 1 WHERE rowid = 25711394\nUPDATE weather_geo_no_fire SET prior_fire_0_1_year = 1, prior_fire_3_4_year = 1, prior_fire_4_5_year = 1 WHERE rowid = 27225568\n"
]
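,
[
"# Added follow-up check (sketch): count how many no-fire rows picked up a\n# prior-fire flag in the UPDATE loop above. Assumes sqlite3 is imported and\n# the loop above has finished committing.\nconn = sqlite3.connect('../data/fires.sqlite')\ncur = conn.cursor()\nn_flagged = cur.execute(\"\"\"\n\tselect count(*)\n\tfrom weather_geo_no_fire\n\twhere prior_fire_0_1_year + prior_fire_1_2_year + prior_fire_2_3_year\n\t\t+ prior_fire_3_4_year + prior_fire_4_5_year > 0\n\"\"\").fetchone()[0]\nprint(f'{n_flagged:,} no-fire rows flagged with a prior fire')\nconn.close()",
"_____no_output_____"
]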
],
[
[
"### Export to CSV for Re-import and Google Vertex AI",
"_____no_output_____"
]
],
[
[
"df_no_fire = get_no_fires_df()\n\ndf_no_fire.describe().transpose()",
"_____no_output_____"
],
[
"df_yes_fire = get_fires_df()\n\ndf_yes_fire.describe().transpose()",
"_____no_output_____"
],
[
"df_fires = pd.concat([df_no_fire, df_yes_fire], axis=0).sample(frac=1)\ndf_fires['has_fire'] = (df_fires.fire_size_class > '').astype(np.int8)\n\nnum_no_fire = len(df_fires[df_fires.fire_size_class == ''])\nnum_yes_fire = len(df_fires[df_fires.fire_size_class > ''])\nprint(f'Balanced classes of NO fires: {num_no_fire:,} and YES fire: {num_yes_fire:,}')\n\ndf_fires_export = df_fires.set_index(['date', 'long', 'lat'])\ndf_fires_export.to_csv('../data/df_fire_sample.csv')",
"Balanced classes of NO fires: 100,000 and YES fire: 92,659\n"
]
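,
[
"# Optional round-trip check (added sketch): re-read the exported CSV and\n# confirm the class balance survived the write. Assumes df_fire_sample.csv\n# was written by the cell above and pandas is imported as pd.\ndf_check = pd.read_csv('../data/df_fire_sample.csv')\nprint(df_check.shape)\ndf_check.has_fire.value_counts()",
"_____no_output_____"
]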
],
[
[
"### Acknowledgements\n\n1. Short, Karen C. 2021. Spatial wildfire occurrence data for the United States, 1992-2018 [FPA_FOD_20210617]. 5th Edition. Fort Collins, CO: Forest Service Research Data Archive. https://doi.org/10.2737/RDS-2013-0009.5\nAcknowledgements\n2. These data were obtained from the NASA Langley Research Center (LaRC) POWER Project funded through the NASA Earth Science/Applied Science Program.\n3. The U.S. Drought Monitor is produced through a partnership between the National Drought Mitigation Center at the University of Nebraska-Lincoln, the United States Department of Agriculture, and the National Oceanic and Atmospheric Administration.\n4. This dataset utilizes the Harmonized World Soil Database by Fischer, G., F. Nachtergaele, S. Prieler, H.T. van Velthuizen, L. Verelst, D. Wiberg, 2008. Global Agro-ecological Zones Assessment for Agriculture (GAEZ 2008). IIASA, Laxenburg, Austria and FAO, Rome, Italy.\n5. State of California Employeement Development Department for California County geospatial shape\n6. California fire perimeters https://gis.data.ca.gov/datasets/CALFIRE-Forestry::california-fire-perimeters-all/about",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
]
] |
e715a11a321b2a04073db6e75a3f0e91ccfd6912 | 10,435 | ipynb | Jupyter Notebook | application_model_zoo/Example - NOAA Habcam Underwater Fish Detection.ipynb | Boltuzamaki/Monk_Object_Detection | baf113ef6db8b531d0ef6413538e49d422163a20 | [
"Apache-2.0"
] | 549 | 2020-01-02T05:14:57.000Z | 2022-03-29T18:34:12.000Z | application_model_zoo/Example - NOAA Habcam Underwater Fish Detection.ipynb | Boltuzamaki/Monk_Object_Detection | baf113ef6db8b531d0ef6413538e49d422163a20 | [
"Apache-2.0"
] | 98 | 2020-01-21T09:41:30.000Z | 2022-03-12T00:53:06.000Z | application_model_zoo/Example - NOAA Habcam Underwater Fish Detection.ipynb | Boltuzamaki/Monk_Object_Detection | baf113ef6db8b531d0ef6413538e49d422163a20 | [
"Apache-2.0"
] | 233 | 2020-01-18T03:46:27.000Z | 2022-03-19T03:17:47.000Z | 22.934066 | 316 | 0.553905 | [
[
[
"<a href=\"https://colab.research.google.com/github/Tessellate-Imaging/Monk_Object_Detection/blob/master/application_model_zoo/Example%20-%20NOAA%20Habcam%20Underwater%20Fish%20Detection.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"# Table of contents\n\n\n## 1. Installation Instructions\n\n\n## 2. How to train using MMdetection wrapper",
"_____no_output_____"
],
[
"# Installation\n\n - Run these commands\n \n - git clone https://github.com/Tessellate-Imaging/Monk_Object_Detection.git\n \n - cd Monk_Object_Detection/16_mmdet/installation\n \n - Select the right file and run\n \n - chmod +x install.sh && ./install.sh",
"_____no_output_____"
]
],
[
[
"! git clone https://github.com/Tessellate-Imaging/Monk_Object_Detection.git",
"_____no_output_____"
],
[
"! cd Monk_Object_Detection/16_mmdet/installation && chmod +x install.sh && ./install.sh",
"_____no_output_____"
]
],
[
[
"# Training your own detector",
"_____no_output_____"
],
[
"## Dataset\n - Credits: https://www.viametoolkit.org/cvpr-2018-workshop-data-challenge/challenge-data-description/",
"_____no_output_____"
]
],
[
[
"! wget https://challenge.kitware.com/api/v1/file/5adecdee56357d4ff85705f8/download -O data-challenge-training-imagery.tar.gz\n! wget https://challenge.kitware.com/api/v1/item/5ada39f756357d4ff856f550/download -O data-challenge-training-annotations.tar.gz",
"_____no_output_____"
],
[
"! tar -xzf data-challenge-training-imagery.tar.gz\n! tar -xzf data-challenge-training-annotations.tar.gz",
"_____no_output_____"
],
[
"! mkdir habcamseq0\n! mkdir habcamseq0/annotations\n! cp annotations/habcam_seq0_training.mscoco.json habcamseq0/annotations/instances_images.json\n! mv imagery/habcam_seq0 habcamseq0/images",
"_____no_output_____"
],
[
"import json\n\nwith open('habcamseq0/annotations/instances_images.json') as f:\n data = json.load(f)\n\ng = open(\"habcamseq0/annotations/classes.txt\", 'w')\nfor i in range(len(data[\"categories\"])):\n g.write(data[\"categories\"][0][\"name\"] + \"\\n\");\n\ng.close();",
"_____no_output_____"
]
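,
[
"# Added sanity-check sketch: confirm the restructured dataset looks right.\n# Assumes the folders and annotation file created by the cells above.\nimport os\n\nwith open('habcamseq0/annotations/instances_images.json') as f:\n    coco = json.load(f)\n\nprint(len(coco['categories']), 'categories')\nprint(len(os.listdir('habcamseq0/images')), 'image files on disk')",
"_____no_output_____"
]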
],
[
[
"# Training",
"_____no_output_____"
]
],
[
[
"import os\nimport sys\nsys.path.append(\"Monk_Object_Detection/16_mmdet/lib\")",
"_____no_output_____"
],
[
"from train_engine import Detector",
"_____no_output_____"
],
[
"gtf = Detector();",
"_____no_output_____"
],
[
"img_dir = \"habcamseq0/images\";\nannofile = \"habcamseq0/annotations/instances_images.json\"\nclass_file = \"habcamseq0/annotations/classes.txt\"\n\ngtf.Train_Dataset(img_dir, annofile, class_file);",
"_____no_output_____"
],
[
"gtf.Val_Dataset(img_dir, annofile);",
"_____no_output_____"
],
[
"gtf.Dataset_Params(batch_size=8, num_workers=4)",
"_____no_output_____"
],
[
"gtf.List_Models();",
"1. Model - faster_rcnn_fpn50\n2. Model - faster_rcnn_fpn101\n3. Model - faster_rcnn_x101_32x4d_fpn\n4. Model - faster_rcnn_x101_64x4d_fpn\n5. Model - cascade_rcnn_fpn50\n6. Model - cascade_rcnn_fpn101\n7. Model - cascade_rcnn_x101_32x4d_fpn\n8. Model - cascade_rcnn_x101_64x4d_fpn\n9. Model - retinanet_r50_fpn\n10. Model - retinanet_r101_fpn\n11. Model - retinanet_x101_32x4d_fpn\n12. Model - retinanet_x101_64x4d_fpn\n13. Model - retinanet_ghm_r50_fpn\n14. Model - retinanet_ghm_r101_fpn\n15. Model - retinanet_ghm_x101_32x4d_fpn\n16. Model - retinanet_ghm_x101_64x4d_fpn\n17. Model - dh_faster_rcnn_fpn50\n18. Model - libra_faster_rcnn_fpn50\n19. Model - libra_faster_rcnn_fpn101\n20. Model - libra_faster_rcnn_x101_64x4d_fpn\n21. Model - libra_retinanet_r50_fpn\n22. Model - ga_faster_rcnn_x101_32x4d_fpn\n23. Model - ga_faster_rcnn_x101_64x4d_fpn\n24. Model - ga_retinanet_x101_32x4d_fpn\n25. Model - ga_retinanet_x101_64x4d_fpn\n26. Model - fovea_r50_fpn_4x4\n27. Model - fovea_r101_fpn_4x4\n28. Model - fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4\n29. Model - fovea_align_r101_fpn_gn-head_mstrain_640-800_4x4\n30. Model - free_anchor_retinanet_r50_fpn\n31. Model - free_anchor_retinanet_r101_fpn\n32. Model - free_anchor_retinanet_x101_32x4d_fpn\n33. Model - atss_r50_fpn\n34. Model - pafpn_faster_rcnn_r50\n35. Model - faster_rcnn_r50_fpn_mdpool\n36. Model - faster_rcnn_r50_fpn_dpool\n"
],
[
"gtf.Model_Params(model_name=\"free_anchor_retinanet_r101_fpn\");",
"_____no_output_____"
],
[
"gtf.Hyper_Params(lr=0.02, momentum=0.9, weight_decay=0.0001);",
"_____no_output_____"
],
[
"gtf.Training_Params(num_epochs=100, val_interval=50);",
"_____no_output_____"
],
[
"gtf.Train();",
"_____no_output_____"
]
],
[
[
"# Run inference on images",
"_____no_output_____"
]
],
[
[
"import os\nimport sys\nsys.path.append(\"Monk_Object_Detection/16_mmdet/lib\")",
"_____no_output_____"
],
[
"from infer_engine import Infer",
"_____no_output_____"
],
[
"gtf = Infer();",
"_____no_output_____"
],
[
"gtf.Model_Params(\"work_dirs/config_updated/config_updated.py\", \n \"work_dirs/config_updated/latest.pth\")",
"_____no_output_____"
],
[
"import os\nimg_list = os.listdir(\"habcamseq0/images\");",
"_____no_output_____"
],
[
"result = gtf.Predict(img_path=\"habcamseq0/images/\" + img_list[0],\n out_img_path=\"result.jpg\",\n thresh=0.8);\n\nfrom IPython.display import Image\nImage(filename='result.jpg', width=490, height=640) ",
"_____no_output_____"
]
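,
[
"# Added batch-inference sketch: run the detector over a few more images,\n# reusing gtf and img_list from the cells above. Output file names here\n# are illustrative.\nfor fname in img_list[1:4]:\n    gtf.Predict(img_path=\"habcamseq0/images/\" + fname,\n                out_img_path=\"result_\" + fname,\n                thresh=0.8)",
"_____no_output_____"
]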
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e715a39f29b9ecc0994e3210b7b69b70da966340 | 9,251 | ipynb | Jupyter Notebook | src/notebooks/adversarial-cone.ipynb | ash-aldujaili/blackbox-adv-examples-signhunter | 9279730522d6127ecb332133a090256e90904f2a | [
"MIT"
] | 19 | 2020-02-07T13:57:25.000Z | 2022-03-07T15:05:33.000Z | src/notebooks/adversarial-cone.ipynb | ALFA-group/blackbox-adv-examples-signhunter | 9279730522d6127ecb332133a090256e90904f2a | [
"MIT"
] | 5 | 2020-03-29T17:02:54.000Z | 2020-09-16T12:51:49.000Z | src/notebooks/adversarial-cone.ipynb | ALFA-group/blackbox-adv-examples-signhunter | 9279730522d6127ecb332133a090256e90904f2a | [
"MIT"
] | 4 | 2020-04-08T18:16:25.000Z | 2021-11-16T02:47:16.000Z | 25.768802 | 254 | 0.454005 | [
[
[
"Given a vector $v\\in \\{-1,+1\\}^n$, we are interested in the set of $k$ orthogonal vectors $r_1,\\ldots, r_k \\in \\{-1,+1\\}^n$, that solves\n$$\\max_{\\{r_i\\}_k}\\min_{i\\in [k]}v^Tr_i$$\n\nFrom Tramer et al., 2018, it was shown that if for all $i$, $v^Tr_i \\geq \\alpha n$ for $\\alpha \\in (0,1)$, then $\\alpha \\leq 1/\\sqrt{k}$. In other words, $OPT=n/\\sqrt{k}$. Let's denote by $R$ the matrix formed by stacking $r_i$ vertically.",
"_____no_output_____"
]
],
[
[
"from scipy.linalg import hadamard\nimport numpy as np\nimport itertools",
"_____no_output_____"
],
[
"# dim\nn = 108 # imagenet dim\n# adv cone size < n\nk = 36\n# target vector\nv = np.sign(np.random.randn(n))\nOPT= n / np.sqrt(k)",
"_____no_output_____"
]
],
[
[
"#### Naive Construction\n\nThe following naive method (Tramer et al, 2018) can be used to achieve ~$n/k$, a factor of $\\sqrt{k}$ worse than OPT, assuming $k$ divides $n$.",
"_____no_output_____"
]
],
[
[
"def chain_lol(lol):\n return list(itertools.chain(*lol))\n\ndef construct_idx(chunk_size, k, n):\n \"\"\"a method to get 1d idxs for the R matrix used by the naive construction method\n \"\"\"\n return list(filter(\n lambda x: x < n*k,\n chain_lol(\n [range(i*(chunk_size)+j,i*(chunk_size ) + chunk_size+j) for i,j in enumerate(range(0,n*k,n))])))\n\ndef naive_R(n, k):\n chunk_size = (n + k - 1) // k\n R = np.zeros((k, n))\n #print(construct_idx(chunk_size, k, n))\n R.ravel()[construct_idx(chunk_size, k, n)] = v\n return R",
"_____no_output_____"
],
[
"R = naive_R(n,k)",
"_____no_output_____"
],
[
"R.dot(v), OPT ",
"_____no_output_____"
]
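,
[
"# Added sanity check (sketch): the naive rows occupy disjoint coordinate\n# chunks, so R R^T should be diagonal (rows mutually orthogonal).\nR.dot(R.T)",
"_____no_output_____"
]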
],
[
[
"A factor of 10 worse for Imagenet. Let's consider another construction from Tramer et al., 2018.",
"_____no_output_____"
],
[
"#### Tight Randomized Construction with Regular Hadamard matrix",
"_____no_output_____"
]
],
[
[
"Hs = {\n '4': np.load('../reg_hadamard_mats/reg_hadamard_mat_order-4.npy'),\n '16':np.load('../reg_hadamard_mats/reg_hadamard_mat_order-16.npy'),\n '36':np.load('../reg_hadamard_mats/reg_hadamard_mat_order-36.npy'),\n '64':np.load('../reg_hadamard_mats/reg_hadamard_mat_order-64.npy'),\n '100':np.load('../reg_hadamard_mats/reg_hadamard_mat_order-100.npy'),\n}",
"_____no_output_____"
],
[
"H = Hs['100']",
"_____no_output_____"
],
[
"k = H.shape[0]\n# target vector\nv = np.sign(np.random.randn(n))\nOPT= n / np.sqrt(k)",
"_____no_output_____"
],
[
"R = np.zeros((k, n))\nR[:, :n // k * k ] = np.repeat(H, n // k, axis=1)\nR *= v[None, :]",
"_____no_output_____"
],
[
"R.dot(v), OPT ",
"_____no_output_____"
],
[
"sum(H)",
"_____no_output_____"
],
[
"H.dot(H.T)",
"_____no_output_____"
]
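,
[
"# Added sanity check (sketch): with the regular Hadamard construction the\n# rows of R stay mutually orthogonal, R R^T = (n // k * k) * I, because the\n# sign flips from v cancel in every inner product.\nnp.allclose(R.dot(R.T), (n // k * k) * np.eye(k))",
"_____no_output_____"
]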
],
[
[
"In the above discussion, we considered the case where $||r_i||_\\infty\\leq1$ and $v\\in \\{-1,+1\\}^n$, how about the case of $||r_i||_2\\leq 1$ and $v\\in \\mathbb{R}^n$.\n\nA similar result can be shown such that $r_i^Tv \\geq k^{-1/2} ||v||$ with $k=\\min(\\lfloor 1/\\alpha^2 \\rfloor, d)$ (See Tramer et al., 2017). Let's define our setup below, before we show one possible construction.",
"_____no_output_____"
]
],
[
[
"n = 1000\n\ndef ei(n, i):\n \"\"\"return the ith basis vector\"\"\"\n ei = np.zeros((n, 1))\n ei[i] = 1\n return ei\n\nv = np.random.randn(n,1)\nk = 6\nnorm_v = np.linalg.norm(v)\nOPT = norm_v / np.sqrt(k)",
"_____no_output_____"
]
],
[
[
"The following is one possible construction",
"_____no_output_____"
]
],
[
[
"z = np.sum(np.eye(n)[:, :k], axis=1, keepdims=True) / np.sqrt(k)",
"_____no_output_____"
],
[
"S = ei(n, 1).dot(v.T) / norm_v\nT = ei(n, 1).dot(z.T) / np.linalg.norm(z)**2\nU = S.T.dot(T)",
"_____no_output_____"
],
[
"R = U.dot(np.eye(n)[:, :k])",
"_____no_output_____"
],
[
"v.T.dot(R), OPT",
"_____no_output_____"
]
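,
[
"# Added check (sketch): each direction in this construction has norm\n# 1/sqrt(k) <= 1 and meets the k^{-1/2} * ||v|| alignment bound.\nprint(np.linalg.norm(R, axis=0))\nprint(v.T.dot(R) >= OPT - 1e-9)",
"_____no_output_____"
]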
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
e715a6b9e417fd3f4649e916c7d2a310a734bbca | 56,163 | ipynb | Jupyter Notebook | JSON/JSON.ipynb | CaptSolo/Python_Workshop_Humanities_July_2019 | df3801a727c390e1d5ce9b3fb28ecac7e2951b23 | [
"MIT"
] | 5 | 2019-07-23T09:19:38.000Z | 2019-09-03T04:13:09.000Z | JSON/JSON.ipynb | CaptSolo/Python_Workshop_Humanities_July_2019 | df3801a727c390e1d5ce9b3fb28ecac7e2951b23 | [
"MIT"
] | 8 | 2020-01-28T22:54:14.000Z | 2022-02-10T00:17:47.000Z | JSON/JSON.ipynb | ValRCS/RCS_Data_Analysis_Python_2019_July | 19e2f8310f41b697f9c86d7a085a9ff19390eeac | [
"MIT"
] | 1 | 2019-07-18T11:08:13.000Z | 2019-07-18T11:08:13.000Z | 25.715659 | 403 | 0.420116 | [
[
[
"## JSON",
"_____no_output_____"
],
[
"# JSON - Javascript Object Notation\n#### Invented by Douglas Crockford when working at Yahoo in early 2000s.\n\n* Goal - Human Readable, Machine Parsable\n\n* Specification: https://www.json.org/",
"_____no_output_____"
],
[
"JSON — short for JavaScript Object Notation — format for sharing data. \n\nJSON is derived from the JavaScript programming language\n\nAvailable for use by many languages including Python \n\nusually file extension is .json when stored\n\n",
"_____no_output_____"
]
],
[
[
"# Sample JSON below from https://json.org/example.html\n# Question why is Syntax highlighting working properly ? :)",
"_____no_output_____"
],
[
"{\"widget\": {\n \"debug\": \"on\",\n \"window\": {\n \"title\": \"Sample Konfabulator Widget\",\n \"name\": \"main_window\",\n \"width\": 500,\n \"height\": 500\n },\n \"image\": { \n \"src\": \"Images/Sun.png\",\n \"name\": \"sun1\",\n \"hOffset\": 250,\n \"vOffset\": 250,\n \"alignment\": \"center\"\n },\n \"text\": {\n \"data\": \"Click Here\",\n \"size\": 36,\n \"style\": \"bold\",\n \"name\": \"text1\",\n \"hOffset\": 250,\n \"vOffset\": 100,\n \"alignment\": \"center\",\n \"onMouseUp\": \"sun1.opacity = (sun1.opacity / 100) * 90;\"\n }\n}} \n",
"_____no_output_____"
],
[
"# if this was string starting with { it would be our json\nmydata = {\n \"firstName\": \"Jane\",\n \"lastName\": \"Doe\",\n \"hobbies\": [\"running\", \"sky diving\", \"dancing\"],\n \"age\": 43,\n \"children\": [\n {\n \"firstName\": \"Alice\",\n \"age\": 7\n },\n {\n \"firstName\": \"Bob\",\n \"age\": 13\n }\n ]\n}",
"_____no_output_____"
],
[
"type(mydata)",
"_____no_output_____"
],
[
"print(mydata)",
"{'firstName': 'Jane', 'lastName': 'Doe', 'hobbies': ['running', 'sky diving', 'dancing'], 'age': 43, 'children': [{'firstName': 'Alice', 'age': 7}, {'firstName': 'Bob', 'age': 13}]}\n"
],
[
"mylist = list(range(10))\nprint(mylist)",
"[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n"
]
],
[
[
"The process of encoding JSON is usually called serialization. This term refers to the transformation of data into a series of bytes (hence serial) to be stored or transmitted across a network. You may also hear the term marshaling, but that’s a whole other discussion. Naturally, deserialization is the reciprocal process of decoding data that has been stored or delivered in the JSON standard.\n\nAll we’re talking about here is reading and writing. Think of it like this: encoding is for writing data to disk, while decoding is for reading data into memory.\n https://realpython.com/python-json/",
"_____no_output_____"
]
],
[
[
"import json",
"_____no_output_____"
],
[
"with open(\"data_file.json\", mode=\"w\") as write_file:\n json.dump(mydata, write_file)",
"_____no_output_____"
],
[
"with open(\"numbers.json\", mode=\"w\") as write_file:\n json.dump(mylist, write_file)",
"_____no_output_____"
],
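[
"# Read the list back from disk to confirm the round trip (a quick check):\nwith open(\"numbers.json\", mode=\"r\") as read_file:\n    numbers_back = json.load(read_file)\nnumbers_back == mylist",
"_____no_output_____"
],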
[
"# use json string in our program\njson_string = json.dumps(mydata)\nprint(json_string)",
"{\"firstName\": \"Jane\", \"lastName\": \"Doe\", \"hobbies\": [\"running\", \"sky diving\", \"dancing\"], \"age\": 43, \"children\": [{\"firstName\": \"Alice\", \"age\": 7}, {\"firstName\": \"Bob\", \"age\": 13}]}\n"
],
[
"print(mydata)",
"{'firstName': 'Jane', 'lastName': 'Doe', 'hobbies': ['running', 'sky diving', 'dancing'], 'age': 43, 'children': [{'firstName': 'Alice', 'age': 7}, {'firstName': 'Bob', 'age': 13}]}\n"
],
[
"# Convert Json_string back to our Python Object\nmy_obj = json.loads(json_string)\nmy_obj",
"_____no_output_____"
],
[
"newlist = json.loads('[1,3,5,\"Valdis\"]')\nnewlist",
"_____no_output_____"
],
[
"badlist = json.loads('[1,3,5,\"Vald\"]')\nbadlist",
"_____no_output_____"
],
[
"type(json_string)",
"_____no_output_____"
],
[
"# Avove example JSON and Python object have the same syntax but there are some differences",
"_____no_output_____"
]
],
[
[
"",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"Simple Python objects are translated to JSON according to a fairly intuitive conversion.\n\nPython\tJSON\n\ndict\tobject\n\nlist, tuple\tarray\n\nstr\tstring\n\nint, long, \n\nfloat\tnumber\n\nTrue\ttrue\n\nFalse\tfalse\n\nNone\tnull",
"_____no_output_____"
]
],
[
[
"newlist = json.loads('[true,2,null, false, 555.333]')\nnewlist",
"_____no_output_____"
],
[
"# The first option most people want to change is whitespace. You can use the indent keyword argument to specify the indentation size for nested structures. Check out the difference for yourself by using data, which we defined above, and running the following commands in a console:\n\njson.dumps(mydata)\n",
"_____no_output_____"
],
[
"# very useful for visibility!\nprint(json.dumps(mydata, indent=4))",
"{\n \"firstName\": \"Jane\",\n \"lastName\": \"Doe\",\n \"hobbies\": [\n \"running\",\n \"sky diving\",\n \"dancing\"\n ],\n \"age\": 43,\n \"children\": [\n {\n \"firstName\": \"Alice\",\n \"age\": 7\n },\n {\n \"firstName\": \"Bob\",\n \"age\": 13\n }\n ]\n}\n"
],
[
"with open(\"data_file.json\", \"w\") as write_file:\n json.dump(mydata, write_file, indent=4)",
"_____no_output_____"
],
[
"with open(\"data_file.json\", \"r\") as read_file:\n data = json.load(read_file)\ndata",
"_____no_output_____"
],
[
"type(data)",
"_____no_output_____"
],
[
"len(data)",
"_____no_output_____"
],
[
"type(data[0]), type(data[1])",
"_____no_output_____"
]
],
[
[
"Keep in mind that the result of this method could return any of the allowed data types from the conversion table. This is only important if you’re loading in data you haven’t seen before. In most cases, the root object will be a dict or a list.",
"_____no_output_____"
],
[
"If you've gotten JSON data in from another program or have otherwise obtained a string of JSON formatted data in Python, you can easily deserialize that with loads(), which naturally loads from a string:",
"_____no_output_____"
]
],
[
[
"json_string = \"\"\"\n{\n \"researcher\": {\n \"name\": \"Ford Prefect\",\n \"species\": \"Betelgeusian\",\n \"relatives\": [\n {\n \"name\": \"Zaphod Beeblebrox\",\n \"species\": \"Betelgeusian\"\n }\n ]\n }\n}\n\"\"\"\ndata = json.loads(json_string)\ndata",
"_____no_output_____"
],
[
"# get value of relative's name\ndata['researcher']",
"_____no_output_____"
],
[
"# get value of relative's name\ndata['researcher']['relatives']",
"_____no_output_____"
],
[
"# get value of relative's name\ndata['researcher']['relatives'][0]",
"_____no_output_____"
],
[
"# get value of relative's name\ndata['researcher']['relatives'][0]['name']",
"_____no_output_____"
],
[
"data['researcher']['relatives'][0]['name'].split()[0]",
"_____no_output_____"
],
[
"data['researcher']['relatives'][0]['name'].split()[0][:4]",
"_____no_output_____"
],
[
"type(data)",
"_____no_output_____"
],
[
"import json\nimport requests",
"_____no_output_____"
],
[
"## Lets get some data https://jsonplaceholder.typicode.com/",
"_____no_output_____"
],
[
"response = requests.get(\"https://jsonplaceholder.typicode.com/todos\")\nif response.status_code != 200:\n print(\"Bad Response: \", response.status_code)\nprint(response.status_code)\ntodos = json.loads(response.text)\n",
"200\n"
]
],
[
[
"can open https://jsonplaceholder.typicode.com/todos in regular browser too..",
"_____no_output_____"
]
],
[
[
"type(todos)",
"_____no_output_____"
],
[
"len(todos)",
"_____no_output_____"
],
[
"todos[:10]",
"_____no_output_____"
],
[
"myl = [('Valdis', 40), ('Alice',35), ('Bob', 23),('Carol',70)]",
"_____no_output_____"
],
[
"# Lambda = anonymous function",
"_____no_output_____"
],
[
"def myfun(el):\n return el[1]\n# same as myfun = lambda el: el[1]",
"_____no_output_____"
],
[
"sorted(myl, key = lambda el: el[1], reverse=True)",
"_____no_output_____"
],
[
"# Exercise find out top 3 users with most tasks completed!\n\n# TIPS\n# we need some sort of structure to store these user results before finding out top 3\n# at least two good data structure choices here :)\n# here the simplest might actually be the best if we consider userId values\n",
"_____no_output_____"
],
[
"todos[0]",
"_____no_output_____"
],
[
"todos[0]['userId']",
"_____no_output_____"
],
[
"todos[0]['completed']",
"_____no_output_____"
],
[
"# Here we create a new dictionary and and count the completed works by id\nnewdict = {}\nfor todo in todos:\n if todo['completed'] == True:\n if todo['userId'] in newdict:\n newdict[todo['userId']] += 1\n else:\n newdict[todo['userId']] = 1",
"_____no_output_____"
],
[
"newdict",
"_____no_output_____"
],
[
"sorted(newdict.items())",
"_____no_output_____"
],
[
"bestworkers = sorted(newdict.items(), key=lambda el: el[1], reverse=True)\nbestworkers[:3]",
"_____no_output_____"
],
[
"users = [ el['userId'] for el in todos]\nlen(users),users[:15]",
"_____no_output_____"
],
[
"uniqusers = set(users)\nuniqusers",
"_____no_output_____"
],
[
"# dictionary comprehension but could live without one\nusers = { el['userId'] : 0 for el in todos} ",
"_____no_output_____"
],
[
"users",
"_____no_output_____"
],
[
"users.keys()",
"_____no_output_____"
],
[
"users.value",
"_____no_output_____"
],
[
"#{'completed': True,\n# 'id': 8,\n# 'title': 'quo adipisci enim quam ut ab',\n# 'userId': 1}",
"_____no_output_____"
],
[
"#idiomatic\nfor el in todos:\n users[el['userId']] += el['completed'] # Boolean False is 0 True is 1 obviously this might not be too readable",
"_____no_output_____"
],
[
"# same as above could be useful in more complicated cases\nfor el in todos:\n if el['completed'] == True:\n users[el['userId']] += 1",
"_____no_output_____"
],
[
"# there could be a one liner or a solution with from collections import Counter",
"_____no_output_____"
],
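[
"# One possible Counter-based version of the tally above (a sketch using the\n# standard library; not the only way to do it):\nfrom collections import Counter\ncompleted_counts = Counter(el['userId'] for el in todos if el['completed'])\ncompleted_counts.most_common(3)",
"_____no_output_____"
],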
[
"users.items()",
"_____no_output_____"
],
[
"list(users.items())",
"_____no_output_____"
],
[
"userlist=list(users.items())",
"_____no_output_____"
],
[
"type(userlist[0])",
"_____no_output_____"
],
[
"# we pass a key anonymous(lambda) function\nsorted(userlist, key=lambda el: el[1], reverse=True)[:3]",
"_____no_output_____"
],
[
"# lets try a simple way",
"_____no_output_____"
],
[
"mylist=[0]\nmylist*=11",
"_____no_output_____"
],
[
"for el in todos:\n if el['completed'] == True:\n mylist[el['userId']] +=1",
"_____no_output_____"
],
[
"mylist",
"_____no_output_____"
],
[
"mylist.index(max(mylist))",
"_____no_output_____"
],
[
"# kind of hard to get more values need to get tricky",
"_____no_output_____"
]
],
[
[
"# How about Pandas and Json ?",
"_____no_output_____"
]
],
[
[
"import pandas as pd",
"_____no_output_____"
],
[
"df = pd.read_json('https://jsonplaceholder.typicode.com/todos')",
"_____no_output_____"
],
[
"df",
"_____no_output_____"
],
[
"df.groupby(['userId'])['completed'].sum()",
"_____no_output_____"
],
[
"df.groupby(['userId'])['completed'].sum().sort_values()",
"_____no_output_____"
],
[
"df.groupby(['userId'])['completed'].sum().sort_values(ascending=False)",
"_____no_output_____"
]
],
[
[
"# Exercise Find Public JSON API get data and convert it into Pandas DataFrame\n\n## Many possible sources\n\nhttps://github.com/toddmotto/public-apis\n \n### You want the ones without authorization and WITH CORS unless you are feeling adventurous and want to try with auth\n\n",
"_____no_output_____"
]
],
[
[
"## For authorization you generally need some sort of token(key)\n# One example for zendesk API https://develop.zendesk.com/hc/en-us/community/posts/360001652447-API-auth-in-python\n\n\n# For an API token, append '/token' to your username and use the token as the password:\n## This will not work for those without zendesk access token\n\nurl = 'https://your_subdomain.zendesk.com/api/v2/users/123.json'\nr = requests.get(url, auth=('[email protected]/token', 'your_token'))\n# For an OAuth token, set an Authorization header:\n\nbearer_token = 'Bearer ' + access_token\nheader = {'Authorization': bearer_token}\nurl = 'https://your_subdomain.zendesk.com/api/v2/users/123.json'\nr = requests.get(url, headers=header)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
e715a9d3f37710e2d15d283bbf701229d6ac8113 | 323,150 | ipynb | Jupyter Notebook | notebooks/PythonForScientificComputing.ipynb | modenaxe/BMC | b6f6e473878ab7b0c19430d1b66b6dba09059c63 | [
"MIT"
] | 1 | 2018-06-23T20:09:07.000Z | 2018-06-23T20:09:07.000Z | notebooks/PythonForScientificComputing.ipynb | modenaxe/BMC | b6f6e473878ab7b0c19430d1b66b6dba09059c63 | [
"MIT"
] | null | null | null | notebooks/PythonForScientificComputing.ipynb | modenaxe/BMC | b6f6e473878ab7b0c19430d1b66b6dba09059c63 | [
"MIT"
] | 1 | 2019-01-02T23:17:40.000Z | 2019-01-02T23:17:40.000Z | 428.013245 | 168,334 | 0.92488 | [
[
[
"# Python for scientific computing\n\n> Marcos Duarte \n> Laboratory of Biomechanics and Motor Control ([http://demotu.org/](http://demotu.org/)) \n> Federal University of ABC, Brazil",
"_____no_output_____"
],
[
"The [Python programming language](https://www.python.org/) with [its ecosystem for scientific programming](https://scipy.org/) has features, maturity, and a community of developers and users that makes it the ideal environment for the scientific community. \n\nThis talk will show some of these features and usage examples. ",
"_____no_output_____"
],
[
"## Computing as a third kind of Science\n\nTraditionally, science has been divided into experimental and theoretical disciplines, but nowadays computing plays an important role in science. Scientific computation is sometimes related to theory, and at other times to experimental work. Hence, it is often seen as a new third branch of science.\n\n<figure><img src=\"https://raw.githubusercontent.com/jrjohansson/scientific-python-lectures/master/images/theory-experiment-computation.png\" width=300 alt=\"theory-experiment-computation\"/></figure> \nFigure from [J.R. Johansson](http://nbviewer.jupyter.org/github/jrjohansson/scientific-python-lectures/blob/master/Lecture-0-Scientific-Computing-with-Python.ipynb).",
"_____no_output_____"
],
[
"## The lifecycle of a scientific idea",
"_____no_output_____"
]
],
[
[
"from IPython.display import Image\nImage(filename='../images/lifecycle_FPerez.png', width=600) # image from Fernando Perez",
"_____no_output_____"
]
],
[
[
"## About Python [[Python documentation](http://www.python.org/doc/essays/blurb/)]\n\n*Python is a programming language that lets you work more quickly and integrate your systems more effectively. You can learn to use Python and see almost immediate gains in productivity and lower maintenance costs* [[python.org](http://python.org/)].",
"_____no_output_____"
],
[
"- *Python is an interpreted, object-oriented, high-level programming language with dynamic semantics. Its high-level built in data structures, combined with dynamic typing and dynamic binding, make it very attractive for Rapid Application Development, as well as for use as a scripting or glue language to connect existing components together*. \n- *Python's simple, easy to learn syntax emphasizes readability and therefore reduces the cost of program maintenance. Python supports modules and packages, which encourages program modularity and code reuse*. \n- Python is free and open source.",
"_____no_output_____"
],
[
"## About Python [[Python documentation](http://www.python.org/doc/essays/blurb/)]\n\n- *Often, programmers fall in love with Python because of the increased productivity it provides. Since there is no compilation step, the edit-test-debug cycle is incredibly fast. Debugging Python programs is easy: a bug or bad input will never cause a segmentation fault. Instead, when the interpreter discovers an error, it raises an exception. When the program doesn't catch the exception, the interpreter prints a stack trace.* \n- A source level debugger allows inspection of local and global variables, evaluation of arbitrary expressions, setting breakpoints, stepping through the code a line at a time, and so on. The debugger is written in Python itself, testifying to Python's introspective power. On the other hand, often the quickest way to debug a program is to add a few print statements to the source: the fast edit-test-debug cycle makes this simple approach very effective.*",
"_____no_output_____"
],
[
"## Glossary for the Python technical characteristics I\n\n - Programming language: a formal language designed to communicate instructions to a computer. A sequence of instructions that specifies how to perform a computation is called a program.\n - Interpreted language: a program in an interpreted language is executed or interpreted by an interpreter program. This interpreter executes the program source code, statement by statement.\n - Compiled language: a program in a compiled language is first explicitly translated by the user into a lower-level machine language executable (with a compiler) and then this program can be executed.\n - Python interpreter: an interpreter is the computer program that executes the program. The most-widely used implementation of the Python programming language, referred as CPython or simply Python, is written in C (another programming language, which is lower-level and compiled).\n - High-level: a high-level programming language has a strong abstraction from the details of the computer and the language is independent of a particular type of computer. A high-level programming language is closer to human languages than to the programming language running inside the computer that communicate instructions to its hardware, the machine language. The machine language is a low-level programming language, in fact, the lowest one. \n - Object-oriented programming: a programming paradigm that represents concepts as \"objects\" that have data fields (attributes that describe the object) and associated procedures known as methods.\n - Semantics and syntax: the term semantics refers to the meaning of a language, as opposed to its form, the syntax.\n - Static and dynamic semantics: static and dynamic refer to the point in time at which some programming element is resolved. Static indicates that resolution takes place at the time a program is written. Dynamic indicates that resolution takes place at the time a program is executed.\n - Static and dynamic typing and binding: in dynamic typing, the type of the variable (e.g., if it is an integer or a string or a different type of element) is not explicitly declared, it can change, and in general is not known until execution time. In static typing, the type of the variable must be declared and it is known before the execution time. \n - Rapid Application Development: a software development methodology that uses minimal planning in favor of rapid prototyping. \n - Scripting: the writing of scripts, small pieces of simple instructions (programs) that can be rapidly executed. ",
"_____no_output_____"
],
[
"## Glossary for the Python technical characteristics II\n\n - Glue language: a programming language for writing programs to connect software components (inluding programs written in other programming languages).\n - Modules and packages: a module is a file containing Python definitions (e.g., functions) and statements. Packages are a way of structuring Python’s module namespace by using “dotted module names”. For example, the module name A.B designates a submodule named B in a package named A. To be used, modules and packages have to be imported in Python with the import function. Namespace is a container for a set of identifiers (names), and allows the disambiguation of homonym identifiers residing in different namespaces. For example, with the command `import math`, we will have all the functions and statements defined in this module in the namespace '`math.`', for example, `math.pi` is the $\\pi$ constant and `math.cos()`, the cosine function.\n - Program modularity and code reuse: the degree that programs can be compartmentalized (divided in smaller programs) to facilitate program reuse.\n - Source or binary form: source refers to the original code of the program (typically in a text format) which would need to be compiled to a binary form (not anymore human readable) to be able to be executed.\n - Major platforms: typically refers to the main operating systems (OS) in the market: Windows (by Microsoft), Mac OSX (by Apple), and Linux distributions (such as Debian, Ubuntu, Mint, etc.). Mac OSX and Linux distros are derived from, or heavily inspired by, another operating system called Unix.\n - Edit-test-debug cycle: the typical cycle in the life of a programmer; write (edit) the code, run (test) it, and correct errors or improve it (debug). The read–eval–print loop (REPL) is another related term.\n - Segmentation fault: an error in a program that is generated by the hardware which notifies the operating system about a memory access violation.\n - Exception: an error in a program detected during execution is called an exception and the Python interpreter raises a message about this error (an exception is not necessarily fatal, i.e., does not necessarily terminate or break the program).\n - Stack trace: information related to what caused the exception describing the line of the program where it occurred with a possible history of related events.\n - Source level debugger: Python has a module (named pdb) for interactive source code debugging.\n - Local and global variables: refers to the scope of the variables. A local variable is defined inside a function and typically can be accessed (it exists) only inside that function unless declared as global.",
"_____no_output_____"
],
[
"## About Python\n\nPython is also the name of the software with the most-widely used implementation of the language (maintained by the [Python Software Foundation](http://www.python.org/psf/)). \nThis implementation is written mostly in the *C* programming language and it is nicknamed CPython. \nSo, the following phrase is correct: download Python *(the software)* to program in Python *(the language)* because Python *(both)* is great! ",
"_____no_output_____"
],
[
"## Python\n\nThe origin of the name for the Python language in fact is not because of the big snake, the author of the Python language, Guido van Rossum, named the language after Monty Python, a famous British comedy group in the 70's. \nBy coincidence, the Monty Python group was also interested in human movement science:",
"_____no_output_____"
]
],
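[
[
"# A tiny illustration of the modules/namespaces entry from the glossary above\n# (a minimal sketch):\nimport math\nmath.pi, math.cos(0)",
"_____no_output_____"
]
],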
[
[
"from IPython.display import YouTubeVideo\nYouTubeVideo('9ZlBUglE6Hc', width=480, height=360, rel=0)",
"_____no_output_____"
]
],
[
[
"## Why Python and not 'X' (put any other language here)\n\nPython is not the best programming language for all needs and for all people. There is no such language. \nNow, if you are doing scientific computing, chances are that Python is perfect for you because (and might also be perfect for lots of other needs):\n\n- Python is free, open source, and cross-platform. \n- Python is easy to learn, with readable code, well documented, and with a huge and friendly user community. \n- Python is a real programming language, able to handle a variety of problems, easy to scale from small to huge problems, and easy to integrate with other systems (including other programming languages).\n- Python code is not the fastest but Python is one the fastest languages for programming. It is not uncommon in science to care more about the time we spend programming than the time the program took to run. But if code speed is important, one can easily integrate in different ways a code written in other languages (such as C and Fortran) with Python.\n- The IPython Notebook is a versatile tool for programming, data visualization, ploting, simulation, numeric and symbolic mathematics, and writting for daily use.",
"_____no_output_____"
],
[
"## Popularity of Python for teaching",
"_____no_output_____"
]
],
[
[
"from IPython.display import IFrame\nIFrame('http://cacm.acm.org/blogs/blog-cacm/176450-python-is-now-the-most-popular-' +\n 'introductory-teaching-language-at-top-us-universities/fulltext',\n width='100%', height=450)",
"_____no_output_____"
]
],
[
[
"## Python ecosystem for scientific computing (main libraries)\n\n- [Python](https://www.python.org/) of course (the CPython distribution): a free, open source and cross-platform programming language that lets you work more quickly and integrate your systems more effectively.\n- [Numpy](http://numpy.scipy.org): fundamental package for scientific computing with a N-dimensional array package.\n- [Scipy](http://scipy.org/scipylib/index.html): numerical routines for scientific computing.\n- [Matplotlib](http://matplotlib.org): comprehensive 2D Plotting.\n- [Sympy](http://sympy.org): symbolic mathematics.\n- [Pandas](http://pandas.pydata.org/): data structures and data analysis tools.\n- [IPython](http://ipython.org): provides a rich architecture for interactive computing with powerful interactive shell, kernel for Jupyter, support for interactive data visualization and use of GUI toolkits, flexible embeddable interpreters, and high performance tools for parallel computing. \n- [Jupyter Notebook](https://jupyter.org/): web application that allows you to create and share documents that contain live code, equations, visualizations and explanatory text.\n- [Statsmodels](http://statsmodels.sourceforge.net/): to explore data, estimate statistical models, and perform statistical tests.\n- [Scikit-learn](http://scikit-learn.org/stable/): tools for data mining and data analysis (including machine learning).\n- [Pillow](http://python-pillow.github.io/): Python Imaging Library.\n- [Spyder](https://code.google.com/p/spyderlib/): interactive development environment with advanced editing, interactive testing, debugging and introspection features.",
"_____no_output_____"
],
[
"## The Jupyter Notebook\n\nThe Jupyter Notebook App is a server-client application that allows editing and running notebook documents via a web browser. The Jupyter Notebook App can be executed on a local desktop requiring no internet access (as described in this document) or installed on a remote server and accessed through the internet. \n\nNotebook documents (or “notebooks”, all lower case) are documents produced by the Jupyter Notebook App which contain both computer code (e.g. python) and rich text elements (paragraph, equations, figures, links, etc...). Notebook documents are both human-readable documents containing the analysis description and the results (figures, tables, etc..) as well as executable documents which can be run to perform data analysis.\n\n[Try Jupyter Notebook in your browser](https://try.jupyter.org/).",
"_____no_output_____"
],
[
"## Jupyter Notebook and IPython kernel architectures\n\n<figure><img src=\"./../images/jupyternotebook.png\" width=800 alt=\"Jupyter Notebook and IPython kernel architectures\"/></figure>",
"_____no_output_____"
],
[
"## Installing the Python ecosystem\n\n**The easy way** \nThe easiest way to get Python and the most popular packages for scientific programming is to install them with a Python distribution such as [Anaconda](https://www.continuum.io/anaconda-overview). \nIn fact, you don't even need to install Python in your computer, you can run Python for scientific programming in the cloud using [python.org](https://www.python.org/shell/), [pythonanywhere](https://www.pythonanywhere.com/), or [repl.it](https://repl.it/languages/python3).\n\n**The hard way** \nYou can download Python and all individual packages you need and install them one by one. In general, it's not that difficult, but it can become challenging and painful for certain big packages heavily dependent on math, image visualization, and your operating system (i.e., Microsoft Windows).",
"_____no_output_____"
],
[
"## Anaconda\n\nGo to the [*Anaconda* website](https://www.anaconda.com/download/) and download the appropriate version for your computer (but download Anaconda3! for Python 3.x). The file is big (about 500 MB). [From their website](https://docs.anaconda.com/anaconda/install/): \n**Linux Install** \nIn your terminal window type and follow the instructions: \n```\nbash Anaconda3-4.4.0-Linux-x86_64.sh \n```\n**OS X Install** \nFor the graphical installer, double-click the downloaded .pkg file and follow the instructions \nFor the command-line installer, in your terminal window type and follow the instructions: \n```\nbash Anaconda3-4.4.0-MacOSX-x86_64.sh \n```\n**Windows** \nDouble-click the .exe file to install Anaconda and follow the instructions on the screen ",
"_____no_output_____"
],
[
"## Miniconda\n\nA variation of *Anaconda* is [*Miniconda*](http://conda.pydata.org/miniconda.html) (Miniconda3 for Python 3.x), which contains only the *Conda* package manager and Python. \n\nOnce *Miniconda* is installed, you can use the `conda` command to install any other packages and create environments, etc.",
"_____no_output_____"
],
[
"# My current installation",
"_____no_output_____"
]
],
[
[
"# pip install version_information\n%load_ext version_information\n%version_information numpy, scipy, matplotlib, sympy, pandas, ipython, jupyter",
"_____no_output_____"
]
],
[
[
"## IDE for Python\n\nYou might want an Integrated Development Environment (IDE) for programming in Python. \nSee [Top 5 Python IDEs For Data Science](https://www.datacamp.com/community/tutorials/data-science-python-ide#gs.mN_Wu0M) for possible IDEs. \nSoon there will be a new IDE for scientific computing with Python: [JupyterLab](https://github.com/jupyterlab/jupyterlab), developed by the Jupyter team. See [this video about JupyterLab](https://channel9.msdn.com/Events/PyData/Seattle2017/BRK11).",
"_____no_output_____"
],
[
"## To learn about Python\n\nThere is a lot of good material in the internet about Python for scientific computing, some of them are: \n\n - [How To Think Like A Computer Scientist](http://openbookproject.net/thinkcs/python/english3e/) or [the interactive edition](https://runestone.academy/runestone/static/thinkcspy/index.html) (book)\n - [Python Scientific Lecture Notes](http://scipy-lectures.github.io/) (lecture notes) \n - [A Whirlwind Tour of Python](https://github.com/jakevdp/WhirlwindTourOfPython) (tutorial/book) \n - [Python Data Science Handbook](https://jakevdp.github.io/PythonDataScienceHandbook/) (tutorial/book) \n - [Lectures on scientific computing with Python](https://github.com/jrjohansson/scientific-python-lectures#lectures-on-scientific-computing-with-python) (lecture notes)",
"_____no_output_____"
],
[
"## More examples of Jupyter Notebooks\n\nLet's run stuff from:\n- [https://github.com/demotu/BMC](https://github.com/demotu/BMC)\n- [A gallery of interesting Jupyter Notebooks](https://github.com/jupyter/jupyter/wiki/A-gallery-of-interesting-Jupyter-Notebooks)",
"_____no_output_____"
],
[
"## Questions?\n\n- https://www.reddit.com/r/learnpython/\n- https://stackoverflow.com/questions/tagged/python\n- https://www.reddit.com/r/Python/\n- https://python-forum.io/ ",
"_____no_output_____"
]
],
[
[
"Image(data='http://imgs.xkcd.com/comics/python.png')",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
]
] |
e715ce09b77d51688e33de2f0f73327886f7f503 | 356,599 | ipynb | Jupyter Notebook | content/NOTES 06.03 - DIMENSIONALITY REDUCTION.ipynb | restrepo/ai4eng.v1 | ee143630570c83395c1c4cbe40f7f6904cc2d6f2 | [
"BSD-3-Clause"
] | null | null | null | content/NOTES 06.03 - DIMENSIONALITY REDUCTION.ipynb | restrepo/ai4eng.v1 | ee143630570c83395c1c4cbe40f7f6904cc2d6f2 | [
"BSD-3-Clause"
] | null | null | null | content/NOTES 06.03 - DIMENSIONALITY REDUCTION.ipynb | restrepo/ai4eng.v1 | ee143630570c83395c1c4cbe40f7f6904cc2d6f2 | [
"BSD-3-Clause"
] | null | null | null | 356,599 | 356,599 | 0.936194 | [
[
[
"# 06.03 - PCA, NMF IN PRACTICE",
"_____no_output_____"
]
],
[
[
"!wget --no-cache -O init.py -q https://raw.githubusercontent.com/rramosp/ai4eng.v1/main/content/init.py\nimport init; init.init(force_download=False); init.get_weblink()",
"_____no_output_____"
],
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n%matplotlib inline",
"_____no_output_____"
]
],
[
[
"## Reducción de dimensionalidad para tareas de clasificación",
"_____no_output_____"
]
],
[
[
"mnist = pd.read_csv(\"local/data/mnist1.5k.csv.gz\", compression=\"gzip\", header=None).values\nd=mnist[:,1:785]\nc=mnist[:,0]\nprint (\"dimension de las imagenes y las clases\", d.shape, c.shape)",
"dimension de las imagenes y las clases (1500, 784) (1500,)\n"
],
[
"plt.imshow(d[9].reshape(28,28), cmap=plt.cm.gray)\n",
"_____no_output_____"
],
[
"perm = np.random.permutation(range(d.shape[0]))[0:50]\nrandom_imgs = d[perm]\nrandom_labels = c[perm] \nfig = plt.figure(figsize=(10,6))\nfor i in range(random_imgs.shape[0]):\n ax=fig.add_subplot(5,10,i+1)\n plt.imshow(random_imgs[i].reshape(28,28), interpolation=\"nearest\", cmap = plt.cm.Greys_r)\n ax.set_title(int(random_labels[i]))\n ax.set_xticklabels([])\n ax.set_yticklabels([])",
"_____no_output_____"
]
],
[
[
"## Principal Component Analysis",
"_____no_output_____"
]
],
[
[
"from sklearn.decomposition import PCA\n\nmnist = pd.read_csv(\"local/data/mnist1.5k.csv.gz\", compression=\"gzip\", header=None).values\nX=mnist[:,1:785]\ny=mnist[:,0]\n\npca = PCA(n_components=10)\nXp = pca.fit_transform(X)\n",
"_____no_output_____"
],
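[
"# How much of the total variance do these 10 components retain?\n# (explained_variance_ratio_ is part of sklearn's PCA API)\npca.explained_variance_ratio_.sum()",
"_____no_output_____"
],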
[
"X.shape, Xp.shape",
"_____no_output_____"
],
[
"for i in np.unique(y):\n print (i, np.sum(y==i))",
"0 150\n1 157\n2 186\n3 125\n4 151\n5 138\n6 152\n7 154\n8 141\n9 146\n"
],
[
"from sklearn.model_selection import train_test_split\n\nXtr, Xts, ytr, yts = train_test_split(X,y,test_size=.3)\nXtr.shape, Xts.shape, ytr.shape, yts.shape",
"_____no_output_____"
],
[
"from sklearn.tree import DecisionTreeClassifier\nfrom sklearn.naive_bayes import GaussianNB\ndt = GaussianNB()\ndt.fit(Xtr, ytr)\ndt.score(Xtr, ytr), dt.score(Xts, yts)",
"_____no_output_____"
],
[
"cs = range(10,200,5)",
"_____no_output_____"
],
[
"dtr, dts = [], []\nfor n_components in cs:\n print (\".\", end=\"\")\n pca = PCA(n_components=n_components)\n pca.fit(Xtr)\n\n Xt_tr = pca.transform(Xtr)\n Xt_ts = pca.transform(Xts)\n\n dt.fit(Xt_tr,ytr)\n ypreds_tr = dt.predict(Xt_tr)\n ypreds_ts = dt.predict(Xt_ts)\n ypreds_tr.shape, ypreds_ts.shape\n dtr.append(np.mean(ytr==ypreds_tr))\n dts.append(np.mean(yts==ypreds_ts))\n",
"......................................"
],
[
"len(dtr), len(dts)",
"_____no_output_____"
],
[
"plt.plot(cs, dtr, label=\"train\")\nplt.plot(cs, dts, label=\"test\")\nplt.xlabel(\"n components\")\nplt.ylabel(\"% acierto\")\nplt.legend()",
"_____no_output_____"
],
[
"best_cs = cs[np.argmax(dts)]\nbest_cs",
"_____no_output_____"
]
],
[
[
"### clasificación en el nuevo espacio de representación",
"_____no_output_____"
]
],
[
[
"pca = PCA(n_components=best_cs)\npca.fit(Xtr)\n\nXt_tr = pca.transform(Xtr)\nXt_ts = pca.transform(Xts)\ndt.fit(Xt_tr,ytr)\nypreds_tr = dt.predict(Xt_tr)\nypreds_ts = dt.predict(Xt_ts)\nypreds_tr.shape, ypreds_ts.shape\nnp.mean(ytr==ypreds_tr),np.mean(yts==ypreds_ts)",
"_____no_output_____"
]
],
[
[
"### pipelines\n\ndebemos de tener cuidado cuando usamos transformaciones en clasificación, ya que tenemos que ajustarlas (de manera no supervisada) sólo con los datos de entrenamiento\n",
"_____no_output_____"
]
],
[
[
"from sklearn.pipeline import Pipeline\n\nestimator = Pipeline(((\"pca\", PCA(n_components=best_cs)), (\"naive\", dt)))\nestimator.fit(Xtr, ytr)\nestimator.score(Xtr, ytr), estimator.score(Xts, yts)",
"_____no_output_____"
],
[
"from sklearn.model_selection import cross_val_score\npip = Pipeline([(\"PCA\", PCA(n_components=best_cs)), (\"gaussian\", GaussianNB())])\nscores = cross_val_score(pip, X,y, cv=5 )\nprint (\"%.2f +/- %.4f\"%(np.mean(scores), np.std(scores)))",
"0.84 +/- 0.0168\n"
]
],
[
[
"### obtenemos los componentes principales",
"_____no_output_____"
]
],
[
[
"cols=20\nplt.figure(figsize=(15,3))\nfor i in range(len(pca.components_)):\n plt.subplot(np.ceil(len(pca.components_)/15.),15,i+1)\n plt.imshow((pca.components_[i].reshape(28,28)), cmap = plt.cm.Greys_r)\n plt.xticks([]); plt.yticks([])",
"_____no_output_____"
]
],
[
[
"### verificamos la reconstrucción con los componentes principales",
"_____no_output_____"
]
],
[
[
"pca = PCA(n_components=best_cs)\npca.fit(Xtr)\nXp = pca.transform(X)",
"_____no_output_____"
],
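[
"# sklearn can also do the reconstruction in one call; inverse_transform adds\n# back the mean that PCA subtracts, so it is the more faithful route:\nXrec = pca.inverse_transform(Xp)\nXrec.shape",
"_____no_output_____"
],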
[
"plt.figure(figsize=(10,6))\nfor i in range(6):\n plt.subplot(3,6,i+1)\n k = np.random.randint(len(X))\n plt.imshow((np.sum((pca.components_*Xp[k].reshape(-1,1)), axis=0)).reshape(28,28), cmap=plt.cm.Greys_r)\n plt.xticks([]); plt.yticks([])\n plt.subplot(3,6,6+i+1)\n plt.imshow(X[k].reshape(28,28), cmap=plt.cm.Greys_r)\n plt.xticks([]); plt.yticks([])",
"_____no_output_____"
]
],
[
[
"### observa la nueva representación de la primera imagen",
"_____no_output_____"
]
],
[
[
"X[0]",
"_____no_output_____"
],
[
"Xp[0]",
"_____no_output_____"
]
],
[
[
"which correspond to the same components above for PCA.",
"_____no_output_____"
],
[
"## Non negative matrix factorization\n\nDescomponemos una matriz $V \\in \\mathbb{R}_+^{m\\times n}$ en el producto $W \\times H$, con $W \\in \\mathbb{R}_+^{m\\times r}$ y $H \\in \\mathbb{R}_+^{r\\times n}$ con la restricción de que todo sea positivo ($\\in \\mathbb{R}_+$), de forma que:\n\n$$V \\approx W \\times H$$\n\nLas filas de $H$ son los _componentes base_, y se soluciona planteándolo como un problema de optimización matemática con restricciones.\n\n$$\\begin{split}\nargmin_{W,H}\\;& ||V-W\\times H||\\\\\ns.t.&\\;W,H \\in \\mathbb{R}_+\n\\end{split}$$",
"_____no_output_____"
]
],
[
[
"from IPython.display import Image\nImage(filename='local/imgs/nmf.png')",
"_____no_output_____"
]
],
[
[
"### obtenemos la descomposición",
"_____no_output_____"
]
],
[
[
"from sklearn.decomposition import NMF\nX=mnist[:,1:785]; y=mnist[:,0]\n\nnmf = NMF(n_components=15, init=\"random\")\nXn = nmf.fit_transform(X)",
"_____no_output_____"
],
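[
"# Reconstruction error of the factorization V ~ W x H (here X ~ Xn x H);\n# sklearn also stores it as nmf.reconstruction_err_ after fitting.\nnp.linalg.norm(X - Xn.dot(nmf.components_)), nmf.reconstruction_err_",
"_____no_output_____"
],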
[
"cols=20\nplt.figure(figsize=(15,3))\nfor i in range(len(nmf.components_)):\n plt.subplot(len(nmf.components_)/15,15,i+1)\n plt.imshow(np.abs(nmf.components_[i].reshape(28,28)), cmap = plt.cm.Greys_r)\n plt.xticks([]); plt.yticks([])",
"_____no_output_____"
],
[
"Xn[0,:]",
"_____no_output_____"
]
],
[
[
"### verfiicamos la reconstrucción",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(10,6))\nfor i in range(6):\n plt.subplot(3,6,i+1)\n k = np.random.randint(len(X))\n plt.imshow(np.abs(np.sum((nmf.components_*Xn[k].reshape(-1,1)), axis=0)).reshape(28,28), cmap=plt.cm.Greys_r)\n plt.xticks([]); plt.yticks([])\n plt.subplot(3,6,6+i+1)\n plt.imshow(X[k].reshape(28,28), cmap=plt.cm.Greys_r)\n plt.xticks([]); plt.yticks([])",
"_____no_output_____"
]
],
[
[
"### clasificamos en el nuevo espacio de representación",
"_____no_output_____"
]
],
[
[
"print (np.mean(cross_val_score(GaussianNB(), X,y, cv=5 )))\nprint (np.mean(cross_val_score(GaussianNB(), Xn,y, cv=5 )))",
"0.5953333333333334\n0.7733333333333333\n"
]
],
[
[
"### la primera imagen en el nuevo espacio de representación\nobserva que todos los componentes son positivos",
"_____no_output_____"
]
],
[
[
"plt.imshow(X[0].reshape(28,28))",
"_____no_output_____"
],
[
"Xn[0]",
"_____no_output_____"
],
[
"cols=20\nplt.figure(figsize=(15,3))\nfor i in range(len(nmf.components_)):\n plt.subplot(len(nmf.components_)/15,15,i+1)\n plt.imshow(np.abs(nmf.components_[i].reshape(28,28)), cmap = plt.cm.Greys_r)\n plt.xticks([]); plt.yticks([])",
"_____no_output_____"
]
],
[
[
"## NMF para el reconocimiento de rostros",
"_____no_output_____"
]
],
[
[
"import numpy as np\nfaces = np.load(\"local/data/faces.npy\")",
"_____no_output_____"
],
[
"plt.figure(figsize=(15,2))\nfor i in range(30):\n plt.subplot(2,15,i+1)\n plt.imshow(faces[np.random.randint(len(faces))].reshape(19,19), cmap=plt.cm.Greys_r)\n plt.xticks([]); plt.yticks([])",
"_____no_output_____"
],
[
"nmf = NMF(n_components=30, init=\"random\")\nfaces_n = nmf.fit_transform(faces)\ncols=20\nplt.figure(figsize=(15,2))\nfor i in range(len(nmf.components_)):\n plt.subplot(np.ceil(len(nmf.components_)/15.),15,i+1)\n plt.imshow(np.abs(nmf.components_[i].reshape(19,19)), cmap = plt.cm.Greys)\n plt.xticks([]); plt.yticks([])",
"_____no_output_____"
]
],
[
[
"forzamos dispersión en los componentes, y extendemos el problema de optimización con la norma $L_1$ en los componentes base.\n\n$$\\begin{split}\nargmin_{W,H}\\;& ||V-W\\times H|| + ||H||^2_1\\\\\ns.t.&\\;W,H \\in \\mathbb{R}_+\n\\end{split}$$\n\ntambién podríamos forzar dispersión en la nueva representación\n$$\\begin{split}\nargmin_{W,H}\\;& ||V-W\\times H|| + ||W||^2_1\\\\\ns.t.&\\;W,H \\in \\mathbb{R}_+\n\\end{split}$$\n",
"_____no_output_____"
]
],
[
[
"nmf = NMF(n_components=30, init=\"nndsvd\", alpha=1000, l1_ratio=1)\nfaces_n = nmf.fit_transform(faces)\ncols=20\nplt.figure(figsize=(15,2))\nprint (np.sum(nmf.components_))\nfor i in range(len(nmf.components_)):\n plt.subplot(np.ceil(len(nmf.components_)/15.),15,i+1)\n plt.imshow(np.abs(nmf.components_[i].reshape(19,19)), cmap = plt.cm.Greys)\n plt.xticks([]); plt.yticks([])",
"14650.57060382788\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
e715d0fe234873cb976337f97492001786959d1a | 5,808 | ipynb | Jupyter Notebook | notes/ssh.ipynb | jmsung/jmsung-research | c31f5a68e78b75e76b383bf6d2a2c4c478f6f566 | [
"MIT"
] | null | null | null | notes/ssh.ipynb | jmsung/jmsung-research | c31f5a68e78b75e76b383bf6d2a2c4c478f6f566 | [
"MIT"
] | null | null | null | notes/ssh.ipynb | jmsung/jmsung-research | c31f5a68e78b75e76b383bf6d2a2c4c478f6f566 | [
"MIT"
] | null | null | null | 20.669039 | 114 | 0.488809 | [
[
[
"# SSH\n* Secure Shell (SSH)\n\n### Setup\n\n1. Following is a protocol to connect to the simc2 server (shared google cloud) as an example. \n\n\n2. Make `~/.ssh/configure` \n\n\n3. Paste the following in the `configure` file: \n\n```\nHost simc2\nHostName 10.159.230.56\n\nHost sungj4-server\nHostName 10.159.234.227\n\nHost sungj4-sim\nHostName 10.159.234.230\n\nHost sungj4-deb9\nHostName 10.159.234.231\n\nPort 22\nuser sungj4\nForwardX11 yes\nForwardX11Timeout 10h\nForwardAgent yes\n```\n\n\n4. Generate `id_rsa` and `id_rsa.pub` key \n`$ ssh-keygen -t rsa`\n\n\n5. Copy the public key into the server (simc2). \n`$ ssh-copy-id sungj4@simc2`\n\nThis will make a file in the server (simc2). \n`.ssh/authrized_keys`\n\n\n6. Update the permission of .ssh in the local and the server \n```\n$ chmod 755 ~ \n$ chmod 700 ~/.ssh\n$ chmod 600 ~/.ssh/id_rsa\n$ chmod 600 ~/.ssh/authorized_keys\n```\n\n\nNow I can connect to simc2 from my local machine without id/passwd. \n`$ ssh simc2`\n\n\n",
"_____no_output_____"
],
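[
"A quick way to test the key-based setup above (hedged — the `simc2` alias comes from the sample `config`):\n\n```\n# should print OK without asking for a password\n$ ssh -o BatchMode=yes simc2 true && echo OK\n```",
"_____no_output_____"
],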
[
"# SSHFS\n* Mount Remote File Systems Over SSH\n* https://www.digitalocean.com/community/tutorials/how-to-use-sshfs-to-mount-remote-file-systems-over-ssh\n\n\n### Install sshfs\n* Ubuntu: $ sudo apt-get install sshfs \n* Mac: Install FUSE and SSHFS from the osxfuse site \n\n### Mount \n1. Create a local mount_point \n`$ sudo mkdir /mnt/D1` \n`$ sudo chown sungj4 /mnt/D1` \n\n\n2. Manual mount \n`$ sshfs -o allow_other sungj4@sungj4-server:/mnt/D1 /mnt/D1`\n\n\n3. Automatic mount\n\n* Put this in the `.bashrc`. Disk will be mounted unless already mounted. \n\n```\n(Mac)\nif ! df | awk '{print $9}' | grep -Ex \"/mnt/D1\"; then\n sshfs -o allow_other sungj4@sungj4-server:/mnt/D1 /mnt/D1\nfi\n\n(Linux)\nif ! grep -qs '/mnt/D1 ' /proc/mounts; then\n sshfs -o allow_other sungj4@sungj4-server:/mnt/D1 /mnt/D1\nfi\n```\n\n4. Unmounting the Remote File System \n`$ sudo umount /mnt/D1`\n\n5. Permanently Mounting the Remote File System \n\n* Open fstab \n`$ sudo vim /etc/fstab` \n\n* And add the following line in the file. \n`sshfs#[email protected]:/ /mnt/droplet`\n",
"_____no_output_____"
]
]
] | [
"markdown"
] | [
[
"markdown",
"markdown"
]
] |
e715de9934be7a2d5045326eae9ff5a21aa3e0f3 | 81,653 | ipynb | Jupyter Notebook | 1-intro/nb02_data_import_and_networks.ipynb | simonepoetto/Complessit- | 5b4ffa5f71f906ed2dd826ee4ce5d2df9c58feac | [
"MIT"
] | null | null | null | 1-intro/nb02_data_import_and_networks.ipynb | simonepoetto/Complessit- | 5b4ffa5f71f906ed2dd826ee4ce5d2df9c58feac | [
"MIT"
] | null | null | null | 1-intro/nb02_data_import_and_networks.ipynb | simonepoetto/Complessit- | 5b4ffa5f71f906ed2dd826ee4ce5d2df9c58feac | [
"MIT"
] | null | null | null | 121.146884 | 38,284 | 0.875204 | [
[
[
"<center>\n<hr>\n<h1>Complessità nei sistemi sociali</h1>\n<h3>Laurea Magistrale in Fisica Dei Sistemi Complessi</h3> \n<h3>A.A. 2018/19</h3>\n<h3>Daniela Paolotti & Michele Tizzoni</h3>\n<h2>Notebook 2 - Data import and network representation</h2>\n<hr>\n</center>",
"_____no_output_____"
],
[
"Here, we play a bit with some network datasets.",
"_____no_output_____"
],
[
"We analyze the dataset 'cit-HepTh' available from the SNAP repository: http://snap.stanford.edu/data/index.html\n\nThere are several other repositories of network datasets, for instance:\n- http://konect.uni-koblenz.de/\n- http://www-personal.umich.edu/~mejn/netdata/\n- http://networkrepository.com/\n- http://cnets.indiana.edu/resources/data-repository/\n- http://www.sociopatterns.org/datasets/",
"_____no_output_____"
]
],
[
[
"import sys, math",
"_____no_output_____"
],
[
"%pylab inline",
"Populating the interactive namespace from numpy and matplotlib\n"
]
],
[
[
"# Basic network import and representation",
"_____no_output_____"
]
],
[
[
"import collections as col",
"_____no_output_____"
]
],
[
[
"We use a dictionary that associates a key (node) to a list of nodes (neighbours)",
"_____no_output_____"
]
],
[
[
"links_out=col.defaultdict(list)\nprint(links_out)",
"defaultdict(<class 'list'>, {})\n"
]
],
[
[
"We open the file containing the network and read each line",
"_____no_output_____"
]
],
[
[
"filepath='./../network_data/cit-HepTh.txt'",
"_____no_output_____"
],
[
"fh=open(filepath,'r')",
"_____no_output_____"
],
[
"s=fh.readlines()",
"_____no_output_____"
],
[
"s[:4]",
"_____no_output_____"
],
[
"s[0].strip().split(':')",
"_____no_output_____"
],
[
"for line in s:\n #remove \"\\n\" characters (.strip()) and split the line at blank spaces (split.())\n t=line.strip().split()\n if t[0]!='#':\n #the first lines are comments\n origin=int(t[0])\n dest=int(t[1])\n links_out[origin].append(dest)\n \n#close the file\nfh.close()",
"_____no_output_____"
]
],
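[
[
"# Total number of directed edges parsed (a quick sanity check on links_out):\nsum(len(v) for v in links_out.values())",
"_____no_output_____"
]
],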
[
[
"How many nodes are in the network?",
"_____no_output_____"
]
],
[
[
"tot_nodes=len(links_out)\nprint(tot_nodes)",
"25059\n"
]
],
[
[
"We calculate the out-degree distribution of the network.",
"_____no_output_____"
]
],
[
[
"degree={}\n\nfor i in links_out:\n\n deg=len(links_out[i])\n\n if deg in degree:\n degree[deg]+=1\n else:\n degree[deg]=1",
"_____no_output_____"
],
[
"print(degree.keys())",
"dict_keys([83, 1, 7, 4, 2, 40, 9, 27, 10, 14, 21, 25, 17, 15, 19, 16, 29, 53, 28, 22, 37, 39, 35, 20, 26, 32, 23, 6, 11, 30, 41, 65, 24, 31, 34, 56, 18, 13, 58, 36, 8, 42, 47, 52, 38, 5, 3, 72, 121, 54, 75, 12, 55, 84, 59, 33, 51, 46, 63, 44, 50, 73, 71, 74, 45, 165, 126, 214, 562, 68, 64, 48, 57, 134, 49, 216, 154, 61, 159, 85, 359, 43, 78, 60, 62, 66, 157, 67, 98, 70, 115, 104, 86, 207, 99, 109, 81, 123, 97, 100, 167, 91, 201, 69, 125, 77, 89, 79, 87, 90, 82, 106, 263, 143, 102, 80, 158, 95, 181, 274, 76, 122, 175, 149, 212, 101, 302, 96, 120, 200, 108, 226, 142, 107, 88, 246, 160, 94, 93, 135, 146, 211, 124, 198, 177, 289, 169, 180, 170, 156, 92, 103, 139, 136, 112])\n"
]
],
[
[
"We export the degree distribution to an output file.",
"_____no_output_____"
]
],
[
[
"s_deg=sorted(degree.keys())",
"_____no_output_____"
],
[
"fout=open('./../network_data/Cit-HepTh-degout-distri.dat','w')\nfor d in s_deg:\n deg_freq=float(degree[d])/tot_nodes \n fout.write(str(d)+' '+str(deg_freq)+'\\n')\nfout.close()",
"_____no_output_____"
],
[
"for i in degree.items():\n print(i)",
"(83, 6)\n(1, 2449)\n(7, 1087)\n(4, 1405)\n(2, 1951)\n(40, 104)\n(9, 847)\n(27, 250)\n(10, 790)\n(14, 622)\n(21, 398)\n(25, 333)\n(17, 503)\n(15, 556)\n(19, 429)\n(16, 519)\n(29, 246)\n(53, 32)\n(28, 244)\n(22, 384)\n(37, 135)\n(39, 114)\n(35, 162)\n(20, 412)\n(26, 284)\n(32, 194)\n(23, 339)\n(6, 1172)\n(11, 771)\n(30, 240)\n(41, 98)\n(65, 17)\n(24, 304)\n(31, 184)\n(34, 166)\n(56, 31)\n(18, 498)\n(13, 640)\n(58, 35)\n(36, 149)\n(8, 975)\n(42, 91)\n(47, 60)\n(52, 39)\n(38, 111)\n(5, 1272)\n(3, 1657)\n(72, 6)\n(121, 4)\n(54, 34)\n(75, 8)\n(12, 651)\n(55, 37)\n(84, 4)\n(59, 23)\n(33, 160)\n(51, 45)\n(46, 55)\n(63, 23)\n(44, 77)\n(50, 45)\n(73, 11)\n(71, 13)\n(74, 7)\n(45, 61)\n(165, 1)\n(126, 2)\n(214, 1)\n(562, 1)\n(68, 10)\n(64, 17)\n(48, 51)\n(57, 33)\n(134, 3)\n(49, 48)\n(216, 1)\n(154, 3)\n(61, 10)\n(159, 1)\n(85, 3)\n(359, 1)\n(43, 81)\n(78, 7)\n(60, 14)\n(62, 16)\n(66, 15)\n(157, 1)\n(67, 14)\n(98, 1)\n(70, 14)\n(115, 2)\n(104, 4)\n(86, 5)\n(207, 1)\n(99, 4)\n(109, 2)\n(81, 5)\n(123, 1)\n(97, 2)\n(100, 1)\n(167, 1)\n(91, 2)\n(201, 1)\n(69, 13)\n(125, 1)\n(77, 8)\n(89, 4)\n(79, 6)\n(87, 1)\n(90, 5)\n(82, 5)\n(106, 4)\n(263, 1)\n(143, 1)\n(102, 1)\n(80, 5)\n(158, 1)\n(95, 3)\n(181, 1)\n(274, 1)\n(76, 4)\n(122, 2)\n(175, 1)\n(149, 1)\n(212, 1)\n(101, 2)\n(302, 1)\n(96, 3)\n(120, 2)\n(200, 1)\n(108, 2)\n(226, 1)\n(142, 1)\n(107, 1)\n(88, 3)\n(246, 1)\n(160, 1)\n(94, 1)\n(93, 4)\n(135, 1)\n(146, 1)\n(211, 1)\n(124, 1)\n(198, 1)\n(177, 1)\n(289, 1)\n(169, 1)\n(180, 1)\n(170, 1)\n(156, 1)\n(92, 1)\n(103, 1)\n(139, 1)\n(136, 1)\n(112, 1)\n"
],
[
"from operator import itemgetter",
"_____no_output_____"
],
[
"x=[]\ny=[]\nfor i in sorted(degree.items(), key=itemgetter(0)):\n x.append(i[0])\n y.append(float(i[1])/tot_nodes)\n\nplt.figure(figsize=(10,7)) \nplt.plot(x,y)\nplt.xlabel('$k_{out}$', fontsize=24)\nplt.ylabel('$P(k_{out})$', fontsize=24)\nplt.xticks(fontsize=24)\nplt.yticks(fontsize=24)\nplt.yscale('log')\nplt.xscale('log')\nplt.show()",
"_____no_output_____"
]
],
[
[
"Let's have a look at the degree-in distribution.",
"_____no_output_____"
]
],
[
[
"links_in=col.defaultdict(list)\n\nfh=open(filepath,'r')\n#reading all the file lines\nfor line in fh.readlines():\n #remove \"\\n\" characters (.strip()) and split the line at blank spaces (split.())\n s=line.strip().split()\n if s[0]!='#':\n #the first lines are comments\n origin=int(s[0])\n dest=int(s[1])\n links_in[dest].append(origin)\n \n#chiudo il file\nfh.close()",
"_____no_output_____"
],
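[
"# Average in-degree, as a quick sanity check on the parsed edge lists:\nsum(len(v) for v in links_in.values())/len(links_in)",
"_____no_output_____"
],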
[
"degree_in=col.defaultdict(int)\nfor i in links_in.keys():\n deg=len(links_in[i])\n degree_in[deg]+=1\n\ntot_nodes_in=len(links_in)\nprint(tot_nodes_in)",
"23180\n"
]
],
[
[
"What is the difference from an exponential distribution?",
"_____no_output_____"
]
],
[
[
"def f(t):\n return np.exp(-0.5*t)\n\nx=[]\ny=[]\nfor i in sorted(degree_in.items(), key=itemgetter(0)):\n x.append(i[0])\n y.append(float(i[1])/tot_nodes_in)\n\nplt.figure(figsize=(10,7)) \n \nplt.plot(np.array(x),np.array(y))\nplt.plot(np.array(x), f(np.array(x)), label='Exponential')\nplt.xlabel('$k_{in}$', fontsize=24)\nplt.ylabel('$P(k_{in})$', fontsize=24)\nplt.xticks(fontsize=24)\nplt.yticks(fontsize=24)\nplt.yscale('log')\nplt.xscale('log')\nplt.axis([1,10000,0.00001,1])\nplt.legend()\nplt.show()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
e715e3e1882e4ccc7cdb8ccdaa9607c1ccd8601d | 21,431 | ipynb | Jupyter Notebook | chatbot/20.gru-birnn-seq2seq-luong.ipynb | huseinzol05/Tensorflow-NLP-Models | 0741216aa8235e1228b3de7903cc36d73f8f2b45 | [
"MIT"
] | 1,705 | 2018-11-03T17:34:22.000Z | 2022-03-29T04:30:01.000Z | chatbot/20.gru-birnn-seq2seq-luong.ipynb | eridgd/NLP-Models-Tensorflow | d46e746cd038f25e8ee2df434facbe12e31576a1 | [
"MIT"
] | 26 | 2019-03-16T17:23:00.000Z | 2021-10-08T08:06:09.000Z | chatbot/20.gru-birnn-seq2seq-luong.ipynb | eridgd/NLP-Models-Tensorflow | d46e746cd038f25e8ee2df434facbe12e31576a1 | [
"MIT"
] | 705 | 2018-11-03T17:34:25.000Z | 2022-03-24T02:29:14.000Z | 36.323729 | 141 | 0.519434 | [
[
[
"import numpy as np\nimport tensorflow as tf\nfrom sklearn.utils import shuffle\nimport re\nimport time\nimport collections\nimport os",
"_____no_output_____"
],
[
"def build_dataset(words, n_words, atleast=1):\n count = [['PAD', 0], ['GO', 1], ['EOS', 2], ['UNK', 3]]\n counter = collections.Counter(words).most_common(n_words)\n counter = [i for i in counter if i[1] >= atleast]\n count.extend(counter)\n dictionary = dict()\n for word, _ in count:\n dictionary[word] = len(dictionary)\n data = list()\n unk_count = 0\n for word in words:\n index = dictionary.get(word, 0)\n if index == 0:\n unk_count += 1\n data.append(index)\n count[0][1] = unk_count\n reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))\n return data, count, dictionary, reversed_dictionary",
"_____no_output_____"
],
[
"lines = open('movie_lines.txt', encoding='utf-8', errors='ignore').read().split('\\n')\nconv_lines = open('movie_conversations.txt', encoding='utf-8', errors='ignore').read().split('\\n')\n\nid2line = {}\nfor line in lines:\n _line = line.split(' +++$+++ ')\n if len(_line) == 5:\n id2line[_line[0]] = _line[4]\n \nconvs = [ ]\nfor line in conv_lines[:-1]:\n _line = line.split(' +++$+++ ')[-1][1:-1].replace(\"'\",\"\").replace(\" \",\"\")\n convs.append(_line.split(','))\n \nquestions = []\nanswers = []\n\nfor conv in convs:\n for i in range(len(conv)-1):\n questions.append(id2line[conv[i]])\n answers.append(id2line[conv[i+1]])\n \ndef clean_text(text):\n text = text.lower()\n text = re.sub(r\"i'm\", \"i am\", text)\n text = re.sub(r\"he's\", \"he is\", text)\n text = re.sub(r\"she's\", \"she is\", text)\n text = re.sub(r\"it's\", \"it is\", text)\n text = re.sub(r\"that's\", \"that is\", text)\n text = re.sub(r\"what's\", \"that is\", text)\n text = re.sub(r\"where's\", \"where is\", text)\n text = re.sub(r\"how's\", \"how is\", text)\n text = re.sub(r\"\\'ll\", \" will\", text)\n text = re.sub(r\"\\'ve\", \" have\", text)\n text = re.sub(r\"\\'re\", \" are\", text)\n text = re.sub(r\"\\'d\", \" would\", text)\n text = re.sub(r\"\\'re\", \" are\", text)\n text = re.sub(r\"won't\", \"will not\", text)\n text = re.sub(r\"can't\", \"cannot\", text)\n text = re.sub(r\"n't\", \" not\", text)\n text = re.sub(r\"n'\", \"ng\", text)\n text = re.sub(r\"'bout\", \"about\", text)\n text = re.sub(r\"'til\", \"until\", text)\n text = re.sub(r\"[-()\\\"#/@;:<>{}`+=~|.!?,]\", \"\", text)\n return ' '.join([i.strip() for i in filter(None, text.split())])\n\nclean_questions = []\nfor question in questions:\n clean_questions.append(clean_text(question))\n \nclean_answers = [] \nfor answer in answers:\n clean_answers.append(clean_text(answer))\n \nmin_line_length = 2\nmax_line_length = 5\nshort_questions_temp = []\nshort_answers_temp = []\n\ni = 0\nfor question in clean_questions:\n if len(question.split()) >= min_line_length and len(question.split()) <= max_line_length:\n short_questions_temp.append(question)\n short_answers_temp.append(clean_answers[i])\n i += 1\n\nshort_questions = []\nshort_answers = []\n\ni = 0\nfor answer in short_answers_temp:\n if len(answer.split()) >= min_line_length and len(answer.split()) <= max_line_length:\n short_answers.append(answer)\n short_questions.append(short_questions_temp[i])\n i += 1\n\nquestion_test = short_questions[500:550]\nanswer_test = short_answers[500:550]\nshort_questions = short_questions[:500]\nshort_answers = short_answers[:500]",
"_____no_output_____"
],
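[
"# A quick look at a few cleaned question/answer pairs (sanity check):\nlist(zip(short_questions[:3], short_answers[:3]))",
"_____no_output_____"
],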
[
"concat_from = ' '.join(short_questions+question_test).split()\nvocabulary_size_from = len(list(set(concat_from)))\ndata_from, count_from, dictionary_from, rev_dictionary_from = build_dataset(concat_from, vocabulary_size_from)\nprint('vocab from size: %d'%(vocabulary_size_from))\nprint('Most common words', count_from[4:10])\nprint('Sample data', data_from[:10], [rev_dictionary_from[i] for i in data_from[:10]])\nprint('filtered vocab size:',len(dictionary_from))\nprint(\"% of vocab used: {}%\".format(round(len(dictionary_from)/vocabulary_size_from,4)*100))",
"vocab from size: 657\nMost common words [('you', 132), ('is', 78), ('i', 68), ('what', 51), ('it', 50), ('that', 49)]\nSample data [7, 28, 129, 35, 61, 42, 12, 22, 82, 225] ['what', 'good', 'stuff', 'she', 'okay', 'they', 'do', 'to', 'hey', 'sweet']\nfiltered vocab size: 661\n% of vocab used: 100.61%\n"
],
[
"concat_to = ' '.join(short_answers+answer_test).split()\nvocabulary_size_to = len(list(set(concat_to)))\ndata_to, count_to, dictionary_to, rev_dictionary_to = build_dataset(concat_to, vocabulary_size_to)\nprint('vocab from size: %d'%(vocabulary_size_to))\nprint('Most common words', count_to[4:10])\nprint('Sample data', data_to[:10], [rev_dictionary_to[i] for i in data_to[:10]])\nprint('filtered vocab size:',len(dictionary_to))\nprint(\"% of vocab used: {}%\".format(round(len(dictionary_to)/vocabulary_size_to,4)*100))",
"vocab from size: 660\nMost common words [('i', 97), ('you', 91), ('is', 62), ('it', 58), ('not', 47), ('what', 39)]\nSample data [12, 216, 5, 4, 94, 25, 59, 10, 8, 79] ['the', 'real', 'you', 'i', 'hope', 'so', 'they', 'do', 'not', 'hi']\nfiltered vocab size: 664\n% of vocab used: 100.61%\n"
],
[
"GO = dictionary_from['GO']\nPAD = dictionary_from['PAD']\nEOS = dictionary_from['EOS']\nUNK = dictionary_from['UNK']",
"_____no_output_____"
],
[
"for i in range(len(short_answers)):\n short_answers[i] += ' EOS'",
"_____no_output_____"
],
[
"class Chatbot:\n def __init__(self, size_layer, num_layers, embedded_size,\n from_dict_size, to_dict_size, learning_rate, batch_size):\n \n def cells(size,reuse=False):\n return tf.nn.rnn_cell.GRUCell(size,reuse=reuse)\n \n self.X = tf.placeholder(tf.int32, [None, None])\n self.Y = tf.placeholder(tf.int32, [None, None])\n self.X_seq_len = tf.placeholder(tf.int32, [None])\n self.Y_seq_len = tf.placeholder(tf.int32, [None])\n batch_size = tf.shape(self.X)[0]\n \n encoder_embeddings = tf.Variable(tf.random_uniform([from_dict_size, embedded_size], -1, 1))\n decoder_embeddings = tf.Variable(tf.random_uniform([to_dict_size, embedded_size], -1, 1))\n encoder_embedded = tf.nn.embedding_lookup(encoder_embeddings, self.X)\n main = tf.strided_slice(self.X, [0, 0], [batch_size, -1], [1, 1])\n decoder_input = tf.concat([tf.fill([batch_size, 1], GO), main], 1)\n decoder_embedded = tf.nn.embedding_lookup(encoder_embeddings, decoder_input)\n \n def attention():\n attention_mechanism = tf.contrib.seq2seq.LuongAttention(num_units = size_layer//2, \n memory = encoder_embedded)\n return tf.contrib.seq2seq.AttentionWrapper(cell = cells(size_layer//2), \n attention_mechanism = attention_mechanism,\n attention_layer_size = size_layer//2)\n\n for n in range(num_layers):\n (out_fw, out_bw), (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(\n cell_fw = attention(),\n cell_bw = attention(),\n inputs = encoder_embedded,\n sequence_length = self.X_seq_len,\n dtype = tf.float32,\n scope = 'bidirectional_rnn_%d'%(n))\n encoder_embedded = tf.concat((out_fw, out_bw), 2)\n \n bi_state = tf.concat((state_fw[0],state_bw[0]), -1)\n last_state = tuple([bi_state] * num_layers)\n \n with tf.variable_scope(\"decoder\"):\n rnn_cells_dec = tf.nn.rnn_cell.MultiRNNCell([cells(size_layer) for _ in range(num_layers)])\n outputs, _ = tf.nn.dynamic_rnn(rnn_cells_dec, decoder_embedded, \n initial_state = last_state,\n dtype = tf.float32)\n self.logits = tf.layers.dense(outputs,to_dict_size)\n masks = tf.sequence_mask(self.Y_seq_len, tf.reduce_max(self.Y_seq_len), dtype=tf.float32)\n self.cost = tf.contrib.seq2seq.sequence_loss(logits = self.logits,\n targets = self.Y,\n weights = masks)\n self.optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(self.cost)\n y_t = tf.argmax(self.logits,axis=2)\n y_t = tf.cast(y_t, tf.int32)\n self.prediction = tf.boolean_mask(y_t, masks)\n mask_label = tf.boolean_mask(self.Y, masks)\n correct_pred = tf.equal(self.prediction, mask_label)\n correct_index = tf.cast(correct_pred, tf.float32)\n self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))",
"_____no_output_____"
],
[
"size_layer = 256\nnum_layers = 2\nembedded_size = 128\nlearning_rate = 0.001\nbatch_size = 16\nepoch = 20",
"_____no_output_____"
],
[
"tf.reset_default_graph()\nsess = tf.InteractiveSession()\nmodel = Chatbot(size_layer, num_layers, embedded_size, len(dictionary_from), \n len(dictionary_to), learning_rate,batch_size)\nsess.run(tf.global_variables_initializer())",
"_____no_output_____"
],
[
"def str_idx(corpus, dic):\n X = []\n for i in corpus:\n ints = []\n for k in i.split():\n ints.append(dic.get(k,UNK))\n X.append(ints)\n return X",
"_____no_output_____"
],
[
"X = str_idx(short_questions, dictionary_from)\nY = str_idx(short_answers, dictionary_to)\nX_test = str_idx(question_test, dictionary_from)\nY_test = str_idx(answer_test, dictionary_from)",
"_____no_output_____"
],
[
"maxlen_question = max([len(x) for x in X]) * 2\nmaxlen_answer = max([len(y) for y in Y]) * 2\n\nmaxlen_question, maxlen_answer",
"_____no_output_____"
],
[
"def pad_sentence_batch(sentence_batch, pad_int, maxlen):\n padded_seqs = []\n seq_lens = []\n max_sentence_len = maxlen\n for sentence in sentence_batch:\n padded_seqs.append(sentence + [pad_int] * (max_sentence_len - len(sentence)))\n seq_lens.append(maxlen)\n return padded_seqs, seq_lens",
"_____no_output_____"
],
[
"for i in range(epoch):\n total_loss, total_accuracy = 0, 0\n X, Y = shuffle(X, Y)\n for k in range(0, len(short_questions), batch_size):\n index = min(k + batch_size, len(short_questions))\n batch_x, seq_x = pad_sentence_batch(X[k: index], PAD, maxlen_answer)\n batch_y, seq_y = pad_sentence_batch(Y[k: index], PAD, maxlen_answer)\n predicted, accuracy, loss, _ = sess.run([tf.argmax(model.logits,2),\n model.accuracy, model.cost, model.optimizer], \n feed_dict={model.X:batch_x,\n model.Y:batch_y,\n model.X_seq_len:seq_x,\n model.Y_seq_len:seq_y})\n total_loss += loss\n total_accuracy += accuracy\n total_loss /= (len(short_questions) / batch_size)\n total_accuracy /= (len(short_questions) / batch_size)\n print('epoch: %d, avg loss: %f, avg accuracy: %f'%(i+1, total_loss, total_accuracy))",
"epoch: 1, avg loss: 2.549845, avg accuracy: 0.639333\nepoch: 2, avg loss: 1.987446, avg accuracy: 0.669167\nepoch: 3, avg loss: 1.950939, avg accuracy: 0.674167\nepoch: 4, avg loss: 1.901767, avg accuracy: 0.676333\nepoch: 5, avg loss: 1.884292, avg accuracy: 0.679167\nepoch: 6, avg loss: 1.850528, avg accuracy: 0.679167\nepoch: 7, avg loss: 1.832903, avg accuracy: 0.676833\nepoch: 8, avg loss: 1.812006, avg accuracy: 0.682333\nepoch: 9, avg loss: 1.782332, avg accuracy: 0.684333\nepoch: 10, avg loss: 1.751309, avg accuracy: 0.689000\nepoch: 11, avg loss: 1.732150, avg accuracy: 0.692833\nepoch: 12, avg loss: 1.678405, avg accuracy: 0.699667\nepoch: 13, avg loss: 1.649477, avg accuracy: 0.705333\nepoch: 14, avg loss: 1.607285, avg accuracy: 0.709167\nepoch: 15, avg loss: 1.562710, avg accuracy: 0.718167\nepoch: 16, avg loss: 1.521515, avg accuracy: 0.721833\nepoch: 17, avg loss: 1.470522, avg accuracy: 0.730500\nepoch: 18, avg loss: 1.427211, avg accuracy: 0.733167\nepoch: 19, avg loss: 1.367032, avg accuracy: 0.742833\nepoch: 20, avg loss: 1.336174, avg accuracy: 0.743833\n"
],
[
"for i in range(len(batch_x)):\n print('row %d'%(i+1))\n print('QUESTION:',' '.join([rev_dictionary_from[n] for n in batch_x[i] if n not in [0,1,2,3]]))\n print('REAL ANSWER:',' '.join([rev_dictionary_to[n] for n in batch_y[i] if n not in[0,1,2,3]]))\n print('PREDICTED ANSWER:',' '.join([rev_dictionary_to[n] for n in predicted[i] if n not in[0,1,2,3]]),'\\n')",
"row 1\nQUESTION: none too discreet is he\nREAL ANSWER: no sir he is not\nPREDICTED ANSWER: i is is is not \n\nrow 2\nQUESTION: what shall we do\nREAL ANSWER: tea would be nice\nPREDICTED ANSWER: i is not know \n\nrow 3\nQUESTION: cannot it wait\nREAL ANSWER: no mr president it cannot\nPREDICTED ANSWER: i is not it \n\nrow 4\nQUESTION: i do not know\nREAL ANSWER: proceed inside\nPREDICTED ANSWER: i is \n\n"
],
[
"batch_x, seq_x = pad_sentence_batch(X_test[:batch_size], PAD, maxlen_answer)\nbatch_y, seq_y = pad_sentence_batch(Y_test[:batch_size], PAD, maxlen_answer)\npredicted = sess.run(tf.argmax(model.logits,2), feed_dict={model.X:batch_x,model.X_seq_len:seq_x})\n\nfor i in range(len(batch_x)):\n print('row %d'%(i+1))\n print('QUESTION:',' '.join([rev_dictionary_from[n] for n in batch_x[i] if n not in [0,1,2,3]]))\n print('REAL ANSWER:',' '.join([rev_dictionary_to[n] for n in batch_y[i] if n not in[0,1,2,3]]))\n print('PREDICTED ANSWER:',' '.join([rev_dictionary_to[n] for n in predicted[i] if n not in[0,1,2,3]]),'\\n')",
"row 1\nQUESTION: but david\nREAL ANSWER: is here that\nPREDICTED ANSWER: i is it \n\nrow 2\nQUESTION: hopeless it is hopeless\nREAL ANSWER: tell ballet then back\nPREDICTED ANSWER: i is \n\nrow 3\nQUESTION: miss price\nREAL ANSWER: yes learning\nPREDICTED ANSWER: i is \n\nrow 4\nQUESTION: mr kessler wake up please\nREAL ANSWER: is here are\nPREDICTED ANSWER: i is \n\nrow 5\nQUESTION: there were witnesses\nREAL ANSWER: why she out\nPREDICTED ANSWER: i is not is \n\nrow 6\nQUESTION: what about it\nREAL ANSWER: not you are\nPREDICTED ANSWER: i is \n\nrow 7\nQUESTION: go on ask them\nREAL ANSWER: i just home\nPREDICTED ANSWER: i is \n\nrow 8\nQUESTION: beware the moon\nREAL ANSWER: seen hi is he\nPREDICTED ANSWER: i am \n\nrow 9\nQUESTION: did you hear that\nREAL ANSWER: is down what\nPREDICTED ANSWER: i i i not \n\nrow 10\nQUESTION: i heard that\nREAL ANSWER: it here not\nPREDICTED ANSWER: i is not \n\nrow 11\nQUESTION: the hound of the baskervilles\nREAL ANSWER: heard\nPREDICTED ANSWER: i me \n\nrow 12\nQUESTION: it is moving\nREAL ANSWER: not you hear\nPREDICTED ANSWER: i is \n\nrow 13\nQUESTION: nice doggie good boy\nREAL ANSWER: bill stupid\nPREDICTED ANSWER: i is \n\nrow 14\nQUESTION: it sounds far away\nREAL ANSWER: that pecos baby seen hi\nPREDICTED ANSWER: i is \n\nrow 15\nQUESTION: debbie klein cried a lot\nREAL ANSWER: is will srai not\nPREDICTED ANSWER: i is \n\nrow 16\nQUESTION: what are you doing here\nREAL ANSWER: is know look i\nPREDICTED ANSWER: i is a know \n\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e715e84aedab69a3122075d65c4ee75f77245d78 | 11,306 | ipynb | Jupyter Notebook | models/ML Pipeline Preparation.ipynb | debjani-bhowmick/disaster-response-pipeline | 143f3ef9c8c76a3b02df93c4754cf0b2f5c1db97 | [
"Unlicense"
] | null | null | null | models/ML Pipeline Preparation.ipynb | debjani-bhowmick/disaster-response-pipeline | 143f3ef9c8c76a3b02df93c4754cf0b2f5c1db97 | [
"Unlicense"
] | null | null | null | models/ML Pipeline Preparation.ipynb | debjani-bhowmick/disaster-response-pipeline | 143f3ef9c8c76a3b02df93c4754cf0b2f5c1db97 | [
"Unlicense"
] | 1 | 2021-07-16T14:59:05.000Z | 2021-07-16T14:59:05.000Z | 29.596859 | 337 | 0.583584 | [
[
[
"# ML Pipeline Preparation\nFollow the instructions below to help you create your ML pipeline.\n### 1. Import libraries and load data from database.\n- Import Python libraries\n- Load dataset from database with [`read_sql_table`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_sql_table.html)\n- Define feature and target variables X and Y",
"_____no_output_____"
]
],
[
[
"# import libraries\nimport re\nimport nltk\nimport pickle\nimport numpy as np\nimport pandas as pd\nnltk.download('stopwords')\nnltk.download(['punkt', 'wordnet'])\nfrom nltk.corpus import stopwords \nfrom sqlalchemy import create_engine\nfrom sklearn.pipeline import Pipeline\nfrom nltk.tokenize import word_tokenize \nfrom nltk.stem import WordNetLemmatizer\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.multioutput import MultiOutputClassifier\nfrom nltk.tokenize import word_tokenize, RegexpTokenizer\nfrom sklearn.metrics import precision_recall_fscore_support\nfrom sklearn.model_selection import train_test_split, GridSearchCV\nfrom sklearn.metrics import classification_report\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer",
"_____no_output_____"
],
[
"# load data from database\nengine = create_engine('sqlite:///InsertDatabaseName.db')\ndf = pd.read_sql_table('InsertTableName',engine)\nX = df['message']\ny = df[df.columns[5:]]",
"_____no_output_____"
]
],
[
[
"### 2. Write a tokenization function to process your text data",
"_____no_output_____"
]
],
[
[
"def tokenize(text):\n \"\"\"\n Behaviour: Split text into words and return the root form of the words\n Args:\n text(str): text data.\n Return:\n clean_tokens(list of str): List of tokens extracted from the provided text\n \"\"\"\n # Normalize text:Convert to lowercase and Remove punctuation\n text = text.lower() \n text = re.sub(r\"[^a-zA-Z0-9]\", \" \", text)\n \n # Replace all urls with a urlplaceholder string\n url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n \n # Extract all the urls from the provided text \n detected_urls = re.findall(url_regex, text)\n \n # Replace url with a url placeholder string\n for detected_url in detected_urls:\n text = text.replace(detected_url, url_place_holder_string)\n \n # Tokenize text:Split text into words using NLTK\n words = word_tokenize(text)\n \n # Remove stop words\n stop = stopwords.words(\"english\")\n words = [t for t in words if t not in stop]\n \n # lemmatize as shown in the lesson\n lemmatizer = WordNetLemmatizer()\n clean_tokens = [lemmatizer.lemmatize(w) for w in words]\n return clean_tokens",
"_____no_output_____"
]
],
[
[
"### 3. Build a machine learning pipeline\nThis machine pipeline should take in the `message` column as input and output classification results on the other 36 categories in the dataset. You may find the [MultiOutputClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.multioutput.MultiOutputClassifier.html) helpful for predicting multiple target variables.",
"_____no_output_____"
]
],
[
[
"# Create a instance for RandomFrorestClassifier()\nestimator_rf = MultiOutputClassifier(RandomForestClassifier())\n\npipeline_rf = Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', estimator_rf)\n ])",
"_____no_output_____"
]
],
[
[
"### 4. Train pipeline\n- Split data into train and test sets\n- Train pipeline",
"_____no_output_____"
]
],
[
[
"# Train-test splitting\nX_train, X_test, y_train, y_test = train_test_split(X, y)\npipeline_rf.fit(X_train, y_train)",
"_____no_output_____"
]
],
[
[
"### 5. Test your model\nReport the f1 score, precision and recall for each output category of the dataset. You can do this by iterating through the columns and calling sklearn's `classification_report` on each.",
"_____no_output_____"
]
],
[
[
"# Get results and add them to a dataframe.\ndef print_results(y_test, y_pred):\n # declare an empty dataframe to store the results\n results = pd.DataFrame(columns=['Category', 'f_score', 'precision', 'recall'])\n num = 0\n for col in y_test.columns:\n precision, recall, f_score, support = precision_recall_fscore_support(y_test[col], y_pred[:,num], average='weighted')\n results.set_value(num+1, 'Category', col)\n results.set_value(num+1, 'f_score', f_score)\n results.set_value(num+1, 'precision', precision)\n results.set_value(num+1, 'recall', recall)\n num += 1\n print('Aggregated f_score:', results['f_score'].mean())\n print('Aggregated precision:', results['precision'].mean())\n print('Aggregated recall:', results['recall'].mean())\n return results",
"_____no_output_____"
],
[
"# Perform prediction\ny_pred = pipeline_rf.predict(X_test)\n",
"_____no_output_____"
],
[
"category_names = y_test.columns",
"_____no_output_____"
],
[
"print(classification_report(y_test, y_pred, target_names=category_names))",
"_____no_output_____"
],
[
"# Printing the classification report for each output category\nresults = print_results(y_test, y_pred)\nresults",
"_____no_output_____"
]
],
[
[
"### 6. Improve your model\nUse grid search to find better parameters. ",
"_____no_output_____"
]
],
[
[
"# Show parameters for the pipline\npipeline_rf.get_params()",
"_____no_output_____"
],
[
"# Using grid search\n# Create Grid search parameters for Random Forest Classifier \nparameters_rf = {'clf__estimator__max_depth': [10, 50, None],\n 'clf__estimator__min_samples_leaf':[1, 2, 5, 10],\n 'clf__estimator__n_estimators': [10, 20]}\n\ncv_rf = GridSearchCV(pipeline_rf, param_grid = parameters_rf)",
"_____no_output_____"
]
],
[
[
"### 7. Test your model\nShow the accuracy, precision, and recall of the tuned model. \n\nSince this project focuses on code quality, process, and pipelines, there is no minimum performance metric needed to pass. However, make sure to fine tune your models for accuracy, precision and recall to make your project stand out - especially for your portfolio!",
"_____no_output_____"
]
],
[
[
"cv_rf.fit(X_train, y_train)\ny_pred = cv_rf.predict(X_test)\nresults2 = print_results(y_test, y_pred)\nresults2",
"_____no_output_____"
]
],
[
[
"### 8. Try improving your model further. Here are a few ideas:\n* try other machine learning algorithms\n* add other features besides the TF-IDF",
"_____no_output_____"
]
],
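[
[
"# Illustrative sketch, not part of the original notebook: one way to add a\n# feature besides TF-IDF is a custom transformer combined via FeatureUnion.\n# The `StartingVerbExtractor` name and its details are assumptions for illustration.\nnltk.download('averaged_perceptron_tagger')\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.pipeline import FeatureUnion\n\nclass StartingVerbExtractor(BaseEstimator, TransformerMixin):\n    \"\"\"Adds a boolean feature: does the message start with a verb?\"\"\"\n    def starting_verb(self, text):\n        for sentence in nltk.sent_tokenize(text):\n            pos_tags = nltk.pos_tag(tokenize(sentence))\n            if len(pos_tags) == 0:\n                continue\n            first_word, first_tag = pos_tags[0]\n            if first_tag in ['VB', 'VBP'] or first_word == 'RT':\n                return True\n        return False\n\n    def fit(self, x, y=None):\n        return self\n\n    def transform(self, X):\n        # one boolean feature per message\n        X_tagged = pd.Series(X).apply(self.starting_verb)\n        return pd.DataFrame(X_tagged)\n\npipeline_fu = Pipeline([\n    ('features', FeatureUnion([\n        ('text_pipeline', Pipeline([\n            ('vect', CountVectorizer(tokenizer=tokenize)),\n            ('tfidf', TfidfTransformer())\n        ])),\n        ('starting_verb', StartingVerbExtractor())\n    ])),\n    ('clf', MultiOutputClassifier(RandomForestClassifier()))\n])",
"_____no_output_____"
]
],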
[
[
"# testing a pure decision tree classifier\nestimator_ada_boost = MultiOutputClassifier(AdaBoostClassifier())\n\npipeline_ada = Pipeline([\n ('vect', CountVectorizer(tokenizer = tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', estimator_ada_boost)\n ])",
"_____no_output_____"
],
[
"pipeline_ada.get_params()",
"_____no_output_____"
],
[
"parameters_ada = {\n 'tfidf__use_idf': (True, False),\n 'clf__estimator__n_estimators': [50, 60, 70]\n}\n\ncv_ada = GridSearchCV(pipeline_ada, param_grid = parameters_ada)\n\ncv_ada.fit(X_train, y_train)\ny_pred_ada = cv_ada.predict(X_test)\nresults3 = print_results(y_test, y_pred_ada)\nresults3",
"_____no_output_____"
]
],
[
[
"### 9. Export your model as a pickle file",
"_____no_output_____"
]
],
[
[
"pickle.dump(cv, open('model.pkl', 'wb'))",
"_____no_output_____"
]
],
[
[
"### 10. Use this notebook to complete `train.py`\nUse the template file attached in the Resources folder to write a script that runs the steps above to create a database and export a model based on a new dataset specified by the user.",
"_____no_output_____"
]
]
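,
[
[
"# Minimal sketch of how `train.py` could wire the steps above together; the\n# database table name and the CLI shape are assumptions, not the official template.\nimport sys\n\ndef load_data(database_filepath):\n    engine = create_engine('sqlite:///' + database_filepath)\n    df = pd.read_sql_table('InsertTableName', engine)\n    X = df['message']\n    y = df[df.columns[5:]]\n    return X, y, y.columns\n\ndef main():\n    if len(sys.argv) != 3:\n        print('Usage: python train.py <database_filepath> <model_filepath>')\n        return\n    database_filepath, model_filepath = sys.argv[1:]\n    X, y, category_names = load_data(database_filepath)\n    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n    model = GridSearchCV(pipeline_rf, param_grid=parameters_rf)\n    model.fit(X_train, y_train)\n    y_pred = model.predict(X_test)\n    print(classification_report(y_test, y_pred, target_names=category_names))\n    pickle.dump(model, open(model_filepath, 'wb'))\n\n# in the script itself this would be guarded by `if __name__ == '__main__': main()`",
"_____no_output_____"
]
]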
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e71604767cacf2c03b641dacb36e19f788755e1a | 279,675 | ipynb | Jupyter Notebook | Chapter4_TheGreatestTheoremNeverTold/Ch4_LawOfLargeNumbers_PyMC3.ipynb | sean578/BayesianMethodsForHackers | 96b71da3dbab75b96ef94a433db7892baa9178c4 | [
"MIT"
] | null | null | null | Chapter4_TheGreatestTheoremNeverTold/Ch4_LawOfLargeNumbers_PyMC3.ipynb | sean578/BayesianMethodsForHackers | 96b71da3dbab75b96ef94a433db7892baa9178c4 | [
"MIT"
] | null | null | null | Chapter4_TheGreatestTheoremNeverTold/Ch4_LawOfLargeNumbers_PyMC3.ipynb | sean578/BayesianMethodsForHackers | 96b71da3dbab75b96ef94a433db7892baa9178c4 | [
"MIT"
] | null | null | null | 265.598291 | 68,088 | 0.900828 | [
[
[
"# Chapter 4\n`Original content created by Cam Davidson-Pilon`\n\n`Ported to Python 3 and PyMC3 by Max Margenot (@clean_utensils) and Thomas Wiecki (@twiecki) at Quantopian (@quantopian)`\n\n______\n\n## The greatest theorem never told\n\n\nThis chapter focuses on an idea that is always bouncing around our minds, but is rarely made explicit outside books devoted to statistics. In fact, we've been using this simple idea in every example thus far. ",
"_____no_output_____"
],
[
"### The Law of Large Numbers\n\nLet $Z_i$ be $N$ independent samples from some probability distribution. According to *the Law of Large numbers*, so long as the expected value $E[Z]$ is finite, the following holds,\n\n$$\\frac{1}{N} \\sum_{i=1}^N Z_i \\rightarrow E[ Z ], \\;\\;\\; N \\rightarrow \\infty.$$\n\nIn words:\n\n> The average of a sequence of random variables from the same distribution converges to the expected value of that distribution.\n\nThis may seem like a boring result, but it will be the most useful tool you use.",
"_____no_output_____"
],
[
"### Intuition \n\nIf the above Law is somewhat surprising, it can be made more clear by examining a simple example. \n\nConsider a random variable $Z$ that can take only two values, $c_1$ and $c_2$. Suppose we have a large number of samples of $Z$, denoting a specific sample $Z_i$. The Law says that we can approximate the expected value of $Z$ by averaging over all samples. Consider the average:\n\n\n$$ \\frac{1}{N} \\sum_{i=1}^N \\;Z_i $$\n\n\nBy construction, $Z_i$ can only take on $c_1$ or $c_2$, hence we can partition the sum over these two values:\n\n\\begin{align}\n\\frac{1}{N} \\sum_{i=1}^N \\;Z_i\n& =\\frac{1}{N} \\big( \\sum_{ Z_i = c_1}c_1 + \\sum_{Z_i=c_2}c_2 \\big) \\\\\\\\[5pt]\n& = c_1 \\sum_{ Z_i = c_1}\\frac{1}{N} + c_2 \\sum_{ Z_i = c_2}\\frac{1}{N} \\\\\\\\[5pt]\n& = c_1 \\times \\text{ (approximate frequency of $c_1$) } \\\\\\\\ \n& \\;\\;\\;\\;\\;\\;\\;\\;\\; + c_2 \\times \\text{ (approximate frequency of $c_2$) } \\\\\\\\[5pt]\n& \\approx c_1 \\times P(Z = c_1) + c_2 \\times P(Z = c_2 ) \\\\\\\\[5pt]\n& = E[Z]\n\\end{align}\n\n\nEquality holds in the limit, but we can get closer and closer by using more and more samples in the average. This Law holds for almost *any distribution*, minus some important cases we will encounter later.\n\n##### Example\n____\n\n\nBelow is a diagram of the Law of Large numbers in action for three different sequences of Poisson random variables. \n\n We sample `sample_size = 100000` Poisson random variables with parameter $\\lambda = 4.5$. (Recall the expected value of a Poisson random variable is equal to its parameter.) We calculate the average for the first $n$ samples, for $n=1$ to `sample_size`. ",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nimport numpy as np\nfrom IPython.core.pylabtools import figsize\nimport matplotlib.pyplot as plt\n\nfigsize( 12.5, 5 )\n\nsample_size = 100000\nexpected_value = lambda_ = 4.5\npoi = np.random.poisson\nN_samples = range(1,sample_size,100)\n\nfor k in range(3):\n\n samples = poi( lambda_, sample_size ) \n \n partial_average = [ samples[:i].mean() for i in N_samples ]\n \n plt.plot( N_samples, partial_average, lw=1.5,label=\"average \\\nof $n$ samples; seq. %d\"%k)\n \n\nplt.plot( N_samples, expected_value*np.ones_like( partial_average), \n ls = \"--\", label = \"true expected value\", c = \"k\" )\n\nplt.ylim( 4.35, 4.65) \nplt.title( \"Convergence of the average of \\n random variables to its \\\nexpected value\" )\nplt.ylabel( \"average of $n$ samples\" )\nplt.xlabel( \"# of samples, $n$\")\nplt.legend();",
"_____no_output_____"
]
],
[
[
"Looking at the above plot, it is clear that when the sample size is small, there is greater variation in the average (compare how *jagged and jumpy* the average is initially, then *smooths* out). All three paths *approach* the value 4.5, but just flirt with it as $N$ gets large. Mathematicians and statistician have another name for *flirting*: convergence. \n\nAnother very relevant question we can ask is *how quickly am I converging to the expected value?* Let's plot something new. For a specific $N$, let's do the above trials thousands of times and compute how far away we are from the true expected value, on average. But wait — *compute on average*? This is simply the law of large numbers again! For example, we are interested in, for a specific $N$, the quantity:\n\n$$D(N) = \\sqrt{ \\;E\\left[\\;\\; \\left( \\frac{1}{N}\\sum_{i=1}^NZ_i - 4.5 \\;\\right)^2 \\;\\;\\right] \\;\\;}$$\n\nThe above formulae is interpretable as a distance away from the true value (on average), for some $N$. (We take the square root so the dimensions of the above quantity and our random variables are the same). As the above is an expected value, it can be approximated using the law of large numbers: instead of averaging $Z_i$, we calculate the following multiple times and average them:\n\n$$ Y_k = \\left( \\;\\frac{1}{N}\\sum_{i=1}^NZ_i - 4.5 \\; \\right)^2 $$\n\nBy computing the above many, $N_y$, times (remember, it is random), and averaging them:\n\n$$ \\frac{1}{N_Y} \\sum_{k=1}^{N_Y} Y_k \\rightarrow E[ Y_k ] = E\\;\\left[\\;\\; \\left( \\frac{1}{N}\\sum_{i=1}^NZ_i - 4.5 \\;\\right)^2 \\right]$$\n\nFinally, taking the square root:\n\n$$ \\sqrt{\\frac{1}{N_Y} \\sum_{k=1}^{N_Y} Y_k} \\approx D(N) $$ ",
"_____no_output_____"
]
],
[
[
"figsize( 12.5, 4)\n\nN_Y = 250 #use this many to approximate D(N)\nN_array = np.arange( 1000, 50000, 2500 ) #use this many samples in the approx. to the variance.\nD_N_results = np.zeros( len( N_array ) )\n\nlambda_ = 4.5 \nexpected_value = lambda_ #for X ~ Poi(lambda) , E[ X ] = lambda\n\ndef D_N( n ):\n \"\"\"\n This function approx. D_n, the average variance of using n samples.\n \"\"\"\n Z = poi( lambda_, (n, N_Y) )\n average_Z = Z.mean(axis=0)\n return np.sqrt( ( (average_Z - expected_value)**2 ).mean() )\n \n \nfor i,n in enumerate(N_array):\n D_N_results[i] = D_N(n)\n\n\nplt.xlabel( \"$N$\" )\nplt.ylabel( \"expected squared-distance from true value\" )\nplt.plot(N_array, D_N_results, lw = 3, \n label=\"expected distance between\\n\\\nexpected value and \\naverage of $N$ random variables.\")\nplt.plot( N_array, np.sqrt(expected_value)/np.sqrt(N_array), lw = 2, ls = \"--\", \n label = r\"$\\frac{\\sqrt{\\lambda}}{\\sqrt{N}}$\" )\nplt.legend()\nplt.title( \"How 'fast' is the sample average converging? \" );",
"_____no_output_____"
]
],
[
[
"As expected, the expected distance between our sample average and the actual expected value shrinks as $N$ grows large. But also notice that the *rate* of convergence decreases, that is, we need only 10 000 additional samples to move from 0.020 to 0.015, a difference of 0.005, but *20 000* more samples to again decrease from 0.015 to 0.010, again only a 0.005 decrease.\n\n\nIt turns out we can measure this rate of convergence. Above I have plotted a second line, the function $\\sqrt{\\lambda}/\\sqrt{N}$. This was not chosen arbitrarily. In most cases, given a sequence of random variable distributed like $Z$, the rate of convergence to $E[Z]$ of the Law of Large Numbers is \n\n$$ \\frac{ \\sqrt{ \\; Var(Z) \\; } }{\\sqrt{N} }$$\n\nThis is useful to know: for a given large $N$, we know (on average) how far away we are from the estimate. On the other hand, in a Bayesian setting, this can seem like a useless result: Bayesian analysis is OK with uncertainty so what's the *statistical* point of adding extra precise digits? Though drawing samples can be so computationally cheap that having a *larger* $N$ is fine too. \n\n### How do we compute $Var(Z)$ though?\n\nThe variance is simply another expected value that can be approximated! Consider the following, once we have the expected value (by using the Law of Large Numbers to estimate it, denote it $\\mu$), we can estimate the variance:\n\n$$ \\frac{1}{N}\\sum_{i=1}^N \\;(Z_i - \\mu)^2 \\rightarrow E[ \\;( Z - \\mu)^2 \\;] = Var( Z )$$\n\n### Expected values and probabilities \nThere is an even less explicit relationship between expected value and estimating probabilities. Define the *indicator function*\n\n$$\\mathbb{1}_A(x) = \n\\begin{cases} 1 & x \\in A \\\\\\\\\n 0 & else\n\\end{cases}\n$$\nThen, by the law of large numbers, if we have many samples $X_i$, we can estimate the probability of an event $A$, denoted $P(A)$, by:\n\n$$ \\frac{1}{N} \\sum_{i=1}^N \\mathbb{1}_A(X_i) \\rightarrow E[\\mathbb{1}_A(X)] = P(A) $$\n\nAgain, this is fairly obvious after a moments thought: the indicator function is only 1 if the event occurs, so we are summing only the times the event occurs and dividing by the total number of trials (consider how we usually approximate probabilities using frequencies). For example, suppose we wish to estimate the probability that a $Z \\sim Exp(.5)$ is greater than 5, and we have many samples from a $Exp(.5)$ distribution. \n\n\n$$ P( Z > 5 ) = \\frac{1}{N}\\sum_{i=1}^N \\mathbb{1}_{z > 5 }(Z_i) $$\n",
"_____no_output_____"
]
],
[
[
"N = 10000\nprint( np.mean( [ np.random.exponential( 0.5 ) > 2.0 for i in range(N) ] ) )",
"0.0193\n"
]
],
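[
[
"# Sketch, not in the original notebook: estimating Var(Z) with the same trick.\n# First estimate mu with the sample average, then average the squared deviations.\nZ = poi( lambda_, sample_size )\nmu = Z.mean()                           # estimate of E[Z]\nvar_estimate = ( (Z - mu)**2 ).mean()   # estimate of Var(Z)\nprint( mu, var_estimate )               # both should be close to lambda_ = 4.5",
"_____no_output_____"
]
],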
[
[
"### What does this all have to do with Bayesian statistics? \n\n\n*Point estimates*, to be introduced in the next chapter, in Bayesian inference are computed using expected values. In more analytical Bayesian inference, we would have been required to evaluate complicated expected values represented as multi-dimensional integrals. No longer. If we can sample from the posterior distribution directly, we simply need to evaluate averages. Much easier. If accuracy is a priority, plots like the ones above show how fast you are converging. And if further accuracy is desired, just take more samples from the posterior. \n\nWhen is enough enough? When can you stop drawing samples from the posterior? That is the practitioners decision, and also dependent on the variance of the samples (recall from above a high variance means the average will converge slower). \n\nWe also should understand when the Law of Large Numbers fails. As the name implies, and comparing the graphs above for small $N$, the Law is only true for large sample sizes. Without this, the asymptotic result is not reliable. Knowing in what situations the Law fails can give us *confidence in how unconfident we should be*. The next section deals with this issue.",
"_____no_output_____"
],
[
"## The Disorder of Small Numbers\n\nThe Law of Large Numbers is only valid as $N$ gets *infinitely* large: never truly attainable. While the law is a powerful tool, it is foolhardy to apply it liberally. Our next example illustrates this.\n\n\n##### Example: Aggregated geographic data\n\n\nOften data comes in aggregated form. For instance, data may be grouped by state, county, or city level. Of course, the population numbers vary per geographic area. If the data is an average of some characteristic of each the geographic areas, we must be conscious of the Law of Large Numbers and how it can *fail* for areas with small populations.\n\nWe will observe this on a toy dataset. Suppose there are five thousand counties in our dataset. Furthermore, population number in each state are uniformly distributed between 100 and 1500. The way the population numbers are generated is irrelevant to the discussion, so we do not justify this. We are interested in measuring the average height of individuals per county. Unbeknownst to us, height does **not** vary across county, and each individual, regardless of the county he or she is currently living in, has the same distribution of what their height may be:\n\n$$ \\text{height} \\sim \\text{Normal}(150, 15 ) $$\n\nWe aggregate the individuals at the county level, so we only have data for the *average in the county*. What might our dataset look like?",
"_____no_output_____"
]
],
[
[
"figsize( 12.5, 4) \nstd_height = 15\nmean_height = 150\n\nn_counties = 5000\npop_generator = np.random.randint\nnorm = np.random.normal\n\n#generate some artificial population numbers\npopulation = pop_generator(100, 1500, n_counties )\n\naverage_across_county = np.zeros( n_counties )\nfor i in range( n_counties ):\n #generate some individuals and take the mean\n average_across_county[i] = norm(mean_height, 1./std_height,\n population[i] ).mean()\n \n#located the counties with the apparently most extreme average heights.\ni_min = np.argmin( average_across_county )\ni_max = np.argmax( average_across_county )\n\n#plot population size vs. recorded average\nplt.scatter( population, average_across_county, alpha = 0.5, c=\"#7A68A6\")\nplt.scatter( [ population[i_min], population[i_max] ], \n [average_across_county[i_min], average_across_county[i_max] ],\n s = 60, marker = \"o\", facecolors = \"none\",\n edgecolors = \"#A60628\", linewidths = 1.5, \n label=\"extreme heights\")\n\nplt.xlim( 100, 1500 )\nplt.title( \"Average height vs. County Population\")\nplt.xlabel(\"County Population\")\nplt.ylabel(\"Average height in county\")\nplt.plot( [100, 1500], [150, 150], color = \"k\", label = \"true expected \\\nheight\", ls=\"--\" )\nplt.legend(scatterpoints = 1);",
"_____no_output_____"
]
],
[
[
"What do we observe? *Without accounting for population sizes* we run the risk of making an enormous inference error: if we ignored population size, we would say that the county with the shortest and tallest individuals have been correctly circled. But this inference is wrong for the following reason. These two counties do *not* necessarily have the most extreme heights. The error results from the calculated average of smaller populations not being a good reflection of the true expected value of the population (which in truth should be $\\mu =150$). The sample size/population size/$N$, whatever you wish to call it, is simply too small to invoke the Law of Large Numbers effectively. \n\nWe provide more damning evidence against this inference. Recall the population numbers were uniformly distributed over 100 to 1500. Our intuition should tell us that the counties with the most extreme population heights should also be uniformly spread over 100 to 1500, and certainly independent of the county's population. Not so. Below are the population sizes of the counties with the most extreme heights.",
"_____no_output_____"
]
],
[
[
"print(\"Population sizes of 10 'shortest' counties: \")\nprint(population[ np.argsort( average_across_county )[:10] ], '\\n')\nprint(\"Population sizes of 10 'tallest' counties: \")\nprint(population[ np.argsort( -average_across_county )[:10] ])",
"Population sizes of 10 'shortest' counties: \n[119 123 116 118 103 100 253 171 458 101] \n\nPopulation sizes of 10 'tallest' counties: \n[164 116 218 130 129 131 224 154 179 142]\n"
]
],
[
[
"Not at all uniform over 100 to 1500. This is an absolute failure of the Law of Large Numbers. \n\n##### Example: Kaggle's *U.S. Census Return Rate Challenge*\n\nBelow is data from the 2010 US census, which partitions populations beyond counties to the level of block groups (which are aggregates of city blocks or equivalents). The dataset is from a Kaggle machine learning competition some colleagues and I participated in. The objective was to predict the census letter mail-back rate of a group block, measured between 0 and 100, using census variables (median income, number of females in the block-group, number of trailer parks, average number of children etc.). Below we plot the census mail-back rate versus block group population:",
"_____no_output_____"
]
],
[
[
"figsize( 12.5, 6.5 )\ndata = np.genfromtxt( \"./data/census_data.csv\", skip_header=1, \n delimiter= \",\")\nplt.scatter( data[:,1], data[:,0], alpha = 0.5, c=\"#7A68A6\")\nplt.title(\"Census mail-back rate vs Population\")\nplt.ylabel(\"Mail-back rate\")\nplt.xlabel(\"population of block-group\")\nplt.xlim(-100, 15e3 )\nplt.ylim( -5, 105)\n\ni_min = np.argmin( data[:,0] )\ni_max = np.argmax( data[:,0] )\n \nplt.scatter( [ data[i_min,1], data[i_max, 1] ], \n [ data[i_min,0], data[i_max,0] ],\n s = 60, marker = \"o\", facecolors = \"none\",\n edgecolors = \"#A60628\", linewidths = 1.5, \n label=\"most extreme points\")\n\nplt.legend(scatterpoints = 1);",
"_____no_output_____"
]
],
[
[
"The above is a classic phenomenon in statistics. I say *classic* referring to the \"shape\" of the scatter plot above. It follows a classic triangular form, that tightens as we increase the sample size (as the Law of Large Numbers becomes more exact). \n\nI am perhaps overstressing the point and maybe I should have titled the book *\"You don't have big data problems!\"*, but here again is an example of the trouble with *small datasets*, not big ones. Simply, small datasets cannot be processed using the Law of Large Numbers. Compare with applying the Law without hassle to big datasets (ex. big data). I mentioned earlier that paradoxically big data prediction problems are solved by relatively simple algorithms. The paradox is partially resolved by understanding that the Law of Large Numbers creates solutions that are *stable*, i.e. adding or subtracting a few data points will not affect the solution much. On the other hand, adding or removing data points to a small dataset can create very different results. \n\nFor further reading on the hidden dangers of the Law of Large Numbers, I would highly recommend the excellent manuscript [The Most Dangerous Equation](http://nsm.uh.edu/~dgraur/niv/TheMostDangerousEquation.pdf). ",
"_____no_output_____"
],
[
"##### Example: How to order Reddit submissions\n\nYou may have disagreed with the original statement that the Law of Large numbers is known to everyone, but only implicitly in our subconscious decision making. Consider ratings on online products: how often do you trust an average 5-star rating if there is only 1 reviewer? 2 reviewers? 3 reviewers? We implicitly understand that with such few reviewers that the average rating is **not** a good reflection of the true value of the product.\n\nThis has created flaws in how we sort items, and more generally, how we compare items. Many people have realized that sorting online search results by their rating, whether the objects be books, videos, or online comments, return poor results. Often the seemingly top videos or comments have perfect ratings only from a few enthusiastic fans, and truly more quality videos or comments are hidden in later pages with *falsely-substandard* ratings of around 4.8. How can we correct this?\n\nConsider the popular site Reddit (I purposefully did not link to the website as you would never come back). The site hosts links to stories or images, called submissions, for people to comment on. Redditors can vote up or down on each submission (called upvotes and downvotes). Reddit, by default, will sort submissions to a given subreddit by Hot, that is, the submissions that have the most upvotes recently.\n\n<img src=\"http://i.imgur.com/3v6bz9f.png\" />\n\n\nHow would you determine which submissions are the best? There are a number of ways to achieve this:\n\n1. *Popularity*: A submission is considered good if it has many upvotes. A problem with this model is that a submission with hundreds of upvotes, but thousands of downvotes. While being very *popular*, the submission is likely more controversial than best.\n2. *Difference*: Using the *difference* of upvotes and downvotes. This solves the above problem, but fails when we consider the temporal nature of submission. Depending on when a submission is posted, the website may be experiencing high or low traffic. The difference method will bias the *Top* submissions to be the those made during high traffic periods, which have accumulated more upvotes than submissions that were not so graced, but are not necessarily the best.\n3. *Time adjusted*: Consider using Difference divided by the age of the submission. This creates a *rate*, something like *difference per second*, or *per minute*. An immediate counter-example is, if we use per second, a 1 second old submission with 1 upvote would be better than a 100 second old submission with 99 upvotes. One can avoid this by only considering at least t second old submission. But what is a good t value? Does this mean no submission younger than t is good? We end up comparing unstable quantities with stable quantities (young vs. old submissions).\n3. *Ratio*: Rank submissions by the ratio of upvotes to total number of votes (upvotes plus downvotes). This solves the temporal issue, such that new submissions who score well can be considered Top just as likely as older submissions, provided they have many upvotes to total votes. The problem here is that a submission with a single upvote (ratio = 1.0) will beat a submission with 999 upvotes and 1 downvote (ratio = 0.999), but clearly the latter submission is *more likely* to be better.\n\nI used the phrase *more likely* for good reason. It is possible that the former submission, with a single upvote, is in fact a better submission than the later with 999 upvotes. 
The hesitation to agree with this is because we have not seen the other 999 potential votes the former submission might get. Perhaps it will achieve an additional 999 upvotes and 0 downvotes and be considered better than the latter, though not likely.\n\nWhat we really want is an estimate of the *true upvote ratio*. Note that the true upvote ratio is not the same as the observed upvote ratio: the true upvote ratio is hidden, and we only observe upvotes vs. downvotes (one can think of the true upvote ratio as \"what is the underlying probability someone gives this submission a upvote, versus a downvote\"). So the 999 upvote/1 downvote submission probably has a true upvote ratio close to 1, which we can assert with confidence thanks to the Law of Large Numbers, but on the other hand we are much less certain about the true upvote ratio of the submission with only a single upvote. Sounds like a Bayesian problem to me.\n\n",
"_____no_output_____"
],
[
"One way to determine a prior on the upvote ratio is to look at the historical distribution of upvote ratios. This can be accomplished by scraping Reddit's submissions and determining a distribution. There are a few problems with this technique though:\n\n1. Skewed data: The vast majority of submissions have very few votes, hence there will be many submissions with ratios near the extremes (see the \"triangular plot\" in the above Kaggle dataset), effectively skewing our distribution to the extremes. One could try to only use submissions with votes greater than some threshold. Again, problems are encountered. There is a tradeoff between number of submissions available to use and a higher threshold with associated ratio precision. \n2. Biased data: Reddit is composed of different subpages, called subreddits. Two examples are *r/aww*, which posts pics of cute animals, and *r/politics*. It is very likely that the user behaviour towards submissions of these two subreddits are very different: visitors are likely friendly and affectionate in the former, and would therefore upvote submissions more, compared to the latter, where submissions are likely to be controversial and disagreed upon. Therefore not all submissions are the same. \n\n\nIn light of these, I think it is better to use a `Uniform` prior.\n\n\nWith our prior in place, we can find the posterior of the true upvote ratio. The Python script `top_showerthoughts_submissions.py` will scrape the best posts from the `showerthoughts` community on Reddit. This is a text-only community so the title of each post *is* the post. Below is the top post as well as some other sample posts:",
"_____no_output_____"
]
],
[
[
"#adding a number to the end of the %run call will get the ith top post.\n%run top_showerthoughts_submissions.py 2\n\nprint(\"Post contents: \\n\")\nprint(top_post)",
"here\n0\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n19\n20\n21\n22\n23\n24\n25\n26\n27\n28\n29\n30\n31\n32\n33\n34\n35\n36\n37\n38\n39\n40\n41\n42\n43\n44\n45\n46\n47\n48\n49\n50\n51\n52\n53\n54\n55\n56\n57\n58\n59\n60\n61\n62\n63\n64\n65\n66\n67\n68\n69\n70\n71\n72\n73\n74\n75\n76\n77\n78\n79\n80\n81\n82\n83\n84\n85\n86\n87\n88\n89\n90\n91\n92\n93\n94\n95\n96\n97\nPost contents: \n\nFinding food in the forest is easy, the problem is finding poison is 20 times easier\n"
],
[
"\"\"\"\ncontents: an array of the text from the last 100 top submissions to a subreddit\nvotes: a 2d numpy array of upvotes, downvotes for each submission.\n\"\"\"\nn_submissions = len(votes)\nsubmissions = np.random.randint( n_submissions, size=4)\nprint(\"Some Submissions (out of %d total) \\n-----------\"%n_submissions)\nfor i in submissions:\n print('\"' + contents[i] + '\"')\n print(\"upvotes/downvotes: \",votes[i,:], \"\\n\")",
"Some Submissions (out of 1 total) \n-----------\n\"We teach small children naps are good, but it takes 1-2 decades to understand how true that is.\"\nupvotes/downvotes: [37 1] \n\n\"We teach small children naps are good, but it takes 1-2 decades to understand how true that is.\"\nupvotes/downvotes: [37 1] \n\n\"We teach small children naps are good, but it takes 1-2 decades to understand how true that is.\"\nupvotes/downvotes: [37 1] \n\n\"We teach small children naps are good, but it takes 1-2 decades to understand how true that is.\"\nupvotes/downvotes: [37 1] \n\n"
]
],
[
[
" For a given true upvote ratio $p$ and $N$ votes, the number of upvotes will look like a Binomial random variable with parameters $p$ and $N$. (This is because of the equivalence between upvote ratio and probability of upvoting versus downvoting, out of $N$ possible votes/trials). We create a function that performs Bayesian inference on $p$, for a particular submission's upvote/downvote pair.",
"_____no_output_____"
]
],
[
[
"import pymc3 as pm\n\ndef posterior_upvote_ratio( upvotes, downvotes, samples = 20000):\n \"\"\"\n This function accepts the number of upvotes and downvotes a particular submission recieved, \n and the number of posterior samples to return to the user. Assumes a uniform prior.\n \"\"\"\n N = upvotes + downvotes\n with pm.Model() as model:\n upvote_ratio = pm.Uniform(\"upvote_ratio\", 0, 1)\n observations = pm.Binomial( \"obs\", N, upvote_ratio, observed=upvotes)\n \n trace = pm.sample(samples, step=pm.Metropolis())\n \n burned_trace = trace[int(samples/4):]\n return burned_trace[\"upvote_ratio\"]\n ",
"_____no_output_____"
]
],
[
[
"Below are the resulting posterior distributions.",
"_____no_output_____"
]
],
[
[
"figsize( 11., 8)\nposteriors = []\ncolours = [\"#348ABD\", \"#A60628\", \"#7A68A6\", \"#467821\", \"#CF4457\"]\nfor i in range(len(submissions)):\n j = submissions[i]\n posteriors.append( posterior_upvote_ratio( votes[j, 0], votes[j,1] ) )\n plt.hist( posteriors[i], bins = 10, normed = True, alpha = .9, \n histtype=\"step\",color = colours[i%5], lw = 3,\n label = '(%d up:%d down)\\n%s...'%(votes[j, 0], votes[j,1], contents[j][:50]) )\n plt.hist( posteriors[i], bins = 10, normed = True, alpha = .2, \n histtype=\"stepfilled\",color = colours[i], lw = 3, )\n \nplt.legend(loc=\"upper left\")\nplt.xlim( 0, 1)\nplt.title(\"Posterior distributions of upvote ratios on different submissions\");",
"_____no_output_____"
]
],
[
[
"Some distributions are very tight, others have very long tails (relatively speaking), expressing our uncertainty with what the true upvote ratio might be.\n\n### Sorting!\n\nWe have been ignoring the goal of this exercise: how do we sort the submissions from *best to worst*? Of course, we cannot sort distributions, we must sort scalar numbers. There are many ways to distill a distribution down to a scalar: expressing the distribution through its expected value, or mean, is one way. Choosing the mean is a bad choice though. This is because the mean does not take into account the uncertainty of distributions.\n\nI suggest using the *95% least plausible value*, defined as the value such that there is only a 5% chance the true parameter is lower (think of the lower bound on the 95% credible region). Below are the posterior distributions with the 95% least-plausible value plotted:",
"_____no_output_____"
]
],
[
[
"N = posteriors[0].shape[0]\nlower_limits = []\n\nfor i in range(len(submissions)):\n j = submissions[i]\n plt.hist( posteriors[i], bins = 20, normed = True, alpha = .9, \n histtype=\"step\",color = colours[i], lw = 3,\n label = '(%d up:%d down)\\n%s...'%(votes[j, 0], votes[j,1], contents[j][:50]) )\n plt.hist( posteriors[i], bins = 20, normed = True, alpha = .2, \n histtype=\"stepfilled\",color = colours[i], lw = 3, )\n v = np.sort( posteriors[i] )[ int(0.05*N) ]\n #plt.vlines( v, 0, 15 , color = \"k\", alpha = 1, linewidths=3 )\n plt.vlines( v, 0, 10 , color = colours[i], linestyles = \"--\", linewidths=3 )\n lower_limits.append(v)\n plt.legend(loc=\"upper left\")\n\nplt.legend(loc=\"upper left\")\nplt.title(\"Posterior distributions of upvote ratios on different submissions\");\norder = np.argsort( -np.array( lower_limits ) )\nprint(order, lower_limits)",
"_____no_output_____"
]
],
[
[
"The best submissions, according to our procedure, are the submissions that are *most-likely* to score a high percentage of upvotes. Visually those are the submissions with the 95% least plausible value close to 1.\n\nWhy is sorting based on this quantity a good idea? By ordering by the 95% least plausible value, we are being the most conservative with what we think is best. When using the lower-bound of the 95% credible interval, we believe with high certainty that the 'true upvote ratio' is at the very least equal to this value (or greater), thereby ensuring that the best submissions are still on top. Under this ordering, we impose the following very natural properties:\n\n1. given two submissions with the same observed upvote ratio, we will assign the submission with more votes as better (since we are more confident it has a higher ratio).\n2. given two submissions with the same number of votes, we still assign the submission with more upvotes as *better*.\n\n### But this is too slow for real-time!\n\nI agree, computing the posterior of every submission takes a long time, and by the time you have computed it, likely the data has changed. I delay the mathematics to the appendix, but I suggest using the following formula to compute the lower bound very fast.\n\n$$ \\frac{a}{a + b} - 1.65\\sqrt{ \\frac{ab}{ (a+b)^2(a + b +1 ) } }$$\n\nwhere \n\\begin{align}\n& a = 1 + u \\\\\\\\\n& b = 1 + d \\\\\\\\\n\\end{align}\n\n$u$ is the number of upvotes, and $d$ is the number of downvotes. The formula is a shortcut in Bayesian inference, which will be further explained in Chapter 6 when we discuss priors in more detail.\n",
"_____no_output_____"
]
],
[
[
"def intervals(u,d):\n a = 1. + u\n b = 1. + d\n mu = a/(a+b)\n std_err = 1.65*np.sqrt( (a*b)/( (a+b)**2*(a+b+1.) ) )\n return ( mu, std_err )\n\nprint(\"Approximate lower bounds:\")\nposterior_mean, std_err = intervals(votes[:,0],votes[:,1])\nlb = posterior_mean - std_err\nprint(lb)\nprint(\"\\n\")\nprint(\"Top 40 Sorted according to approximate lower bounds:\")\nprint(\"\\n\")\norder = np.argsort( -lb )\nordered_contents = []\nfor i in order[:40]:\n ordered_contents.append( contents[i] )\n print(votes[i,0], votes[i,1], contents[i])\n print(\"-------------\")",
"_____no_output_____"
]
],
[
[
"We can view the ordering visually by plotting the posterior mean and bounds, and sorting by the lower bound. In the plot below, notice that the left error-bar is sorted (as we suggested this is the best way to determine an ordering), so the means, indicated by dots, do not follow any strong pattern. ",
"_____no_output_____"
]
],
[
[
"r_order = order[::-1][-40:]\nplt.errorbar( posterior_mean[r_order], np.arange( len(r_order) ), \n xerr=std_err[r_order], capsize=0, fmt=\"o\",\n color = \"#7A68A6\")\nplt.xlim( 0.3, 1)\nplt.yticks( np.arange( len(r_order)-1,-1,-1 ), map( lambda x: x[:30].replace(\"\\n\",\"\"), ordered_contents) );",
"_____no_output_____"
]
],
[
[
"In the graphic above, you can see why sorting by mean would be sub-optimal.",
"_____no_output_____"
],
[
"### Extension to Starred rating systems\n\nThe above procedure works well for upvote-downvotes schemes, but what about systems that use star ratings, e.g. 5 star rating systems. Similar problems apply with simply taking the average: an item with two perfect ratings would beat an item with thousands of perfect ratings, but a single sub-perfect rating. \n\n\nWe can consider the upvote-downvote problem above as binary: 0 is a downvote, 1 if an upvote. A $N$-star rating system can be seen as a more continuous version of above, and we can set $n$ stars rewarded is equivalent to rewarding $\\frac{n}{N}$. For example, in a 5-star system, a 2 star rating corresponds to 0.4. A perfect rating is a 1. We can use the same formula as before, but with $a,b$ defined differently:\n\n\n$$ \\frac{a}{a + b} - 1.65\\sqrt{ \\frac{ab}{ (a+b)^2(a + b +1 ) } }$$\n\nwhere \n\n\\begin{align}\n& a = 1 + S \\\\\\\\\n& b = 1 + N - S \\\\\\\\\n\\end{align}\n\nwhere $N$ is the number of users who rated, and $S$ is the sum of all the ratings, under the equivalence scheme mentioned above. ",
"_____no_output_____"
],
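[
"# Sketch, not in the original notebook: the lower bound for an N-star system,\n# using the normal approximation from `intervals` with a = 1 + S, b = 1 + N - S.\ndef star_lower_bound(ratings, n_stars=5):\n    ratings = np.asarray(ratings, dtype=float)\n    N = len(ratings)\n    S = (ratings / n_stars).sum()    # an n-star rating is rewarded n/n_stars\n    a = 1. + S\n    b = 1. + N - S\n    return a/(a + b) - 1.65*np.sqrt( (a*b)/( (a+b)**2*(a+b+1.) ) )\n\n# two perfect ratings vs. a thousand ratings, one of them 4 stars\nprint( star_lower_bound([5, 5]) )\nprint( star_lower_bound([5]*999 + [4]) )",
"_____no_output_____"
],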
[
"##### Example: Counting Github stars\n\nWhat is the average number of stars a Github repository has? How would you calculate this? There are over 6 million respositories, so there is more than enough data to invoke the Law of Large numbers. Let's start pulling some data. TODO",
"_____no_output_____"
],
[
"### Conclusion\n\nWhile the Law of Large Numbers is cool, it is only true so much as its name implies: with large sample sizes only. We have seen how our inference can be affected by not considering *how the data is shaped*. \n\n1. By (cheaply) drawing many samples from the posterior distributions, we can ensure that the Law of Large Number applies as we approximate expected values (which we will do in the next chapter).\n\n2. Bayesian inference understands that with small sample sizes, we can observe wild randomness. Our posterior distribution will reflect this by being more spread rather than tightly concentrated. Thus, our inference should be correctable.\n\n3. There are major implications of not considering the sample size, and trying to sort objects that are unstable leads to pathological orderings. The method provided above solves this problem.\n",
"_____no_output_____"
],
[
"### Appendix\n\n##### Derivation of sorting submissions formula\n\nBasically what we are doing is using a Beta prior (with parameters $a=1, b=1$, which is a uniform distribution), and using a Binomial likelihood with observations $u, N = u+d$. This means our posterior is a Beta distribution with parameters $a' = 1 + u, b' = 1 + (N - u) = 1+d$. We then need to find the value, $x$, such that 0.05 probability is less than $x$. This is usually done by inverting the CDF ([Cumulative Distribution Function](http://en.wikipedia.org/wiki/Cumulative_Distribution_Function)), but the CDF of the beta, for integer parameters, is known but is a large sum [3]. \n\nWe instead use a Normal approximation. The mean of the Beta is $\\mu = a'/(a'+b')$ and the variance is \n\n$$\\sigma^2 = \\frac{a'b'}{ (a' + b')^2(a'+b'+1) }$$\n\nHence we solve the following equation for $x$ and have an approximate lower bound. \n\n$$ 0.05 = \\Phi\\left( \\frac{(x - \\mu)}{\\sigma}\\right) $$ \n\n$\\Phi$ being the [cumulative distribution for the normal distribution](http://en.wikipedia.org/wiki/Normal_distribution#Cumulative_distribution)\n\n\n\n\n",
"_____no_output_____"
],
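[
"# Sketch, not in the original notebook: checking the appendix's normal\n# approximation against the exact Beta quantile from scipy, for u = 20, d = 5.\nimport scipy.stats as stats\nu, d = 20, 5\nexact = stats.beta.ppf( 0.05, 1. + u, 1. + d )     # exact 5% posterior quantile\nmu, std_err = intervals( np.array([u]), np.array([d]) )\nprint( exact, (mu - std_err)[0] )                  # the two should be close",
"_____no_output_____"
],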
[
"##### Exercises\n\n1\\. How would you estimate the quantity $E\\left[ \\cos{X} \\right]$, where $X \\sim \\text{Exp}(4)$? What about $E\\left[ \\cos{X} | X \\lt 1\\right]$, i.e. the expected value *given* we know $X$ is less than 1? Would you need more samples than the original samples size to be equally accurate?",
"_____no_output_____"
]
],
[
[
"## Enter code here\nimport scipy.stats as stats\nexp = stats.expon( scale=4 )\nN = 1e5\nX = exp.rvs( int(N) )\n## ...",
"_____no_output_____"
]
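,
[
"# One possible solution sketch, not from the original notebook:\nprint( np.cos(X).mean() )           # approximates E[cos X]\nprint( np.cos( X[X < 1] ).mean() )  # approximates E[cos X | X < 1]\n# Only a small fraction of the N draws satisfy X < 1, so more total samples\n# are needed for the conditional estimate to be equally accurate.",
"_____no_output_____"
]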
],
[
[
"2\\. The following table was located in the paper \"Going for Three: Predicting the Likelihood of Field Goal Success with Logistic Regression\" [2]. The table ranks football field-goal kickers by their percent of non-misses. What mistake have the researchers made?\n\n-----\n\n#### Kicker Careers Ranked by Make Percentage\n<table><tbody><tr><th>Rank </th><th>Kicker </th><th>Make % </th><th>Number of Kicks</th></tr><tr><td>1 </td><td>Garrett Hartley </td><td>87.7 </td><td>57</td></tr><tr><td>2</td><td> Matt Stover </td><td>86.8 </td><td>335</td></tr><tr><td>3 </td><td>Robbie Gould </td><td>86.2 </td><td>224</td></tr><tr><td>4 </td><td>Rob Bironas </td><td>86.1 </td><td>223</td></tr><tr><td>5</td><td> Shayne Graham </td><td>85.4 </td><td>254</td></tr><tr><td>… </td><td>… </td><td>…</td><td> </td></tr><tr><td>51</td><td> Dave Rayner </td><td>72.2 </td><td>90</td></tr><tr><td>52</td><td> Nick Novak </td><td>71.9 </td><td>64</td></tr><tr><td>53 </td><td>Tim Seder </td><td>71.0 </td><td>62</td></tr><tr><td>54 </td><td>Jose Cortez </td><td>70.7</td><td> 75</td></tr><tr><td>55 </td><td>Wade Richey </td><td>66.1</td><td> 56</td></tr></tbody></table>",
"_____no_output_____"
],
[
"In August 2013, [a popular post](http://bpodgursky.wordpress.com/2013/08/21/average-income-per-programming-language/) on the average income per programmer of different languages was trending. Here's the summary chart: (reproduced without permission, cause when you lie with stats, you gunna get the hammer). What do you notice about the extremes?\n\n------\n\n#### Average household income by programming language\n\n<table >\n <tr><td>Language</td><td>Average Household Income ($)</td><td>Data Points</td></tr>\n <tr><td>Puppet</td><td>87,589.29</td><td>112</td></tr>\n <tr><td>Haskell</td><td>89,973.82</td><td>191</td></tr>\n <tr><td>PHP</td><td>94,031.19</td><td>978</td></tr>\n <tr><td>CoffeeScript</td><td>94,890.80</td><td>435</td></tr>\n <tr><td>VimL</td><td>94,967.11</td><td>532</td></tr>\n <tr><td>Shell</td><td>96,930.54</td><td>979</td></tr>\n <tr><td>Lua</td><td>96,930.69</td><td>101</td></tr>\n <tr><td>Erlang</td><td>97,306.55</td><td>168</td></tr>\n <tr><td>Clojure</td><td>97,500.00</td><td>269</td></tr>\n <tr><td>Python</td><td>97,578.87</td><td>2314</td></tr>\n <tr><td>JavaScript</td><td>97,598.75</td><td>3443</td></tr>\n <tr><td>Emacs Lisp</td><td>97,774.65</td><td>355</td></tr>\n <tr><td>C#</td><td>97,823.31</td><td>665</td></tr>\n <tr><td>Ruby</td><td>98,238.74</td><td>3242</td></tr>\n <tr><td>C++</td><td>99,147.93</td><td>845</td></tr>\n <tr><td>CSS</td><td>99,881.40</td><td>527</td></tr>\n <tr><td>Perl</td><td>100,295.45</td><td>990</td></tr>\n <tr><td>C</td><td>100,766.51</td><td>2120</td></tr>\n <tr><td>Go</td><td>101,158.01</td><td>231</td></tr>\n <tr><td>Scala</td><td>101,460.91</td><td>243</td></tr>\n <tr><td>ColdFusion</td><td>101,536.70</td><td>109</td></tr>\n <tr><td>Objective-C</td><td>101,801.60</td><td>562</td></tr>\n <tr><td>Groovy</td><td>102,650.86</td><td>116</td></tr>\n <tr><td>Java</td><td>103,179.39</td><td>1402</td></tr>\n <tr><td>XSLT</td><td>106,199.19</td><td>123</td></tr>\n <tr><td>ActionScript</td><td>108,119.47</td><td>113</td></tr>\n</table>",
"_____no_output_____"
],
[
"### References\n\n1. Wainer, Howard. *The Most Dangerous Equation*. American Scientist, Volume 95.\n2. Clarck, Torin K., Aaron W. Johnson, and Alexander J. Stimpson. \"Going for Three: Predicting the Likelihood of Field Goal Success with Logistic Regression.\" (2013): n. page. [Web](http://www.sloansportsconference.com/wp-content/uploads/2013/Going%20for%20Three%20Predicting%20the%20Likelihood%20of%20Field%20Goal%20Success%20with%20Logistic%20Regression.pdf). 20 Feb. 2013.\n3. http://en.wikipedia.org/wiki/Beta_function#Incomplete_beta_function",
"_____no_output_____"
]
],
[
[
"from IPython.core.display import HTML\ndef css_styling():\n styles = open(\"../styles/custom.css\", \"r\").read()\n return HTML(styles)\ncss_styling()",
"_____no_output_____"
]
],
[
[
"<style>\n img{\n max-width:800px}\n</style>",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e7160f81313cb909eda8d3bb042ce62b7560bc7b | 64,737 | ipynb | Jupyter Notebook | doc/source/notebooks/0_tutorial.ipynb | forest1040/scikit-qulacs | 3d7153fb966189486196a491aed2a65436d992bf | [
"MIT"
] | null | null | null | doc/source/notebooks/0_tutorial.ipynb | forest1040/scikit-qulacs | 3d7153fb966189486196a491aed2a65436d992bf | [
"MIT"
] | null | null | null | doc/source/notebooks/0_tutorial.ipynb | forest1040/scikit-qulacs | 3d7153fb966189486196a491aed2a65436d992bf | [
"MIT"
] | null | null | null | 96.622388 | 22,486 | 0.842965 | [
[
[
"# scikit-qulacs チュートリアル\n\n## scikit-qulacs とは \n\n量子回路シミュレータ [Qulacs-Osaka](https://github.com/Qulacs-Osaka/qulacs-osaka) ([Qulacs](https://github.com/qulacs/qulacs) の大阪大学バージョン) をバックエンドとした量子機械学習アルゴリズムのシミュレータです。scikit-learn の名前をもじっています。",
"_____no_output_____"
],
[
"## インストール\nPyPI からインストールできます。\n```\npip install skqulacs\n```",
"_____no_output_____"
],
[
"## Quantum neural network\n入力データ ${\\boldsymbol x}$ に対して、学習パラメータ ${\\boldsymbol \\theta}$ を使って\n$$y_{{\\boldsymbol \\theta}}({\\boldsymbol x}) = \\langle 0|U^\\dagger({\\boldsymbol \\theta}, {\\boldsymbol x}) O U({\\boldsymbol \\theta}, {\\boldsymbol x})|0\\rangle$$\nの形でモデル $y_{{\\boldsymbol \\theta}}({\\boldsymbol x})$ を構築する手法を量子ニューラルネットワークと呼びます。scikit-qulacs では `skqulacs.qnn` モジュールに実装されています。分類問題をとく `skqulacs.qnn.classifier` と回帰問題を解く `skqulacs.qnn.regressor` の2つがあります。",
"_____no_output_____"
],
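[
"Under the hood, the expectation value $\\langle 0|U^\\dagger O U|0\\rangle$ reduces to a few qulacs primitives. A minimal sketch follows; the single $R_y$ rotation and the observable $O = Z_0$ are illustrative assumptions, not the scikit-qulacs defaults.\n```python\nfrom qulacs import Observable, QuantumState, QuantumCircuit\n\nn = 2\nstate = QuantumState(n) # |00>\ncircuit = QuantumCircuit(n) # stands in for U(theta, x)\ncircuit.add_RY_gate(0, 0.5) # illustrative rotation; the angle would encode theta and x\ncircuit.update_quantum_state(state)\nobservable = Observable(n)\nobservable.add_operator(1.0, \"Z 0\") # O = Z on qubit 0\nprint(observable.get_expectation_value(state))\n```",
"_____no_output_____"
],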
[
"### `skqulacs.qnn.QNNClassifier` の使い方\n例として、有名な `iris` データセットを分類する量子ニューラルネットワークを組んでみます。まず、以下のコードによって `scikit-learn` から `iris` データセットを取り出し、訓練データとテストデータに分けます。",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nfrom sklearn import datasets\nfrom sklearn.model_selection import train_test_split\n\niris = datasets.load_iris()\ndf = pd.DataFrame(iris.data, columns=iris.feature_names)\nx = df.loc[:, [\"petal length (cm)\", \"petal width (cm)\"]]\nx_train, x_test, y_train, y_test = train_test_split(\n x, iris.target, test_size=0.25, random_state=0\n)\nx_train = x_train.to_numpy()\nx_test = x_test.to_numpy()",
"_____no_output_____"
]
],
[
[
"変数はそれぞれ\n- `x_train`: 訓練データの入力\n- `y_train`: 訓練データのラベル\n- `x_test`: テストデータの入力\n- `y_test`: テストデータのラベル\n\nです。",
"_____no_output_____"
],
[
"次に簡単な量子回路によってこれらの分類を行ってみます。",
"_____no_output_____"
]
],
[
[
"from skqulacs.circuit.pre_defined import create_qcl_ansatz\nfrom skqulacs.qnn import QNNClassifier\n\nnqubit = 5 # qubitの数。必要とする入力、出力の次元数以上が必要である。\nc_depth = 3 # circuitの深さ。ニューラルネットワークでの隠れ層に対応する。\ntime_step = 1. # qcl_ansatz に必要となるパラメータ。横磁場イジングモデルによって発展させる時間を決める。\nnum_class = 3 # 分類数(ここでは3つの品種に分類)\nsolver=\"BFGS\" # アルゴリズム。ほかには、\"Adam\" が使える。\nmaxiter = 20 # ループの最大。これが多いほど、正確になるが、時間がかかる。\ncircuit = create_qcl_ansatz(nqubit, c_depth, time_step) # LearningCircuitを作る\nqcl = QNNClassifier(circuit, num_class, solver) # モデル構築を行う\nopt_loss, opt_params = qcl.fit(x_train, y_train, maxiter) # 学習\nprint(\"trained parameters\", opt_params)\nprint(\"loss\", opt_loss)",
"trained parameters [ 1.85410272 3.75676148 2.21646191 2.52509231 2.1647195 2.43798985\n 1.50367222 0.04751448 2.54785017 2.13940606 4.98429799 4.14497582\n 5.47831644 4.83193472 1.52245352 4.56050188 2.51816388 1.40210201\n 3.95273519 1.34362204 0.89885804 4.01058809 2.8836804 1.50820136\n 5.51325943 0.47711481 3.44328084 3.65547664 -0.55173648 6.3383248\n 0.66032895 5.16735433 -1.04875626 6.14244395 6.99287629 1.31751974\n 0.19759514 4.00321349 3.90846605 1.9431826 0.53312599 3.28469292\n 6.0037434 4.20836194 1.83071748]\nloss 0.0860431460366711\n"
]
],
[
[
"テストデータと比べて結果を出力します。",
"_____no_output_____"
]
],
[
[
"from sklearn.metrics import f1_score\ny_pred = qcl.predict(x_test)\nprint(f1_score(y_test, y_pred, average=\"weighted\"))",
"0.9473684210526315\n"
]
],
[
[
"うまく分類できていることがわかります。",
"_____no_output_____"
],
[
"### `skqulacs.qnn.QNNRegressor` の使い方\n回帰問題を解くには `QNNRegressor` を使います。ここでは $y=\\sin(\\pi x)$ を学習させてみます。まずデータセットを作ります。",
"_____no_output_____"
]
],
[
[
"import numpy as np\nfrom numpy.random import default_rng\n\ndef generate_noisy_sine(x_min, x_max, num_x):\n rng = default_rng(0)\n x_train = [[rng.uniform(x_min, x_max)] for _ in range(num_x)]\n y_train = [np.sin(np.pi*x[0]) for x in x_train]\n mag_noise = 0.01\n y_train += mag_noise * rng.random(num_x)\n return x_train, y_train\n \nx_min = -1.0\nx_max = 1.0\nnum_x = 80\nx_train, y_train = generate_noisy_sine(x_min, x_max, num_x)\nx_test, y_test = generate_noisy_sine(x_min, x_max, num_x)",
"_____no_output_____"
]
],
[
[
"作成したデータに対して学習を行います。",
"_____no_output_____"
]
],
[
[
"from skqulacs.qnn import QNNRegressor\n\nn_qubit = 4\ndepth = 4\ntime_step = 0.5\nsolver=\"BFGS\"\nmaxiter=20\ncircuit = create_qcl_ansatz(n_qubit, depth, time_step, 0)\nqnn = QNNRegressor(circuit, solver)\nopt_loss, opt_params = qnn.fit(x_train, y_train, maxiter)\nprint(\"trained parameters\", opt_params)\nprint(\"loss\", opt_loss)",
"[-2.52118172e-03 1.19277020e-03 3.10128326e-04 -6.42683364e-04\n 8.35436474e-04 -5.24981892e-04 -7.52008036e-04 -4.07462371e-04\n -5.74044155e-04 1.50774930e-03 6.30109294e-05 -8.19459615e-04\n 1.42078707e-03 2.09110696e-03 -3.36846534e-04 4.88690753e-04\n 7.46908451e-05 -2.22482857e-04 -9.25977438e-04 -9.12545452e-05\n 1.04324965e-03 -5.63038944e-04 -4.62934646e-04 -3.62787336e-04\n -8.51174859e-04 2.90978510e-03 1.96177705e-03 6.80510436e-05\n 1.69540322e-04 3.45813053e-04 3.20652348e-04 -2.24440888e-04\n -5.65850928e-05 -7.10191967e-04 5.10276819e-04 7.51022395e-04\n 1.50621087e-03 -1.01846464e-03 -8.74319743e-04 3.54710895e-17\n 3.59299055e-17 3.59218882e-17 3.58373194e-17 3.59945889e-17\n 3.55306677e-17 3.67906416e-17 3.59392206e-17 3.58473687e-17]\ntrained parameters [ 4.76002724 1.92362022 -0.48900136 0.35047068 4.82948963 6.46225974\n 3.40609952 5.00474857 3.37636039 6.31050608 5.49376378 0.139052\n 5.47590209 0.64912218 4.67870237 1.41841969 5.53849844 3.7883166\n 1.84668854 2.68946397 0.04043366 0.96776472 4.49352776 3.60008095\n 3.88877639 2.58504726 6.23553393 6.16278554 4.24511007 3.92287119\n 4.33309344 2.56906654 0.6973149 4.8681623 3.38829756 1.61272237\n 3.30808541 5.46822145 5.94478902 2.24809352 3.59102784 2.02236503\n 3.73409722 2.12315885 2.46061475 5.59375873 1.42727325 3.91560031]\nloss 0.008518100629961275\n"
]
],
[
[
"訓練したモデルとテストデータを並べてプロットしてみます。",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\ny_pred = qnn.predict(x_test)\nplt.plot(x_test, y_test, \"o\", label=\"Test\")\nplt.plot(np.sort(np.array(x_test).flatten()), np.array(y_pred)[np.argsort(np.array(x_test).flatten())], label=\"Prediction\")\nplt.legend()\nplt.show()",
"_____no_output_____"
]
],
[
[
"よく学習できていることがわかります。",
"_____no_output_____"
],
[
"## 量子カーネル法\n入力データ ${\\boldsymbol x}$ を量子状態 $|\\psi({\\boldsymbol x})\\rangle$ にマップし、この量子状態間の内積で定義されるカーネル関数\n$$K({\\boldsymbol x},{\\boldsymbol x}')=|\\langle \\psi({\\boldsymbol x})| \\psi({\\boldsymbol x}')\\rangle|^2$$\nを使ったカーネル法を量子カーネル法と呼びます。scikit-qulacs には、support vector classifier `skqulacs.qsvm.QSVC` と support vector regressor `skqulacs.qsvm.QSVR` の2つが実装されています。",
"_____no_output_____"
],
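[
"The kernel can also be evaluated directly from the embedded states with qulacs. A sketch follows; it assumes `embed(x)` returns the `qulacs.QuantumState` $|\\psi({\\boldsymbol x})\\rangle$, for example the `run` method of a `LearningCircuit`.\n```python\nfrom qulacs.state import inner_product\n\ndef quantum_kernel(embed, x1, x2):\n    # |<psi(x1)|psi(x2)>|^2 from the two embedded states\n    return abs(inner_product(embed(x1), embed(x2))) ** 2\n```",
"_____no_output_____"
],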
[
"### `skqulacs.qsvm.QSVC` の使い方\nここでは IBM のグループが論文 [Supervised learning with quantum-enhanced feature spaces](https://www.nature.com/articles/s41586-019-0980-2) で提案した $|\\psi({\\boldsymbol x})\\rangle$ を使った分類を行います。この状態を作る量子回路は `skqulacs.circuit.create_ibm_embedding_circuit` によって作り出せます。\n\nまずデータセットを準備します。",
"_____no_output_____"
]
],
[
[
"iris = datasets.load_iris()\ndf = pd.DataFrame(iris.data, columns=iris.feature_names)\nx = df.loc[:, [\"petal length (cm)\", \"petal width (cm)\"]]\nx_train, x_test, y_train, y_test = train_test_split(\n x, iris.target, test_size=0.25, random_state=0\n)\nx_train = x_train.to_numpy()\nx_test = x_test.to_numpy()",
"_____no_output_____"
]
],
[
[
"回路を作成し、QSVC を訓練、予測値を出力させます。",
"_____no_output_____"
]
],
[
[
"from skqulacs.circuit import create_ibm_embedding_circuit\nfrom skqulacs.qsvm import QSVC\nn_qubit = 4 # x_train の次元数以上必要。あまり小さいと結果が悪くなる。\ncircuit = create_ibm_embedding_circuit(n_qubit)\nqsvm = QSVC(circuit)\nqsvm.fit(x_train, y_train)\ny_pred = qsvm.predict(x_test)",
"_____no_output_____"
]
],
[
[
"評価してみます。",
"_____no_output_____"
]
],
[
[
"print(f1_score(y_test, y_pred, average=\"weighted\"))",
"0.8978070175438595\n"
]
],
[
[
"qnn よりも悪い結果となりました。回路形がこのデータセットに適していないからだと考えられます。",
"_____no_output_____"
],
[
"### `skqulacs.qsvm.QSVR` の使い方\n同様の回路を使い、回帰を行ってみます。QNN の場合と同様に、$\\sin(\\pi x)$ を学習します。まずデータセットを作ります。",
"_____no_output_____"
]
],
[
[
"x_min = -1.0\nx_max = 1.0\nnum_x = 1000\nx_train, y_train = generate_noisy_sine(x_min, x_max, num_x)\nx_test, y_test = generate_noisy_sine(x_min, x_max, num_x)",
"_____no_output_____"
]
],
[
[
"学習します。",
"_____no_output_____"
]
],
[
[
"from skqulacs.qsvm import QSVR\nn_qubit = 4 # x_train の次元数以上必要。あまり小さいと結果が悪くなる。\ncircuit = create_ibm_embedding_circuit(n_qubit)\nqsvm = QSVR(circuit)\nqsvm.fit(x_train, y_train)\ny_pred = qsvm.predict(x_test)",
"_____no_output_____"
]
],
[
[
"学習結果をプロットしてみます。",
"_____no_output_____"
]
],
[
[
"plt.plot(x_test, y_test, \"o\", label=\"Test\")\nplt.plot(np.sort(np.array(x_test).flatten()), np.array(y_pred)[np.argsort(np.array(x_test).flatten())], label=\"Prediction\")\nplt.legend()\nplt.show()",
"_____no_output_____"
]
],
[
[
"うまく学習できています。",
"_____no_output_____"
],
[
"## 用意されている量子回路\n\nscikit-qulacs では代表的な論文で発表されている機械学習用量子回路を手軽に使えるようにするため、量子回路の実装に努めています。機械学習用量子回路は `LearningCircuit` クラスのインスタンスとして実装されていて、`skqulacs.circuit` モジュールにある `create_...()` という関数を呼ぶことにより作成可能です。\n\n代表的なansatzとして、\n- create_farhi_neven_ansatz(n_qubit: int, c_depth: int, seed: Optional[int] = None): [arXiv:1802.06002](https://arxiv.org/pdf/1802.06002)\n\n- create_ibm_embedding_circuit(n_qubit: int): [arXiv:1804.11326](https://arxiv.org/abs/1804.11326) \n\n\nが用意されています。\n\n他に利用可能な量子回路のリストについては[このページ](circuit_visualize.ipynb)をご覧ください。\n\n基本的には `QNNClassifier` などの引数として与えることを推奨しますが、自前で新たなアルゴリズムを作りたいときなど、これらの量子回路を直接扱いたいときもあると思います。以下に使用例を掲載します。\n\n以下のコードで QCL ansatz を作成します。",
"_____no_output_____"
]
],
[
[
"n_qubits = 4\nc_depth = 4\ntime_step = 1.\nansatz = create_qcl_ansatz(n_qubits, c_depth, time_step)",
"_____no_output_____"
]
],
[
[
"`LearningCircuit` にセットされている訓練パラメータは `get_parameters()` によって取り出せます。",
"_____no_output_____"
]
],
[
[
"parameters = ansatz.get_parameters()\nprint(len(parameters), parameters) # show parameters in the circuit. Parameters are chosen randomly within create_qcl_ansatz()",
"48 [0.7238575654756965, 4.521772018945494, 5.300166829620294, 2.4396786706992875, 3.372497641445515, 0.0691143830945458, 2.4305919508432194, 0.897581496914585, 0.8153164989629146, 4.19580716083789, 4.614797622397918, 2.951713965321396, 2.449149087341643, 2.4619419636304105, 1.0029754525418129, 2.334478387370933, 2.5589036813723762, 4.50591651911791, 4.486222509952132, 1.559585289816079, 6.164351024607971, 1.47322926380012, 2.0729721326039914, 2.275646680431729, 2.9195023453638385, 2.7015553984398526, 5.814853292230643, 5.161654841481886, 0.7527848104233957, 1.728308187107492, 5.360006969176836, 5.201498343709625, 0.42675480228093243, 1.967788806417369, 1.7897854085117577, 5.321631318940725, 5.6122834079775785, 5.437876706508988, 2.4878547042811134, 3.796733572414534, 0.9677624343101473, 1.8208670050419042, 0.4657427370080585, 5.820313505901443, 1.2863802672251754, 2.4653914032427604, 0.3882421258541333, 1.4073421893586033]\n"
]
],
[
[
"`update_parameters()` を使うと、パラメータを更新できます。以下では全てのパラメータをゼロにセットします。",
"_____no_output_____"
]
],
[
[
"ansatz.update_parameters(np.zeros(len(parameters)))\nprint(ansatz.get_parameters())",
"[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]\n"
]
],
[
[
"`run(x)` を使うと、入力データ ${\\boldsymbol x}$ に対して、初期状態 $|0\\rangle$ に量子回路を作用させたときの出力状態が `qulacs.QuantumState` として返されます。QCL ansatz は `n_qubits` 次元までの入力を扱うことのできる ansatz なので、今回は 2 次元入力を与えます。\n\n多くの回路では、データ次元数をaとして、それが`n_qubits`以下の場合、回路のx番目には、n_qubits % a 番目の入力が入ります。",
"_____no_output_____"
]
],
[
[
"state = ansatz.run([np.pi/2, np.pi/2])\nprint(state.get_vector())",
"[ 0.30869612+0.13283004j -0.0759879 -0.06176279j -0.21316566-0.1438611j\n 0.21877642+0.06614673j -0.33638863+0.12504632j 0.24610584+0.23178805j\n 0.05948781+0.05711437j 0.03556247-0.08914448j 0.03556247-0.08914448j\n 0.05948781+0.05711437j 0.24610584+0.23178805j -0.33638863+0.12504632j\n 0.21877642+0.06614673j -0.21316566-0.1438611j -0.0759879 -0.06176279j\n 0.30869612+0.13283004j]\n"
]
],
[
[
"## ユーザー定義の量子回路を学習に利用する\nここでは自分でオリジナルの量子回路を定義し、それを学習に利用する方法を説明します。scikit-qulacs では、`LearningCircuit` クラスによって量子回路を表現しています。これは qulacs の `ParametricQuantumCircuit` のラッパーとなっていて、入力データと訓練される回路パラメータを一元管理し、それらをすべて qulacs の parametric gate で扱えるようにしたものです。\n\n`LearningCircuit` クラスは qulacs の `ParametricQuantumCircuit` のうちの主要な `add_.._gate()` 系のメソッドを機能そのままにラップしています。それに加えて、`ParametricQuantumCircuit` を拡張する機能として、以下のメソッドが追加されています。\n\n- `add_input_RX_gate(index, input_func)`, `add_input_RY_gate(index, input_func)`, `add_input_RZ_gate(index, input_func)`: 入力データを入れるためのゲートを追加します。引数にはそれぞれ以下のものを与えます。\n - `index`: そのゲートを作用させる量子ビット\n - `input_func`: 入力データ ${\\boldsymbol x}$ -> 回転ゲートの回転角 $\\phi = \\phi({\\boldsymbol x})$ の変換を行う関数。回路実行時には `input_func(x)` が角度にセットされます。\n- `add_parametric_input_RX_gate(index, parameter, input_func)` (RY, RZ も同様) :入力データと訓練パラメータが同時に含まれているようなゲートを追加します。\n - `index`: そのゲートを作用させる量子ビット\n - `parameter`: 実数の訓練パラメータ $\\theta$\n - `input_func`: 入力データ ${\\boldsymbol x}$, 訓練パラメータ $\\theta$ -> 回転ゲートの回転角 $\\phi = \\phi(\\theta, {\\boldsymbol x})$ の変換を行う関数。回路実行時には `input_func(theta, x)` が角度にセットされます。\n\n例えば以下のコードは、各量子ビットに ${\\boldsymbol x} = \\{x_i\\}_{i=0}^3$ の各成分を $R_y$ ゲートで入力したあと、各量子ビットを訓練可能な $R_y$ ゲートで変換し、最後に全ての量子ビットにアダマールゲートを一度だけ作用させる `LearningCircuit` を構成するコードです。(例のためだけに書いていて、特に意味は無い回路です。) ",
"_____no_output_____"
]
],
[
[
"from skqulacs.circuit import LearningCircuit\nn_qubit = 4\ncircuit = LearningCircuit(n_qubit)\nfor i in range(n_qubit):\n circuit.add_input_RY_gate(i, lambda x: x[i%2]) # input 2 dimensional x into the circuit by RY rotation\nfor i in range(n_qubit):\n circuit.add_parametric_RY_gate(i, 0.) # first argument is qubit index, second argument is parameter\nfor i in range(n_qubit):\n circuit.add_H_gate(i)",
"_____no_output_____"
]
],
[
[
"`run()` メソッドによって初期状態 $|0\\rangle$ にこの回路を作用させたときの量子状態が返されます。このメソッドは入力データ ${\\boldsymbol x}$ を引数として取ります。上の回路は 2 次元の ${\\boldsymbol x}$ を取れるように設計したので、2次元の入力を与えます。",
"_____no_output_____"
]
],
[
[
"x = [np.pi/2,np.pi/2]\nstate = circuit.run(x)\nprint(state.get_vector())",
"[-6.93889390e-18+0.j -1.96261557e-17+0.j -2.08166817e-17+0.j\n -2.77555756e-17+0.j -2.08166817e-17+0.j 1.96261557e-17+0.j\n -6.93889390e-18+0.j 0.00000000e+00+0.j -2.08166817e-17+0.j\n 1.96261557e-17+0.j -6.93889390e-18+0.j 2.77555756e-17+0.j\n -6.93889390e-18+0.j 5.88784672e-17+0.j 9.02056208e-17+0.j\n 1.00000000e+00+0.j]\n"
]
],
[
[
"予想どおりの出力が得られることが確認できます。",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e7161be192fc2fcec45119dab75faaea76b49b78 | 8,367 | ipynb | Jupyter Notebook | PaperPinelineCodes/headProboscisMotion/P1_HeadAndBodyTracking.ipynb | TanviDeora/ProboscisTracking | 71b66f99d922a1afafc05e76125cb645831c26d8 | [
"MIT"
] | null | null | null | PaperPinelineCodes/headProboscisMotion/P1_HeadAndBodyTracking.ipynb | TanviDeora/ProboscisTracking | 71b66f99d922a1afafc05e76125cb645831c26d8 | [
"MIT"
] | null | null | null | PaperPinelineCodes/headProboscisMotion/P1_HeadAndBodyTracking.ipynb | TanviDeora/ProboscisTracking | 71b66f99d922a1afafc05e76125cb645831c26d8 | [
"MIT"
] | null | null | null | 32.180769 | 188 | 0.558504 | [
[
[
"import numpy as np\nimport pandas as pd\nimport glob\nimport os\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"from scipy import signal\ndef smooth(Pixels):\n win_gauss = signal.gaussian(10,3)\n smooth_Pixels = signal.convolve(Pixels, win_gauss, mode='same', method = 'direct') / win_gauss.sum()\n return(smooth_Pixels)",
"_____no_output_____"
],
[
"def FlowerCenteredTracks(track):\n \n strt = track.first_valid_index()\n stop = track.last_valid_index()\n\n x = track.loc[strt:stop, 'x'].values\n y = track.loc[strt:stop, 'y'].values\n\n name = [n for n in circ_parameters.name if n == mothID][0]\n\n circ_x = circ_parameters.loc[circ_parameters.name == name, 'circ_x'].values\n circ_y = circ_parameters.loc[circ_parameters.name == name, 'circ_y'].values\n circ_radii = circ_parameters.loc[circ_parameters.name == name, 'circ_radii'].values\n\n filtered_x = x \n filtered_y = y \n filtered_r = np.linalg.norm([filtered_x-circ_x, filtered_y-circ_y], axis = 0)\n filtered_r = filtered_r/circ_radii\n trajectory = pd.DataFrame([filtered_x, filtered_y, filtered_r]).T\n trajectory.columns = ['x', 'y', 'r']\n return(trajectory)",
"_____no_output_____"
],
[
"# use the list of final proboscis tracks as starting point to get the corresponding head and thorax data\n\nvisitnum = 0\ndirec = r\"./dataFolders/PaperPipelineOutput/v3/FilteredTracks/FinalCompiledTracks/FirstVisit/\"\nprob_track_list = glob.glob(direc + '*.csv')",
"_____no_output_____"
],
[
"# get all the relevant paths and list of files\n\npath_for_DLCAnnotation = r\"G:\\My Drive\\Tom-Tanvi\\Shared With Bing,Tom and Tanvi\\Video Analysis\\DeepLabCut-ImageAnalysis\\take5\\outputFromDLC\\VideoResults\\EntireDataSet/\"\ndlc_files = glob.glob(path_for_DLCAnnotation + '*DeepCut_' + '*.h5')\n\npath_for_visit_frames = r\"../MothLearning/dataFolders/Output/Step5_FilesWith_TrueTrialAnd_ProboscisDetect_v2/\"\n\n# Get moth tracking data\nmothTrackPath = r\"../MothLearning/dataFolders/Output/Step1_MotionDetection/\"\nmothTracks = glob.glob(mothTrackPath + \"*.csv\")\n\n# get all the circle parameters\ncirc_parameters_path = glob.glob('./dataFolders/PaperPipelineOutput/CircleParameters/' + '*.csv')\ncirc_parameters = pd.read_csv(circ_parameters_path[0])",
"_____no_output_____"
],
[
"len('RawDataForExplorationTime.csv')",
"_____no_output_____"
],
[
"output_fig_path = r\"./dataFolders/PaperPipelineOutput/Figures/v3/OtherBodyPartTracks/FirstVisit/\"\noutput_data = r'./dataFolders/PaperPipelineOutput/v3/OtherBodyParts/FirstVisit/' ",
"_____no_output_____"
],
[
"for ff in prob_track_list:\n \n a, b, _ = os.path.basename(ff).split('_')\n mothID = a + '_' + b\n\n # read in head data\n prob = pd.read_csv(ff)\n prob_track = prob.loc[:,'x':'y'].copy()\n\n # read the head data\n if not [f for f in dlc_files if mothID + 'DeepCut_' in f]:\n mothID = mothID + '_cropped'\n \n DLC_track = [f for f in dlc_files if mothID + 'DeepCut_' in f][0]\n temp = pd.read_hdf(DLC_track)\n\n head = temp.loc[slice(None),(slice(None),'head')]\n\n path_frame_Reference = glob.glob(path_for_visit_frames + mothID + '_RawDataForExplorationTime.csv')\n Visit_info_f = pd.read_csv(path_frame_Reference[0])\n Visit_info = Visit_info_f[['MothIN', 'MothOut','ProboscisDetect']]\n\n fin = Visit_info.iloc[visitnum, 0]\n fout = Visit_info.iloc[visitnum, 2]\n if np.isnan(fout):\n fout = Visit_info.iloc[visitnum, 1]\n\n fin = int(fin)\n fout = int(fout)\n# print(fin, fout)\n\n head_track = head[fin:fout].copy()\n head_track = head_track.droplevel(level = [0,1], axis = 1)\n\n\n # get tracks for moth for that mothID\n\n specificMothTracks = [f for f in mothTracks if mothID + '.csv' in f][0]\n\n mothTrack = pd.read_csv(specificMothTracks)\n moth_x = mothTrack.Centroid_hull_x.values[fin:fout]\n moth_y = mothTrack.Centroid_hull_y.values[fin:fout]\n\n filt_moth_x = smooth(moth_x)\n filt_moth_y = smooth(moth_y)\n moth_center = pd.DataFrame({'x': filt_moth_x, 'y' : filt_moth_y})\n\n # convert all the data to flower centric data\n proboscis = FlowerCenteredTracks(prob_track)\n headJoint = FlowerCenteredTracks(head_track)\n mothCenter = FlowerCenteredTracks(moth_center)\n \n \n f = plt.figure(figsize = (15,6))\n ax0 = f.add_subplot(131)\n ax1 = f.add_subplot(132)\n ax2 = f.add_subplot(133)\n \n ax0.plot(proboscis.r, 'm')\n ax0.plot(headJoint.r, 'b')\n ax0.plot(mothCenter.r, 'k')\n ax0.set_title('Relative Radial Position')\n ax0.set_ylabel('Relative Radial Position')\n\n ax1.plot(proboscis.x, 'm')\n ax1.plot(headJoint.x, 'b')\n ax1.plot(mothCenter.x, 'k')\n ax1.set_title('x position')\n ax1.set_ylabel('pixels')\n\n ax2.plot(proboscis.y, 'm', label = 'proboscisTip')\n ax2.plot(headJoint.y, 'b', label = 'Head-Proboscis Joint')\n ax2.plot(mothCenter.y, 'k', label = 'Moth Center of mass')\n ax2.set_title('y position')\n ax2.set_ylabel('pixels')\n ax2.legend()\n\n ax1.text(0.5, - 0.15, 'Frame Number (@100fps)', horizontalalignment='center',\n verticalalignment='bottom', transform=ax1.transAxes)\n\n\n plt.savefig(output_fig_path + mothID + '_FirstVisit.pdf')\n plt.close('all')\n \n proboscis.to_csv(output_data + mothID + '_proboscis.csv')\n headJoint.to_csv(output_data + mothID + '_HeadprobJoint.csv')\n mothCenter.to_csv(output_data + mothID + '_MothCOM.csv')",
"_____no_output_____"
],
[
"# prob = temp.loc[slice(None),(slice(None),'proboscisTip')]\n\n# prob_track = prob[fin:fout].copy()\n# prob_track = prob_track.droplevel(level = [0,1], axis = 1)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e71628d5fb44b161d3e3c664ce7b924d9a48c959 | 9,234 | ipynb | Jupyter Notebook | 03_conditionals_02_exercises.ipynb | smartedukart/Coding_Gym_Projects | 4165a0558e8a8e4ab4dc472f2de28f9b7fc22846 | [
"MIT"
] | null | null | null | 03_conditionals_02_exercises.ipynb | smartedukart/Coding_Gym_Projects | 4165a0558e8a8e4ab4dc472f2de28f9b7fc22846 | [
"MIT"
] | null | null | null | 03_conditionals_02_exercises.ipynb | smartedukart/Coding_Gym_Projects | 4165a0558e8a8e4ab4dc472f2de28f9b7fc22846 | [
"MIT"
] | 1 | 2021-12-04T20:11:35.000Z | 2021-12-04T20:11:35.000Z | 31.623288 | 795 | 0.583712 | [
[
[
"**Important**: Click on \"*Kernel*\" > \"*Restart Kernel and Run All*\" *after* finishing the exercises in [JupyterLab <img height=\"12\" style=\"display: inline-block\" src=\"static/link_to_jp.png\">](https://jupyterlab.readthedocs.io/en/stable/) (e.g., in the cloud on [MyBinder <img height=\"12\" style=\"display: inline-block\" src=\"static/link_to_mb.png\">](https://mybinder.org/v2/gh/webartifex/intro-to-python/master?urlpath=lab/tree/03_conditionals_02_exercises.ipynb)) to ensure that your solution runs top to bottom *without* any errors",
"_____no_output_____"
],
[
"# Chapter 3: Conditionals & Exceptions",
"_____no_output_____"
],
[
"## Coding Exercises",
"_____no_output_____"
],
[
"The exercises below assume that you have read [Chapter 3 <img height=\"12\" style=\"display: inline-block\" src=\"static/link_to_nb.png\">](https://nbviewer.jupyter.org/github/webartifex/intro-to-python/blob/master/03_conditionals_00_content.ipynb) in the book.\n\nThe `...`'s in the code cells indicate where you need to fill in code snippets. The number of `...`'s within a code cell give you a rough idea of how many lines of code are needed to solve the task. You should not need to create any additional code cells for your final solution. However, you may want to use temporary code cells to try out some ideas.",
"_____no_output_____"
],
[
"### Discounting Customer Orders",
"_____no_output_____"
],
[
"**Q1.1**: Write a function `discounted_price()` that takes the positional arguments `unit_price` (of type `float`) and `quantity` (of type `int`) and implements a discount scheme for a line item in a customer order as follows:\n\n- if the unit price is over 100 dollars, grant 10% relative discount\n- if a customer orders more than 10 items, one in every five items is for free\n\nOnly one of the two discounts is granted, whichever is better for the customer.\n\nThe function should then return the overall price for the line item. Do not forget to round appropriately.",
"_____no_output_____"
]
],
[
[
"def discounted_price(unit_price, quantity):\n \"\"\"Calculate the price of a line item in an order.\n\n Args:\n unit_price (float): price of one ordered item\n quantity (int): number of items ordered\n\n Returns:\n line_item_price (float)\n \"\"\"\n ...\n ...\n ...\n ...\n\n ...\n ...\n ...\n ...\n ...\n\n return ...",
"_____no_output_____"
]
],
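[
[
"One possible solution sketch for the stub above (hedged; any formulation that grants only the better of the two discounts is equally valid):\n```python\ndef discounted_price(unit_price, quantity):\n    base = unit_price * quantity\n    candidates = [base]\n    if unit_price > 100:\n        candidates.append(0.9 * base) # 10% relative discount\n    if quantity > 10:\n        candidates.append(unit_price * (quantity - quantity // 5)) # every fifth item free\n    return round(min(candidates), 2) # whichever is better for the customer\n```",
"_____no_output_____"
]
],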
[
[
"**Q1.2**: Calculate the final price for the following line items of an order:\n- $7$ smartphones @ $99.00$ USD\n- $3$ workstations @ $999.00$ USD\n- $19$ GPUs @ $879.95$ USD\n- $14$ Raspberry Pis @ $35.00$ USD",
"_____no_output_____"
]
],
[
[
"discounted_price(...)",
"_____no_output_____"
],
[
"discounted_price(...)",
"_____no_output_____"
],
[
"discounted_price(...)",
"_____no_output_____"
],
[
"discounted_price(...)",
"_____no_output_____"
]
],
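[
[
"With the solution sketch above, the four line items translate to the following calls (expected values shown as comments):\n```python\ndiscounted_price(99.00, 7) # 693.0\ndiscounted_price(999.00, 3) # 2697.3\ndiscounted_price(879.95, 19) # 14079.2\ndiscounted_price(35.00, 14) # 420.0\n```",
"_____no_output_____"
]
],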
[
[
"**Q1.3**: Calculate the last two line items with order quantities of $20$ and $15$. What do you observe?",
"_____no_output_____"
]
],
[
[
"discounted_price(...)",
"_____no_output_____"
],
[
"discounted_price(...)",
"_____no_output_____"
]
],
[
[
" ",
"_____no_output_____"
],
[
"**Q1.4**: Looking at the `if`-`else`-logic in the function, why do you think the four example line items in **Q1.2** were chosen as they were?",
"_____no_output_____"
],
[
" < your answer >",
"_____no_output_____"
],
[
"### Fizz Buzz",
"_____no_output_____"
],
[
"The kids game [Fizz Buzz <img height=\"12\" style=\"display: inline-block\" src=\"static/link_to_wiki.png\">](https://en.wikipedia.org/wiki/Fizz_buzz) is said to be often used in job interviews for entry-level positions. However, opinions vary as to how good of a test it is (cf., [source <img height=\"12\" style=\"display: inline-block\" src=\"static/link_to_hn.png\">](https://news.ycombinator.com/item?id=16446774)).\n\nIn its simplest form, a group of people starts counting upwards in an alternating fashion. Whenever a number is divisible by $3$, the person must say \"Fizz\" instead of the number. The same holds for numbers divisible by $5$ when the person must say \"Buzz.\" If a number is divisible by both numbers, one must say \"FizzBuzz.\" Probably, this game would also make a good drinking game with the \"right\" beverages.",
"_____no_output_____"
],
[
"**Q2.1**: First, create a list `numbers` with the numbers from 1 through 100. You could type all numbers manually, but there is, of course, a smarter way. The built-in [range() <img height=\"12\" style=\"display: inline-block\" src=\"static/link_to_py.png\">](https://docs.python.org/3/library/functions.html#func-range) may be useful here. Read how it works in the documentation. To make the output of [range() <img height=\"12\" style=\"display: inline-block\" src=\"static/link_to_py.png\">](https://docs.python.org/3/library/functions.html#func-range) a `list` object, you have to wrap it with the [list() <img height=\"12\" style=\"display: inline-block\" src=\"static/link_to_py.png\">](https://docs.python.org/3/library/functions.html#func-list) built-in (i.e., `list(range(...))`).",
"_____no_output_____"
]
],
[
[
"numbers = ...",
"_____no_output_____"
]
],
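[
[
"A hedged one-liner: `range(1, 101)` yields the integers 1 through 100, and `list()` materializes them.\n```python\nnumbers = list(range(1, 101))\n```",
"_____no_output_____"
]
],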
[
[
"**Q2.2**: Loop over the `numbers` list and *replace* numbers for which one of the two (or both) conditions apply with text strings `\"Fizz\"`, `\"Buzz\"`, or `\"FizzBuzz\"` using the indexing operator `[]` and the assignment statement `=`.\n\nIn [Chapter 1 <img height=\"12\" style=\"display: inline-block\" src=\"static/link_to_nb.png\">](https://nbviewer.jupyter.org/github/webartifex/intro-to-python/blob/master/01_elements_00_content.ipynb#Who-am-I?-And-how-many?), we saw that Python starts indexing with `0` as the first element. Keep that in mind.\n\nSo in each iteration of the `for`-loop, you have to determine an `index` variable as well as check the actual `number` for its divisors.\n\nHint: the order of the conditions is important!",
"_____no_output_____"
]
],
[
[
"for number in numbers:\n ...\n\n ...\n ...\n ...\n ...\n ...\n ...",
"_____no_output_____"
]
],
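[
[
"One possible sketch. Checking divisibility by 15 first is what the hint alludes to: a number divisible by both 3 and 5 must not be caught by the single-divisor branches.\n```python\nfor number in numbers:\n    index = number - 1 # Python indexing starts at 0\n    if number % 15 == 0:\n        numbers[index] = \"FizzBuzz\"\n    elif number % 3 == 0:\n        numbers[index] = \"Fizz\"\n    elif number % 5 == 0:\n        numbers[index] = \"Buzz\"\n```\nFor Q2.3, printing the whole list at once, e.g. `print(numbers)`, keeps the output to a few lines.",
"_____no_output_____"
]
],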
[
[
"**Q2.3**: Create a loop that prints out either the number or any of the Fizz Buzz substitutes. Do it in such a way that we do not end up with 100 lines of output here.",
"_____no_output_____"
]
],
[
[
"for number in numbers:\n print(...)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7164f96a97aad1d90a552718f70a62d5f7780d8 | 124,756 | ipynb | Jupyter Notebook | notebooks/eda+datacleaning2.ipynb | diedrebrown/predictionwithalexareviews | 58e11068e2aea474f38474513c16caf464719206 | [
"MIT"
] | 1 | 2022-03-28T13:37:12.000Z | 2022-03-28T13:37:12.000Z | notebooks/eda+datacleaning2.ipynb | diedrebrown/predictionwithalexareviews | 58e11068e2aea474f38474513c16caf464719206 | [
"MIT"
] | null | null | null | notebooks/eda+datacleaning2.ipynb | diedrebrown/predictionwithalexareviews | 58e11068e2aea474f38474513c16caf464719206 | [
"MIT"
] | null | null | null | 66.678781 | 19,636 | 0.72948 | [
[
[
"# Predicting User Satisfaction with Amazon Alexa\n## Final Project for Machine Learning INFO 656 at Pratt Institute\n### By Elena Korshakova and Diedre Brown\nThis project aims to analyze the reviews and ratings of Amazon Alexa devices from 2017 (as the training data) and 2018 (as the test data) to develop an algorithm to predict star ratings based on user reviews. Through the process, we aim to determine if star ratings and an end-to-end sentiment analysis of user review comments from the 2017 dataset, we can predict with accuracy, the star ratings of 2018. If successful, our findings have the potential to provide tech companies with a summary of user preferences and top criteria or the development of future products, as well as, assist in predicting user reactions to recently implemented features based on data collected from previous reviews of similar features. \n",
"_____no_output_____"
],
[
"## Import Libraries",
"_____no_output_____"
]
],
[
[
"#standard libraries\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport itertools # ability to complex iterators\nimport os\nimport re # for regular expressions\n\n",
"_____no_output_____"
],
[
"%matplotlib inline\n# makes inline plots to have better quality\n%config InlineBackend.figure_format = 'svg'\n# Set the default style\nplt.style.use(\"seaborn\")",
"_____no_output_____"
],
[
"pd.set_option('mode.chained_assignment', None)",
"_____no_output_____"
],
[
"# sklearn libraries\nimport sklearn\nsklearn.__version__>=\"0.20\"\nfrom sklearn import linear_model\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import metrics\nfrom sklearn.metrics import confusion_matrix, classification_report\nfrom sklearn.linear_model import LinearRegression,ElasticNetCV\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.decomposition import LatentDirichletAllocation\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.naive_bayes import MultinomialNB",
"_____no_output_____"
],
[
"# Natural Language Toolkit Library - NLTK\nimport nltk\nnltk.download(\"words\")\nnltk.download(\"stopwords\")",
"[nltk_data] Downloading package words to\n[nltk_data] /Users/diedrebrown/nltk_data...\n[nltk_data] Package words is already up-to-date!\n[nltk_data] Downloading package stopwords to\n[nltk_data] /Users/diedrebrown/nltk_data...\n[nltk_data] Package stopwords is already up-to-date!\n"
],
[
"# pip install spacy",
"Collecting spacy\n Downloading spacy-2.3.2-cp37-cp37m-macosx_10_9_x86_64.whl (10.0 MB)\n\u001b[K |████████████████████████████████| 10.0 MB 9.3 MB/s eta 0:00:01\n\u001b[?25hCollecting catalogue<1.1.0,>=0.0.7\n Downloading catalogue-1.0.0-py2.py3-none-any.whl (7.7 kB)\nCollecting wasabi<1.1.0,>=0.4.0\n Downloading wasabi-0.8.0-py3-none-any.whl (23 kB)\nCollecting thinc==7.4.1\n Downloading thinc-7.4.1-cp37-cp37m-macosx_10_9_x86_64.whl (2.1 MB)\n\u001b[K |████████████████████████████████| 2.1 MB 1.4 MB/s eta 0:00:01\n\u001b[?25hCollecting cymem<2.1.0,>=2.0.2\n Downloading cymem-2.0.4-cp37-cp37m-macosx_10_9_x86_64.whl (31 kB)\nCollecting plac<1.2.0,>=0.9.6\n Downloading plac-1.1.3-py2.py3-none-any.whl (20 kB)\nCollecting srsly<1.1.0,>=1.0.2\n Downloading srsly-1.0.4-cp37-cp37m-macosx_10_9_x86_64.whl (286 kB)\n\u001b[K |████████████████████████████████| 286 kB 7.3 MB/s eta 0:00:01\n\u001b[?25hRequirement already satisfied: setuptools in /Users/diedrebrown/opt/anaconda3/lib/python3.7/site-packages (from spacy) (46.0.0.post20200309)\nRequirement already satisfied: numpy>=1.15.0 in /Users/diedrebrown/opt/anaconda3/lib/python3.7/site-packages (from spacy) (1.18.1)\nCollecting murmurhash<1.1.0,>=0.28.0\n Downloading murmurhash-1.0.4-cp37-cp37m-macosx_10_9_x86_64.whl (18 kB)\nCollecting preshed<3.1.0,>=3.0.2\n Downloading preshed-3.0.4-cp37-cp37m-macosx_10_9_x86_64.whl (262 kB)\n\u001b[K |████████████████████████████████| 262 kB 9.8 MB/s eta 0:00:01\n\u001b[?25hRequirement already satisfied: tqdm<5.0.0,>=4.38.0 in /Users/diedrebrown/opt/anaconda3/lib/python3.7/site-packages (from spacy) (4.42.1)\nRequirement already satisfied: requests<3.0.0,>=2.13.0 in /Users/diedrebrown/opt/anaconda3/lib/python3.7/site-packages (from spacy) (2.24.0)\nCollecting blis<0.5.0,>=0.4.0\n Downloading blis-0.4.1-cp37-cp37m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl (4.0 MB)\n\u001b[K |████████████████████████████████| 4.0 MB 878 kB/s eta 0:00:01\n\u001b[?25hRequirement already satisfied: importlib-metadata>=0.20; python_version < \"3.8\" in /Users/diedrebrown/opt/anaconda3/lib/python3.7/site-packages (from catalogue<1.1.0,>=0.0.7->spacy) (1.5.0)\nRequirement already satisfied: idna<3,>=2.5 in /Users/diedrebrown/opt/anaconda3/lib/python3.7/site-packages (from requests<3.0.0,>=2.13.0->spacy) (2.8)\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /Users/diedrebrown/opt/anaconda3/lib/python3.7/site-packages (from requests<3.0.0,>=2.13.0->spacy) (1.25.8)\nRequirement already satisfied: chardet<4,>=3.0.2 in /Users/diedrebrown/opt/anaconda3/lib/python3.7/site-packages (from requests<3.0.0,>=2.13.0->spacy) (3.0.4)\nRequirement already satisfied: certifi>=2017.4.17 in /Users/diedrebrown/opt/anaconda3/lib/python3.7/site-packages (from requests<3.0.0,>=2.13.0->spacy) (2019.11.28)\nRequirement already satisfied: zipp>=0.5 in /Users/diedrebrown/opt/anaconda3/lib/python3.7/site-packages (from importlib-metadata>=0.20; python_version < \"3.8\"->catalogue<1.1.0,>=0.0.7->spacy) (2.2.0)\nInstalling collected packages: catalogue, wasabi, cymem, plac, murmurhash, preshed, blis, srsly, thinc, spacy\nSuccessfully installed blis-0.4.1 catalogue-1.0.0 cymem-2.0.4 murmurhash-1.0.4 plac-1.1.3 preshed-3.0.4 spacy-2.3.2 srsly-1.0.4 thinc-7.4.1 wasabi-0.8.0\nNote: you may need to restart the kernel to use updated packages.\n"
],
[
"# spaCy another Natural Language Processing library built for Python/Cython\n# spaCy has the fastest syntactic parser, which helps increase it's accuracy over nltk\nimport string\nimport spacy\n#spacy_en = spacy.load(\"en_core_web_sm\")",
"_____no_output_____"
],
[
"from spacy.lang.en import English\nnlp = English() # use directly",
"_____no_output_____"
],
[
"# import the logging library to expose the interface that the application code directly uses.\nimport logging\n# logging levels\n# CRITICAL (50) - A serious error, indicating that the program itself may be unable to continue running.\n# ERROR (40) - Due to a more serious problem, the software has not been able to perform some function.\n# WARNING (30) - An indication that something unexpected happened, or indicative of some problem in the near future (e.g. 'disk space low'). The software is still working as expected.\n# INFO (20) - Confirmation that things are working as expected.\n# DEBUG (10) - Detailed information, typically of interest only when diagnosing problems.\n# NOTSET (0) - ",
"_____no_output_____"
],
[
"logFormatter = '%(asctime)s - %(levelname)s - %(message)s' # logging formatted as time, level name, and message\nlogging.basicConfig(format=logFormatter, level=logging.INFO) # sets the default logging level, and the log formatting\nlogger = logging.getLogger(__name__) # create a module-level logger\n# run first log\nlogger.info(\"initial log\")",
"2020-12-04 20:57:50,697 - INFO - initial log\n"
]
],
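[
[
"Since the root level is set to INFO, only records at INFO and above are emitted. A quick sketch to verify:\n```python\nlogger.debug(\"suppressed: DEBUG (10) is below INFO (20)\") # not shown\nlogger.warning(\"emitted: WARNING (30) is at or above INFO (20)\")\n```",
"_____no_output_____"
]
],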
[
[
"## Loading the data",
"_____no_output_____"
],
[
"<b>2017 Reviews - Training dataset: [Amazon Echo Dot 2 Reviews Dataset](https://www.kaggle.com/PromptCloudHQ/amazon-echo-dot-2-reviews-dataset)</b>\n\n\n<b>2018 Review - Test dataset: [Amazon Alexa Reviews](https://www.kaggle.com/sid321axn/amazon-alexa-reviews)</b>",
"_____no_output_____"
]
],
[
[
"df_train_raw = pd.read_csv(\"data/reviews_2017.csv\")\ndf_test_raw = pd.read_csv(\"data/reviews_2018.tsv\", sep = \"\\t\")",
"_____no_output_____"
]
],
[
[
"## Exploratory Data Analysis of Each Dataset",
"_____no_output_____"
],
[
"### Training Data Amazon Echo Dot2 Reviews ",
"_____no_output_____"
]
],
[
[
"df_train_raw.head()",
"_____no_output_____"
],
[
"df_train_raw.shape",
"_____no_output_____"
],
[
"# Place raw data for use into a separate working dataframe with simplified column names\ndf_train = df_train_raw[['Review Text', 'Rating']]\ndf_train.columns = ['review', 'rating']",
"_____no_output_____"
],
[
"df_train.head()",
"_____no_output_____"
],
[
"df_train.shape",
"_____no_output_____"
],
[
"# Examine training set for NAs, verify data types, and get summary statistics\ndf_train.isnull().sum()",
"_____no_output_____"
],
[
"df_train.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 6855 entries, 0 to 6854\nData columns (total 2 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 review 6852 non-null object\n 1 rating 6855 non-null int64 \ndtypes: int64(1), object(1)\nmemory usage: 107.2+ KB\n"
],
[
"# Get summary statistics of the rating column\ndf_train.describe().T",
"_____no_output_____"
],
[
"# perctage of missing reviews to overall data\n(3/6855)*100",
"_____no_output_____"
]
],
[
[
"Though three reviews are blank, as no null values are present in the ratings data and these rows are less than 1% of the values of the dataset, they can be eliminated from the analysis.",
"_____no_output_____"
]
],
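[
[
"Dropping those rows takes one line (a sketch; `dropna` removes the rows whose review is missing):\n```python\ndf_train = df_train.dropna(subset=['review'])\n```",
"_____no_output_____"
]
],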
[
[
"# Drop url column and rename columns\ndf_train_raw = df_train_raw.drop(columns = ['Pageurl'])\ndf_train_raw.columns = ['title', 'review', 'review_color', 'user_virified', 'date', \n 'useful_count', 'config', 'rating', 'declaration_text']\ndf_train_raw.fillna(0, inplace=True)\ndf_train_raw.head()",
"_____no_output_____"
],
[
"df_train_raw['review_color'].value_counts()",
"_____no_output_____"
],
[
"df_train_raw['config'].value_counts()",
"_____no_output_____"
]
],
[
[
"The majority of the reviews are about Echo Dot device and only one review is about Echo Dot + Vaux Speaker. As the Vaux speaker is just a combination speaker and battery base accessory to the Echo Dot, it can be included in evaluation as an Echo Dot.",
"_____no_output_____"
]
],
[
[
"df_train_raw['useful_count'].value_counts()",
"_____no_output_____"
]
],
[
[
"Only 28 reviews out of 6855 were upvoted (marked as useful) by other users.",
"_____no_output_____"
]
],
[
[
"# Convert date column to datetime format\ndf_train_raw['date'] = pd.to_datetime(df_train_raw['date'])\n\n# Extract year and month-day\ndf_train_raw['year'] = df_train_raw['date'].dt.year\ndf_train_raw['mm-dd'] = df_train_raw['date'].dt.strftime('%m-%d')",
"_____no_output_____"
],
[
"df_train_raw.head()",
"_____no_output_____"
],
[
"df_train_raw['year'].unique()",
"_____no_output_____"
]
],
[
[
"Reviews from the training dataset were collected in September and October 2017.",
"_____no_output_____"
]
],
[
[
"fig = plt.figure(figsize=(12,5))\ndf_train_raw['mm-dd'].value_counts().sort_index().plot(kind = 'bar')\nplt.ylabel(\"Reviews count\")\nplt.title(\"Count of reviews by date, year 2017\")\nplt.savefig('img/2017-reviews_by_date.png', dpi=100)\nplt.show()\n",
"_____no_output_____"
],
[
"fig = plt.figure(figsize=(7,5))\ndf_train_raw['rating'].value_counts().sort_index().plot(kind = 'bar')\nplt.ylabel(\"Reviews count\")\nplt.title(\"Ratings distribution, training dataset\")\nplt.savefig('img/2017-ratings_distribution.png', dpi=100)\nplt.show()",
"_____no_output_____"
]
],
[
[
"People mostly left positive reviews about the Echo Dot.",
"_____no_output_____"
],
[
"### Test Data Amazon Alexa Reviews",
"_____no_output_____"
]
],
[
[
"df_test_raw.head()",
"_____no_output_____"
],
[
"df_test = df_test_raw[['verified_reviews', 'rating']]\ndf_test.columns = ['review', 'rating']",
"_____no_output_____"
],
[
"df_test.head()",
"_____no_output_____"
],
[
"df_test.shape",
"_____no_output_____"
],
[
"df_test_raw.head()",
"_____no_output_____"
],
[
"df_test_raw.shape",
"_____no_output_____"
],
[
"df_test_raw['variation'].value_counts().plot(kind = 'barh')\nplt.xlabel(\"Reviews count\")\nplt.title(\"Distribution of the reviews by device\")\nplt.savefig('img/2018-reviews_by_device.png', dpi=100)\nplt.show()",
"_____no_output_____"
]
],
[
[
"The reviews in the test dataset are about different variations of Echo Dot device and other devices like Fire TV Stick. The most popular device is Black Dot. As finish variations are physical characteristic choice of the device and not Alexa, we will not consider the difference in finishes.",
"_____no_output_____"
]
],
[
[
"# Convert date column to datetime format\ndf_test_raw['date'] = pd.to_datetime(df_test_raw['date'])\n\n# Extract year and month-day\ndf_test_raw['year'] = df_test_raw['date'].dt.year\ndf_test_raw['mm-dd'] = df_test_raw['date'].dt.strftime('%m-%d')",
"_____no_output_____"
],
[
"df_test_raw.head()",
"_____no_output_____"
],
[
"df_test_raw['year'].unique()",
"_____no_output_____"
]
],
[
[
"Reviews from the test dataset were collected in July 2018.",
"_____no_output_____"
]
],
[
[
"fig = plt.figure(figsize=(12,5))\ndf_test_raw['mm-dd'].value_counts().sort_index().plot(kind = 'bar')\nplt.tick_params(axis='x', which='both', labelsize=9)\nplt.ylabel(\"Reviews count\")\nplt.title(\"Count of reviews by date, year 2018\")\nplt.savefig('img/2018-reviews_by_date.png', dpi=100)\nplt.show()",
"_____no_output_____"
],
[
"fig = plt.figure(figsize=(7,5))\ndf_test_raw['rating'].value_counts().sort_index().plot(kind = 'bar')\nplt.ylabel(\"Reviews count\")\nplt.title(\"Ratings distribution, test dataset\")\nplt.savefig('img/2018-ratings_distribution.png', dpi=100)\nplt.show()",
"_____no_output_____"
]
],
[
[
"Rating distribution is very similar to the data from 2017, where most people left positive reviews.",
"_____no_output_____"
],
[
"## Review cleaning\nThe definition below uses the library spaCy, which will prepare the text for modeling and deep learning techniques with TensorFlow. The result is two pickle files.",
"_____no_output_____"
]
],
[
[
"def preprocess_reviews(df_raw, stop_words: set, eng_words: set):\n \"\"\"Perform lemmatization, remove stopwords, punctuation, non-english words.\"\"\"\n def delete_words(row): \n \"\"\"Delete stop words, punctuation, and non-english words from a review.\"\"\"\n row = row.split()\n result_row = [word for word in row \n if word not in stop_words\n and word in eng_words]\n return result_row\n \n df = df_raw.copy()\n \n # Lowercase reviews\n df['review'] = df.apply(lambda row: str(row['review']).lower(), axis=1)\n \n # Lemmatization\n df['review'] = df['review'].apply(lambda row: \" \".join([w.lemma_ for w in spacy_en(row)]))\n \n # Delete stop words and non-english words\n df['review'] = df['review'].apply(delete_words)\n \n # Join list of tokens\n df['review'] = df['review'].apply(lambda row: \" \".join(row))\n \n return df",
"_____no_output_____"
],
[
"# Load stop words\nstop_words = spacy_en.Defaults.stop_words\n\n# Add punctuation to stop words\npunctuation = string.punctuation\nstop_words.update(set(punctuation))\n\n# Add name of device to stop words\nstop_words.add('echo')\nstop_words.add('dot')\n\n\n# Load english words\neng_words = set(nltk.corpus.words.words())",
"_____no_output_____"
],
[
"%%time\ndf_train_clean = preprocess_reviews(df_train, stop_words, eng_words)\ndf_test_clean = preprocess_reviews(df_test, stop_words, eng_words)",
"CPU times: user 1min 38s, sys: 321 ms, total: 1min 38s\nWall time: 1min 38s\n"
],
[
"#delete empty reviews\ndf_train_clean = df_train_clean[df_train_clean['review'] != '']\ndf_test_clean = df_test_clean[df_test_clean['review'] != '']",
"_____no_output_____"
],
[
"# Save preprocessed data to pickle files\ndf_train_clean.to_pickle(\"data/df_train.pickle\")\ndf_test_clean.to_pickle(\"data/df_test.pickle\")",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
e7165841ed75d1be42740aad0061bbce655dbd27 | 626 | ipynb | Jupyter Notebook | notebooks/001_Revisar instalacion de Jupyter Notebook.ipynb | AltamarMx/mas20problemas | 795b1589e10927f8bdbedb44d0e587a0ac4dc049 | [
"MIT"
] | 5 | 2022-01-25T06:06:24.000Z | 2022-02-14T04:12:50.000Z | notebooks/001_Revisar instalacion de Jupyter Notebook.ipynb | AltamarMx/mas20problemas | 795b1589e10927f8bdbedb44d0e587a0ac4dc049 | [
"MIT"
] | null | null | null | notebooks/001_Revisar instalacion de Jupyter Notebook.ipynb | AltamarMx/mas20problemas | 795b1589e10927f8bdbedb44d0e587a0ac4dc049 | [
"MIT"
] | 2 | 2022-01-24T19:40:27.000Z | 2022-01-24T21:09:59.000Z | 17.388889 | 43 | 0.527157 | [
[
[
"# Jupyter Notebook\n1. Arrancar en el repo que quieres\n1. ",
"_____no_output_____"
]
]
] | [
"markdown"
] | [
[
"markdown"
]
] |
e716782f9a7cb963cf92bb375a77045089c19256 | 81,240 | ipynb | Jupyter Notebook | Ch2-3/tlfornlp-chapters2-3-spam-bert.ipynb | KayO-GH/transfer-learning-for-nlp | f71d92885aeb0dac5f0962d9538535d3ca61c942 | [
"MIT"
] | 68 | 2020-04-13T12:09:40.000Z | 2022-03-25T13:08:50.000Z | Ch2-3/tlfornlp-chapters2-3-spam-bert.ipynb | KayO-GH/transfer-learning-for-nlp | f71d92885aeb0dac5f0962d9538535d3ca61c942 | [
"MIT"
] | 123 | 2020-03-17T01:44:03.000Z | 2022-03-12T01:04:45.000Z | Ch2-3/tlfornlp-chapters2-3-spam-bert.ipynb | KayO-GH/transfer-learning-for-nlp | f71d92885aeb0dac5f0962d9538535d3ca61c942 | [
"MIT"
] | 31 | 2020-03-07T19:34:11.000Z | 2022-03-22T01:45:44.000Z | 73.255185 | 21,128 | 0.68742 | [
[
[
"# WARNING\n**Please make sure to \"COPY AND EDIT NOTEBOOK\" to use compatible library dependencies! DO NOT CREATE A NEW NOTEBOOK AND COPY+PASTE THE CODE - this will use latest Kaggle dependencies at the time you do that, and the code will need to be modified to make it work. Also make sure internet connectivity is enabled on your notebook**",
"_____no_output_____"
],
[
"# Preliminaries\nFirst install critical dependencies not already on the Kaggle docker image. **NOTE THAT THIS NOTEBOOK USES TENSORFLOW 1.14 IN ORDER TO BE COMPARED WITH ELMo, WHICH WAS NOT PORTED TO TENSORFLOW 2.X. To see equivalent Tensorflow 2.X BERT Code, see https://www.kaggle.com/azunre/tlfornlp-chapters2-3-spam-bert-tf2** ",
"_____no_output_____"
]
],
[
[
"!pip install keras==2.2.4 # critical dependency\n!pip install -q bert-tensorflow==1.0.1",
"Collecting keras==2.2.4\r\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/5e/10/aa32dad071ce52b5502266b5c659451cfd6ffcbf14e6c8c4f16c0ff5aaab/Keras-2.2.4-py2.py3-none-any.whl (312kB)\r\n\u001b[K |████████████████████████████████| 317kB 866kB/s \r\n\u001b[?25hRequirement already satisfied: scipy>=0.14 in /opt/conda/lib/python3.6/site-packages (from keras==2.2.4) (1.2.1)\r\nRequirement already satisfied: keras-preprocessing>=1.0.5 in /opt/conda/lib/python3.6/site-packages (from keras==2.2.4) (1.1.0)\r\nRequirement already satisfied: numpy>=1.9.1 in /opt/conda/lib/python3.6/site-packages (from keras==2.2.4) (1.16.4)\r\nRequirement already satisfied: pyyaml in /opt/conda/lib/python3.6/site-packages (from keras==2.2.4) (5.1.2)\r\nRequirement already satisfied: h5py in /opt/conda/lib/python3.6/site-packages (from keras==2.2.4) (2.9.0)\r\nRequirement already satisfied: six>=1.9.0 in /opt/conda/lib/python3.6/site-packages (from keras==2.2.4) (1.12.0)\r\nRequirement already satisfied: keras-applications>=1.0.6 in /opt/conda/lib/python3.6/site-packages (from keras==2.2.4) (1.0.8)\r\nInstalling collected packages: keras\r\n Found existing installation: Keras 2.3.0\r\n Uninstalling Keras-2.3.0:\r\n Successfully uninstalled Keras-2.3.0\r\nSuccessfully installed keras-2.2.4\r\n"
]
],
[
[
"\nWrite requirements to file, anytime you run it, in case you have to go back and recover Kaggle dependencies. **MOST OF THESE REQUIREMENTS WOULD NOT BE NECESSARY FOR LOCAL INSTALLATION**\n\nRequirements are hosted for each notebook in the companion github repo, and can be pulled down and installed here if needed. Companion github repo is located at https://github.com/azunre/transfer-learning-for-nlp",
"_____no_output_____"
]
],
[
[
"!pip freeze > kaggle_image_requirements.txt",
"_____no_output_____"
],
[
"# Import neural network libraries\nimport tensorflow as tf\nimport tensorflow_hub as hub\nfrom bert.tokenization import FullTokenizer\nfrom tensorflow.keras import backend as K\n\n# Initialize session\nsess = tf.Session()",
"_____no_output_____"
],
[
"# Some other key imports\nimport os\nimport re\nimport pandas as pd\nimport numpy as np\nfrom tqdm import tqdm",
"_____no_output_____"
]
],
[
[
"# Define Tokenization, Stop-word and Punctuation Removal Functions\nBefore proceeding, we must decide how many samples to draw from each class. We must also decide the maximum number of tokens per email, and the maximum length of each token. This is done by setting the following overarching hyperparameters",
"_____no_output_____"
]
],
[
[
"# Params for bert model and tokenization\nNsamp = 1000 # number of samples to generate in each class - 'spam', 'not spam'\nmaxtokens = 200 # the maximum number of tokens per document\nmaxtokenlen = 100 # the maximum length of each token",
"_____no_output_____"
]
],
[
[
"**Tokenization**",
"_____no_output_____"
]
],
[
[
"def tokenize(row):\n if row is None or row is '':\n tokens = \"\"\n else:\n try:\n tokens = row.split(\" \")[:maxtokens]\n except:\n tokens=\"\"\n return tokens",
"_____no_output_____"
]
],
[
[
"**Use regular expressions to remove unnecessary characters**\n\nNext, we define a function to remove punctuation marks and other nonword characters (using regular expressions) from the emails with the help of the ubiquitous python regex library. In the same step, we truncate all tokens to hyperparameter maxtokenlen defined above.",
"_____no_output_____"
]
],
[
[
"def reg_expressions(row):\n tokens = []\n try:\n for token in row:\n token = token.lower()\n token = re.sub(r'[\\W\\d]', \"\", token)\n token = token[:maxtokenlen] # truncate token\n tokens.append(token)\n except:\n token = \"\"\n tokens.append(token)\n return tokens",
"_____no_output_____"
]
],
[
[
"**Stop-word removal**\n\nLet’s define a function to remove stopwords - words that occur so frequently in language that they offer no useful information for classification. This includes words such as “the” and “are”, and the popular library NLTK provides a heavily-used list that will employ.",
"_____no_output_____"
]
],
[
[
"import nltk\n\nnltk.download('stopwords')\nfrom nltk.corpus import stopwords\nstopwords = stopwords.words('english') \nprint(stopwords) # see default stopwords\n\ndef stop_word_removal(row):\n token = [token for token in row if token not in stopwords]\n token = filter(None, token)\n return token",
"[nltk_data] Downloading package stopwords to /usr/share/nltk_data...\n[nltk_data] Unzipping corpora/stopwords.zip.\n['i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', \"you're\", \"you've\", \"you'll\", \"you'd\", 'your', 'yours', 'yourself', 'yourselves', 'he', 'him', 'his', 'himself', 'she', \"she's\", 'her', 'hers', 'herself', 'it', \"it's\", 'its', 'itself', 'they', 'them', 'their', 'theirs', 'themselves', 'what', 'which', 'who', 'whom', 'this', 'that', \"that'll\", 'these', 'those', 'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being', 'have', 'has', 'had', 'having', 'do', 'does', 'did', 'doing', 'a', 'an', 'the', 'and', 'but', 'if', 'or', 'because', 'as', 'until', 'while', 'of', 'at', 'by', 'for', 'with', 'about', 'against', 'between', 'into', 'through', 'during', 'before', 'after', 'above', 'below', 'to', 'from', 'up', 'down', 'in', 'out', 'on', 'off', 'over', 'under', 'again', 'further', 'then', 'once', 'here', 'there', 'when', 'where', 'why', 'how', 'all', 'any', 'both', 'each', 'few', 'more', 'most', 'other', 'some', 'such', 'no', 'nor', 'not', 'only', 'own', 'same', 'so', 'than', 'too', 'very', 's', 't', 'can', 'will', 'just', 'don', \"don't\", 'should', \"should've\", 'now', 'd', 'll', 'm', 'o', 're', 've', 'y', 'ain', 'aren', \"aren't\", 'couldn', \"couldn't\", 'didn', \"didn't\", 'doesn', \"doesn't\", 'hadn', \"hadn't\", 'hasn', \"hasn't\", 'haven', \"haven't\", 'isn', \"isn't\", 'ma', 'mightn', \"mightn't\", 'mustn', \"mustn't\", 'needn', \"needn't\", 'shan', \"shan't\", 'shouldn', \"shouldn't\", 'wasn', \"wasn't\", 'weren', \"weren't\", 'won', \"won't\", 'wouldn', \"wouldn't\"]\n"
]
],
[
[
"# Read and Preprocess Enron dataset\nRead Enron dataset and get a sense for the data by printing sample messages to screen",
"_____no_output_____"
]
],
[
[
"# Input data files are available in the \"../input/\" directory.\nfilepath = \"../input/enron-email-dataset/emails.csv\"\n\n# Read the data into a pandas dataframe called emails\nemails = pd.read_csv(filepath)\n\nprint(\"Successfully loaded {} rows and {} columns!\".format(emails.shape[0], emails.shape[1]))\nprint(emails.head())",
"Successfully loaded 517401 rows and 2 columns!\n file message\n0 allen-p/_sent_mail/1. Message-ID: <18782981.1075855378110.JavaMail.e...\n1 allen-p/_sent_mail/10. Message-ID: <15464986.1075855378456.JavaMail.e...\n2 allen-p/_sent_mail/100. Message-ID: <24216240.1075855687451.JavaMail.e...\n3 allen-p/_sent_mail/1000. Message-ID: <13505866.1075863688222.JavaMail.e...\n4 allen-p/_sent_mail/1001. Message-ID: <30922949.1075863688243.JavaMail.e...\n"
],
[
"# take a closer look at the first email\nprint(emails.loc[0][\"message\"])",
"Message-ID: <18782981.1075855378110.JavaMail.evans@thyme>\nDate: Mon, 14 May 2001 16:39:00 -0700 (PDT)\nFrom: [email protected]\nTo: [email protected]\nSubject: \nMime-Version: 1.0\nContent-Type: text/plain; charset=us-ascii\nContent-Transfer-Encoding: 7bit\nX-From: Phillip K Allen\nX-To: Tim Belden <Tim Belden/Enron@EnronXGate>\nX-cc: \nX-bcc: \nX-Folder: \\Phillip_Allen_Jan2002_1\\Allen, Phillip K.\\'Sent Mail\nX-Origin: Allen-P\nX-FileName: pallen (Non-Privileged).pst\n\nHere is our forecast\n\n \n"
],
[
"# Separate headers from the message bodies\nimport email\n\ndef extract_messages(df):\n messages = []\n for item in df[\"message\"]:\n # Return a message object structure from a string\n e = email.message_from_string(item) \n # get message body \n message_body = e.get_payload()\n messages.append(message_body)\n print(\"Successfully retrieved message body from e-mails!\")\n return messages\n\nbodies = extract_messages(emails)",
"Successfully retrieved message body from e-mails!\n"
],
[
"# extract random 10000 enron email bodies for building dataset\nimport random\nbodies_df = pd.DataFrame(random.sample(bodies, 10000))\n\n# expand default pandas display options to make emails more clearly visible when printed\npd.set_option('display.max_colwidth', 300)\n\nbodies_df.head() # you could do print(bodies_df.head()), but Jupyter displays this nicer for pandas DataFrames",
"_____no_output_____"
]
],
[
[
"The following (commented out) code is arguably the more \"pythonic\" way of achieving the extraction of bodies from messages. It is only 2 lines long and achieves the same result. However, we feel the code above is more transparent with regards to how the processing is carried out, and as such leave this here for the python experts if they prefer.",
"_____no_output_____"
]
],
[
[
"#messages = emails[\"message\"].apply(email.message_from_string)\n#bodies_df = messages.apply(lambda x: x.get_payload()).sample(10000)",
"_____no_output_____"
]
],
[
[
"# Read and Preprocess Fraudulent \"419\" Email Corpus",
"_____no_output_____"
]
],
[
[
"filepath = \"../input/fraudulent-email-corpus/fradulent_emails.txt\"\nwith open(filepath, 'r',encoding=\"latin1\") as file:\n data = file.read()\n \n# split on a code word appearing close to the beginning of each email\nfraud_emails = data.split(\"From r\")\n\nprint(\"Successfully loaded {} spam emails!\".format(len(fraud_emails)))",
"Successfully loaded 3978 spam emails!\n"
],
[
"fraud_bodies = extract_messages(pd.DataFrame(fraud_emails,columns=[\"message\"],dtype=str))\nfraud_bodies_df = pd.DataFrame(fraud_bodies[1:])\n\nfraud_bodies_df.head() # you could do print(fraud_bodies_df.head()), but Jupyter displays this nicer for pandas DataFrames",
"Successfully retrieved message body from e-mails!\n"
]
],
[
[
"# Putting It All Together To Assemble Dataset\n\nNow, putting all the preprocessing steps together we assemble our dataset...",
"_____no_output_____"
]
],
[
[
"import random\n\n# Convert everything to lower-case, truncate to maxtokens and truncate each token to maxtokenlen\nEnronEmails = bodies_df.iloc[:,0].apply(tokenize)\nEnronEmails = EnronEmails.apply(stop_word_removal)\nEnronEmails = EnronEmails.apply(reg_expressions)\nEnronEmails = EnronEmails.sample(Nsamp)\n\nSpamEmails = fraud_bodies_df.iloc[:,0].apply(tokenize)\nSpamEmails = SpamEmails.apply(stop_word_removal)\nSpamEmails = SpamEmails.apply(reg_expressions)\nSpamEmails = SpamEmails.sample(Nsamp)\n\nraw_data = pd.concat([SpamEmails,EnronEmails], axis=0).values",
"_____no_output_____"
],
[
"print(\"Shape of combined data represented as numpy array is:\")\nprint(raw_data.shape)\nprint(\"Data represented as numpy array is:\")\nprint(raw_data)\n\n# corresponding labels\nCategories = ['spam','notspam']\nheader = ([1]*Nsamp)\nheader.extend(([0]*Nsamp))",
"Shape of combined data represented as numpy array is:\n(2000,)\nData represented as numpy array is:\n[list(['dear', 'madamsiri', 'greet', 'name', 'godi', 'mrs', 'rebecca', 'atkins', 'south', 'africai', 'married', 'late', 'mr', 'smith', 'atkins', 'ofblessed', 'memory', 'oil', 'explorer', 'inkuwait', 'brunei', 'fifteen', 'years', 'beforehe', 'died', 'year', 'we', 'married', 'twelve', 'years', 'without', 'achild', 'he', 'died', 'brief', 'illness', 'thatlasted', 'four', 'days', 'before', 'deathwe', 'devoted', 'god', 'respectother', 'serve', 'himsince', 'death', 'i', 'battling', 'withboth', 'cancer', 'fibroid', 'problems', 'when', 'latehusband', 'alive', 'deposited', 'sum', 'ofmillion', 'eighteen', 'million', 'five', 'hundredthousand', 'us', 'dollars', 'deposit', 'companyin', 'europethe', 'money', 'deposited', 'familyvaluablestreasure', 'security', 'reasonsrecently', 'doctor', 'told', 'i', 'sixmonths', 'live', 'due', 'cancer', 'problem', 'thislead', 'stroke', 'attack', 'bother', 'thoughbecause', 'i', 'cannot', 'move', 'i', 'use', 'tohaving', 'known', 'condition', 'i', 'decided', 'entrust', 'thisfund', 'either', 'philanthropic', 'organization', 'ordevoted', 'individual', 'utilize', 'moneythe', 'way', 'i', 'going', 'instructs', 'herein', 'theusmillion', 'goes', 'ensure', 'areprepared', 'task', 'ahead', '', 'money', 'willbe', 'used', 'sincerity'])\n list(['attention', 'permit', 'inform', 'desire', 'going', 'business', 'relationship', 'you', 'i', 'got', 'contact', 'information', 'countrys', 'informationdirectory', 'desperate', 'search', 'someone', 'assist', 'mesecretly', 'confidentially', 'relocating', 'managing', 'funds', 'i', 'prayed', 'selected', 'name', 'among', 'names', 'due', 'esteeming', 'nature', 'trustworthy', 'person', 'i', 'business', '', 'i', 'must', 'hesitate', 'confine', 'simple', 'sincere', 'businessi', 'mrs', 'hilda', 'p', 'tomson', 'legal', 'wife', 'chief', 'securityofficer', 'late', 'major', 'tomson', 'page', 'my', 'husband', 'former', 'headof', 'presidential', 'guard', 'mr', 'charles', 'taylor', 'liberia', 'presidentbefore', 'death', 'husband', 'may', 'th', 'attemptby', 'rebels', 'life', 'president', 'ctaylor', 'brutally', 'injured', 'subsequently', 'rushed', 'presidential', 'hospital', 'monrovia', 'later', 'diedbefore', 'deathhe', 'secretly', 'called', 'bedside', 'told', 'sum', 'usmillion', 'dollars', 'seventeen', 'million', 'dollarswhich', 'deposited', 'name', 'legal', 'wife', 'beneficiary', 'next', 'kin', 'depositing', 'fund'])\n list(['from', 'george', 'amachreegeneral', 'manager', 'financenigeria', 'lng', 'limitedc', '', 'c', 'towersplot', '', 'sanusi', 'fafunwa', 'streetvictoria', 'islandp', 'm', 'b', '', 'marina', 'lagosnigeriawwwnlngcomre', 'transfer', 'of', 'usdtwenty', 'million', 'five', 'hundred', 'thousand', 'united', 'states', 'dollars', 'only', 'to', 'a', 'safe', 'accountsirit', 'warmest', 'pleasure', 'writing', 'confidential', 'business', 'offer', 'irrespective', 'factthat', 'met', 'done', 'thing', 'reimpose', 'absolute', 'confidence', 'nevertheless', 'adage', 'says', 'day', 'begins', 'story', 'line', 'strong', 'perspective', 'i', 'determined', 'to', 'communicate', 'much', 'conviction', 'give', 'proposal', 'a', 'second', 'thought', 'considerationas', 'earlier', 'stated', 'i', 'mr', 'daniel', 'amachree', 'general', 'manager', 'finance', 'working', 'nigeria', 'liquefied', 'natural', 'gas', 'nlng', 'my', 'agency', 'produce', 'export', 'nlg', 'ngl', 'safely', 'reliably', 'profitable', 'grow', 'business', 'full', 'potential', 'helping', 'put', 'flares', 'nigeria', 
'and', 'virtue', 'unique', 'position', 'office', 'general', 'manager', 'finance', 'i', 'elevated', 'commission', 'become', 'chairman', 'foreign', 'contract', 'tender', 'board', 'committee', 'whose', 'responsibility', 'award', 'supervise', 'foreign', 'contract', 'ensure', 'executed', 'promptlyconsequently', 'i', 'chairman'])\n ...\n list(['', 'forwarded', 'daren', 'j', 'farmerhouect', '', '', 'am', 'jamie', 'lynnenron', '', 'pmto', 'mark', 'warnerhouectect', 'toby', 'kuehletsenronenron', 'sean', 'sargentcorpenronenron', 'teresa', 'wrighthrcorpenronenron', 'kevin', 'm', 'prestohouectect', 'micardo', 'johnscorpenronenron', 'greg', 'pipercorpenronenron', 'allan', 'weatherfordgcoenronenron', 'brad', 'morsehouectect', 'rafael', 'rizopatronlonectect', 'gary', 'w', 'lamphierhouectect', 'daren', 'j', 'farmerhouectect', 'perron', 'rogershouectect', 'ryan', 'smithaacorpenronenron', 'archie', 'n', 'eubanksenron_developmentenron_development', 'mike', 'd', 'smithhoueesees', 'fred', 'rhodesaacorpenronenron', 'chris', 'barnesenrongateway', 'eric', 'bensonetsenronenron', 'charles', 'caineecfenronenron', 'mike', 'croucherhouectect', 'clarence', 'davisecfenronenron', 'john', 'garrettcorpenronenron', 'willie', 'haggertyecfenronenron', 'eric', 'j', 'hardyhouectect', 'dwight', 'jameshouectect', 'kevin', 'kuykendallhouectect', 'richard', 'a', 'lammersenron_developmentenron_development', 'john', 'meeksetsenronenron', 'richard', 'orellanacorpenronenron', 'john', 'meeksetsenronenron', 'fimber', 'phillipecfenronenron', 'ruben', 'salinasecfenronenron', 'john', 'shupakhouectectcc', 'subject', 'spring', 'basketball', 'tournament', 'forwarded', 'jamie', 'lynnetsenron', '', '', 'pm', 'jamie', 'lynn', '', 'pmto', 'oliver', 'brownenron_developmentenron_development', 'martin', 'rosettaenron_developmentenron_development', 'pam', 'newsomeenron_developmentenron_development', 'ryan', 'woodsenron_developmentenron_development', 'mike', 'layneenron_developmentenron_development', 'fred', 'salinasenron_developmentenron_development', 'sean', 'longenron_developmentenron_development', 'kevin', 'ruffcornenron_developmentenron_development', 'keith', 'sparksenron_developmentenron_development', 'aaron', 'mackeyenron_developmentenron_development', 'braedi', 'craigenron', 'communicationsenron', 'communications', 'jennifer', 'mcclainenron', 'communicationsenron', 'communications', 'john', 'garrettcorpenronenron', 'darrell', 'schoolcraftetsenronenron', 'stanley', 'hortoncorpenronenron', 'pamela', 'carteretsenronenron', 'brandon', 'whittakerenron_developmentenron_development', 'chris', 'williamsenrongateway', 'johnny', 'mitchelletsenronenron', 'mike', 'bryantotsenronenron', 'rick', 'buyhouectect', 'bjorn', 'hagelmannhouectect', 'tom', 'moranhouectect', 'patrick', 'hickeyenron', 'communicationsenron', 'communications', 'brant', 'reveshouectect', 'samantha', 't', 'davidsonhouectect', 'tangie', 'dykesetsenronenron', 'tara', 'e', 'turkhouectect', 'dan', 'leffhoueesees', 'mark', 'pratoriushoueesees', 'brad', 'pedenhoueesees', 'david', 'blankenshipcorpenronenron', 'milton', 'brownhrcorpenronenron', 'ken', 'reeveshouectect', 'rory', 'junemancorpenronenron', 'fred', 'bridgewaterenron_developmentenron_development', 'sandy', 'robertscorpenronenron', 'derek', 'andersonhouectect', 'david', 'odellhrcorpenronenroncc', 'bcc', 'jamie', 'lynnetsenronsubject', 'spring', 'basketball', 'tournamentenron', 'is'])\n list(['i', 'forwarded', 'copy', 'fernleys', 'announcement', 'regarding', 'new', 'role', 'forglobal', 'products', 'trading', 'support', 'your', 
'discussions', 'houston', 'last', 'week', 'musthave', 'swayed', 'accept', 'challenges', 'there', 'congratulations', 'onyour', 'new', 'role', 'as', 'step', 'new', 'position', 'please', 'let', 'know', 'ican', 'best', 'support', 'role', 'global', 'risk', 'management', 'operations', 'i', 'amforwarding', 'copy', 'announcement', 'regarding', 'expanded', 'role', 'wassent', 'january', 'assuming', 'paying', 'particular', 'attentionto', 'trading', 'operations', 'issues', 'point', 'time', 'may', 'one', 'thoseemail', 'messages', 'skipped', 'i', 'thought', 'might', 'helpful', 'youto', 'understand', 'focus', 'i', 'working', 'trading', 'operationsgroups', 'around', 'globe', 'i', 'certainly', 'look', 'forward', 'working', 'youtim', 'kevin', 'i', 'understand', 'kevin', 'move', 'role', 'focused', 'fulltime', 'onsystems', 'implementation', 'who', 'assume', 'daytoday', 'lead', 'responsibilityfor', 'global', 'products', 'trading', 'operations', 'houston', 'thatindividual', 'report', 'you', 'i', 'london', 'office', 'week', 'march', '', 'i', 'leavehouston'])\n list(['would', 'guys', 'see', 'flowing', 'current', 'day', 'points', 'please', 'forwarded', 'chris', 'germanyhouect', '', '', 'pm', 'alleman', 'alden', 'allemanaldenepenergycom', '', '', 'pmto', 'cc', 'subject', 'high', 'btu', 'level', 'on', 'the', 'larosa', 'lateraljanuary', '', '', '', 'pm', 'cctto', 'all', 'tennessee', 'customersre', 'high', 'btu', 'level', 'on', 'the', 'larosa', 'lateraltennessee', 'encountering', 'high', 'btu', 'levels', 'larosa', 'lateral', 'these', 'highbtulevels', 'subject', 'customer', 'complaints', 'delivered', 'gasis', 'interfering', 'proper', 'operation', 'various', 'lines', 'regulatorsmeters', 'appliances', 'located', 'larosa', 'lateral', 'beginning', 'thejanuary', '', 'gas', 'day', 'tennessee', 'requests', 'point', 'operators', 'receipt', 'meterslocated', 'larosa', 'lateral', 'tender', 'tennessee', 'gas', 'meets', 'gasquality', 'specifications', 'found', 'general', 'terms', 'conditions', 'thetennessee', 'gas', 'tariffthe', 'following', 'list', 'receipt', 'meters', 'located', 'larosa', 'lateral', 'mustang', 'island', 'dehyd', 'redfish', 'bay', 'dehyd', 'stedman', 'island', 'dehyd', 'ingleside', 'dehyd', 'mustang', 'island', '', 'red', 'fish', 'bay', 'transport', 'mustang', 'islandabsent', 'resolution', 'problems', 'beginning', 'january', '', 'gas', 'daytennessee', 'accept', 'nominations', 'meters', 'larosa', 'lateraltendering', 'specification'])]\n"
],
[
"# function for shuffling data in unison with labels/header\ndef unison_shuffle(a, b):\n p = np.random.permutation(len(b))\n data = a[p]\n header = np.asarray(b)[p]\n return data, header\n\n# function for converting data into the right format, due to the difference in required format from sklearn models\n# we expect a single string per email here, versus a list of tokens for the sklearn models previously explored\ndef convert_data(raw_data,header):\n converted_data, labels = [], []\n for i in range(raw_data.shape[0]):\n out = ' '.join(raw_data[i])\n converted_data.append(out)\n labels.append(header[i])\n #print(i)\n converted_data = np.array(converted_data, dtype=object)[:, np.newaxis]\n \n return converted_data, np.array(labels)\n\nraw_data, header = unison_shuffle(raw_data, header)\n\n# split into independent 70% training and 30% testing sets\nidx = int(0.7*raw_data.shape[0])\n# 70% of data for training\ntrain_x, train_y = convert_data(raw_data[:idx],header[:idx])\n# remaining 30% for testing\ntest_x, test_y = convert_data(raw_data[idx:],header[idx:])\n\nprint(\"train_x/train_y list details, to make sure it is of the right form:\")\nprint(len(train_x))\nprint(train_x)\nprint(train_y[:5])\nprint(train_y.shape)",
"train_x/train_y list details, to make sure it is of the right form:\n1400\n[['image image american express gold card events dear louise american express gold card events offers prime tickets nations hottest entertainment events exclusive benefit american express gold card platinum card centurion card corporate centurion card corporate platinum card executive corporate card small business members for select cardmembers like you american express making access prime tickets easier ever you receive regular email updates notifying you upcoming events cases shows announced to general public if wish receive email updates please see instructions bottom email look for this weeks highlights proof in new haven to view new events image link complete listing gold card events already sale please select a city image atlanta image select florida cities image image phoenix image boston image houston image st louis image charlotte image los angeles image washington dc image chicago']\n ['set wpatti thompson x']\n ['not plane boat train ill call pm cdt original messagefrom steffes james d senttuesday october pmtokeene patricksubjectre intervention nevada power rate caseshow pm friday afternoon you planejim original messagefrom keene patrick senttuesday october pmtosteffes james dsubjectre intervention nevada power rate casesim nm thur fri always available phone got time mind original messagefrom steffes james d senttuesday october pmtokeene patricksubjectre intervention nevada power rate casespat if friday lets sit thanksjim original messagefrom keene patrick senttuesday october pmtobonnie drinkwater bdrinkwatermcdonaldcaranocomenron kaufman paulccsteffes james dsubjectre intervention nevada power rate caseswe intervene based potential interest serving customers pursuant abshort answerpauls language it possible may want reduce revenue requirements recommending use financial products minimize utility revenue risk we intervened ab regulation proceeding enron energy services inc enron power marketing inc if think raise issues outside unbundling limit']\n ...\n ['roger presented comments transwetern permits which issued klagetoh luepp compressor station the critical sensitive issue us item below the permits really restrict us cause us major operational concerns held ppmvd nox co throughout entire operating range turbines we would really appreciate help assistance issue let know additional information may need the responsible official name change danny pribble vp southwestoperationsthe draft permit limits nox co emissions ppmv o basedon hour average except periods startup shutdown at fand ge data indicates rates achievable alloperating conditions however ge data indicates nox co may high ppmv lowload conditions lowerthe permit application represented turbine operations base load ofthe time reduced load operations loadoccurring of time twp also included safety factor cover periods ofreduced operating loads']\n ['done mail woman']\n ['oml sa nd floor building grayston drive sandton joburgsouth africadear i mrsmith anderso head audit department african developemnet banksouth africa and reasons become obvious read on i obtained address particulars internet address listing please exercise patience read message i urgent confidential business proposition you on june th crude oil contractor kuwait national petroleum corporation engr ahmed youseff mustafa national kuwait made numbered time fixed deposit calendar months value musd thirty six million united states dollars onlyin branch on maturity sent a routine 
notification forwarding address got reply after month sent reminder finally contract employers the kuwait national petroleum corporation wrote inform us engr a y mustafa died automobile accident died without making a will attempts trace next kin kuwait embassy fruitless i therefore made investigations discovered mr mustafa actively opposed government country kuwait']]\n[0 0 0 1 1]\n(1400,)\n"
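],
[
"# Added sanity check (not in the original notebook): after the shuffled 70/30 split,\n# both classes should appear in roughly equal proportion in each set.\nprint(\"train label counts:\", np.unique(train_y, return_counts=True))\nprint(\"test label counts:\", np.unique(test_y, return_counts=True))",
"_____no_output_____"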
]
],
[
[
"Since 70% of 2000 is 1400, looks good! (for Nsamp=1000)\n\nOnwards!",
"_____no_output_____"
],
[
"# Build, Train and Evaluate BERT Model\nFirst define critical functions that define various components of the BERT model ",
"_____no_output_____"
]
],
[
[
"class InputExample(object):\n \"\"\"A single training/test example for simple sequence classification.\"\"\"\n\n def __init__(self, guid, text_a, text_b=None, label=None):\n \"\"\"Constructs a InputExample.\n Args:\n guid: Unique id for the example.\n text_a: string. The untokenized text of the first sequence. For single\n sequence tasks, only this sequence must be specified.\n text_b: (Optional) string. The untokenized text of the second sequence.\n Only must be specified for sequence pair tasks.\n label: (Optional) string. The label of the example. This should be\n specified for train examples, but not for test examples.\n \"\"\"\n self.guid = guid\n self.text_a = text_a\n self.text_b = text_b\n self.label = label\n\n\ndef create_tokenizer_from_hub_module(bert_path):\n \"\"\"Get the vocab file and casing info from the Hub module.\"\"\"\n bert_module = hub.Module(bert_path)\n tokenization_info = bert_module(signature=\"tokenization_info\", as_dict=True)\n vocab_file, do_lower_case = sess.run(\n [tokenization_info[\"vocab_file\"], tokenization_info[\"do_lower_case\"]]\n )\n\n return FullTokenizer(vocab_file=vocab_file, do_lower_case=do_lower_case)\n\n\ndef convert_single_example(tokenizer, example, max_seq_length=256):\n \"\"\"Converts a single `InputExample` into a single `InputFeatures`.\"\"\"\n\n tokens_a = tokenizer.tokenize(example.text_a)\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[0 : (max_seq_length - 2)]\n\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n return input_ids, input_mask, segment_ids, example.label\n\n\ndef convert_examples_to_features(tokenizer, examples, max_seq_length=256):\n \"\"\"Convert a set of `InputExample`s to a list of `InputFeatures`.\"\"\"\n\n input_ids, input_masks, segment_ids, labels = [], [], [], []\n for example in tqdm(examples, desc=\"Converting examples to features\"):\n input_id, input_mask, segment_id, label = convert_single_example(\n tokenizer, example, max_seq_length\n )\n input_ids.append(input_id)\n input_masks.append(input_mask)\n segment_ids.append(segment_id)\n labels.append(label)\n return (\n np.array(input_ids),\n np.array(input_masks),\n np.array(segment_ids),\n np.array(labels).reshape(-1, 1),\n )\n\n\ndef convert_text_to_examples(texts, labels):\n \"\"\"Create InputExamples\"\"\"\n InputExamples = []\n for text, label in zip(texts, labels):\n InputExamples.append(\n InputExample(guid=None, text_a=\" \".join(text), text_b=None, label=label)\n )\n return InputExamples",
"_____no_output_____"
]
],
[
[
"Next, we define a custom tf hub BERT layer",
"_____no_output_____"
]
],
[
[
"class BertLayer(tf.keras.layers.Layer):\n def __init__(\n self,\n n_fine_tune_layers=10,\n pooling=\"mean\",\n bert_path=\"https://tfhub.dev/google/bert_uncased_L-12_H-768_A-12/1\",\n **kwargs,\n ):\n self.n_fine_tune_layers = n_fine_tune_layers\n self.trainable = True\n self.output_size = 768\n self.pooling = pooling\n self.bert_path = bert_path\n if self.pooling not in [\"first\", \"mean\"]:\n raise NameError(\n f\"Undefined pooling type (must be either first or mean, but is {self.pooling}\"\n )\n\n super(BertLayer, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.bert = hub.Module(\n self.bert_path, trainable=self.trainable, name=f\"{self.name}_module\"\n )\n\n # Remove unused layers\n trainable_vars = self.bert.variables\n if self.pooling == \"first\":\n trainable_vars = [var for var in trainable_vars if not \"/cls/\" in var.name]\n trainable_layers = [\"pooler/dense\"]\n\n elif self.pooling == \"mean\":\n trainable_vars = [\n var\n for var in trainable_vars\n if not \"/cls/\" in var.name and not \"/pooler/\" in var.name\n ]\n trainable_layers = []\n else:\n raise NameError(\n f\"Undefined pooling type (must be either first or mean, but is {self.pooling}\"\n )\n\n # Select how many layers to fine tune\n for i in range(self.n_fine_tune_layers):\n trainable_layers.append(f\"encoder/layer_{str(11 - i)}\")\n\n # Update trainable vars to contain only the specified layers\n trainable_vars = [\n var\n for var in trainable_vars\n if any([l in var.name for l in trainable_layers])\n ]\n\n # Add to trainable weights\n for var in trainable_vars:\n self._trainable_weights.append(var)\n\n for var in self.bert.variables:\n if var not in self._trainable_weights:\n self._non_trainable_weights.append(var)\n\n super(BertLayer, self).build(input_shape)\n\n def call(self, inputs):\n inputs = [K.cast(x, dtype=\"int32\") for x in inputs]\n input_ids, input_mask, segment_ids = inputs\n bert_inputs = dict(\n input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids\n )\n if self.pooling == \"first\":\n pooled = self.bert(inputs=bert_inputs, signature=\"tokens\", as_dict=True)[\n \"pooled_output\"\n ]\n elif self.pooling == \"mean\":\n result = self.bert(inputs=bert_inputs, signature=\"tokens\", as_dict=True)[\n \"sequence_output\"\n ]\n\n mul_mask = lambda x, m: x * tf.expand_dims(m, axis=-1)\n masked_reduce_mean = lambda x, m: tf.reduce_sum(mul_mask(x, m), axis=1) / (\n tf.reduce_sum(m, axis=1, keepdims=True) + 1e-10)\n input_mask = tf.cast(input_mask, tf.float32)\n pooled = masked_reduce_mean(result, input_mask)\n else:\n raise NameError(f\"Undefined pooling type (must be either first or mean, but is {self.pooling}\")\n\n return pooled\n\n def compute_output_shape(self, input_shape):\n return (input_shape[0], self.output_size)",
"_____no_output_____"
]
],
[
[
"We now use the custom TF hub BERT embedding layer within a higher-level function to define the overall model. More specifically, we put a dense trainable layer of output dimension 256 on top of the BERT embedding.",
"_____no_output_____"
]
],
[
[
"# Function to build overall model\ndef build_model(max_seq_length):\n in_id = tf.keras.layers.Input(shape=(max_seq_length,), name=\"input_ids\")\n in_mask = tf.keras.layers.Input(shape=(max_seq_length,), name=\"input_masks\")\n in_segment = tf.keras.layers.Input(shape=(max_seq_length,), name=\"segment_ids\")\n bert_inputs = [in_id, in_mask, in_segment]\n \n # just extract BERT features, don't fine-tune\n bert_output = BertLayer(n_fine_tune_layers=0)(bert_inputs)\n # train dense classification layer on top of extracted features\n dense = tf.keras.layers.Dense(256, activation=\"relu\")(bert_output)\n pred = tf.keras.layers.Dense(1, activation=\"sigmoid\")(dense)\n\n model = tf.keras.models.Model(inputs=bert_inputs, outputs=pred)\n model.compile(loss=\"binary_crossentropy\", optimizer=\"adam\", metrics=[\"accuracy\"])\n model.summary()\n\n return model\n\n# Function to initialize variables correctly\ndef initialize_vars(sess):\n sess.run(tf.local_variables_initializer())\n sess.run(tf.global_variables_initializer())\n sess.run(tf.tables_initializer())\n K.set_session(sess)",
"_____no_output_____"
],
[
"# tf hub bert model path\nbert_path = \"https://tfhub.dev/google/bert_uncased_L-12_H-768_A-12/1\" \n\n# Instantiate tokenizer\ntokenizer = create_tokenizer_from_hub_module(bert_path)\n\n# Convert data to InputExample format\ntrain_examples = convert_text_to_examples(train_x, train_y)\ntest_examples = convert_text_to_examples(test_x, test_y)\n\n# Convert to features\n(train_input_ids,train_input_masks,train_segment_ids,train_labels) = \\\nconvert_examples_to_features(tokenizer, train_examples, max_seq_length=maxtokens)\n(test_input_ids,test_input_masks,test_segment_ids,test_labels) = \\\nconvert_examples_to_features(tokenizer, test_examples, max_seq_length=maxtokens)\n\n# Build model\nmodel = build_model(maxtokens)\n\n# Instantiate variables\ninitialize_vars(sess)\n\n# Train model\nhistory = model.fit([train_input_ids, train_input_masks, train_segment_ids],train_labels,\n validation_data=([test_input_ids, test_input_masks, test_segment_ids],test_labels),\n epochs=5,batch_size=32)",
"Converting examples to features: 100%|██████████| 1400/1400 [00:04<00:00, 285.24it/s]\nConverting examples to features: 100%|██████████| 600/600 [00:01<00:00, 319.39it/s]\n"
]
],
[
[
"**Visualize Convergence**",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\n\ndf_history = pd.DataFrame(history.history)\nfig,ax = plt.subplots()\nplt.plot(range(df_history.shape[0]),df_history['val_acc'],'bs--',label='validation')\nplt.plot(range(df_history.shape[0]),df_history['acc'],'r^--',label='training')\nplt.xlabel('epoch')\nplt.ylabel('accuracy')\nplt.title('BERT Email Classification Training')\nplt.legend(loc='best')\nplt.grid()\nplt.show()\n\nfig.savefig('BERTConvergence.eps', format='eps')\nfig.savefig('BERTConvergence.pdf', format='pdf')\nfig.savefig('BERTConvergence.png', format='png')\nfig.savefig('BERTConvergence.svg', format='svg')",
"_____no_output_____"
]
],
[
[
"**Make figures downloadable to local system in interactive mode**",
"_____no_output_____"
]
],
[
[
"from IPython.display import HTML\ndef create_download_link(title = \"Download file\", filename = \"data.csv\"): \n html = '<a href={filename}>{title}</a>'\n html = html.format(title=title,filename=filename)\n return HTML(html)\n\ncreate_download_link(filename='BERTConvergence.svg')",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7167c433bb2f98216512faf7ec94f8482ca15dc | 9,576 | ipynb | Jupyter Notebook | notebooks/chap10.ipynb | maciejkos/ModSimPy | fe80a994689dafd282c3d479b19c90c34c590eb5 | [
"MIT"
] | 2 | 2019-04-27T22:43:12.000Z | 2019-11-11T15:12:23.000Z | notebooks/chap10.ipynb | maciejkos/ModSimPy | fe80a994689dafd282c3d479b19c90c34c590eb5 | [
"MIT"
] | 33 | 2019-10-09T18:50:22.000Z | 2022-03-21T01:39:48.000Z | notebooks/chap10.ipynb | maciejkos/ModSimPy | fe80a994689dafd282c3d479b19c90c34c590eb5 | [
"MIT"
] | null | null | null | 21.375 | 224 | 0.535923 | [
[
[
"# Modeling and Simulation in Python\n\nChapter 10\n\nCopyright 2017 Allen Downey\n\nLicense: [Creative Commons Attribution 4.0 International](https://creativecommons.org/licenses/by/4.0)\n",
"_____no_output_____"
]
],
[
[
"# Configure Jupyter so figures appear in the notebook\n%matplotlib inline\n\n# Configure Jupyter to display the assigned value after an assignment\n%config InteractiveShell.ast_node_interactivity='last_expr_or_assign'\n\n# import functions from the modsim.py module\nfrom modsim import *\n\nfrom pandas import read_html",
"_____no_output_____"
]
],
[
[
"### Under the hood\n\nTo get a `DataFrame` and a `Series`, I'll read the world population data and select a column.\n\n`DataFrame` and `Series` contain a variable called `shape` that indicates the number of rows and columns.",
"_____no_output_____"
]
],
[
[
"filename = 'data/World_population_estimates.html'\ntables = read_html(filename, header=0, index_col=0, decimal='M')\ntable2 = tables[2]\ntable2.columns = ['census', 'prb', 'un', 'maddison', \n 'hyde', 'tanton', 'biraben', 'mj', \n 'thomlinson', 'durand', 'clark']\ntable2.shape",
"_____no_output_____"
],
[
"census = table2.census / 1e9\ncensus.shape",
"_____no_output_____"
],
[
"un = table2.un / 1e9\nun.shape",
"_____no_output_____"
]
],
[
[
"A `DataFrame` contains `index`, which labels the rows. It is an `Int64Index`, which is similar to a NumPy array.",
"_____no_output_____"
]
],
[
[
"table2.index",
"_____no_output_____"
]
],
[
[
"And `columns`, which labels the columns.",
"_____no_output_____"
]
],
[
[
"table2.columns",
"_____no_output_____"
]
],
[
[
"And `values`, which is an array of values.",
"_____no_output_____"
]
],
[
[
"table2.values",
"_____no_output_____"
]
],
[
[
"A `Series` does not have `columns`, but it does have `name`.",
"_____no_output_____"
]
],
[
[
"census.name",
"_____no_output_____"
]
],
[
[
"It contains `values`, which is an array.",
"_____no_output_____"
]
],
[
[
"census.values",
"_____no_output_____"
]
],
[
[
"And it contains `index`:",
"_____no_output_____"
]
],
[
[
"census.index",
"_____no_output_____"
]
],
[
[
"If you ever wonder what kind of object a variable refers to, you can use the `type` function. The result indicates what type the object is, and the module where that type is defined.\n\n`DataFrame`, `Int64Index`, `Index`, and `Series` are defined by Pandas.\n\n`ndarray` is defined by NumPy.",
"_____no_output_____"
]
],
[
[
"type(table2)",
"_____no_output_____"
],
[
"type(table2.index)",
"_____no_output_____"
],
[
"type(table2.columns)",
"_____no_output_____"
],
[
"type(table2.values)",
"_____no_output_____"
],
[
"type(census)",
"_____no_output_____"
],
[
"type(census.index)",
"_____no_output_____"
],
[
"type(census.values)",
"_____no_output_____"
]
],
[
[
"## Optional exercise\n\nThe following exercise provides a chance to practice what you have learned so far, and maybe develop a different growth model. If you feel comfortable with what we have done so far, you might want to give it a try.\n\n**Optional Exercise:** On the Wikipedia page about world population estimates, the first table contains estimates for prehistoric populations. The following cells process this table and plot some of the results.",
"_____no_output_____"
]
],
[
[
"filename = 'data/World_population_estimates.html'\ntables = read_html(filename, header=0, index_col=0, decimal='M')\nlen(tables)",
"_____no_output_____"
]
],
[
[
"Select `tables[1]`, which is the second table on the page.",
"_____no_output_____"
]
],
[
[
"table1 = tables[1]\ntable1.head()",
"_____no_output_____"
]
],
[
[
"Not all agencies and researchers provided estimates for the same dates. Again `NaN` is the special value that indicates missing data.",
"_____no_output_____"
]
],
[
[
"table1.tail()",
"_____no_output_____"
]
],
[
[
"Again, we'll replace the long column names with more convenient abbreviations.",
"_____no_output_____"
]
],
[
[
"table1.columns = ['PRB', 'UN', 'Maddison', 'HYDE', 'Tanton', \n 'Biraben', 'McEvedy & Jones', 'Thomlinson', 'Durand', 'Clark']",
"_____no_output_____"
]
],
[
[
"Some of the estimates are in a form Pandas doesn't recognize as numbers, but we can coerce them to be numeric.",
"_____no_output_____"
]
],
[
[
"for col in table1.columns:\n table1[col] = pd.to_numeric(table1[col], errors='coerce')",
"_____no_output_____"
]
],
[
[
"Here are the results. Notice that we are working in millions now, not billions.",
"_____no_output_____"
]
],
[
[
"table1.plot()\ndecorate(xlim=[-10000, 2000], xlabel='Year', \n ylabel='World population (millions)',\n title='Prehistoric population estimates')\nplt.legend(fontsize='small');",
"_____no_output_____"
]
],
[
[
"We can use `xlim` to zoom in on everything after Year 0.",
"_____no_output_____"
]
],
[
[
"table1.plot()\ndecorate(xlim=[0, 2000], xlabel='Year', \n ylabel='World population (millions)',\n title='CE population estimates')\nplt.legend(fontsize='small');",
"_____no_output_____"
]
],
[
[
"See if you can find a model that fits these data well from Year 0 to 1950.\n\nHow well does your best model predict actual population growth from 1950 to the present?",
"_____no_output_____"
]
],
[
[
"# Solution goes here",
"_____no_output_____"
],
[
"# Solution goes here",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
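"code",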
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e7168a60277f7f14146e8ea2410daf9d8c750786 | 395,250 | ipynb | Jupyter Notebook | Images_concatinating_ andTestImages.ipynb | Eng-Mo/CarND-Advanced-Lane-Lines | 1fc98e892f22ecdae81e1b02b10335be5eabcd88 | [
"MIT"
] | null | null | null | Images_concatinating_ andTestImages.ipynb | Eng-Mo/CarND-Advanced-Lane-Lines | 1fc98e892f22ecdae81e1b02b10335be5eabcd88 | [
"MIT"
] | null | null | null | Images_concatinating_ andTestImages.ipynb | Eng-Mo/CarND-Advanced-Lane-Lines | 1fc98e892f22ecdae81e1b02b10335be5eabcd88 | [
"MIT"
] | 1 | 2020-04-21T10:50:43.000Z | 2020-04-21T10:50:43.000Z | 2,037.371134 | 170,276 | 0.948736 | [
[
[
"import numpy as np\nimport PIL\nfrom PIL import Image\n\nlist_im = [\n# './output_images/colorTH-test1.png'\n# ,'./output_images/combined-test1.png'\n './output_images/fs.png'\n , './output_images/cc.png'\n ]\nimgs = [ PIL.Image.open(i) for i in list_im ]\n# #pick the image which is the smallest, and resize the others to match it (can be arbitrary image shape here)\n# min_shape = sorted( [(np.sum(i.size), i.size ) for i in imgs])[0][1]\n# imgs_comb = np.hstack( (np.asarray( i) ) for i in imgs ) \n\n# # save that beautiful picture\n# imgs_comb = PIL.Image.fromarray( imgs_comb)\n# imgs_comb.save( './output_images/cc.png' ) \n\n# for a vertical stacking it is simple: use vstack\nimgs_comb = np.vstack( (np.asarray( i ) for i in imgs ) )\nimgs_comb = PIL.Image.fromarray( imgs_comb)\nimgs_comb.save( './output_images/Lane_thresh.png' )",
"_____no_output_____"
],
[
"from LaneDetect import *\n\nimage=cv2.imread('./test_images/test4.jpg')\nimage=cv2.cvtColor(image,cv2.COLOR_BGR2RGB)\n\nplt.imshow(image)\nplt.show()\n\n[ret, mtx, dist, rvecs,tvecs] =undistorT(image)\nfont = cv2.FONT_HERSHEY_SIMPLEX\nframe=image\ngray =cv2.cvtColor(frame,cv2.COLOR_RGB2GRAY)\nsobel_edge= SobelThr(gray) \nplt.title('Sobel')\nplt.imshow(sobel_edge,cmap='gray')\nplt.savefig('./output_images/sobelTH-test1.png')\nplt.show()\ncolor_threshld= ColorThreshold(frame)\nplt.title('Color threshold')\nplt.imshow(color_threshld,cmap='gray')\nplt.savefig('./output_images/colorTH-test1.png')\nplt.show()\ncomI=combinI(sobel_edge,color_threshld) \nplt.title('Combined threshold')\nplt.imshow(comI,cmap='gray')\nplt.savefig('./output_images/combined-test1.png')\nplt.show()\nroib=region_of_interest(comI)\nplt.title('ROI')\nplt.imshow(roib,cmap='gray')\nplt.savefig('./output_images/roi-test1.png')\nplt.show()\n\n\nundistI=undistresult(roib, mtx,dist)\npI, pM=prespectI(undistI) \n# pI = cv2.inRange(pI, 10, 255)\nMinv = np.linalg.inv(pM)\nplt.title('Perspective transform')\nplt.imshow(pI,cmap='gray')\nplt.savefig('./output_images/pi-test1.png')\nplt.show()\n[left_fit, ploty,right_fit,lc, rc, offset]= LineFitting(pI)\nuW=unwrappedframe(frame,pI,Minv,left_fit, ploty,right_fit)\nplt.title('result')\nplt.imshow(uW)\nplt.savefig('./output_images/result-test1.png')\nplt.show()\nuW=cv2.putText(uW,'Curvature left: %.1f m'%lc,(50,50), \n font, 1,(255,255,255),2,cv2.LINE_AA)\nuW=cv2.putText(uW,'Curvature right: %.1f m'%rc,(50,100),\n font, 1,(255,255,255),2,cv2.LINE_AA)\nuW=cv2.putText(uW,'Car position to centre: %.1f m'%offset,(50,150),\n font, 1,(255,255,255),2,cv2.LINE_AA)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code"
]
] |
e7168ad8d985ae3a44392f5adf60448abe8ec65d | 10,108 | ipynb | Jupyter Notebook | federated learning with multiprocessing (CIFAR-10).ipynb | sonhamin/parallel_fedlearning | a12d4322988a0097403dc2c176ec572a833db38b | [
"MIT"
] | null | null | null | federated learning with multiprocessing (CIFAR-10).ipynb | sonhamin/parallel_fedlearning | a12d4322988a0097403dc2c176ec572a833db38b | [
"MIT"
] | null | null | null | federated learning with multiprocessing (CIFAR-10).ipynb | sonhamin/parallel_fedlearning | a12d4322988a0097403dc2c176ec572a833db38b | [
"MIT"
] | 2 | 2020-12-09T07:21:45.000Z | 2021-01-03T11:59:41.000Z | 25.851662 | 121 | 0.479521 | [
[
[
"import matplotlib\nmatplotlib.use('Agg')\n%matplotlib inline\nimport matplotlib.pyplot as plt\n\nimport torch\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets, transforms\n\nfrom utils.customloader import CustomDataset, DatasetSplit\nfrom utils.dataloader import get_dataloader\nfrom utils.train_glob import train_global_model, test_model\n\nfrom models.Update import LocalUpdate\nfrom models.Fed import FedAvg\n\nimport random\nimport os\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"\"\n\nimport numpy as np\nimport copy\n\n\n\nimport torch\nclass Args:\n #federated arugments\n epochs=50\n num_users=10\n \n local_ep=3\n local_bs=100\n bs=128\n lr=0.01\n momentum=0.5\n\n num_channels=1\n num_classes=10\n verbose='store_true'\n seed=1\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n \n \n\nargs = Args() \n##############SET SEEDS FOR REPRODUCIBILITY#############\nnp.random.seed(args.seed)\nrandom.seed(args.seed)\ntorch.manual_seed(args.seed)\n##############~SET SEEDS FOR REPRODUCIBILITY#############",
"_____no_output_____"
]
],
[
[
"# Define Dataloader / Model / Optimizer / Loss",
"_____no_output_____"
]
],
[
[
"transform = transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n\ndataset_train = datasets.CIFAR10('./data/cifar', train=True, download=True, transform=transform)\nglobal_train_loader = DataLoader(dataset_train, batch_size=1000, shuffle=True)\n\ndataset_test = datasets.CIFAR10('./data/cifar', train=False, download=True, transform=transform)\ntest_loader = DataLoader(dataset_test, batch_size=1000, shuffle=False)\n\n",
"Files already downloaded and verified\nFiles already downloaded and verified\n"
],
[
"import torch.nn as nn\nclass CNNCifar(nn.Module):\n def __init__(self, args):\n super(CNNCifar, self).__init__()\n self.conv1 = nn.Conv2d(3, 6, 5)\n self.pool = nn.MaxPool2d(2, 2)\n self.conv2 = nn.Conv2d(6, 16, 5)\n self.fc1 = nn.Linear(16 * 5 * 5, 120)\n self.fc2 = nn.Linear(120, 84)\n self.fc3 = nn.Linear(84, args.num_classes)\n\n def forward(self, x):\n \n x = self.conv1(x)\n x = F.relu(x)\n x = self.pool(x)\n \n x = self.conv2(x)\n x = F.relu(x)\n x = self.pool(x)\n \n x = x.view(-1, 16 * 5 * 5)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x \n",
"_____no_output_____"
],
[
"net_glob = CNNCifar(args=args).to(args.device)\nnet_glob.train()\n\noptimizer = optim.SGD(net_glob.parameters(), lr=args.lr, momentum=args.momentum)\nsloss = F.cross_entropy\n",
"_____no_output_____"
],
[
"checkpoint_globalnet = copy.deepcopy(net_glob)",
"_____no_output_____"
]
],
[
[
"# Define Distribution of Data",
"_____no_output_____"
]
],
[
[
"unique, counts = np.unique(global_train_loader.dataset.targets, return_counts=True)\nprint(unique)\nprint(counts)\nsorted_y = copy.deepcopy(global_train_loader.dataset.targets)\nsorted_index_y = np.argsort(np.squeeze(sorted_y))\n\nclass_dist=[]\n\nfor i in range(args.num_classes):\n print(i)\n class_dist.append(np.array(sorted_index_y[sum(counts[:i]):sum(counts[:i+1])], dtype=np.int64))\n \nnon_iid = np.array(class_dist)",
"[0 1 2 3 4 5 6 7 8 9]\n[5000 5000 5000 5000 5000 5000 5000 5000 5000 5000]\n0\n1\n2\n3\n4\n5\n6\n7\n8\n9\n"
],
[
"individual = []\nfor j in range(10):\n individual.append(np.array_split(class_dist[j], 10))\n\nuser_dist=[]\nfor i in range(10):\n temp=[]\n for j in range(10):\n temp.append(individual[j][i])\n \n \n user_dist.append((np.concatenate(temp)).astype(np.int64)) \n \niid=np.array(user_dist)",
"_____no_output_____"
]
],
[
[
"# Train Model",
"_____no_output_____"
]
],
[
[
"from utils.multiprocessing import multi_train_local_dif\nfrom torch.utils.data.sampler import Sampler\nfrom torchvision import datasets, transforms\nimport torch.multiprocessing as mp",
"_____no_output_____"
],
[
"distribution = iid\n\nif __name__ == '__main__':\n \n mp.set_start_method('fork', force=True)\n torch.set_num_threads(1)\n \n checkpoint_globalnet11 = copy.deepcopy(net_glob)\n \n for i in range(args.epochs):\n \n print('--------------------------------------------')\n print(\"\\n\\n\\nstart training epoch : \" + str(i) + \"\\n\\n\\n\")\n print('--------------------------------------------')\n \n procs=[]\n loss_locals=[]\n w_locals=[] \n \n q_l = mp.Queue()\n q_w = mp.Queue() \n \n for i in range(args.num_users):\n\n p = mp.Process(target=multi_train_local_dif, args=(q_l, q_w, args, \n i, sloss, global_train_loader, \n distribution, checkpoint_globalnet11))\n procs.append(p)\n p.start()\n\n for p in procs:\n loss_locals.append(q_l.get(p))\n w_locals.append(q_w.get(p))\n\n for p in procs:\n p.join()\n\n \n print('--------------------------------------------\\n\\n')\n w_glob = FedAvg(w_locals)\n checkpoint_globalnet11.load_state_dict(w_glob)\n test_model(checkpoint_globalnet11, test_loader, sloss, args)\n print('\\n\\n--------------------------------------------')\n \n\n\n\n",
"_____no_output_____"
]
],
[
[
"# Test Model",
"_____no_output_____"
]
],
[
[
"#After fedlearning\nprint('Before Federated Learning')\ntest_model(net_glob, test_loader, sloss)",
"Before Federated Learning\n\nTest set: Average loss: 2.30375 \nAccuracy: 943/10000 (9.43%)\n\n"
],
[
"#After fedlearning\nprint('After Federated Learning -- checkpoint')\ntest_model(checkpoint_globalnet11, test_loader, sloss)",
"After Federated Learning -- checkpoint\n\nTest set: Average loss: 1.13745 \nAccuracy: 6007/10000 (60.07%)\n\n"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
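"code",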
"code",
"code"
],
[
"markdown"
],
[
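"code",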
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e7168e2f852d8a5a80ce7f114f19e2529a515203 | 4,982 | ipynb | Jupyter Notebook | walrus/walrus-bmi-psfetch.ipynb | eWaterCycle/grpc4bmi-examples | d9291c11de3859248f3926aa5e2aa0f987c9afdf | [
"Apache-2.0"
] | null | null | null | walrus/walrus-bmi-psfetch.ipynb | eWaterCycle/grpc4bmi-examples | d9291c11de3859248f3926aa5e2aa0f987c9afdf | [
"Apache-2.0"
] | 14 | 2018-08-21T07:43:14.000Z | 2021-04-02T12:44:30.000Z | walrus/walrus-bmi-psfetch.ipynb | eWaterCycle/grpc4bmi-examples | d9291c11de3859248f3926aa5e2aa0f987c9afdf | [
"Apache-2.0"
] | null | null | null | 19.848606 | 355 | 0.505219 | [
[
[
"Run [WALRUS](https://github.com/ClaudiaBrauer/WALRUS) on it's example dataset using \n1. Parametersetdb\n2. grpc4bmi",
"_____no_output_____"
]
],
[
[
"from ewatercycle.parametersetdb import build_from_urls\nfrom os.path import abspath",
"_____no_output_____"
],
[
"# Prepare input\nparameter_set = build_from_urls(\n config_format='yaml', config_url='data:text/plain,data: data/PEQ_Hupsel.dat\\nparameters:\\n cW: 200\\n cV: 4\\n cG: 5.0e+6\\n cQ: 10\\n cS: 4\\n dG0: 1250\\n cD: 1500\\n aS: 0.01\\n st: loamy_sand\\nstart: 367416 # 2011120000\\nend: 368904 # 2012020000\\nstep: 1\\ncentroid:\\n lon: 6.65443600418465\\n lat: 52.06132311280826\\n',\n datafiles_format='svn', datafiles_url='https://github.com/ClaudiaBrauer/WALRUS/trunk/demo/data',\n)",
"_____no_output_____"
],
[
"parameter_set.save_datafiles('./input')",
"_____no_output_____"
],
[
"parameter_set.config['data'] = abspath('input/PEQ_Hupsel.dat')\nparameter_set.save_config('input/walrus_hupsel.yml')",
"_____no_output_____"
],
[
"from grpc4bmi.bmi_client_docker import BmiClientDocker",
"_____no_output_____"
],
[
"model = BmiClientDocker(image='ewatercycle/walrus-grpc4bmi:v0.2.0', image_port=55555, \n input_dirs=(\"./input\",), work_dir='.')",
"_____no_output_____"
],
[
"model.get_component_name()",
"_____no_output_____"
],
[
"model.initialize(abspath('input/walrus_hupsel.yml'))",
"_____no_output_____"
],
[
"model.update()\nmodel.update()\nmodel.update()",
"_____no_output_____"
],
[
"model.get_start_time()",
"_____no_output_____"
],
[
"model.get_current_time()",
"_____no_output_____"
],
[
"model.get_output_var_names()",
"_____no_output_____"
],
[
"model.get_value('Q')",
"_____no_output_____"
],
[
"model.get_var_units('Q')",
"_____no_output_____"
],
[
"del model",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7168f8d55d41ada712c8922a2fc3622351be9cc | 592 | ipynb | Jupyter Notebook | intro_apren_auto/chapters/ch6/ch6-svm-02-rbf.ipynb | bmalcover/intro_apren_auto | 65d9bdc707648627cf71b30943be4c255d8b58a0 | [
"MIT"
] | 1 | 2022-03-10T10:49:39.000Z | 2022-03-10T10:49:39.000Z | intro_apren_auto/chapters/ch6/ch6-svm-02-rbf.ipynb | bmalcover/intro_apren_auto | 65d9bdc707648627cf71b30943be4c255d8b58a0 | [
"MIT"
] | null | null | null | intro_apren_auto/chapters/ch6/ch6-svm-02-rbf.ipynb | bmalcover/intro_apren_auto | 65d9bdc707648627cf71b30943be4c255d8b58a0 | [
"MIT"
] | 2 | 2020-10-01T16:16:13.000Z | 2022-03-10T10:46:28.000Z | 17.939394 | 74 | 0.550676 | [
[
[
"# Máquinas de vector soporte: Radial basis function and kernel trick",
"_____no_output_____"
]
]
] | [
"markdown"
] | [
[
"markdown"
]
] |
e716b1a6e19d6f6c0270920e003e262edb7c0258 | 436,910 | ipynb | Jupyter Notebook | cadl-tutorial/Session_1/.ipynb_checkpoints/session_1-checkpoint.ipynb | GChaudhary/Completed-ML-DL-Online-Tutorials | 3eb7bbc5ac71ad628e86929b32e780e38100815a | [
"Apache-2.0"
] | null | null | null | cadl-tutorial/Session_1/.ipynb_checkpoints/session_1-checkpoint.ipynb | GChaudhary/Completed-ML-DL-Online-Tutorials | 3eb7bbc5ac71ad628e86929b32e780e38100815a | [
"Apache-2.0"
] | null | null | null | cadl-tutorial/Session_1/.ipynb_checkpoints/session_1-checkpoint.ipynb | GChaudhary/Completed-ML-DL-Online-Tutorials | 3eb7bbc5ac71ad628e86929b32e780e38100815a | [
"Apache-2.0"
] | null | null | null | 382.583187 | 191,210 | 0.927589 | [
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline",
"_____no_output_____"
],
[
"plt.style.use('ggplot')",
"_____no_output_____"
],
[
"from libs import utils",
"_____no_output_____"
],
[
"files = utils.get_celeb_files()",
"_____no_output_____"
],
[
"idx = np.random.randint(100)\nprint (\"Index = \", idx)\nimg = plt.imread(files[idx])\nprint (img.shape)\nplt.imshow(img)",
"_____no_output_____"
],
[
"plt.figure()\nplt.imshow(img[:,:,0], cmap='gray')\n\nplt.figure()\nplt.imshow(img[:,:,1], cmap='gray')\n\nplt.figure()\nplt.imshow(img[:,:,2], cmap='gray')",
"_____no_output_____"
],
[
"imgs = utils.get_celeb_imgs()",
"_____no_output_____"
],
[
"plt.imshow(imgs[43])",
"_____no_output_____"
],
[
"imgs[21].dtype",
"_____no_output_____"
],
[
"data = np.array(imgs)\ndata.shape, data.dtype",
"_____no_output_____"
],
[
"mean_img = np.mean(data, axis=0)\nplt.imshow(mean_img.astype(np.uint8))",
"_____no_output_____"
],
[
"std_img = np.std(data, axis=0)\nplt.imshow(std_img.astype(np.uint8))",
"_____no_output_____"
],
[
"plt.imshow(np.mean(std_img, axis=2).astype(np.uint8))",
"_____no_output_____"
],
[
"flattened = data.ravel()\nprint (flattened.shape)\nprint (data[:1])\nprint (flattened[:10])",
"_____no_output_____"
],
[
"plt.hist(flattened, 255)",
"_____no_output_____"
],
[
"plt.hist(mean_img.ravel(), 255)",
"_____no_output_____"
],
[
"plt.hist(std_img.ravel(), 255)",
"_____no_output_____"
],
[
"bins = 20\nfig, axs = plt.subplots(1, 3, figsize=(12, 6), sharey=True, sharex=True)\n\naxs[0].hist((data[1]).ravel(), bins)\naxs[0].set_title('img distribution')\n\naxs[1].hist((mean_img).ravel(), bins)\naxs[1].set_title('mean distribution')\n\naxs[2].hist((data[1] - mean_img).ravel(), bins)\naxs[2].set_title('(img - mean) distribution')",
"_____no_output_____"
],
[
"plt.imshow((data[0] - mean_img).astype(np.short))",
"_____no_output_____"
],
[
"data[0].shape, mean_img.shape, std_img.shape",
"_____no_output_____"
],
[
"%matplotlib inline",
"_____no_output_____"
],
[
"fig, axs = plt.subplots(1, 3, figsize=(12, 6), sharey=True, sharex=True)\n\naxs[0].hist((data[0] - mean_img).ravel(), bins)\naxs[0].set_title('(img - mean) distribution')\n\naxs[1].hist((std_img).ravel(), bins)\naxs[1].set_title('std deviation distribution')\n\naxs[2].hist(((data[0] - mean_img) / std_img).ravel(), bins)\naxs[2].set_title('((img - mean) / std_dev) distribution')",
"_____no_output_____"
],
[
"axs[2].set_xlim([-150, 150])\naxs[2].set_xlim([-100, 100])\naxs[2].set_xlim([-50, 50])\naxs[2].set_xlim([-10, 10])\naxs[2].set_xlim([-5, 5])",
"_____no_output_____"
]
],
[
[
"# ------------------------------------------------------------------",
"_____no_output_____"
]
],
[
[
"import tensorflow as tf",
"_____no_output_____"
],
[
"x = np.linspace(-3.0, 3.0, 100)\nprint (x)\nprint (x.shape)\nprint (x.dtype)",
"[-3. -2.93939394 -2.87878788 -2.81818182 -2.75757576 -2.6969697\n -2.63636364 -2.57575758 -2.51515152 -2.45454545 -2.39393939 -2.33333333\n -2.27272727 -2.21212121 -2.15151515 -2.09090909 -2.03030303 -1.96969697\n -1.90909091 -1.84848485 -1.78787879 -1.72727273 -1.66666667 -1.60606061\n -1.54545455 -1.48484848 -1.42424242 -1.36363636 -1.3030303 -1.24242424\n -1.18181818 -1.12121212 -1.06060606 -1. -0.93939394 -0.87878788\n -0.81818182 -0.75757576 -0.6969697 -0.63636364 -0.57575758 -0.51515152\n -0.45454545 -0.39393939 -0.33333333 -0.27272727 -0.21212121 -0.15151515\n -0.09090909 -0.03030303 0.03030303 0.09090909 0.15151515 0.21212121\n 0.27272727 0.33333333 0.39393939 0.45454545 0.51515152 0.57575758\n 0.63636364 0.6969697 0.75757576 0.81818182 0.87878788 0.93939394\n 1. 1.06060606 1.12121212 1.18181818 1.24242424 1.3030303\n 1.36363636 1.42424242 1.48484848 1.54545455 1.60606061 1.66666667\n 1.72727273 1.78787879 1.84848485 1.90909091 1.96969697 2.03030303\n 2.09090909 2.15151515 2.21212121 2.27272727 2.33333333 2.39393939\n 2.45454545 2.51515152 2.57575758 2.63636364 2.6969697 2.75757576\n 2.81818182 2.87878788 2.93939394 3. ]\n(100,)\nfloat64\n"
],
[
"x = tf.linspace(-3.0, 3.0, 100)\nprint (x)",
"Tensor(\"LinSpace:0\", shape=(100,), dtype=float32)\n"
],
[
"g = tf.get_default_graph()",
"_____no_output_____"
],
[
"[op.name for op in g.get_operations()]",
"_____no_output_____"
],
[
"g.get_tensor_by_name('LinSpace'+':0')",
"_____no_output_____"
],
[
"sess = tf.Session()\ncomputed_x = sess.run(x)\nprint (computed_x)\n\ncomputed_x_eval = x.eval(session=sess)\nprint (computed_x_eval)\n\nsess.close()",
"[-3. -2.939394 -2.87878799 -2.81818175 -2.75757575 -2.69696975\n -2.63636351 -2.5757575 -2.5151515 -2.4545455 -2.3939395 -2.33333325\n -2.27272725 -2.21212125 -2.15151501 -2.090909 -2.030303 -1.969697\n -1.90909088 -1.84848475 -1.78787875 -1.72727275 -1.66666663 -1.6060605\n -1.5454545 -1.4848485 -1.42424238 -1.36363626 -1.30303025 -1.24242425\n -1.18181813 -1.12121201 -1.060606 -1. -0.939394 -0.87878776\n -0.81818175 -0.75757575 -0.69696951 -0.63636351 -0.5757575 -0.5151515\n -0.4545455 -0.39393926 -0.33333325 -0.27272725 -0.21212101 -0.15151501\n -0.090909 -0.030303 0.030303 0.09090924 0.15151525 0.21212125\n 0.27272749 0.33333349 0.3939395 0.4545455 0.5151515 0.57575774\n 0.63636374 0.69696975 0.75757599 0.81818199 0.87878799 0.939394 1.\n 1.060606 1.12121201 1.18181849 1.24242449 1.30303049 1.36363649\n 1.4242425 1.4848485 1.5454545 1.60606098 1.66666698 1.72727299\n 1.78787899 1.84848499 1.909091 1.969697 2.030303 2.090909\n 2.15151548 2.21212149 2.27272749 2.33333349 2.3939395 2.4545455\n 2.5151515 2.57575798 2.63636398 2.69696999 2.75757599 2.81818199\n 2.87878799 2.939394 3. ]\n[-3. -2.939394 -2.87878799 -2.81818175 -2.75757575 -2.69696975\n -2.63636351 -2.5757575 -2.5151515 -2.4545455 -2.3939395 -2.33333325\n -2.27272725 -2.21212125 -2.15151501 -2.090909 -2.030303 -1.969697\n -1.90909088 -1.84848475 -1.78787875 -1.72727275 -1.66666663 -1.6060605\n -1.5454545 -1.4848485 -1.42424238 -1.36363626 -1.30303025 -1.24242425\n -1.18181813 -1.12121201 -1.060606 -1. -0.939394 -0.87878776\n -0.81818175 -0.75757575 -0.69696951 -0.63636351 -0.5757575 -0.5151515\n -0.4545455 -0.39393926 -0.33333325 -0.27272725 -0.21212101 -0.15151501\n -0.090909 -0.030303 0.030303 0.09090924 0.15151525 0.21212125\n 0.27272749 0.33333349 0.3939395 0.4545455 0.5151515 0.57575774\n 0.63636374 0.69696975 0.75757599 0.81818199 0.87878799 0.939394 1.\n 1.060606 1.12121201 1.18181849 1.24242449 1.30303049 1.36363649\n 1.4242425 1.4848485 1.5454545 1.60606098 1.66666698 1.72727299\n 1.78787899 1.84848499 1.909091 1.969697 2.030303 2.090909\n 2.15151548 2.21212149 2.27272749 2.33333349 2.3939395 2.4545455\n 2.5151515 2.57575798 2.63636398 2.69696999 2.75757599 2.81818199\n 2.87878799 2.939394 3. ]\n"
],
[
"sess = tf.Session(graph=g)\nsess.close()",
"_____no_output_____"
],
[
"g2 = tf.Graph()",
"_____no_output_____"
],
[
"sess = tf.InteractiveSession()\na = x.eval()",
"_____no_output_____"
],
[
"print (x.get_shape())\n\nprint (x.get_shape().as_list())",
"(100,)\n[100]\n"
],
[
"mean = 0.0\nsigma = 1.0\n\nz = (tf.exp(tf.negative(tf.pow(x - mean, 2.0) /\n (2.0 * tf.pow(sigma, 2.0)))) *\n (1.0 / (sigma * tf.sqrt(2.0 * 3.1415))))",
"_____no_output_____"
],
[
"res = z.eval()\nplt.plot(res)",
"_____no_output_____"
],
[
"ksize = z.get_shape().as_list()[0]\n\nz_2d = tf.matmul(tf.reshape(z,[ksize, 1]), tf.transpose(tf.reshape(z,[ksize, 1])))\n\nplt.imshow(z_2d.eval())",
"_____no_output_____"
],
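[
"# (added sketch, not in the original notebook) z_2d above is the outer product of\n# the 1-D Gaussian with itself. It is not normalized, so convolving with it rescales\n# image intensities; dividing by the kernel sum gives a brightness-preserving blur.\nz_2d_norm = z_2d / tf.reduce_sum(z_2d)\nprint (z_2d_norm.eval().sum()) # should be ~1.0",
"_____no_output_____"
],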
[
"img = plt.imread(\"Emilia Clarke.jpg\")\nfrom scipy.misc import imresize\nimg = imresize(img, [300,400], interp='bilinear')\nplt.imshow(img)\nprint (img.shape)",
"(300, 400, 3)\n"
],
[
"def rgb2gray(rgb):\n return np.dot(rgb[...,:3], [0.299, 0.587, 0.114])",
"_____no_output_____"
],
[
"img_gray = rgb2gray(img)",
"_____no_output_____"
],
[
"img_4d = img_gray.astype(np.float32).reshape([1, img_gray.shape[0], img_gray.shape[1], 1])\nprint (img_4d.shape)\nimg_4d.dtype",
"(1, 300, 400, 1)\n"
],
[
"img_4d_tensor = tf.reshape(img_gray.astype(np.float32), [1, img_gray.shape[0], img_gray.shape[1], 1])\nprint (img_4d_tensor.get_shape().as_list())\nimg_4d_tensor",
"[1, 300, 400, 1]\n"
],
[
"z_4d = tf.reshape(z_2d, [ksize, ksize, 1, 1])\nprint (z_4d.get_shape().as_list())",
"[100, 100, 1, 1]\n"
],
[
"convolved = tf.nn.conv2d(img_4d_tensor, z_4d, strides=[1,1,1,1], padding='SAME')\nres = convolved.eval()\nprint (res.shape)",
"(1, 300, 400, 1)\n"
],
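[
"# (added sketch, not in the original notebook) 'SAME' padding preserves the spatial\n# size; 'VALID' keeps only positions where the 100x100 kernel fully overlaps the\n# image, shrinking the output.\nconvolved_valid = tf.nn.conv2d(img_4d_tensor, z_4d, strides=[1,1,1,1], padding='VALID')\nprint (convolved_valid.eval().shape) # (1, 201, 301, 1) for a 300x400 image and 100x100 kernel",
"_____no_output_____"
],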
[
"plt.imshow(res[0,:,:,0], cmap='gray')",
"_____no_output_____"
],
[
"xs = tf.linspace(-3.0, 3.0, ksize)",
"_____no_output_____"
],
[
"ys = tf.sin(xs)",
"_____no_output_____"
],
[
"plt.figure()\nplt.plot(ys.eval())",
"_____no_output_____"
],
[
"ys = tf.reshape(ys, [ksize, 1])",
"_____no_output_____"
],
[
"ones = tf.ones((1, ksize))\nwave = tf.matmul(ys, ones)\nplt.imshow(wave.eval(), cmap='gray')",
"_____no_output_____"
],
[
"gabor = tf.multiply(wave, z_2d)\nplt.imshow(gabor.eval(), cmap='gray')",
"_____no_output_____"
],
[
"wave, z_2d",
"_____no_output_____"
],
[
"a = 100\nblur_filter = np.array([[0.0625, 0.125, 0.0625],\n [0.1250, 0.250, 0.1250],\n [0.0625, 0.125, 0.0625]])\nprint (blur_filter.shape)\nblur_filter = np.stack((blur_filter, blur_filter, blur_filter), axis=2)\nprint (blur_filter.shape)\n\nblur_filter = tf.reshape(a * blur_filter.astype(np.float32), [3,3,3,1])\n",
"(3, 3)\n(3, 3, 3)\n"
],
[
"img = tf.placeholder(tf.float32, shape=[None, None, 3], name='img')\n#img_3d = tf.expand_dims(img, 2)\nprint (img.get_shape().as_list())\n\nimg_4d = tf.expand_dims(img, 0)\nprint(img_4d.get_shape().as_list())\n\nmean = tf.placeholder(tf.float32, name='mean')\nsigma = tf.placeholder(tf.float32, name='sigma')\nksize = tf.placeholder(tf.int32, name='ksize')\n\nx = tf.linspace(-3.0, 3.0, ksize)\nz = (tf.exp(tf.negative(tf.pow(x - mean, 2.0) /\n (2.0 * tf.pow(sigma, 2.0)))) *\n (1.0 / (sigma * tf.sqrt(2.0 * 3.1415))))\nz_2d = tf.matmul(\n tf.reshape(z, tf.stack([ksize, 1])),\n tf.reshape(z, tf.stack([1, ksize])))\nys = tf.sin(x)\nys = tf.reshape(ys, tf.stack([ksize, 1]))\nones = tf.ones(tf.stack([1, ksize]))\nwave = tf.matmul(ys, ones)\ngabor = tf.multiply(wave, z_2d)\ngabor_4d = tf.reshape(gabor, tf.stack([ksize, ksize, 1, 1]))\n\nconvolved = tf.nn.conv2d(img_4d, blur_filter, strides=[1, 1, 1, 1], padding='SAME', name='convolved')\nconvolved_img = convolved[0, :, :, 0]",
"[None, None, 3]\n[1, None, None, 3]\n"
],
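[
"# (added sketch) note that the `convolved` op above uses `blur_filter`, not the Gabor\n# kernel that was just assembled. A hypothetical graph applying the Gabor kernel to a\n# single-channel (grayscale) image could look like this:\nimg_gray_ph = tf.placeholder(tf.float32, shape=[None, None], name='img_gray')\nimg_gray_4d = tf.expand_dims(tf.expand_dims(img_gray_ph, 0), 3)\ngabor_convolved = tf.nn.conv2d(img_gray_4d, gabor_4d, strides=[1, 1, 1, 1], padding='SAME')",
"_____no_output_____"
],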
[
"resimage = convolved_img.eval(feed_dict={\n img: (plt.imread('img_align_celeba/000029.jpg')),\n mean: 0.0,\n sigma: 0.5,\n ksize: 5})",
"_____no_output_____"
],
[
"plt.figure()\nplt.imshow(rgb2gray(plt.imread('img_align_celeba/000029.jpg')), cmap='gray')\nplt.title('Original')\n\nplt.figure()\nplt.imshow(resimage, cmap='gray')\nplt.title('Convolved')",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e716c237a387c217ba7ad85dc5ee8ab2e49b7e79 | 300,580 | ipynb | Jupyter Notebook | .ipynb_checkpoints/FirstModel-checkpoint.ipynb | PaolaRondon/Biomol_classifier | 5590942222561441792eb9eab877fd1159a2e0ac | [
"BSD-3-Clause"
] | null | null | null | .ipynb_checkpoints/FirstModel-checkpoint.ipynb | PaolaRondon/Biomol_classifier | 5590942222561441792eb9eab877fd1159a2e0ac | [
"BSD-3-Clause"
] | null | null | null | .ipynb_checkpoints/FirstModel-checkpoint.ipynb | PaolaRondon/Biomol_classifier | 5590942222561441792eb9eab877fd1159a2e0ac | [
"BSD-3-Clause"
] | null | null | null | 231.750193 | 54,676 | 0.888209 | [
[
[
"# Basic imports",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline \nplt.rcParams.update({'figure.max_open_warning': 0})",
"_____no_output_____"
]
],
[
[
"# Loading datasets",
"_____no_output_____"
]
],
[
[
"cns_df=pd.read_csv(\"cns_molecules.csv\", sep=\"\\t\")\nnon_cns_df=pd.read_csv(\"non_cns_molecules.csv\", sep=\"\\t\")",
"_____no_output_____"
],
[
"cns_df_length=len(cns_df)\nnon_cns_df_length=len(non_cns_df)\n\nprint(\"cns rows: {}\".format(cns_df_length))\nprint(\"non cns rows: {}\".format(non_cns_df_length))",
"cns rows: 316\nnon cns rows: 624\n"
]
],
[
[
"# New column for both datasets (1= true, 0=false)",
"_____no_output_____"
]
],
[
[
"new_cns_column=[1 for i in range(cns_df_length)]\nnew_non_cns_columns=[0 for i in range(non_cns_df_length)]",
"_____no_output_____"
],
[
"cns_df[\"is_cns_molecule\"]=new_cns_column\nnon_cns_df[\"is_cns_molecule\"]=new_non_cns_columns",
"_____no_output_____"
]
],
[
[
"### Merged dataset",
"_____no_output_____"
]
],
[
[
"mixed_df=cns_df\nmixed_df=mixed_df.append(non_cns_df)",
"_____no_output_____"
]
],
[
[
"### Shuffle dataset",
"_____no_output_____"
],
[
"The idiomatic way to do this with Pandas is to use the .sample method of your dataframe to sample all rows without replacement:\n\ndf.sample(frac=1)\nThe frac keyword argument specifies the fraction of rows to return in the random sample, so frac=1 means return all rows (in random order).\n\nNote: If you wish to shuffle your dataframe in-place and reset the index, you could do e.g.\n\ndf = df.sample(frac=1).reset_index(drop=True)\nHere, specifying drop=True prevents .reset_index from creating a column containing the old index entries.",
"_____no_output_____"
]
],
[
[
"mixed_df=mixed_df.sample(frac=1,random_state=0).reset_index(drop=True)",
"_____no_output_____"
],
[
"mixed_df.to_csv(\"molecules_v1.csv\",sep=\"\\t\",index = False, header=True); mixed_df",
"_____no_output_____"
],
[
"for c in mixed_df.columns.values:\n print(c)",
"m_name\nn_atoms_without_Hydrogen\nn_atoms_with_Hydrogen\nm_weight\nm_avg_weigth\nm_weigth_without_Hydrogen\nn_radical_electrons\nn_valence_electrons\nn_aliphatic_carbocycles\nn_aliphatic_heterocycles\nn_aliphatic_rings\nn_amide_bonds\nn_aromatic_carbocycles\nn_aromatic_heterocycles\nn_aromatic_rings\nn_saturated_carbocycles\nn_saturated_heterocycles\nn_saturated_rings\nn_HBA\nn_HBD\nn_hetero_atoms\nn_hetero_cycles\nn_rings\nn_strict_rotable_bonds\nn_non_strict_rotable_bonds\nn_primary_carbon_atoms\nn_HOH\nn_O\nn_Hydrogen_acceptors\nn_Hydrogen_donnors\nn_briged_head_atoms\nn_atoms_stereo_centers\nn_atoms_unspecified_stereo_centers\nn_spiro_atoms\nm_logp\nm_mr\nfraction_CSP3\nis_cns_molecule\n"
]
],
[
[
"# Model",
"_____no_output_____"
]
],
[
[
"from sklearn.model_selection import train_test_split\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import f1_score\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.svm import LinearSVC",
"_____no_output_____"
],
[
"clf = LinearSVC(random_state=0, tol=1e-5, dual=False)",
"_____no_output_____"
],
[
"data_frame=mixed_df.drop([\"m_name\"],axis=1)\ny=data_frame[\"is_cns_molecule\"]\nx=data_frame.drop([\"is_cns_molecule\"],axis=1)\nx=x.drop(['n_radical_electrons'],axis=1) # radical electrons is constant. WARNING ALERT",
"_____no_output_____"
],
[
"x_train, x_test, y_train, y_test=train_test_split(x,y,random_state=0,test_size=0.2)",
"_____no_output_____"
],
[
"clf.fit(x_train,y_train)",
"_____no_output_____"
],
[
"predicted=clf.predict(x_test)",
"_____no_output_____"
],
[
"matrix=confusion_matrix(y_test,predicted)\nmatrix_labels=[[\"True positive\",\"False positive\"],\n [\"False negative\",\"True negative\"]]",
"_____no_output_____"
],
[
"for i in range(2):\n for j in range(2):\n print(\"{} {}\".format(matrix_labels[i][j],matrix[i][j]))",
"True positive 104\nFalse positive 9\nFalse negative 29\nTrue negative 46\n"
],
[
"print(\"f1 score: {}%\".format(f1_score(y_test,predicted)*100)) \nprint(\"accuracy score: {}%\".format(accuracy_score(y_test,predicted)*100))",
"f1 score: 70.76923076923075%\naccuracy score: 79.7872340425532%\n"
]
],
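[
[
"# (added sketch) deriving precision and recall straight from the confusion matrix;\n# for a binary problem sklearn's matrix.ravel() returns (tn, fp, fn, tp)\ntn, fp, fn, tp = matrix.ravel()\nprecision = tp / (tp + fp)\nrecall = tp / (tp + fn)\nprint(\"precision: {}%\".format(precision*100))\nprint(\"recall: {}%\".format(recall*100))",
"_____no_output_____"
]
],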
[
[
"# Feature selection functions",
"_____no_output_____"
],
[
"#### Default parameters \nselection_type=f_classif\nselected_k=len(x.columns)/2\ncolumns_length=len(x.columns)\ndata=x\nhistorical_class=y",
"_____no_output_____"
]
],
[
[
"from sklearn.feature_selection import *\n\ndef selector_scores(selection_type=mutual_info_regression,selected_k=10,columns_length=len(x.columns),\n data=x,historical_class=y):\n \n def get_scores(selector,columns_length): \n scores = selector.scores_\n plt.figure(figsize=(12,8),dpi=80)\n plt.plot(scores)\n plt.xticks(np.arange(columns_length),[x for x in range(columns_length)]);\n \n return scores\n \n selector = SelectKBest(selection_type, k=selected_k)\n selector.fit(data,historical_class)\n results=get_scores(selector,columns_length)\n \n return results",
"_____no_output_____"
],
[
"from itertools import chain \n\ndef get_fixed_list(lst: list): \n def remove_duplicates(lst_1d: list):\n unique_lst = []\n for data in lst_1d:\n if data not in unique_lst:\n unique_lst.append(data)\n return unique_lst \n \n lst_1d=list(chain.from_iterable(lst))\n return remove_duplicates(lst_1d)",
"_____no_output_____"
],
[
"def get_weigth_zero_features(data,scores):\n weight_zero_features=[]\n for label,i in zip(data.columns,scores):\n if i==0: weight_zero_features.append(label)\n \n return weight_zero_features",
"_____no_output_____"
],
[
"def get_weigths_n_iterations(x,y,n): \n lst=[]\n for i in range(n):\n scores=selector_scores(data=x,historical_class=y)\n weight_zero_features=get_weigth_zero_features(x,scores)\n lst.append(weight_zero_features)\n \n return get_fixed_list(lst)",
"_____no_output_____"
]
],
[
[
"# Weight grahps",
"_____no_output_____"
],
[
"#### n=5 for best performance, any greater value overreads weigth zero features",
"_____no_output_____"
]
],
[
[
"weight_zero_features=get_weigths_n_iterations(x,y,n=5)",
"_____no_output_____"
]
],
[
[
"# Getting rid of weigth 0 features",
"_____no_output_____"
]
],
[
[
"weight_zero_features",
"_____no_output_____"
]
],
[
[
"['n_aliphatic_carbocycles',\n 'n_aliphatic_heterocycles',\n 'n_aromatic_rings',\n 'n_saturated_carbocycles',\n 'n_saturated_heterocycles',\n 'n_saturated_rings',\n 'n_atoms_stereo_centers',\n 'n_atoms_with_Hydrogen',\n 'n_radical_electrons',\n 'n_amide_bonds',\n 'n_rings',\n 'n_aromatic_heterocycles',\n 'n_briged_head_atoms',\n 'n_spiro_atoms',\n 'n_hetero_cycles']",
"_____no_output_____"
]
],
[
[
"data_frame=data_frame.drop(weight_zero_features,axis=1); data_frame.shape",
"_____no_output_____"
]
],
[
[
"# New csv",
"_____no_output_____"
]
],
[
[
"data_frame.insert(0,\"m_name\",mixed_df[\"m_name\"].values)\ndata_frame.head()",
"_____no_output_____"
],
[
"data_frame.to_csv(\"molecules_v2.csv\",sep=\"\\t\",index = False, header=True)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e716c6f4261fc3966914bcd09e345f7ccd0d66eb | 13,911 | ipynb | Jupyter Notebook | setup.ipynb | SergioRAgostinho/bootstrap-ml | 1f96c58ee09a8a7fcb61e5f1017c9dea74c31805 | [
"Apache-2.0"
] | 4 | 2018-03-22T10:54:52.000Z | 2019-09-16T09:55:42.000Z | setup.ipynb | SergioRAgostinho/bootstrap-ml | 1f96c58ee09a8a7fcb61e5f1017c9dea74c31805 | [
"Apache-2.0"
] | 2 | 2018-03-22T20:24:35.000Z | 2020-03-26T09:07:32.000Z | setup.ipynb | SergioRAgostinho/bootstrap-ml | 1f96c58ee09a8a7fcb61e5f1017c9dea74c31805 | [
"Apache-2.0"
] | null | null | null | 50.402174 | 5,408 | 0.739702 | [
[
[
"# Setup\n\nFor this workshop we're gonna leverage the power of *pip* as our package manager to set up all of our dependencies. \n\n**Note**: all material in this workshop is aimed at Python 3.5 and up. Some OSs package *pip* for Python 2 as `pip` and *pip* for Python 3 as `pip3` e.g, Ubuntu and Homebrew for Mac OS X. Although (I'm convinced that) *pip* is actually version agnostic, as long as it detects correctly your python installations, you might be required to invoke `pip3` in order to ensure *pip* is called with the right environment.\n\nFor more information on setting up *pip* please check the [official documentation](https://pip.pypa.io/en/latest/installing/).\n\n**Note**: `pip` is an application which is installed alongside your Python runtime environment. By this I mean, all commands make use of `pip` are to be called in the terminal/command prompt, **not** inside the Python interpreter.\n\nIf *pip* is properly set up, invoking respectively for Unix and Windows platforms\n\n```\n$ which pip3\n``` \nor\n\n```\n$ where pip3\n``` \n\nshould return the path to the pip3 executable.\n\n### pip on Ubuntu\n\nSimply run \n\n```\n$ sudo apt-get install python3-pip\n```\n\nYou'll it invoke as `pip3` afterwards.\n\n### pip on Mac OS X using Homebrew\n\n[Homebrew](https://brew.sh/) is a popular unofficial package manager for the Mac OS X platform. If you install Python 3 from Homebrew it'll come with pip already so you just need to invoke `pip3`.\n\n\n## Dependencies\n\nWe're gonna use a number of publicly available open source libraries to provide basic linear algebra, visualization and algorithm implementations\n\n- [numpy](http://www.numpy.org/): array processing for numbers, strings, records, and objects.\n- [matplotlib](https://matplotlib.org/): Python plotting package\n- [scipy](https://www.scipy.org/): Scientific Library for Python.\n- [scikit-learn](scikit-learn.org/): A set of python modules for machine learning and data mining\n\n\nTo install the dependencies simply invoke the following in your terminal.\n\n```\n$ pip3 install scikit-learn scipy matplotlib numpy\n```\nAlternatively you can download the requirements.txt file which is part of the repo's root folder and run\n\n```\n$ pip3 install -r requirements.txt\n```\n\nmaking sure you provide the appropriate path to the requirements file.\n\n\nWith some luck all goes through without any issues and your environment is ready for set up. \n\nAt the moment these are the current depencies we'll be using in the pratical part of the workshop. If something get's added I'll let you know.",
"_____no_output_____"
],
[
"\n### Jupyter (optional)\n\nThe material for this workshop is provided in the format of interactive Python notebooks (`*.ipynb` files). GitHub is able to handle these pretty well, which is what is allowing you to visualize this small tutorial. But why watch when you can run, edit and augment things? Let *Jupyter* enter the scene.\n\nJupyter (previously known as iPython) is a live web server which is able to display, edit and run interactive Python notebooks. The amount of things it can do is really impressive but for our purposes we'll stick to it's most basic features. Like most things in the Python world, Jupyter is conveniently shipped through pip.\n\n```\n$ pip3 install jupyter\n```\n\nDo you need Jupyter to run the code from the tutorials? No! The Python engine and the modules mentioned in the previous section are more than enough to get the job done, that's why it's not included as a dependency. It's more of a \"recommended\" dependecy in Aptitude lingo.",
"_____no_output_____"
],
[
"## Testing things\n\nIf everything went ok with pip's installation, the following lines should execute without any problem.\n\n### *numpy*",
"_____no_output_____"
]
],
[
[
"import numpy\nprint(numpy.__version__)\n\nprint(numpy.eye(3))",
"1.14.1\n[[1. 0. 0.]\n [0. 1. 0.]\n [0. 0. 1.]]\n"
]
],
[
[
"### *matplotlib*\n\n**IMPORTANT**: *matplotlib* requires additional configuration of a rendering backend in order to display plots interactively. The [documentation](https://matplotlib.org/faq/usage_faq.html#what-is-a-backend) provides instructions on how to achieve this.\n\nOne possible way of configuring the backend is by providing a configuration file to matplotlib named `matplotlibrc`. Each environment is unique but on Ubuntu this configuration file should be located at `$HOME/.config/matplotlib/matplotlibrc`. On Windows it should be placed at `%HOME%/.matplotlib/matplotlibrc`.\nHere's configuration of the machine this document is being written on\n\n```\n#### CONFIGURATION BEGINS HERE\n\n# The default backend; one of GTK GTKAgg GTKCairo GTK3Agg GTK3Cairo\n# MacOSX Qt4Agg Qt5Agg TkAgg WX WXAgg Agg Cairo GDK PS PDF SVG\n# Template.\n# You can also deploy your own backend outside of matplotlib by\n# referring to the module name (which must be in the PYTHONPATH) as\n# 'module://my_backend'.\nbackend : TkAgg\n```\n\nNow let's test it out.",
"_____no_output_____"
]
],
[
[
"#the command below only applies to jupyter\n%matplotlib inline\n\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nprint(matplotlib.__version__)\n\nplt.scatter(range(1,10), range(1,10))\nplt.show()",
"1.5.1\n"
]
],
[
[
"### *scikit learn*",
"_____no_output_____"
]
],
[
[
"import sklearn\nsklearn.__version__",
"_____no_output_____"
]
],
[
[
"### *SciPy*",
"_____no_output_____"
]
],
[
[
"import scipy\nscipy.__version__",
"_____no_output_____"
]
],
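[
[
"# (added sketch) one-shot version report for every workshop dependency\nimport numpy, scipy, sklearn, matplotlib\nfor module in (numpy, scipy, sklearn, matplotlib):\n    print(module.__name__, module.__version__)",
"_____no_output_____"
]
],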
[
[
"## Issues\n\nIf you run into troubles or find mistakes, bugs, please open an issue on the [issue tracker](https://github.com/SergioRAgostinho/bootstrap-ml/issues).\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e716d05600e71b5e1a49a2d049d4dc865242bf9d | 253,611 | ipynb | Jupyter Notebook | vgg16_keras_data_image_augmentation.ipynb | jqli0201/etsy-analysis | 5cf429277ca320abcc9ec363d0c4525aaa574be8 | [
"MIT"
] | null | null | null | vgg16_keras_data_image_augmentation.ipynb | jqli0201/etsy-analysis | 5cf429277ca320abcc9ec363d0c4525aaa574be8 | [
"MIT"
] | null | null | null | vgg16_keras_data_image_augmentation.ipynb | jqli0201/etsy-analysis | 5cf429277ca320abcc9ec363d0c4525aaa574be8 | [
"MIT"
] | null | null | null | 642.053165 | 118,714 | 0.928718 | [
[
[
"import matplotlib.pyplot as plt\nimport tensorflow as tf\n\nfrom keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img\nfrom google.colab import drive \ndrive.mount('/content/gdrive')\nimport pandas as pd \nimport numpy as np\ndf = pd.read_pickle('gdrive/My Drive/Learn/Stanford/CS230/image_224x224_sale.pkl')\nX = df['image_pixel']\ny = df['high_sale']\nX = X.to_numpy()\nX = np.stack(X, axis=0)\nplt.imshow(X[0])",
"Drive already mounted at /content/gdrive; to attempt to forcibly remount, call drive.mount(\"/content/gdrive\", force_remount=True).\n"
],
[
"datagen = ImageDataGenerator(\n rotation_range=40,\n width_shift_range=0.2,\n height_shift_range=0.2,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True,\n fill_mode='nearest')",
"_____no_output_____"
],
[
"X0 = X[0].reshape((1,) + X[0].shape)\n\n# prepare iterator\nit = datagen.flow(X0, batch_size=1)\n# generate samples and plot\nfor i in range(9):\n\t# define subplot\n\tplt.subplot(330 + 1 + i)\n\t# generate batch of images\n\tbatch = it.next()\n\t# convert to unsigned integers for viewing\n\timage = batch[0].astype('uint8')\n\t# plot raw pixel data\n\tplt.imshow(image)\n# show the figure\nplt.show()",
"_____no_output_____"
],
[
"from sklearn.model_selection import train_test_split\n# Split into 0.75:0.125:0.125\nX = X/255.\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.125, random_state=42)\nX_train, X_dev, y_train, y_dev = train_test_split(X_train, y_train, test_size=0.14286, random_state=42)\nprint (\"number of training examples = \" + str(X_train.shape[0]))\nprint (\"number of dev examples = \" + str(X_dev.shape[0]))\nprint (\"number of test examples = \" + str(X_test.shape[0]))\nprint (\"X_train shape: \" + str(X_train.shape))\nprint (\"Y_train shape: \" + str(y_train.shape))\nprint (\"X_dev shape: \" + str(X_dev.shape))\nprint (\"Y_dev shape: \" + str(y_dev.shape))\nprint (\"X_test shape: \" + str(X_test.shape))\nprint (\"Y_test shape: \" + str(y_test.shape))",
"number of training examples = 1179\nnumber of dev examples = 197\nnumber of test examples = 197\nX_train shape: (1179, 224, 224, 3)\nY_train shape: (1179,)\nX_dev shape: (197, 224, 224, 3)\nY_dev shape: (197,)\nX_test shape: (197, 224, 224, 3)\nY_test shape: (197,)\n"
],
[
"# prepare iterator\ntrain_generator = datagen.flow(X_train, y_train, batch_size = 32)\nvalidation_generator = datagen.flow(X_dev, y_dev, batch_size = 8)",
"_____no_output_____"
],
[
"from keras.applications.vgg16 import VGG16\nbase_model = VGG16(weights='imagenet')\nmodel = tf.keras.models.Sequential()\nmodel.add(base_model)\nmodel.add(tf.keras.layers.Flatten())\nmodel.add(tf.keras.layers.Dropout(0.5))\nmodel.add(tf.keras.layers.Dense(1, activation='sigmoid'))\nmodel.layers[0].trainable = False\n\nmodel.compile(optimizer='adam',\n loss=tf.keras.losses.BinaryCrossentropy(from_logits=False),\n metrics=['accuracy'])\n\nmodel.summary()",
"Model: \"sequential_2\"\n_________________________________________________________________\n Layer (type) Output Shape Param # \n=================================================================\n vgg16 (Functional) (None, 1000) 138357544 \n \n flatten_2 (Flatten) (None, 1000) 0 \n \n dropout_2 (Dropout) (None, 1000) 0 \n \n dense_2 (Dense) (None, 1) 1001 \n \n=================================================================\nTotal params: 138,358,545\nTrainable params: 1,001\nNon-trainable params: 138,357,544\n_________________________________________________________________\n"
],
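[
"# (added sketch, not part of the original experiment) a more common transfer-learning\n# setup drops VGG16's 1000-class head and pools the convolutional features instead:\nbase = VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3), pooling='avg')\nbase.trainable = False\nalt_model = tf.keras.models.Sequential([base, tf.keras.layers.Dense(1, activation='sigmoid')])\nalt_model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])",
"_____no_output_____"
],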
[
"EPOCHS = 50\n\nmodel.fit_generator(train_generator,\n validation_data=validation_generator,\n steps_per_epoch=len(X_train) / 32, epochs=EPOCHS)",
"/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:5: UserWarning: `Model.fit_generator` is deprecated and will be removed in a future version. Please use `Model.fit`, which supports generators.\n \"\"\"\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e716d63b0d87745eb36f0ed3a4af6d519f1b23ec | 7,649 | ipynb | Jupyter Notebook | label_songs/create_target_columns.ipynb | JonathanElejalde/reggaeton_songs_nlp | 8f3de5911225d23f78edc3e874eb7307ca136e89 | [
"MIT"
] | null | null | null | label_songs/create_target_columns.ipynb | JonathanElejalde/reggaeton_songs_nlp | 8f3de5911225d23f78edc3e874eb7307ca136e89 | [
"MIT"
] | null | null | null | label_songs/create_target_columns.ipynb | JonathanElejalde/reggaeton_songs_nlp | 8f3de5911225d23f78edc3e874eb7307ca136e89 | [
"MIT"
] | 1 | 2022-02-14T11:27:46.000Z | 2022-02-14T11:27:46.000Z | 50.993333 | 1,691 | 0.448817 | [
[
[
"import pandas as pd",
"_____no_output_____"
],
[
"df = pd.read_csv('..\\data\\lyrics.csv')\ndf.head()",
"_____no_output_____"
],
[
"# create the new columns\ndf['sexual_content'] = -1\ndf['women_denigration'] = -1\ndf['drugs'] = -1\ndf.head()",
"_____no_output_____"
]
],
[
[
"# Understanding the new colums\n\nThese new columns are the ones that we are going to use as target values in our nlp classification tasks.\n\n### In the sexual_content column we are going to classify the lyrics as: 0=no_sexual_content, 1=explicit, 2=implicit\n### Then, the women_denigration and drugs column are going to be a binary classification task where 0 is for no and 1 for yes.\n\n\n",
"_____no_output_____"
]
],
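[
[
"# (added sketch) keep the label encodings next to the data as explicit mappings\nsexual_content_labels = {0: 'no_sexual_content', 1: 'explicit', 2: 'implicit'}\nbinary_labels = {0: 'no', 1: 'yes'} # shared by women_denigration and drugs\nprint(sexual_content_labels)\nprint(binary_labels)",
"_____no_output_____"
]
],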
[
[
"df.to_csv('..\\data\\lyrics.csv', index=False)",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
e716e4e3eeefc03b3bdf18ef8be2467b85e83e5a | 463,241 | ipynb | Jupyter Notebook | OpenCV/openCV 15-4-2018.ipynb | afcarl/Useful-python | 5d1947052fb25b2388704926e4692511cc162031 | [
"MIT"
] | null | null | null | OpenCV/openCV 15-4-2018.ipynb | afcarl/Useful-python | 5d1947052fb25b2388704926e4692511cc162031 | [
"MIT"
] | null | null | null | OpenCV/openCV 15-4-2018.ipynb | afcarl/Useful-python | 5d1947052fb25b2388704926e4692511cc162031 | [
"MIT"
] | 1 | 2018-09-05T21:48:57.000Z | 2018-09-05T21:48:57.000Z | 1,408.027356 | 210,094 | 0.951781 | [
[
[
"https://github.com/home-assistant/home-assistant/blob/dev/homeassistant/components/image_processing/opencv.py\n\nhttp://www.bogotobogo.com/python/OpenCV_Python/python_opencv3_Image_Object_Detection_Face_Detection_Haar_Cascade_Classifiers.php",
"_____no_output_____"
]
],
[
[
"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport signal\n%matplotlib inline",
"_____no_output_____"
],
[
"cv2.__version__",
"_____no_output_____"
],
[
"img = mpimg.imread('me.jpg')\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)",
"_____no_output_____"
],
[
"plt.imshow(gray);",
"_____no_output_____"
]
],
[
[
"External popup window with image",
"_____no_output_____"
]
],
[
[
"cv2.imshow('image',img)\nk = cv2.waitKey(0)\nif k == 27: # wait for ESC key to exit\n cv2.destroyAllWindows()",
"_____no_output_____"
]
],
[
[
"External popup window with video",
"_____no_output_____"
]
],
[
[
"cap = cv2.VideoCapture(0)\n\nwhile(True):\n # Capture frame-by-frame\n ret, frame = cap.read()\n\n # Our operations on the frame come here\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n # Display the resulting frame\n cv2.imshow('frame',gray)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n# When everything done, release the capture\ncap.release()\ncv2.destroyAllWindows()",
"_____no_output_____"
]
],
[
[
"## Image classification \nAppear to need to init the classifier and manually load it..",
"_____no_output_____"
]
],
[
[
"face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\neye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')",
"_____no_output_____"
],
[
"face_cascade.load('/Users/robincole/anaconda3/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml')",
"_____no_output_____"
],
[
"eye_cascade.load('/Users/robincole/anaconda3/share/OpenCV/haarcascades/haarcascade_eye.xml')",
"_____no_output_____"
],
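[
"# (added sketch) newer pip builds of OpenCV bundle the cascade files; when available,\n# cv2.data.haarcascades avoids hard-coding a user-specific path\nif hasattr(cv2, 'data'):\n    face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')\n    eye_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_eye.xml')",
"_____no_output_____"
],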
[
"faces = face_cascade.detectMultiScale(gray, 1.3, 5)",
"_____no_output_____"
],
[
"for (x,y,w,h) in faces:\n cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)\n roi_gray = gray[y:y+h, x:x+w]\n roi_color = img[y:y+h, x:x+w]\n eyes = eye_cascade.detectMultiScale(roi_gray)\n for (ex,ey,ew,eh) in eyes:\n cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)\n\nplt.imshow(img)",
"_____no_output_____"
]
],
[
[
"## Video\n\nhttps://medium.com/@neotheicebird/webcam-based-image-processing-in-ipython-notebooks-47c75a022514",
"_____no_output_____"
]
],
[
[
"vc = cv2.VideoCapture(0)",
"_____no_output_____"
],
[
"plt.ion()",
"_____no_output_____"
],
[
"if vc.isOpened(): # try to get the first frame\n is_capturing, frame = vc.read()\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) # makes the blues image look real colored\n webcam_preview = plt.imshow(frame) \nelse:\n is_capturing = False\n print('Noting')",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
e716ec721f9f8e9b25784f7cede573f9d44084be | 22,063 | ipynb | Jupyter Notebook | notebooks/Plot Arctic Ocean Map.ipynb | bludka/pyTMD | 2f866508f743b8b130b21c5379c400b39f73d3f0 | [
"MIT"
] | null | null | null | notebooks/Plot Arctic Ocean Map.ipynb | bludka/pyTMD | 2f866508f743b8b130b21c5379c400b39f73d3f0 | [
"MIT"
] | null | null | null | notebooks/Plot Arctic Ocean Map.ipynb | bludka/pyTMD | 2f866508f743b8b130b21c5379c400b39f73d3f0 | [
"MIT"
] | null | null | null | 43.516765 | 125 | 0.589312 | [
[
[
"Plot Arctic Ocean Map\n==================\n\nDemonstrates plotting hourly tidal displacements for the Arctic Ocean\n\nOTIS format tidal solutions provided by Ohio State University and ESR \n- http://volkov.oce.orst.edu/tides/region.html \n- https://www.esr.org/research/polar-tide-models/list-of-polar-tide-models/\n- ftp://ftp.esr.org/pub/datasets/tmd/ \n\nGlobal Tide Model (GOT) solutions provided by Richard Ray at GSFC \n\nFinite Element Solution (FES) provided by AVISO \n- https://www.aviso.altimetry.fr/en/data/products/auxiliary-products/global-tide-fes.html\n\n#### Python Dependencies\n - [numpy: Scientific Computing Tools For Python](https://www.numpy.org) \n - [scipy: Scientific Tools for Python](https://www.scipy.org/) \n - [pyproj: Python interface to PROJ library](https://pypi.org/project/pyproj/) \n - [netCDF4: Python interface to the netCDF C library](https://unidata.github.io/netcdf4-python/) \n - [matplotlib: Python 2D plotting library](http://matplotlib.org/) \n - [cartopy: Python package designed for geospatial data processing](https://scitools.org.uk/cartopy/docs/latest/) \n\n#### Program Dependencies\n\n- `calc_astrol_longitudes.py`: computes the basic astronomical mean longitudes \n- `calc_delta_time.py`: calculates difference between universal and dynamic time \n- `convert_ll_xy.py`: convert lat/lon points to and from projected coordinates \n- `load_constituent.py`: loads parameters for a given tidal constituent \n- `load_nodal_corrections.py`: load the nodal corrections for tidal constituents \n- `infer_minor_corrections.py`: return corrections for minor constituents \n- `read_tide_model.py`: extract tidal harmonic constants from OTIS tide models \n- `read_netcdf_model.py`: extract tidal harmonic constants from netcdf models \n- `read_GOT_model.py`: extract tidal harmonic constants from GSFC GOT models \n- `read_FES_model.py`: extract tidal harmonic constants from FES tide models \n- `predict_tide.py`: predict tidal elevation at a single time using harmonic constants \n\nThis notebook uses Jupyter widgets to set parameters for calculating the tidal maps. \nThe widgets can be installed as described below. \n```\npip3 install --user ipywidgets\njupyter nbextension install --user --py widgetsnbextension\njupyter nbextension enable --user --py widgetsnbextension\njupyter-notebook\n```",
"_____no_output_____"
],
[
"#### Load modules",
"_____no_output_____"
]
],
[
[
"import os\nimport pyproj\nimport datetime\nimport numpy as np\nimport matplotlib\nmatplotlib.rcParams['axes.linewidth'] = 2.0\nmatplotlib.rcParams[\"animation.html\"] = \"jshtml\"\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport cartopy.crs as ccrs\nimport ipywidgets as widgets\nfrom IPython.display import HTML\n\nimport pyTMD.time\nfrom pyTMD.calc_delta_time import calc_delta_time\nfrom pyTMD.read_tide_model import extract_tidal_constants\nfrom pyTMD.read_netcdf_model import extract_netcdf_constants\nfrom pyTMD.read_GOT_model import extract_GOT_constants\nfrom pyTMD.read_FES_model import extract_FES_constants\nfrom pyTMD.infer_minor_corrections import infer_minor_corrections\nfrom pyTMD.predict_tide import predict_tide\n#-- autoreload\n%load_ext autoreload\n%autoreload 2",
"_____no_output_____"
]
],
[
[
"#### Set parameters for program\n\n- Model directory \n- Tide model \n- Date to run ",
"_____no_output_____"
]
],
[
[
"#-- set the directory with tide models\ndirText = widgets.Text(\n value=os.getcwd(),\n description='Directory:',\n disabled=False\n)\n\n#-- dropdown menu for setting tide model\nmodel_list = ['TPXO9-atlas','TPXO9-atlas-v2','TPXO9-atlas-v3',\n 'TPXO9-atlas-v4','TPXO9.1','TPXO8-atlas','TPXO7.2',\n 'AODTM-5','AOTIM-5','AOTIM-5-2018','Gr1km-v2',\n 'GOT4.7','GOT4.8','GOT4.10','FES2014']\nmodelDropdown = widgets.Dropdown(\n options=model_list,\n value='AOTIM-5-2018',\n description='Model:',\n disabled=False,\n)\n\n#-- date picker widget for setting time\ndatepick = widgets.DatePicker(\n description='Date:',\n value = datetime.date.today(),\n disabled=False\n)\n\n#-- display widgets for setting directory, model and date\nwidgets.VBox([dirText,modelDropdown,datepick])",
"_____no_output_____"
]
],
[
[
"#### Setup tide model parameters",
"_____no_output_____"
]
],
[
[
"#-- directory with tide models\ntide_dir = os.path.expanduser(dirText.value)\nMODEL = modelDropdown.value\n#-- select between tide models\nif (MODEL == 'TPXO9-atlas'):\n model_directory = os.path.join(tide_dir,'TPXO9_atlas')\n grid_file = os.path.join(model_directory,'grid_tpxo9_atlas.nc.gz')\n model_files = ['h_q1_tpxo9_atlas_30.nc.gz','h_o1_tpxo9_atlas_30.nc.gz',\n 'h_p1_tpxo9_atlas_30.nc.gz','h_k1_tpxo9_atlas_30.nc.gz',\n 'h_n2_tpxo9_atlas_30.nc.gz','h_m2_tpxo9_atlas_30.nc.gz',\n 'h_s2_tpxo9_atlas_30.nc.gz','h_k2_tpxo9_atlas_30.nc.gz',\n 'h_m4_tpxo9_atlas_30.nc.gz','h_ms4_tpxo9_atlas_30.nc.gz',\n 'h_mn4_tpxo9_atlas_30.nc.gz','h_2n2_tpxo9_atlas_30.nc.gz']\n model_file = [os.path.join(model_directory,m) for m in model_files]\n reference = 'http://volkov.oce.orst.edu/tides/tpxo9_atlas.html'\n model_format = 'netcdf'\n TYPE = 'z'\n SCALE = 1.0/1000.0\n GZIP = True\nelif (MODEL == 'TPXO9-atlas-v2'):\n model_directory = os.path.join(tide_dir,'TPXO9_atlas_v2')\n grid_file = os.path.join(model_directory,'grid_tpxo9_atlas_30_v2.nc.gz')\n model_files = ['h_q1_tpxo9_atlas_30_v2.nc.gz','h_o1_tpxo9_atlas_30_v2.nc.gz',\n 'h_p1_tpxo9_atlas_30_v2.nc.gz','h_k1_tpxo9_atlas_30_v2.nc.gz',\n 'h_n2_tpxo9_atlas_30_v2.nc.gz','h_m2_tpxo9_atlas_30_v2.nc.gz',\n 'h_s2_tpxo9_atlas_30_v2.nc.gz','h_k2_tpxo9_atlas_30_v2.nc.gz',\n 'h_m4_tpxo9_atlas_30_v2.nc.gz','h_ms4_tpxo9_atlas_30_v2.nc.gz',\n 'h_mn4_tpxo9_atlas_30_v2.nc.gz','h_2n2_tpxo9_atlas_30_v2.nc.gz']\n model_file = [os.path.join(model_directory,m) for m in model_files]\n model_format = 'netcdf'\n TYPE = 'z'\n SCALE = 1.0/1000.0\n GZIP = True\nelif (MODEL == 'TPXO9-atlas-v3'):\n model_directory = os.path.join(tide_dir,'TPXO9_atlas_v3')\n grid_file = os.path.join(model_directory,'grid_tpxo9_atlas_30_v3.nc.gz')\n model_files = ['h_q1_tpxo9_atlas_30_v3.nc.gz','h_o1_tpxo9_atlas_30_v3.nc.gz',\n 'h_p1_tpxo9_atlas_30_v3.nc.gz','h_k1_tpxo9_atlas_30_v3.nc.gz',\n 'h_n2_tpxo9_atlas_30_v3.nc.gz','h_m2_tpxo9_atlas_30_v3.nc.gz',\n 'h_s2_tpxo9_atlas_30_v3.nc.gz','h_k2_tpxo9_atlas_30_v3.nc.gz',\n 'h_m4_tpxo9_atlas_30_v3.nc.gz','h_ms4_tpxo9_atlas_30_v3.nc.gz',\n 'h_mn4_tpxo9_atlas_30_v3.nc.gz','h_2n2_tpxo9_atlas_30_v3.nc.gz',\n 'h_mf_tpxo9_atlas_30_v3.nc.gz','h_mm_tpxo9_atlas_30_v3.nc.gz']\n model_file = [os.path.join(model_directory,m) for m in model_files]\n model_format = 'netcdf'\n TYPE = 'z'\n SCALE = 1.0/1000.0\n GZIP = True\nelif (MODEL == 'TPXO9-atlas-v4'):\n model_directory = os.path.join(tide_dir,'TPXO9_atlas_v4')\n grid_file = os.path.join(model_directory,'grid_tpxo9_atlas_30_v4')\n model_files = ['h_q1_tpxo9_atlas_30_v4','h_o1_tpxo9_atlas_30_v4',\n 'h_p1_tpxo9_atlas_30_v4','h_k1_tpxo9_atlas_30_v4',\n 'h_n2_tpxo9_atlas_30_v4','h_m2_tpxo9_atlas_30_v4',\n 'h_s2_tpxo9_atlas_30_v4','h_k2_tpxo9_atlas_30_v4',\n 'h_m4_tpxo9_atlas_30_v4','h_ms4_tpxo9_atlas_30_v4',\n 'h_mn4_tpxo9_atlas_30_v4','h_2n2_tpxo9_atlas_30_v4',\n 'h_mf_tpxo9_atlas_30_v4','h_mm_tpxo9_atlas_30_v4']\n model_file = [os.path.join(model_directory,m) for m in model_files]\n model_format = 'OTIS'\n EPSG = '4326'\n TYPE = 'z'\nelif (MODEL == 'TPXO9.1'):\n grid_file = os.path.join(tide_dir,'TPXO9.1','DATA','grid_tpxo9')\n model_file = os.path.join(tide_dir,'TPXO9.1','DATA','h_tpxo9.v1')\n reference = 'http://volkov.oce.orst.edu/tides/global.html'\n model_format = 'OTIS'\n EPSG = '4326'\n TYPE = 'z'\nelif (MODEL == 'TPXO8-atlas'):\n grid_file = os.path.join(tide_dir,'tpxo8_atlas','grid_tpxo8atlas_30_v1')\n model_file = os.path.join(tide_dir,'tpxo8_atlas','hf.tpxo8_atlas_30_v1')\n reference = 
'http://volkov.oce.orst.edu/tides/tpxo8_atlas.html'\n model_format = 'ATLAS'\n EPSG = '4326'\n TYPE = 'z'\nelif (MODEL == 'TPXO7.2'):\n grid_file = os.path.join(tide_dir,'TPXO7.2_tmd','grid_tpxo7.2')\n model_file = os.path.join(tide_dir,'TPXO7.2_tmd','h_tpxo7.2')\n reference = 'http://volkov.oce.orst.edu/tides/global.html'\n model_format = 'OTIS'\n EPSG = '4326'\n TYPE = 'z'\nelif (MODEL == 'AODTM-5'):\n grid_file = os.path.join(tide_dir,'aodtm5_tmd','grid_Arc5km')\n model_file = os.path.join(tide_dir,'aodtm5_tmd','h0_Arc5km.oce')\n reference = ('https://www.esr.org/research/polar-tide-models/'\n 'list-of-polar-tide-models/aodtm-5/')\n model_format = 'OTIS'\n EPSG = 'PSNorth'\n TYPE = 'z'\nelif (MODEL == 'AOTIM-5'):\n grid_file = os.path.join(tide_dir,'aotim5_tmd','grid_Arc5km')\n model_file = os.path.join(tide_dir,'aotim5_tmd','h_Arc5km.oce')\n reference = ('https://www.esr.org/research/polar-tide-models/'\n 'list-of-polar-tide-models/aotim-5/')\n model_format = 'OTIS'\n EPSG = 'PSNorth'\n TYPE = 'z'\nelif (MODEL == 'AOTIM-5-2018'):\n grid_file = os.path.join(tide_dir,'Arc5km2018','grid_Arc5km2018')\n model_file = os.path.join(tide_dir,'Arc5km2018','h_Arc5km2018')\n reference = ('https://www.esr.org/research/polar-tide-models/'\n 'list-of-polar-tide-models/aotim-5/')\n model_format = 'OTIS'\n EPSG = 'PSNorth'\n TYPE = 'z'\nelif (MODEL == 'Gr1km-v2'):\n grid_file = os.path.join(tide_dir,'greenlandTMD_v2','grid_Greenland8.v2')\n model_file = os.path.join(tide_dir,'greenlandTMD_v2','h_Greenland8.v2')\n reference = 'https://doi.org/10.1002/2016RG000546'\n model_format = 'OTIS'\n EPSG = '3413'\n TYPE = 'z'\nelif (MODEL == 'GOT4.7'):\n model_directory = os.path.join(tide_dir,'GOT4.7','grids_oceantide')\n model_files = ['q1.d.gz','o1.d.gz','p1.d.gz','k1.d.gz','n2.d.gz',\n 'm2.d.gz','s2.d.gz','k2.d.gz','s1.d.gz','m4.d.gz']\n model_file = [os.path.join(model_directory,m) for m in model_files]\n reference = ('https://denali.gsfc.nasa.gov/personal_pages/ray/'\n 'MiscPubs/19990089548_1999150788.pdf')\n model_format = 'GOT'\n SCALE = 1.0/100.0\n GZIP = True\nelif (MODEL == 'GOT4.8'):\n model_directory = os.path.join(tide_dir,'got4.8','grids_oceantide')\n model_files = ['q1.d.gz','o1.d.gz','p1.d.gz','k1.d.gz','n2.d.gz',\n 'm2.d.gz','s2.d.gz','k2.d.gz','s1.d.gz','m4.d.gz']\n model_file = [os.path.join(model_directory,m) for m in model_files]\n reference = ('https://denali.gsfc.nasa.gov/personal_pages/ray/'\n 'MiscPubs/19990089548_1999150788.pdf')\n model_format = 'GOT'\n SCALE = 1.0/100.0\n GZIP = True\nelif (MODEL == 'GOT4.10'):\n model_directory = os.path.join(tide_dir,'GOT4.10c','grids_oceantide')\n model_files = ['q1.d.gz','o1.d.gz','p1.d.gz','k1.d.gz','n2.d.gz',\n 'm2.d.gz','s2.d.gz','k2.d.gz','s1.d.gz','m4.d.gz']\n model_file = [os.path.join(model_directory,m) for m in model_files]\n reference = ('https://denali.gsfc.nasa.gov/personal_pages/ray/'\n 'MiscPubs/19990089548_1999150788.pdf')\n model_format = 'GOT'\n SCALE = 1.0/100.0\n GZIP = True\nelif (MODEL == 'FES2014'):\n model_directory = os.path.join(tide_dir,'fes2014','ocean_tide')\n model_files = ['2n2.nc.gz','eps2.nc.gz','j1.nc.gz','k1.nc.gz',\n 'k2.nc.gz','l2.nc.gz','la2.nc.gz','m2.nc.gz','m3.nc.gz','m4.nc.gz',\n 'm6.nc.gz','m8.nc.gz','mf.nc.gz','mks2.nc.gz','mm.nc.gz',\n 'mn4.nc.gz','ms4.nc.gz','msf.nc.gz','msqm.nc.gz','mtm.nc.gz',\n 'mu2.nc.gz','n2.nc.gz','n4.nc.gz','nu2.nc.gz','o1.nc.gz','p1.nc.gz',\n 'q1.nc.gz','r2.nc.gz','s1.nc.gz','s2.nc.gz','s4.nc.gz','sa.nc.gz',\n 'ssa.nc.gz','t2.nc.gz']\n model_file = 
[os.path.join(model_directory,m) for m in model_files]\n c = ['2n2','eps2','j1','k1','k2','l2','lambda2','m2','m3','m4','m6',\n 'm8','mf','mks2','mm','mn4','ms4','msf','msqm','mtm','mu2','n2',\n 'n4','nu2','o1','p1','q1','r2','s1','s2','s4','sa','ssa','t2']\n model_format = 'FES'\n TYPE = 'z'\n SCALE = 1.0/100.0\n GZIP = True",
"_____no_output_____"
]
],
[
[
"#### Setup coordinates for calculating tides",
"_____no_output_____"
]
],
[
[
"#-- create an image around the Arctic Ocean\n#-- use NSIDC Polar Stereographic definitions\n#-- https://nsidc.org/data/polar-stereo/ps_grids.html\nxlimits = [-3850000,3750000]\nylimits = [-5350000,5850000]\nspacing = [5e3,-5e3]\n#-- x and y coordinates\nx = np.arange(xlimits[0],xlimits[1]+spacing[0],spacing[0])\ny = np.arange(ylimits[1],ylimits[0]+spacing[1],spacing[1])\nxgrid,ygrid = np.meshgrid(x,y)\n#-- x and y dimensions\nnx = int((xlimits[1]-xlimits[0])/spacing[0])+1\nny = int((ylimits[0]-ylimits[1])/spacing[1])+1\n#-- convert image coordinates from polar stereographic to latitude/longitude\ncrs1 = pyproj.CRS.from_string(\"epsg:{0:d}\".format(3413))\ncrs2 = pyproj.CRS.from_string(\"epsg:{0:d}\".format(4326))\ntransformer = pyproj.Transformer.from_crs(crs1, crs2, always_xy=True)\nlon,lat = transformer.transform(xgrid.flatten(), ygrid.flatten())",
"_____no_output_____"
]
],
[
[
"#### Calculate tide map",
"_____no_output_____"
]
],
[
[
"#-- convert from calendar date to days relative to Jan 1, 1992 (48622 MJD)\nYMD = datepick.value\ntide_time = pyTMD.time.convert_calendar_dates(YMD.year, YMD.month,\n YMD.day, hour=np.arange(24))\n#-- delta time (TT - UT1) file\ndelta_file = pyTMD.utilities.get_data_path(['data','merged_deltat.data'])\n\n#-- read tidal constants and interpolate to grid points\nif model_format in ('OTIS','ATLAS'):\n amp,ph,D,c = extract_tidal_constants(lon, lat, grid_file, model_file,\n EPSG, TYPE=TYPE, METHOD='spline', GRID=model_format)\n DELTAT = np.zeros_like(tide_time)\nelif (model_format == 'netcdf'):\n amp,ph,D,c = extract_netcdf_constants(lon, lat, grid_file, model_file,\n TYPE=TYPE, METHOD='spline', SCALE=SCALE, GZIP=GZIP)\n DELTAT = np.zeros_like(tide_time)\nelif (model_format == 'GOT'):\n amp,ph,c = extract_GOT_constants(lon, lat, model_file,\n METHOD='spline', SCALE=SCALE, GZIP=GZIP)\n #-- interpolate delta times from calendar dates to tide time\n DELTAT = calc_delta_time(delta_file, tide_time)\nelif (model_format == 'FES'):\n amp,ph = extract_FES_constants(lon, lat, model_file, TYPE=TYPE,\n VERSION=MODEL, METHOD='spline', SCALE=SCALE, GZIP=GZIP)\n #-- interpolate delta times from calendar dates to tide time\n DELTAT = calc_delta_time(delta_file, tide_time)\n \n#-- calculate complex phase in radians for Euler's\ncph = -1j*ph*np.pi/180.0\n#-- calculate constituent oscillation\nhc = amp*np.exp(cph)\n \n#-- allocate for tide map calculated every hour\ntide_cm = np.ma.zeros((ny,nx,24))\nfor hour in range(24):\n #-- predict tidal elevations at time and infer minor corrections\n TIDE = predict_tide(tide_time[hour], hc, c, DELTAT=DELTAT[hour],\n CORRECTIONS=model_format)\n MINOR = infer_minor_corrections(tide_time[hour], hc, c,\n DELTAT=DELTAT[hour], CORRECTIONS=model_format)\n #-- add major and minor components and reform grid\n #-- convert from meters to centimeters\n tide_cm[:,:,hour] = 100.0*np.reshape((TIDE+MINOR),(ny,nx))",
"_____no_output_____"
]
],
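[
[
"#-- (added sketch) persist the hourly tide grids so the slow extraction step\n#-- does not need to be re-run; the output file name here is arbitrary\nnp.savez('arctic_tide_{0:%Y%m%d}.npz'.format(YMD), x=x, y=y,\n    tide_cm=tide_cm.filled(0.0))",
"_____no_output_____"
]
],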
[
[
"#### Create animation of hourly tidal oscillation",
"_____no_output_____"
]
],
[
[
"#-- output Arctic Ocean Tide Animation\nprojection = ccrs.Stereographic(central_longitude=-45.0,\n central_latitude=+90.0,true_scale_latitude=+70.0)\nfig, ax = plt.subplots(num=1, figsize=(8,9),\n subplot_kw=dict(projection=projection))\n#-- plot tide height\nvmin,vmax = (np.min(tide_cm), np.max(tide_cm))\nextent = (xlimits[0],xlimits[1],ylimits[0],ylimits[1])\nim = ax.imshow(np.zeros((ny,nx)), interpolation='nearest',\n vmin=vmin, vmax=vmax, transform=projection,\n extent=extent, origin='upper', animated=True)\n#-- add 50m resolution cartopy coastlines\nax.coastlines('50m')\n\n#-- Add colorbar and adjust size\n#-- pad = distance from main plot axis\n#-- extend = add extension triangles to upper and lower bounds\n#-- options: neither, both, min, max\n#-- shrink = percent size of colorbar\n#-- aspect = lengthXwidth aspect of colorbar\ncbar = plt.colorbar(im, ax=ax, pad=0.025, extend='both',\n extendfrac=0.0375, shrink=0.90, aspect=25.5, drawedges=False)\n#-- rasterized colorbar to remove lines\ncbar.solids.set_rasterized(True)\n#-- Add label to the colorbar\ncbar.ax.set_ylabel('{0} Tide Height'.format(MODEL), fontsize=13)\ncbar.ax.set_xlabel('cm', fontsize=13)\ncbar.ax.xaxis.set_label_coords(0.50, 1.04)\n#-- ticks lines all the way across\ncbar.ax.tick_params(which='both', width=1, length=19,\n labelsize=13, direction='in')\n#-- add title (date and time)\nttl = ax.set_title(None, fontsize=13)\n#-- set x and y limits\nax.set_xlim(xlimits)\nax.set_ylim(ylimits)\n\n# stronger linewidth on frame\nax.spines['geo'].set_linewidth(2.0)\nax.spines['geo'].set_capstyle('projecting')\n# adjust subplot within figure\nfig.subplots_adjust(left=0.02,right=0.98,bottom=0.05,top=0.95)\n \n#-- animate each map\ndef animate_maps(hour):\n #-- set map data\n im.set_data(tide_cm[:,:,hour])\n #-- set title\n args = (YMD.year,YMD.month,YMD.day,hour)\n ttl.set_text('{0:4d}-{1:02d}-{2:02d}T{3:02d}:00:00'.format(*args))\n\n#-- set animation\nanim = animation.FuncAnimation(fig, animate_maps, frames=24)\n%matplotlib inline\nHTML(anim.to_jshtml())",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e716fc55a3d0ee04d3cc2909da47fbc04dfc6d98 | 10,418 | ipynb | Jupyter Notebook | Chapter09/Exercise9.03/Exercise9_03.ipynb | khieunguyen/The-Data-Science-Workshop | 52cab305e6e2e8bb6820cf488ddb6e16b5567ac9 | [
"MIT"
] | 1 | 2020-05-08T08:59:30.000Z | 2020-05-08T08:59:30.000Z | Chapter09/Exercise9.03/Exercise9_03.ipynb | khieunguyen/The-Data-Science-Workshop | 52cab305e6e2e8bb6820cf488ddb6e16b5567ac9 | [
"MIT"
] | 1 | 2022-03-12T00:33:29.000Z | 2022-03-12T00:33:29.000Z | Chapter09/Exercise9.03/Exercise9_03.ipynb | khieunguyen/The-Data-Science-Workshop | 52cab305e6e2e8bb6820cf488ddb6e16b5567ac9 | [
"MIT"
] | null | null | null | 40.695313 | 2,447 | 0.473027 | [
[
[
"import pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestRegressor\nfrom mlxtend.evaluate import feature_importance_permutation\nimport altair as alt",
"_____no_output_____"
],
[
"file_url = 'https://raw.githubusercontent.com/PacktWorkshops/The-Data-Science-Workshop/master/Chapter09/Dataset/phpYYZ4Qc.csv'",
"_____no_output_____"
],
[
"df = pd.read_csv(file_url)",
"_____no_output_____"
],
[
"y = df.pop('rej')",
"_____no_output_____"
],
[
"X_train, X_test, y_train, y_test = train_test_split(df, y, test_size=0.3, random_state=1)",
"_____no_output_____"
],
[
"rf_model = RandomForestRegressor(random_state=1, n_estimators=50, max_depth=6, min_samples_leaf=60)",
"_____no_output_____"
],
[
"rf_model.fit(X_train, y_train)",
"_____no_output_____"
],
[
"imp_vals, _ = feature_importance_permutation(predict_method=rf_model.predict, X=X_test.values, y=y_test.values, metric='r2', num_rounds=1, seed=2)\nimp_vals",
"_____no_output_____"
],
[
"varimp_df = pd.DataFrame({'feature': df.columns, 'importance': imp_vals})",
"_____no_output_____"
],
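[
"# (added sketch) rank the permutation importances before plotting so the most\n# influential features can be read off directly\nvarimp_df.sort_values('importance', ascending=False).head(10)",
"_____no_output_____"
],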
[
"alt.Chart(varimp_df).mark_bar().encode(\n x='importance',\n y=\"feature\"\n)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |