# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# **input - print**
name = input("enter a name: ")
print("your name is "+ name)
# # formatting
# ---
"say hi to " + 12
print(f"Your number is {input('enter your name')}")
# # list
[1,
'amin',
'computer']
ls = [1, 2, 3, 4, 5, 6, 7]
print(ls)
# +
#ls[start:end:step]
ls[3:]
# -
ls[-1:0:-1]
# ## add and remove from lists
# `append`
#
# `extend`
#
# `pop`
#
ls[-1] = 10
# ls
ls.append(20)
# ls
ls_2 = [-1, -2, -4 , -100, 200]
# ls + ls_2
ls.extend(ls_2)
# ls
ls.append(['Ahmad'])
# ls
# del
del ls[-1]
# ls
print(ls.pop())
# ls
sorted(ls)
[*reversed(ls)]
# for
# - for in list
# - how for works
# - using range
# - factorial
for item in ls:
print(item)
for number in ls:
    print(number ** 3)
# range(start,stop,step)
for number in range(10):
print(number)
for number in range(5,100,4):
print(number)
for number in range(0 , 100, 2):
print(number)
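# The topic list above mentions factorial; a small illustrative example (not in the original notebook) using a for loop and range:
n = 5
factorial = 1
for number in range(1, n + 1):
    factorial *= number
print(factorial)  # 5! == 120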
-100 in ls
# ls * 2
# ## dict
# +
dc = {
'/': 'do the /'
}
# -
dc['/']
'/' in dc
'*' in dc
dc.get('/', 'Not Found')
# ## functions
# Refactor the samples above into functions.
def func(param):
return param
func(100)
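# For example (added illustration), the earlier input/print greeting sample can be wrapped in a function:
def greet(name):
    return "your name is " + name
print(greet("Amin"))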
# # classes
class className:
class_variable = 100
class_int = className  # note: this binds the class object itself, not an instance
class_int
class_int.class_variable
class Person:
def __init__(self, name, personal_id, age):
print(name , personal_id , age)
self.name = name
self.personal_id = personal_id
self.age = age
print("class created")
def print_info(self):
print(f"""
name: {self.name}
age: {self.age}
id: {self.personal_id}
""")
def set_name(self, name):
self.name = name
person = Person('Amin', 1, 24)
person.set_name("Ali")
person.print_info()
person.name = "Ahmad"
person.print_info()
person.print_info()
| session-2/notebook_2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="s4x2sL4askdI"
# + [markdown] id="_aOcipcj3bdH"
# # 5. Automatic Differentiation
#
# My learning notebook following the live lesson series "Calculus for Machine Learning" by Dr. <NAME>. I adapted parts of the source code provided in the lecture and wrote some additional features to better understand calculus.
# Learning notes and code are largely based on the lesson's source code. For the original, please see:
# - https://learning.oreilly.com/videos/calculus-for-machine/9780137398171/
# - https://github.com/jonkrohn/ML-foundations/blob/master/notebooks/regression-in-pytorch.ipynb
#
# <br />
#
# Autodiff/autograd
# - Computational diff.
# - Reverse mode diff.
# - Algorithmic diff.
#
# Distinct from classical methods:
# - Numerical diff. (delta method; introduces rounding errors)
# - Symbolic diff. (algebraic rules; computationally inefficient)
#
# Relative to classical methods, autodiff better handles:
# - Functions with many inputs (which is common in ML)
# - Higher-order derivatives
# + id="S9NaUntQ5MVH"
import torch
import numpy as np
import tensorflow as tf
import cv2
import matplotlib.pyplot as plt
from google.colab.patches import cv2_imshow
# + [markdown] id="v-V_824a5LUy"
#
# + [markdown] id="dvJzppYq3sB0"
#
# ## 5.1 Introduction
# - Application of the chain rule (typically to partial derivatives) to a sequence of arithmetic operations (the forward pass)
#
# Differences
# - Chain rule by hand typically begins at most-nested function.
# - Autodiff proceeds from outermost function inward.
# - Small constant factor more compute than forward pass (at most)
#
# <br />
#
# **Gradient**:
# an increase or decrease in the magnitude of a property (e.g. temperature, pressure, or concentration) observed in passing from one point or moment to another.
# + [markdown] id="OwsEPsOb31sV"
# ## 5.2 Autodiff with PyTorch
#
# TensorFlow and PyTorch are the two most popular automatic differentiation libraries.
#
# Let's use them to calculate $\frac{dy}{dx}$ at $x = 5$ where:
#
# $$y = x^2$$
# $$ \frac{dy}{dx} = 2x = 2(5) = 10 $$
#
# + [markdown] id="_rwpmdtN7fat"
# ### PyTorch functions
# - ```x.requires_grad_()```
# - ```y.backward()```
# - ```x.grad```
# + colab={"base_uri": "https://localhost:8080/"} id="xrAHAlvY37oD" outputId="a6ed91b1-b361-4b64-b01c-30ff6d1c7aa2"
x = torch.tensor(5.0) # create a float tensor
x
# + colab={"base_uri": "https://localhost:8080/"} id="XZI6Nqee5SJI" outputId="e1747162-63c2-4564-a8cd-1c469b77b2f8"
x.requires_grad_() # contagiously track gradients through forward pass
# + id="s5BO8Byi5SEE"
y = x**2
# + id="_4Ng1qtT5R9Z"
y.backward() # use autodiff
# + colab={"base_uri": "https://localhost:8080/"} id="qePN9AW_7X_x" outputId="703d7d4f-08af-4550-aa41-0b84ff3412f4"
x.grad
# + [markdown] id="npYbkPME327W"
# ## 5.3 Autodiff with TensorFlow
#
#
# + id="GuD75q5r37bL"
x = tf.Variable(5.0)
# + id="0_oIb2x_73vJ"
with tf.GradientTape() as t: # track gradient
t.watch(x) # declare the variable to watch - track forward pass
y = x**2 # define the pass
# + colab={"base_uri": "https://localhost:8080/"} id="Xt181m0L8Czj" outputId="8d799d19-f585-4e18-9d71-c3b745662a87"
t.gradient(y, x) # use autodiff
# + [markdown] id="juRG-Vxq34Mf"
# ## 5.4 Directed Acyclic Graph of a Line Equation
# $$y=mx+b$$
# - $m$ = $m_{tan}$, slope
# - $b$ = bias
#
# <br />
#
# Nodes are input, output, parameters or operations.
# Directed edges ("arrows") are tensors.
#
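# As a small added illustration (not from the lecture), the line equation can be built as a tiny computational graph in PyTorch; each tensor is an edge and each operation a node:
m_demo = torch.tensor(2.0, requires_grad=True)   # slope parameter node
b_demo = torch.tensor(1.0, requires_grad=True)   # bias parameter node
x_demo = torch.tensor(3.0)                       # input node
y_demo = m_demo * x_demo + b_demo                # multiply and add operation nodes produce the output
y_demo.backward()
m_demo.grad, b_demo.grad                         # dy/dm = x = 3, dy/db = 1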
# + colab={"base_uri": "https://localhost:8080/", "height": 181} id="x-cOzIDd36Mu" outputId="46a6b4fe-a382-4807-a2fa-4c7d90fc76a2"
path = "/content/line_equation.png"
eq = cv2.imread(path)
eq = cv2.resize(eq, dsize=(0,0), fx=0.65, fy=0.65)
cv2_imshow(eq)
# lecture slide from 5.4 Directed Acyclic Graph of a Line Equation by <NAME>
# + [markdown] id="XdlHcP2c_T5e"
# ### Regression in PyTorch
# Here we use calculus to solve the same regression problem that we previously solved with the Moore-Penrose pseudoinverse.
# + colab={"base_uri": "https://localhost:8080/"} id="Kd7RcUut_OTa" outputId="fcd4726b-eeda-4048-af4b-e1f101de1813"
x = torch.tensor(np.arange(0, 8)) # E.g.: Dosage of drug for treating Alzheimer's disease
x
# + [markdown] id="wjcpZV6qAP4w"
# The $y$ values were created using the equation of a line $y = mx + b$, so we know the model parameters to be learned: $m = -0.5$ and $b = 2$. Random, normally-distributed noise has been added to simulate sampling error:
# + id="ERd-tU62_OPH"
# y = -0.5*x + 2 + torch.normal(mean=torch.zeros(8), std=0.2)
# + colab={"base_uri": "https://localhost:8080/"} id="w2sqlz7K_OHU" outputId="942286e6-22da-40e6-e707-f7d0185b0941"
y = torch.tensor([1.86, 1.31, .62, .33, .09, -.67, -1.23, -1.37]) # E.g.: Patient's "forgetfulness score"
y
# + colab={"base_uri": "https://localhost:8080/", "height": 243} id="OpSo7OHX_ODw" outputId="f4f10818-d045-4147-a8c3-0214b421f17a"
std = [0.2, 0.5, 1]
ys = []
fig, ax = plt.subplots(1, 3, figsize=(15, 3))
for i in range(3):
y = -0.5*x + 2 + torch.normal(mean=torch.zeros(8), std=std[i])
ys.append(y)
ax[i].set_xlabel("Drug dosage (mL)")
ax[i].set_ylabel("Forgetfulness")
ax[i].scatter(x, y, c='darkorange')
ax[i].set_title(f"Clinical Trial {i+1}", fontsize=15)
# + [markdown] id="MXFIQiHSCXd2"
# Initialize the slope parameter $m$ with a "random" value of 0.9...
#
# (N.B.: In this simple demo, we could guess approximately-correct parameter values to start with. Or, we could use an algebraic (e.g., Moore-Penrose pseudoinverse) or statistical (e.g., ordinary-least-squares regression) approach to solve for the parameters quickly. This tiny machine learning demo with two parameters and eight data points scales, however, to millions of parameters and millions of data points. The other approaches -- guessing, algebra, statistics -- do not come close to scaling in this way.)
# + colab={"base_uri": "https://localhost:8080/"} id="L-TYGZU8JWbs" outputId="a730b14f-1f3d-48ce-c4e6-c82477a5a3b6"
x = torch.tensor(np.arange(0,8))
m = torch.tensor([0.9]).requires_grad_()
b = torch.tensor([0.1]).requires_grad_()
x, m, b
# + id="YhSpytSrJqRa"
def regression(x, m, b):
return m*x + b
# + id="cJoAw3R3Jkdu"
def regression_plot(x, y, m, b):
fig, ax = plt.subplots()
ax.scatter(x, y)
x_min, x_max = ax.get_xlim()
y_min = regression(x_min, m, b)
y_max = regression(x_max, m, b)
ax.set_xlim([x_min, x_max])
_ = ax.plot([x_min, x_max], [y_min, y_max])
# + [markdown] id="NkOGI_sy78B5"
# Observation: as the standard deviation increases, the data points fluctuate more and the linearity is gradually lost.
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="K5fOzjTJMEeF" outputId="6bde52c3-d66d-41cb-99cc-5953a7ad86de"
regression_plot(x, y, m, b)
# + [markdown] id="79lyMRd935fs"
# ## 5.5 Fitting a Line with Machine Learning
#
# When performing regression, we compare the prediction $\hat y$ to the ground truth $y$. This comparison is performed by means of a loss function (or cost function).
#
# - Prediction: $\hat y = f(x, m, b)$
# - Cost function: $C = g(\hat y, y)$
# - $C = g(f(x, m, b), y)$
#
# <br/>
#
# **ML Regression Process**
# 1) Forward pass
# 2) Compare $\hat y$ with true $y$ to calculate cost $C$.
# 3) The chain rule comes in handy here: autodiff calculates the gradient of $C$ with respect to the parameters.
# 4) Adjust $m$ and $b$ to reduce $C$.
# + [markdown] id="a5L-eZIv-kWT"
# ### **Step 1. Forward Pass**
# + colab={"base_uri": "https://localhost:8080/"} id="0tpOvTh--Uod" outputId="d00e2eec-0c2e-437a-cb7e-056a08471a27"
# produce prediction values for each input x value.
y_hat = regression(x, m, b)
y_hat
# + [markdown] id="C8g3uXE2AKv7"
# As shown in the graph above, the first set of predictions is nowhere near the ground truth. This is because $m$ and $b$ are initialized at arbitrary values just to start the regression process.
#
# So how do we evaluate the difference? A typical choice is the mean squared error (MSE) cost.
#
# $$C = \frac{1}{n} \sum_{i=1}^n (\hat{y_i}-y_i)^2 $$
# (The mean of the squared differences between the predictions and the truth.)
#
# Why square?
# - All resulting values will be positive $\rightarrow$ easier to compare values.
# - Squaring penalises larger differences much more heavily, while remaining relatively tolerant of smaller ones.
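# A tiny worked example (made-up numbers) of the MSE formula above:
y_hat_demo = torch.tensor([2.0, 0.0])
y_true_demo = torch.tensor([1.0, -2.0])
torch.mean((y_hat_demo - y_true_demo) ** 2)   # ((1)^2 + (2)^2) / 2 = 2.5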
# + [markdown] id="4d6Tsp0FMaJ7"
# ### Step 2. Compare $\hat y$ with true $y$ to calculate cost $C$.
# + id="E4uVa3bI354R"
def mse(y_hat, y):
sigma = torch.sum((y_hat - y)**2)
return sigma/len(y)
# + id="pRI86QKM_Lex" colab={"base_uri": "https://localhost:8080/"} outputId="3368aa28-ccc0-46cb-e9be-99e920237902"
C = mse(y_hat, y)
C
# + colab={"base_uri": "https://localhost:8080/", "height": 340} id="cnpdRVMwB6k5" outputId="dfde1207-80d4-4f20-af8c-2a42caea6710"
print(y_hat)
print(y)
def see_difference(y_hat, y):
y = y.detach().numpy()
y_hat = y_hat.detach().numpy()
diff = y_hat - y
fig, ax = plt.subplots()
ax.scatter(x, diff, s=60, label='difference')
ax.scatter(x, y, c='red', s=60, label="truth")
ax.scatter(x, y_hat, c='blue', s=60, label="pred")
for i in range(len(y)):
ax.plot([x[i], x[i]],[y_hat[i], y[i]],'k-')
ax.legend()
plt.title("Prediction-Truth difference", fontsize=20, fontweight='bold')
see_difference(y_hat, y)
# + [markdown] id="qqBLd00WIxuV"
# ### Step 3. Use autodiff to calculate gradient of $C$ with respect to parameters.
#
# "We perform automatic differentiation backward from that end point, and because we're tracking gradients on both M and B, we can obtain what the slope of C is with respect to both M and B, given all of the inputs that we have right now, into our model." ([5.4 Directed Acyclic Graph](https://learning.oreilly.com/videos/calculus-for-machine/9780137398171/))*
#
# - A gradient is the derivative of a function that has more than one input variable; it collects the partial derivative with respect to each input.
# + id="aSwhBDQH_L9S"
C.backward()
# + id="skNM3HKc_LyI" colab={"base_uri": "https://localhost:8080/"} outputId="bc4b9295-dae4-44b5-8781-7ce2015878d4"
m.grad
# + id="MivnHfdO_Llu" colab={"base_uri": "https://localhost:8080/"} outputId="e06ff080-9c93-4b4f-d3df-ab5ccb5c8dbf"
b.grad
# + [markdown] id="K7ByPtVMKLTP"
# ### Step 4. Gradient Descent
# + colab={"base_uri": "https://localhost:8080/"} id="3gcRYrGOI_r3" outputId="4be712ba-8077-4b0e-962f-79c42c440615"
optimizer = torch.optim.SGD([m, b], lr=0.01)
optimizer
# + [markdown] id="FsghxHk-LQtg"
# Gradient descent allows us to take a step, adjusting $m$ and $b$ in the direction that the gradient tells us will reduce the cost.
# + id="_NmbS1KPLilM"
optimizer.step()
# + colab={"base_uri": "https://localhost:8080/"} id="8LALb8UMLl8L" outputId="590cce43-42b7-44b3-d596-4691acf70010"
m
# + colab={"base_uri": "https://localhost:8080/"} id="O2PUj8pALm9T" outputId="bac3b187-46e3-4270-97b3-b648b0ff5275"
b
# + [markdown] id="DCAKz2vpL-W0"
# The graph below looks hardly different from the first graph above. This is because the learning rate is small (0.01), so each step changes the parameters only slightly; the slope has nonetheless been adjusted.
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="mOLlESPELr_h" outputId="3aa2c824-cb45-49c8-b12c-85fe910b55d4"
regression_plot(x, y, m, b)
# + [markdown] id="EwIY-rXZMnj7"
# ### Repeat Steps 1. and 2.
# + colab={"base_uri": "https://localhost:8080/"} id="PDp7Z3DPMNDt" outputId="5700a2ae-8713-4bb4-d992-04725dc63ceb"
# Steps 1 and 2 in one basket.
C = mse(regression(x, m, b), y)
C
# + [markdown] id="aQkhaflXODJo"
# ### Iteration - Loop through the 4 steps
# in order to iteratively minimise the cost towards zero.
# + colab={"base_uri": "https://localhost:8080/", "height": 359} id="hp2ZEYRhQGDW" outputId="b58a67d2-e5f4-4440-b171-bbcf8d3f0d7d"
import cv2
from google.colab.patches import cv2_imshow
path = "/content/ml process.png"
ml_process = cv2.imread(path)
ml_process = cv2.resize(ml_process, dsize=(0,0), fx=0.5, fy=0.5)
ml_process = cv2.putText(ml_process, "Lecture slide from 5.5 Fitting a Line with ML by Dr. <NAME>",
org=(0, 15), fontFace=cv2.FONT_HERSHEY_PLAIN, fontScale=1.1, color=1, thickness=1)
cv2_imshow(ml_process)
# + colab={"base_uri": "https://localhost:8080/"} id="Or9rYZE6N-Rx" outputId="3164c0bb-c82f-42d7-b671-cae93b0a7207"
# Compare with the above diagram.
x = torch.tensor(np.arange(0, 8))
y = torch.tensor([1.86, 1.31, .62, .33, .09, -.67, -1.23, -1.37])
m = torch.tensor([0.9]).requires_grad_()
b = torch.tensor([0.1]).requires_grad_()
y_hat = regression(x, m, b)
optimisation = []
epochs = 1000
optimizer = torch.optim.SGD([m, b], lr=0.01)
weights_biases = []
def regression(x, m, b):
return m*x + b
def mse(y_hat, y):
sigma = torch.sum((y_hat - y)**2)
return sigma/len(y)
for epoch in range(epochs):
optimizer.zero_grad() # Reset gradients to zero; else they accumulate, occupying memory resource for values that need not be stored.
y_hat = regression(x, m, b) # step 1: calculate the difference between prediction and truth.
C = mse(y_hat, y) # step 2: calculate the cost.
C.backward() # step 3: backpropagate.
optimizer.step() # step 4: Get gradient descent at set learning rate.
opt = [epoch, C.item(), m.grad.item(), b.grad.item()]
optimisation.append(opt)
m_b = [m.item(), b.item()]
weights_biases.append(m_b)
print('Epoch {}, cost {}, m grad {}, b grad {}'.format(epoch, '%.3g' % C.item(), '%.3g' % m.grad.item(), '%.3g' % b.grad.item()))
# + [markdown] id="G2hR2nqZqByB"
# ## 5.6 Evaluation
# + [markdown] id="95v-EkGNqzDb"
# Initially, we started off with $m = -0.5$ for slope, and $b=2$ for bias.
# Check it out at the section **Regression in PyTorch** ;)
# Compare these initial values with the below $\text{optimised}$ $m$ and $b$.
# + colab={"base_uri": "https://localhost:8080/"} id="gzJWwACtq5o9" outputId="856ab243-363b-445c-b77b-e87ed33dbd87"
m.item()
# + colab={"base_uri": "https://localhost:8080/"} id="2SHIQdnxq7BA" outputId="427a50d5-a6a4-43e1-d985-bdc6e67e4d40"
b.item()
# + [markdown] id="wpqIkF8lp8qS"
# Let's plot some graphs to see what gradient descent achieved and how the parameters changed through the optimisation process.
# + colab={"base_uri": "https://localhost:8080/"} id="A7Uvs2g6a8SJ" outputId="b0c91f28-3d65-4fc7-e908-28d2f9af0b66"
costs = [opt[1] for opt in optimisation]
weights = [opt[2] for opt in optimisation]
biases = [opt[3] for opt in optimisation]
# values from epoch 2 are corrupted for some reason.
# Correcting the values manually.
costs[1] = 8.57
weights[1] = 23.2
biases[1] = 3.6
print(costs[:10])
print(weights[:10])
print(biases[:10])
# + colab={"base_uri": "https://localhost:8080/", "height": 303} id="CgbURZoydytd" outputId="8124c14f-b636-470d-c385-ddedf0149a6b"
fig, ax = plt.subplots(figsize=(15, 4))
epoch = np.arange(1000)
parameters = [costs, weights, biases]
labels = ["costs", "weights", "biases"]
for i in range(3):
ax.plot(epoch, parameters[i], label=labels[i], linewidth=3)
ax.set_xlim(0, 10)
ax.legend()
plt.title("Optimising Regression with Gradient Descent", fontsize=20, fontweight='bold', fontname='Helvetica')
# + colab={"base_uri": "https://localhost:8080/"} id="8zsnebH2nsKK" outputId="6bd92606-c14d-46f3-d022-a591c34e4a39"
m.item()
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="Z8fhAb2EiHvy" outputId="34717439-2d0f-47f1-90f6-d39268876c6c"
# Optimisation final result
x = torch.tensor(np.arange(0, 8))
regression_plot(x, y, m, b)
# + [markdown] id="G041o9YwqYxD"
# See how the fitted line transitioned from far-from-good predictions to a very tight fit.
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="oVD2gX0tl2PS" outputId="5d35b8ef-6b52-4fc4-bf5c-159cd45ef556"
x = torch.tensor(np.arange(0, 8))
y = torch.tensor([1.86, 1.31, .62, .33, .09, -.67, -1.23, -1.37])
weights = [wb[0] for wb in weights_biases]
biases = [wb[1] for wb in weights_biases]
for i in range(1, 11):
regression_plot(x, y, weights[i*100-1], biases[i*100-1])
print(f'epoch {i*100-1} weight: {weights[i*100-1]} bias: {biases[i*100-1]}')
# + [markdown] id="3euNoIOcsULS"
# See the optimisation process in a single graph.
# + colab={"base_uri": "https://localhost:8080/", "height": 325} id="k5Qqrtkfq9MP" outputId="29028fa7-fc6b-49a2-ecf8-d684b7144e37"
x = torch.tensor(np.arange(0, 8))
y = torch.tensor([1.86, 1.31, .62, .33, .09, -.67, -1.23, -1.37])
weights = [wb[0] for wb in weights_biases]
biases = [wb[1] for wb in weights_biases]
fig, ax = plt.subplots()
for i in range(1, 11):
ax.scatter(x, y)
x_min, x_max = ax.get_xlim()
y_min = regression(x_min, weights[i*100-1], biases[i*100-1])
y_max = regression(x_max, weights[i*100-1], biases[i*100-1])
ax.set_xlim([x_min, x_max])
_ = ax.plot([x_min, x_max], [y_min, y_max])
plt.title("Gradual optimisation\nover 1000 epochs", fontsize=20, fontweight='bold')
# + [markdown] id="xj4aDmg5sZ4A"
# ## Further learning
# + [markdown] id="kRFFYVcbkBR0"
# ### Exercise 1
# Use PyTorch (or TensorFlow) to find the slope of $$y = x^2 + 2x+2$$ where $x=2$.
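# A possible solution sketch (not the official answer), following the same pattern as section 5.2:
x_ex = torch.tensor(2.0, requires_grad=True)
y_ex = x_ex**2 + 2*x_ex + 2
y_ex.backward()
x_ex.grad   # dy/dx = 2x + 2 = 6 at x = 2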
# + [markdown] id="SsvZ7oCCkl0i"
# ### Exercise 2
# Use the Regression in PyTorch notebook to simulate a new linear relationship between $y$ and $x$, and then fit the parameters $m$ and $b$.
# + [markdown] id="8wlWYhLVkvTJ"
# ### Exercise 3
# Read about how $\text{differential programming}$, wherein computer programs can be differentiated, could be common soon.
# - https://tryolabs.com/blog/2020/04/02/swift-googles-bet-on-differentiable-programming/
# + [markdown] id="cLKNKjWyOzJ4"
# ### loss.backward()
# ```loss.backward()``` computes $\frac{\partial \text{loss}}{\partial x}$ for every parameter $x$ which has ```requires_grad=True```. These are accumulated into ```x.grad``` for every parameter $x$ ([Colsbury](https://discuss.pytorch.org/t/what-does-the-backward-function-do/9944)).
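# A minimal sketch (illustrative values) of that accumulation behaviour, which is why optimizer.zero_grad() is called at the top of the training loop above:
x_acc = torch.tensor(3.0, requires_grad=True)
(x_acc ** 2).backward()
print(x_acc.grad)   # tensor(6.)
(x_acc ** 2).backward()
print(x_acc.grad)   # tensor(12.) -- gradients accumulate rather than being overwritten
x_acc.grad.zero_()
print(x_acc.grad)   # tensor(0.)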
# + [markdown] id="IAabXmhZFojy"
# # New Functions
# - Plotting lines connecting points
# - ```plt.plot([x1,x2],[y1,y2],'k-')```
# https://stackoverflow.com/questions/35363444/plotting-lines-connecting-points
# - Put text on image
# - ```cv2.putText()```
# https://www.geeksforgeeks.org/python-opencv-cv2-puttext-method/
# - Line style - thickness
# - ```plt.plot(linewidth=n)```
# https://pythoninformer.com/python-libraries/matplotlib/line-plots/
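# A quick combined demo (made-up data) of the calls noted above:
demo_x = [0, 1, 2, 3]
demo_y = [0, 1, 4, 9]
fig, ax = plt.subplots()
ax.scatter(demo_x, demo_y)
ax.plot([demo_x[0], demo_x[-1]], [demo_y[0], demo_y[-1]], 'k-', linewidth=3)  # line connecting two points, with a thicker linewidth
demo_img = cv2.putText(np.zeros((40, 320), dtype=np.uint8), "cv2.putText demo",
                       org=(5, 25), fontFace=cv2.FONT_HERSHEY_PLAIN, fontScale=1.2, color=255, thickness=1)
cv2_imshow(demo_img)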
# + [markdown] id="-E0dYI7pE2T0"
# # Error Notes
#
# RuntimeError: Can't call numpy() on Tensor that requires grad. Use tensor.detach().numpy() instead.
#
# The tensor (x) was created with gradient tracking attached (```requires_grad_()```), so we need to detach it before converting it to an np.array.
#
# https://stackoverflow.com/questions/55466298/pytorch-cant-call-numpy-on-variable-that-requires-grad-use-var-detach-num
# + [markdown] id="Cdx69eOZNFbc"
# https://github.com/jonkrohn/ML-foundations/blob/master/notebooks/regression-in-pytorch.ipynb
| 03 Calculus for ML (Jon Krohn)/CALC I 05 Automatic Differentiation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Assignment 2 | Programming Logic
#
# Reminder: in all of the assignments this semester, the answer is not the only consideration, but also how you get to it. It's OK (suggested even!) to use the internet for help. But you _should_ be able to answer all of these questions using only the programming techniques you have learned in class and from the readings.
#
# A few keys for success:
# - Avoid manual data entry
# - Emphasize logic and clarity
# - Use comments, docstrings, and descriptive variable names
# - In general, less code is better. But if more lines of code make your program easier to read or to understand what it's doing, then go for it.
# ## Problem 1
# Write a Python program to count the number of even and odd numbers from a list of numbers. Test your code by running it on a list of integers from 1 to 9. No need to make this a function unless you want to.
# +
#Alternative 1
#Create a list of numbers from 1 to 9
number_list = list(range(1,10))
#Counters for even and odd numbers - Start at 0
even=0
odd=0
#For loop to count even and odd numbers in the list of integers
for numb in number_list:
if numb %2==1:
odd+=1
else:
even+=1
#Prints the counters and the list
print('There are',even, 'even numbers and', odd, 'odd numbers in the list',number_list)
# +
#Alternative 2: With user input
#Program asks the users for a list of numbers separated by a comma
#Then the list of strings is converted to integers
num_list = [int(s) for s in input("Please enter numbers separated by a comma:").split(',')]
#Counters for even and odd numbers - Start at 0
even=0
odd=0
#For loop to count even and odd numbers in the list of integers
for numb in num_list:
if numb %2==1:
odd+=1
else:
even+=1
#Prints the counters
print('There are',even, 'even numbers and', odd, 'odd numbers in the list')
# -
# ## Problem 2
# Write a Python function that takes a list of numbers and returns a list containing only the even numbers from the original list. Test your function by running it on a list of integers from 1 to 9.
# +
def even_numb(alist):
    """Return a list containing only the even numbers from alist."""
    even_list=[]
    for x in alist:
        if x %2==0:
            even_list.append(x)
    return even_list
print('The even numbers of the list',number_list,'are',even_numb(number_list))
# -
# ## Problem 3
#
# 1. Create a function that accepts a list of integers as an argument and returns a list of floats which equals each number as a fraction of the sum of all the items in the original list.
#
# 2. Next, create a second function which is the same as the first, but limit each number in the output list to two decimals.
#
# 3. Create another function which builds on the previous one by allowing a "user" pass in an argument that defines the number of decimal places to use in the output list.
#
# 4. Test each of these functions with a list of integers
# +
#3.1
integer_list=list(range(1,6))
def fraction_of_sum(alist):
float_list=[]
for i in alist:
fraction=i/sum(alist)
float_list.append(fraction)
return float_list
fraction_of_sum(integer_list)
# +
#3.2
def fraction_of_sum2(alist):
float_list=[]
for i in alist:
fraction=round(i/sum(alist),2)
float_list.append(fraction)
return float_list
fraction_of_sum2(integer_list)
# +
#3.3
def fraction_of_sum_user(alist,x):
float_list=[]
for i in alist:
fraction=round(i/sum(alist),x)
float_list.append(fraction)
return float_list
fraction_of_sum_user(integer_list,3)
# -
# # Problem 4
# A prime number is any whole number greater than 1 that has no positive divisors besides 1 and itself. In other words, a prime number must be:
# 1. an integer
# 2. greater than 1
# 3. divisible only by 1 and itself.
#
# Write a function is_prime(n) that accepts an argument `n` and returns `True` (boolean) if `n` is a prime number and `False` if n is not prime. For example, `is_prime(11)` should return `True` and `is_prime(12)` should return `False`.
#
# +
def is_prime(n):
    """Return True if n is a prime number, False otherwise."""
    if n > 1:
        for i in range(2,n):
            if n % i == 0:  # found a divisor other than 1 and n
                return False
    else:
        return False  # n is 1 or smaller, so it is not prime
    return True
is_prime(2)
# -
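# A couple of extra checks from the problem statement (added for illustration):
print(is_prime(11))  # expected: True
print(is_prime(12))  # expected: False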
# ## Problem 5
# 1. Create a class called `Housing`, and add the following attributes to it:
# - type
# - area
# - number of bedrooms
# - value (price)
# - year built.
# 2. Create two instances of your class and populate their attributes (make 'em up)
# 3. Create a method called `rent()` that calculates the estimated monthly rent for each house (assume that monthly rent is 0.4% of the value of the house)
# 4. Print the rent for both instances.
# +
class Housing:
def __init__(self,typ, area, nbedr, price, year):
self.typ = typ
self.area = area
self.nbedr = nbedr
self.price = price
self.year=year
    def rent(self, month_rent=0.004):  # monthly rent is 0.4% of the house value
        return round(self.price*month_rent)
h1 = Housing('Apartment',1600,3,800000,2010)
h2 = Housing('Town House',3200,4,1200000,1950)
print('The rent for the apartment is $', h1.rent(), 'and the rent for the town house is $', h2.rent())
| assignments/assignment_2/assignment_2_JulianaRO.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import matplotlib.pyplot as plt
import numpy as np
import xarray as xr
from salishsea_tools import viz_tools
# -
watercolor = 'lightskyblue'
landcolour = 'papayawhip'
mesh = xr.open_dataset('/home/sallen/MEOPAR/grid/mesh_mask201702.nc')
tmask = 1- mesh.tmask[0, 0]
data3d = xr.open_dataset('/Users/sallen/Documents/MIDOSS/Lagrangian_bunker-14_northern_strait_first30-14.nc')
data3d
imin, imax = 520, 720
jmin, jmax = 100, 300
y_slice = np.arange(imin, imax)
x_slice = np.arange(jmin, jmax)
fig, ax = plt.subplots(1, 1, figsize=(7, 7))
data3d.OilWaterColumnOilVol_3D[8, 39, imin:imax, jmin:jmax].plot(ax=ax, cmap='plasma', vmax=100);
viz_tools.plot_land_mask(ax, '/Users/sallen/Documents/MEOPAR/grid/bathymetry_201702.nc',
xslice=x_slice, yslice=y_slice, color=landcolour)
ax.set_ylim(600, 675)
ax.set_xlim(100, 175);
fig, axs = plt.subplots(2, 2, figsize=(15, 15))
data3d.OilWaterColumnOilVol_3D[:8, 39, imin:imax, jmin:jmax].sum(axis=0).plot(ax=axs[0, 0], cmap='plasma', vmax=100);
data3d.OilWaterColumnOilVol_3D[8:15, 39, imin:imax, jmin:jmax].sum(axis=0).plot(ax=axs[0, 1], cmap='plasma', vmax=100)
data3d.OilWaterColumnOilVol_3D[15:24, 39, imin:imax, jmin:jmax].sum(axis=0).plot(ax=axs[1, 0], cmap='plasma', vmax=100)
data3d.OilWaterColumnOilVol_3D[24:35, 39, imin:imax, jmin:jmax].sum(axis=0).plot(ax=axs[1, 1], cmap='plasma', vmax=100)
for ax in [axs[0, 0], axs[0, 1], axs[1, 0], axs[1, 1]]:
viz_tools.plot_land_mask(ax, '/Users/sallen/Documents/MEOPAR/grid/bathymetry_201702.nc',
xslice=x_slice, yslice=y_slice, color=landcolour)
ax.set_ylim(600, 675)
ax.set_xlim(100, 175);
# +
fig, ax = plt.subplots(1, 1, figsize=(7, 7))
viz_tools.plot_land_mask(ax, '/Users/sallen/Documents/MEOPAR/grid/bathymetry_201702.nc',
xslice=x_slice, yslice=y_slice, color='black')
data3d.Beaching_Volume[imin:imax , jmin:jmax].plot(ax=ax, cmap='Reds', vmax=8, alpha=0.4);
ax.set_ylim(600, 675)
ax.set_xlim(100, 175);
# +
fig, ax = plt.subplots(1, 1, figsize=(7, 7))
viz_tools.plot_land_mask(ax, '/Users/sallen/Documents/MEOPAR/grid/bathymetry_201702.nc',
xslice=x_slice, yslice=y_slice, color='black')
btime = (np.array(data3d.Beaching_Time - data3d.Beaching_Time.min())
) / np.timedelta64(1, 's') /3600.
colours = ax.pcolormesh(btime, cmap='Reds', alpha=0.4);
cb = fig.colorbar(colours)
cb.set_label('Beaching Time (hr)')
ax.set_ylim(600, 675)
ax.set_xlim(100, 175);
# -
profile = data3d.OilWaterColumnOilVol_3D[:, :, imin:imax, jmin:jmax].sum(axis=3).sum(axis=2).sum(axis=0)
fig, ax = plt.subplots(1, 1)
ax.plot(profile, profile.grid_z, 'o-')
ax.set_ylim(35, 40)
profile
data3d.OilWaterColumnOilVol_3D[:, 0, 640, 120].plot()
| MIDOSS/Snowflake.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Think Bayes
# -----------
#
# Example problem related to *Think Bayes*
#
# [The MIT License](https://opensource.org/licenses/MIT)
# Copyright 2016 <NAME>
import thinkbayes2
# The following problem was submitted to my blog, [Probably Overthinking It](http://allendowney.blogspot.com/2011/10/my-favorite-bayess-theorem-problems.html), by a user named Amit, who wrote:
#
# > The following data is about a poll that occurred in 3 states. In state1, 50% of voters support Party1, in state2, 60% of the voters support Party1, and in state3, 35% of the voters support Party1. Of the total population of the three states, 40% live in state1, 25% live in state2, and 35% live in state3. Given that a voter supports Party1, what is the probability that he lives in state2?
#
# My solution follows. First I'll create a suite to represent our prior knowledge. If we know nothing about a voter, we would use the relative populations of the states to guess where they are from.
prior = thinkbayes2.Suite({'State 1': 0.4, 'State 2': 0.25, 'State 3': 0.35})
prior.Print()
# Now if we know a voter supports Party 1, we can use that as data to update our belief. The following dictionary contains the likelihood of the data (supporting Party 1) under each hypothesis (which state the voter is from).
likelihood = {'State 1': 0.5, 'State 2': 0.60, 'State 3': 0.35}
# To make the posterior distribution, I'll start with a copy of the prior.
#
# The update consists of looping through the hypotheses and multiplying the prior probability of each hypothesis, `hypo`, by the likelihood of the data if `hypo` is true.
#
# The result is a map from hypotheses to posterior likelihoods, but they are not probabilities yet because they are not normalized.
posterior = prior.Copy()
for hypo in posterior:
posterior[hypo] *= likelihood[hypo]
posterior.Print()
# Normalizing the posterior distribution returns the total likelihood of the data, which is the normalizing constant.
posterior.Normalize()
# Now the posterior is a proper distribution:
posterior.Print()
# And the probability that the voter is from State 2 is about 32%.
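# For comparison, here is a minimal sketch (plain Python, no `thinkbayes2`) that reproduces the same update by hand:
prior_probs = {'State 1': 0.4, 'State 2': 0.25, 'State 3': 0.35}
likelihoods = {'State 1': 0.5, 'State 2': 0.60, 'State 3': 0.35}
unnorm = {state: prior_probs[state] * likelihoods[state] for state in prior_probs}
total = sum(unnorm.values())                      # the normalizing constant, 0.4725
posterior_by_hand = {state: p / total for state, p in unnorm.items()}
posterior_by_hand['State 2']                      # approximately 0.317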
| examples/voter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_pytorch_p36
# language: python
# name: conda_pytorch_p36
# ---
# # Creating a Sentiment Analysis Web App
# ## Using PyTorch and SageMaker
#
# _Deep Learning Nanodegree Program | Deployment_
#
# ---
#
# Now that we have a basic understanding of how SageMaker works we will try to use it to construct a complete project from end to end. Our goal will be to have a simple web page which a user can use to enter a movie review. The web page will then send the review off to our deployed model which will predict the sentiment of the entered review.
#
# ## Instructions
#
# Some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this notebook. You will not need to modify the included code beyond what is requested. Sections that begin with '**TODO**' in the header indicate that you need to complete or implement some portion within them. Instructions will be provided for each section and the specifics of the implementation are marked in the code block with a `# TODO: ...` comment. Please be sure to read the instructions carefully!
#
# In addition to implementing code, there will be questions for you to answer which relate to the task and your implementation. Each section where you will answer a question is preceded by a '**Question:**' header. Carefully read each question and provide your answer below the '**Answer:**' header by editing the Markdown cell.
#
# > **Note**: Code and Markdown cells can be executed using the **Shift+Enter** keyboard shortcut. In addition, a cell can be edited by clicking it (double-click for Markdown cells) or by pressing **Enter** while it is highlighted.
#
# ## General Outline
#
# Recall the general outline for SageMaker projects using a notebook instance.
#
# 1. Download or otherwise retrieve the data.
# 2. Process / Prepare the data.
# 3. Upload the processed data to S3.
# 4. Train a chosen model.
# 5. Test the trained model (typically using a batch transform job).
# 6. Deploy the trained model.
# 7. Use the deployed model.
#
# For this project, you will be following the steps in the general outline with some modifications.
#
# First, you will not be testing the model in its own step. You will still be testing the model, however, you will do it by deploying your model and then using the deployed model by sending the test data to it. One of the reasons for doing this is so that you can make sure that your deployed model is working correctly before moving forward.
#
# In addition, you will deploy and use your trained model a second time. In the second iteration you will customize the way that your trained model is deployed by including some of your own code. In addition, your newly deployed model will be used in the sentiment analysis web app.
# ## Step 1: Downloading the data
#
# As in the XGBoost in SageMaker notebook, we will be using the [IMDb dataset](http://ai.stanford.edu/~amaas/data/sentiment/)
#
# > Maas, <NAME>., et al. [Learning Word Vectors for Sentiment Analysis](http://ai.stanford.edu/~amaas/data/sentiment/). In _Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies_. Association for Computational Linguistics, 2011.
# %mkdir ../data
# !wget -O ../data/aclImdb_v1.tar.gz http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz
# !tar -zxf ../data/aclImdb_v1.tar.gz -C ../data
# ## Step 2: Preparing and Processing the data
#
# Also, as in the XGBoost notebook, we will be doing some initial data processing. The first few steps are the same as in the XGBoost example. To begin with, we will read in each of the reviews and combine them into a single input structure. Then, we will split the dataset into a training set and a testing set.
# +
import os
import glob
def read_imdb_data(data_dir='../data/aclImdb'):
data = {}
labels = {}
for data_type in ['train', 'test']:
data[data_type] = {}
labels[data_type] = {}
for sentiment in ['pos', 'neg']:
data[data_type][sentiment] = []
labels[data_type][sentiment] = []
path = os.path.join(data_dir, data_type, sentiment, '*.txt')
files = glob.glob(path)
for f in files:
with open(f) as review:
data[data_type][sentiment].append(review.read())
# Here we represent a positive review by '1' and a negative review by '0'
labels[data_type][sentiment].append(1 if sentiment == 'pos' else 0)
assert len(data[data_type][sentiment]) == len(labels[data_type][sentiment]), \
"{}/{} data size does not match labels size".format(data_type, sentiment)
return data, labels
# -
data, labels = read_imdb_data()
print("IMDB reviews: train = {} pos / {} neg, test = {} pos / {} neg".format(
len(data['train']['pos']), len(data['train']['neg']),
len(data['test']['pos']), len(data['test']['neg'])))
# Now that we've read the raw training and testing data from the downloaded dataset, we will combine the positive and negative reviews and shuffle the resulting records.
# +
from sklearn.utils import shuffle
def prepare_imdb_data(data, labels):
"""Prepare training and test sets from IMDb movie reviews."""
#Combine positive and negative reviews and labels
data_train = data['train']['pos'] + data['train']['neg']
data_test = data['test']['pos'] + data['test']['neg']
labels_train = labels['train']['pos'] + labels['train']['neg']
labels_test = labels['test']['pos'] + labels['test']['neg']
#Shuffle reviews and corresponding labels within training and test sets
data_train, labels_train = shuffle(data_train, labels_train)
data_test, labels_test = shuffle(data_test, labels_test)
    # Return the unified training data, test data, training labels, test labels
return data_train, data_test, labels_train, labels_test
# -
train_X, test_X, train_y, test_y = prepare_imdb_data(data, labels)
print("IMDb reviews (combined): train = {}, test = {}".format(len(train_X), len(test_X)))
# Now that we have our training and testing sets unified and prepared, we should do a quick check and see an example of the data our model will be trained on. This is generally a good idea as it allows you to see how each of the further processing steps affects the reviews and it also ensures that the data has been loaded correctly.
print(train_X[100])
print(train_y[100])
# The first step in processing the reviews is to make sure that any html tags that appear should be removed. In addition we wish to tokenize our input, that way words such as *entertained* and *entertaining* are considered the same with regard to sentiment analysis.
# +
import nltk
from nltk.corpus import stopwords
from nltk.stem.porter import *
import re
from bs4 import BeautifulSoup
def review_to_words(review):
nltk.download("stopwords", quiet=True)
stemmer = PorterStemmer()
text = BeautifulSoup(review, "html.parser").get_text() # Remove HTML tags
text = re.sub(r"[^a-zA-Z0-9]", " ", text.lower()) # Convert to lower case
words = text.split() # Split string into words
words = [w for w in words if w not in stopwords.words("english")] # Remove stopwords
    words = [stemmer.stem(w) for w in words] # stem each word using the PorterStemmer created above
return words
# -
# The `review_to_words` method defined above uses `BeautifulSoup` to remove any html tags that appear and uses the `nltk` package to tokenize the reviews. As a check to ensure we know how everything is working, try applying `review_to_words` to one of the reviews in the training set.
# TODO: Apply review_to_words to a review (train_X[100] or any other review)
print(' '.join(review_to_words(train_X[100])))
# **Question:** Above we mentioned that `review_to_words` method removes html formatting and allows us to tokenize the words found in a review, for example, converting *entertained* and *entertaining* into *entertain* so that they are treated as though they are the same word. What else, if anything, does this method do to the input?
# **Answer:**
#
# The above method:
#
# * Removes HTML tags.
# * Converts the text to lower case.
# * It splits the strings into words, removing whitespaces.
# * It removes stopwords (very common words such as "the" or "and" that carry little sentiment information); punctuation was already stripped by the regular expression.
# * It stems the words, i.e. keeps only the root (stem) of each word, so that, for example, "entertaining" and "entertain" both become the common root "entertain".
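# For example, a quick illustrative check of those steps (the exact stemmed forms may vary slightly):
review_to_words("I was <b>entertained</b> by this movie!")  # expect something like ['entertain', 'movi']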
# The method below applies the `review_to_words` method to each of the reviews in the training and testing datasets. In addition it caches the results. This is because performing this processing step can take a long time. This way if you are unable to complete the notebook in the current session, you can come back without needing to process the data a second time.
# +
import pickle
cache_dir = os.path.join("../cache", "sentiment_analysis") # where to store cache files
os.makedirs(cache_dir, exist_ok=True) # ensure cache directory exists
def preprocess_data(data_train, data_test, labels_train, labels_test,
cache_dir=cache_dir, cache_file="preprocessed_data.pkl"):
"""Convert each review to words; read from cache if available."""
# If cache_file is not None, try to read from it first
cache_data = None
if cache_file is not None:
try:
with open(os.path.join(cache_dir, cache_file), "rb") as f:
cache_data = pickle.load(f)
print("Read preprocessed data from cache file:", cache_file)
except:
pass # unable to read from cache, but that's okay
# If cache is missing, then do the heavy lifting
if cache_data is None:
# Preprocess training and test data to obtain words for each review
#words_train = list(map(review_to_words, data_train))
#words_test = list(map(review_to_words, data_test))
words_train = [review_to_words(review) for review in data_train]
words_test = [review_to_words(review) for review in data_test]
# Write to cache file for future runs
if cache_file is not None:
cache_data = dict(words_train=words_train, words_test=words_test,
labels_train=labels_train, labels_test=labels_test)
with open(os.path.join(cache_dir, cache_file), "wb") as f:
pickle.dump(cache_data, f)
print("Wrote preprocessed data to cache file:", cache_file)
else:
# Unpack data loaded from cache file
words_train, words_test, labels_train, labels_test = (cache_data['words_train'],
cache_data['words_test'], cache_data['labels_train'], cache_data['labels_test'])
return words_train, words_test, labels_train, labels_test
# -
# Preprocess data
train_X, test_X, train_y, test_y = preprocess_data(train_X, test_X, train_y, test_y)
# ## Transform the data
#
# In the XGBoost notebook we transformed the data from its word representation to a bag-of-words feature representation. For the model we are going to construct in this notebook we will construct a feature representation which is very similar. To start, we will represent each word as an integer. Of course, some of the words that appear in the reviews occur very infrequently and so likely don't contain much information for the purposes of sentiment analysis. The way we will deal with this problem is that we will fix the size of our working vocabulary and we will only include the words that appear most frequently. We will then combine all of the infrequent words into a single category and, in our case, we will label it as `1`.
#
# Since we will be using a recurrent neural network, it will be convenient if the length of each review is the same. To do this, we will fix a size for our reviews and then pad short reviews with the category 'no word' (which we will label `0`) and truncate long reviews.
# ### (TODO) Create a word dictionary
#
# To begin with, we need to construct a way to map words that appear in the reviews to integers. Here we fix the size of our vocabulary (including the 'no word' and 'infrequent' categories) to be `5000` but you may wish to change this to see how it affects the model.
#
# > **TODO:** Complete the implementation for the `build_dict()` method below. Note that even though the vocab_size is set to `5000`, we only want to construct a mapping for the most frequently appearing `4998` words. This is because we want to reserve the special labels `0` for 'no word' and `1` for 'infrequent word'.
# +
import numpy as np
def build_dict(data, vocab_size = 5000):
"""Construct and return a dictionary mapping each of the most frequently appearing words to a unique integer."""
# TODO: Determine how often each word appears in `data`. Note that `data` is a list of sentences and that a
# sentence is a list of words.
word_count = {} # A dict storing the words that appear in the reviews along with how often they occur
for review in data:
for word in review:
if word not in word_count:
word_count[word] = 1
else:
word_count[word] += 1
# TODO: Sort the words found in `data` so that sorted_words[0] is the most frequently appearing word and
# sorted_words[-1] is the least frequently appearing word.
sorted_words = sorted(word_count.items(), key=lambda item: item[1], reverse=True)
word_dict = {} # This is what we are building, a dictionary that translates words into integers
for idx, (word, _) in enumerate(sorted_words[:vocab_size - 2]): # The -2 is so that we save room for the 'no word'
word_dict[word] = idx + 2 # 'infrequent' labels
return word_dict
# -
word_dict = build_dict(train_X)
# **Question:** What are the five most frequently appearing (tokenized) words in the training set? Does it makes sense that these words appear frequently in the training set?
# **Answer:**
#
# The most common words are: movie, film, one, like, time.
#
# It does make sense that they are very frequent, since the reviews are about movies and films. "one" and "like" are very common connective words. Finally, "time" can be quite frequent in reviews, as in "worth my time" or "this was a waste of my time".
# TODO: Use this space to determine the five most frequently appearing words in the training set.
for word, idx in word_dict.items():
    if 2 <= idx <= 6:  # word_dict maps the most frequent word to 2, the next to 3, and so on
        print(word)
# ### Save `word_dict`
#
# Later on when we construct an endpoint which processes a submitted review we will need to make use of the `word_dict` which we have created. As such, we will save it to a file now for future use.
data_dir = '../data/pytorch' # The folder we will use for storing data
if not os.path.exists(data_dir): # Make sure that the folder exists
os.makedirs(data_dir)
with open(os.path.join(data_dir, 'word_dict.pkl'), "wb") as f:
pickle.dump(word_dict, f)
# ### Transform the reviews
#
# Now that we have our word dictionary which allows us to transform the words appearing in the reviews into integers, it is time to make use of it and convert our reviews to their integer sequence representation, making sure to pad or truncate to a fixed length, which in our case is `500`.
# +
def convert_and_pad(word_dict, sentence, pad=500):
NOWORD = 0 # We will use 0 to represent the 'no word' category
INFREQ = 1 # and we use 1 to represent the infrequent words, i.e., words not appearing in word_dict
working_sentence = [NOWORD] * pad
for word_index, word in enumerate(sentence[:pad]):
if word in word_dict:
working_sentence[word_index] = word_dict[word]
else:
working_sentence[word_index] = INFREQ
return working_sentence, min(len(sentence), pad)
def convert_and_pad_data(word_dict, data, pad=500):
result = []
lengths = []
for sentence in data:
converted, leng = convert_and_pad(word_dict, sentence, pad)
result.append(converted)
lengths.append(leng)
return np.array(result), np.array(lengths)
# -
train_X, train_X_len = convert_and_pad_data(word_dict, train_X)
test_X, test_X_len = convert_and_pad_data(word_dict, test_X)
# As a quick check to make sure that things are working as intended, check to see what one of the reviews in the training set looks like after having been processeed. Does this look reasonable? What is the length of a review in the training set?
# Use this cell to examine one of the processed reviews to make sure everything is working as intended.
print(train_X[0])
print(len(train_X[0]))
print(train_X_len)
# **Question:** In the cells above we use the `preprocess_data` and `convert_and_pad_data` methods to process both the training and testing set. Why or why not might this be a problem?
# **Answer:**
#
# We might want to use different preprocessing for training and test sets. For example, we might want to add some augmentation to the training data to make the training process more robust. To do that, we would need different preprocessing functions for the training and test data.
# ## Step 3: Upload the data to S3
#
# As in the XGBoost notebook, we will need to upload the training dataset to S3 in order for our training code to access it. For now we will save it locally and we will upload to S3 later on.
#
# ### Save the processed training dataset locally
#
# It is important to note the format of the data that we are saving as we will need to know it when we write the training code. In our case, each row of the dataset has the form `label`, `length`, `review[500]` where `review[500]` is a sequence of `500` integers representing the words in the review.
# +
import pandas as pd
pd.concat([pd.DataFrame(train_y), pd.DataFrame(train_X_len), pd.DataFrame(train_X)], axis=1) \
.to_csv(os.path.join(data_dir, 'train.csv'), header=False, index=False)
# -
# ### Uploading the training data
#
#
# Next, we need to upload the training data to the SageMaker default S3 bucket so that we can provide access to it while training our model.
# +
import sagemaker
sagemaker_session = sagemaker.Session()
bucket = sagemaker_session.default_bucket()
prefix = 'sagemaker/sentiment_rnn'
role = sagemaker.get_execution_role()
# -
input_data = sagemaker_session.upload_data(path=data_dir, bucket=bucket, key_prefix=prefix)
# **NOTE:** The cell above uploads the entire contents of our data directory. This includes the `word_dict.pkl` file. This is fortunate as we will need this later on when we create an endpoint that accepts an arbitrary review. For now, we will just take note of the fact that it resides in the data directory (and so also in the S3 training bucket) and that we will need to make sure it gets saved in the model directory.
# ## Step 4: Build and Train the PyTorch Model
#
# In the XGBoost notebook we discussed what a model is in the SageMaker framework. In particular, a model comprises three objects
#
# - Model Artifacts,
# - Training Code, and
# - Inference Code,
#
# each of which interact with one another. In the XGBoost example we used training and inference code that was provided by Amazon. Here we will still be using containers provided by Amazon with the added benefit of being able to include our own custom code.
#
# We will start by implementing our own neural network in PyTorch along with a training script. For the purposes of this project we have provided the necessary model object in the `model.py` file, inside of the `train` folder. You can see the provided implementation by running the cell below.
# !pygmentize train/model.py
# The important takeaway from the implementation provided is that there are three parameters that we may wish to tweak to improve the performance of our model. These are the embedding dimension, the hidden dimension and the size of the vocabulary. We will likely want to make these parameters configurable in the training script so that if we wish to modify them we do not need to modify the script itself. We will see how to do this later on. To start we will write some of the training code in the notebook so that we can more easily diagnose any issues that arise.
#
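# The actual network is defined in `train/model.py` (displayed by the cell above); the sketch below is only illustrative, assuming the same three constructor arguments (embedding dimension, hidden dimension, vocabulary size) and the `review_length, review[500]` input layout used for the training data.
# +
import torch
import torch.nn as nn

class SketchLSTMClassifier(nn.Module):
    """Illustrative sketch only -- see train/model.py for the model actually used."""
    def __init__(self, embedding_dim, hidden_dim, vocab_size):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx=0)
        self.lstm = nn.LSTM(embedding_dim, hidden_dim)
        self.dense = nn.Linear(hidden_dim, 1)
        self.sig = nn.Sigmoid()

    def forward(self, x):
        x = x.t()                        # (1 + 500, batch): row 0 holds the review lengths
        reviews = x[1:, :]               # word-index sequences, sequence-first for nn.LSTM
        embeds = self.embedding(reviews)
        lstm_out, _ = self.lstm(embeds)
        out = self.dense(lstm_out[-1])   # hidden state at the last time step
        return self.sig(out.squeeze())
# -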
# First we will load a small portion of the training data set to use as a sample. It would be very time consuming to try and train the model completely in the notebook as we do not have access to a gpu and the compute instance that we are using is not particularly powerful. However, we can work on a small bit of the data to get a feel for how our training script is behaving.
# +
import torch
import torch.utils.data
# Read in only the first 250 rows
train_sample = pd.read_csv(os.path.join(data_dir, 'train.csv'), header=None, names=None, nrows=250)
# Turn the input pandas dataframe into tensors
train_sample_y = torch.from_numpy(train_sample[[0]].values).float().squeeze()
train_sample_X = torch.from_numpy(train_sample.drop([0], axis=1).values).long()
# Build the dataset
train_sample_ds = torch.utils.data.TensorDataset(train_sample_X, train_sample_y)
# Build the dataloader
train_sample_dl = torch.utils.data.DataLoader(train_sample_ds, batch_size=50)
# -
# ### (TODO) Writing the training method
#
# Next we need to write the training code itself. This should be very similar to training methods that you have written before to train PyTorch models. We will leave any difficult aspects such as model saving / loading and parameter loading until a little later.
def train(model, train_loader, epochs, optimizer, loss_fn, device):
for epoch in range(1, epochs + 1):
model.train()
total_loss = 0
for batch in train_loader:
batch_X, batch_y = batch
batch_X = batch_X.to(device)
batch_y = batch_y.to(device)
# TODO: Complete this train method to train the model provided.
optimizer.zero_grad()
output = model(batch_X)
loss = loss_fn(output, batch_y)
loss.backward()
optimizer.step()
total_loss += loss.data.item()
print("Epoch: {}, BCELoss: {}".format(epoch, total_loss / len(train_loader)))
# Supposing we have the training method above, we will test that it is working by writing a bit of code in the notebook that executes our training method on the small sample training set that we loaded earlier. The reason for doing this in the notebook is so that we have an opportunity to fix any errors that arise early when they are easier to diagnose.
# +
import torch.optim as optim
from train.model import LSTMClassifier
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = LSTMClassifier(32, 100, 5000).to(device)
optimizer = optim.Adam(model.parameters())
loss_fn = torch.nn.BCELoss()
train(model, train_sample_dl, 5, optimizer, loss_fn, device)
# -
# In order to construct a PyTorch model using SageMaker we must provide SageMaker with a training script. We may optionally include a directory which will be copied to the container and from which our training code will be run. When the training container is executed it will check the uploaded directory (if there is one) for a `requirements.txt` file and install any required Python libraries, after which the training script will be run.
# ### (TODO) Training the model
#
# When a PyTorch model is constructed in SageMaker, an entry point must be specified. This is the Python file which will be executed when the model is trained. Inside of the `train` directory is a file called `train.py` which has been provided and which contains most of the necessary code to train our model. The only thing that is missing is the implementation of the `train()` method which you wrote earlier in this notebook.
#
# **TODO**: Copy the `train()` method written above and paste it into the `train/train.py` file where required.
#
# The way that SageMaker passes hyperparameters to the training script is by way of arguments. These arguments can then be parsed and used in the training script. To see how this is done take a look at the provided `train/train.py` file.
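# The actual parsing lives in `train/train.py`; below is only a minimal sketch of the usual argparse pattern (the hyperparameter names match those passed to the estimator below, the environment-variable defaults are standard SageMaker ones, everything else is illustrative).
# +
import argparse
import os
parser = argparse.ArgumentParser()
parser.add_argument('--epochs', type=int, default=10)
parser.add_argument('--hidden_dim', type=int, default=100)
parser.add_argument('--data-dir', type=str, default=os.environ.get('SM_CHANNEL_TRAINING'))
parser.add_argument('--model-dir', type=str, default=os.environ.get('SM_MODEL_DIR'))
args, _ = parser.parse_known_args([])  # empty arg list so this sketch also runs inside the notebook
# -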
# +
from sagemaker.pytorch import PyTorch
estimator = PyTorch(entry_point="train.py",
source_dir="train",
role=role,
framework_version='0.4.0',
train_instance_count=1,
train_instance_type='ml.p2.xlarge',
hyperparameters={
'epochs': 10,
'hidden_dim': 200,
})
# -
estimator.fit({'training': input_data})
# ## Step 5: Testing the model
#
# As mentioned at the top of this notebook, we will be testing this model by first deploying it and then sending the testing data to the deployed endpoint. We will do this so that we can make sure that the deployed model is working correctly.
#
# ## Step 6: Deploy the model for testing
#
# Now that we have trained our model, we would like to test it to see how it performs. Currently our model takes input of the form `review_length, review[500]` where `review[500]` is a sequence of `500` integers which describe the words present in the review, encoded using `word_dict`. Fortunately for us, SageMaker provides built-in inference code for models with simple inputs such as this.
#
# There is one thing that we need to provide, however, and that is a function which loads the saved model. This function must be called `model_fn()` and takes as its only parameter a path to the directory where the model artifacts are stored. This function must also be present in the python file which we specified as the entry point. In our case the model loading function has been provided and so no changes need to be made.
#
# **NOTE**: When the built-in inference code is run it must import the `model_fn()` method from the `train.py` file. This is why the training code is wrapped in a main guard ( ie, `if __name__ == '__main__':` )
#
# Since we don't need to change anything in the code that was uploaded during training, we can simply deploy the current model as-is.
#
# **NOTE:** When deploying a model you are asking SageMaker to launch a compute instance that will wait for data to be sent to it. As a result, this compute instance will continue to run until *you* shut it down. This is important to know since the cost of a deployed endpoint depends on how long it has been running for.
#
# In other words **If you are no longer using a deployed endpoint, shut it down!**
#
# **TODO:** Deploy the trained model.
# TODO: Deploy the trained model
predictor = estimator.deploy(initial_instance_count=1, instance_type='ml.m4.xlarge')
# ## Step 7 - Use the model for testing
#
# Once deployed, we can read in the test data and send it off to our deployed model to get some results. Once we collect all of the results we can determine how accurate our model is.
test_X = pd.concat([pd.DataFrame(test_X_len), pd.DataFrame(test_X)], axis=1)
# +
# We split the data into chunks and send each chunk separately, accumulating the results.
def predict(data, rows=512):
split_array = np.array_split(data, int(data.shape[0] / float(rows) + 1))
predictions = np.array([])
for array in split_array:
predictions = np.append(predictions, predictor.predict(array))
return predictions
# -
predictions = predict(test_X.values)
predictions = [round(num) for num in predictions]
from sklearn.metrics import accuracy_score
accuracy_score(test_y, predictions)
# **Question:** How does this model compare to the XGBoost model you created earlier? Why might these two models perform differently on this dataset? Which do *you* think is better for sentiment analysis?
# **Answer:**
#
# The XGBoost model reached an accuracy of 0.8694, slightly higher than our model's. It's hard to draw a solid conclusion, however, given the limited amount of test data and how close the two scores are.
#
# The models perform differently because they are built on different principles: XGBoost is an ensemble of gradient-boosted decision trees, while our model is a recurrent neural network (an LSTM).
#
# I think our LSTM model is the better fit for sentiment analysis, since it is built around understanding sentences, which are sequential data. The LSTM maintains both short- and long-term memory, so it can use the immediate context around each word as well as context much further away. That is why LSTMs are deployed in many similar applications.
# ### (TODO) More testing
#
# We now have a trained model which has been deployed and which we can send processed reviews to and which returns the predicted sentiment. However, ultimately we would like to be able to send our model an unprocessed review. That is, we would like to send the review itself as a string. For example, suppose we wish to send the following review to our model.
test_review = 'The simplest pleasures in life are the best, and this film is one of them. Combining a rather basic storyline of love and adventure this movie transcends the usual weekend fair with wit and unmitigated charm.'
# The question we now need to answer is, how do we send this review to our model?
#
# Recall in the first section of this notebook we did a bunch of data processing to the IMDb dataset. In particular, we did two specific things to the provided reviews.
# - Removed any html tags and stemmed the input
# - Encoded the review as a sequence of integers using `word_dict`
#
# In order to process the review we will need to repeat these two steps.
#
# **TODO**: Using the `review_to_words` and `convert_and_pad` methods from section one, convert `test_review` into a numpy array `test_data` suitable to send to our model. Remember that our model expects input of the form `review_length, review[500]`.
# TODO: Convert test_review into a form usable by the model and save the results in test_data
test_data_sentence, test_data_len = convert_and_pad(word_dict, review_to_words(test_review))
test_data = pd.concat([pd.DataFrame([test_data_len]), pd.DataFrame([test_data_sentence])], axis=1)
print(test_data)
# Now that we have processed the review, we can send the resulting array to our model to predict the sentiment of the review.
predictor.predict(test_data)
# Since the return value of our model is close to `1`, we can be certain that the review we submitted is positive.
# ### Delete the endpoint
#
# Of course, just like in the XGBoost notebook, once we've deployed an endpoint it continues to run until we tell it to shut down. Since we are done using our endpoint for now, we can delete it.
estimator.delete_endpoint()
# ## Step 6 (again) - Deploy the model for the web app
#
# Now that we know that our model is working, it's time to create some custom inference code so that we can send the model a review which has not been processed and have it determine the sentiment of the review.
#
# As we saw above, by default the estimator which we created, when deployed, will use the entry script and directory which we provided when creating the model. However, since we now wish to accept a string as input and our model expects a processed review, we need to write some custom inference code.
#
# We will store the code that we write in the `serve` directory. Provided in this directory is the `model.py` file that we used to construct our model, a `utils.py` file which contains the `review_to_words` and `convert_and_pad` pre-processing functions which we used during the initial data processing, and `predict.py`, the file which will contain our custom inference code. Note also that `requirements.txt` is present which will tell SageMaker what Python libraries are required by our custom inference code.
#
# When deploying a PyTorch model in SageMaker, you are expected to provide four functions which the SageMaker inference container will use.
# - `model_fn`: This function is the same function that we used in the training script and it tells SageMaker how to load our model.
# - `input_fn`: This function receives the raw serialized input that has been sent to the model's endpoint and its job is to de-serialize and make the input available for the inference code.
# - `output_fn`: This function takes the output of the inference code and its job is to serialize this output and return it to the caller of the model's endpoint.
# - `predict_fn`: The heart of the inference script, this is where the actual prediction is done and is the function which you will need to complete.
#
# For the simple website that we are constructing during this project, the `input_fn` and `output_fn` methods are relatively straightforward. We only require being able to accept a string as input and we expect to return a single value as output. You might imagine though that in a more complex application the input or output may be image data or some other binary data which would require some effort to serialize.
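#
# For intuition, minimal versions of the two serialization methods might look like the sketch below. This is illustrative only; the actual implementations live in `serve/predict.py`.
#
# ```python
# def input_fn(serialized_input_data, content_type):
#     # The web app sends the review as plain text, so we simply decode the bytes.
#     if content_type == 'text/plain':
#         return serialized_input_data.decode('utf-8')
#     raise Exception('Requested unsupported ContentType: ' + content_type)
#
# def output_fn(prediction_output, accept):
#     # A single number (the predicted sentiment) is returned as a string.
#     return str(prediction_output)
# ```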
#
# ### (TODO) Writing inference code
#
# Before writing our custom inference code, we will begin by taking a look at the code which has been provided.
# !pygmentize serve/predict.py
# As mentioned earlier, the `model_fn` method is the same as the one provided in the training code and the `input_fn` and `output_fn` methods are very simple and your task will be to complete the `predict_fn` method. Make sure that you save the completed file as `predict.py` in the `serve` directory.
#
# **TODO**: Complete the `predict_fn()` method in the `serve/predict.py` file.
# ### Deploying the model
#
# Now that the custom inference code has been written, we will create and deploy our model. To begin with, we need to construct a new PyTorchModel object which points to the model artifacts created during training and also points to the inference code that we wish to use. Then we can call the deploy method to launch the deployment container.
#
# **NOTE**: The default behaviour for a deployed PyTorch model is to assume that any input passed to the predictor is a `numpy` array. In our case we want to send a string so we need to construct a simple wrapper around the `RealTimePredictor` class to accommodate simple strings. In a more complicated situation you may want to provide a serialization object, for example if you wanted to send image data.
# +
from sagemaker.predictor import RealTimePredictor
from sagemaker.pytorch import PyTorchModel
class StringPredictor(RealTimePredictor):
def __init__(self, endpoint_name, sagemaker_session):
super(StringPredictor, self).__init__(endpoint_name, sagemaker_session, content_type='text/plain')
model = PyTorchModel(model_data=estimator.model_data,
role = role,
framework_version='0.4.0',
entry_point='predict.py',
source_dir='serve',
predictor_cls=StringPredictor)
predictor = model.deploy(initial_instance_count=1, instance_type='ml.m4.xlarge')
# -
# ### Testing the model
#
# Now that we have deployed our model with the custom inference code, we should test to see if everything is working. Here we test our model by loading the first `250` positive and negative reviews and send them to the endpoint, then collect the results. The reason for only sending some of the data is that the amount of time it takes for our model to process the input and then perform inference is quite long and so testing the entire data set would be prohibitive.
# +
import glob
def test_reviews(data_dir='../data/aclImdb', stop=250):
results = []
ground = []
# We make sure to test both positive and negative reviews
for sentiment in ['pos', 'neg']:
path = os.path.join(data_dir, 'test', sentiment, '*.txt')
files = glob.glob(path)
files_read = 0
print('Starting ', sentiment, ' files')
# Iterate through the files and send them to the predictor
for f in files:
with open(f) as review:
# First, we store the ground truth (was the review positive or negative)
if sentiment == 'pos':
ground.append(1)
else:
ground.append(0)
# Read in the review and convert to 'utf-8' for transmission via HTTP
review_input = review.read().encode('utf-8')
# Send the review to the predictor and store the results
results.append(int(predictor.predict(review_input)))
# Sending reviews to our endpoint one at a time takes a while so we
# only send a small number of reviews
files_read += 1
if files_read == stop:
break
return ground, results
# -
ground, results = test_reviews()
from sklearn.metrics import accuracy_score
accuracy_score(ground, results)
# As an additional test, we can try sending the `test_review` that we looked at earlier.
predictor.predict(test_review)
# Now that we know our endpoint is working as expected, we can set up the web page that will interact with it. If you don't have time to finish the project now, make sure to skip down to the end of this notebook and shut down your endpoint. You can deploy it again when you come back.
# ## Step 7 (again): Use the model for the web app
#
# > **TODO:** This entire section and the next contain tasks for you to complete, mostly using the AWS console.
#
# So far we have been accessing our model endpoint by constructing a predictor object which uses the endpoint and then just using the predictor object to perform inference. What if we wanted to create a web app which accessed our model? The way things are set up currently makes that not possible since in order to access a SageMaker endpoint the app would first have to authenticate with AWS using an IAM role which included access to SageMaker endpoints. However, there is an easier way! We just need to use some additional AWS services.
#
# <img src="Web App Diagram.svg">
#
# The diagram above gives an overview of how the various services will work together. On the far right is the model which we trained above and which is deployed using SageMaker. On the far left is our web app that collects a user's movie review, sends it off and expects a positive or negative sentiment in return.
#
# In the middle is where some of the magic happens. We will construct a Lambda function, which you can think of as a straightforward Python function that can be executed whenever a specified event occurs. We will give this function permission to send and receive data from a SageMaker endpoint.
#
# Lastly, the method we will use to execute the Lambda function is a new endpoint that we will create using API Gateway. This endpoint will be a url that listens for data to be sent to it. Once it gets some data it will pass that data on to the Lambda function and then return whatever the Lambda function returns. Essentially it will act as an interface that lets our web app communicate with the Lambda function.
#
# ### Setting up a Lambda function
#
# The first thing we are going to do is set up a Lambda function. This Lambda function will be executed whenever our public API has data sent to it. When it is executed it will receive the data, perform any sort of processing that is required, send the data (the review) to the SageMaker endpoint we've created and then return the result.
#
# #### Part A: Create an IAM Role for the Lambda function
#
# Since we want the Lambda function to call a SageMaker endpoint, we need to make sure that it has permission to do so. To do this, we will construct a role that we can later give the Lambda function.
#
# Using the AWS Console, navigate to the **IAM** page and click on **Roles**. Then, click on **Create role**. Make sure that the **AWS service** is the type of trusted entity selected and choose **Lambda** as the service that will use this role, then click **Next: Permissions**.
#
# In the search box type `sagemaker` and select the check box next to the **AmazonSageMakerFullAccess** policy. Then, click on **Next: Review**.
#
# Lastly, give this role a name. Make sure you use a name that you will remember later on, for example `LambdaSageMakerRole`. Then, click on **Create role**.
#
# #### Part B: Create a Lambda function
#
# Now it is time to actually create the Lambda function.
#
# Using the AWS Console, navigate to the AWS Lambda page and click on **Create a function**. When you get to the next page, make sure that **Author from scratch** is selected. Now, name your Lambda function, using a name that you will remember later on, for example `sentiment_analysis_func`. Make sure that the **Python 3.6** runtime is selected and then choose the role that you created in the previous part. Then, click on **Create Function**.
#
# On the next page you will see some information about the Lambda function you've just created. If you scroll down you should see an editor in which you can write the code that will be executed when your Lambda function is triggered. In our example, we will use the code below.
#
# ```python
# # We need to use the low-level library to interact with SageMaker since the SageMaker API
# # is not available natively through Lambda.
# import boto3
#
# def lambda_handler(event, context):
#
# # The SageMaker runtime is what allows us to invoke the endpoint that we've created.
# runtime = boto3.Session().client('sagemaker-runtime')
#
# # Now we use the SageMaker runtime to invoke our endpoint, sending the review we were given
# response = runtime.invoke_endpoint(EndpointName = '**ENDPOINT NAME HERE**', # The name of the endpoint we created
# ContentType = 'text/plain', # The data format that is expected
# Body = event['body']) # The actual review
#
# # The response is an HTTP response whose body contains the result of our inference
# result = response['Body'].read().decode('utf-8')
#
# return {
# 'statusCode' : 200,
# 'headers' : { 'Content-Type' : 'text/plain', 'Access-Control-Allow-Origin' : '*' },
# 'body' : result
# }
# ```
#
# Once you have copied and pasted the code above into the Lambda code editor, replace the `**ENDPOINT NAME HERE**` portion with the name of the endpoint that we deployed earlier. You can determine the name of the endpoint using the code cell below.
predictor.endpoint
# Once you have added the endpoint name to the Lambda function, click on **Save**. Your Lambda function is now up and running. Next we need to create a way for our web app to execute the Lambda function.
#
# ### Setting up API Gateway
#
# Now that our Lambda function is set up, it is time to create a new API using API Gateway that will trigger the Lambda function we have just created.
#
# Using AWS Console, navigate to **Amazon API Gateway** and then click on **Get started**.
#
# On the next page, make sure that **New API** is selected and give the new api a name, for example, `sentiment_analysis_api`. Then, click on **Create API**.
#
# Now we have created an API, however it doesn't currently do anything. What we want it to do is to trigger the Lambda function that we created earlier.
#
# Select the **Actions** dropdown menu and click **Create Method**. A new blank method will be created, select its dropdown menu and select **POST**, then click on the check mark beside it.
#
# For the integration point, make sure that **Lambda Function** is selected and click on the **Use Lambda Proxy integration**. This option makes sure that the data that is sent to the API is then sent directly to the Lambda function with no processing. It also means that the return value must be a proper response object as it will also not be processed by API Gateway.
#
# Type the name of the Lambda function you created earlier into the **Lambda Function** text entry box and then click on **Save**. Click on **OK** in the pop-up box that then appears, giving permission to API Gateway to invoke the Lambda function you created.
#
# The last step in creating the API Gateway is to select the **Actions** dropdown and click on **Deploy API**. You will need to create a new Deployment stage and name it anything you like, for example `prod`.
#
# You have now successfully set up a public API to access your SageMaker model. Make sure to copy or write down the URL provided to invoke your newly created public API as this will be needed in the next step. This URL can be found at the top of the page, highlighted in blue next to the text **Invoke URL**.
# ## Step 4: Deploying our web app
#
# Now that we have a publicly available API, we can start using it in a web app. For our purposes, we have provided a simple static html file which can make use of the public api you created earlier.
#
# In the `website` folder there should be a file called `index.html`. Download the file to your computer and open that file up in a text editor of your choice. There should be a line which contains **\*\*REPLACE WITH PUBLIC API URL\*\***. Replace this string with the url that you wrote down in the last step and then save the file.
#
# Now, if you open `index.html` on your local computer, your browser will behave as a local web server and you can use the provided site to interact with your SageMaker model.
#
# If you'd like to go further, you can host this html file anywhere you'd like, for example using github or hosting a static site on Amazon's S3. Once you have done this you can share the link with anyone you'd like and have them play with it too!
#
# > **Important Note** In order for the web app to communicate with the SageMaker endpoint, the endpoint has to actually be deployed and running. This means that you are paying for it. Make sure that the endpoint is running when you want to use the web app but that you shut it down when you don't need it, otherwise you will end up with a surprisingly large AWS bill.
#
# **TODO:** Make sure that you include the edited `index.html` file in your project submission.
# Now that your web app is working, try playing around with it and see how well it works.
#
# **Question**: Give an example of a review that you entered into your web app. What was the predicted sentiment of your example review?
# **Answer:**
#
# Example of positive review that was correctly classified:
#
# This has been a real treat! An amazing series, great acting, direction and such a suspenseful story it's really one of the very best I've seen ever. I love heist movies and I just found this one in Netflix and I literally couldn't stop watching through the night. The characters are simply amazing! Don't miss this!
#
# Example of negative review that was correctly classified:
#
# The series started with a good story and should have been ended since season 2 thats it, season 4 is the worse, a season with no meaning and no story and just a waste of time.
# ### Delete the endpoint
#
# Remember to always shut down your endpoint if you are no longer using it. You are charged for the length of time that the endpoint is running so if you forget and leave it on you could end up with an unexpectedly large bill.
predictor.delete_endpoint()
| project-deployment/SageMaker Project.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Image Captioning with RNNs
# In this exercise you will implement a vanilla recurrent neural network and use it to train a model that can generate novel captions for images.
# +
# As usual, a bit of setup
import time, os, json
import numpy as np
import matplotlib.pyplot as plt
from cs231n.gradient_check import eval_numerical_gradient, eval_numerical_gradient_array
from cs231n.rnn_layers import *
from cs231n.captioning_solver import CaptioningSolver
from cs231n.classifiers.rnn import CaptioningRNN
from cs231n.coco_utils import load_coco_data, sample_coco_minibatch, decode_captions
from cs231n.image_utils import image_from_url
# %matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
# %load_ext autoreload
# %autoreload 2
def rel_error(x, y):
""" returns relative error """
return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))
# -
# # Microsoft COCO
# For this exercise we will use the 2014 release of the [Microsoft COCO dataset](http://mscoco.org/) which has become the standard testbed for image captioning. The dataset consists of 80,000 training images and 40,000 validation images, each annotated with 5 captions written by workers on Amazon Mechanical Turk.
#
# To download the data, change to the `cs231n/datasets` directory and run the script `get_coco_captioning.sh`.
#
# We have preprocessed the data and extracted features for you already. For all images we have extracted features from the fc7 layer of the VGG-16 network pretrained on ImageNet; these features are stored in the files `train2014_vgg16_fc7.h5` and `val2014_vgg16_fc7.h5` respectively. To cut down on processing time and memory requirements, we have reduced the dimensionality of the features from 4096 to 512; these features can be found in the files `train2014_vgg16_fc7_pca.h5` and `val2014_vgg16_fc7_pca.h5`.
#
# The raw images take up a lot of space (nearly 20GB) so we have not included them in the download. However all images are taken from Flickr, and URLs of the training and validation images are stored in the files `train2014_urls.txt` and `val2014_urls.txt` respectively. This allows you to download images on the fly for visualization. Since images are downloaded on-the-fly, **you must be connected to the internet to view images**.
#
# Dealing with strings is inefficient, so we will work with an encoded version of the captions. Each word is assigned an integer ID, allowing us to represent a caption by a sequence of integers. The mapping between integer IDs and words is in the file `coco2014_vocab.json`, and you can use the function `decode_captions` from the file `cs231n/coco_utils.py` to convert numpy arrays of integer IDs back into strings.
#
# There are a couple special tokens that we add to the vocabulary. We prepend a special `<START>` token and append an `<END>` token to the beginning and end of each caption respectively. Rare words are replaced with a special `<UNK>` token (for "unknown"). In addition, since we want to train with minibatches containing captions of different lengths, we pad short captions with a special `<NULL>` token after the `<END>` token and don't compute loss or gradient for `<NULL>` tokens. Since they are a bit of a pain, we have taken care of all implementation details around special tokens for you.
#
# You can load all of the MS-COCO data (captions, features, URLs, and vocabulary) using the `load_coco_data` function from the file `cs231n/coco_utils.py`. Run the following cell to do so:
# +
# Load COCO data from disk; this returns a dictionary
# We'll work with dimensionality-reduced features for this notebook, but feel
# free to experiment with the original features by changing the flag below.
data = load_coco_data(pca_features=True)
# Print out all the keys and values from the data dictionary
for k, v in data.iteritems():
if type(v) == np.ndarray:
print k, type(v), v.shape, v.dtype
else:
print k, type(v), len(v)
# -
# ## Look at the data
# It is always a good idea to look at examples from the dataset before working with it.
#
# You can use the `sample_coco_minibatch` function from the file `cs231n/coco_utils.py` to sample minibatches of data from the data structure returned from `load_coco_data`. Run the following to sample a small minibatch of training data and show the images and their captions. Running it multiple times and looking at the results helps you to get a sense of the dataset.
#
# Note that we decode the captions using the `decode_captions` function and that we download the images on-the-fly using their Flickr URL, so **you must be connected to the internet to view images**.
# +
# Sample a minibatch and show the images and captions
batch_size = 3
captions, features, urls = sample_coco_minibatch(data, batch_size=batch_size)
for i, (caption, url) in enumerate(zip(captions, urls)):
plt.imshow(image_from_url(url))
plt.axis('off')
caption_str = decode_captions(caption, data['idx_to_word'])
plt.title(caption_str)
plt.show()
# -
# # Recurrent Neural Networks
# As discussed in lecture, we will use recurrent neural network (RNN) language models for image captioning. The file `cs231n/rnn_layers.py` contains implementations of different layer types that are needed for recurrent neural networks, and the file `cs231n/classifiers/rnn.py` uses these layers to implement an image captioning model.
#
# We will first implement different types of RNN layers in `cs231n/rnn_layers.py`.
# # Vanilla RNN: step forward
# Open the file `cs231n/rnn_layers.py`. This file implements the forward and backward passes for different types of layers that are commonly used in recurrent neural networks.
#
# First implement the function `rnn_step_forward` which implements the forward pass for a single timestep of a vanilla recurrent neural network. After doing so run the following to check your implementation.
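#
# Before running the check, recall that a single vanilla RNN step is just an affine transform of the input and the previous hidden state followed by a `tanh` nonlinearity. A sketch of the core computation (your function must also return a cache for the backward pass):
#
# ```python
# next_h = np.tanh(x.dot(Wx) + prev_h.dot(Wh) + b)
# ```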
# +
N, D, H = 3, 10, 4
x = np.linspace(-0.4, 0.7, num=N*D).reshape(N, D)
prev_h = np.linspace(-0.2, 0.5, num=N*H).reshape(N, H)
Wx = np.linspace(-0.1, 0.9, num=D*H).reshape(D, H)
Wh = np.linspace(-0.3, 0.7, num=H*H).reshape(H, H)
b = np.linspace(-0.2, 0.4, num=H)
next_h, _ = rnn_step_forward(x, prev_h, Wx, Wh, b)
expected_next_h = np.asarray([
[-0.58172089, -0.50182032, -0.41232771, -0.31410098],
[ 0.66854692, 0.79562378, 0.87755553, 0.92795967],
[ 0.97934501, 0.99144213, 0.99646691, 0.99854353]])
print 'next_h error: ', rel_error(expected_next_h, next_h)
# -
# # Vanilla RNN: step backward
# In the file `cs231n/rnn_layers.py` implement the `rnn_step_backward` function. After doing so run the following to numerically gradient check your implementation. You should see errors less than `1e-8`.
# +
from cs231n.rnn_layers import rnn_step_forward, rnn_step_backward
N, D, H = 4, 5, 6
x = np.random.randn(N, D)
h = np.random.randn(N, H)
Wx = np.random.randn(D, H)
Wh = np.random.randn(H, H)
b = np.random.randn(H)
out, cache = rnn_step_forward(x, h, Wx, Wh, b)
dnext_h = np.random.randn(*out.shape)
fx = lambda x: rnn_step_forward(x, h, Wx, Wh, b)[0]
fh = lambda prev_h: rnn_step_forward(x, h, Wx, Wh, b)[0]
fWx = lambda Wx: rnn_step_forward(x, h, Wx, Wh, b)[0]
fWh = lambda Wh: rnn_step_forward(x, h, Wx, Wh, b)[0]
fb = lambda b: rnn_step_forward(x, h, Wx, Wh, b)[0]
dx_num = eval_numerical_gradient_array(fx, x, dnext_h)
dprev_h_num = eval_numerical_gradient_array(fh, h, dnext_h)
dWx_num = eval_numerical_gradient_array(fWx, Wx, dnext_h)
dWh_num = eval_numerical_gradient_array(fWh, Wh, dnext_h)
db_num = eval_numerical_gradient_array(fb, b, dnext_h)
dx, dprev_h, dWx, dWh, db = rnn_step_backward(dnext_h, cache)
print 'dx error: ', rel_error(dx_num, dx)
print 'dprev_h error: ', rel_error(dprev_h_num, dprev_h)
print 'dWx error: ', rel_error(dWx_num, dWx)
print 'dWh error: ', rel_error(dWh_num, dWh)
print 'db error: ', rel_error(db_num, db)
# -
# # Vanilla RNN: forward
# Now that you have implemented the forward and backward passes for a single timestep of a vanilla RNN, you will combine these pieces to implement an RNN that processes an entire sequence of data.
#
# In the file `cs231n/rnn_layers.py`, implement the function `rnn_forward`. This should be implemented using the `rnn_step_forward` function that you defined above. After doing so run the following to check your implementation. You should see errors less than `1e-7`.
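#
# Conceptually, the forward pass just applies `rnn_step_forward` once per timestep, carrying the hidden state along. A rough sketch of the loop (cache bookkeeping omitted):
#
# ```python
# h = np.zeros((N, T, H))
# prev_h = h0
# for t in range(T):
#     prev_h, _ = rnn_step_forward(x[:, t, :], prev_h, Wx, Wh, b)
#     h[:, t, :] = prev_h
# ```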
# +
N, T, D, H = 2, 3, 4, 5
x = np.linspace(-0.1, 0.3, num=N*T*D).reshape(N, T, D)
h0 = np.linspace(-0.3, 0.1, num=N*H).reshape(N, H)
Wx = np.linspace(-0.2, 0.4, num=D*H).reshape(D, H)
Wh = np.linspace(-0.4, 0.1, num=H*H).reshape(H, H)
b = np.linspace(-0.7, 0.1, num=H)
h, _ = rnn_forward(x, h0, Wx, Wh, b)
expected_h = np.asarray([
[
[-0.42070749, -0.27279261, -0.11074945, 0.05740409, 0.22236251],
[-0.39525808, -0.22554661, -0.0409454, 0.14649412, 0.32397316],
[-0.42305111, -0.24223728, -0.04287027, 0.15997045, 0.35014525],
],
[
[-0.55857474, -0.39065825, -0.19198182, 0.02378408, 0.23735671],
[-0.27150199, -0.07088804, 0.13562939, 0.33099728, 0.50158768],
[-0.51014825, -0.30524429, -0.06755202, 0.17806392, 0.40333043]]])
print 'h error: ', rel_error(expected_h, h)
# -
# # Vanilla RNN: backward
# In the file `cs231n/rnn_layers.py`, implement the backward pass for a vanilla RNN in the function `rnn_backward`. This should run back-propagation over the entire sequence, calling into the `rnn_step_backward` function that you defined above.
# +
N, D, T, H = 2, 3, 10, 5
x = np.random.randn(N, T, D)
h0 = np.random.randn(N, H)
Wx = np.random.randn(D, H)
Wh = np.random.randn(H, H)
b = np.random.randn(H)
out, cache = rnn_forward(x, h0, Wx, Wh, b)
dout = np.random.randn(*out.shape)
dx, dh0, dWx, dWh, db = rnn_backward(dout, cache)
fx = lambda x: rnn_forward(x, h0, Wx, Wh, b)[0]
fh0 = lambda h0: rnn_forward(x, h0, Wx, Wh, b)[0]
fWx = lambda Wx: rnn_forward(x, h0, Wx, Wh, b)[0]
fWh = lambda Wh: rnn_forward(x, h0, Wx, Wh, b)[0]
fb = lambda b: rnn_forward(x, h0, Wx, Wh, b)[0]
dx_num = eval_numerical_gradient_array(fx, x, dout)
dh0_num = eval_numerical_gradient_array(fh0, h0, dout)
dWx_num = eval_numerical_gradient_array(fWx, Wx, dout)
dWh_num = eval_numerical_gradient_array(fWh, Wh, dout)
db_num = eval_numerical_gradient_array(fb, b, dout)
print 'dx error: ', rel_error(dx_num, dx)
print 'dh0 error: ', rel_error(dh0_num, dh0)
print 'dWx error: ', rel_error(dWx_num, dWx)
print 'dWh error: ', rel_error(dWh_num, dWh)
print 'db error: ', rel_error(db_num, db)
# -
# # Word embedding: forward
# In deep learning systems, we commonly represent words using vectors. Each word of the vocabulary will be associated with a vector, and these vectors will be learned jointly with the rest of the system.
#
# In the file `cs231n/rnn_layers.py`, implement the function `word_embedding_forward` to convert words (represented by integers) into vectors. Run the following to check your implementation. You should see error around `1e-8`.
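#
# Since each row of the weight matrix `W` stores the vector for one word, the forward pass reduces to integer array indexing. A sketch:
#
# ```python
# out = W[x]  # shape (N, T, D) when x has shape (N, T) and W has shape (V, D)
# ```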
# +
N, T, V, D = 2, 4, 5, 3
x = np.asarray([[0, 3, 1, 2], [2, 1, 0, 3]])
W = np.linspace(0, 1, num=V*D).reshape(V, D)
out, _ = word_embedding_forward(x, W)
expected_out = np.asarray([
[[ 0., 0.07142857, 0.14285714],
[ 0.64285714, 0.71428571, 0.78571429],
[ 0.21428571, 0.28571429, 0.35714286],
[ 0.42857143, 0.5, 0.57142857]],
[[ 0.42857143, 0.5, 0.57142857],
[ 0.21428571, 0.28571429, 0.35714286],
[ 0., 0.07142857, 0.14285714],
[ 0.64285714, 0.71428571, 0.78571429]]])
print 'out error: ', rel_error(expected_out, out)
# -
# # Word embedding: backward
# Implement the backward pass for the word embedding function in the function `word_embedding_backward`. After doing so run the following to numerically gradient check your implementation. You should see errors less than `1e-11`.
# +
N, T, V, D = 50, 3, 5, 6
x = np.random.randint(V, size=(N, T))
W = np.random.randn(V, D)
out, cache = word_embedding_forward(x, W)
dout = np.random.randn(*out.shape)
dW = word_embedding_backward(dout, cache)
f = lambda W: word_embedding_forward(x, W)[0]
dW_num = eval_numerical_gradient_array(f, W, dout)
print 'dW error: ', rel_error(dW, dW_num)
# -
# # Temporal Affine layer
# At every timestep we use an affine function to transform the RNN hidden vector at that timestep into scores for each word in the vocabulary. Because this is very similar to the affine layer that you implemented in assignment 2, we have provided this function for you in the `temporal_affine_forward` and `temporal_affine_backward` functions in the file `cs231n/rnn_layers.py`. Run the following to perform numeric gradient checking on the implementation.
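#
# Conceptually, the layer applies the same affine transform at every timestep, which can be done by temporarily collapsing the batch and time dimensions. A sketch of the forward computation:
#
# ```python
# out = x.reshape(N * T, D).dot(w).reshape(N, T, M) + b
# ```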
# +
# Gradient check for temporal affine layer
N, T, D, M = 2, 3, 4, 5
x = np.random.randn(N, T, D)
w = np.random.randn(D, M)
b = np.random.randn(M)
out, cache = temporal_affine_forward(x, w, b)
dout = np.random.randn(*out.shape)
fx = lambda x: temporal_affine_forward(x, w, b)[0]
fw = lambda w: temporal_affine_forward(x, w, b)[0]
fb = lambda b: temporal_affine_forward(x, w, b)[0]
dx_num = eval_numerical_gradient_array(fx, x, dout)
dw_num = eval_numerical_gradient_array(fw, w, dout)
db_num = eval_numerical_gradient_array(fb, b, dout)
dx, dw, db = temporal_affine_backward(dout, cache)
print 'dx error: ', rel_error(dx_num, dx)
print 'dw error: ', rel_error(dw_num, dw)
print 'db error: ', rel_error(db_num, db)
# -
# # Temporal Softmax loss
# In an RNN language model, at every timestep we produce a score for each word in the vocabulary. We know the ground-truth word at each timestep, so we use a softmax loss function to compute loss and gradient at each timestep. We sum the losses over time and average them over the minibatch.
#
# However there is one wrinkle: since we operate over minibatches and different captions may have different lengths, we append `<NULL>` tokens to the end of each caption so they all have the same length. We don't want these `<NULL>` tokens to count toward the loss or gradient, so in addition to scores and ground-truth labels our loss function also accepts a `mask` array that tells it which elements of the scores count towards the loss.
#
# Since this is very similar to the softmax loss function you implemented in assignment 1, we have implemented this loss function for you; look at the `temporal_softmax_loss` function in the file `cs231n/rnn_layers.py`.
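#
# In essence, the mask simply zeroes out the contribution of `<NULL>` positions. A conceptual sketch of the computation (the provided implementation is organized a bit differently):
#
# ```python
# flat = x.reshape(N * T, V)
# probs = np.exp(flat - flat.max(axis=1, keepdims=True))
# probs /= probs.sum(axis=1, keepdims=True)                  # softmax over the vocabulary
# correct = probs[np.arange(N * T), y.reshape(N * T)]        # probability of the ground-truth word
# loss = -np.sum(mask.reshape(N * T) * np.log(correct)) / N  # masked sum, averaged over the minibatch
# ```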
#
# Run the following cell to sanity check the loss and perform numeric gradient checking on the function.
# +
# Sanity check for temporal softmax loss
from cs231n.rnn_layers import temporal_softmax_loss
N, T, V = 100, 1, 10
def check_loss(N, T, V, p):
x = 0.001 * np.random.randn(N, T, V)
y = np.random.randint(V, size=(N, T))
mask = np.random.rand(N, T) <= p
print temporal_softmax_loss(x, y, mask)[0]
check_loss(100, 1, 10, 1.0) # Should be about 2.3
check_loss(100, 10, 10, 1.0) # Should be about 23
check_loss(5000, 10, 10, 0.1) # Should be about 2.3
# Gradient check for temporal softmax loss
N, T, V = 7, 8, 9
x = np.random.randn(N, T, V)
y = np.random.randint(V, size=(N, T))
mask = (np.random.rand(N, T) > 0.5)
loss, dx = temporal_softmax_loss(x, y, mask, verbose=False)
dx_num = eval_numerical_gradient(lambda x: temporal_softmax_loss(x, y, mask)[0], x, verbose=False)
print 'dx error: ', rel_error(dx, dx_num)
# -
# # RNN for image captioning
# Now that you have implemented the necessary layers, you can combine them to build an image captioning model. Open the file `cs231n/classifiers/rnn.py` and look at the `CaptioningRNN` class.
#
# Implement the forward and backward pass of the model in the `loss` function. For now you only need to implement the case where `cell_type='rnn'` for vanilla RNNs; you will implement the LSTM case later. After doing so, run the following to check your forward pass using a small test case; you should see error less than `1e-10`.
# +
N, D, W, H = 10, 20, 30, 40
word_to_idx = {'<NULL>': 0, 'cat': 2, 'dog': 3}
V = len(word_to_idx)
T = 13
model = CaptioningRNN(word_to_idx,
input_dim=D,
wordvec_dim=W,
hidden_dim=H,
cell_type='rnn',
dtype=np.float64)
# Set all model parameters to fixed values
for k, v in model.params.iteritems():
model.params[k] = np.linspace(-1.4, 1.3, num=v.size).reshape(*v.shape)
features = np.linspace(-1.5, 0.3, num=(N * D)).reshape(N, D)
captions = (np.arange(N * T) % V).reshape(N, T)
loss, grads = model.loss(features, captions)
expected_loss = 9.83235591003
print 'loss: ', loss
print 'expected loss: ', expected_loss
print 'difference: ', abs(loss - expected_loss)
# -
# Run the following cell to perform numeric gradient checking on the `CaptioningRNN` class; you should see errors around `1e-7` or less.
# +
batch_size = 2
timesteps = 3
input_dim = 4
wordvec_dim = 5
hidden_dim = 6
word_to_idx = {'<NULL>': 0, 'cat': 2, 'dog': 3}
vocab_size = len(word_to_idx)
captions = np.random.randint(vocab_size, size=(batch_size, timesteps))
features = np.random.randn(batch_size, input_dim)
model = CaptioningRNN(word_to_idx,
input_dim=input_dim,
wordvec_dim=wordvec_dim,
hidden_dim=hidden_dim,
cell_type='rnn',
dtype=np.float64,
)
loss, grads = model.loss(features, captions)
for param_name in sorted(grads):
f = lambda _: model.loss(features, captions)[0]
param_grad_num = eval_numerical_gradient(f, model.params[param_name], verbose=False, h=1e-6)
e = rel_error(param_grad_num, grads[param_name])
print '%s relative error: %e' % (param_name, e)
# -
# # Overfit small data
# Similar to the `Solver` class that we used to train image classification models on the previous assignment, on this assignment we use a `CaptioningSolver` class to train image captioning models. Open the file `cs231n/captioning_solver.py` and read through the `CaptioningSolver` class; it should look very familiar.
#
# Once you have familiarized yourself with the API, run the following to make sure your model can overfit the small training sample loaded below (here `max_train=50` examples). You should see losses around 1.
# +
small_data = load_coco_data(max_train=50)
small_rnn_model = CaptioningRNN(
cell_type='rnn',
word_to_idx=data['word_to_idx'],
input_dim=data['train_features'].shape[1],
hidden_dim=512,
wordvec_dim=256,
)
small_rnn_solver = CaptioningSolver(small_rnn_model, small_data,
update_rule='adam',
num_epochs=50,
batch_size=25,
optim_config={
'learning_rate': 5e-3,
},
lr_decay=0.95,
verbose=True, print_every=10,
)
small_rnn_solver.train()
# Plot the training losses
plt.plot(small_rnn_solver.loss_history)
plt.xlabel('Iteration')
plt.ylabel('Loss')
plt.title('Training loss history')
plt.show()
# -
# # Test-time sampling
# Unlike classification models, image captioning models behave very differently at training time and at test time. At training time, we have access to the ground-truth caption so we feed ground-truth words as input to the RNN at each timestep. At test time, we sample from the distribution over the vocabulary at each timestep, and feed the sample as input to the RNN at the next timestep.
#
# In the file `cs231n/classifiers/rnn.py`, implement the `sample` method for test-time sampling. After doing so, run the following to sample from your overfit model on both training and validation data. The samples on training data should be very good; the samples on validation data probably won't make sense.
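#
# A rough outline of greedy decoding is shown below. This is a sketch, not the required implementation; the parameter and attribute names are assumptions and should be adapted to your `CaptioningRNN` class.
#
# ```python
# h = features.dot(W_proj) + b_proj             # initial hidden state from the image features
# word = np.full(N, start_token)                # begin every caption with the <START> token
# for t in range(max_length):
#     x = W_embed[word]                         # embed the previous word, shape (N, W)
#     h, _ = rnn_step_forward(x, h, Wx, Wh, b)  # one RNN step
#     scores = h.dot(W_vocab) + b_vocab         # scores over the vocabulary, shape (N, V)
#     word = scores.argmax(axis=1)              # greedy choice (sampling from the softmax also works)
#     captions[:, t] = word
# ```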
for split in ['train', 'val']:
minibatch = sample_coco_minibatch(small_data, split=split, batch_size=2)
gt_captions, features, urls = minibatch
gt_captions = decode_captions(gt_captions, data['idx_to_word'])
sample_captions = small_rnn_model.sample(features)
sample_captions = decode_captions(sample_captions, data['idx_to_word'])
for gt_caption, sample_caption, url in zip(gt_captions, sample_captions, urls):
plt.imshow(image_from_url(url))
plt.title('%s\n%s\nGT:%s' % (split, sample_caption, gt_caption))
plt.axis('off')
plt.show()
| assignment3/.ipynb_checkpoints/RNN_Captioning-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#url = https://opendata.cwb.gov.tw/fileapi/v1/opendataapi/{dataid}?Authorization={apikey}&format={format}
#{dataid} is the dataset ID (see the data list), e.g. F-A0012-001
#{apikey} is the authorization key tied to your member account <KEY>
#{format} is the data format; check each dataset page for the downloadable file formats, e.g. XML, CAP, JSON, ZIP, KMZ, GRIB2
#Example: https://opendata.cwb.gov.tw/fileapi/v1/opendataapi/F-C0032-001?Authorization=CWB-EB4703E5-E2F4-4CA1-8C04-FB94D6B2F6B8&format=json
#Please also add a caching feature, as noted above.
#API list: https://opendata.cwb.gov.tw/dist/opendata-swagger.html
import requests
import json
url = 'https://opendata.cwb.gov.tw/fileapi/v1/opendataapi/F-C0032-001?Authorization=CWB-EB4703E5-E2F4-4CA1-8C04-FB94D6B2F6B8&format=json'
res = requests.get(url)
data = json.loads(res.text)
final_list = data["cwbopendata"]["dataset"]["location"]
taipei = []
new_taipei = []
for i in final_list:
if i['locationName'] == '臺北市':
data = i
city = data['locationName']
for j in range(0,3):
dic = {}
r = []
for i in data['weatherElement']:
r.append(i['time'][j])
l = []
for i in r:
l.append(i['parameter']['parameterName'])
l.append(f"{i['startTime'][11:16]}"'-'f"{i['endTime'][11:16]}")
dic[city] = l
taipei.append(dic)
# elif i['locationName'] == '新北市':
# data = i
# city = data['locationName']
# for j in range(0,3):
# dic = {}
# r = []
# for i in data['weatherElement']:
# r.append(i['time'][j])
# l = []
# for i in r:
# l.append(i['parameter']['parameterName'])
# l.append(f"{i['startTime'][11:16]}"'-'f"{i['endTime'][11:16]}")
# dic[city] = l
# new_taipei.append(dic)
# else:
# continue
# -
taipei
# + tags=[]
from bs4 import BeautifulSoup as bs
import requests
import pandas as pd
import datetime
import tkinter as tk
res = requests.get('https://rate.bot.com.tw/xrt?Lang=zh-TW')
soup = bs(res.text,'lxml')
update_time = []
average = []
currency = [] # currency name
spread = []
final_list = []
data = soup.select('.print_show')
r = []
for i in data:
r.append(i.text.split())
for i in r:
dic = {}
dic['幣別'] = i[0]
final_list.append(dic)
# cash buying rate
purchase = []
data = soup.select('#ie11andabove > div > table > tbody > tr > td:nth-child(2)')
count = 0
for i in data:
final_list[count]['現金買價'] = i.text
count += 1
# cash selling rate
selling = []
data = soup.select('#ie11andabove > div > table > tbody > tr > td:nth-child(3)')
count = 0
for i in data:
final_list[count]['現金賣價'] = i.text
count += 1
# spot buying rate
purchase1 = []
data = soup.select('#ie11andabove > div > table > tbody > tr > td:nth-child(4)')
count = 0
for i in data:
final_list[count]['即期買價'] = i.text
count += 1
# spot selling rate
selling1 = []
data = soup.select('#ie11andabove > div > table > tbody > tr > td:nth-child(5)')
count = 0
for i in data:
final_list[count]['即期賣價'] = i.text
count += 1
# -
# +
#newsapi
from newsapi import NewsApiClient
import json
newsapi = NewsApiClient(api_key='<KEY>')
all_articles = newsapi.get_everything(q='國際 AND NOT 外遇',
domains='ettoday.net,Technews.tw,Setn.com,udn.com,Ltn.com.tw',
# from_param=2020-05-01',
# to='2020-06-01',
language='zh',
sort_by='publishedAt', #由新到舊
page_size = 30,
page = 1
) #篇幅為100
fn = 'Exam2-2.json'
with open(fn,'w',encoding ='utf8') as file:
json.dump(all_articles,file,ensure_ascii=False)
# print(all_articles['articles'])
# + tags=[]
import pymysql
import pandas as pd
MYSQL_HOST = 'localhost'
MYSQL_DB = 'neildb'
MYSQL_USER = 'root'
MYSQL_PASS = '<PASSWORD>'
def connect_mysql(): # connect to the MySQL database
global connect, cursor
    connect = pymysql.connect(host = MYSQL_HOST, db = MYSQL_DB, user = MYSQL_USER, password = MYSQL_PASS,
charset = 'utf8', use_unicode = True)
cursor = connect.cursor()
connect_mysql() # call the database connection function
df = pd.read_sql('SELECT * FROM hiskio', con = connect) # fetch the data from the MySQL database through the connect connection
data = df.to_dict('records')
for i in data:
    if i['price'] not in (None, 'NaN', '免費'):
        i['price'] = int(i['price'].replace('NT$', ''))
    else:
        i['price'] = 0
d = {}
for i in data:
d[i['category']] = ''
title_num = [] # total number of courses per category, e.g. [45, 74, 10, 12, 45, 5, 25]
for j in d:
t = []
for i in data:
count = 0
if i['category'] == j:
count += 1
t.append(count)
title_num.append(sum(t))
title = [] # course categories, e.g. ['網站前端', '網站後端', '物聯網 IOT', '手機應用', '數據分析', '遊戲開發', '微軟應用']
price = [] # total course price per category, e.g. [109686, 281049, 21090, 35899, 211530, 15860, 151690]
free = [] # number of free courses per category, e.g. [7, 13, 4, 1, 11, 0, 0]
for k in d:
    title.append(k)
    p = []
    count = 0
    for i in data:
        if i['category'] == k and i['price'] != 0:
            p.append(i['price'])
        if i['category'] == k and i['price'] == 0:
            count += 1  # count the free courses in this category
    price.append(sum(p))
    free.append(count)
#---------------------------------------------------------------------
l = []
level = []
for i in data:
if i['level'] not in l:
l.append(i['level'])
d = {}
d[i['category']] = i['level']
level.append(d)
dic1 = {}
for i in title:
dic1[i] = []
for i in l:
r = []
d = {}
count = 0
for j in level[:10]:
if dic1 in
#---------------------------------------------------------------------
# + tags=[]
print(dic1)
print(l)
level
# -
| Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Observed and Projected Climate Shifts http://koeppen-geiger.vu-wien.ac.at/shifts.htm
#
# Not sure which data to include; see the data section. All data is under 10 GB.
| .ipynb_checkpoints/cli.011_New -checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Lab 1: Tools of the trade
#
# ## Goal:
# At what radial step do you enclose half of the Earth's mass?
#
# We all know the formula for the mass of a sphere:
# $ M = \frac{4}{3} \pi R^3 \times \rho $
#
# Note that the density here is provided in *shells* of radius, so you need to add them as you go.
#
# $ M_i = \frac{4}{3} \pi (R_{i+1}^3 - R_i^3) \times \rho_i $
#
# ## Approach:
# - Import the data.
# - Create arrays for radius and density.
# - Calculate the mass at each radial step and put it into a shell mass array.
# - Sum up the entries in the shell mass array for total mass.
# - Cumulatively sum up the entries in the shell mass array for an enclosed mass array.
# - Isolate the values greater than half the mass of the Earth.
# - Graph radius vs. enclosed mass with an overlay of the isolated set.
# - Find the highest radius value in the isolated set.
#
# ## Alternative:
# - Import the data.
# - Create arrays for radius and density.
# - Calculate the mass at each radial step and put it into a shell mass array.
# - Sum up the entries in the shell mass array for total mass.
# - Cumulatively sum up the entries in the shell mass array for an enclosed mass array.
# - Iterate through the enclosed mass array until you find the last value that is greater than half the mass of the Earth.
#
# ## Key Variables:
# - rad: Radius array, holds the radius data from the imported file.
# - den: Density array, holds the density data from the imported file.
# - shell: Shell mass array, holds the masses of each radial shell.
# - enclosed: Enclosed mass array, holds the enclosed mass at each radial step.
# - total: Total mass, the total mass of the Earth.
# - half: Half mass array, the values in emass that are greater than half the mass of Earth.
# +
# Importing Python modules
# some magic to make plots appear IN the notebook (not pop out like normal matplotlib)
# %matplotlib inline
import numpy as np # our basic math/numbers library
import matplotlib.pyplot as plt # the standard plotting package
import pandas as pd # PANDAS: a data science and analysis package we'll use all Quarter!!
# +
# Read in data
# file from: http://ds.iris.edu/ds/products/emc-prem/
# based on Dziewonski & Anderson (1981) http://adsabs.harvard.edu/abs/1981PEPI...25..297D
file = 'PREM_1s.csv'
df = pd.read_csv(file)
# -
# +
# Create an array for radius and density, and a placeholder for the array of mass shells.
rad=df['radius']
den=df['density']
shell=np.zeros_like(rad)
# Create an array of the mass at each radial step.
for i in range(len(rad) - 1):
    shell[i] = (4/3) * np.pi * (rad[i]**3 - rad[i+1]**3) * den[i]
# +
# Calculate the mass at each radial step and put it into a shell mass array.
enclosed = np.cumsum(shell)
# Sum up the entries in the shell mass array for total mass.
total = np.sum(shell)
# +
# Isolate the values greater than half the mass of the Earth.
half = np.where((enclosed >= (total/2)))[0]
# Graph radius vs. enclosed mass with an overlay of the isolated set.
plt.figure(figsize=(8,5))
plt.plot(rad, enclosed)
plt.title('Mass vs. Radius')
plt.xlabel('Radius (km)')
plt.ylabel('Mass (kg)')
plt.scatter(rad[half], enclosed[half], c='red', lw=0)
# -
print("At what radial step do you enclose half of the Earth's mass?", np.max(rad[half]), "meters from the core.")
# +
# Iterate through the enclosed mass array until you find the last value that is greater than half the mass of the Earth.
i = -1 # i was used previously, restoring value back to -1 to ensure that index 0 is accounted for.
while True:
i = i + 1
if (enclosed[i] >= (total/2)):
print("At what radial step do you enclose half of the Earth's mass?", rad[i], "meters from the core.")
break
# -
| lab1/lab1-HOWARD.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9 (comp-methods)
# language: python
# name: comp-methods
# ---
# Monte Carlo integration is a technique for numerical integration using random numbers. It can be useful, for example, when evaluating integrals over domains of high dimension. Meshing in high dimensions suffers from the [curse of dimensionality](https://en.wikipedia.org/wiki/Curse_of_dimensionality): 100 evenly spaced sample points suffice to sample the unit interval $[0,1]$ with no more than 0.01 distance between points, whereas sampling of the 10-dimensional unit hypercube $[0,1]^{10}$ with a lattice that has a spacing of 0.01 between adjacent points would require $10^{20}$ sample points.
#
# Let $\Omega \subset \mathbb R^n$ and let $f : \Omega \to \mathbb R$ be piecewise continuous.
# Let $p$ be such a probability density function on $\Omega$ that $p(x) > 0$ for all $x \in \Omega$.
# The Monte Carlo approach to approximate the integral $\int_\Omega f(x) dx$
# works as follows:
#
# 1. Generate a large number of independent samples $x_1,\dots,x_N \in \Omega$
# from the probability distribution with the density $p$
# 2. Compute the quantity
#
# $$
# I_N = \frac 1 N \sum_{i=1}^N \frac{f(x_i)}{p(x_i)}.
# $$
#
# The [law of large numbers](https://en.wikipedia.org/wiki/Law_of_large_numbers) implies that
#
# $$
# I_N \to \int_\Omega f(x) dx \quad \text{as $n \to \infty$}.
# $$
#
# Choosing $p$ cleverly is the basic idea behind [importance sampling](https://en.wikipedia.org/wiki/Importance_sampling).
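#
# As a quick illustration of the recipe (not part of the exercise below): with $\Omega = [0,1]$ and the uniform density $p \equiv 1$, the estimator $I_N$ is simply the sample mean of $f$.
#
# ```python
# import numpy as np
#
# rng_demo = np.random.default_rng(0)
# f = lambda x: np.exp(x)
# xs = rng_demo.uniform(0.0, 1.0, size=100_000)  # samples from p
# I_N = f(xs).mean()                             # Monte Carlo estimate of the integral
# print(I_N, np.e - 1.0)                         # exact value of the integral is e - 1
# ```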
#
# [Buffon's needle](https://en.wikipedia.org/wiki/Buffon%27s_needle_problem) experiment is a classical pedagogical example of Monte Carlo integration.
# Suppose we have a floor made of parallel strips of wood, each the same width $t$, and we drop a needle of length $l < t$ onto the floor. Buffon showed that the probability $P$ that the needle will lie across a line between two strips is
#
# \begin{equation}\tag{1}
# P=\frac{2}{\pi}\frac{l}{t}.
# \end{equation}
#
# Let $s$ be the distance from the center of the needle to the closest parallel line, and let $\theta$ be the acute angle between the needle and one of the parallel lines.
# Here $s$ and $\theta$ are random variables with uniform distributions over $[0, t/2]$ and $[0,\pi/2]$, respectively. We write $p(s,\theta)$ for their joint probability density function and $A$ for the event that the needle lies across a line between two strips. Then
#
# $$
# P = \int_A p(s,\theta) ds d\theta.
# $$
#
# Writing $\Omega = [0, t/2] \times [0,\pi/2]$ and $f(x) = 1_A(x) p(x)$
# where $x = (s,\theta)$ and
#
# $$
# 1_A(x) =
# \begin{cases}
# 1 & x \in A,
# \\
# 0 & x \notin A,
# \end{cases}
# $$
#
# we have, using the notation above, that $I_N \to P$ as $N \to \infty$.
# Observe that, in this case,
#
# $$
# I_N = \frac 1 N \sum_{i=1}^N 1_A(x_i),
# $$
#
# and evaluating $I_N$ boils down to counting the needles that lie across a line between two strips.
#
# Take $l = 5/6$ and $t = 1$. The goal of this homework is to approximate
# $\pi$ via the formula
#
# $$
# \pi=\frac{2l}{t P} \approx \frac{2l}{t I_N},
# $$
#
# that follows from (1).
#
# Let us give some remarks on the history of Monte Carlo integration. Formula (1) was first derived in
#
# > Buffon, <NAME>., comte de. _Histoire naturelle, générale et particulière, Supplément 4_. Imprimerie royale, Paris, 1777. (scan in [Google Books](https://books.google.fi/books?id=AjhYD1vsVAIC&hl=fi&pg=PA100#v=onepage&q&f=false))
#
# Now my French is not very strong, but as claimed [here](https://en.wikipedia.org/wiki/Buffon's_needle_problem), it seems that approximating $\pi$ was not the original motivation for Buffon's question. You can have a look at his book, see pp. 100-104, and while you are at it, you can also try figure out if there is an error in his derivation, as claimed [here](https://mathworld.wolfram.com/Buffon-LaplaceNeedleProblem.html).
#
# The idea of using Buffon's formula to design a method for approximating the number $\pi$ goes back at least to Laplace. _"Si l'on projette un grand nombre de fois ce cylindre [...] ce qui fera connaître la valeur de la circonférence $2 \pi$"_, see p. 360 of
#
# > Laplace, <NAME>., <NAME>. _Théorie analytique des probabilités_. <NAME>, Paris, 1812. (scan in [Internet Archive](https://archive.org/details/thorieanalytiqu01laplgoog/page/n464/mode/2up))
#
# The first computerized Monte Carlo simulations were run on [ENIAC](https://en.wikipedia.org/wiki/ENIAC) in 1948 by a team including John and <NAME> and <NAME>. It can be argued that the simulations were also the first code written in the modern paradigm, associated with the "stored program concept," ever to be executed, see
#
# > <NAME>, Priestley, Mark, and <NAME>. _Los Alamos Bets on ENIAC: Nuclear Monte Carlo Simulations, 1947-1948_. IEEE Annals of the History of Computing 36, no. 3, 42-63, 2014. <https://doi.org/10.1109/MAHC.2014.40> (in [Helka](https://helka.helsinki.fi/permalink/358UOH_INST/qn0n39/cdi_ieee_primary_6880250))
#
# + tags=["rm"]
import numpy as np
rng = np.random.default_rng()
t = 1
l = 5/6
def sample():
'''Returns s and theta generated using the random number generator rng'''
# Draw samples from uniform distributions using the function rng.uniform, see
# https://numpy.org/doc/stable/reference/random/generated/numpy.random.Generator.uniform.html
raise NotImplementedError() # a placeholder, your implementation goes here
def intersects(s, theta):
'''Returns True iff the needle lies across a line between two strips'''
raise NotImplementedError() # a placeholder, your implementation goes here
# We will plot I_N for several N so let's save every nth approximation
def I(n, K):
'''Return I_n, I_{2n}, ..., I_{Kn}'''
out = np.zeros(K)
raise NotImplementedError() # a placeholder, your implementation goes here
return out
# +
import matplotlib.pyplot as plt
# Plot one sample
s, theta = sample()
c = np.array([0, s]) # Center of needle
d = np.array([np.cos(theta), np.sin(theta)]) # Direction of needle
# End points of needle
end1 = c - l/2*d
end2 = c + l/2*d
ends = np.stack((end1, end2))
xs = ends[:,0]
ys = ends[:,1]
plt.plot(xs, ys, 'r') # needle in red
plt.plot([-1,1],[0, 0], 'b') # closest line in blue
ax = plt.gca()
ax.set_ylim(-0.5, 1)
ax.set_aspect(1)
print(f'Needle intersects the closest line: {intersects(s, theta)}')
# -
# Plot convergence to pi
n = 100
K = 40
Is = I(n, K)
Ns = n*np.arange(1,K+1)
plt.plot(Ns, 2*l/(t*Is), 'b') # approximation in blue
plt.plot([n,n*K],[np.pi, np.pi],'r') # pi in red
ax = plt.gca()
ax.set_ylim(2.4, 3.6);
# **How to hand in your solution**
#
# 1. Run the whole notebook by choosing _Restart Kernel and Run All Cells_ in the _Run_ menu
# - Alternatively you can click the ⏩️ icon in the toolbar
# 2. Click the link below to check that the piece of code containing your solution was uploaded to pastebin
# - If you have changed the order of cells in the notebook, you may need to change the number in the below cell to the one in the left margin of the cell containing your solution
# 3. Copy the link and submit it in Moodle
# - You can copy the link easily by right-clicking it and choosing _Copy Output to Clipboard_
# + tags=["rm"]
# Upload the code in the first input cell to pastebin
# %pastebin 1
| integration/homework.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="0TD5ZrvEMbhZ"
# ##### Copyright 2018 The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License").
#
# # Convolutional VAE: An example with tf.keras and eager
#
# This example has moved:
#
# <table class="tfo-notebook-buttons" align="left"><td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/r2/tutorials/generative/cvae.ipynb">
# <img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td><td>
# <a target="_blank" href="https://github.com/uve/docs/blob/master/site/en/r2/tutorials/generative/cvae.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a></td></table>
# + [markdown] colab_type="text" id="ITZuApL56Mny"
# 
#
#
| tensorflow/contrib/eager/python/examples/generative_examples/cvae.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <html>
# <body>
# <table style="border: none" align="center">
# <tr style="border: none">
# <th style="border: none"><img src="https://github.com/pmservice/customer-satisfaction-prediction/blob/master/app/static/images/ml_icon_gray.png?raw=true" alt="Watson Machine Learning icon" height="45" width="45"></th>
# <th style="border: none"><font face="verdana" size="6" color="black"><b>Watson Machine Learning</b></font></th>
# </tr>
# </table>
# </body>
# </html>
# # WML - Tensorflow Support
# ## Contents:
# 1. Train a Tensorflow model in a **DSX Notebook**
# 2. Save the trained model into the WML Repository
# ## Support Requirements:
# ### Supported Versions:
# 1. Python Runtime: Python 3.5
# 2. Tensorflow version: 1.2
# 3. Anaconda Runtime Version: Anaconda 4.2.9 for Python 3.5
# ### Mandatory Requirements for Online Deployment and Scoring:
# 1. The model to be deployed using Online Deployment and Scoring service should be persisted in WML Repository
# 2. The persisted model should contain Tensorflow signature metadata for serving (refer to section 2.1). Online deployment is restricted for persisted Tensorflow models that do not contain this metadata. This requirement will be adequately documented.
#
# <hr>
# <table style="border: none" align="center">
# <tr style="border: none">
# <th style="border: none"><img src="https://github.com/pmservice/wml-sample-models/raw/master/scikit-learn/hand-written-digits-recognition/images/numbers_banner-04.png" width="600" alt="Icon"> </th>
# </tr>
# </table>
# # About the use case:- Recognition of hand written digits
# Using Tensorflow, we train a model that can recognize a handwritten digit embedded in an image. The model is trained on the MNIST data set, which can be accessed through Tensorflow's sample-dataset APIs. Here, we use Tensorflow's implementation of a Convolutional Neural Network (CNN) to build the model.
import tensorflow as tf
# # 1.0 Train a Tensorflow model
# ### 1.1 Import training data
# Using the code below, let us download the datasets that we will use for training, validation and testing. The APIs used here provide the datasets in the form of **single-dimensional NumPy arrays** that have been flattened from the actual images.
# +
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# -
# ### 1.2 Set parameters required for creating the input and the target(label) Tensors
# n_input - Refers to the size of the single-dimensional array that represents one image containing a hand-written digit <br>
# n_classes - Refers to the number of possible categories of prediction outcomes. In this use case, the prediction outcome can be any of the **10 digits**, i.e. 0-9
n_input = 784
n_classes = 10
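# As a quick sanity check (an illustrative aside, not part of the original notebook), the MNIST arrays downloaded above already match these sizes: each image is a flat vector of `n_input` pixel values and each label is a one-hot vector with `n_classes` entries.
# +
print(mnist.train.images.shape)  # e.g. (55000, 784) -> flattened 28x28 images
print(mnist.train.labels.shape)  # e.g. (55000, 10)  -> one-hot encoded digit labels
# -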
# ### 1.3 Define the input and the target(label) Tensors
# x - A placeholder that will hold the input data for training and scoring <br>
# y - A placeholder that will hold the one-hot encoded label of the hand-written digit in the image
# tf Graph input
x = tf.placeholder(tf.float32, [None, n_input], name="x_input")
y = tf.placeholder(tf.float32, [None, n_classes])
# ### 1.4 Set the convolutional neural network related parameters
# The convolutional neural network that we are going to build requires weights and biases to be initialized for each of the convolution layer in the network. The code below initializes these weights and biases.
# +
# Initialize each convolution layer's weights, bias and dropout
weights = {
# 5x5 conv, 1 input, 32 outputs
"wc1": tf.Variable(tf.random_normal([5, 5, 1, 32])),
# 5x5 conv, 32 inputs, 64 outputs
"wc2": tf.Variable(tf.random_normal([5, 5, 32, 64])),
# fully connected, 7*7*64 inputs, 1024 outputs
"wd1": tf.Variable(tf.random_normal([7 * 7 * 64, 1024])),
# 1024 inputs, 10 outputs (class prediction)
"out": tf.Variable(tf.random_normal([1024, n_classes])),
}
biases = {
"bc1": tf.Variable(tf.random_normal([32])),
"bc2": tf.Variable(tf.random_normal([64])),
"bd1": tf.Variable(tf.random_normal([1024])),
"out": tf.Variable(tf.random_normal([n_classes])),
}
dropout = 0.75
learning_rate = 0.001
# -
# ### 1.5 Build the model definition
# In Tensorflow, a model is built by constructing a computational graph. The computational graph is in turn built by defining nodes. Each node is either a placeholder or a transformation operation applied to input data. The input of a node can come either from another node in the graph or from values supplied by the user at the time the graph is executed. In Tensorflow's terms, each node is referred to as a Tensor. <br>
#
# In the cell below, we define the Tensors (nodes) in the graph that implement the convolutional neural network architecture.
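# As a minimal, self-contained illustration of this idea (an aside, not part of the model below), we can build a tiny toy graph from a placeholder and only execute it later inside a Session:
# +
toy_in = tf.placeholder(tf.float32, name="toy_input")  # a placeholder node
toy_out = toy_in * 2.0 + 1.0                            # a transformation node built on top of it
with tf.Session() as toy_sess:
    # Nothing has been computed so far; run() executes the graph with user-supplied input
    print(toy_sess.run(toy_out, feed_dict={toy_in: 3.0}))  # prints 7.0
# -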
# +
# Reshape input picture
x_trans1 = tf.reshape(x, shape=[-1, 28, 28, 1])
# Convolution Layer -1
x_conv2d_l1 = tf.nn.conv2d(x_trans1, weights["wc1"], strides=[1, 1, 1, 1], padding="SAME")
x_w_bias_l1 = tf.nn.bias_add(x_conv2d_l1, biases["bc1"])
x_relu_l1 = tf.nn.relu(x_w_bias_l1)
conv1_out = tf.nn.max_pool(
x_relu_l1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME"
)
# Convolution Layer -2
x_conv2d_l2 = tf.nn.conv2d(
conv1_out, weights["wc2"], strides=[1, 1, 1, 1], padding="SAME"
)
x_w_bias_l2 = tf.nn.bias_add(x_conv2d_l2, biases["bc2"])
x_relu_l2 = tf.nn.relu(x_w_bias_l2)
conv2_out = tf.nn.max_pool(
x_relu_l2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME"
)
# Fully connected layer
# Reshape conv2 output to fit fully connected layer input
fc1 = tf.reshape(conv2_out, [-1, weights["wd1"].get_shape().as_list()[0]])
fc1 = tf.add(tf.matmul(fc1, weights["wd1"]), biases["bd1"])
fc1 = tf.nn.relu(fc1)
# Apply Dropout
fc1 = tf.nn.dropout(fc1, dropout)
# Output, class prediction
conv_out = tf.add(tf.matmul(fc1, weights["out"]), biases["out"], name="output_tensor")
predictor = tf.argmax(conv_out, 1, name="predictor")
# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=conv_out, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# To Evaluate model
correct_pred = tf.equal(tf.argmax(conv_out, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# -
# ### 1.6 Set parameters for training
# training_iters - Refers to the number of images that we plan to use for training the model. Using more images for training leads to better accuracy of the model <br>
# batch_size - The training will be performed iteratively on batches of images. batch_size refers to the number of images in each batch <br>
# display_step - Refers to how often (every n-th training iteration) the training loss and accuracy are calculated and displayed
# Training Parameters
training_iters = 60000
batch_size = 128
display_step = 10
# ### 1.7 Initialize a Tensorflow Session to train the model
# Training a model refers to executing the computational graph that holds the model definition. <br>
# Tensorflow uses a C++ backend to execute the computational graph. The connection between Tensorflow's Python runtime and the C++ backend is managed by a Session object. We hence initialize a Session using the code below.
# Initializing the variables
init = tf.global_variables_initializer()
# Launch the graph
sess = tf.Session()
sess.run(init)
step = 1
# Keep training until reach max iterations
while step * batch_size < training_iters:
batch_x, batch_y = mnist.train.next_batch(batch_size)
# Run optimization op (backprop)
sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
print("Completed batch iteration: " + str(step * batch_size))
if step % display_step == 0:
# Calculate batch loss and accuracy
loss, acc = sess.run([cost, accuracy], feed_dict={x: batch_x, y: batch_y})
print(
"Iter "
+ str(step * batch_size)
+ ", Minibatch Loss= "
+ "{:.6f}".format(loss)
+ ", Training Accuracy= "
+ "{:.5f}".format(acc)
)
step += 1
print("Model training finished!")
# **We have now trained a Tensorflow model. As a next step, we need to persist this model in the WML Repository.**
# # 2.0 Save the trained Tensorflow model in WML Repository using Repository's Python Client
# ### 2.1 Create the signature of the tensors that will be required for scoring.
# Signature refers to the information about the Tensors that hold the input data and the output data for scoring. This signature will be used at the time of scoring using the WML Online Deployment and Scoring service. <br>
#
# As per our model definition, <br>
# the Tensor - "x" is the placeholder that holds the input data of the model and <br>
# the Tensor - "predictor" is the node that holds the predicted value.
# ### P.S :
# **This is a mandatory requirement for scoring the model. Hence this data should be provided while saving the model.**
# +
classification_inputs = tf.saved_model.utils.build_tensor_info(x)
classification_outputs_classes = tf.saved_model.utils.build_tensor_info(predictor)
classification_signature = tf.saved_model.signature_def_utils.build_signature_def(
inputs={tf.saved_model.signature_constants.CLASSIFY_INPUTS: classification_inputs},
outputs={
tf.saved_model.signature_constants.CLASSIFY_OUTPUT_CLASSES: classification_outputs_classes
},
method_name=tf.saved_model.signature_constants.CLASSIFY_METHOD_NAME,
)
print("classification_signature content:")
print(classification_signature)
legacy_op_init = tf.group(tf.tables_initializer(), name="legacy_init_op")
# -
# ### 2.2 Save the model using WML Repository's Python client
# First, we must import client libraries.
# +
import sys
import time
from repository_v3.mlrepository import MetaNames, MetaProps
from repository_v3.mlrepositoryartifact import MLRepositoryArtifact
from repository_v3.mlrepositoryclient import MLRepositoryClient
# -
# #### 2.2.1 Provide WML instance credentials and authenticate
# Authenticate to Watson Machine Learning service on Bluemix.
#
# **Action:** Add authentication information from your instance of Watson Machine Learning service here
# +
# WML Instance details
# SVT
# service_url = "https://ibm-watson-ml-svt.stage1.mybluemix.net"
service_url = "https://ibm-watson-ml.mybluemix.net"
user = "<PASSWORD>"
password = "<PASSWORD>"
instance_id = "d8b98cb2-cd06-4740-8467-def50eca91f9"
# -
ml_repository_client = MLRepositoryClient(service_url)
ml_repository_client.authorize(user, password)
# #### 2.2.2 Create a WML Artifact
# Define the metadata that we wish to persist along with the model.
tf_model_name = "k_tf_mnist_10203"
tf_model_metadata = {
MetaNames.DESCRIPTION: "Tensorflow model for predecting Hand-written digits",
MetaNames.AUTHOR_EMAIL: "<EMAIL>",
MetaNames.AUTHOR_NAME: "Krishna",
}
# Create a WML Repository artifact by specifying the session object that contains the graph of the model that we wish to save in the WML Repository, along with the scoring signature of the model. We also specify other metadata that we want to save along with the model.
tf_model_artifact = MLRepositoryArtifact(
sess,
signature_def_map={"predict_images": classification_signature},
legacy_init_op=legacy_op_init,
name=tf_model_name,
meta_props=MetaProps(tf_model_metadata.copy()),
)
# #### 2.3 Save the model to WML Repository
# The code below serializes the model artifact that contains a reference to the Session object and the related details, and saves it in the WML Repository as a compressed tar ball. <br>
# The API returns the metadata that was created as part of saving the model.
saved_model = ml_repository_client.models.save(tf_model_artifact)
# Display a few pieces of metadata of interest that were generated as part of saving the model to the WML Repository. <br>
#
# The modelVersionUrl displayed in the output of the cell below refers to the WML Repository URL that points to the saved model.
model_uid = saved_model.uid
model_ver_url = saved_model.meta.prop("modelVersionUrl")
print(
"ModelType: "
+ saved_model.meta.prop("frameworkName")
+ "-"
+ saved_model.meta.prop("frameworkVersion")
)
print("ModelId: " + saved_model.uid)
print("modelVersionUrl: " + saved_model.meta.prop("modelVersionUrl"))
# Now that we have persisted a trained model in the WML Repository, we are ready to deploy and score using this model. The deployment and scoring functionality is explained in the notebook named "WML_TF_Serving_Using_Onllike_Deploy_Scoring_Service" in this project.
# !curl -X POST --header 'Content-Type: application/json' --header 'Accept: application/json' --header 'Authorization: Bearer <KEY>' -d '{"inputs":[[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.19607844948768616, 0.8784314393997192, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.27450981736183167, 0.11372549831867218, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.4745098352432251, 0.9058824181556702, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5803921818733215, 0.658823549747467, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.01568627543747425, 0.7647059559822083, 0.9058824181556702, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.3764706254005432, 0.8235294818878174, 0.04313725605607033, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.2705882489681244, 0.988235354423523, 0.5254902243614197, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.44705885648727417, 0.988235354423523, 0.08235294371843338, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1764705926179886, 0.9254902601242065, 0.8509804606437683, 0.0470588281750679, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.7529412508010864, 0.988235354423523, 0.08235294371843338, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.658823549747467, 0.9686275124549866, 0.20784315466880798, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.07058823853731155, 1.0, 0.9921569228172302, 0.08235294371843338, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.3294117748737335, 0.9490196704864502, 0.8274510502815247, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5529412031173706, 0.9921569228172302, 0.7411764860153198, 0.019607843831181526, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.6627451181411743, 0.988235354423523, 0.41568630933761597, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.125490203499794, 0.9098039865493774, 0.9803922176361084, 0.25882354378700256, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.05882353335618973, 0.8823530077934265, 0.988235354423523, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5254902243614197, 0.988235354423523, 0.8274510502815247, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.08627451211214066, 0.988235354423523, 0.6431372761726379, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.6627451181411743, 0.988235354423523, 0.6549019813537598, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.03529411926865578, 0.8000000715255737, 0.8196079134941101, 0.07058823853731155, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.08627451211214066, 0.9921569228172302, 0.9921569228172302, 0.41960787773132324, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.6627451181411743, 0.988235354423523, 0.7803922295570374, 0.3333333432674408, 0.3333333432674408, 
0.3333333432674408, 0.3333333432674408, 0.5058823823928833, 0.6431372761726379, 0.7647059559822083, 0.988235354423523, 0.988235354423523, 0.41568630933761597, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.16078431904315948, 0.6666666865348816, 0.960784375667572, 0.988235354423523, 0.988235354423523, 0.988235354423523, 0.988235354423523, 0.9098039865493774, 0.9058824181556702, 0.9843137860298157, 0.988235354423523, 0.988235354423523, 0.03529411926865578, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.19215688109397888, 0.3294117748737335, 0.3294117748737335, 0.3294117748737335, 0.3294117748737335, 0.0, 0.0, 0.6313725709915161, 0.988235354423523, 0.988235354423523, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.49803924560546875, 0.988235354423523, 0.988235354423523, 0.1764705926179886, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.501960813999176, 0.9921569228172302, 0.9921569228172302, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.49803924560546875, 0.988235354423523, 0.988235354423523, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.529411792755127, 0.988235354423523, 0.9568628072738647, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.9098039865493774, 0.9254902601242065, 0.43529415130615234, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.7019608020782471, 0.25882354378700256, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]}' 'https://ibm-watson-ml.mybluemix.net/v3/wml_instances/d8b98cb2-cd06-4740-8467-def50eca91f9/published_models/3a982151-54b5-462f-9006-2243587c6af1/deployments/1aa6e50c-ce52-495b-9ac5-2adad19d07ac/online'
# +
import json
import numpy as np
# Use new names here so we do not overwrite the TensorFlow placeholders x and y defined above
batch_x, batch_y = mnist.train.next_batch(batch_size)
json.dumps(np.array(batch_x[0]).tolist())
# -
# !curl -X GET --header 'Accept: application/json' --header 'Authorization: Bearer <KEY>' 'https://ibm-watson-ml.mybluemix.net/v3/wml_instances/d8b98cb2-cd06-4740-8467-def50eca91f9/published_models/3a982151-54b5-462f-9006-2243587c6af1/deployments/1aa6e50c-ce52-495b-9ac5-2adad19d07ac'
image1 = mnist.test.images[
45,
].tolist()
image2 = mnist.test.images[
4,
].tolist()
scoring_data = {"inputs": [image1, image2]}
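# The dictionary above is the scoring payload. As an illustrative sketch (not part of the original notebook), it could be posted to the online deployment endpoint used in the curl commands above via the `requests` library; the URL and bearer token below are placeholders and must be replaced with your own deployment details.
# +
import requests

scoring_url = "https://ibm-watson-ml.mybluemix.net/v3/wml_instances/<instance_id>/published_models/<model_id>/deployments/<deployment_id>/online"  # placeholder URL
scoring_headers = {
    "Content-Type": "application/json",
    "Accept": "application/json",
    "Authorization": "Bearer <token>",  # placeholder token
}
scoring_response = requests.post(scoring_url, json=scoring_data, headers=scoring_headers)
print(scoring_response.status_code)
print(scoring_response.json())
# -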
| applied-ai-apach-system-ml-and-dl4j/WML/a3_m4_WML_TF_Train_Save_Model_publish.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/ceyxasm/implementations/blob/main/ipl.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="sgNkXJ7gyn-b"
# # **Answering how often the better team wins, and how significant uncertainty and luck are in a game.**
#
# Every year 7-8 teams compete in what is called the **Indian Premier League**. They play around 60 matches (10-12 matches per team) and finally we have our winner.
# But
#
#
# * Can we be certain that the better team did indeed win?
# * How does the probability of a team winning one match translate into winning the entire season?
# * Does RCB play badly, or are they just unlucky?
#
#
# + id="Esax1M4yqD63"
#importing libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import random
random.seed(1421)
# AB de Villiers scored 1421 runs off 611 balls in IPL death overs with 106 fours and 112 sixes.
# + colab={"base_uri": "https://localhost:8080/", "height": 417} id="VSRfNuU5rFCg" outputId="82cbac95-a330-4797-8fc5-8791e5714a73"
#data read
#dataset- https://www.kaggle.com/patrickb1912/ipl-complete-dataset-20082020
data=pd.read_csv('/content/drive/MyDrive/Colab Notebooks/datasets/IPL Matches 2008-2020.csv')
data.head()
# + colab={"base_uri": "https://localhost:8080/"} id="dKXe5nnH1__5" outputId="11f78685-0805-4863-a3b7-6d682ba7e3a8"
team1=np.array(data.iloc[:,6])
team2=np.array(data.iloc[:,7])
team=[]
for i in team1:
if i not in team:
team.append(i)
#all these teams have however not been part of all the seasons
#teams that have been part of all the seasons are:
#Royal Challengers Bangalore
#Kings XI Punjab
#Mumbai Indians
#Kolkata Knight Riders
#Rajasthan Royals
#Chennai Super Kings
####>>>> https://www.quora.com/Is-there-any-IPL-player-who-played-in-all-season-from-the-same-team
#to make our analysis more uniform, we will limit ourselves to these teams only and drop the remaining ones
##let's hope this does not contribute to sampling bias
#data preprocessing
row=data.shape[0]
drop_label=[]
drop_team=[ "Kochi Tuskers Kerala", "Pune Warriors", "Gujarat Lions",
"Rising Pune Supergiants", "Rising Pune Supergiant" ]
for i in drop_team:
data = data.loc[data["team1"] != i]
data= data.loc[data["team2"] != i]
print("data points before dropping: "+str(row)+"\ndata points after dropping: "+str(data.shape[0]))
data=data.replace({"De<NAME>":"<NAME>","Delhi Capitals": "Delhi Daredevils" })
#"<NAME>"--->"<NAME>"
#"Delhi Capitals"----> "Delhi Daredevils"
# + colab={"base_uri": "https://localhost:8080/"} id="Z5V3gi_erMts" outputId="0fc04837-326c-4a3a-c265-ebf9abcbb7c3"
#modifying our original dataframe
team1=np.array(data.iloc[:,6])
team2=np.array(data.iloc[:,7])
winner=np.array(data.iloc[:,10])
n_matches=len(team1)
team=[]
for i in range(len(team1)):
    if team1[i] not in team:
        team.append(team1[i])
    if team2[i] not in team:
        team.append(team2[i])
team_code=[]
for i in team:
x=i.split()
code=""
for j in x:
code+=j[0]
team_code.append(code)
print(code+" "+i)
# + id="M4lv6nvlOR8q"
for i in range(n_matches):
for j in range(len(team)):
if team1[i]==team[j]: team1[i]=team_code[j]
if team2[i]==team[j]: team2[i]=team_code[j]
if winner[i]==team[j]: winner[i]=team_code[j]
team=team_code
# + id="zNKlteW8sPk-" colab={"base_uri": "https://localhost:8080/", "height": 652} outputId="c7236227-09b7-44ea-fcdc-ac0225916cdd"
#summarizing our dataset in a dictionary
matches={}
for i in range(n_matches):
if team1[i] not in matches:
matches[team1[i]]={'wins':0,
'loss':0,
'matches':1}
else: matches[team1[i]]['matches']+=1
if winner[i]==team1[i]:
matches[team1[i]]['wins']+=1
else: matches[team1[i]]['loss']+=1
for i in range(n_matches):
matches[team2[i]]['matches']+=1
if winner[i]==team2[i]:
matches[team2[i]]['wins']+=1
else: matches[team2[i]]['loss']+=1
##adding the number of season wins for each team (e.g. 0 for RCB, 3 for CSK)
###>>>>>>>>>>>>> data is compiled only till 2020, so CSK's 2021 win is dropped; maybe you can contribute to the data set
for i in team:
matches[i]['cups']=0
matches['MI']['cups']=5
matches['CSK']['cups']=3
matches['SH']['cups']=1
matches['RR']['cups']=1
matches['DD']['cups']=1
matches['KKR']['cups']=2
wins=[]
loss=[]
turnout=[]
cups=[]
for i in team_code:
matches[i]['turnout']=matches[i]['wins']/(matches[i]['loss']+ matches[i]['wins'] )
wins.append(matches[i]['wins'])
loss.append(matches[i]['loss'])
turnout.append(matches[i]['turnout'])
cups.append(matches[i]['cups'])
print(i, matches[i])
plt.bar(team, turnout, color='purple')
plt.ylabel('turnout ratio')
plt.show()
plt.bar(team, cups, color='orange' )
plt.ylabel('number of cups')
plt.show()
# + id="5d3G884OH3mT"
# seasonresult simulates the outcome of a season, given:
# match_count - number of matches a team plays before the semifinals
# win_prob - probability of our team winning an individual match
# assuming our team of interest is going to win the semis and finals (WISHFUL THINKING),
# it must win more than half of the preceding matches it played that season
def seasonresult(match_count, win_prob):
win_count=0
for i in range(match_count):
if random.random() <= win_prob:
win_count+=1
return (win_count > (match_count)/2)
# + id="KylVese0ffpe"
# simulate_season- mimic a season count number of times and plots
# probability of winning one match v/s probability of winning the season
def simulate_season( count): #simulates a season 'count' number of times
p=0.5 #we vary our 1-game winning probability from 0.5 to 1 in steps of 0.01
probs=[]
season_win=[]
while(p<=1):
win=0
for i in range(count):
if seasonresult(7, p):
win+=1
probs.append(p)
season_win.append(win/count)
p+=0.01
for i in range(len(probs)):
if season_win[i]>=0.9:
ideal_prob=probs[i]
break
plt.plot(probs, season_win)
plt.axhline(y = 0.90, color = 'r', linestyle = '-')
plt.axvline(x = ideal_prob, color = 'black', linestyle = '-')
plt.xlabel('probability of winning a match')
plt.ylabel('probability of winning the season')
plt.show()
print("therefore for a team to win the season 90% times, its probability of winning a single match should be: "+ str(ideal_prob))
# + colab={"base_uri": "https://localhost:8080/", "height": 296} id="gYb5wmrEhxEf" outputId="5610747c-d5fb-4b26-8109-13aa2bb5af8a"
simulate_season(1000)
# + [markdown] id="wD3tuigX3IHq"
# For us to say that the better team wins the IPL (or to have 90% confidence that the better team shall win), its probability of winning one game must be around 0.74, but the best turnout (which is 0.596, by Mumbai Indians) is not even close.
#
# In fact, from the previous plot it is clear that with a probability of roughly 0.5-something of winning a match, the probability of winning the season is in the same neighbourhood.
#
# **This shows that a season of 60 matches with 8 teams cannot be indicative of how good a team is**.
# That is, we should not see such a polarized pooling of cups;
# and yet we have celebrated teams like CSK and MI, and then we have RCB, which ends up underperforming.
# + [markdown] id="mzulVgL75P7L"
# **So we ask: what is the confidence level that the better team won the 2021 IPL?**
# KKR v/s CSK
#
# *Assuming that these winning percentages are accurate reflections of the relative strengths of these two teams*
# + colab={"base_uri": "https://localhost:8080/"} id="U0DPyYybl8vf" outputId="75c8ea0d-94a5-45ff-fbf1-d46f9d727a0b"
#in 2021, each team played around 12 matches, that is roughly 11 matches before appearing in the semifinals
csk_win_rate=matches['CSK']['turnout']
kkr_win_rate=matches['KKR']['turnout']
won=0
simulation_count=1000 #we are going to simulate a season 1000 times
#if CSK plays a 12-match series 1000 times, how many times will it win against KKR?
for i in range(simulation_count):
if seasonresult(11, csk_win_rate/(csk_win_rate+ kkr_win_rate)):
won+=1
won_frac=won/simulation_count
print('If CSK plays a 12-match series 1000 times, the probability that it will win the season against KKR is: '+ str(won_frac))
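# As a cross-check of the Monte Carlo estimate above (an illustrative aside; scipy is assumed to be available), the same number follows analytically from the binomial distribution: with independent matches and a fixed per-match win probability, winning the season means winning more than half of the 11 matches.
# +
from scipy.stats import binom

p_match = csk_win_rate / (csk_win_rate + kkr_win_rate)
p_season_analytic = binom.sf(5, 11, p_match)  # P(at least 6 wins out of 11 matches)
print('Analytic (binomial) probability that CSK wins the season against KKR: ' + str(p_season_analytic))
# -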
# + [markdown] id="mBXjqFkP6V0P"
# Our confidence in the claim that the better team won the 2021 IPL is 0.562, which is indeed poor.
# **Safe to say, 60 matches are far too few to ensure the better team wins;**
# on the same note, what should be the minimum number of matches per IPL season to ensure that the better team wins?
#
# Or let us address another interesting question:
# RCB performs poorly, but is it because they are unlucky or because they just play badly?
# **Given that RCB loses the season, how many matches should it play for us to attribute their loss to lack of skill and disregard any role of luck?**
#
# Mathematically ->
# H0: RCB is just unlucky. Find the number of matches it needs to play to reject the null hypothesis.
# + id="xEePy_Xlydgb"
def frac_lost(win_prob, simulation_count, n_matches):
# win_prob- winning probability of RCB winning one match
# n_matches- number of matches it is supposed to play in one season
# simulation_count- number of n_matches series RCB plays
# function returns the probability of RCB losing the series given their win_prob and n_matches
won=0
for i in range(simulation_count):
if seasonresult(n_matches, win_prob):
won+=1
return (simulation_count - won)/simulation_count
# + colab={"base_uri": "https://localhost:8080/", "height": 296} id="eVKTPexlt60R" outputId="e3e9e892-543e-4a82-afc4-346c94dd431b"
rcb_win=matches['RCB']['turnout']
def find_count(team_prob):
simulation_count=1000
mx_count=1000
step=10
count=[]
loss_frac=[]
for i in range(1, mx_count, step):
count.append(i)
loss_frac.append(frac_lost( team_prob, simulation_count, i) )
match_number=0
for i in range(len(count)):
if loss_frac[i]>=0.95:
match_number=count[i]
break
plt.plot(count, loss_frac)
plt.axhline(y = 0.95, color = 'r', linestyle = '-')
plt.axvline(x = match_number , color = 'black', linestyle = '-')
plt.xlabel('number of matches')
plt.ylabel('probability of losing the series')
plt.show()
print("We need RCB to play "+str(match_number)+ " matches for 95% cnfidence in claim \'RCB is not unlucky and just tend to play bad \'")
find_count(rcb_win)
#print(rcb_win)
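# The simulated answer can be cross-checked analytically (an illustrative aside; scipy is assumed to be available): losing the season means winning at most half of the n matches, which is a binomial CDF, so we can scan n directly instead of simulating.
# +
from scipy.stats import binom

def matches_needed(win_prob, target=0.95, max_matches=2000):
    '''Smallest number of matches for which the probability of losing the season reaches the target'''
    for n in range(1, max_matches):
        if binom.cdf(n // 2, n, win_prob) >= target:
            return n
    return None

print('Analytic estimate of the number of matches needed: ' + str(matches_needed(rcb_win)))
# -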
# + [markdown] id="Blbz5Bhz-k0L"
# Since it is not feasible to conduct this many matches in order to reject our null hypothesis, we can give RCB the benefit of the doubt and say: RCB is just one unlucky team.
| probability and stats/ipl.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.6.5 64-bit (''base'': conda)'
# language: python
# name: python_defaultSpec_1597189773348
# ---
# +
import SimpleITK as sitk
import pdb
import click
import os
from os.path import join
import numpy as np
import pandas as pd
import nibabel as nib
import cv2
import csv
from scipy.ndimage import gaussian_filter
from scipy.ndimage.measurements import label
from scipy.ndimage.morphology import generate_binary_structure
from scipy.ndimage.measurements import center_of_mass
# %matplotlib nbagg
input_path = '../data/hecktor_nii/'
output_path = '../data/bbox/'
output_shape = (144, 144, 144)
# +
def write_nii(wrt, img, path):
wrt.SetFileName(path)
wrt.Execute(img)
def check_singleGTVt(gtvt):
s = generate_binary_structure(2,2)
labeled_array, num_features = label(gtvt)
if num_features !=1:
print('num_features-------------------------------',num_features)
print('number of voxels:')
for i in np.unique(labeled_array)[1:]:
print (np.sum(labeled_array==i))
print('centers:')
for i in np.unique(labeled_array)[1:]:
print (center_of_mass(labeled_array==i))
return 0
def bbox_auto(vol_pt, gtvt, px_spacing_pt, px_spacing_ct, px_origin_pt, px_origin_ct, output_shape=(144, 144, 144), th = 3, auto_th = False, bbox=None):
# We find the oropharynx region from the PET based on brain segmentation
output_shape_pt = tuple(e1 // e2 for e1, e2 in zip(output_shape, px_spacing_pt))
# Gaussian smooth
vol_pt_gauss = gaussian_filter(vol_pt, sigma=3)
# auto_th: based on max SUV value in the top of the PET scan, for some cases that have unusual SUV values
if auto_th:
th = np.max(vol_pt[np.int(vol_pt.shape[0] * 2 // 3):, :, :]) / 2.6
print ('auto_th = ', th, '----------------------------------')
# OR fixed threshold (for all other cases)
vol_pt_thgauss = np.where(vol_pt_gauss > th, 1, 0)
# Find brain as biggest blob AND not in lowest third of the scan
labeled_array, _ = label(vol_pt_thgauss)
try:
vol_pt_brain = labeled_array == np.argmax(np.bincount(labeled_array[vol_pt.shape[0] * 2 // 3:].flat)[1:]) + 1
except:
print('th too high?')
# Quick fix just to pass for all cases
th = 0.1
vol_pt_thgauss = np.where(vol_pt_gauss > th, 1, 0)
labeled_array, _ = label(vol_pt_thgauss)
vol_pt_brain = labeled_array == np.argmax(np.bincount(labeled_array[vol_pt.shape[0] * 2 // 3:].flat)[1:]) + 1
# Find lowest voxel of the brain and box containing the brain
z = np.min(np.argwhere(np.sum(vol_pt_brain, axis=(1, 2))))
y1 = np.min(np.argwhere(np.sum(vol_pt_brain, axis=(0, 2))))
y2 = np.max(np.argwhere(np.sum(vol_pt_brain, axis=(0, 2))))
x1 = np.min(np.argwhere(np.sum(vol_pt_brain, axis=(0, 1))))
x2 = np.max(np.argwhere(np.sum(vol_pt_brain, axis=(0, 1))))
# Center bb based on this
zshift = 30//px_spacing_pt[2]
if z - (output_shape_pt[2] - zshift) < 0:
zbb = (0, output_shape_pt[2])
elif z + zshift > vol_pt.shape[0]:
zbb = (vol_pt.shape[0] - output_shape_pt[2], vol_pt.shape[0])
else:
zbb = (z - (output_shape_pt[2] - zshift), z + zshift)
yshift = 30//px_spacing_pt[1]
if np.int((y2 + y1) / 2 - yshift - np.int(output_shape_pt[1] / 2)) < 0:
ybb = (0, output_shape_pt[1])
elif np.int((y2 + y1) / 2 - yshift - np.int(output_shape_pt[1] / 2)) > vol_pt.shape[1]:
ybb = vol_pt.shape[1] - output_shape_pt[1], vol_pt.shape[1]
else:
ybb = (np.int((y2 + y1) / 2 - yshift - np.int(output_shape_pt[1] / 2)), np.int((y2 + y1) / 2 - yshift + np.int(output_shape_pt[1] / 2)))
if np.int((x2 + x1) / 2 - np.int(output_shape_pt[0] / 2)) < 0:
xbb = (0, output_shape_pt[0])
elif np.int((x2 + x1) / 2 - np.int(output_shape_pt[0] / 2)) > vol_pt.shape[2]:
xbb = vol_pt.shape[2] - output_shape_pt[0], vol_pt.shape[2]
else:
xbb = (np.int((x2 + x1) / 2 - np.int(output_shape_pt[0] / 2)), np.int((x2 + x1) / 2 + np.int(output_shape_pt[0] / 2)))
print(zbb, ybb, xbb)
z_pt = np.asarray(zbb,dtype=np.int)
y_pt = np.asarray(ybb,dtype=np.int)
x_pt = np.asarray(xbb,dtype=np.int)
# In the physical dimensions
z_abs = z_pt * px_spacing_pt[2] + px_origin_pt[2]
y_abs = y_pt * px_spacing_pt[1] + px_origin_pt[1]
x_abs = x_pt * px_spacing_pt[0] + px_origin_pt[0]
# In the CT resolution:
z_ct = np.asarray((z_abs-px_origin_ct[2])//px_spacing_ct[2],dtype=np.int)
y_ct = np.asarray((y_abs-px_origin_ct[1])//px_spacing_ct[1],dtype=np.int)
x_ct = np.asarray((x_abs-px_origin_ct[0])//px_spacing_ct[0],dtype=np.int)
print(z_ct,y_ct,x_ct)
# Check that the bbox contains the tumors
fail = False
if np.sum(gtvt[z_ct[0]:z_ct[1], y_ct[0]:y_ct[1], x_ct[0]:x_ct[1]]) != np.sum(gtvt):
print('GTVt outside bbox ------------------------------------')
fail = True
# Add the fails for which we had to change the threshold to keep track
if auto_th:
fail = True
if bbox is not None:
x_abs = bbox[0:2]
y_abs = bbox[2:4]
z_abs = bbox[4:6]
z_pt = np.asarray((z_abs - px_origin_pt[2])/ px_spacing_pt[2],dtype=np.int)
y_pt = np.asarray((y_abs - px_origin_pt[1])/ px_spacing_pt[1],dtype=np.int)
x_pt = np.asarray((x_abs - px_origin_pt[0])/ px_spacing_pt[0],dtype=np.int)
z_ct = np.asarray((z_abs-px_origin_ct[2])//px_spacing_ct[2],dtype=np.int)
y_ct = np.asarray((y_abs-px_origin_ct[1])//px_spacing_ct[1],dtype=np.int)
x_ct = np.asarray((x_abs-px_origin_ct[0])//px_spacing_ct[0],dtype=np.int)
#x_pt = np.asarray([50,76],dtype=np.int)
#y_pt = np.asarray([43,70],dtype=np.int)
#z_pt = np.asarray([212,256],dtype=np.int)
#print (x_pt,y_pt,z_pt)
#z_abs = z_pt * px_spacing_pt[2] + px_origin_pt[2]
#y_abs = y_pt * px_spacing_pt[1] + px_origin_pt[1]
#x_abs = x_pt * px_spacing_pt[0] + px_origin_pt[0]
#pdb.set_trace()
if np.sum(gtvt[z_ct[0]:z_ct[1], y_ct[0]:y_ct[1], x_ct[0]:x_ct[1]]) != np.sum(gtvt):
print('still GTVt outside bbox ------------------------------------')
else:
print('now GTVt inside bbox ------------------------------------')
# Plot box on vol_pt_brain for visualization
vol_pt_brain[z_pt[0]:z_pt[1], y_pt[0]:y_pt[0] + 1, x_pt[0]:x_pt[1]] = True
vol_pt_brain[z_pt[0]:z_pt[1], y_pt[1]:y_pt[1] + 1, x_pt[0]:x_pt[1]] = True
vol_pt_brain[z_pt[0]:z_pt[1], y_pt[0]:y_pt[1], x_pt[0]:x_pt[0] + 1] = True
vol_pt_brain[z_pt[0]:z_pt[1], y_pt[0]:y_pt[1], x_pt[1]:x_pt[1] + 1] = True
vol_pt_brain[z_pt[0]:z_pt[0] + 1, y_pt[0]:y_pt[1], x_pt[0]:x_pt[1]] = True
vol_pt_brain[z_pt[1]:z_pt[1] + 1, y_pt[0]:y_pt[1], x_pt[0]:x_pt[1]] = True
return vol_pt_brain, fail, z_abs, y_abs, x_abs
def clip(vol, clip_values=None):
# We clip the CT values
if clip_values:
vol[vol < clip_values[0]] = clip_values[0]
vol[vol > clip_values[1]] = clip_values[1]
return vol
# + tags=[]
try:
os.mkdir(output_path)
print("Directory ", output_path, " Created ")
except FileExistsError:
print("Directory ", output_path, " already exists")
writer = sitk.ImageFileWriter()
writer.SetImageIO("NiftiImageIO")
with open(join(output_path,'../bbipynb.csv'), 'w', newline='') as csvfile:
bbwrite = csv.writer(csvfile, delimiter=',',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
bbwrite.writerow(['PatientID', 'x1', 'x2', 'y1', 'y2', 'z1', 'z2'])
patients = []
for f in sorted(os.listdir(input_path)):
patients.append(f.split('_')[0])
nfail = 0
n=0
list_auto_th = ['CHUM010','CHUS021','CHGJ026','CHMR023','CHGJ053','CHMR028']
list_fix_bb = ['CHMR028','CHGJ053','CHGJ082']
dict_fix_bb = {
"CHMR028": np.asarray([-73.828125,68.359375,-112.109375,35.546875,-204.0536231994629,-60.17230224609375
]),
"CHGJ053": np.asarray([-86.1328125,54.4921875,-166.9921875,-26.3671875,-214.2802734375,-70.4007568359375]),
"CHGJ082": np.asarray([-68.5546875,72.0703125,-170.5078125,-29.8828125,-245.0201416015625,-101.140625])
}
for patient in patients:
#list_p = ['HN-CHUM-020','HN-CHUM-026','HN-CHUM-030','HN-CHUM-042','HN-CHUM-053','HN-CHUM-057','HN-CHUM-065','HN-CHUS-010','HN-CHUS-035','HN-CHUS-045','HN-CHUS-057','HN-CHUS-074','HN-CHUS-086','HN-CHUS-096','HN-HGJ-025','HN-HGJ-062','HN-CHUM-053','HN-CHUM-053','HN-CHUM-053','HN-CHUM-053','HN-CHUM-053','HN-CHUM-053','HN-CHUM-053','HN-CHUM-053','HN-CHUM-053']
# # pdb.set_trace()
#if patient not in list_auto_th[:4]:
# continue
#if patient not in ['HN-HMR-028','HN-HGJ-053','HN-HGJ-082']:
# continue
print('************* patient:', patient)
in_path_ct = input_path + patient + '/' + patient + '_ct.nii.gz'
in_path_gtvt_roi = input_path + patient + '/' + patient + '_ct_gtvt.nii.gz'
if not os.path.exists(in_path_gtvt_roi):
print('no GTVt')
in_path_gtvn_roi = input_path + patient + '/' + patient + '_ct_gtvn.nii.gz'
in_path_pt = input_path + patient + '/' + patient + '_pt.nii.gz'
#out_path_bbox = output_path + patient + '/' + patient + '_ct_bbox'
try:
img_ct = sitk.ReadImage(in_path_ct)
img_pt = sitk.ReadImage(in_path_pt)
except:
print('cannot read ------------')
continue
px_spacing_ct = img_ct.GetSpacing()
px_spacing_pt = img_pt.GetSpacing()
px_origin_ct = img_ct.GetOrigin()
px_origin_pt = img_pt.GetOrigin()
img_ct = sitk.GetArrayFromImage(img_ct)
gtvt = sitk.GetArrayFromImage(sitk.ReadImage(in_path_gtvt_roi))
check_singleGTVt(gtvt)
#gtvn = sitk.GetArrayFromImage(sitk.ReadImage(in_path_gtvn_roi))
img_pt = sitk.GetArrayFromImage(sitk.ReadImage(in_path_pt))
# Fix threshold for some of the patients:
auto_th = False
if patient in list_auto_th[:4]:
auto_th = True
# Fix directly the bbox for some that don't work
bbox = None
if patient in list_fix_bb:
bbox = dict_fix_bb[patient]
img_brain, fail, z_bb, y_bb, x_bb = bbox_auto(img_pt, gtvt, px_spacing_pt, px_spacing_ct, px_origin_pt, px_origin_ct, output_shape, auto_th=auto_th, bbox=bbox)
nfail = nfail + fail
n = n + 1
perm = (0, 1, 2) # No permutation needed now
img_brain = sitk.GetImageFromArray(np.transpose(img_brain.astype(np.uint8), perm), isVector=False)
# img_pt = sitk.GetImageFromArray(np.transpose(img_pt, perm), isVector=False)
out_path_brain = output_path + patient + '_brain.nii'
write_nii(writer, img_brain, out_path_brain)
# Write bb position in csv. To change to panda frame
with open(join(output_path,'../bbipynb.csv'), 'a', newline='') as csvfile:
bbwrite = csv.writer(csvfile, delimiter=',',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
bbwrite.writerow([patient, str(x_bb[0]), str(x_bb[1]), str(y_bb[0]), str(y_bb[1]), str(z_bb[0]), str(z_bb[1])])
print ('fails/total',nfail,n)
# -
| notebooks/crop_dataset.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Basis Sets
# + run_control={"frozen": false, "read_only": false}
import psi4
import numpy as np
# + run_control={"frozen": false, "read_only": false}
# ==> Set Basic Psi4 Options <==
# Memory specification
psi4.set_memory(int(5e8))
numpy_memory = 2
# Set output file
psi4.core.set_output_file('output.dat', False)
# + [markdown] run_control={"frozen": false, "read_only": false}
# Thus far you've used a uniform single basis set from [Psi4's basis set library](https://github.com/psi4/psi4/tree/master/psi4/share/psi4/basis) where you've specified the orbital basis (*e.g.*, `psi4.set_options({'basis': '6-31G*'})`) and allowed Psi4 to select any necessary auxiliary basis sets to fulfill your requested algorithm. In this tutorial, we'll learn how to construct custom orbital and auxiliary basis sets and get information from the psi4.core.BasisSet object.
# + [markdown] run_control={"frozen": false, "read_only": false}
# One distinction that's important to make early on is that a "BasisSet object" is always tailored to a Molecule --- there are no shells assigned to carbon in a cc-pVDZ BasisSet associated with water. In contrast, a "basis set definition" is a set of rules for applying shells to many Molecules (not _any_ Molecule-s because the definition mightn't include uranium, for instance) on the basis of elemental composition and atom labeling. There's nothing stopping you from assigning carbon-parametrized shells to carbon _and_ oxygen in a basis set definition. When the basis set definition is applied to water, the resulting BasisSet object will have carbon-parametrized shells assigned to oxygen but no shells assigned to carbon. Keep this distinction in mind since a basis set like `cc-pVDZ` is commonly used in both roles interchangeably.
# + run_control={"frozen": false, "read_only": false}
from pkg_resources import parse_version
if parse_version(psi4.__version__) >= parse_version('1.3a1'):
refnuc = 204.01995818060678
refscf = -228.95763005900784
else:
refnuc = 204.01995737868003
refscf = -228.95763005849557
bzb = psi4.geometry("""
X
X 1 RXX
X 2 RXX 1 90.0
C 3 RCC 2 90.0 1 0.0
C 3 RCC 2 90.0 1 60.0
C1 3 RCC 2 90.0 1 120.0
C 3 RCC 2 90.0 1 180.0
C1 3 RCC 2 90.0 1 240.0
C 3 RCC 2 90.0 1 300.0
H1 3 RCH 2 90.0 1 0.0
H 3 RCH 2 90.0 1 60.0
H 3 RCH 2 90.0 1 120.0
H1 3 RCH 2 90.0 1 180.0
H 3 RCH 2 90.0 1 240.0
H 3 RCH 2 90.0 1 300.0
RCC = 1.3915
RCH = 2.4715
RXX = 1.00
""")
psi4.core.IO.set_default_namespace("bzb")
def basisspec_psi4_yo__anonymous775(mol, role):
basstrings = {}
mol.set_basis_all_atoms("DZ", role=role)
mol.set_basis_by_symbol("C", "my3-21G", role=role)
mol.set_basis_by_label("H1", "sto-3g", role=role)
mol.set_basis_by_label("C1", "sto-3g", role=role)
basstrings['my3-21g'] = """
cartesian
****
H 0
S 2 1.00
5.4471780 0.1562850
0.8245470 0.9046910
S 1 1.00
0.1831920 1.0000000
****
C 0
S 3 1.00
172.2560000 0.0617669
25.9109000 0.3587940
5.5333500 0.7007130
SP 2 1.00
3.6649800 -0.3958970 0.2364600
0.7705450 1.2158400 0.8606190
SP 1 1.00
0.1958570 1.0000000 1.0000000
****
"""
basstrings['dz'] = """
spherical
****
H 0
S 3 1.00
19.2406000 0.0328280
2.8992000 0.2312080
0.6534000 0.8172380
S 1 1.00
0.1776000 1.0000000
****
"""
return basstrings
psi4.qcdb.libmintsbasisset.basishorde['ANONYMOUS775'] = basisspec_psi4_yo__anonymous775
psi4.set_options({'basis': 'anonymous775',
'scf_type': 'pk',
'e_convergence': 11,
'd_convergence': 11})
eb, wb = psi4.energy('scf', return_wfn=True)
psi4.compare_strings("c2v", bzb.schoenflies_symbol(), "Point group")
psi4.compare_values(refnuc, bzb.nuclear_repulsion_energy(), 10, "Nuclear repulsion energy")
psi4.compare_values(refscf, eb, 10, "SCF Energy")
# + run_control={"frozen": false, "read_only": false}
psi4.core.set_output_file('output.dat', True)
bsb = wb.get_basisset('ORBITAL')
bsb.print_detail_out()
bsb.print_out()
# + run_control={"frozen": false, "read_only": false}
# cc-pvdz aug-cc-pvdz
# BASIS H 5/ 5 C 14/15 H +4/ 4 C +9/10
# RIFIT H 14/15 C 56/66 H +9/10 C +16/20
# JKFIT H 23/25 C 70/81 H +9/10 C +16/20
mymol = psi4.qcdb.Molecule("""
C 0.0 0.0 0.0
O 1.4 0.0 0.0
H_r -0.5 -0.7 0.0
H_l -0.5 0.7 0.0
""")
print('[1] <<< uniform cc-pVDZ >>>')
wert = psi4.qcdb.BasisSet.pyconstruct(mymol, 'BASIS', 'cc-pvdz')
psi4.compare_integers(38, wert.nbf(), 'nbf()')
psi4.compare_integers(40, wert.nao(), 'nao()')
psi4.compare_strings('c2v', mymol.schoenflies_symbol(), 'symm')
mymol.print_out()
print('[2] <<< RIFIT (default) >>>')
wert = psi4.qcdb.BasisSet.pyconstruct(mymol, 'DF_BASIS_MP2', '', 'RIFIT', 'cc-pvdz')
psi4.compare_integers(140, wert.nbf(), 'nbf()')
psi4.compare_integers(162, wert.nao(), 'nao()')
psi4.compare_strings('c2v', mymol.schoenflies_symbol(), 'symm')
mymol.print_out()
print('[3] <<< cc-pVDZ w/ aug-cc-pVDZ on C >>>')
def basisspec_psi4_yo__anonymous775(mol, role):
basstrings = {}
mol.set_basis_all_atoms("DZ", role=role)
mol.set_basis_by_symbol("C", "my3-21G", role=role)
def basis__dz_PLUS(mol, role):
mol.set_basis_all_atoms("cc-pvdz", role=role)
mol.set_basis_by_symbol("C", "aug-cc-pvdz")
return {}
wert = psi4.qcdb.BasisSet.pyconstruct(mymol, 'BASIS', basis__dz_PLUS)
psi4.compare_integers(47, wert.nbf(), 'nbf()')
psi4.compare_integers(50, wert.nao(), 'nao()')
psi4.compare_strings('c2v', mymol.schoenflies_symbol(), 'symm')
mymol.print_out()
| Tutorials/01_Psi4NumPy-Basics/1g_basis-sets.ipynb |
#!/usr/bin/env python
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # ATLAS Test Beam Data
#
# Python script for analysis of ATLAS test beam data.
#
# The program reads an ASCII (i.e. text) data file, containing a large number of events, where a charged particle (electron or pion) passed through a slice of the ATLAS detector. Each passage is recorded by different detectors (essentially three independent ones!), boiling down to eleven numbers (some more relevant than others). The exercise is to separate electron and pion events based on these numbers, and in turn use this information to measure the interaction of pions and electrons separately.
#
# NOTE: Though the data is from particle physics, it could in principle have been from ANY other source, and the eleven numbers could for example have been indicators of cancer, key numbers for investors, or index numbers for identifying potential customers.
#
# For more information on ATLAS test beam: http://www.nbi.dk/~petersen/Teaching/Stat2021/TestBeam/TestBeamDataAnalysis.html.
#
# ***
#
# ### Authors:
# - <NAME> (Niels Bohr Institute)
#
# ### Date:
# - 26-12-2021 (latest update)
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
# Set parameters of the program:
# Write extensive output
verbose = True
N_verbose = 20
# Open file, `DataSet_AtlasPid_ElectronPion_2GeV.txt`, and read in all the data:
# +
data = np.loadtxt('DataSet_AtlasPid_ElectronPion_2GeV.txt', skiprows=1, unpack=True)
Cher, nLT, nHT, EM0, EM1, EM2, EM3, Had0, Had1, Had2, Muon = data
Cher.shape
if (verbose) :
for i in range (N_verbose) :
print(f"Cher: {Cher[i]:6.1f} nLT, nHT: {int(nLT[i]):2d}, {int(nHT[i]):2d} EM: {EM0[i]:5.2f} {EM1[i]:5.2f} {EM2[i]:5.2f} {EM3[i]:5.2f} Had: {Had0[i]:5.2f} {Had1[i]:5.2f} {Had2[i]:5.2f} Muon: {Muon[i]:5.1f}")
# -
# ## Your analysis:
#
# This is where your analysis should go:
# +
# This is where your analysis should go:
# -
# And plot the results, cuts or whatever type of analysis choices you have made:
# +
fig, ax = plt.subplots(ncols=2, nrows=2, figsize=(16, 10))
ax[0,0].hist(Cher, bins=200, range=(400, 1400), histtype='step', label='Cherenkov')
ax[0,0].set(xlabel="Cherenkov", ylabel="Frequency")
ax[1,1].hist(EM1, bins=200, range=(-0.5, 2.0), histtype='step', label='EM1')
ax[1,1].set(xlabel="EM1", ylabel="Frequency")
h = ax[0,1].hist2d(EM1, Cher, bins=(100, 100), range=((-0.5, 2.0), (400, 1400)), norm=mpl.colors.LogNorm(), cmap="Reds")
plt.colorbar(h[3], ax=ax[0, 1]) # z-scale on the right of the figure
ax[0,1].set(xlabel="EM1", ylabel="Cherenkov")
# You can add your own fourth plot:
fig.delaxes(ax[1,0])
fig.tight_layout()
# -
# NOTE: The simple plotting function gives a warning for EM1 (not Cher). Ask yourself whether you checked the original data file input values in any way. That is always "healthy"!
#
# ***
# Below we show a simple, quick Seaborn function `jointplot` and how it can be useful (but possibly also too simple) when visualizing 2D-distributions:
# Show the joint distribution using kernel density estimation
g = sns.jointplot(Cher, EM1, xlim=(400, 1400), ylim=(-0.5, 2.0), kind="kde", height=10, space=0) # also try kind="hex" or "kde"
g.set_axis_labels(xlabel='Cher', ylabel='EM1')
# ## Questions to be answered:
#
# Generally, this analysis is about separating electrons and pions (and determining how well this can be done), followed by a few questions characterizing the detector response to each type of particle. Thus, you should imagine that your new detector/equipment/questionnaire gave you this output, and now it is up to you to find out what this tells you about your experiment and how to extract information from it in the best possible way. Typically, it will have taken you months (if not years) to get to this point.
#
# Note that this data is NOT meant for “fine tuned analysis”, but rather “crude inspection”. You should try to get simple approximate answers out - I’m sure that you will afterwards be able to fine tune them.
#
# Below are questions guiding you, some/most of which your analysis should cover, but you do **not** have to follow them blindly (I've put "Optional" on those that are not essential). Start by considering the data, and get a feel for the typical range of each variable. Plot the variables, both in 1D and also 2D! From considering these plots, build an approximate picture of how electrons and pions distribute themselves in the variables above, and of how to make a selection using these variables.
#
# As described on the webpage introducing the data, there are three (relevant) detectors:
# - Cherenkov,
# - TRT (Transition Radiation Tracker) and
# - Calorimeters
#
# They are each capable of separating electrons and pions. As they are (largely) _INDEPENDENT_ (three separate detectors), they may be used to cross check each other, and this is what you should use, in fact the essential part of this (and many other) analysis!
#
#
# Questions:
# ----------
# 1. Find for each of these three detector systems one variable, which seem to separate electrons and pions best. For example, start with the Cherenkov, which is only a single number, and assume/guess that the large peak at low values is mainly from pions, while the upper broad peak is from electrons (this you would know, as you designed the experiment). Now plot the TRT and Calorimeter distributions when the Cherenkov selects a pion and afterwards an electron. This should give you a good idea about how to separate pions and electrons using the TRT and Calorimeters.
#
# Hint: Sometimes variables from a single detector are more powerful, when they are combined, e.g. taken ratios of (or used in a Fisher or ML algorithm). For the TRT this may be somewhat doable, but for the EMcalo, it is not as simple. Here, one variable caries most of the separation power, but involving other layers may enhance the separation power. However, to begin with, just consider a single number from each detector.
#
#
# 2. Next you should try to see, if you can make a selection, which gives you a fairly large and clean electron and pion sample, respectively. The question is, how can you know how clean your sample is and how efficient your selection is? This can actually be measured in the data itself, using the fact that there are three independent detectors. For example, start by making an electron and a pion selection using two of the three variables, and plot the third variable for each of these selections. Now you can directly see, how electrons and pions will distribute themselves in this third variable. Are you worried, that there are pions in your electron sample, and vice versa? Well, there will probably be, but so few, that it won't matter too much, at least not to begin with. Why? Well, let us assume that for each detector, 80% of electrons pass your requirement, but also 10% of pions do. Assuming an even number of electrons and pions (which is not really the case), then with two detector cuts, you should get a sample, which is: $\frac{0.8\cdot0.8} {0.8\cdot0.8 + 0.1\cdot0.1} = 98.5\%$ pure.
#
# Now with this sample based on cuts on the two other detectors, ask what fraction of electrons and pions passes your electron selection. The fraction of electrons, that are not selected as electrons will be your TYPE I errors, denoted alpha, while the fraction of pions, that do pass the cut will be your TYPE II errors, denoted beta. Measure these for each of the two cuts in the three detector types, and ask yourself if they are "reasonable", i.e. something like in the example above. If not, then you should perhaps reconsider adjusting your cuts.
#
# By now, you should for each detector have 6 numbers:
# - The electron cut value above which you accept an electron.
# - The efficiency (i.e. 1-alpha) for electrons of this cut.
# - The fake rate (i.e. beta) for pions of this cut.
# - The pion cut value below which you accept a pion (may be same value as above for electrons!).
# - The efficiency (i.e. 1-alpha) for pions of this cut.
# - The fake rate (i.e. beta) for electrons of this cut.
#
#
# 3. Given the efficiencies and fake rates of each cut, try to combine these (again assuming that they are independent) into knowledge of your sample purities and also the total number of electrons and pions in the whole sample. Do the sum of estimated electrons and pions added actually match the number of particles in total? This is a good cross check!
#
#
# 4. If the number of pions was suddenly 1000 times that of elections, would you still be able to get a sample of fairly pure (say 90% pure) electrons? And if so, what would the efficiency for these electrons be? That is equivalent of asking, if you can get a 99.9% pure electron sample from the data given.
#
#
# 5. Expanding on problem 2), try now to calculate ROC curves for each of the three detectors. These are obtained by making a clean selection using the two other detectors for electrons and pions seperately, and then integrating over these two distributions, using the running (normalised) integral of each as x and y coordinate in a (ROC) curve. If you do not manage on your own, perhaps consider the ROC calculator example, which is posted along with this exercise.
#
#
# 6. __(Partially optional)__: One of the purposes of the testbeam was to measure the response of the TRT detector to exactly electrons and pions. Consider for example only events that has 33 TRT hits (i.e. `nLT` $= 33$). As the High-Threshold probability (i.e. probability of passing the High-Threshold, given that the Low-Threshold was passed), is assumed to be constant in the TRT detector (but quite different for electrons and pions), what distribution should the number of High-Threshold hits (`nHT`) follow? And is that really the case, both for electrons and pions?
#
#
# 7. __(Optional)__: Still considering `nLT` $=33$, and given that there are both electrons and pions in the sample (each with a different HT probability), `nHT` should in the unselected data be a combination of two distributions. Try to fit the number of HT hits with two distributions combined. Do they fit the data? And can this fit be used to estimate the fraction of pions and electrons in the sample? And does that estimate match you previous estimate? Perhaps retry with other values for the number of TRT hits.
#
#
# 8. __(Optional)__: Try to select pions using three different (mutually exclusive) techniques:
# 1. Passing only a hadronic calorimeter requirement (e.g. that the sum of the three HCal values is above some minimum energy).
# 2. Passing only Cherenkov AND EMcalo requirements.
# 3. Passing both A) and B).<br>
# Try to measure the HT probability (i.e. fraction of High-Threshold hits) for each of these three pion samples. Do they agree with each other?
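#
# The short code sketch below is not part of the original exercise text; it only illustrates the purity estimate from point 2 and the ROC construction from point 5, using made-up Gaussian stand-ins for the detector responses rather than the actual testbeam data.

# +
import numpy as np

# Point 2: per-detector electron efficiency 0.8 and pion fake rate 0.1, equal numbers of
# electrons and pions, and two independent detector cuts:
eff_e, fake_pi = 0.80, 0.10
purity = eff_e**2 / (eff_e**2 + fake_pi**2)
print(f"Electron purity after two independent cuts: {purity:.3f}")   # ~0.985

# Point 5: a ROC curve is the pair of running (normalised) integrals of the electron and
# pion distributions in one variable, scanned over the same cut values:
electrons = np.random.normal(1.0, 0.3, 10000)   # stand-in electron response
pions = np.random.normal(0.3, 0.3, 10000)       # stand-in pion response
cuts = np.linspace(-1.0, 3.0, 200)
eff_electron = np.array([(electrons > c).mean() for c in cuts])   # electron efficiency (1 - alpha)
fake_pion = np.array([(pions > c).mean() for c in cuts])          # pion fake rate (beta)
# plotting fake_pion (x) against eff_electron (y) gives the ROC curve
# -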
| AppStat2022/Week6/original/ATLAStestbeam/ATLAStestbeam.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
#import xlrd
import matplotlib.pyplot as plt
# %matplotlib inline
data=pd.read_csv('PHASEdata.csv')
data.head()
# Series.reshape was removed in newer pandas, so convert each row to a NumPy array first
data_4d_1=data.iloc[0,1:].to_numpy().reshape(48,48)
data_4d_2=data.iloc[1,1:].to_numpy().reshape(48,48)
data_4d_3=data.iloc[2,1:].to_numpy().reshape(48,48)
data_4d_4=data.iloc[3,1:].to_numpy().reshape(48,48)
data_4d_5=data.iloc[4,1:].to_numpy().reshape(48,48)
data_4d_1
# +
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
a=np.linspace(1,48,48)
b=np.linspace(1,48,48)
x,y=np.meshgrid(a,b)
z1=data_4d_1
#z2=data_4d_2
#z3=data_4d_3
#z4=data_4d_4
#z5=data_4d_5
ax = fig.add_subplot(111, projection='3d')
ax.scatter(x, y, z1)
#ax.scatter(x, y, z2)
#ax.scatter(x, y, z3)
#ax.scatter(x, y, z4)
#ax.scatter(x, y, z5)
#ax.set_zlim(0.5,1)
#ax.set_xlabel('theta')
plt.show()
# -
a=np.linspace(0,9,10)
b=np.linspace(0,9,10)
c=np.linspace(0,9,10)
x,y = np.meshgrid(a,b)
a = x.flatten('F')
print(a)
len(a)
| doc/.ipynb_checkpoints/AFM_loaddata-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from qiskit import *
from qiskit.visualization import plot_histogram
# +
# simulator # 3 iteration
counts_m0 = {'0000': 866, '0001': 13, '0010': 21, '0011': 13, '0100': 9, '0101': 7, '0110': 9, '0111': 9, '1000': 8, '1001': 12, '1010': 14, '1011': 6, '1100': 8, '1101': 6, '1110': 10, '1111': 13}
counts_m1 = {'0000': 5, '0001': 875, '0010': 19, '0011': 10, '0100': 14, '0101': 9, '0110': 11, '0111': 8, '1000': 16, '1001': 9, '1010': 5, '1011': 6, '1100': 9, '1101': 10, '1110': 10, '1111': 8}
counts_m2 = {'0000': 7, '0001': 14, '0010': 869, '0011': 16, '0100': 11, '0101': 9, '0110': 7, '0111': 11, '1000': 18, '1001': 8, '1010': 4, '1011': 12, '1100': 8, '1101': 8, '1110': 12, '1111': 10}
counts_m3 = {'0000': 15, '0001': 9, '0010': 5, '0011': 883, '0100': 16, '0101': 8, '0110': 6, '0111': 10, '1000': 12, '1001': 12, '1010': 6, '1011': 8, '1100': 10, '1101': 11, '1110': 8, '1111': 5}
counts_m4 = {'0000': 11, '0001': 15, '0010': 6, '0011': 6, '0100': 873, '0101': 7, '0110': 10, '0111': 9, '1000': 11, '1001': 12, '1010': 13, '1011': 7, '1100': 9, '1101': 13, '1110': 10, '1111': 12}
counts_m5 = {'0000': 8, '0001': 13, '0010': 10, '0011': 12, '0100': 9, '0101': 871, '0110': 12, '0111': 13, '1000': 10, '1001': 5, '1010': 9, '1011': 8, '1100': 11, '1101': 11, '1110': 14, '1111': 8}
counts_m6 = {'0000': 10, '0001': 12, '0010': 10, '0011': 9, '0100': 16, '0101': 10, '0110': 858, '0111': 8, '1000': 10, '1001': 13, '1010': 15, '1011': 6, '1100': 10, '1101': 19, '1110': 11, '1111': 7}
counts_m7 = {'0000': 9, '0001': 10, '0010': 11, '0011': 9, '0100': 10, '0101': 16, '0110': 17, '0111': 861, '1000': 9, '1001': 12, '1010': 9, '1011': 9, '1100': 7, '1101': 14, '1110': 10, '1111': 11}
counts_m8 = {'0000': 11, '0001': 12, '0010': 15, '0011': 15, '0100': 7, '0101': 13, '0110': 10, '0111': 5, '1000': 890, '1001': 4, '1010': 5, '1011': 7, '1100': 9, '1101': 1, '1110': 11, '1111': 9}
counts_m9 = {'0000': 12, '0001': 8, '0010': 7, '0011': 10, '0100': 10, '0101': 10, '0110': 14, '0111': 12, '1000': 10, '1001': 871, '1010': 17, '1011': 9, '1100': 11, '1101': 5, '1110': 8, '1111': 10}
counts_m10 = {'0000': 14, '0001': 9, '0010': 8, '0011': 10, '0100': 5, '0101': 11, '0110': 12, '0111': 8, '1000': 8, '1001': 9, '1010': 888, '1011': 9, '1100': 11, '1101': 4, '1110': 10, '1111': 8}
counts_m11 = {'0000': 3, '0001': 18, '0010': 8, '0011': 7, '0100': 10, '0101': 7, '0110': 4, '0111': 11, '1000': 10, '1001': 9, '1010': 10, '1011': 897, '1100': 7, '1101': 6, '1110': 9, '1111': 8}
counts_m12 = {'0000': 10, '0001': 10, '0010': 5, '0011': 6, '0100': 9, '0101': 8, '0110': 9, '0111': 12, '1000': 8, '1001': 14, '1010': 8, '1011': 12, '1100': 881, '1101': 11, '1110': 13, '1111': 8}
counts_m13 = {'0000': 10, '0001': 6, '0010': 8, '0011': 11, '0100': 10, '0101': 7, '0110': 10, '0111': 11, '1000': 12, '1001': 5, '1010': 7, '1011': 14, '1100': 7, '1101': 883, '1110': 10, '1111': 13}
counts_m14 = {'0000': 13, '0001': 7, '0010': 13, '0011': 10, '0100': 13, '0101': 12, '0110': 14, '0111': 10, '1000': 6, '1001': 7, '1010': 9, '1011': 10, '1100': 11, '1101': 10, '1110': 866, '1111': 13}
counts_m15 = {'0000': 11, '0001': 16, '0010': 12, '0011': 9, '0100': 12, '0101': 16, '0110': 14, '0111': 7, '1000': 8, '1001': 8, '1010': 7, '1011': 8, '1100': 16, '1101': 12, '1110': 8, '1111': 860}
counts_sim_states = [counts_m0, counts_m1, counts_m2, counts_m3, counts_m4, counts_m5, counts_m6, counts_m7, counts_m8, counts_m9, counts_m10, counts_m11, counts_m12, counts_m13, counts_m14, counts_m15]
# +
# ibmq_santiago # 3 iteration
counts_m0 = {'0000': 43, '0001': 67, '0010': 34, '0011': 44, '0100': 72, '0101': 69, '0110': 62, '0111': 55, '1000': 65, '1001': 66, '1010': 51, '1011': 62, '1100': 89, '1101': 93, '1110': 69, '1111': 83}
counts_m1 = {'0000': 65, '0001': 58, '0010': 49, '0011': 58, '0100': 66, '0101': 65, '0110': 44, '0111': 67, '1000': 52, '1001': 65, '1010': 69, '1011': 59, '1100': 77, '1101': 73, '1110': 80, '1111': 77}
counts_m2 = {'0000': 53, '0001': 50, '0010': 51, '0011': 54, '0100': 56, '0101': 44, '0110': 56, '0111': 63, '1000': 82, '1001': 78, '1010': 65, '1011': 80, '1100': 64, '1101': 71, '1110': 76, '1111': 81}
counts_m3 = {'0000': 50, '0001': 60, '0010': 45, '0011': 49, '0100': 67, '0101': 75, '0110': 66, '0111': 70, '1000': 54, '1001': 57, '1010': 57, '1011': 62, '1100': 78, '1101': 68, '1110': 93, '1111': 73}
counts_m4 = {'0000': 73, '0001': 64, '0010': 53, '0011': 69, '0100': 52, '0101': 71, '0110': 78, '0111': 76, '1000': 56, '1001': 74, '1010': 57, '1011': 84, '1100': 57, '1101': 63, '1110': 48, '1111': 49}
counts_m5 = {'0000': 67, '0001': 66, '0010': 69, '0011': 53, '0100': 49, '0101': 57, '0110': 58, '0111': 56, '1000': 69, '1001': 70, '1010': 65, '1011': 69, '1100': 74, '1101': 64, '1110': 74, '1111': 64}
counts_m6 = {'0000': 55, '0001': 66, '0010': 60, '0011': 66, '0100': 50, '0101': 65, '0110': 40, '0111': 54, '1000': 82, '1001': 87, '1010': 84, '1011': 78, '1100': 64, '1101': 57, '1110': 47, '1111': 69}
counts_m7 = {'0000': 45, '0001': 53, '0010': 67, '0011': 58, '0100': 72, '0101': 55, '0110': 73, '0111': 77, '1000': 50, '1001': 55, '1010': 66, '1011': 59, '1100': 61, '1101': 66, '1110': 86, '1111': 81}
counts_m8 = {'0000': 66, '0001': 54, '0010': 52, '0011': 56, '0100': 79, '0101': 73, '0110': 74, '0111': 79, '1000': 51, '1001': 50, '1010': 52, '1011': 54, '1100': 80, '1101': 71, '1110': 74, '1111': 59}
counts_m9 = {'0000': 57, '0001': 46, '0010': 71, '0011': 72, '0100': 59, '0101': 62, '0110': 83, '0111': 76, '1000': 49, '1001': 59, '1010': 81, '1011': 64, '1100': 61, '1101': 54, '1110': 71, '1111': 59}
counts_m10 = {'0000': 50, '0001': 43, '0010': 61, '0011': 62, '0100': 61, '0101': 64, '0110': 78, '0111': 58, '1000': 68, '1001': 58, '1010': 70, '1011': 78, '1100': 74, '1101': 49, '1110': 77, '1111': 73}
counts_m11 = {'0000': 67, '0001': 68, '0010': 65, '0011': 50, '0100': 72, '0101': 78, '0110': 57, '0111': 63, '1000': 54, '1001': 67, '1010': 61, '1011': 78, '1100': 63, '1101': 67, '1110': 60, '1111': 54}
counts_m12 = {'0000': 71, '0001': 90, '0010': 49, '0011': 63, '0100': 66, '0101': 63, '0110': 63, '0111': 57, '1000': 71, '1001': 68, '1010': 80, '1011': 66, '1100': 54, '1101': 55, '1110': 47, '1111': 61}
counts_m13 = {'0000': 70, '0001': 57, '0010': 61, '0011': 70, '0100': 62, '0101': 61, '0110': 51, '0111': 52, '1000': 76, '1001': 65, '1010': 70, '1011': 68, '1100': 69, '1101': 72, '1110': 52, '1111': 68}
counts_m14 = {'0000': 84, '0001': 78, '0010': 79, '0011': 75, '0100': 52, '0101': 51, '0110': 44, '0111': 48, '1000': 79, '1001': 55, '1010': 85, '1011': 66, '1100': 61, '1101': 64, '1110': 55, '1111': 48}
counts_m15 = {'0000': 81, '0001': 58, '0010': 85, '0011': 72, '0100': 65, '0101': 53, '0110': 66, '0111': 58, '1000': 67, '1001': 55, '1010': 64, '1011': 61, '1100': 74, '1101': 52, '1110': 64, '1111': 49}
counts_ibmq_s_states = [counts_m0, counts_m1, counts_m2, counts_m3, counts_m4, counts_m5, counts_m6, counts_m7, counts_m8, counts_m9, counts_m10, counts_m11, counts_m12, counts_m13, counts_m14, counts_m15]
# +
df_sim = pd.DataFrame(counts_sim_states)
df_sim.index = ['0000', '0001', '0010', '0011', '0100', '0101', '0110', '0111', '1000', '1001', '1010', '1011', '1100', '1101', '1110', '1111']
df_sim
# y-axis = Marked State
# x-axis = Measured State
# +
# Use rocket or mako for heatmap in sns. Put _r after the name for reverse colors.
# E.g rocket_r or mako_r
fig, ax = plt.subplots(figsize=(15, 10))
heatmap = sns.heatmap(df_sim, linewidths = 1, cmap = 'mako_r', cbar_kws = {'label': 'Times Measured'})
plt.xlabel('Measured State')
plt.ylabel('Marked State')
# plt.savefig('simulator_heatmap_no_iter.png')
plt.show()
# -
Average_Times_Measured_sim = []
Average_Times_Measured_sim = list(df_sim.mean(axis = 0)) # axis = 0 averages down each column.
print(Average_Times_Measured_sim)
df_sim_subtract = df_sim.subtract(Average_Times_Measured_sim, axis = 1)
df_sim_subtract
# +
fig, ax = plt.subplots(figsize=(15, 10))
heatmap = sns.heatmap(df_sim_subtract, linewidths = 1, cmap = 'mako_r', cbar_kws = {'label': 'Times Measured'})
plt.xlabel('Measured State')
plt.ylabel('Marked State')
# plt.savefig('simulator_heatmap_no_iter.png')
plt.show()
# +
# df_ibmqx2 = pd.DataFrame(counts_ibmqx2_states)
# df_ibmqx2.index = ['0000', '0001', '0010', '0011', '0100', '0101', '0110', '0111', '1000', '1001', '1010', '1011', '1100', '1101', '1110', '1111']
# df_ibmqx2
# # y-axis = Marked State
# # x-axis = Measure State
# +
# fig, ax = plt.subplots(figsize=(15, 10))
# heatmap = sns.heatmap(df_ibmqx2, linewidths = 1, cmap = 'mako_r', cbar_kws = {'label': 'Times Measured'})
# plt.xlabel('Measured State')
# plt.ylabel('Marked State')
# # plt.savefig('ibmqx2_heatmap_no_iter.png')
# plt.show()
# +
# Average_Times_Measured_ibmqx2 = []
# Average_Times_Measured_ibmqx2 = list(df_ibmqx2.mean(axis = 0))
# # Average_Times_Measured = df_ibmqx2.mean(axis = 0)
# print(Average_Times_Measured_ibmqx2)
# +
# df_ibmqx2_subtract = df_ibmqx2.subtract(Average_Times_Measured_ibmqx2, axis = 1)
# df_ibmqx2_subtract
# +
# fig, ax = plt.subplots(figsize=(15, 10))
# heatmap = sns.heatmap(df_ibmqx2_subtract, linewidths = 1, cmap = 'mako_r', cbar_kws = {'label': 'Times Measured'})
# plt.xlabel('Measured State')
# plt.ylabel('Marked State')
# # plt.savefig('ibmqx2_heatmap_no_iter.png')
# plt.show()
# +
# fig, ax = plt.subplots(figsize=(15, 10))
# heatmap = sns.heatmap(df_ibmqx2_subtract, vmin = -60, vmax = 60, cmap = 'Blues_r', cbar_kws = {'label': 'Times Measured'})
# # for data in heatmap.text:
# # n = data
# plt.xlabel('Measured State')
# plt.ylabel('Marked State')
# # plt.savefig('ibmqx2_heatmap_no_iter.png')
# plt.show()
# +
df_ibmq_s = pd.DataFrame(counts_ibmq_s_states)
df_ibmq_s.index = ['0000', '0001', '0010', '0011', '0100', '0101', '0110', '0111', '1000', '1001', '1010', '1011', '1100', '1101', '1110', '1111']
df_ibmq_s
# y-axis = Marked State
# x-axis = Measured State
# +
fig, ax = plt.subplots(figsize=(15, 10))
heatmap = sns.heatmap(df_ibmq_s, linewidths = 1, cmap = 'mako_r', cbar_kws = {'label': 'Times Measured'})
plt.xlabel('Measured State')
plt.ylabel('Marked State')
# plt.savefig('ibmq_s_heatmap_no_iter.png')
plt.show()
# -
Average_Times_Measured_ibmq_s = []
Average_Times_Measured_ibmq_s = list(df_ibmq_s.mean(axis = 0))
# Average_Times_Measured = df_ibmq_s.mean(axis = 0)
print(Average_Times_Measured_ibmq_s)
df_ibmq_s_subtract = df_ibmq_s.subtract(Average_Times_Measured_ibmq_s, axis = 1)
df_ibmq_s_subtract
# +
fig, ax = plt.subplots(figsize=(15, 10))
heatmap = sns.heatmap(df_ibmq_s_subtract, linewidths = 1, cmap = 'mako_r', cbar_kws = {'label': 'Times Measured'})
plt.xlabel('Measured State')
plt.ylabel('Marked State')
# plt.savefig('ibmq_s_heatmap_no_iter.png')
plt.show()
# +
fig, ax = plt.subplots(figsize=(15, 10))
heatmap = sns.heatmap(df_ibmq_s_subtract, vmin = -60, vmax = 60, cmap = 'Blues_r', cbar_kws = {'label': 'Times Measured'})
# for data in heatmap.text:
# n = data
plt.xlabel('Measured State')
plt.ylabel('Marked State')
# plt.savefig('ibmq_s_heatmap_no_iter.png')
plt.show()
# -
| analysis/result_analysis-3-iter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %run setup.py
# +
from exoplanet.utils import eval_in_model
from exoplanet.orbits import get_true_anomaly
import pymc3 as pm
import theano.tensor as tt
from maelstrom.synthetic import SyntheticBinary
from maelstrom.utils import mass_function
# +
# Check time delay signal is injected properly
period = 10.
eccen = 0.
asini = 500
varpi = 0.
tref = 0.
n_freq = 1
freqs = np.array([30])
amps = np.array([1])
# TESS 2 min cadence for 1 sector
time = np.arange(0, 27, 1.0 / (24 * 30))
lc = SyntheticBinary(time, freqs, amps,
period, eccen, asini, varpi, tref, tau=True)
lc.add_noise(snr=80)
# plt.plot(*amplitude_spectrum(lc.time, lc.flux))
# +
from tqdm import tqdm
from scipy.optimize import curve_fit
# This is a simple time delay model
def model(time, *theta):
freq, amp, asini = np.reshape(theta, (3, len(freqs)))
T_delay = 1 * (asini / 86400)
res = np.zeros(len(time))
for i in range(len(freq)):
res += amp[i] * np.sin(2 * np.pi * freq[i] * (time - T_delay[0]))
return res
# -
# Let's test that it works by choosing a single SNR value.
# +
snr = 1000
asinis = []
lc = SyntheticBinary(time, freqs, amps, period, eccen, asini, varpi, tref, tau=False)
s_rms = lc.amplitude.max() / (np.sqrt(np.pi / len(lc.time)) * snr)
for i in tqdm(range(5000)):
noisy_flux = lc.flux_true + np.random.normal(loc=0.0, scale=s_rms, size=len(lc.time))
x0 = np.array([freqs[0], amps[0], 0]).flatten()
popt, pcov = curve_fit(model, lc.time, noisy_flux, p0=x0)
asinis.append(popt[-1])
# -
plt.hist(asinis);
# Now let's do it for a range of SNRs. The vals below are the d Sct SNR percentiles obtained in the previous notebook
vals = [14.6757, 1.4668, 0.72857]
# +
from tqdm import tqdm
np.random.seed(42)
snrs = np.geomspace(10,1000,50)
asinis_sd = []
for snr in tqdm(snrs):
asinis = []
lc = SyntheticBinary(time, freqs, amps, period, eccen, asini, varpi, tref, tau=False)
s_rms = lc.amplitude.max() / (np.sqrt(np.pi / len(lc.time)) * snr)
for i in range(1000):
noisy_flux = lc.flux_true + np.random.normal(loc=0.0, scale=s_rms, size=len(lc.time))
x0 = np.array([freqs[0], amps[0], 0]).flatten()
popt, pcov = curve_fit(model, lc.time, noisy_flux, p0=x0)
asinis.append(popt[-1])
asinis_sd.append(np.std(asinis))
# +
from scipy import stats
import astropy.units as u
slope, intercept, r_value, p_value, std_err = stats.linregress(np.log(snrs), np.log(asinis_sd))
# -
vals = [14.6757, 1.4668, 0.72857]
# +
fig, axes = plt.subplots(1,2,
figsize=[7,2.5],
constrained_layout=True)
linewidth=0.8
ax = axes[0]
ax.plot(snrs, asinis_sd, '.k', markersize=3, linewidth=0.7)
# ax.plot(snrs, np.exp((slope*np.log(snrs) + intercept)), '-.', c='black', linewidth=0.7)
ax.fill_between(snrs, 10**2,np.exp((slope*np.log(snrs) + intercept)), alpha=0.2)
ax.set_xscale('log')
ax.set_yscale('log')
ax.plot([0,92.1],[8,8],c=red, linewidth=linewidth)
ax.plot([0,358.4],[2,2], c=blue, linewidth=linewidth)
ax.plot([0,749.4],[0.96680069,0.96680069], c=green, linewidth=linewidth)
ax.plot([92.1,92.1],[0, 8], c=red, linewidth=linewidth)
ax.plot([358.4,358.4],[0,2], c=blue, linewidth=linewidth)
ax.plot([749.4,749.4],[0,0.96680069], c=green, linewidth=linewidth)
ax.set_xlabel('SNR of primary frequency')
ax.set_ylabel(r'$a\sin{i}/c$ scatter (s)')
ax.set_xlim(snrs[0], snrs[-1])
ax.set_ylim(None,10**2)
ax = axes[1]
periods = np.geomspace(10,1470,500)
stellar_mass = 1.8
worst_case = (mass_function(periods*u.day, 8*u.s).to(u.M_jup))**(1/3) * ((stellar_mass*u.M_sun).to(u.M_jup))**(2/3)
typical_case = (mass_function(periods*u.day, 2*u.s).to(u.M_jup))**(1/3) * ((stellar_mass*u.M_sun).to(u.M_jup))**(2/3)
best_case = (mass_function(periods*u.day, 1*u.s).to(u.M_jup))**(1/3) * ((stellar_mass*u.M_sun).to(u.M_jup))**(2/3)
ax.plot(periods, worst_case, label='SNR=50', c=red, linewidth=linewidth)
ax.plot(periods, typical_case, label='SNR=500', c=blue, linewidth=linewidth)
ax.plot(periods, best_case, label='SNR=1000', c=green, linewidth=linewidth)
ax.fill_between(periods, 75, 13, alpha=0.2, color='orange')
ax.set_xlabel(r'P$_{\rm orb}$ (d)')
ax.set_ylabel(r'Companion mass (M$_{\rm Jup}$)')
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_yticks([1,10,100])
ax.set_xlim(10,1470)
per_med = len(periods) // 2
ax.text(periods[per_med], worst_case[per_med].value, '8 s', rotation=-18, c=red)
ax.text(periods[per_med], typical_case[per_med].value-0.5, '2 s', rotation=-18, c=blue)
ax.text(periods[per_med], best_case[per_med].value-0.5, '1 s', rotation=-18, c=green)
axes[0].annotate('Detectable', (0.65,0.7), xycoords='axes fraction')
plt.savefig(overleaf_path + 'best_mass_range.pdf', dpi=300, bbox_inches='tight', pad_inches=0)
# -
| paper/notebooks/(5.1) SNR vs asini relations.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/NidhiChaurasia/Algo-Tree/blob/main/PythonProgramming.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="wwcbSdp4IpX1" outputId="0e167855-871d-4609-a17d-62912ffc08da"
a = 10
b = 20
print(a+b)
# + colab={"base_uri": "https://localhost:8080/"} id="FWfVHjaDKdZd" outputId="a31c6e38-b21e-456a-c18a-ab6fbe2a18f7"
a = 10
b = 20
if b>a:
print(b)
else:
print(a)
# + colab={"base_uri": "https://localhost:8080/"} id="F6Nekd8tLizA" outputId="4a6931b2-69be-4f33-c60b-c84eda7303c2"
h = 90
z = 8
d = (h+z)/2
print (d)
# + colab={"base_uri": "https://localhost:8080/"} id="_QGPIprPMmiC" outputId="25c22119-68fa-4618-c2c8-e666f108f82f"
a = int(input("Enter the value of your choice :"))
b = int(input("Enter the second value of your choice :a "))
# note: Python has no ++/-- operators; ++a and --a are just doubled unary signs, so they do not change the values
++a
--a
++b
--b
print(a-b)
print(a+b)
print(a*b)
print(a/b)
print(a%b)
# + colab={"base_uri": "https://localhost:8080/"} id="jzSu2ERLOttJ" outputId="5e1cf982-5486-4287-d8a1-ce3aed148a82"
a = 1
++a
print(a)
# + colab={"base_uri": "https://localhost:8080/"} id="x8H8-DiyQF10" outputId="c1915309-a84a-468a-e0bb-38e839f8ec32"
p = float(input("Enter initial principal balance :"))
r = float(input("Enter interest rate value :"))
t = float(input("Enter number of time periods elapsed :"))
amount = p*(1+r/100)**t
compound_interest = amount - p
print("Compound Interest",compound_interest)
#Another method to calculate Compound Interest
#def compound_interest(principal, rate, time):
#    amount = principal * (pow((1 + rate / 100), time))
#    CI = amount - principal
#    print("Compound interest is", CI)
#compound_interest(10000, 10.25, 5)
# + colab={"base_uri": "https://localhost:8080/"} id="iWQZe-iHYKa9" outputId="a2d50505-e58f-4143-df19-f033f9ebfcb6"
a = int(input("Enter the value :"))
b = "123"
b = int(b) #explicit type conversion in python
c = a+b
print(c)
z = 30.49 #implicit type conversion in python
k = 12
print(z+k)
# + colab={"base_uri": "https://localhost:8080/"} id="mnFXKHB_Zh8M" outputId="7bc22a5a-4946-4d2b-a72c-4c047833fd2b"
p = int(input("Enter the principal value :"))
r = int(input("Enter the rate value :"))
t = int(input("Enter the time :"))
simple_interest = p*r*t/100
print("Simple interest is :",simple_interest)
# + colab={"base_uri": "https://localhost:8080/"} id="P16poMdPaZsz" outputId="62947c2f-c8d9-4e27-a374-22ced5a6e787"
a = int(input("Enter the digit to print the table :"))
for i in range(1,11):
print(a,'*',i,'=', a*i)
# + colab={"base_uri": "https://localhost:8080/"} id="21CaBL0zcCa9" outputId="f3e5ea90-f033-4530-fadd-20696aca4af8"
n = int(input("Enter the no. to calculate the factorial of :"))
fact = 1
if n < 0:
print("FACTORIAL DOESN'T EXIST !")
elif n == 0:
print("FACTORIAL OF 0 IS 1")
else:
for i in range(1,n + 1):
fact = fact * i
print("The factorial of ",n,"is",fact)
# + colab={"base_uri": "https://localhost:8080/"} id="Mly5VhmRhRid" outputId="f24f6452-3f24-4961-b6a4-1073b9ab7eba"
#Area of the Circle
r = float(input("Enter radius of the circle :"))
pi = 3.142
area_ofcircle = pi*r**2
print("Area of circle is :",area_ofcircle)
| PythonProgramming.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" colab={} colab_type="code" id="G4KS5INhHjG0"
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
# Any results you write to the current directory are saved as output.
# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" colab={} colab_type="code" id="y4BPUIIYHjG4"
df = pd.read_csv('drugsComTrain_raw.csv')
# -
df.info()
# + _uuid="311c0a2fe59b15c7ed085f81baead6d7e9a3d51e" colab={} colab_type="code" id="HSdPe7-xHjG6" outputId="bbe306a1-d1fb-4033-86e0-8a9ea1920ff0"
df.head()
# + _uuid="bbfa1ef3057cd9bf56d4ede6a818928ca8af8369" colab={} colab_type="code" id="J8exDDaPHjHA" outputId="25a5dfd0-549a-4f67-bace-5b33c5a27189"
df.describe()
# + _uuid="92d9e86b7de0aebf486a99b81d0cf3f8305b29ea" colab={} colab_type="code" id="ws_4wAP6HjHJ" outputId="fbdb0a2c-0165-4b88-82e5-f5155c485ade"
#find a correlation between the rating and the useful count
df.plot.scatter(x='rating',y='usefulCount')
# + _uuid="47034544053d61ea4cdc46c83e7c9ee62cec70ab" colab={} colab_type="code" id="0l8ZghgDHjHN" outputId="fd9419cc-90a4-41bc-e9a0-17218426e3ff"
#get the top 15 reviews in the list based on usefulness count
df.nlargest(15,'usefulCount')
# + _uuid="20c6e247572e1b1766f9b4e740f775e3992f023f" colab={} colab_type="code" id="1cGLfqWlHjHP" outputId="1e99ddf2-15db-4320-d495-3da959c088b0"
#see the most used drugs to treat conditions
df['drugName'].value_counts()
# + _uuid="db5ccf04cc9cf2dc14dad1ea0d0cf74d6e6c0c6b" colab={} colab_type="code" id="VNakZbLsHjHS" outputId="1ffe7a7e-5b0f-4a66-8ab2-c9da72bbbd14"
#find the most common conditions that are treated
df['condition'].value_counts().nlargest(15)
# + _uuid="bc5bbb42df38f946dde8eb5de1576b523384448f" colab={} colab_type="code" id="XrsdsMU3HjHa" outputId="66ee202a-1fca-402b-bc01-f9fd0e87d48a"
# checking out the test data
# commonCdf2 = df2['condition'].value_counts().nlargest(15)
# commonConditions = pd.DataFrame(commonCdf2)
# commonConditions
# + _uuid="b1c5397404f83a49c0abb0cb56fac37f74f67d50" colab={} colab_type="code" id="itkQ_QJzHjHd"
#establish a dataframe to focus on birth control
birth_control_df = df[df.condition == 'Birth Control']
# -
birth_control_df.info()
# + _uuid="129b1d6742038539ab00301c3bc3944d2eaa7a6a" colab={} colab_type="code" id="3JbCj-6oHjHf"
#locate each instance of birth control use in our dataframe
birth_control_df.head()
# + _uuid="1c937ad1176281b4a78504ccdd8fa0904ccd74a1" colab={} colab_type="code" id="b7lCZegSHjHk" outputId="5c79d083-5944-4774-82b8-4c9b85d29605"
#get the number of times each drug was used for birth control
birth_control_df['drugName'].value_counts().nlargest(15)
# + _uuid="7443a37bcb66513d01f341396346a4fb026504b0" colab={} colab_type="code" id="UdG2_iUfHjHo" outputId="4f5a51d5-9b42-42a7-ea52-c6ee9676a4fa"
#dataframe to show the average satisfaction of using Etonogestrel for birth control (top choice of drug)
etonogestrelBCDf = birth_control_df[birth_control_df.drugName == 'Etonogestrel']
etonogestrelBCDf['rating'].mean()
# + _uuid="127a07c3529a2d20a95cf271f44b18a2da4cd60e" colab={} colab_type="code" id="NuiQDKuAHjHv" outputId="21f0689d-6ebc-4161-925d-1a2ea20918a7"
#find the top occurences of the drugs
df['drugName'].value_counts().nlargest(15)
# + _uuid="acfcdc05d1f57ea2728ebc2a8d100e228c8dbb1f" colab={} colab_type="code" id="hPIqD7jtHjH0" outputId="896facdb-c200-4518-bb91-c5e9c18a55a0"
def condition_func(condition, df):
if(type(condition) != str):
raise ValueError("The input should be a string")
condition_df = df.loc[df['condition'] == condition]
print(condition_df['drugName'].value_counts().nlargest(15))
condition_func('Birth Control', df)
# + colab={} colab_type="code" id="EJ1muZU4HjH3"
#Loop through the drug names
for drug_name in df.drugName.unique():
print(drug_name)
# -
# pd.DatetimeIndex(start=..., end=..., freq=...) was removed in newer pandas; pd.date_range is the equivalent
pd.date_range(start=min(df.date), end=max(df.date), freq='M')
# + colab={} colab_type="code" id="bx4cvopbHjH5"
df.index = pd.to_datetime(df.date)
drug_df = pd.DataFrame(index=pd.date_range(start=min(df.date), end=max(df.date), freq='M'))
for d_name in df.drugName.value_counts().nlargest(5).index:
temp = df[df.drugName == d_name]
drug_df[d_name] = temp.rating.resample('M').mean()
drug_df
# + colab={} colab_type="code" id="EwzoHw9PM9SO"
#Make the line graph of them
for c in drug_df.columns:
drug_df[c].plot(title=c, ylim=(0, 10))
plt.show()
# -
drug_df.plot(ylim=(0,10))
plt.show()
# + colab={} colab_type="code" id="vZZE7NDrQuyI"
#Make a graph of the mean rating of each year
df.rating.resample('A').mean().plot(ylim=(0,10))
plt.show()
# -
df[df.condition == "Birth Control"].review.nunique()
# +
from sklearn import model_selection, preprocessing, linear_model, naive_bayes, metrics, svm
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn import decomposition, ensemble
from sklearn.metrics import confusion_matrix
import pandas, xgboost, numpy, textblob, string
# -
def get_df(df, cond):
review = df[df.condition == cond].review[:500]
return pd.DataFrame({'text':review, 'label':cond})
# +
# we only take 500 reviews for each condition so the training time will decrease
train_df = pd.DataFrame(columns=['text', 'label'])
condition_counts = df.condition.value_counts()
for cond in list(df.condition.unique()):
    if cond in condition_counts and condition_counts[cond] > 100:
        # DataFrame.append was removed in newer pandas; pd.concat is the replacement
        train_df = pd.concat([train_df, get_df(df, cond)])
train_df.info()
# -
train_df['text'] = train_df.text.astype(str)
train_df['label'] = train_df.label.astype(str)
train_df.label.value_counts()
# +
# split the dataset into training and validation datasets
train_x, valid_x, train_y, valid_y = model_selection.train_test_split(train_df['text'], train_df['label'])
# label encode the target variable
encoder = preprocessing.LabelEncoder()
#The labels for the training set (Study guide answers)
train_y = encoder.fit_transform(train_y)
#The labels for the testing data set (Exam answers)
valid_y = encoder.fit_transform(valid_y)
# +
# create a count vectorizer object
count_vect = CountVectorizer(analyzer='word', token_pattern=r'\w{1,}')
count_vect.fit(train_df['text'])
# transform the training and validation data using count vectorizer object
#Every word in every review is encoded to a number and the counts are stored in an array:
#each row of the array corresponds to a review, and each entry is how many times that word occurs in it
#This is the data we are training on (like a study guide)
xtrain_count = count_vect.transform(train_x)
#This is the data we are trying to predict (like the test ?s)
xvalid_count = count_vect.transform(valid_x)
# +
# word level tf-idf
#tf-idf weights each word by its importance: the more documents a word appears in, the less weight it carries
tfidf_vect = TfidfVectorizer(analyzer='word', token_pattern=r'\w{1,}', max_features=5000)
tfidf_vect.fit(train_df['text'])
xtrain_tfidf = tfidf_vect.transform(train_x)
xvalid_tfidf = tfidf_vect.transform(valid_x)
# ngram level tf-idf
tfidf_vect_ngram = TfidfVectorizer(analyzer='word', token_pattern=r'\w{1,}', ngram_range=(2,3), max_features=5000)
tfidf_vect_ngram.fit(train_df['text'])
xtrain_tfidf_ngram = tfidf_vect_ngram.transform(train_x)
xvalid_tfidf_ngram = tfidf_vect_ngram.transform(valid_x)
# characters level tf-idf
tfidf_vect_ngram_chars = TfidfVectorizer(analyzer='char', token_pattern=r'\w{1,}', ngram_range=(2,3), max_features=5000)
tfidf_vect_ngram_chars.fit(train_df['text'])
xtrain_tfidf_ngram_chars = tfidf_vect_ngram_chars.transform(train_x)
xvalid_tfidf_ngram_chars = tfidf_vect_ngram_chars.transform(valid_x)
# +
#classifies is ML Algo we are using
#feature_vector_train training portion of the features of the data set (usally X_train)
#label for all of the features(feature_vector_label) (usually y_train)
#feature_vector_valid is the features we are testing our predictions off of (usually X_test)
#We use the 4 of those to train and test
def train_model(classifier, feature_vector_train, label, feature_vector_valid, is_neural_net=False):
# fit the training dataset on the classifier
classifier.fit(feature_vector_train, label)
# predict the labels on validation dataset
predictions = classifier.predict(feature_vector_valid)
if is_neural_net:
predictions = predictions.argmax(axis=-1)
#valid y is the answers to our predictions, stored elsewhere
return metrics.accuracy_score(predictions, valid_y)
# +
# Linear Classifier on Count Vectors
#Testing the predictions using different features
accuracy = train_model(linear_model.LogisticRegression(), xtrain_count, train_y, xvalid_count)
print("LR, Count Vectors: ", accuracy)
# -
#TODO testing the confusion matrix stuff, going to add to a fucntion
#TODO will call that function in train_model since that's where prediction is
#TODO test confusion matrix on valid_y, prediction)
# https://scikit-learn.org/stable/modules/generated/sklearn.metrics.confusion_matrix.html
#print(valid_y)
#print(xvalid_count)
#c = confusion_matrix(valid_y, xvalid_count)
# sanity check: the confusion matrix of the labels against themselves is purely diagonal
cm = confusion_matrix(valid_y, valid_y)
plt.imshow(cm, cmap='cool', interpolation='nearest')
plt.show()
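# +
# A possible continuation of the TODO above (a sketch, not part of the original analysis):
# fit one classifier explicitly so its validation predictions are available, then build the
# confusion matrix of true vs. predicted condition labels.
clf = linear_model.LogisticRegression()
clf.fit(xtrain_count, train_y)
valid_pred = clf.predict(xvalid_count)

cm_pred = confusion_matrix(valid_y, valid_pred)
plt.figure(figsize=(8, 8))
plt.imshow(cm_pred, cmap='cool', interpolation='nearest')
plt.xlabel('Predicted label')
plt.ylabel('True label')
plt.colorbar()
plt.show()
# -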
# +
# Linear Classifier on Word Level TF IDF Vectors
accuracy = train_model(linear_model.LogisticRegression(), xtrain_tfidf, train_y, xvalid_tfidf)
print("LR, WordLevel TF-IDF: ", accuracy)
# Linear Classifier on Ngram Level TF IDF Vectors
accuracy = train_model(linear_model.LogisticRegression(), xtrain_tfidf_ngram, train_y, xvalid_tfidf_ngram)
print("LR, N-Gram Vectors: ", accuracy)
# Linear Classifier on Character Level TF IDF Vectors
accuracy = train_model(linear_model.LogisticRegression(), xtrain_tfidf_ngram_chars, train_y, xvalid_tfidf_ngram_chars)
print("LR, CharLevel Vectors: ", accuracy)
| DS201/DS201DrugReviewsProj/DRUG_REVIEW_ML.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import shutil
import glob
import os
def move_glob(dst_path, pathname, recursive=True):
for p in glob.glob(pathname, recursive=recursive):
shutil.move(p, dst_path)
os.mkdir('temp/dir2')
move_glob('temp/dir2', 'temp/**/*.txt')
| notebook/shutil_move_glob.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:.conda-tensorflow]
# language: python
# name: conda-env-.conda-tensorflow-py
# ---
# +
from tensorflow import keras
import pandas as pd
import numpy as np
import re
import nltk
from nltk.corpus import stopwords
from numpy import array
from keras.preprocessing.text import one_hot
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers.core import Activation, Dropout, Dense
from keras.layers import Flatten
from keras.layers import GlobalMaxPooling1D
from keras.layers.embeddings import Embedding
from sklearn.model_selection import train_test_split
from keras.preprocessing.text import Tokenizer
# +
# from nltk import word_tokenize
# stop_words = set(stopwords.words('english'))
# def listtosen(l):
# new = ""
# for x in l:
# new+=x
# new+=' '
# return new
# def rem_stopwords(s):
# word_tokens = word_tokenize(s)
# filtered_sentence = [w for w in word_tokens if not w in stop_words]
# filtered_sentence = []
# for w in word_tokens:
# if w not in stop_words:
# filtered_sentence.append(w)
# sen = listtosen(filtered_sentence)
# return sen
# x = rem_stopwords("This is new")
# print(x)
# -
df = pd.read_csv('reviews.csv')
df.isnull().values.any() # checks if the data file has any null values
df.head()
def preprocess(text):
text = re.sub('<[^>]*>', '', text)
emoticons = re.findall('(?::|;|=)(?:-)?(?:\)|\(|D|P)', text.lower())
text = re.sub('[\W]+', ' ', text.lower()) +\
' '.join(emoticons).replace('-', '')
return text
from nltk import word_tokenize
X = []
sentences = list(df['reviews'])
for sen in sentences:
X.append(preprocess(sen))
#X.append(rem_stopwords(f))
y = df['sentiment']
# # Train( 80 ) Test( 20 ) Split
# +
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=42)
print(X_train[3])
# -
# # Embedding Layer ( For converting textual data into numeric data )
# * First layer in DL model in Keras
# +
tokenizer = Tokenizer(num_words=5000)
tokenizer.fit_on_texts(X_train)
X_train = tokenizer.texts_to_sequences(X_train)
X_test = tokenizer.texts_to_sequences(X_test)
print(len(X_train[1]))
print(len(X_test[1]))
# -
print(X_train[2])
# +
# padding process
vocab_size = len(tokenizer.word_index) + 1
maxlen = 100
X_train = pad_sequences(X_train, padding='post', maxlen=maxlen)
X_test = pad_sequences(X_test, padding='post', maxlen=maxlen)
# -
print(len(X_train[1]))
print(len(X_test[1]))
print(vocab_size) # number of unique words in dataset
# +
# using GloVe for creating feature matrix
from numpy import array
from numpy import asarray
from numpy import zeros
embeddings_dictionary = dict()
glove_file = open('glove.6B.300d.txt', encoding="utf8")
for line in glove_file:
records = line.split()
word = records[0]
vector_dimensions = asarray(records[1:], dtype='float32')
embeddings_dictionary [word] = vector_dimensions
glove_file.close()
# -
embedding_matrix = zeros((vocab_size, 300))
for word, index in tokenizer.word_index.items():
embedding_vector = embeddings_dictionary.get(word)
if embedding_vector is not None:
embedding_matrix[index] = embedding_vector
len(embedding_matrix)
# # Text Classification with Recurrent Neural Network (LSTM)
from keras.layers import LSTM
from keras.layers import Bidirectional
model = Sequential()
model.add(Embedding(vocab_size, 300, weights=[embedding_matrix], input_length=maxlen))
model.add(Bidirectional(LSTM(200)))
model.add(Dropout(0.3))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='adam',loss='binary_crossentropy',metrics=['acc'])
print(model.summary())
# +
history = model.fit(X_train, y_train, batch_size=128, epochs=7,verbose=1, validation_split=0.2)
score = model.evaluate(X_test, y_test, verbose=1)
# -
print("Test Score:", score[0])
print("Test Accuracy:", score[1])
val_acc = history.history['acc'][4]
# +
model.save('path_to_my_model.h5')

# Recreate the exact same model purely from the file
new_model = keras.models.load_model('path_to_my_model.h5')
# -
predictions = model.predict(X_test)
new_predictions = new_model.predict(X_test)
np.testing.assert_allclose(predictions, new_predictions, rtol=1e-6, atol=1e-6)
print(new_predictions)
print(X_train[2])
# +
import matplotlib.pyplot as plt
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train','test'], loc='upper left')
plt.show()
# +
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train','test'], loc='upper left')
plt.show()
# +
n = 24436
# wrap the review in a list so texts_to_sequences tokenizes it as one text of words
# (passing the bare string would iterate over its individual characters)
instance = tokenizer.texts_to_sequences([X[n]])
instance = pad_sequences(instance, padding='post', maxlen=maxlen)
print(instance)
# -
print(instance)
model.predict_proba(instance)
model.predict_classes(instance)
print(y[n])
df.sample()
len(instance[0])
| Bidirectional LSTM.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Web Scraping Dynamic Javascript Web Page
# ## Environment setup
# Libraries:
# - `pip install BeautifulSoup4`
# - `pip install selenium`
# - `pip install pandas`
#
# Browser: Firefox or Chrome with Chromium
# ## Scraping using Selenium with geckodriver
# +
# import libraries
import urllib.request
from bs4 import BeautifulSoup
from selenium import webdriver
import time
import pandas as pd
from lxml import html
# -
# specify the url
# I will obtain data on the newest and hottest books at Chapters Indigo.
urlpage = 'https://www.chapters.indigo.ca/en-ca/books/new-and-hot/'
print(urlpage)
# +
# run firefox webdriver from executable path of your choice
# if geckodriver is not in an executable path
# driver = webdriver.Firefox(executable_path = 'GECKODRIVER_PATH')
# if geckodriver is in an executable path
driver = webdriver.Firefox()
# -
# get web page
driver.get(urlpage)
# find elements by xpath
# the number of results depends on the number of loaded elements
results = driver.find_elements_by_xpath('//*[@id="grid-view-product-list-7731"]//*[@class="product-list__product-details--grid"]')
print('Number of results', len(results))
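
# Note: the find_elements_by_* / find_element_by_* helpers used in this notebook were removed
# in newer Selenium releases (4.3+). The locator-based call below is equivalent (it also exists
# on older Selenium); it is only an alternative sketch re-using the same XPath as above.
from selenium.webdriver.common.by import By

results_alt = driver.find_elements(By.XPATH, '//*[@id="grid-view-product-list-7731"]//*[@class="product-list__product-details--grid"]')
print('Number of results (By.XPATH)', len(results_alt))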
# +
# loop over results and store in array
data = []
for res in results:
title = res.find_element_by_tag_name('h4').find_element_by_tag_name('a').text
author = res.find_element_by_tag_name('p').find_element_by_tag_name('a').text
# get the discounted price
price = res.find_element_by_class_name('product-list__price--grid').text
price = price[1:] # take off dollar sign
data.append({"title" : title, "author" : author, "price" : price})
# -
data
# close driver
driver.quit()
# +
# save to pandas dataframe
df = pd.DataFrame(data)
# reorder the columns
df = df[['title', 'author', 'price']]
# -
df
# write to csv without index
STORE_DATA_PATH = ''
df.to_csv(STORE_DATA_PATH + 'books.csv', index=False)
| dynamic-web/dynamic-JS-scraping-with-selenium.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 4. Custom Model
# This guide will step through the process of implementing a custom model that can be used in NeuRec. It will show example code snippets that can help build a custom model, however, further development is needed to successfully complete the model.
#
# The process of implementing a custom model involves two main steps: creating a model class; and, adding the model to NeuRec. The following sections discusses these two steps in detail.
#
# ## Creating a Model Class
# This step involves creating a class in the **model** folder. This class must extend the **AbstractRecommender** class, also in the model folder, and implement the required functions, namely:
#
# * \_\_init\_\_()
# * build_graph()
# * train_model()
# * predict()
#
# > The class may have additional functions on top of these required functions.
#
# Additionally, the **properties** instance variable is required in the custom model.
#
# An example of a shell custom model is shown below:
#
# ```python
# from neurec.model.AbstractRecommender import AbstractRecommender
#
# class Custom(AbstractRecommender):
# properties = []
#
# def __init__(self, **kwds):
# super().__init__(**kwds)
# pass
#
# def build_graph(self):
# pass
#
# def train_model(self):
# pass
#
# def predict(self):
# pass
# ```
#
# The following sections will discuss each of the requirements in turn, starting with the **properties** instance variable.
#
# ### properties
# This variable defines the list of properties that the model requires to run. For example:
#
# ```python
# properties = [
# "learning_rate",
# "embedding_size",
# "learner",
# "topk",
# "epochs",
# "eps",
# "adv",
# "adver",
# "adv_epoch",
# "reg",
# "reg_adv",
# "batch_size",
# "verbose",
# "loss_function"
# ]
# ```
#
# These properties will then be read from the properties file when the model is instantiated, by the \_\_init\_\_() function in the AbstractRecommender class, and made available in the variable **self.conf**.
#
# ### \_\_init\_\_()
# This function can be used to initialise any settings that are required by the model. It must first call the parent \_\_init\_\_() function, which will load the properties, discussed above. Additionally, it will load the dataset into **self.dataset**.
#
# ```python
# from neurec.evaluation import Evaluate
# from neurec.model.AbstractRecommender import AbstractRecommender
# from neurec.util import reader, learner
# import numpy as np
# import tensorflow as tf
#
# class Custom(AbstractRecommender):
#     def __init__(self, **kwds):
# super().__init__(**kwds)
#
# # Load necessary settings from properties file
# self.layers = self.conf["layers"]
# self.learning_rate = self.conf["learning_rate"]
# self.learner = self.conf["learner"]
# self.loss_function = self.conf["loss_function"]
#         self.num_epochs = self.conf["epochs"]
#         self.batch_size = self.conf["batch_size"]
# # etc...
#
# # ...
# ```
#
# > If you need to log to the console, you can use the **self.logger** variable, further discussed [here](https://docs.python.org/3.6/library/logging.html)
#
# ### build_graph()
# The build_graph() function is called by NeuRec before training the model and, therefore, can be used to create the network. Here, you can setup the loss and optimiser functions for the model, which can then be used while training the model:
#
# ```python
# from neurec.evaluation import Evaluate
# from neurec.model.AbstractRecommender import AbstractRecommender
# from neurec.util import reader, learner
# import numpy as np
# import tensorflow as tf
#
# class Custom(AbstractRecommender):
# # ...
#
# def build_graph(self):
# self.loss = learner.pairwise_loss(self.loss_function,result) + self.reg_mf * ( tf.reduce_sum(tf.square(p1)) \
# + tf.reduce_sum(tf.square(q2)) + tf.reduce_sum(tf.square(q1)))
#
# self.optimizer = learner.optimizer(self.learner, self.loss, self.learning_rate)
#
# # ...
# ```
#
# ### train_model()
# This function is called by NeuRec to train the model. You can use the data_gen module to generate the necessary data for training, which can be found in the **util** folder. Additionally, the Evaluate class can be used here to establish the performance of the model, in this example after every epoch.
#
# ```python
# from neurec.evaluation import Evaluate
# from neurec.model.AbstractRecommender import AbstractRecommender
# from neurec.util import reader, learner, data_gen
# import numpy as np
# import tensorflow as tf
#
# class Custom(AbstractRecommender):
# # ...
#
# def train_model(self):
# for epoch in range(self.num_epochs):
# user_input, item_input_pos, item_input_neg = data_gen._get_pairwise_all_data(self.dataset)
#
# num_training_instances = len(user_input)
#
# for num_batch in np.arange(int(num_training_instances/self.batch_size)):
# bat_users, bat_items_pos, bat_items_neg = data_gen._get_pairwise_batch_data(user_input, item_input_pos, item_input_neg, num_batch, self.batch_size)
#
# Evaluate.test_model(self,self.dataset)
#
# # ...
# ```
#
# ### predict()
# The predict() function is used by the Evaluate classes to establish the performance of the model. An example of a predict function is given below:
#
# ```python
# from neurec.evaluation import Evaluate
# from neurec.model.AbstractRecommender import AbstractRecommender
# from neurec.util import reader, learner
# import numpy as np
# import tensorflow as tf
#
# class Custom(AbstractRecommender):
# # ...
#
#     def predict(self, user_id, items):
# users = np.full(len(items), user_id, dtype=np.int32)
# return self.sess.run(self.output, feed_dict={self.user_input: users, self.item_input: items})
#
# # ...
# ```
#
# ## Adding to NeuRec
# After creating a custom model, NeuRec needs to be updated so that it knows the model is available. To do this, the model needs to be added to **data/models.py** as follows:
#
# ```python
# from neurec.model.Custom import Custom
#
# models = {
# "custom": Custom
# }
# ```
#
# > Make sure to set the properties file to use the new model, discussed in 3. Properties
| docs/4. Custom Model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Exponential, Log
#
# 1. np.exp() : $e^{x}$
# 2. np.exp2() : $2^{x}$
# 3. np.log() : $\ln x$ (natural logarithm)
# 4. np.log2() : $\log_{2}x$
# 5. np.log10() : $\log_{10}x$
import numpy as np
import matplotlib.pyplot as plt

np.exp(1)
np.exp2(1)
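# quick sanity check of the log variants listed above (np.log is the natural logarithm, base e);
# the last value uses the change-of-base identity, the same trick as the log5 example below
np.log(np.e), np.log2(8), np.log10(1000), np.log(125) / np.log(5)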
# +
# %matplotlib inline
range_ = np.linspace(0,10,1000)
exp = np.exp(range_)
exp2 = np.exp2(range_)
log = np.log(range_)
log2 = np.log2(range_)
log5 = np.log(range_) / np.log(5) # base = 5 (change of base)
plt.figure(figsize=(15,10))
plt.ylim(-10,10)
plt.plot(exp, label='exp')
plt.plot(exp2, label='exp2')
plt.plot(log, label='log')
plt.plot(log2, label='log2')
plt.plot(log5, label='log5')
plt.legend()
| 1.Study/2. with computer/4.Programming/2.Python/9. Numpy/ch3/Chapter3 Mathematical Functions_Notebook3 Exponents and Logarithms.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import pickle, os, torch
import os.path as osp
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn.metrics import r2_score
from mpl_toolkits.axes_grid1 import make_axes_locatable
# +
from matplotlib.ticker import NullFormatter
from scipy import stats
from matplotlib.ticker import NullFormatter
from scipy import stats
def multi_plot(yss, preds):
fig, ax = plt.subplots(1, 3, figsize=(20, 5.5))
ax = ax.flatten()
# target=r"log($SFR/M_{\odot}/yr$)"
targets=[r"$log(v_{disk}/km/s)$", r"log($M_{cold}/M_{\odot}$)", r"log($SFR/M_{\odot}/yr$)"]
# target=r"$v_{disk}$"
for i in range(3):
target=targets[i]
ys = yss[:,i]
pred = preds[:,i]
nullfmt = NullFormatter() # no labels
# definitions for the axes
left, width = 0.15, 0.67
bottom, height = 0.0, 0.75
bottom_h = bottom +height
left_h = left + width
# rect_Cbar = [0, bottom, 0.1, height]
rect_scatter = [left, bottom, width, height]
rect_histx = [left, bottom_h, width, 0.15]
rect_histy = [left_h, bottom, 0.15+0.07, height]
# rect_cbar = [left_h+0.15+0.06, bottom, 0.05, height]
# start with a rectangular Figure
ax[i].set_axis_off()
# axCbar = plt.axes(rect_Cbar)
axHist = ax[i].inset_axes(rect_scatter)
axHistx = ax[i].inset_axes(rect_histx)
axHisty = ax[i].inset_axes(rect_histy)
# axCbar = ax[i][j].inset_axes(rect_cbar)
# no labels
axHistx.xaxis.set_major_formatter(nullfmt)
axHisty.yaxis.set_major_formatter(nullfmt)
axHistx.yaxis.set_major_formatter(nullfmt)
axHisty.xaxis.set_major_formatter(nullfmt)
bins=50
l=0.0
n_contour=4
# the scatter plot:
vals, x, y, hist =axHist.hist2d( ys, pred,bins=bins, range=[np.percentile(np.hstack([ys,pred]), [0+l,100-l]),
np.percentile(np.hstack([ys,pred]), [0+l,100-l])], norm=mpl.colors.LogNorm(), cmap=mpl.cm.viridis)
X, Y = np.meshgrid((x[1:]+x[:-1])/2, (y[1:]+y[:-1])/2)
axHist.contour(X,Y, np.log(vals.T+1), levels=n_contour, colors='white')
axHist.plot([min(ys),max(ys)],[min(ys),max(ys)], 'k--', label='Perfect correspondance')
axHist.set(xlabel=f'SAM {target}',ylabel=f'GNN {target}')
axHist.xaxis.label.set_fontsize(13)
axHist.yaxis.label.set_fontsize(13)
if i==0:
axHist.legend(fontsize=12, loc='upper left')
X, Y = X[0], Y.T[0]
axHist.set_xlim((min(X), max(X)))
axHist.set_ylim((min(Y), max(Y)))
yhat=r'$\hat{y}$'
#calculate metrics
pct=np.sum(np.abs(ys-pred)<0.2)/len(ys-pred)
r2=r2_score(ys,pred)
rho = np.corrcoef(ys,pred)[0,1]
print('bias', np.mean(ys-pred))
print('std', np.std(ys-pred))
print('rho', rho)
print('r2', r2)
print('pct',pct)
xt=0.45
yt=0.2
dy=0.07
font = {'weight': 'normal',
'size': 15}
axHist.text(xt,yt, f'Bias : {np.mean(ys-pred):.2f} dex', fontdict=font, transform=axHist.transAxes)
axHist.text(xt,yt-dy, r'$\sigma$ : '+f'{np.std(ys-pred):.3f} dex', fontdict=font, transform=axHist.transAxes)
axHist.text(xt,yt-2*dy, r'Pearson: '+f'{rho:.2f}', fontdict=font, transform=axHist.transAxes)
# axHist.text(xt,yt-3*dy, r'$R^2$: '+f'{r2:.3f}', fontdict=font, transform=axHist.transAxes)
# axHist.text(xt,yt-4*dy, '% < 0.2 dex: '+f'{pct*100:.1f}', fontdict=font, transform=axHist.transAxes)
# axHistx.hist(ys[:,n], bins=bins, histtype='step', density=1)
# axy=axHisty.hist(pred[:,n], bins=bins, histtype='step', density=1, orientation='horizontal')
ys_kde = stats.gaussian_kde(ys, 0.1)
pred_kde = stats.gaussian_kde(pred, 0.1)
axHistx.plot(X, ys_kde(X), 'k--', label=f'SAM')
axHisty.plot(pred_kde(Y), Y, "k-.", label=f'GNN')
axHistx.legend(fontsize=12)
axHisty.legend(loc='upper left', bbox_to_anchor=(0.,1.15), fontsize=12)
font = {'family' : 'Serif',
'weight' : 'normal',
'size' : 14}
        mpl.rc('font', **font)
axHistx.set(title=f'SAM-GNN {target}', )
axHistx.set_xlim(axHist.get_xlim())
axHisty.set_ylim(axHist.get_ylim())
divider = make_axes_locatable(axHisty)
cax = divider.append_axes("right", size="15%", pad=0.18)
divider = make_axes_locatable(axHist)
# cax = divider.append_axes("left", size="15%", pad=0.18)
# Plot vertical colorbar
plt.colorbar(hist, cax=cax)
# plt.show()
# plt.colorbar(hist, ax=axCbar)
# plt.show()
# fig.tight_layout()
# plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.1, hspace=None)
return fig
# -
# this is the correlated run
folder = 'results_final_Gauss4d_020222'
run='Sage_vlarge_all_4t_z0.0_quantile_raw_dszkxp_3_6'
pointer=osp.expanduser(f'~/../../scratch/gpfs/cj1223/GraphResults/{folder}/{run}')
results=pickle.load(open(osp.join(pointer,'result_dict.pkl'), 'rb'))
config=pickle.load(open(osp.join(pointer,'construct_dict.pkl'), 'rb'))
ys, pred = results['low_ys'], results['low_pred']
# mstar_ys, mstar_pred = ys[:,0], pred[:,0]
mcold_ys, mcold_pred = ys[:,2]+9, pred[:,2]+9
# this is the correlated run
folder = 'results_final_Gauss4d_310122'
run='Sage_vlarge_all_4t_z0.0_quantile_raw_seqqhl_5_6'
pointer=osp.expanduser(f'~/../../scratch/gpfs/cj1223/GraphResults/{folder}/{run}')
results=pickle.load(open(osp.join(pointer,'result_dict.pkl'), 'rb'))
config=pickle.load(open(osp.join(pointer,'construct_dict.pkl'), 'rb'))
ys, pred = results['low_ys'], results['low_pred']
# mstar_ys, mstar_pred = ys[:,0], pred[:,0]
vdisk_ys, vdisk_pred = ys[:,1], pred[:,1]
sfr_ys, sfr_pred = ys[:,3], pred[:,3]
ys = np.vstack([ vdisk_ys, mcold_ys, sfr_ys]).T
pred = np.vstack([vdisk_pred, mcold_pred, sfr_pred]).T
# +
import matplotlib
font = {'family' : 'Serif',
'weight' : 'normal',
'size' : 16}
matplotlib.rc('font', **font)
fig=multi_plot(ys, pred)
# -
fig.savefig('../paper_figures/performance_others.png', bbox_inches='tight')
plt.plot(ys[:,1]-pred[:,1],ys[:,2]-pred[:,2], 'ro')
np.corrcoef([ys[:,1]-pred[:,1],ys[:,2]-pred[:,2]])
res=ys-pred
np.corrcoef(res.T)
| analysis/GNN_plots_all_corr.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Course «Data Analysis Algorithms»
# ## Lesson 6. Gradient Boosting (AdaBoost)
# ### Homework for Lesson 5
# +
from sklearn.tree import DecisionTreeRegressor
from sklearn import model_selection
from sklearn.datasets import load_diabetes
import numpy as np
import matplotlib.pyplot as plt
# -
def gb_predict(X, trees_list, coef_list, eta):
    # The gradient boosting implemented here is initialised with zero predictions,
    # so every tree in trees_list is a correction whose prediction is added with step eta
return np.array([sum([eta* coef * alg.predict([x])[0] for alg, coef in zip(trees_list, coef_list)]) for x in X])
def mean_squared_error(y_real, prediction):
return (sum((y_real - prediction)**2)) / len(y_real)
def bias(y, z):
return (y - z)
def gb_fit(n_trees, max_depth, X_train, X_test, y_train, y_test, coefs, eta):
    # The trees will be stored in a list
trees = []
    # Record the errors on the training and test sets at every iteration
train_errors = []
test_errors = []
for i in range(n_trees):
tree = DecisionTreeRegressor(max_depth=max_depth, random_state=42)
        # the boosting is initialised with a starting algorithm that returns zero,
        # so the first tree is simply fit on the data and appended to the list
if len(trees) == 0:
            # fit the first tree on the training set
tree.fit(X_train, y_train)
train_errors.append(mean_squared_error(y_train, gb_predict(X_train, trees, coefs, eta)))
test_errors.append(mean_squared_error(y_test, gb_predict(X_test, trees, coefs, eta)))
else:
            # Get the predictions of the current composition
target = gb_predict(X_train, trees, coefs, eta)
            # from the second tree on, fit to the residual (bias)
tree.fit(X_train, bias(y_train, target))
train_errors.append(mean_squared_error(y_train, gb_predict(X_train, trees, coefs, eta)))
test_errors.append(mean_squared_error(y_test, gb_predict(X_test, trees, coefs, eta)))
trees.append(tree)
return trees, train_errors, test_errors
def evaluate_alg(X_train, X_test, y_train, y_test, trees, coefs, eta):
train_prediction = gb_predict(X_train, trees, coefs, eta)
    print(f'Error of the ensemble of {n_trees} trees with depth {max_depth} \
and step {eta} on the training set: {mean_squared_error(y_train, train_prediction)}')
    test_prediction = gb_predict(X_test, trees, coefs, eta)
    print(f'Error of the ensemble of {n_trees} trees with depth {max_depth} \
and step {eta} on the test set: {mean_squared_error(y_test, test_prediction)}')
def get_error_plot(n_trees, train_err, test_err):
plt.xlabel('Iteration number')
plt.ylabel('MSE')
plt.xlim(0, n_trees)
plt.plot(list(range(n_trees)), train_err, label='train error')
plt.plot(list(range(n_trees)), test_err, label='test error')
plt.legend(loc='upper right')
plt.show()
X, y = load_diabetes(return_X_y=True)
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.25)
n_trees = 10
coefs = [1] * n_trees
max_depth = 3
eta = 1
trees, train_errors, test_errors = gb_fit(n_trees, max_depth, X_train, X_test, y_train, y_test, coefs, eta)
evaluate_alg(X_train, X_test, y_train, y_test, trees, coefs, eta)
get_error_plot(n_trees, train_errors, test_errors)
# #### Task 1
# For the gradient boosting model implemented in the lesson materials, plot the error as a function of the number of trees in the ensemble and of the maximum tree depth.
# Draw conclusions about how the error depends on these parameters.
# Rewrite the evaluate_alg function so that it returns the error values
def evaluate_alg_mod(X_train, X_test, y_train, y_test, trees, coefs, eta):
train_prediction = gb_predict(X_train, trees, coefs, eta)
error_train = mean_squared_error(y_train, train_prediction)
test_prediction = gb_predict(X_test, trees, coefs, eta)
error_test = mean_squared_error(y_test, test_prediction)
return error_train, error_test
def get_error_plot_mod(n, errs, labels, x_label, y_label):
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.xlim(0, n)
for i in range(len(errs)):
plt.plot(list(range(n)), errs[i], label=labels[i])
plt.legend(loc='upper right')
plt.show()
# In a loop, we build the plots for different numbers of trees and different maximum depths.
# Compute for eta = 1
# +
eta = 1
errs_train = []
errs_test = []
labels = []
for max in range(1, 11):
labels.append(f'max depth {max}')
errors_train = []
errors_test = []
for n in range(1, 11):
coefs = [1] * n
trees, train_errors, test_errors = gb_fit(n, max, X_train, X_test, y_train, y_test, coefs, eta)
error_train, error_test = evaluate_alg_mod(X_train, X_test, y_train, y_test, trees, coefs, eta)
errors_train.append(error_train)
errors_test.append(error_test)
errs_train.append(errors_train)
errs_test.append(errors_test)
print('Training data')
get_error_plot_mod(n, errs_train, labels, 'Number of trees', 'MSE')
print('Test data')
get_error_plot_mod(n, errs_test, labels, 'Number of trees', 'MSE')
# -
# Error vs. number of trees for "eta" = 1 and "max depth" = 1
get_error_plot(10, errs_train[0], errs_test[0])
# Error vs. number of trees for "eta" = 1 and "max depth" = 2
get_error_plot(10, errs_train[1], errs_test[1])
# Error vs. number of trees for "eta" = 1 and "max depth" = 3
get_error_plot(10, errs_train[2], errs_test[2])
# Error vs. number of trees for "eta" = 1 and "max depth" = 4
get_error_plot(10, errs_train[3], errs_test[3])
# Error vs. number of trees for "eta" = 1 and "max depth" = 5
get_error_plot(10, errs_train[4], errs_test[4])
# Error vs. number of trees for "eta" = 1 and "max depth" = 6
get_error_plot(10, errs_train[5], errs_test[5])
# Error vs. number of trees for "eta" = 1 and "max depth" = 7
get_error_plot(10, errs_train[6], errs_test[6])
# Error vs. number of trees for "eta" = 1 and "max depth" = 8
get_error_plot(10, errs_train[7], errs_test[7])
# Error vs. number of trees for "eta" = 1 and "max depth" = 9
get_error_plot(10, errs_train[8], errs_test[8])
# Error vs. number of trees for "eta" = 1 and "max depth" = 10
get_error_plot(10, errs_train[9], errs_test[9])
# Conclusion: as the tree depth increases, the error decreases, but overfitting becomes stronger.
# Compute for eta = 0.1
# +
eta = 0.1
errs_train = []
errs_test = []
labels = []
for max in range(1, 11):
labels.append(f'max depth {max}')
errors_train = []
errors_test = []
for n in range(1, 11):
coefs = [1] * n
trees, train_errors, test_errors = gb_fit(n, max, X_train, X_test, y_train, y_test, coefs, eta)
error_train, error_test = evaluate_alg_mod(X_train, X_test, y_train, y_test, trees, coefs, eta)
errors_train.append(error_train)
errors_test.append(error_test)
errs_train.append(errors_train)
errs_test.append(errors_test)
print('Training data')
get_error_plot_mod(n, errs_train, labels, 'Number of trees', 'MSE')
print('Test data')
get_error_plot_mod(n, errs_test, labels, 'Number of trees', 'MSE')
# -
# Error vs. number of trees for "eta" = 0.1 and "max depth" = 1
get_error_plot(10, errs_train[0], errs_test[0])
# Error vs. number of trees for "eta" = 0.1 and "max depth" = 2
get_error_plot(10, errs_train[1], errs_test[1])
# Error vs. number of trees for "eta" = 0.1 and "max depth" = 3
get_error_plot(10, errs_train[2], errs_test[2])
# Error vs. number of trees for "eta" = 0.1 and "max depth" = 4
get_error_plot(10, errs_train[3], errs_test[3])
# Error vs. number of trees for "eta" = 0.1 and "max depth" = 5
get_error_plot(10, errs_train[4], errs_test[4])
# Error vs. number of trees for "eta" = 0.1 and "max depth" = 6
get_error_plot(10, errs_train[5], errs_test[5])
# Error vs. number of trees for "eta" = 0.1 and "max depth" = 7
get_error_plot(10, errs_train[6], errs_test[6])
# Error vs. number of trees for "eta" = 0.1 and "max depth" = 8
get_error_plot(10, errs_train[7], errs_test[7])
# Error vs. number of trees for "eta" = 0.1 and "max depth" = 9
get_error_plot(10, errs_train[8], errs_test[8])
# Error vs. number of trees for "eta" = 0.1 and "max depth" = 10
get_error_plot(10, errs_train[9], errs_test[9])
# Conclusion: as the tree depth increases, the error decreases, but overfitting becomes stronger.
# Compute for eta = 0.01
# +
eta = 0.01
errs_train = []
errs_test = []
labels = []
for max in range(1, 11):
labels.append(f'max depth {max}')
errors_train = []
errors_test = []
for n in range(1, 11):
coefs = [1] * n
trees, train_errors, test_errors = gb_fit(n, max, X_train, X_test, y_train, y_test, coefs, eta)
error_train, error_test = evaluate_alg_mod(X_train, X_test, y_train, y_test, trees, coefs, eta)
errors_train.append(error_train)
errors_test.append(error_test)
errs_train.append(errors_train)
errs_test.append(errors_test)
print('Training data')
get_error_plot_mod(n, errs_train, labels, 'Number of trees', 'MSE')
print('Test data')
get_error_plot_mod(n, errs_test, labels, 'Number of trees', 'MSE')
# -
# Error vs. number of trees for "eta" = 0.01 and "max depth" = 1
get_error_plot(10, errs_train[0], errs_test[0])
# Error vs. number of trees for "eta" = 0.01 and "max depth" = 2
get_error_plot(10, errs_train[1], errs_test[1])
# Error vs. number of trees for "eta" = 0.01 and "max depth" = 3
get_error_plot(10, errs_train[2], errs_test[2])
# Error vs. number of trees for "eta" = 0.01 and "max depth" = 4
get_error_plot(10, errs_train[3], errs_test[3])
# Error vs. number of trees for "eta" = 0.01 and "max depth" = 5
get_error_plot(10, errs_train[4], errs_test[4])
# Error vs. number of trees for "eta" = 0.01 and "max depth" = 6
get_error_plot(10, errs_train[5], errs_test[5])
# Error vs. number of trees for "eta" = 0.01 and "max depth" = 7
get_error_plot(10, errs_train[6], errs_test[6])
# Error vs. number of trees for "eta" = 0.01 and "max depth" = 8
get_error_plot(10, errs_train[7], errs_test[7])
# Error vs. number of trees for "eta" = 0.01 and "max depth" = 9
get_error_plot(10, errs_train[8], errs_test[8])
# Error vs. number of trees for "eta" = 0.01 and "max depth" = 10
get_error_plot(10, errs_train[9], errs_test[9])
# Conclusion: as the tree depth increases, the error decreases, but overfitting becomes stronger.
# Overall, as the depth and the number of trees increase, the error decreases but overfitting intensifies; moreover, with a smaller eta the error stays large when there are few, shallow trees and decreases monotonically as the number of trees and the tree depth grow.
| Lesson 6.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: mcvine
# language: python
# name: mcvine
# ---
import sys, os
import numpy as np
from matplotlib import pyplot as plt
# +
parent_dir = os.path.abspath(os.pardir)
libpath = os.path.join(parent_dir, 'c3dp_source')
outpath = os.path.join(parent_dir, 'out')
mantid_path = os.path.join (parent_dir, 'mantid')
resultPath = os.path.join (parent_dir, 'results')
if not libpath in sys.path:
sys.path.insert(0, libpath)
# +
cell_name = 'empty_Clampcell_without_sample_exp'
cell_sample_name = 'Clampcell_with_sample_exp_b4'
cell_sample_colli_name = 'SNAP_43682_masked.nxs'
cell_sample_colli = os.path.join (mantid_path, cell_sample_colli_name)
cell_with_sample = os.path.join (mantid_path, cell_sample_name)
only_cell = os.path.join (mantid_path, cell_name)
# +
import masking_nexus_givenKernel as mask
masked_template = 'coll_exp_masked.nxs'
masked_onlycell_path = os.path.join(mantid_path, '{}_masked.nxs'.format(cell_name))
masked_cell_sample_path = os.path.join(mantid_path, '{}_masked.nxs'.format(cell_sample_name))
masked_template_path = os.path.join(mantid_path, '{}'.format(masked_template) )
mask.masking(cell_with_sample, masked_template_path, masked_cell_sample_path)
mask.masking(only_cell, masked_template_path, masked_onlycell_path)
# -
import reduce_nexasdata_using_mantid as red
import normalization_by_area as nrm
step = 0.01
# +
# d-spacing binning [min, step, max]; these reductions are used by the plots below
binning = [0.5, step, 4.]
d_onlyCell, I_onlyCell, error_onlyCell = red.mantid_reduction(masked_onlycell_path, binning)
d_cell_sample, I_cell_sample, error_cell_sample = red.mantid_reduction(masked_cell_sample_path, binning)
# -
d_colli, I_colli, error_colli = red.mantid_reduction(cell_sample_colli, binning)
# +
plt.figure()
plt.plot (d_onlyCell, I_onlyCell, label ="only Cell")
# plt.plot (d_cell_sample, I_cell_sample, label ="cell+sample")
# plt.plot (d_exp, I_exp, label ="exp")
plt.legend()
# plt.errorbar (d_exp, I_exp, error_exp)
plt.xlim(1,4)
plt.xlabel('d_spacing ($\AA$)')
plt.ylabel('Intensity (arbitrary units)')
plt.show()
# +
plt.figure()
# plt.plot (d_onlyCell, I_onlyCell*270.+800, label ="only Cell")
plt.plot (d_cell_sample, I_cell_sample, label ="cell+sample")
# plt.plot (d_exp, I_exp, label ="exp")
plt.legend()
# plt.errorbar (d_exp, I_exp, error_exp)
plt.xlim(1,4)
plt.xlabel('d_spacing ($\AA$)')
plt.ylabel('Intensity (arbitrary units)')
plt.show()
# -
cellSample_minus_cell_name = 'cellSi_Minus_cell'
cellSample_minus_cell =np.loadtxt( os.path.join (resultPath, cellSample_minus_cell_name), delimiter=",")
d_minusCell = cellSample_minus_cell[:,0]
I_minusCell = cellSample_minus_cell[:,1]
error_minusCell = cellSample_minus_cell[:,2]
# %matplotlib inline
plt.figure(figsize=(15,7))
plt.errorbar (d_minusCell, (I_minusCell), error_minusCell, label ="minus cell")
# plt.errorbar (d_cell_sample, abs(I_cell_sample-I_onlyCell*255.), abs(error_cell_sample-error_onlyCell*255.), label ="cell+sample-cell")
# plt.errorbar (d_colli, (I_colli*2)+100, error_colli, label ="withColli")
plt.plot (d_colli, (I_colli/30000)-.21, label ="withColli")
# plt.plot (d_exp, I_exp, label ="exp")
# plt.legend()
# plt.errorbar (d_exp, I_exp, error_exp)
plt.xlim(1.035,3.5)
plt.ylim(-0.6, 1.35)
plt.xlabel('d_spacing ($\AA$)')
plt.ylabel('Intensity (arbitrary units)')
plt.show()
| notebooks/comparison_experiment_difference_cellSample_Minus_cell_correct_and_withCOLL_manipulatre.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# Paper explaining CenterNet: [Objects as Points](https://arxiv.org/abs/1904.07850)
# Paper explaining Spatial CNN: [Spatial As Deep: Spatial CNN for Traffic Scene Understanding](https://arxiv.org/abs/1712.06080)
#
# Inverse perspective mapping is briefly described here: [3D-LaneNet](https://arxiv.org/abs/1811.10203)
#
| notebooks/references.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <h1 id="tocheading">Contents</h1>
# <div id="toc"></div>
# + language="javascript"
# $.getScript('https://kmahelona.github.io/ipython_notebook_goodies/ipython_notebook_toc.js')
# -
from IPython.display import Image
# # Python Review
#
# ## Python Program Structure
#
# - import statements
# - statements
# - functions
# - classes (we have not introduced these yet)
#
# ## Python Program Execution
#
# ### How python program is executed?
# The following steps typically happen behind the scenes:
# - the Python interpreter translates the source file foo.py into byte code
# - the byte code is sent to the Python virtual machine to run
Image("python_execution.png")
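# The standard-library `dis` module lets you inspect the byte code the interpreter generates
# for a function before the virtual machine runs it (a small illustration):
# +
import dis
def add(a, b):
    return a + b
dis.dis(add)
# -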
# ### How to trigger python execution?
# - run in terminal: python foo.py
# - run in IDE (pycharm)
# - run interactively in jupyter notebook
# - run step by step in a debugger
#
# Note: no matter what, a python virtual machine is created to run byte code
# ### Python Execution Flow
# - generally from top to bottom
# - some exceptions
# - can skip a block of code. if/else branches
# - can execute a block of code many times (loops)
# - can skip function body
# ### Two ways of execution
# - run as main program
# - imported as a module
# !cat square.py
# !python square.py
# + active=""
# mingyu@mingyu-Aspire-T3-600:~/git/TCEF_Python_2021$ python3
# Python 3.8.5 (default, Jan 27 2021, 15:41:15)
# [GCC 9.3.0] on linux
# Type "help", "copyright", "credits" or "license" for more information.
# >>> from square import get_square
# __name__:square
# square.py is being imported as a module
# >>> get_square(10)
# 100
# >>>
#
# -
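# Based on the output shown above, `square.py` is assumed to look roughly like the sketch below
# (the exact file contents may differ). Running it here prints the `__main__` branch instead:
# +
def get_square(n):
    return n * n
print(f"__name__:{__name__}")
if __name__ == "__main__":
    print("square.py is being run as the main program")
else:
    print("square.py is being imported as a module")
# -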
# ## Data Types
#
# ### basic
# - int: 1, 2,
# - float: 8.4, 12.01
# - str: "abc"
# - bool: True, False
#
# ### base 2, 8, 10, 16
# - bin
# - oct
# - hex
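# For example, converting the number 42 to each base:
bin(42), oct(42), hex(42)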
# ### str
# - indexing: `'hello'[1]`
# - slice and dice: `'python'[2:4]`
# - concatenation: `'a' + 'b'`
# - split and join: `'hello world'.split()`, `"-".join(["apple", "pear"])`
'hello'[1]
'python'[2:4]
'a' + 'b'
'hello world'.split()
"-".join(["apple", "pear"])
# ### list
# - [], list()
# - append, pop, sort, count, len, clear
# - list comprehension
[1, 2,3 ]
l = [1,2,3]
l.append(4)
l
l.pop()
[ i * i for i in range(5)]
# ### tuple
# - (), tuple()
# - immutable
(1,2,3,)
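# Trying to modify a tuple raises a TypeError, because tuples are immutable:
# +
t = (1, 2, 3)
try:
    t[0] = 10
except TypeError as e:
    print(e)
# -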
# ### set
# - set()
# - distinct
# - orderless
# - add, pop, len
s = set([1,2,2,3])
s
s.add(4)
s
# ### dict
# - {}, dict()
# - key - value pairs
# - keys are distinct
# - dict[key]=value, update, pop
d = {"NC": "north carolina", "VA": "virginia"}
d
d.keys()
d.values()
for k, v in d.items():
print(k, v)
d.update({"FL": "Florida"})
d
d['NC'] = "nc"
d
# ## Control Flow
# - if .. elif .. else
# - for
# - while
earth_is_flat = False
if earth_is_flat:
print("be careful not to fall off!!")
else:
print("no worry")
for i in range(5):
print(i)
# ## Input and Output
# - input
# - print
name = input("what is your name")
print("welcome", name)
# ### formatted output
# - f string, python 3.6 or higher
name='bob'
print(f"hello, {name}")
# ## functions
# - def
# - function name
# - arguments
# - function body
# - return values
# - lambda *
def square(n):
    return n*n
(lambda x:x*x)(3)
# ## File input and output
# - read
# - write
# - append
# - with
# +
# f = open("filename", "r")
# f.read()
# f.close()
# +
# f = open("filename", "w")
# f.write("hello, world")
# f.close()
# +
# f = open("filename", "a")
# f.write("hello, world")
# f.close()
# -
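# The `with` statement (listed above) closes the file automatically, even if an error happens
# inside the block. "demo.txt" below is just a throwaway file name used for illustration:
# +
with open("demo.txt", "w") as f:
    f.write("hello, world")
with open("demo.txt", "r") as f:
    print(f.read())
# -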
# ## Exception Handling and Debugging
# - assert
# - try ... except ... finally
this_value_must_be_true = False
assert this_value_must_be_true, "this is impossible"
try:
pass
except Exception as e:
print(e)
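# `finally` runs whether or not an exception was raised, which makes it useful for cleanup:
# +
try:
    result = 1 / 0
except ZeroDivisionError as e:
    print("caught:", e)
finally:
    print("this always runs")
# -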
# ## Problem Solving
# + active=""
# Check if the Sentence Is Pangram
#
# A pangram is a sentence where every letter of the English alphabet appears at least once.
#
# Given a string sentence containing only lowercase English letters, return true if sentence is a pangram, or false otherwise.
#
# Input: sentence = "thequickbrownfoxjumpsoverthelazydog"
# Output: true
# Explanation: sentence contains at least one of every letter of the English alphabet.
#
# Input: sentence = "leetcode"
# Output: false
#
# -
def pangram(sentence):
alphabet="abcdefghijklmnopqrstuvwxyz"
for c in alphabet:
if c not in sentence:
return False
return True
pangram("leetcode")
pangram("thequickbrownfoxjumpsoverthelazydog")
def pangram2(sentence):
d = {}
for c in sentence:
d.update({c: 1})
return sum(d.values()) == 26
pangram2("thequickbrownfoxjumpsoverthelazydog")
pangram2("leetcode")
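# The same check can also be written with a set, since a set keeps only distinct characters
# (this variant assumes the sentence contains only lowercase letters, as stated in the problem):
# +
def pangram3(sentence):
    return len(set(sentence)) == 26
pangram3("thequickbrownfoxjumpsoverthelazydog"), pangram3("leetcode")
# -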
| Lesson12.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from IPython.core.display import display, HTML
display(HTML("<style>.container {width: 80% !important; }</style>"))
# +
# import warnings
# warnings.filterwarnings("default")
# -
import sys
import time
import scanpy as sc
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib
from matplotlib import colors
# +
myColors = ['#e6194b', '#3cb44b', '#ffe119', '#4363d8', '#f58231',
'#911eb4', '#46f0f0', '#f032e6', '#bcf60c', '#fabebe',
'#008080', '#e6beff', '#9a6324', '#fffac8', '#800000',
'#aaffc3', '#808000', '#ffd8b1', '#000075', '#808080',
'#307D7E', '#000000', "#DDEFFF", "#000035", "#7B4F4B",
"#A1C299", "#300018", "#C2FF99", "#0AA6D8", "#013349",
"#00846F", "#8CD0FF", "#3B9700", "#04F757", "#C8A1A1",
"#1E6E00", "#DFFB71", "#868E7E", "#513A01", "#CCAA35"]
colors2 = plt.cm.Reds(np.linspace(0, 1, 128))
colors3 = plt.cm.Greys_r(np.linspace(0.7,0.8,20))
colorsComb = np.vstack([colors3, colors2])
mymap = colors.LinearSegmentedColormap.from_list('my_colormap', colorsComb)
# +
sys.path.append("../functions")
from SMaSH_functions import SMaSH_functions
sf = SMaSH_functions()
# +
sys.path.append("/home/ubuntu/Taneda/GitLab/lung/Functions/")
from scRNA_functions import scRNA_functions
fc = scRNA_functions()
# -
# # Loading annData object
obj = sc.read_h5ad('../../../External_datasets/mouse_brain_all_cells_20200625_with_annotations.h5ad')
obj.X = obj.X.toarray()
obj = obj[obj.obs["Cell broad annotation"]!='Unk']
print("%d genes across %s cells"%(obj.n_vars, obj.n_obs))
# #### Data preparation
sf.data_preparation(obj)
# #### Data split
s = time.time()
from sklearn.model_selection import train_test_split
# +
data = obj.X.copy()
myDict = {}
for idx, c in enumerate(obj.obs["Cell broad annotation"].cat.categories):
myDict[c] = idx
labels = []
for l in obj.obs["Cell broad annotation"].tolist():
labels.append(myDict[l])
labels = np.array(labels)
X = data
y = labels
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42, stratify=y)
# -
# #### scGeneFit
from scGeneFit.functions import *
markers = get_markers(X_train, y_train, num_markers=30, method="centers", epsilon=1.0, redundancy=0.25)
# +
genes = obj.var.index.tolist()
selectedGenes = [genes[m] for m in markers]
selectedGenes_dict = {}
selectedGenes_dict["group"] = selectedGenes
# -
e = time.time()
# #### Classifiers
sf.run_classifiers(obj, group_by="Cell broad annotation", genes=selectedGenes, classifier="KNN", balance=True, title="scGeneFit-KNN")
# #### Heatmap selected genes
# +
dict_ens_id = {}
for k, j in zip(obj.var.index.tolist(), obj.var['SYMBOL'].tolist()):
dict_ens_id[k] = j
selectedGenes_t = []
for g in selectedGenes:
selectedGenes_t.append(dict_ens_id[g])
obj.var.set_index(obj.var["SYMBOL"], inplace=True, drop=False)
obj.var.index.name = None
# +
matplotlib.rcdefaults()
matplotlib.rcParams.update({'font.size': 11})
ax = sc.pl.DotPlot(obj,
selectedGenes_t,
gene_symbols = "SYMBOL",
groupby="Cell broad annotation",
standard_scale='var',
use_raw=True,
figsize=(6,10),
linewidths=2).style(cmap=mymap, color_on='square', grid=True, dot_edge_lw=1)
ax.swap_axes(swap_axes=True)
# ax.show()
ax.savefig("Figures/scGeneFit_top30.pdf")
# -
# # Elapsed time
print("%d genes across %s cells"%(obj.n_vars, obj.n_obs))
print('Elapsed time (s): ', e-s)
| notebooks/mouse_brain/broad_population/scGeneFit_without_filtering.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="RDjai3DUXAFs"
import csv
import os
from IPython.display import Image
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
RESULT_DIR = '/content/drive/MyDrive/dna-nn/results/'
FIGURE_DIR = '/content/drive/MyDrive/dna-nn/figures/'
# %matplotlib inline
mpl.style.use('seaborn-white')
plt.rcParams['font.size'] = 12
# + colab={"base_uri": "https://localhost:8080/", "height": 462} id="m1-2H6qgbr3V" outputId="b799aa8b-780d-49b1-eda4-7703a41d8853"
model_files = [file.split('.')[0].split('-')[:2]
for file in os.listdir(RESULT_DIR)
if file.endswith('dynamics.csv')]
df = pd.DataFrame(model_files, columns=['model', 'dataset'])
df['done'] = 1
df.sort_values(['dataset', 'model'], inplace=True)
df.set_index(['dataset', 'model'], inplace=True)
index = pd.MultiIndex.from_product([df.index.levels[0], df.index.levels[1]])
df_multi_idx = df.reindex(index, fill_value=0)
df_long = df_multi_idx.reset_index()
df = df_long.pivot(index='model', columns='dataset', values='done')
# df = df.style.applymap(lambda val: 'color:black' if val else 'color:red')
df
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="wemFn2cV7yl5" outputId="c9ae9033-1c6b-48d6-a227-fee36e9c1c93"
df_long.sort_values(['model', 'dataset'], inplace=True)
df_long = df_long.reindex(columns=['model', 'dataset', 'done'])
df_long.set_axis(range(len(df_long)), inplace=True)
df_long
# + id="FGMJPoKhZlQ5"
def plot_dynamics(file, ax):
tmp = pd.read_csv(file)[['accuracy', 'val_accuracy']]
tmp.plot(ax=ax, legend=False)
def plot_roc(file, ax):
tmp = pd.read_csv(file)
if 'ovr' in tmp.columns:
for cls in set(tmp['ovr']):
ax.plot((tmp.loc[tmp['ovr']==cls])['fpr'],
(tmp.loc[tmp['ovr']==cls])['tpr'])
else:
ax.plot(tmp['fpr'], tmp['tpr'])
ax.plot([0, 1], [0, 1], color='grey', linestyle='dashed')
def plot_pr(file, ax):
tmp = pd.read_csv(file)
if 'ovr' in tmp.columns:
for cls in set(tmp['ovr']):
ax.plot((tmp.loc[tmp['ovr']==cls])['recall'],
(tmp.loc[tmp['ovr']==cls])['precision'])
else:
ax.plot(tmp['recall'], tmp['precision'])
def plot_results(r, c, file_type, plot_func, xlabel, ylabel, save_to):
fig, axs = plt.subplots(r, c, sharex=True, sharey=True, figsize=(12, 28))
axs = axs.ravel()
for idx, row in df_long.iterrows():
if idx < c:
text = df_multi_idx.index.levels[0][idx]
axs[idx].text(0.5, 1.05, text, transform=axs[idx].transAxes, ha='center')
if (idx+1) % c == 0:
text = df_multi_idx.index.levels[1][(idx+1)//c-1]
axs[idx].text(1.05, 0.5, text, transform=axs[idx].transAxes)
if idx % c == 0:
axs[idx].set_ylabel(ylabel)
for ax in axs[-c:]:
ax.set_xlabel(xlabel)
axs[idx].spines['top'].set_visible(False)
axs[idx].spines['right'].set_visible(False)
model = row['model']
dataset = row['dataset']
file = f'{RESULT_DIR}{model}-{dataset}-{file_type}.csv'
if not os.path.exists(file):
continue
plot_func(file, axs[idx])
if file_type == 'dynamics':
axs[0].legend()
fig.savefig(FIGURE_DIR + save_to, bbox_inches='tight')
plt.close(fig)
# + id="xA7HOH1vHTvl"
r, c = len(df_multi_idx.index.levels[1]), len(df_multi_idx.index.levels[0])
plot_results(r, c, 'dynamics', plot_dynamics, 'epoch', 'accuracy', 'dynamics.png')
plot_results(r, c, 'roc', plot_roc, 'fpr', 'tpr', 'roc.png')
plot_results(r, c, 'pr', plot_pr, 'recall', 'precision', 'pr.png')
# + id="QW3Qcn_Vr6xP" colab={"base_uri": "https://localhost:8080/", "height": 814} outputId="3b4ad828-ea7c-4c74-e1d7-11762e598e5c"
Image(FIGURE_DIR + 'dynamics.png', width=450)
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="iPdfe-JNxvZ3" outputId="8f534fc0-c4e9-41bc-e429-ba8c04bb2de3"
accuracy_files = [file for file in os.listdir(RESULT_DIR) if file.endswith('accuracy.csv')]
accuracies = []
for a in accuracy_files:
model, dataset = a.split('-')[:2]
with open(RESULT_DIR + a, 'r') as f:
reader = csv.DictReader(f)
d = next(reader)
for k in d:
d[k] = float(d[k])
d['model'] = model
d['dataset'] = dataset
accuracies.append(d)
acc = pd.DataFrame(accuracies)
acc = acc.reindex(columns=['dataset', 'model', 'accuracy', 'val_accuracy', 'test_accuracy'])
acc.sort_values(['dataset', 'model'], inplace=True)
acc.set_index(['dataset', 'model'], inplace=True)
acc
# + colab={"base_uri": "https://localhost:8080/", "height": 166} id="CQtOTBfk8gUz" outputId="b239448b-3c27-4963-86b3-7ab2bd98c54f"
acc_by_dataset = acc['test_accuracy'].groupby(by='dataset')
highest_test_acc = pd.DataFrame({
'model': acc_by_dataset.idxmax().map(lambda idx: idx[1]),
'test_accuracy': acc_by_dataset.max(),
})
highest_test_acc
# + id="U1O72-l0zFW4"
idx = pd.MultiIndex.from_product([acc.index.levels[0], acc.index.levels[1]])
acc_full = acc.reindex(idx)
r = len(acc_full.index.levels[0])
c = len(acc_full.index.levels[1])
fig, axs = plt.subplots(r, 1, sharex=True, sharey=True, figsize=(16,8))
for i0, ax in zip(acc_full.index.levels[0], axs):
acc_full.loc[i0].plot.bar(ax=ax, legend=False)
ax.set_ylabel('accuracy')
ax.text(1.01, 0.5, i0, transform=ax.transAxes)
ax.tick_params('x', rotation=90)
ax.hlines(0.8, -1, c + 1, color='grey', linestyles='dashed')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
axs[0].legend(bbox_to_anchor=(1.1, 1), loc='upper left')
plt.savefig(FIGURE_DIR + 'accuracy.png', bbox_inches='tight')
plt.close(fig)
# + id="8Tn-pL2S21p_" colab={"base_uri": "https://localhost:8080/", "height": 642} outputId="99e32b36-531f-4f59-d0f8-7e72ea0f5c8a"
Image(FIGURE_DIR + 'accuracy.png')
# + id="cWxIIPwL_HG5"
pr_files = [file for file in os.listdir(RESULT_DIR) if file.endswith('pr.csv')]
for pr_file in pr_files:
df = pd.read_csv(RESULT_DIR + pr_file)
if len(df.columns) not in {3, 4}:
print(pr_file, len(df.columns))
# + id="e2ivlHp5-X4w"
| cnn/result.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] colab_type="text" id="Za8-Nr5k11fh"
# ##### Copyright 2018 The TensorFlow Authors.
# + cellView="form" colab={} colab_type="code" id="Eq10uEbw0E4l"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] colab_type="text" id="YHI3vyhv5p85"
# ## **Introduction to Colab and Python**
# + [markdown] colab_type="text" id="OVi775ZJ2bsy"
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/examples/blob/master/courses/udacity_intro_to_tensorflow_for_deep_learning/l01c01_introduction_to_colab_and_python.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/examples/blob/master/courses/udacity_intro_to_tensorflow_for_deep_learning/l01c01_introduction_to_colab_and_python.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
# </td>
# </table>
# + [markdown] colab_type="text" id="F8YVA_634OFk"
# Welcome to this Colab where you will get a quick introduction to the Python programming language and the environment used for the course's exercises: Colab.
#
# Colab is a Python development environment that runs in the browser using Google Cloud.
#
# For example, to print "Hello World", just hover the mouse over [ ] and press the play button to the upper left. Or press shift-enter to execute.
# + colab={} colab_type="code" id="bdLopQtuAJi4"
# Never mind this statement, for compatibility reasons
from __future__ import absolute_import, division, print_function, unicode_literals
# + colab={} colab_type="code" id="X9uIpOS2zx7k"
print("Hello World")
# + [markdown] colab_type="text" id="wwJGmDrQ0EoB"
# ## Functions, Conditionals, and Iteration
# Let's create a Python function, and call it from a loop.
# + colab={} colab_type="code" id="pRllo2HLfXiu"
def HelloWorldXY(x, y):
if (x < 10):
print("Hello World, x was < 10")
elif (x < 20):
print("Hello World, x was >= 10 but < 20")
else:
print("Hello World, x was >= 20")
return x + y
for i in range(8, 25, 5): # i=8, 13, 18, 23 (start, stop, step)
print("--- Now running with i: {}".format(i))
r = HelloWorldXY(i,i)
print("Result from HelloWorld: {}".format(r))
# + colab={} colab_type="code" id="lHNmDCh0JpVP"
print(HelloWorldXY(1,2))
# + [markdown] colab_type="text" id="kiZG7uhm8qCF"
# Easy, right?
#
# If you want a loop starting at 0 to 2 (exclusive) you could do any of the following
# + colab={} colab_type="code" id="m8YQN1H41L-Y"
print("Iterate over the items. `range(2)` is like a list [0,1].")
for i in range(2):
print(i)
print("Iterate over an actual list.")
for i in [0,1]:
print(i)
print("While works")
i = 0
while i < 2:
print(i)
i += 1
# + colab={} colab_type="code" id="vIgmFZq4zszl"
print("Python supports standard key words like continue and break")
while True:
print("Entered while")
break
# + [markdown] colab_type="text" id="5QyOUhFw1OUX"
# ## Numpy and lists
# Python has lists built into the language.
# However, we will use a library called numpy for this.
# Numpy gives you lots of support functions that are useful when doing Machine Learning.
#
# Here, you will also see an import statement. This statement makes the entire numpy package available and we can access those symbols using the abbreviated 'np' syntax.
# + colab={} colab_type="code" id="4Dxk4q-jzEy4"
import numpy as np # Make numpy available using np.
# Create a numpy array, and append an element
a = np.array(["Hello", "World"])
a = np.append(a, "!")
print("Current array: {}".format(a))
print("Printing each element")
for i in a:
print(i)
print("\nPrinting each element and their index")
for i,e in enumerate(a):
print("Index: {}, was: {}".format(i, e))
# + colab={} colab_type="code" id="RTa8_9G3LV03"
print("\nShowing some basic math on arrays")
b = np.array([0,1,4,3,2])
print("Max: {}".format(np.max(b)))
print("Average: {}".format(np.average(b)))
print("Max index: {}".format(np.argmax(b)))
# + colab={} colab_type="code" id="9YaGj5n4LW7P"
print("\nYou can print the type of anything")
print("Type of b: {}, type of b[0]: {}".format(type(b), type(b[0])))
# + colab={} colab_type="code" id="V6ilVhi9LXn_"
print("\nUse numpy to create a [3,3] dimension array with random numbers")
c = np.random.rand(3, 3)
print(c)
# + colab={} colab_type="code" id="W_Q-DkFCLYGA"
print("\nYou can print the dimensions of arrays")
print("Shape of a: {}".format(a.shape))
print("Shape of b: {}".format(b.shape))
print("Shape of c: {}".format(c.shape))
print("...Observe, Python uses both [0,1,2] and (0,1,2) to specify lists")
# + [markdown] colab_type="text" id="c-Jk4dG91dvD"
# ## Colab Specifics
# + [markdown] colab_type="text" id="G0cGd8sHEmKi"
# Colab is a virtual machine you can access directly. To run commands at the VM's terminal, prefix the line with an exclamation point (!).
#
#
# + colab={} colab_type="code" id="cLkfhyzq0W2y"
print("\nDoing $ls on filesystem")
# !ls -l
# !pwd
# + colab={} colab_type="code" id="gR2WTN1cOZ1n"
print("Install numpy")  # Just a test; numpy is actually preinstalled in all Colab instances
# !pip install numpy
# + [markdown] colab_type="text" id="QuWRpQdatAIU"
# **Exercise**
#
# Create a code cell underneath this text cell and add code to:
#
#
# * List the path of the current directory (pwd)
# * Go to / (cd) and list the content (ls -l)
# + colab={} colab_type="code" id="xU-cJbMCR61P"
# !pwd
# each `!` command runs in its own subshell, so chain cd and ls to list the contents of /
# !cd / && ls -l
print("Hello")
# + [markdown] colab_type="text" id="7b5jv0ouFREV"
# All usage of Colab in this course is completely free of charge. Even GPU usage is provided free of charge for some hours of usage every day.
#
# **Using GPUs**
# * Many of the exercises in the course execute more quickly when using the GPU runtime: Runtime | Change runtime type | Hardware accelerator | GPU
#
# **Some final words on Colab**
# * You execute each cell in order, you can edit & re-execute cells if you want
# * Sometimes, this could have unintended consequences. For example, if you add a dimension to an array and execute the cell multiple times, then the cells after it may not work. If you encounter problems, reset your environment:
# * Runtime -> Restart runtime... Resets your Python shell
# * Runtime -> Restart all runtimes... Will reset the Colab image, and get you back to a 100% clean environment
# * You can also clear the output in the Colab by doing: Edit -> Clear all outputs
# * Colabs in this course are loaded from GitHub. Save to your Google Drive if you want a copy with your code/output: File -> Save a copy in Drive...
#
# **Learn More**
# * Check out [this](https://www.youtube.com/watch?v=inN8seMm7UI&list=PLQY2H8rRoyvwLbzbnKJ59NkZvQAW9wLbx&index=3) episode of #CodingTensorFlow, and don't forget to subscribe to the YouTube channel ;)
#
| courses/udacity_intro_to_tensorflow_for_deep_learning/l01c01_introduction_to_colab_and_python.ipynb |
# ---
# jupyter:
# jupytext:
# split_at_heading: true
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#default_exp data.transforms
# -
#export
from fastai2.torch_basics import *
from fastai2.data.core import *
from fastai2.data.load import *
from fastai2.data.external import *
from nbdev.showdoc import *
# # Helper functions for processing data and basic transforms
#
# > Functions for getting, splitting, and labeling data, as well as generic transforms
# ## Get, split, and label
# For most data source creation we need functions to get a list of items, split them into train/valid sets, and label them. fastai provides functions to make each of these steps easy (especially when combined with `fastai.data.blocks`).
# ### Get
# First we'll look at functions that *get* a list of items (generally file names).
#
# We'll use *tiny MNIST* (a subset of MNIST with just two classes, `7`s and `3`s) for our examples/tests throughout this page.
path = untar_data(URLs.MNIST_TINY)
(path/'train').ls()
# export
def _get_files(p, fs, extensions=None):
p = Path(p)
res = [p/f for f in fs if not f.startswith('.')
and ((not extensions) or f'.{f.split(".")[-1].lower()}' in extensions)]
return res
# export
def get_files(path, extensions=None, recurse=True, folders=None):
"Get all the files in `path` with optional `extensions`, optionally with `recurse`, only in `folders`, if specified."
path = Path(path)
folders=L(folders)
extensions = setify(extensions)
extensions = {e.lower() for e in extensions}
if recurse:
res = []
for i,(p,d,f) in enumerate(os.walk(path)): # returns (dirpath, dirnames, filenames)
if len(folders) !=0 and i==0: d[:] = [o for o in d if o in folders]
else: d[:] = [o for o in d if not o.startswith('.')]
res += _get_files(p, f, extensions)
else:
f = [o.name for o in os.scandir(path) if o.is_file()]
res = _get_files(path, f, extensions)
return L(res)
# This is the most general way to grab a bunch of file names from disk. If you pass `extensions` (including the `.`) then returned file names are filtered by that list. Only those files directly in `path` are included, unless you pass `recurse`, in which case all child folders are also searched recursively. `folders` is an optional list of directories to limit the search to.
t3 = get_files(path/'train'/'3', extensions='.png', recurse=False)
t7 = get_files(path/'train'/'7', extensions='.png', recurse=False)
t = get_files(path/'train', extensions='.png', recurse=True)
test_eq(len(t), len(t3)+len(t7))
test_eq(len(get_files(path/'train'/'3', extensions='.jpg', recurse=False)),0)
test_eq(len(t), len(get_files(path, extensions='.png', recurse=True, folders='train')))
t
#hide
test_eq(len(get_files(path/'train'/'3', recurse=False)),346)
test_eq(len(get_files(path, extensions='.png', recurse=True, folders=['train', 'test'])),729)
test_eq(len(get_files(path, extensions='.png', recurse=True, folders='train')),709)
test_eq(len(get_files(path, extensions='.png', recurse=True, folders='training')),0)
# It's often useful to be able to create functions with customized behavior. `fastai.data` generally uses functions named as CamelCase verbs ending in `er` to create these functions. `FileGetter` is a simple example of such a function creator.
#export
def FileGetter(suf='', extensions=None, recurse=True, folders=None):
"Create `get_files` partial function that searches path suffix `suf`, only in `folders`, if specified, and passes along args"
def _inner(o, extensions=extensions, recurse=recurse, folders=folders):
return get_files(o/suf, extensions, recurse, folders)
return _inner
fpng = FileGetter(extensions='.png', recurse=False)
test_eq(len(t7), len(fpng(path/'train'/'7')))
test_eq(len(t), len(fpng(path/'train', recurse=True)))
fpng_r = FileGetter(extensions='.png', recurse=True)
test_eq(len(t), len(fpng_r(path/'train')))
#export
image_extensions = set(k for k,v in mimetypes.types_map.items() if v.startswith('image/'))
#export
def get_image_files(path, recurse=True, folders=None):
"Get image files in `path` recursively, only in `folders`, if specified."
return get_files(path, extensions=image_extensions, recurse=recurse, folders=folders)
# This is simply `get_files` called with a list of standard image extensions.
test_eq(len(t), len(get_image_files(path, recurse=True, folders='train')))
#export
def ImageGetter(suf='', recurse=True, folders=None):
"Create `get_image_files` partial function that searches path suffix `suf` and passes along `kwargs`, only in `folders`, if specified."
def _inner(o, recurse=recurse, folders=folders): return get_image_files(o/suf, recurse, folders)
return _inner
# Same as `FileGetter`, but for image extensions.
test_eq(len(get_files(path/'train', extensions='.png', recurse=True, folders='3')),
len(ImageGetter( 'train', recurse=True, folders='3')(path)))
#export
def get_text_files(path, recurse=True, folders=None):
"Get text files in `path` recursively, only in `folders`, if specified."
return get_files(path, extensions=['.txt'], recurse=recurse, folders=folders)
# ### Split
# The next set of functions are used to *split* data into training and validation sets. The functions return two lists - a list of indices or masks for each of training and validation sets.
# export
def RandomSplitter(valid_pct=0.2, seed=None, **kwargs):
"Create function that splits `items` between train/val with `valid_pct` randomly."
def _inner(o, **kwargs):
if seed is not None: torch.manual_seed(seed)
rand_idx = L(int(i) for i in torch.randperm(len(o)))
cut = int(valid_pct * len(o))
return rand_idx[cut:],rand_idx[:cut]
return _inner
src = list(range(30))
f = RandomSplitter(seed=42)
trn,val = f(src)
assert 0<len(trn)<len(src)
assert all(o not in val for o in trn)
test_eq(len(trn), len(src)-len(val))
# test random seed consistency
test_eq(f(src)[0], trn)
#export
def IndexSplitter(valid_idx):
"Split `items` so that `val_idx` are in the validation set and the others in the training set"
def _inner(o, **kwargs):
train_idx = np.setdiff1d(np.array(range_of(o)), np.array(valid_idx))
return L(train_idx, use_list=True), L(valid_idx, use_list=True)
return _inner
items = list(range(10))
splitter = IndexSplitter([3,7,9])
test_eq(splitter(items),[[0,1,2,4,5,6,8],[3,7,9]])
# export
def _grandparent_idxs(items, name): return mask2idxs(Path(o).parent.parent.name == name for o in items)
# export
def GrandparentSplitter(train_name='train', valid_name='valid'):
"Split `items` from the grand parent folder names (`train_name` and `valid_name`)."
def _inner(o, **kwargs):
return _grandparent_idxs(o, train_name),_grandparent_idxs(o, valid_name)
return _inner
fnames = [path/'train/3/9932.png', path/'valid/7/7189.png',
path/'valid/7/7320.png', path/'train/7/9833.png',
path/'train/3/7666.png', path/'valid/3/925.png',
path/'train/7/724.png', path/'valid/3/93055.png']
splitter = GrandparentSplitter()
test_eq(splitter(fnames),[[0,3,4,6],[1,2,5,7]])
# export
def FuncSplitter(func):
"Split `items` by result of `func` (`True` for validation, `False` for training set)."
def _inner(o, **kwargs):
val_idx = mask2idxs(func(o_) for o_ in o)
return IndexSplitter(val_idx)(o)
return _inner
splitter = FuncSplitter(lambda o: Path(o).parent.parent.name == 'valid')
test_eq(splitter(fnames),[[0,3,4,6],[1,2,5,7]])
# export
def MaskSplitter(mask):
"Split `items` depending on the value of `mask`."
def _inner(o, **kwargs): return IndexSplitter(mask2idxs(mask))(o)
return _inner
items = list(range(6))
splitter = MaskSplitter([True,False,False,True,False,True])
test_eq(splitter(items),[[1,2,4],[0,3,5]])
# export
def FileSplitter(fname):
    "Split `items` by keeping in the validation set those whose names are listed (one per line) in file `fname`."
valid = Path(fname).read().split('\n')
def _func(x): return x.name in valid
def _inner(o, **kwargs): return FuncSplitter(_func)(o)
return _inner
with tempfile.TemporaryDirectory() as d:
fname = Path(d)/'valid.txt'
fname.write('\n'.join([Path(fnames[i]).name for i in [1,3,4]]))
splitter = FileSplitter(fname)
test_eq(splitter(fnames),[[0,2,5,6,7],[1,3,4]])
# export
def ColSplitter(col='is_valid'):
"Split `items` (supposed to be a dataframe) by value in `col`"
def _inner(o, **kwargs):
assert isinstance(o, pd.DataFrame), "ColSplitter only works when your items are a pandas DataFrame"
valid_idx = o[col].values
return IndexSplitter(mask2idxs(valid_idx))(o)
return _inner
df = pd.DataFrame({'a': [0,1,2,3,4], 'b': [True,False,True,True,False]})
splits = ColSplitter('b')(df)
test_eq(splits, [[1,4], [0,2,3]])
# ### Label
# The final set of functions is used to *label* a single item of data.
# export
def parent_label(o, **kwargs):
"Label `item` with the parent folder name."
return Path(o).parent.name
# Note that `parent_label` doesn't have anything customize, so it doesn't return a function - you can just use it directly.
test_eq(parent_label(fnames[0]), '3')
test_eq(parent_label("fastai_dev/dev/data/mnist_tiny/train/3/9932.png"), '3')
[parent_label(o) for o in fnames]
#hide
#test for MS Windows when os.path.sep is '\\' instead of '/'
test_eq(parent_label(os.path.join("fastai_dev","dev","data","mnist_tiny","train", "3", "9932.png") ), '3')
# export
class RegexLabeller():
"Label `item` with regex `pat`."
def __init__(self, pat, match=False):
self.pat = re.compile(pat)
self.matcher = self.pat.match if match else self.pat.search
def __call__(self, o, **kwargs):
res = self.matcher(str(o))
assert res,f'Failed to find "{self.pat}" in "{o}"'
return res.group(1)
# `RegexLabeller` is a very flexible function since it handles any regex search of the stringified item. Pass `match=True` to use `re.match` (i.e. check only start of string), or `re.search` otherwise (default).
#
# For instance, here's an example the replicates the previous `parent_label` results.
f = RegexLabeller(fr'{os.path.sep}(\d){os.path.sep}')
test_eq(f(fnames[0]), '3')
[f(o) for o in fnames]
f = RegexLabeller(r'(\d*)', match=True)
test_eq(f(fnames[0].name), '9932')
#export
class ColReader():
"Read `cols` in `row` with potential `pref` and `suff`"
def __init__(self, cols, pref='', suff='', label_delim=None):
store_attr(self, 'suff,label_delim')
self.pref = str(pref) + os.path.sep if isinstance(pref, Path) else pref
self.cols = L(cols)
def _do_one(self, r, c):
o = r[c] if isinstance(c, int) else getattr(r, c)
if len(self.pref)==0 and len(self.suff)==0 and self.label_delim is None: return o
if self.label_delim is None: return f'{self.pref}{o}{self.suff}'
else: return o.split(self.label_delim) if len(o)>0 else []
def __call__(self, o, **kwargs): return detuplify(tuple(self._do_one(o, c) for c in self.cols))
# `cols` can be a list of column names or a list of indices (or a mix of both). If `label_delim` is passed, the result is split using it.
# +
df = pd.DataFrame({'a': 'a b c d'.split(), 'b': ['1 2', '0', '', '1 2 3']})
f = ColReader('a', pref='0', suff='1')
test_eq([f(o) for o in df.itertuples()], '0a1 0b1 0c1 0d1'.split())
f = ColReader('b', label_delim=' ')
test_eq([f(o) for o in df.itertuples()], [['1', '2'], ['0'], [], ['1', '2', '3']])
df['a1'] = df['a']
f = ColReader(['a', 'a1'], pref='0', suff='1')
test_eq([f(o) for o in df.itertuples()], [('0a1', '0a1'), ('0b1', '0b1'), ('0c1', '0c1'), ('0d1', '0d1')])
df = pd.DataFrame({'a': [L(0,1), L(2,3,4), L(5,6,7)]})
f = ColReader('a')
test_eq([f(o) for o in df.itertuples()], [L(0,1), L(2,3,4), L(5,6,7)])
# -
# ## Categorize -
#export
class CategoryMap(CollBase):
"Collection of categories with the reverse mapping in `o2i`"
def __init__(self, col, sort=True, add_na=False):
if is_categorical_dtype(col): items = L(col.cat.categories, use_list=True)
else:
if not hasattr(col,'unique'): col = L(col, use_list=True)
# `o==o` is the generalized definition of non-NaN used by Pandas
items = L(o for o in col.unique() if o==o)
if sort: items = items.sorted()
self.items = '#na#' + items if add_na else items
self.o2i = defaultdict(int, self.items.val2idx()) if add_na else dict(self.items.val2idx())
def __eq__(self,b): return all_equal(b,self)
t = CategoryMap([4,2,3,4])
test_eq(t, [2,3,4])
test_eq(t.o2i, {2:0,3:1,4:2})
test_fail(lambda: t.o2i['unseen label'])
t = CategoryMap([4,2,3,4], add_na=True)
test_eq(t, ['#na#',2,3,4])
test_eq(t.o2i, {'#na#':0,2:1,3:2,4:3})
t = CategoryMap(pd.Series([4,2,3,4]), sort=False)
test_eq(t, [4,2,3])
test_eq(t.o2i, {4:0,2:1,3:2})
col = pd.Series(pd.Categorical(['M','H','L','M'], categories=['H','M','L'], ordered=True))
t = CategoryMap(col)
test_eq(t, ['H','M','L'])
test_eq(t.o2i, {'H':0,'M':1,'L':2})
# export
class Categorize(Transform):
"Reversible transform of category string to `vocab` id"
loss_func,order=CrossEntropyLossFlat(),1
def __init__(self, vocab=None, add_na=False):
self.add_na = add_na
self.vocab = None if vocab is None else CategoryMap(vocab, add_na=add_na)
def setups(self, dsets):
if self.vocab is None and dsets is not None: self.vocab = CategoryMap(dsets, add_na=self.add_na)
self.c = len(self.vocab)
def encodes(self, o): return TensorCategory(self.vocab.o2i[o])
def decodes(self, o): return Category (self.vocab [o])
#export
class Category(str, ShowTitle): _show_args = {'label': 'category'}
cat = Categorize()
tds = Datasets(['cat', 'dog', 'cat'], tfms=[cat])
test_eq(cat.vocab, ['cat', 'dog'])
test_eq(cat('cat'), 0)
test_eq(cat.decode(1), 'dog')
test_stdout(lambda: show_at(tds,2), 'cat')
cat = Categorize(add_na=True)
tds = Datasets(['cat', 'dog', 'cat'], tfms=[cat])
test_eq(cat.vocab, ['#na#', 'cat', 'dog'])
test_eq(cat('cat'), 1)
test_eq(cat.decode(2), 'dog')
test_stdout(lambda: show_at(tds,2), 'cat')
# ## Multicategorize -
# export
class MultiCategorize(Categorize):
"Reversible transform of multi-category strings to `vocab` id"
loss_func,order=BCEWithLogitsLossFlat(),1
def __init__(self, vocab=None, add_na=False):
self.add_na = add_na
self.vocab = None if vocab is None else CategoryMap(vocab, add_na=add_na)
def setups(self, dsets):
if not dsets: return
if self.vocab is None:
vals = set()
for b in dsets: vals = vals.union(set(b))
self.vocab = CategoryMap(list(vals), add_na=self.add_na)
def encodes(self, o): return TensorMultiCategory([self.vocab.o2i[o_] for o_ in o])
def decodes(self, o): return MultiCategory ([self.vocab [o_] for o_ in o])
#export
class MultiCategory(L):
def show(self, ctx=None, sep=';', color='black', **kwargs):
return show_title(sep.join(self.map(str)), ctx=ctx, color=color, **kwargs)
cat = MultiCategorize()
tds = Datasets([['b', 'c'], ['a'], ['a', 'c'], []], tfms=[cat])
test_eq(tds[3][0], tensor([]))
test_eq(cat.vocab, ['a', 'b', 'c'])
test_eq(cat(['a', 'c']), tensor([0,2]))
test_eq(cat([]), tensor([]))
test_eq(cat.decode([1]), ['b'])
test_eq(cat.decode([0,2]), ['a', 'c'])
test_stdout(lambda: show_at(tds,2), 'a;c')
# export
class OneHotEncode(Transform):
"One-hot encodes targets"
order=2
def __init__(self, c=None): self.c = c
def setups(self, dsets):
if self.c is None: self.c = len(L(getattr(dsets, 'vocab', None)))
if not self.c: warn("Couldn't infer the number of classes, please pass a value for `c` at init")
def encodes(self, o): return TensorMultiCategory(one_hot(o, self.c).float())
def decodes(self, o): return one_hot_decode(o, None)
# Works in conjunction with ` MultiCategorize` or on its own if you have one-hot encoded targets (pass a `vocab` for decoding and `do_encode=False` in this case)
_tfm = OneHotEncode(c=3)
test_eq(_tfm([0,2]), tensor([1.,0,1]))
test_eq(_tfm.decode(tensor([0,1,1])), [1,2])
tds = Datasets([['b', 'c'], ['a'], ['a', 'c'], []], [[MultiCategorize(), OneHotEncode()]])
test_eq(tds[1], [tensor([1.,0,0])])
test_eq(tds[3], [tensor([0.,0,0])])
test_eq(tds.decode([tensor([False, True, True])]), [['b','c']])
test_eq(type(tds[1][0]), TensorMultiCategory)
test_stdout(lambda: show_at(tds,2), 'a;c')
#hide
#test with passing the vocab
tds = Datasets([['b', 'c'], ['a'], ['a', 'c'], []], [[MultiCategorize(vocab=['a', 'b', 'c']), OneHotEncode()]])
test_eq(tds[1], [tensor([1.,0,0])])
test_eq(tds[3], [tensor([0.,0,0])])
test_eq(tds.decode([tensor([False, True, True])]), [['b','c']])
test_eq(type(tds[1][0]), TensorMultiCategory)
test_stdout(lambda: show_at(tds,2), 'a;c')
# export
class EncodedMultiCategorize(Categorize):
"Transform of one-hot encoded multi-category that decodes with `vocab`"
loss_func,order=BCEWithLogitsLossFlat(),1
def __init__(self, vocab): self.vocab,self.c = vocab,len(vocab)
def encodes(self, o): return TensorCategory(tensor(o).float())
def decodes(self, o): return MultiCategory (one_hot_decode(o, self.vocab))
_tfm = EncodedMultiCategorize(vocab=['a', 'b', 'c'])
test_eq(_tfm([1,0,1]), tensor([1., 0., 1.]))
test_eq(type(_tfm([1,0,1])), TensorCategory)
test_eq(_tfm.decode(tensor([False, True, True])), ['b','c'])
#export
def get_c(dls):
if getattr(dls, 'c', False): return dls.c
if getattr(getattr(dls.train, 'after_item', None), 'c', False): return dls.train.after_item.c
if getattr(getattr(dls.train, 'after_batch', None), 'c', False): return dls.train.after_batch.c
vocab = getattr(dls, 'vocab', [])
if len(vocab) > 0 and is_listy(vocab[-1]): vocab = vocab[-1]
return len(vocab)
# ## End-to-end dataset example with MNIST
# Let's show how to use those functions to grab the mnist dataset in a `Datasets`. First we grab all the images.
path = untar_data(URLs.MNIST_TINY)
items = get_image_files(path)
# Then we split between train and validation depending on the folder.
splitter = GrandparentSplitter()
splits = splitter(items)
train,valid = (items[i] for i in splits)
train[:3],valid[:3]
# Our inputs are images that we open and convert to tensors, our targets are labeled depending on the parent directory and are categories.
# +
from PIL import Image
def open_img(fn:Path): return Image.open(fn).copy()
def img2tensor(im:Image.Image): return TensorImage(array(im)[None])
tfms = [[open_img, img2tensor],
[parent_label, Categorize()]]
train_ds = Datasets(train, tfms)
# -
x,y = train_ds[3]
xd,yd = decode_at(train_ds,3)
test_eq(parent_label(train[3]),yd)
test_eq(array(Image.open(train[3])),xd[0].numpy())
ax = show_at(train_ds, 3, cmap="Greys", figsize=(1,1))
assert ax.title.get_text() in ('3','7')
test_fig_exists(ax)
# ## ToTensor -
#export
class ToTensor(Transform):
"Convert item to appropriate tensor class"
order = 5
# ## IntToFloatTensor -
# export
class IntToFloatTensor(Transform):
"Transform image to float tensor, optionally dividing by 255 (e.g. for images)."
order = 10 #Need to run after PIL transforms on the GPU
def __init__(self, div=255., div_mask=1, split_idx=None, as_item=True):
super().__init__(split_idx=split_idx,as_item=as_item)
self.div,self.div_mask = div,div_mask
def encodes(self, o:TensorImage): return o.float().div_(self.div)
def encodes(self, o:TensorMask ): return o.div_(self.div_mask).long()
def decodes(self, o:TensorImage): return o.clamp(0., 1.) if self.div else o
t = (TensorImage(tensor(1)),tensor(2).long(),TensorMask(tensor(3)))
tfm = IntToFloatTensor(as_item=False)
ft = tfm(t)
test_eq(ft, [1./255, 2, 3])
test_eq(type(ft[0]), TensorImage)
test_eq(type(ft[2]), TensorMask)
test_eq(ft[0].type(),'torch.FloatTensor')
test_eq(ft[1].type(),'torch.LongTensor')
test_eq(ft[2].type(),'torch.LongTensor')
# ## Normalization -
# export
def broadcast_vec(dim, ndim, *t, cuda=True):
"Make a vector broadcastable over `dim` (out of `ndim` total) by prepending and appending unit axes"
v = [1]*ndim
v[dim] = -1
f = to_device if cuda else noop
return [f(tensor(o).view(*v)) for o in t]
# export
@docs
class Normalize(Transform):
"Normalize/denorm batch of `TensorImage`"
order=99
def __init__(self, mean=None, std=None, axes=(0,2,3)): self.mean,self.std,self.axes = mean,std,axes
@classmethod
def from_stats(cls, mean, std, dim=1, ndim=4, cuda=True): return cls(*broadcast_vec(dim, ndim, mean, std, cuda=cuda))
def setups(self, dl:DataLoader):
if self.mean is None or self.std is None:
x,*_ = dl.one_batch()
self.mean,self.std = x.mean(self.axes, keepdim=True),x.std(self.axes, keepdim=True)+1e-7
def encodes(self, x:TensorImage): return (x-self.mean) / self.std
def decodes(self, x:TensorImage):
f = to_cpu if x.device.type=='cpu' else noop
return (x*f(self.std) + f(self.mean))
_docs=dict(encodes="Normalize batch", decodes="Denormalize batch")
mean,std = [0.5]*3,[0.5]*3
mean,std = broadcast_vec(1, 4, mean, std)
batch_tfms = [IntToFloatTensor, Normalize.from_stats(mean,std)]
tdl = TfmdDL(train_ds, after_batch=batch_tfms, bs=4, device=default_device())
# +
x,y = tdl.one_batch()
xd,yd = tdl.decode((x,y))
test_eq(x.type(), 'torch.cuda.FloatTensor' if default_device().type=='cuda' else 'torch.FloatTensor')
test_eq(xd.type(), 'torch.FloatTensor')
test_eq(type(x), TensorImage)
test_eq(type(y), TensorCategory)
assert x.mean()<0.0
assert x.std()>0.5
assert 0<xd.mean()/255.<1
assert 0<xd.std()/255.<0.5
# -
#hide
nrm = Normalize()
batch_tfms = [IntToFloatTensor(), nrm]
tdl = TfmdDL(train_ds, after_batch=batch_tfms, bs=4)
x,y = tdl.one_batch()
test_close(x.mean(), 0.0, 1e-4)
assert x.std()>0.9, x.std()
#Just for visuals
from fastai2.vision.core import *
tdl.show_batch((x,y))
x,y = torch.add(x,0),torch.add(y,0) #Lose type of tensors (to emulate predictions)
test_ne(type(x), TensorImage)
tdl.show_batch((x,y), figsize=(4,4)) #Check that types are put back by dl.
# +
#TODO: make the above check a proper test
# -
# ## Export -
#hide
from nbdev.export import notebook2script
notebook2script()
| nbs/05_data.transforms.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from IPython.display import Video
Video('test_run/pyavi/example_file/video.avi', 'avi')
Video('test_run/pyavi/example_file/video_out.avi.avi', embed=True)
Video('test_run/pycrop/example_file/00000.avi')
import pickle
a = pickle.load(open('/home/ankur/LipSync/syncnet_python/test_run/pywork/example_file/activesd.pckl', 'rb'))
a[0].shape
| Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="Xdl-8Vu0ve0n"
# # I. Gene Expression Features Correlation Analysis
# + id="kvTv9jV2fuGL" executionInfo={"elapsed": 85131, "status": "ok", "timestamp": 1602865811710, "user": {"displayName": "\uc774\ub3d9\ud5cc", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjpQSoXyzmmQmJdtMsQSQQAjBsuq9WySg6K-myXZQ=s64", "userId": "16505489669088483149"}, "user_tz": -540} outputId="757eaa7b-d4b3-4184-c7e9-554581f01d78" colab={"base_uri": "https://localhost:8080/", "height": 34}
## Package
# %matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
## google drive mount
from google.colab import drive
drive.mount('/content/gdrive')
# + [markdown] id="CYLFLL3wuzm7"
# Dataset source: https://vipdue.com/python-daixie-linear-regression-for-gene-expression-prediction/
# + id="V9NyN2tSD9X-" executionInfo={"elapsed": 1681, "status": "ok", "timestamp": 1602593773562, "user": {"displayName": "\uc774\ub3d9\ud5cc", "photoUrl": "https://lh3.googleusercontent.<KEY>", "userId": "16505489669088483149"}, "user_tz": -540} outputId="9887faa1-a80b-4761-f9ae-434cc4b9aa19" colab={"base_uri": "https://localhost:8080/", "height": 428}
asset_dir = '/content/gdrive/My Drive/Python_Programming/assets/'
column_pic = plt.imread(asset_dir + 'mrna_gene_expression.jpg')
plt.axis('off')
plt.rcParams["figure.figsize"] = (20,10)
plt.imshow(column_pic)
# + [markdown] id="zIV1zn_sEL6-"
# ### Load Dataset
# + id="sBSbq92VkK6S"
data_dir = '/content/gdrive/My Drive/Python_Programming/dataset/mRNA/'
microRNA = pd.read_csv(data_dir + 'microRNAScore-x.csv')
microRNA_well_explain = pd.read_csv(data_dir + 'mRNA-y-well-explained.csv')
microRNA_poor_explain = pd.read_csv(data_dir + 'mRNA-y-poor-explained.csv')
tissueType = pd.read_csv(data_dir + 'cancerType-x-Categorical.csv')
microRNA.head()
# + [markdown] id="1s60K20kEOUw"
# ### Dataset Check
# + id="gC2y4DkokMSb"
microRNA.info()
# + id="NZKH-R7kkNiw"
microRNA_well_explain.head()
# + id="v2-WuZ93kPUW"
microRNA_poor_explain.head()
# + id="VHEenjAVkSEC"
tissueType
# + id="TUBwZA6LkTUD"
tissueType["cancerType"].unique() ## 32 cancer types
# + [markdown] id="3C1pMJ3DFdO8"
# ## 1. Visual Analysis
# + id="oWTXZ-OKEuew" executionInfo={"elapsed": 159517, "status": "ok", "timestamp": 1602865991663, "user": {"displayName": "\uc774\ub3d9\ud5cc", "photoUrl": "<KEY>", "userId": "16505489669088483149"}, "user_tz": -540} outputId="a5c3ff1f-7d0e-48fa-b184-c361c7b43e2d" colab={"background_save": true, "base_uri": "https://localhost:8080/", "height": 1000, "output_embedded_package_id": "1ydHPbe5UShWG9xwoTuiinId0bWdpxLop"}
## Pairplot
sns.pairplot(microRNA[['MIRC1','MIRC2','MIRC3','MIRC4','MIRC5','MIRC6','MIRC7']],hue="MIRC1") ## hue: grouping variable
# + [markdown] id="BfDftA_OEeHB"
# ## 2. Correlation Analysis
# + id="OWOB-UOVkaaW"
microRNA_process = microRNA.drop('Unnamed: 0',axis=1) # drop the 'Unnamed: 0' column
plt.figure(figsize=(15,15))
sns.heatmap(data = microRNA_process.corr(method = 'pearson'), annot=True, fmt = '.2f', linewidths=.5, cmap='Blues')
# + [markdown] id="Y6D5b6IkEil_"
# ## 3. Linear Regression
# + id="vOkvBQ-0kcda"
sns.regplot(x='MIRC1', y='MIRC15',data=microRNA_process)
# + id="PtyGS8Aqkd8G"
sns.regplot(x='MIRC10', y='MIRC21',data=microRNA_process)
# + [markdown] id="FQj0A84IvHJa"
# # II. Gene Expression Prediction using Machine Learning Methods (Basic)
# + id="PNw3geldmo7h"
# data splitting
from sklearn.model_selection import train_test_split
# data modeling
from sklearn.metrics import confusion_matrix, accuracy_score, roc_curve, classification_report
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
# + [markdown] id="6cHFkyE2CSeV"
# ## 1. Dataset Splitting
# + id="D60WSrclkgSG"
X = microRNA_process
y = tissueType["cancerType"]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state = 0)
print("Number of Train-set: ",len(y_train))
print("Number of Test-set: ",len(y_test))
X_train.head()
# + [markdown] id="OzCjYdktCUGU"
# ## 2. Machine Learning Model Training, Prediction, Evaluation
# + id="zvNcEt1vkiU5"
## (1) Model Selection
LR = LogisticRegression()
svm = SVC()
DT = DecisionTreeClassifier()
RF = RandomForestClassifier()
## (2) Model Training
LR_model = LR.fit(X_train, y_train)
SVM_model = svm.fit(X_train, y_train)
DT_model = DT.fit(X_train, y_train)
RF_model = RF.fit(X_train, y_train)
## (3) Model Prediction
LR_predict = LR_model.predict(X_test)
SVM_predict = SVM_model.predict(X_test)
DT_predict = DT_model.predict(X_test)
RF_predict = RF_model.predict(X_test)
## (4) Model Evaluation
LR_conf_matrix = confusion_matrix(y_test, LR_predict)
LR_acc_score = accuracy_score(y_test, LR_predict)
print("\n***** Logistc Regression *****")
print("confussion matrix")
print(LR_conf_matrix)
print("\n")
print("Accuracy of Logistic Regression:",LR_acc_score*100,'\n')
print(classification_report(y_test,LR_predict))
print("\n***** SVM *****")
SVM_conf_matrix = confusion_matrix(y_test, SVM_predict)
SVM_acc_score = accuracy_score(y_test, SVM_predict)
print("confussion matrix")
print(SVM_conf_matrix)
print("\n")
print("Accuracy of Support Vector Classifier:",SVM_acc_score*100,'\n')
print(classification_report(y_test,SVM_predict))
print("\n***** Decision Tree *****")
DT_conf_matrix = confusion_matrix(y_test, DT_predict)
DT_acc_score = accuracy_score(y_test, DT_predict)
print("confussion matrix")
print(DT_conf_matrix)
print("\n")
print("Accuracy of Decision Tree:",DT_acc_score*100,'\n')
print(classification_report(y_test,DT_predict))
print("\n***** Random Forest *****")
RF_conf_matrix = confusion_matrix(y_test, RF_predict)
RF_acc_score = accuracy_score(y_test, RF_predict)
print("confussion matrix")
print(RF_conf_matrix)
print("\n")
print("Accuracy of Random Forest:",RF_acc_score*100,'\n')
print(classification_report(y_test,RF_predict))
# + [markdown] id="JrHVK6UWvngU"
# # III. Gene Expression Prediction using Machine Learning Methods (Advanced)
# + [markdown] id="TDYt-rOHv-Cj"
# ## 1. Dataset Splitting
# + id="U-bvh0e1kko4"
""" class 분포 확인 """
X = microRNA_process
y = tissueType["cancerType"]
print(y.value_counts())
# + id="wlN7bDRwkmcK"
""" 예: 100개 이하 Class 제외 """
exclude = y[(y=='KICH') | (y=='MESO') | (y=='UVM') | (y=='ACC') | (y=='READ') | (y=='UCS') | (y=='DLBC') | (y=='CHOL') | (y=='GBM')]
X_process = X.drop(exclude.index)
y_process = y.drop(exclude.index)
print(len(y), len(y_process))
# + id="l5edfQ72koch"
""" Class 간의 비율 유지: stratify """
X_train, X_test, y_train, y_test = train_test_split(X_process, y_process, test_size = 0.20, stratify = y_process, random_state = 0)
print("*** Train Dataset ***")
print(y_train.value_counts())
print("\n*** Test Dataset ***")
print('\n',y_test.value_counts())
# + id="_GrI6ALqkrPE"
""" Over Sampling: Synthetic Minority Oversampling Technique (SMOTE) """
from collections import Counter
from imblearn.over_sampling import SMOTE
sm = SMOTE(random_state=0)
X_train_sm, y_train_sm = sm.fit_resample(X_train, y_train)  # fit_resample replaces the deprecated fit_sample
print('Resampled dataset shape {}'.format(Counter(y_train_sm)))
# + [markdown] id="qirwXNFGv54F"
# ## 2. Training (Hyperparameter Tuning)
# + id="PYDXcxgrBv1M" executionInfo={"elapsed": 1484, "status": "ok", "timestamp": 1602593763976, "user": {"displayName": "\uc774\ub3d9\ud5cc", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjpQSoXyzmmQmJdtMsQSQQAjBsuq9WySg6K-myXZQ=s64", "userId": "16505489669088483149"}, "user_tz": -540} outputId="40af7c0c-55bc-476c-8016-204e865c5e8b" colab={"base_uri": "https://localhost:8080/", "height": 428}
column_pic = plt.imread(asset_dir + 'cv.png')
plt.axis('off')
plt.rcParams["figure.figsize"] = (15,7)
plt.imshow(column_pic)
# + id="X9QuN2XnBZsG" executionInfo={"elapsed": 488534, "status": "ok", "timestamp": 1602604143030, "user": {"displayName": "\uc774\ub3d9\ud5cc", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjpQSoXyzmmQmJdtMsQSQQAjBsuq9WySg6K-myXZQ=s64", "userId": "16505489669088483149"}, "user_tz": -540} outputId="4f8ac899-fa98-4149-abbd-d1b5c7fb50df" colab={"base_uri": "https://localhost:8080/", "height": 529}
from sklearn.model_selection import cross_val_score
import numpy as np
num_epoch = 10
coarse_hyperparameter_list = []
""" Random Forest """
for epoch in range(num_epoch):
n_estimators = int(np.random.uniform(50, 150))
max_depth = int(np.random.uniform(10, 100))
max_features = np.random.uniform(0.1, 1.0)
RF_model = RandomForestClassifier(n_estimators=n_estimators,
max_depth=max_depth,
max_features=max_features,
n_jobs=-1,
random_state=0)
score = cross_val_score(RF_model, X_train_sm, y_train_sm, cv=5)
print("epoch = {0}, n_estimators = {1}, max_depth = {2}, max_features = {3}, score = {4:.5f}" \
.format(epoch, n_estimators, max_depth, max_features, score.mean()))
hyperparameter = {'n_estimators': n_estimators,
'max_depth': max_depth,
'max_features': max_features,
'score': score.mean()}
coarse_hyperparameter_list.append(hyperparameter)
coarse_hyperparameter_list = pd.DataFrame.from_dict(coarse_hyperparameter_list)
coarse_hyperparameter_list = coarse_hyperparameter_list.sort_values("score", ascending=False)
coarse_hyperparameter_list.head(10)
# + id="SaVrqUdgqIey"
## (1) Model Selection
RF = RandomForestClassifier(n_estimators = 148, max_depth = 67, max_features = 0.172581)
## (2) Model Training
RF_model = RF.fit(X_train_sm, y_train_sm)
# + [markdown] id="ptaMrV4xCezE"
# ## 3. Prediction and Evaluation
# + id="GtHWz-aKk34Z"
## (3) Model Prediction
RF_predict = RF_model.predict(X_test)
## (4) Model Evaluation
RF_conf_matrix = confusion_matrix(y_test, RF_predict)
RF_acc_score = accuracy_score(y_test, RF_predict)
print("confussion matrix")
print(RF_conf_matrix)
print("\n")
print("Accuracy of Random Forest:",RF_acc_score*100,'\n')
print(classification_report(y_test,RF_predict))
# + [markdown] id="Z6Z0KJ8qRQxS"
# ## 4. Feature Importances
# + id="0qoQ1A8kk506"
importances = RF_model.feature_importances_
std = np.std([tree.feature_importances_ for tree in RF_model.estimators_],
axis=0)
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(X_test.shape[1]):
print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
# Plot the impurity-based feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.bar(range(X_test.shape[1]), importances[indices],
color="r", yerr=std[indices], align="center")
plt.xticks(range(X_test.shape[1]), indices)
plt.xlim([-1, X_test.shape[1]])
plt.show()
| 4.Gene Expression Prediction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: venv_pyfolio
# language: python
# name: venv_pyfolio
# ---
import pandas as pd
#pip install git+https://github.com/quantopian/pyfolio
# import again if it does not work
import pyfolio
import matplotlib
matplotlib.use('Agg')
# %matplotlib inline
def get_daily_return(df):
df['daily_return']=df.account_value.pct_change(1)
#df=df.dropna()
print('Sharpe: ',(252**0.5)*df['daily_return'].mean()/ df['daily_return'].std())
return df
def backtest_strat(df):
strategy_ret= df.copy()
strategy_ret['Date'] = pd.to_datetime(strategy_ret['Date'])
strategy_ret.set_index('Date', drop = False, inplace = True)
strategy_ret.index = strategy_ret.index.tz_localize('UTC')
del strategy_ret['Date']
ts = pd.Series(strategy_ret['daily_return'].values, index=strategy_ret.index)
return ts
def get_account_value(model_name):
df_account_value=pd.DataFrame()
for i in range(rebalance_window+validation_window, len(unique_trade_date)+1,rebalance_window):
temp = pd.read_csv('results/account_value_trade_{}_{}.csv'.format(model_name,i))
df_account_value = df_account_value.append(temp,ignore_index=True)
df_account_value = pd.DataFrame({'account_value':df_account_value['0']})
sharpe=(252**0.5)*df_account_value.account_value.pct_change(1).mean()/df_account_value.account_value.pct_change(1).std()
print(sharpe)
df_account_value=df_account_value.join(df_trade_date[63:].reset_index(drop=True))
return df_account_value
# ## DJIA
dji = pd.read_csv("data/^DJI.csv")
test_dji=dji[(dji['Date']>='2016-01-01') & (dji['Date']<='2020-06-30')]
test_dji = test_dji.reset_index(drop=True)
test_dji.shape
test_dji.head()
test_dji['daily_return']=test_dji['Adj Close'].pct_change(1)
dow_strat = backtest_strat(test_dji)
# ## Ensemble Strategy
df=pd.read_csv('data/dow_30_2009_2020.csv')
rebalance_window = 63
validation_window = 63
unique_trade_date = df[(df.datadate > 20151001)&(df.datadate <= 20200707)].datadate.unique()
df_trade_date = pd.DataFrame({'datadate':unique_trade_date})
ensemble_account_value = get_account_value('ensemble')
ensemble_account_value.account_value.plot()
ensemble_account_value = get_daily_return(ensemble_account_value)
ensemble_account_value['Date'] = test_dji['Date']
ensemble_account_value.head()
ensemble_strat = backtest_strat(ensemble_account_value[0:1097])
# +
#ensemble_account_value.to_csv('results/df_account_value_ensemble_daily_return.csv',index=False)
# -
with pyfolio.plotting.plotting_context(font_scale=1.1):
pyfolio.create_full_tear_sheet(returns = ensemble_strat,
benchmark_rets=dow_strat, set_context=False)
| backtesting.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## NLP (Natural Language Processing) with Python
#
# **Summary**
#
# * Two class categorization problem
# * Training set : 200 training instances
# * Testing set : 100 test instances
# * Each document is one line of text
# * Fields are separated by the tab '\t' character
# > CLASS \t TITLE \t DATE \t BODY
#
# * CLASS is either +1 or -1
#
# <br/>
#
# **Objective**
#
# Predict the labels for the 100 test instances.
#
# ## Process
# Importing the NLTK package
import nltk
# +
# Download the stopwords
#nltk.download_shell()
# -
# ### Importing the Data
#
# The data sets needed for the process are included inside the `dataset` directory in the root.
#
# As the summary indicates, the documents are **TSV (Tab Separated Values)** files.
#
# Instead of parsing TSV manually using Python, I will take advantage of pandas.
# Importing the Pandas package
import pandas as pd
# Parse using read_csv
news = pd.read_csv('dataset/trainset.txt', sep='\t', names=['CLASS', 'TITLE', 'DATE', 'BODY'])
news.head()
# ### Exploratory Data Analysis
news.describe()
# Now we can use **groupby** to describe the data by *CLASS*; this way we can begin to think about the features that separate **+1** and **-1**.
news.groupby('CLASS').describe()
# In the training set we have 98 instances of **-1** class. The remaining 102 instances bear the class of **+1**.
#
# We have two instances of class -1 that do not have a body and another 10 instances of class +1 without a body.
#
# Also, class +1 contains 10 instances where no date is specified.
#
# All the class instances contain a title.
#
# > Therefore we can assume TITLE plays a bigger role when it comes to classifying these news articles.
# Now we have to check if the length of the body plays a part in the classification.
#
# First let's create an additional column containing the body length.
news['BODY LENGTH'] = news['BODY'].apply(len)
news.head()
# ### Data Visualization
# Importing the Visualization libraries
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
news['BODY LENGTH'].plot.hist(bins=50)
news['BODY LENGTH'].plot.hist(bins=150)
# According to the histograms above, most body lengths fall in the 0-1000 range, with a few article bodies exceeding 4000 characters.
# Overview of the Lengths
news['BODY LENGTH'].describe()
# Now we need to identify whether the BODY LENGTH has an effect on the CLASS classification.
news.hist(column='BODY LENGTH', by='CLASS', bins=60, figsize=(12,4))
# #### Using FacetGrid from the seaborn library to create a grid of 2 histograms of BODY LENGTH based off of the CLASS values.
g = sns.FacetGrid(news,col='CLASS')
g.map(plt.hist,'BODY LENGTH')
# #### Creating a boxplot of BODY LENGTH for each CLASS.
sns.boxplot(x='CLASS', y='BODY LENGTH', data=news, palette='rainbow')
# #### Creating a countplot of the number of occurrences for each type of CLASS.
sns.countplot(x='CLASS',data=news,palette='rainbow')
# As the histograms indicate, BODY LENGTH does not have a clearly distinguishable effect on classes -1 and +1.
#
# But we can observe that the CLASS -1 body lengths cluster closely around the 0-1000 mark, whereas the CLASS +1 body lengths are more spread out.
# ### Text Pre-processing
# The main issue with the dataset is that it consists of text data.
#
# Because of that, we need to pre-process it in order to convert the **corpus** to a **vector** format.
# Importing String library for remove punctuations
import string
# Importing Regular Expressions
import re
# Importing stop words
from nltk.corpus import stopwords
# Importing Stemming Library
from nltk.stem import PorterStemmer
stemmer = PorterStemmer()
# #### Text Processing Fuction
def text_process(mess):
"""
1. Remove punc
2. Remove numbers
3. Remove stop words + 'reuters' (News Network)
4. Stemming
5. Return list of clean text words
"""
text = [char for char in mess if char not in string.punctuation]
text = ''.join(text)
text = re.sub(r'\d+', ' ', text)
text = [word for word in text.split() if word.lower() not in stopwords.words('english')+['reuter']]
return [stemmer.stem(word) for word in text]
# ### Data Pipeline
# Now we need to vectorize the text, then train and evaluate a model. We could do this step by step, but the easiest way is to create a data pipeline. We will use scikit-learn's pipeline capabilities to store the workflow. This will allow us to set up all the transformations that we will apply to the data for future use.
# We will use **TF-IDF** for the term weighting and normalization.
# #### What is TF-IDF
# TF-IDF stands for *term frequency-inverse document frequency*, and the tf-idf weight is a weight often used in information retrieval and text mining. This weight is a statistical measure used to evaluate how important a word is to a document in a collection or corpus. The importance increases proportionally to the number of times a word appears in the document but is offset by the frequency of the word in the corpus. Variations of the tf-idf weighting scheme are often used by search engines as a central tool in scoring and ranking a document's relevance given a user query.
#
# One of the simplest ranking functions is computed by summing the tf-idf for each query term; many more sophisticated ranking functions are variants of this simple model.
#
# Typically, the tf-idf weight is composed by two terms: the first computes the normalized Term Frequency (TF), aka. the number of times a word appears in a document, divided by the total number of words in that document; the second term is the Inverse Document Frequency (IDF), computed as the logarithm of the number of the documents in the corpus divided by the number of documents where the specific term appears.
#
# **TF: Term Frequency**, which measures how frequently a term occurs in a document. Since every document is different in length, it is possible that a term would appear much more times in long documents than shorter ones. Thus, the term frequency is often divided by the document length (aka. the total number of terms in the document) as a way of normalization:
#
# *TF(t) = (Number of times term t appears in a document) / (Total number of terms in the document).*
#
# **IDF: Inverse Document Frequency**, which measures how important a term is. While computing TF, all terms are considered equally important. However it is known that certain terms, such as "is", "of", and "that", may appear a lot of times but have little importance. Thus we need to weigh down the frequent terms while scale up the rare ones, by computing the following:
#
# *IDF(t) = log_e(Total number of documents / Number of documents with term t in it).*
#
# See below for a simple example.
#
# **Example:**
#
# Consider a document containing 100 words wherein the word cat appears 3 times.
#
# The term frequency (i.e., tf) for cat is then (3 / 100) = 0.03. Now, assume we have 10 million documents and the word cat appears in one thousand of these. Then, the inverse document frequency (i.e., idf) is calculated as log(10,000,000 / 1,000) = 4. Thus, the Tf-idf weight is the product of these quantities: 0.03 * 4 = 0.12.
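# As a quick sanity check of the arithmetic above, the following snippet recomputes the tf-idf weight for the "cat" example by hand. This is only an illustrative sketch using base-10 logarithms, as in the example; scikit-learn's `TfidfTransformer` applies a smoothed variant of this formula.
# +
import math

def plain_tf_idf(term_count, doc_length, n_docs, n_docs_with_term):
    """Plain tf-idf as defined above: tf * log10(N / df)."""
    tf = term_count / float(doc_length)
    idf = math.log10(n_docs / float(n_docs_with_term))
    return tf * idf

# "cat" appears 3 times in a 100-word document; the corpus has 10 million
# documents, 1,000 of which contain "cat" -> 0.03 * 4 = 0.12
print(plain_tf_idf(3, 100, 10_000_000, 1_000))
# -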
# #### Pipeline Creation Process
# We will split the training data set into two parts, *training* and *test*, for model building and evaluation.
# Importing train_test_split package
from sklearn.model_selection import train_test_split
news_body_train, news_body_test, class_train, class_test = train_test_split(news['BODY'], news['CLASS'], test_size=0.3)
print(len(news_body_train), len(news_body_test), len(news_body_train) + len(news_body_test))
# Imporing CountVectorizer Package
from sklearn.feature_extraction.text import CountVectorizer
# Importing Tfidf Library
from sklearn.feature_extraction.text import TfidfTransformer
# Importing MultinomialNB
from sklearn.naive_bayes import MultinomialNB
# Importing Pipeline Package
from sklearn.pipeline import Pipeline
pipeline = Pipeline([
('bow', CountVectorizer(analyzer=text_process)),
('tfidf', TfidfTransformer()),
('classifier', MultinomialNB())
])
# Now we can directly pass news body data and the pipeline will do our pre-processing for us. We can treat it as a model/estimator API:
pipeline.fit(news_body_train,class_train)
predictions_eval = pipeline.predict(news_body_test)
# #### Let's make a simple evaluation by comparing the predictions with the real test set values
import numpy as np
np.asarray(class_test.tolist())
predictions_eval
# Now let's create a report
# Import classification report package
from sklearn.metrics import confusion_matrix,classification_report
from sklearn.metrics import accuracy_score
print(confusion_matrix(class_test, predictions_eval))
print('\n')
print(classification_report(class_test, predictions_eval))
print('\n')
print('Accuracy :', accuracy_score(class_test, predictions_eval))
# ### Comparing Models
# Now let's change the MultinomialNB to RandomForest and generate reports
# Importing RandomForestClassifier
from sklearn.ensemble import RandomForestClassifier
pipeline = Pipeline([
('bow', CountVectorizer(analyzer=text_process)),
('tfidf', TfidfTransformer()),
('classifier', RandomForestClassifier())
])
pipeline.fit(news_body_train,class_train)
predictions_eval = pipeline.predict(news_body_test)
print(confusion_matrix(class_test, predictions_eval))
print('\n')
print(classification_report(class_test, predictions_eval))
print('\n')
print('Accuracy :', accuracy_score(class_test, predictions_eval))
# **Conclusion: *RandomForestClassifier* offers better precision than *MultinomialNB* when it comes to CLASS +1**
# ### Can TITLE be used for News Classification?
# Here we will try to determine whether TITLE plays a role in news classification.
#
# We will use the pipelines with TITLE based test and train sets.
# **Step 1 :** Train Test Split
news_title_train, news_title_test, class_train, class_test = train_test_split(news['TITLE'], news['CLASS'], test_size=0.3)
# **Step 2 :** Determine the pipeline. We will use the MultinomialNB.
#
# **Step 3 :** Train the model.
pipeline.fit(news_title_train,class_train)
# **Step 4 :** Predict
predictions_eval = pipeline.predict(news_title_test)
# **Step 5 :** Generate Reports
print(confusion_matrix(class_test, predictions_eval))
print('\n')
print(classification_report(class_test, predictions_eval))
print('\n')
print('Accuracy :', accuracy_score(class_test, predictions_eval))
# ## Model Evaluation
# After a couple of runs we get accuracies like those below.
runs = [1, 2, 3, 4]
body_mdf_acc = [0.91, 0.85, 0.95, 0.95]
body_rnf_acc = [0.89, 0.82, 0.86, 0.92]
title_mdf_acc = [0.93, 0.9, 0.93, 0.85]
plt.plot(runs, body_mdf_acc, color='g')
plt.plot(runs, body_rnf_acc, color='orange')
plt.plot(runs, title_mdf_acc, color='blue')
plt.xticks(np.arange(min(runs), max(runs)+1, 1.0))
plt.xlabel('Runs')
plt.ylabel('Accuracy')
plt.title('Model Accuracy by Runs')
plt.show()
# #### Conclusion
#
# According to the graph, using the BODY content with MultinomialNB provides better predictions than the other combinations.
#
# Therefore we can predict the unlabeled test set as shown below.
# ### Predicting Test Labels
# Parse using read_csv
news_without_labels = pd.read_csv('dataset/testsetwithoutlabels.txt', sep='\t', names=['TITLE', 'DATE', 'BODY'])
news_without_labels.head()
pipeline = Pipeline([
('bow', CountVectorizer(analyzer=text_process)),
('tfidf', TfidfTransformer()),
('classifier', MultinomialNB())
])
pipeline.fit(news_body_train,class_train)
predictions_final = pipeline.predict(news_without_labels['BODY'])
predictions_final
# #### Writing Predictions to CSV File
result = pd.DataFrame(data={'CLASS': predictions_final, 'TITLE': news_without_labels['TITLE'], 'DATE': news_without_labels['DATE'], 'BODY': news_without_labels['BODY']})
result.to_csv(path_or_buf='Final_Prediction.csv', index = False, header = True)
# ## Overall Conclusion
#
# * For this NLP task, **MultinomialNB** performs better than **RandomForestClassifier**.
#
# * News articles cannot be classified by content length alone.
#
# * The **Title** and the **Body** of a news article are two useful features for classification.
#
# * Of these two features, the Body is more helpful for correctly predicting the category of an upcoming news article.
#
# ### About Class Labels
#
# After a careful analysis we can come to the conclusion that **CLASS -1** corresponds to **Economic** news and **CLASS +1** to **Non-Economic** news.
#
#
# ## Additional Material
#
# Download the full `jupyter notebook` using the following link or QR code.
#
# [https://github.com/DevDHera/Guide-to-NLP-with-Python](https://github.com/DevDHera/Guide-to-NLP-with-Python)
#
# <img src="./public/qr/QRcode.png">
| News-Classifier.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import scipy.stats as stats
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (15,10)
# # 1.
# Snake eyes: $$\frac{1}{6}\cdot\frac{1}{6} = \frac{1}{36}$$
# Sevens: $$P_{A+B}(7)=\sum_{z=1}^{6} P_A(z)P_B(7-z)=6\cdot\frac{1}{6}\cdot\frac{1}{6} =\frac{6}{36} = \frac{1}{6}$$
# Ratio of snake eyes to sevens: $$\frac{\frac{1}{36}}{\frac{1}{6}} = \frac{1}{6}$$
#
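# As a quick numerical check of the ratio above, the short Monte Carlo sketch below rolls two dice many times and compares the empirical frequencies of snake eyes and sevens (purely illustrative; the sample size is arbitrary).
# +
rng = np.random.default_rng(0)
rolls = rng.integers(1, 7, size=(1_000_000, 2))  # two fair six-sided dice
p_snake_eyes = np.mean((rolls[:, 0] == 1) & (rolls[:, 1] == 1))
p_seven = np.mean(rolls.sum(axis=1) == 7)
print(p_snake_eyes, p_seven, p_snake_eyes / p_seven)  # ~1/36, ~1/6, ratio ~1/6
# -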
# # 2.
# | | 1 | 2 | 3 | 4 | 5 | 6 |
# |---|---|---|---|----|----|----|
# | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
# | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
# | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
# | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
# | 5 | 6 | 7 | 8 | 9 | 10 | 11 |
# | 6 | 7 | 8 | 9 | 10 | 11 | 12 |
#
#
# The left column contains the number rolled by one die, and the top row contains the number rolled by the other die. The middle of the table has the sum of the two dice.
# $$P_{A+B}(x) = \sum_{z}P_A(z)P_B(x-z)\\
# P_{4} = P_1 P_3 + P_2 P_2 + P_3 P_1
# = \frac{1}{36} + \frac{1}{36}+ \frac{1}{36}
# = \frac{1}{12}$$
n = 2
die_pdf = np.ones(6) * 1/6
sum_prob = np.convolve(die_pdf, die_pdf)
sum_val = np.arange(n,6*n+1)
plt.bar(sum_val, sum_prob)
plt.xlabel('Sum of Dice Roll')
plt.ylabel('Probability')
plt.show()
# # 3.
#
mean = sum(sum_val*sum_prob)
variance = sum((sum_val-mean)**2 * sum_prob)
print(mean, variance)
# # 4.
#
n = 10
sum_prob = die_pdf
for i in range(n-1):
sum_prob = np.convolve(die_pdf, sum_prob)
sum_prob
sum_val = np.arange(n,6*n+1)
plt.step(sum_val/10, sum_prob)
plt.xlabel('Sum of Dice Roll')
plt.ylabel('Probability')
plt.show()
# +
sum_val = np.arange(n,6*n+1)/10
plt.step(sum_val, sum_prob)
plt.semilogy()
plt.xlabel('Sum of Dice Roll')
plt.ylabel('Probability')
plt.show()
# +
sum_val = np.arange(n,6*n+1)/10
plt.bar(sum_val, sum_prob)
plt.semilogy()
plt.xlabel('Sum of Dice Roll')
plt.ylabel('Probability')
plt.show()
# -
# Yes, the distribution is approximately Gaussian: when plotted with a log y-axis it takes the shape of an upside-down parabola. On the step plot it looks asymmetric, but the bar plot shows that the tails are actually symmetric.
# # 5.
gaussian_pdf = []
x = np.linspace(-4, 4, num=50)
for i in range(50):
gaussian_pdf.append(stats.norm.pdf(x[i]))
gaus_conv = np.convolve(gaussian_pdf, gaussian_pdf)
x_1 = np.linspace(-8, 8, num=len(gaus_conv))
plt.step(x_1, gaus_conv)
plt.semilogy()
plt.show()
x_2 = x_1/2
plt.step(x_2, gaus_conv)
plt.semilogy()
plt.show()
# +
mean = sum(x*gaussian_pdf)
variance = sum((x-mean)**2 * gaussian_pdf)
mean_1 = sum(x_1*gaus_conv)
variance_1 = sum((x_1-mean_1)**2 * gaus_conv)
mean_2 = sum(x_2*gaus_conv)
variance_2 = sum((x_2-mean_2)**2 * gaus_conv)
print(mean, mean_1, mean_2)
print(variance, variance_1, variance_2)
# -
# The standard deviation increases when two Gaussians are convolved. Even when the convolution is averaged, the standard deviation is still higher than the original Gaussian's standard deviation. This means that integrating a signal for longer periods of time increases the noise which increases the standard deviation. So for longer periods of time, it is more likely that you will get a signal-like reading from the noise.
| HW2/HW2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # <span style="color:orange">Anomaly Detection Tutorial (ANO102) - Level Intermediate</span>
# **Date Updated: Feb 25, 2020**
#
# # Work in progress
# We are currently working on this tutorial. Please check back soon!
#
# ### In the mean time, you can see:
# - __[Anomaly Detection Tutorial (ANO101) - Level Beginner](https://github.com/pycaret/pycaret/blob/master/Tutorials/Anomaly%20Detection%20Tutorial%20Level%20Beginner%20-%20ANO101.ipynb)__
| tutorials/Anomaly Detection Tutorial Level Intermediate - ANO102.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Loading the data
# +
import pandas as pd
df = pd.read_csv('c:\\til\\covid19_seoul.csv', encoding='euc-kr')
df.head()
# -
df.shape
# # Creating a new table with only the columns we need
sample = df[['확진일', '지역']]
sample.head()
sample.tail()
# 1. Confirmed cases by month (see the sketch after this list)
# 2. Confirmed cases by district
# 3. Confirmed cases by month and district
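# A minimal sketch of goal 1 (confirmed cases per month), assuming the '확진일' (confirmation date) column can be parsed by `pd.to_datetime`; the actual date format in this CSV may require an explicit `format=` argument.
# +
monthly_counts = (
    pd.to_datetime(sample['확진일'], errors='coerce')
      .dt.to_period('M')
      .value_counts()
      .sort_index()
)
print(monthly_counts)
# -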
# # Fixing typos
# - 종랑구 -> 중랑구 (misspelled district name)
# - 한국, 기타 -> 타시도 (map 'Korea' and 'other' to 'other region')
# +
region = []
for i in range(len(sample['지역'])):
if sample['지역'][i] == '종랑구':
sample['지역'][i] = '중랑구'
if sample['지역'][i] == '한국':
sample['지역'][i] = '타시도'
if sample['지역'][i] == '기타':
sample['지역'][i] = '타시도'
if sample['지역'][i] not in region:
region.append(sample['지역'][i])
region
# -
# - 25 districts + 타시도 (other regions)
len(region)
# - Sort the district list
region.sort()
print(region)
n = 0
for i in range(len(sample)):
    print(sample.iloc[i])  # sample[:][i] would raise a KeyError; iloc selects rows by position
n += 1
if n > 10:
break
sample.head()
| .ipynb_checkpoints/20201024_data_analysis_with_covid19-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Candy Trade
# This notebook contains all data and code to replicate our candy trade analyses. Every participant of the tutorial received a handful of candy. They then conducted an experiment exploring the impact of candy trading on their candy selection happiness:
#
# 1. **Pre-trade**: Participants were asked to rate the happiness of their candy selection on a scale from 1-10 (trade 0).
#
# 2. **Trade 1**: Participants were then allowed to trade with one participant and rate the happiness with their selection following the trade on a scale from 1-10 (trade 1).
#
# 3. **Trade 2**: Participants were then allowed to trade with the whole group and rate their happiness with their final selection on a scale from 1-10 (trade 2).
# We will start the analyses of our candy trade data by importing the necessary packages.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# Our data was collected using a [google form](https://goo.gl/forms/Mxi2fKrOYc9UCB9j1). For each trade, the participants contributed their name (*"participant_name"*), which trade they had just finished (*"trade_number"*), and their current happiness with their candy selection (*"happiness_rating"*).
data = pd.read_csv('../data/data.csv')
data
# ## Happiness rating vs. number of trades
# To explore the trends in individual candy selection happiness over the trades, we created a simple line plot.
# +
fig, ax = plt.subplots()
for key, grp in data.groupby(['participant_name']):
ax = grp.plot(ax=ax, kind='line', x='trade_number', y='happiness_rating', label=key)
plt.legend(loc='best')
plt.xlabel('Number of trades')
plt.ylabel('Happiness Rating')
plt.title('Happiness of individuals with candy selection vs. number of candy trades')
plt.show()
# -
# ## Changes in candy selection happiness of workshop group
# Finally, we explored whether happiness changed significantly as a group over the trades using a boxplot.
data.boxplot('happiness_rating', by='trade_number', figsize=(12, 8))
| candy_trade.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text"
# # Convolutional autoencoder for image denoising
#
# **Author:** [<NAME>](https://twitter.com/svpino)<br>
# **Date created:** 2021/03/01<br>
# **Last modified:** 2021/03/01<br>
# **Description:** How to train a deep convolutional autoencoder for image denoising.
# + [markdown] colab_type="text"
# ## Introduction
#
# This example demonstrates how to implement a deep convolutional autoencoder
# for image denoising, mapping noisy digits images from the MNIST dataset to
# clean digits images. This implementation is based on an original blog post
# titled [Building Autoencoders in Keras](https://blog.keras.io/building-autoencoders-in-keras.html)
# by [<NAME>](https://twitter.com/fchollet).
# + [markdown] colab_type="text"
# ## Setup
# + colab_type="code"
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras import layers
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Model
def preprocess(array):
"""
Normalizes the supplied array and reshapes it into the appropriate format.
"""
array = array.astype("float32") / 255.0
array = np.reshape(array, (len(array), 28, 28, 1))
return array
def noise(array):
"""
Adds random noise to each image in the supplied array.
"""
noise_factor = 0.4
noisy_array = array + noise_factor * np.random.normal(
loc=0.0, scale=1.0, size=array.shape
)
return np.clip(noisy_array, 0.0, 1.0)
def display(array1, array2):
"""
Displays ten random images from each one of the supplied arrays.
"""
n = 10
indices = np.random.randint(len(array1), size=n)
images1 = array1[indices, :]
images2 = array2[indices, :]
plt.figure(figsize=(20, 4))
for i, (image1, image2) in enumerate(zip(images1, images2)):
ax = plt.subplot(2, n, i + 1)
plt.imshow(image1.reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax = plt.subplot(2, n, i + 1 + n)
plt.imshow(image2.reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.show()
# + [markdown] colab_type="text"
# ## Prepare the data
# + colab_type="code"
# Since we only need images from the dataset to encode and decode, we
# won't use the labels.
(train_data, _), (test_data, _) = mnist.load_data()
# Normalize and reshape the data
train_data = preprocess(train_data)
test_data = preprocess(test_data)
# Create a copy of the data with added noise
noisy_train_data = noise(train_data)
noisy_test_data = noise(test_data)
# Display the train data and a version of it with added noise
display(train_data, noisy_train_data)
# + [markdown] colab_type="text"
# ## Build the autoencoder
#
# We are going to use the Functional API to build our convolutional autoencoder.
# + colab_type="code"
input = layers.Input(shape=(28, 28, 1))
# Encoder
x = layers.Conv2D(32, (3, 3), activation="relu", padding="same")(input)
x = layers.MaxPooling2D((2, 2), padding="same")(x)
x = layers.Conv2D(32, (3, 3), activation="relu", padding="same")(x)
x = layers.MaxPooling2D((2, 2), padding="same")(x)
# Decoder
x = layers.Conv2DTranspose(32, (3, 3), strides=2, activation="relu", padding="same")(x)
x = layers.Conv2DTranspose(32, (3, 3), strides=2, activation="relu", padding="same")(x)
x = layers.Conv2D(1, (3, 3), activation="sigmoid", padding="same")(x)
# Autoencoder
autoencoder = Model(input, x)
autoencoder.compile(optimizer="adam", loss="binary_crossentropy")
autoencoder.summary()
# + [markdown] colab_type="text"
# Now we can train our autoencoder using `train_data` as both our input data
# and target. Notice we are setting up the validation data using the same
# format.
# + colab_type="code"
autoencoder.fit(
x=train_data,
y=train_data,
epochs=50,
batch_size=128,
shuffle=True,
validation_data=(test_data, test_data),
)
# + [markdown] colab_type="text"
# Let's predict on our test dataset and display the original image together with
# the prediction from our autoencoder.
#
# Notice how the predictions are pretty close to the original images, although
# not quite the same.
# + colab_type="code"
predictions = autoencoder.predict(test_data)
display(test_data, predictions)
# + [markdown] colab_type="text"
# Now that we know that our autoencoder works, let's retrain it using the noisy
# data as our input and the clean data as our target. We want our autoencoder to
# learn how to denoise the images.
# + colab_type="code"
autoencoder.fit(
x=noisy_train_data,
y=train_data,
epochs=100,
batch_size=128,
shuffle=True,
validation_data=(noisy_test_data, test_data),
)
# + [markdown] colab_type="text"
# Let's now predict on the noisy data and display the results of our autoencoder.
#
# Notice how the autoencoder does an amazing job at removing the noise from the
# input images.
# + colab_type="code"
predictions = autoencoder.predict(noisy_test_data)
display(noisy_test_data, predictions)
| examples/vision/ipynb/autoencoder.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Importing the Libraries
# First, we will load the libraries that we will use to conduct the analysis.
# +
# Basic imports
import pandas as pd
import numpy as np
# Visualization libraries
import matplotlib.pyplot as plt
import seaborn as sns
# Sentiment analysis
# #!pip install TextBlob
from textblob import TextBlob
# -
# Now, we will load the data. In total, we have 56 files (Kickstarter000 through Kickstarter055) that need to be loaded and merged. We will do this in the next step.
# +
df = pd.read_csv("data/Kickstarter000.csv")
for i in range(1,56):
num = str(1000 + i)
dfi = pd.read_csv(f"data/Kickstarter{num[1:4]}.csv")
df = pd.concat([df,dfi], axis = 0)
df.reset_index()
df.to_csv("data/kickstarter_raw.csv")
# -
# The new data set contains 209222 observations and 37 columns.
df = pd.read_csv("data/kickstarter_raw.csv", index_col=0)
# ### Data Preparation
# First, we need to get an overview of the data to see where data cleaning is needed.
df.info()
# - Since we have some variables of type "object" in the data set, which probably contain relevant information, we will explore them in more detail later.
#
# - As we can see from above, the variables **"blurb", "friends", "is_backing", "is_starred", "location", "permissions" and "usd_type"** contain missing values. The variables **"friends", "is_backing", "is_starred" and "permissions"** have more than 90% missing values. We will delete them from our dataset and take care of the other variables with missing values later.
#
# - Further variables that can be deleted, as they do not carry any valuable information for our analysis, are **"currency_symbol", "currency_trailing_code", "photo", "source_url", "urls", "state_changed_at", "spotlight", "disable_communication", "is_starrable"**. In the next step, we will delete them together with the variables that have more than 90% missing values.
#
# +
# Exclusion of the following columns from our data
df = df.drop(["currency_symbol",
"currency_trailing_code",
"friends",
"is_backing",
"is_starred",
"permissions",
"photo",
"source_url",
"urls",
"state_changed_at",
"spotlight",
"disable_communication",
"is_starrable"], axis=1)
# -
# Further, we can see that the type of the columns **"created_at", "deadline" and "launched_at"** is not correct (integer instead of datetime format). In the next step, we will change that.
#
# In addition, we will extract the year, month and weekday from the variables **"launched_at"** and **"deadline"**, since we will explore these in more depth later on.
# +
# convert unix date time to pandas datetime format
df['created_at'] = pd.to_datetime(df['created_at'],unit='s')
df['deadline'] = pd.to_datetime(df['deadline'],unit='s')
df['launched_at'] = pd.to_datetime(df['launched_at'],unit='s')
# pick out year, month and weekday out of date of launching
df['yr_launched'] = df['launched_at'].dt.year
df['mo_launched'] = df['launched_at'].dt.month
df['wd_launched'] = df['launched_at'].dt.weekday
# pick out year, month and weekday out of date of deadline
df['yr_deadline'] = df['deadline'].dt.year
df['mo_deadline'] = df['deadline'].dt.month
df['wd_deadline'] = df['deadline'].dt.weekday
# -
# For our analysis it could also be interesting to investigate the duration between the launch date and the deadline in more depth. Thus, we will also calculate the duration between the aforementioned variables.
# +
# calculate duration of founding_phase (time between launching and deadline)
founding_phase = df['deadline'] - df['launched_at']
df['founding_phase'] = founding_phase.dt.days
# calculate duration of phase_before (time between creation and launcing)
phase_before = df['launched_at'] - df['created_at']
df['phase_before'] = phase_before.dt.days
# -
# The original variables **"deadline" and "launched_at"** are no longer needed. We can delete them.
# drop columns created_at, deadline, launched_at
df = df.drop(["created_at", "deadline", "launched_at"], axis=1)
# Now let's have a look at our target variable **"state"**. This variable contains info about the outcome of the project (failed or successful). Before we encode "state", we will explore this variable in more depth.
# Counts
df.state.value_counts()
# We can see the variable "state" has 5 levels. Overall, most of the observations are labeled as "successful". For our research question, only the levels "successful" and "failed" are relevant, so we exclude all other levels.
# +
df = df.query('state in ["successful", "failed"]')
# Prove if it works
df.state.value_counts()
# Shape of df
df.shape
# -
# After dropping rows and columns, the data set contains 192664 observations and 29 columns.
#
# Next, we will encode the **"state"** and **"staff_pick"** variables as dummy variables, as this makes it easier to work with them.
#
#
# +
# Encoding of the state (target) variable:
df['state'] = df['state'].apply(lambda x: 1 if x == 'successful' else 0)
# 0 = failed
# 1 = sucessful
# -
df['staff_pick'] = df['staff_pick'].apply(lambda x: 1 if x == True else 0)
# Now, let's have a look at the frequency distribution of the individual variables in our data set. For this purpose, we will use histograms.
df.hist(bins=50, figsize = (20,15));
# A first glance at the distributions reveals that:
#
# - Many variables such as **"phase_before"** have outliers. As outliers can strongly affect our analysis, we will exclude these data points later.
# - Some variables, like **"mo_launched"** and **"staff_pick"**, are categorical, and others, such as **"founding_phase"**, are numeric.
#
# We have to calculate the goal in USD using **"goal"** and **"static_usd_rate"**.
df["goal_usd"] = df["goal"]*df["static_usd_rate"]
# As outliers can strongly affect our analysis we exclude the highest 1.5% of our data.
# +
# get 98.5% quantile (highest 1.5%) of 'phase_before', 'usd_pledged', 'goal_usd'
q_hi1 = df["phase_before"].quantile(0.985)
q_hi2 = df["usd_pledged"].quantile(0.985)
q_hi3 = df["goal_usd"].quantile(0.985)
# remove highest 1.5% percent of the data with the 98.5% quantiles
df = df[(df["phase_before"] < q_hi1) & (df["usd_pledged"] < q_hi2) & (df["goal_usd"] < q_hi3)]
# -
#
# Now, we come back to our variables that contain missing values.
df.isnull().sum()
# As shown above, the variables **"location"** and **"usd_type"** contain a relatively small number of missing values. Since we are working with a large data set, deleting these rows should not be a problem.
# Drop rows which contain any NaN value in the selected columns
df = df.dropna( how='any',
subset=['location', 'usd_type', "blurb"])
df.info()
# Now we will explore variables of the type "object".
df[["blurb", "category", "country", "currency", "current_currency", "name", "profile", "slug", "usd_type", "location", "creator"]].head()
# The column **"creator", category" and "location"** contain dictionaries. We would like to extract the content of each column into separate columns. The **"profile"** coulmn contains a dictionary as well but the information provided is not useful for our analysis. Later on, we will drop this column.
df["category_dict"] = df["category"].apply(eval)
df_category = df["category_dict"].apply(pd.Series)
df_category.head()
df_category["main_category"] = df_category["slug"].str.split("/").str[0]
df_category.head()
df["main_category"] = df_category["main_category"]
df["subcategory"] = df_category["name"]
df = df.drop(["category", "category_dict"], axis=1)
# +
df["location"].fillna("{}", inplace = True)
df["location"] = df["location"].str.replace(',"is_root":false', "")
df["location"] = df["location"].str.replace(',"is_root":true', "" )
df["location"] = df["location"].str.replace('null', "{}" )
df["location_dict"] = df["location"].apply(eval)
df_location = df["location_dict"].apply(pd.Series)
df_location.head()
df["city"] = df_location["name"]
df["region"] = df_location["state"]
df["city_type"] = df_location["type"]
# -
df = df.drop(["location", "location_dict"], axis=1)
# +
df["creator_id"] = df["creator"].str[6:22]
df["creator_id"] = df["creator_id"].str.replace('"', "")
df["creator_id"] = df["creator_id"].str.split(",").str[0]
df["creator_id"] = df["creator_id"].astype(int)
df_creator = df[["creator_id","state"]].groupby("creator_id").count()
df_creator.rename(columns = {"state": "creator_num_projects"}, inplace = True)
df_creator.reset_index()
df = df.merge( df_creator, how = "left", on = "creator_id")
# -
df.info()
# The description of the project (**"blurb"**) could be a predictor of successful or failed projects. We will investigate this variable further. First, we will extract the number of words and then perform sentiment analysis.
# Extract number of words
df['words_blurb'] = df['blurb'].apply(lambda x: len(str(x).split()))
# Sentiment analysis
a = TextBlob(df.loc[1000, "blurb"])
print(df.loc[1000, "blurb"])
a.sentiment
df['polarity_blurb'] = df['blurb'].apply(lambda x: TextBlob(str(x)).sentiment[0])
df['subjectivity_blurb'] = df['blurb'].apply(lambda x: TextBlob(str(x)).sentiment[1])
# Finally, we will delete variables that are no longer needed and save the cleaned data.
df = df.drop(["creator",
"blurb",
"static_usd_rate",
"fx_rate",
"currency",
"current_currency",
"pledged",
"goal",
"profile",
"converted_pledged_amount",
"slug"], axis=1)
df.to_csv("data/data_clean.csv")
df = pd.read_csv("data/data_clean.csv", index_col=0)
| 01_data_clean.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import pandas as pd
import numpy as np
# # Salads
# * ***Main***
df = pd.read_csv('SLD_main.csv')
df1 = pd.read_csv('SLD_main_1.csv')
df2 = pd.read_csv('SLD_main_2.csv')
df3 = pd.read_csv('SLD_main_3.csv')
print df.shape
df.head(3)
df = df.drop('Unnamed: 0', 1)
print df1.shape
df1 = df1.drop('Unnamed: 0', 1)
df1.head(3)
print df2.shape
df2 = df2.drop('Unnamed: 0', 1)
df2.head(3)
print df3.shape
df3 = df3.drop('Unnamed: 0', 1)
df3.head(3)
#concatenate the main tables.
SLD_main= pd.concat([df, df1, df2, df3])
#create a new dataframe with selected columns
SLD_main_reduced = SLD_main.drop(['recipeName', 'sourceDisplayName'], axis = 1)
#peek at dataframe
print SLD_main.shape
SLD_main.head(3)
for i in SLD_main.duplicated('id'):
if i == True:
print i
SLD_main = SLD_main.drop_duplicates('id')
SLD_main.shape
# * ***Flavors***
fdf = pd.read_csv('SLD_flavors.csv')
fdf1 = pd.read_csv('SLD_flavors_1.csv')
fdf2 = pd.read_csv('SLD_flavors_2.csv')
fdf3 = pd.read_csv('SLD_flavors_3.csv')
print fdf.shape
fdf = fdf.drop('Unnamed: 0', 1)
fdf = fdf.rename(columns = {'index':'id'})
fdf.head(3)
print fdf1.shape
fdf1 = fdf1.drop('Unnamed: 0', 1)
fdf1 = fdf1.rename(columns = {'index':'id'})
fdf1.head(3)
print fdf2.shape
fdf2 = fdf2.drop('Unnamed: 0', 1)
fdf2 = fdf2.rename(columns = {'index':'id'})
fdf2.head(3)
print fdf3.shape
fdf3 = fdf3.drop('Unnamed: 0', 1)
fdf3 = fdf3.rename(columns = {'index':'id'})
fdf3.head(3)
#concatenate the flavors tables.
SLD_flavors= pd.concat([fdf, fdf1, fdf2, fdf3])
#peek at dataframe
print SLD_flavors.shape
SLD_flavors.head(3)
for i in SLD_flavors.duplicated('id'):
if i == True:
print i
SLD_flavors = SLD_flavors.drop_duplicates('id')
SLD_flavors.shape
# * ***Cuisine***
cdf = pd.read_csv('SLD_cuisines.csv')
cdf1 = pd.read_csv('SLD_cuisines_1.csv')
cdf2 = pd.read_csv('SLD_cuisines_2.csv')
cdf3 = pd.read_csv('SLD_cuisines_3.csv')
print cdf.shape
cdf = cdf.drop('Unnamed: 0', 1)
cdf = cdf.rename(columns = {'index':'id'})
print cdf.columns
cdf.head(3)
print cdf1.shape
cdf1 = cdf1.drop('Unnamed: 0', 1)
cdf1 = cdf1.rename(columns = {'index':'id'})
print cdf1.columns
cdf1.head(3)
print cdf2.shape
cdf2 = cdf2.drop('Unnamed: 0', 1)
cdf2 = cdf2.rename(columns = {'index':'id'})
print cdf2.columns
cdf2.head(3)
print cdf3.shape
cdf3 = cdf3.drop('Unnamed: 0', 1)
cdf3 = cdf3.rename(columns = {'index':'id'})
print cdf3.columns
cdf3.head(3)
#concatenate the cuisine tables.
SLD_cuisines= pd.concat([cdf, cdf1, cdf2, cdf3])
#peek at dataframe
print SLD_cuisines.shape
SLD_cuisines.head(3)
for i in SLD_cuisines.duplicated('id'):
if i == True:
print i
SLD_cuisines = SLD_cuisines.drop_duplicates('id')
SLD_cuisines.shape
# * ***Details***
ddf = pd.read_csv('SLD_details.csv')
ddf1 = pd.read_csv('SLD_details_1.csv')
ddf2 = pd.read_csv('SLD_details_2.csv')
ddf3 = pd.read_csv('SLD_details_3.csv')
print ddf.shape
ddf = ddf.drop('Unnamed: 0', 1)
print ddf.columns
ddf.head(3)
print ddf1.shape
ddf1 = ddf1.drop('Unnamed: 0', 1)
print ddf1.columns
ddf1.head(3)
print ddf2.shape
ddf2 = ddf2.drop('Unnamed: 0', 1)
print ddf2.columns
ddf2.head(3)
print ddf3.shape
ddf3 = ddf3.drop('Unnamed: 0', 1)
print ddf3.columns
ddf3.head(3)
#concatenate the details tables.
SLD_details= pd.concat([ddf, ddf1, ddf2, ddf3])
#peek at dataframe
print SLD_details.shape
SLD_details.head(3)
for i in SLD_details.duplicated('id'):
if i == True:
print i
SLD_details = SLD_details.drop_duplicates('id')
SLD_details.shape
# * ***Ingredients***
idf = pd.read_csv('SLD_ingredients.csv')
idf1 = pd.read_csv('SLD_ingredients_1.csv')
idf2 = pd.read_csv('SLD_ingredients_2.csv')
idf3 = pd.read_csv('SLD_ingredients_3.csv')
print idf.shape
idf = idf.drop('Unnamed: 0', 1)
print idf.columns
idf.head(3)
print idf1.shape
idf1 = idf1.drop('Unnamed: 0', 1)
print idf1.columns
idf1.head(3)
print idf2.shape
idf2 = idf2.drop('Unnamed: 0', 1)
print idf2.columns
idf2.head(3)
print idf3.shape
idf3 = idf3.drop('Unnamed: 0', 1)
print idf3.columns
idf3.head(3)
#concatenate the ingredients tables.
SLD_ing= pd.concat([idf, idf1, idf2, idf3])
#create a new dataframe with selected columns
SLD_ing_reduced = SLD_ing[['id', 'ingredient_list']]
SLD_ing.head(3)
#drop unnamed column & make id first column
cols = list(SLD_ing)
cols.insert(0, cols.pop(cols.index('id')))
SLD_ing = SLD_ing.ix[:, cols]
SLD_ing.head(3)
for i in SLD_ing.duplicated('id'):
if i == True:
print i
SLD_ing = SLD_ing.drop_duplicates('id')
SLD_ing.shape
# ### Join all tables for Salads
# +
# set index to column 'id'
_df = [SLD_main, SLD_main_reduced, SLD_cuisines, SLD_flavors, SLD_details, SLD_ing, SLD_ing_reduced]
for df in _df:
df.set_index('id', inplace = True)
# -
# join dataframes
SLD_data = SLD_main.join([SLD_cuisines, SLD_flavors, SLD_details, SLD_ing])
SLD_data_reduced = SLD_main_reduced.join([SLD_flavors, SLD_details, SLD_ing_reduced])
# create a course column
SLD_data['course'] = 'salad'
SLD_data_reduced['course'] = 'salad'
SLD_data.head(3)
#Save into a csv file
SLD_data.to_csv('SLD_data.csv')
SLD_data_reduced.to_csv('SLD_data_reduced.csv')
| SALAD/SLD_CONCATENATE_DATAFRAMES.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.8 ('base')
# language: python
# name: python3
# ---
# # OPICS Multiprocessing
#
# OPICS support multiprocessing for faster simulations when working with large circuits.
import multiprocessing as mp
import opics as op
import numpy as np
import pandas as pd
import time
# ## Enabling multiprocessing
#
# ### Option 1: Using `mp_config`
#
# OPICS multiprocessing can be enabled using the `mp_config` argument in `opics.Network` module.
circuit = op.Network(network_id="MRR_arr", mp_config={"enabled": True, "proc_count": 0, "close_pool": True})
# ### Option 2: Using `enable_mp`
# OPICS multiprocessing can also be enabled by calling the `enable_mp` function in `opics.Network` module.
circuit = op.Network(network_id = "MRR_arr")
circuit.enable_mp( process_count = 0,
close_pool = True)
# ## Disable multiprocessing
#
# OPICS multiprocessing can be disabled by calling the `disable_mp` function.
circuit.disable_mp()
# ## Example: Multiple ring resonators coupled to a waveguide
#
# In order to see how multiprocessing can help speed-up simulations, let's create a circuit with `n` number of ring resonators coupled to a waveguide.
#
# <img style="width:50%;height:10%;" src="../_static/_images/micro_ring_array.jpg">
#
# Without multiprocessing
#
# We will be using `bulk_add_component` to add multiple components to the network.
components = op.libraries.ebeam
from opics.network import bulk_add_component
# +
timer_start = time.perf_counter()
circuit = op.Network()
n_rings = 1500
_components_data = []
for count in range(n_rings):
_components_data.append(
{"component": components.DC_halfring,
"params": {"f": circuit.f},
"component_id": f"dc_{count}"})
_components_data.append(
{"component": components.Waveguide,
"params": {"f": circuit.f, "length": np.pi * 5e-6},
"component_id": f"wg_{count}"})
bulk_add_component(circuit, _components_data)
circuit.add_component(components.GC, component_id="input")
circuit.add_component(components.GC, component_id="output")
# bulk connect
prev_comp = ""
for count in range(n_rings):
if count == 0:
circuit.connect("input", 1, f"dc_{count}", 0)
circuit.connect(f"dc_{count}", 1, f"wg_{count}", 0)
circuit.connect(f"wg_{count}", 1, f"dc_{count}", 3)
prev_comp = "dc_0"
elif count >= 1:
circuit.connect(prev_comp, 2, f"dc_{count}", 0)
circuit.connect(f"dc_{count}", 1, f"wg_{count}", 0)
circuit.connect(f"wg_{count}", 1, f"dc_{count}", 3)
prev_comp = f"dc_{count}"
circuit.connect(prev_comp, 2, "output", 1)
circuit.simulate_network()
timer_stop = time.perf_counter()
sim_time = timer_stop - timer_start
print(f"simulation finished in {sim_time} s")
# -
# With multiprocessing enabled
# +
timer_start = time.perf_counter()
circuit = op.Network()
circuit.enable_mp()
n_rings = 1500
_components_data = []
for count in range(n_rings):
_components_data.append(
{"component": components.DC_halfring,
"params": {"f": circuit.f},
"component_id": f"dc_{count}"})
_components_data.append(
{"component": components.Waveguide,
"params": {"f": circuit.f, "length": np.pi * 5e-6},
"component_id": f"wg_{count}"})
bulk_add_component(circuit, _components_data)
circuit.add_component(components.GC, component_id="input")
circuit.add_component(components.GC, component_id="output")
# bulk connect
prev_comp = ""
for count in range(n_rings):
if count == 0:
circuit.connect("input", 1, f"dc_{count}", 0)
circuit.connect(f"dc_{count}", 1, f"wg_{count}", 0)
circuit.connect(f"wg_{count}", 1, f"dc_{count}", 3)
prev_comp = "dc_0"
elif count >= 1:
circuit.connect(prev_comp, 2, f"dc_{count}", 0)
circuit.connect(f"dc_{count}", 1, f"wg_{count}", 0)
circuit.connect(f"wg_{count}", 1, f"dc_{count}", 3)
prev_comp = f"dc_{count}"
circuit.connect(prev_comp, 2, "output", 1)
circuit.simulate_network()
timer_stop = time.perf_counter()
sim_time = timer_stop - timer_start
print(f"simulation finished in {sim_time} s")
# -
# With multiprocessing enabled, we can get speed-ups of up to 30-40%.
# Here are multiprocessing results for different values of `n` on an `AMD Ryzen 9 5900X 12-Core Processor`.
df = pd.read_pickle("../_static/_data/mp_log_data.pkl")
df
# For smaller circuits, there is a `penalty` for creating sub-processes. This is something to keep in mind when using OPICS multiprocessing. However, for large circuits, we observed `speed-ups of up to 45%`.
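# Since the pool start-up penalty only pays off for larger circuits, one simple pattern is to gate `enable_mp` on the circuit size. The helper below is just an illustrative sketch; the 500-component threshold is an arbitrary assumption, not an OPICS recommendation.
def enable_mp_if_large(circuit, n_components, threshold=500):
    """Enable OPICS multiprocessing only when the circuit is large enough
    to amortize the cost of creating the worker processes."""
    if n_components > threshold:
        circuit.enable_mp()
    return circuit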
| docs/source/notebooks/03-Multiprocessing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 1. Iterate over a list
import pandas as pd
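# Note: iterating over a DataFrame (or calling list() on one) yields its
# column labels, so car_models below will hold the header row of
# car_models.csv rather than the values stored in its rows.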
car_models = list(pd.read_csv("car_models.csv"))
car_models
list_1 = [x for x in car_models]
for i in range(0, len(list_1)):
print(list_1[i])
for i in list_1:
print(i)
# 2. Check whether the values "D150" and "Mustang" are in the list using the in operator
"D150" in list_1
"Mustang" in list_1
| Chapter01/.ipynb_checkpoints/Exercise 1.03-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import pandas as pd
import json
import os
import matplotlib.pyplot as plt
import seaborn as sb
import matplotlib
from steves_utils.summary_utils import (
get_experiments_from_path
)
from steves_utils.utils_v2 import (
get_experiments_base_path
)
# +
experiments_to_get = [
"oracle.run1.framed-cores_wisig",
"cores_wisig-oracle.run1.framed",
]
experiments = {}
for experiment in experiments_to_get:
print(experiment)
experiments[experiment] = get_experiments_from_path(
os.path.join(get_experiments_base_path(), "tl_2v2", experiment)
)
# +
all_trials = pd.DataFrame(columns=[
"experiment_name",
"source_val_label_accuracy",
"target_val_label_accuracy",
"x_transform",
])
# x_transform_mapping = {
# tuple(["unit_power"]):"Unit Power",
# tuple(["unit_mag"]): "Unit Magnitude",
# tuple([]): "None"
# }
for experiment in experiments_to_get:
for trial in experiments[experiment]:
datasets = trial["parameters"]["datasets"]
x_transforms = set()
for ds in datasets:
trans = str(ds["x_transforms"])
if "unit_mag" in trans: x_transforms.add("unit_mag")
if "unit_power" in trans: x_transforms.add("unit_power")
if "unit_mag" not in trans and "unit_power" not in trans:
x_transforms.add("none")
assert len(x_transforms) == 1
# transforms = list(set( ( tuple() for d in datasets) ))
# assert len(transforms) == 1
f = pd.DataFrame(trial["results"])
f["experiment_name"] = experiment
f["x_transform"] = x_transforms.pop()
# f["x_transform"] = "A"
f = f[all_trials.columns]
f = f.iloc[0] # Unknown why, but pandas is repeating trials for each domain in the trial!
all_trials = all_trials.append(f)
all_trials = all_trials.reset_index(drop=True)
all_trials
# -
m = pd.melt(all_trials,
id_vars=["experiment_name", "x_transform"],
value_vars=[
"source_val_label_accuracy",
"target_val_label_accuracy",
])
m
# +
matplotlib.rcParams.update({'font.size': 22})
plt.style.use('seaborn-whitegrid')
fg = sb.catplot(x='x_transform', col="experiment_name", y='value', hue='variable',
data=m, kind='bar', height=5, aspect=3, col_wrap=2, edgecolor = "black")
fg.set_xlabels('')
plt.figure(figsize=(15,50))
# iterate through axes
for ax in fg.axes.ravel():
ax.tick_params(labelbottom=True)
# add annotations
for c in ax.containers:
labels = [f'{(v.get_height()):.2f}' for v in c]
ax.bar_label(c, labels=labels, label_type='edge')
ax.margins(y=0.2)
# +
# Pick the best trials based on target (on a per experiment+x_transform basis)
idx = all_trials.groupby(["experiment_name", "x_transform"])["target_val_label_accuracy"].transform(max) == all_trials["target_val_label_accuracy"]
best = all_trials[idx]
m = pd.melt(best,
id_vars=["experiment_name", "x_transform"],
value_vars=[
"source_val_label_accuracy",
"target_val_label_accuracy",
])
matplotlib.rcParams.update({'font.size': 22})
plt.style.use('seaborn-whitegrid')
fg = sb.catplot(x='x_transform', col="experiment_name", y='value', hue='variable',
data=m, kind='bar', height=5, aspect=3, col_wrap=2, edgecolor = "black")
fg.set_xlabels('')
plt.figure(figsize=(15,50))
# iterate through axes
for ax in fg.axes.ravel():
ax.tick_params(labelbottom=True)
# add annotations
for c in ax.containers:
labels = [f'{(v.get_height()):.2f}' for v in c]
ax.bar_label(c, labels=labels, label_type='edge')
ax.margins(y=0.2)
# +
# For fun look at the worst trials
idx = all_trials.groupby(["experiment_name", "x_transform"])["target_val_label_accuracy"].transform(min) == all_trials["target_val_label_accuracy"]
worst = all_trials[idx]
m = pd.melt(worst,
id_vars=["experiment_name", "x_transform"],
value_vars=[
"source_val_label_accuracy",
"target_val_label_accuracy",
])
matplotlib.rcParams.update({'font.size': 22})
plt.style.use('seaborn-whitegrid')
fg = sb.catplot(x='x_transform', col="experiment_name", y='value', hue='variable',
data=m, kind='bar', height=5, aspect=3, col_wrap=2, edgecolor = "black")
fg.set_xlabels('')
plt.figure(figsize=(15,50))
# iterate through axes
for ax in fg.axes.ravel():
ax.tick_params(labelbottom=True)
# add annotations
for c in ax.containers:
labels = [f'{(v.get_height()):.2f}' for v in c]
ax.bar_label(c, labels=labels, label_type='edge')
ax.margins(y=0.2)
| analysis/tl_2v2/summary.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import spacy
nlp = spacy.load("en_core_web_lg", exclude=["ner"])
# +
s = 'Despite it being the "Big Apple", you can\'t find a good apple in NYC. I like them best from Martha\'s Vineyard.'
d = nlp(s)
result = [[token.lemma_.lower() for token in sentence if not token.is_stop and token.pos_ != 'PUNCT'] for sentence in d.sents]
#[[f'{token.lemma_}_{token.pos_}' for token in sentence] for sentence in d.sents]
# -
for sent in result:
print('[', end='')
for token in sent:
print('', token, '|', end='')
print(']')
# +
#nlp.Defaults.stop_words
# -
# ### NMF
# +
documents = ['apple apple', 'peach peach', 'apple peach']
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf_vectorizer = TfidfVectorizer(analyzer='word',
min_df=2,
max_df=0.95,
sublinear_tf=False)
X = tfidf_vectorizer.fit_transform(documents)
# -
X.toarray()
# +
from sklearn.decomposition import NMF
nmf = NMF(n_components=2)
W = nmf.fit_transform(X.toarray().T)
H = nmf.components_
# -
W
H
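# As a quick sanity check (an extra illustration, not part of the original notebook), the product of the NMF factors should approximately reconstruct the matrix that was factorized, i.e. the transposed TF-IDF matrix:
# +
import numpy as np

reconstruction = W @ H  # approximates X.toarray().T (terms x documents)
print(np.round(reconstruction, 3))
print(np.round(X.toarray().T, 3))
# -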
# ### word embeddings
# +
import gensim.downloader as api
import time
t = time.time()
model_wiki = api.load('glove-wiki-gigaword-200')
#model_twitter = api.load("glove-twitter-200")
print(f'Time taken: {round((time.time() - t) / 60, 2)} mins')
# +
#model_wiki.most_similar('embedding')
# +
#model_twitter.most_similar('embedding')
# -
model_wiki.__contains__('tram')
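# A couple of standard gensim `KeyedVectors` queries on the model loaded above (a small added illustration; the word choices are arbitrary):
# +
# cosine similarity between two word vectors
print(model_wiki.similarity('apple', 'banana'))

# nearest neighbours of a word in the embedding space
print(model_wiki.most_similar('apple', topn=3))
# -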
# +
import numpy as np
words = [
'apple',
'banana',
'strawberry',
'blueberry',
'peach',
'apricot',
# 'car',
# 'bicycle',
# 'bus',
# 'metro',
# 'tram',
# 'train',
'asimov',
'dostoevsky',
'lermontov',
'nabokov',
'kafka',
'kundera'
]
vectors = np.array([model_wiki[word] for word in words])
# -
vectors.shape
# +
from scipy.cluster import hierarchy
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
# t-SNE
tsne = TSNE(n_components=2, perplexity=5)
tsne_result = tsne.fit_transform(vectors)
# dendrogram
Z = hierarchy.linkage(vectors, method='average', metric='cosine')
# plot
fig, axes = plt.subplots(1, 2, figsize=(7,5))
# t-SNE
axes[0].scatter(tsne_result[:,0], tsne_result[:,1], color='black', marker='+')
for i,(x,y) in enumerate(tsne_result):
axes[0].text(x+5, y+5, words[i], fontsize=12)
axes[0].axis('off')
axes[0].set_title('t-SNE')
# dendrogram
dn = hierarchy.dendrogram(Z,
orientation='left',
labels=words,
ax=axes[1],
link_color_func=lambda k: 'black'
)
axes[1].spines['top'].set_visible(False)
axes[1].spines['right'].set_visible(False)
axes[1].spines['bottom'].set_visible(False)
axes[1].spines['left'].set_visible(False)
axes[1].get_xaxis().set_visible(False)
axes[1].set_title('dendrogram')
fig.tight_layout()
fig.savefig('../figures/visualization-example.png')
# +
from sklearn.manifold import TSNE
tsne = TSNE(n_components=2, perplexity=5)
tsne_result = tsne.fit_transform(vectors)
# -
tsne_result.shape
# +
fig, ax = plt.subplots()
ax.scatter(tsne_result[:,0], tsne_result[:,1], color='black', marker='+')
for i,(x,y) in enumerate(tsne_result):
ax.text(x+5, y, words[i])
ax.axis('off')
# -
# ### temp
# +
import pandas as pd
df = pd.read_csv('../data/euroleaks/parsed.csv')
# -
df.head()
| notebooks/literature_review-examples.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="zMkw96ZTKbFi" colab_type="text"
# # POETRY GENERATION USING LSTM NETWORKS (CHARACTER-LEVEL MODEL)
# + [markdown] id="Kr-6JffyNzdn" colab_type="text"
# ###POETRY GENERATION IS A TEXT GENERATION PROBLEM THAT DEALS WITH SEQUENTIAL DATA.
# ###Sequential data can be defined as follows:
#
#
#
# ###Any data in which two events occur within a particular time frame, and where event A occurring before event B is an entirely different scenario from event A occurring after event B.
#
# ###In traditional ML problems, the order of occurrence of data points is not important. However, in sequence prediction problems such as character-by-character text generation, our model has to output the correct character every time; otherwise, whole sequences of characters preceding it may become meaningless.
#
#
# ###This is what makes text/poetry generators tricky. To solve this problem we will use LSTM (Long Short-Term Memory) networks, which are a special kind of RNN. LSTMs are well-suited to the task of capturing and remembering long-term dependencies. They are excellent at capturing context in sequential data and remembering it for long periods of time.
# + [markdown] id="mscbw1nk1wS3" colab_type="text"
# ###In this notebook, we will train an LSTM using Keras on a collection of poems by <NAME> (the famous American poet), which includes works like:
# ###*The Road Not Taken*
# ###*Mending Wall*
# ###*Nothing Gold Can Stay*
# ###among others.
# 
# + id="ApU1fkHkSDAl" colab_type="code" outputId="27800e92-605b-4211-8ee9-61e5a28c9c32" executionInfo={"status": "ok", "timestamp": 1548436933482, "user_tz": -300, "elapsed": 1418, "user": {"displayName": "<NAME>", "photoUrl": "https://lh6.googleusercontent.com/-uYlieiRYDZk/AAAAAAAAAAI/AAAAAAAAADg/FSnrBXtYgYE/s64/photo.jpg", "userId": "02091847136436119698"}} colab={"base_uri": "https://localhost:8080/", "height": 84}
from google.colab import drive
drive.mount('/content/drive')
# %cd /content/drive/My Drive/
# %cd cs231n/
# %cd poetry_generator
# + [markdown] id="mVqL2zbp3eLj" colab_type="text"
# ##IMPORTING LIBRARIES
# + id="IYcmL7vjTkb7" colab_type="code" outputId="2ed5841b-7896-41f6-ec4e-f258b8e2e71c" executionInfo={"status": "ok", "timestamp": 1548481802886, "user_tz": -300, "elapsed": 1955, "user": {"displayName": "<NAME>", "photoUrl": "https://lh6.googleusercontent.com/-uYlieiRYDZk/AAAAAAAAAAI/AAAAAAAAADg/FSnrBXtYgYE/s64/photo.jpg", "userId": "02091847136436119698"}} colab={"base_uri": "https://localhost:8080/", "height": 33}
import numpy as np
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import LSTM
from keras.layers import RNN
from keras.utils import np_utils
# + [markdown] id="g5ohtLKd-TY-" colab_type="text"
# ##LOADING DATA
# + id="pf8te9cuTzDx" colab_type="code" colab={}
text = (open("robert_frost.txt").read())
text = text.lower()
# + [markdown] id="6SMWKDNGniau" colab_type="text"
# ##MAPPING CHARACTERS TO NUMBERS
# + id="GazOMt-OT9zl" colab_type="code" colab={}
# since machines understand numbers better than words or characters, here we map each unique character to an arbitrary unique number to speed up training
characters = sorted(list(set(text)))
n_to_char = {n:char for n, char in enumerate(characters)}
char_to_n = {char:n for n, char in enumerate(characters)}
# + [markdown] id="GYvNrSCIoEFd" colab_type="text"
# ## PREPROCESSING OUR DATA
# + id="sF8hqmLtUgBL" colab_type="code" colab={}
X = [] # train array
Y = [] # target array
length = len(text) # total number of characters in text
seq_length = 100 # length of sequence of characters to consider before outputting next char in sequence
# creating sequences of 100 chars each stored in sequence variable
# label stores the next/correct prediction of each sequence or the 101st value in every sequence
for i in range(0, length-seq_length, 1):
sequence = text[i:i + seq_length]
label =text[i + seq_length]
X.append([char_to_n[char] for char in sequence])
Y.append(char_to_n[label])
# + id="eEHbTYL-Vb_z" colab_type="code" colab={}
# LSTM requires training data to be of shape (number_of_sequences, length_of_sequence, number_of_features) hence reshape X
X_modified = np.reshape(X, (len(X), seq_length, 1))
# scale X for faster training
X_modified = X_modified / float(len(characters))
# one-hot encode Y to prevent any logical issues arising from mapping characters to numbers
Y_modified = np_utils.to_categorical(Y)
# + [markdown] id="bw0a9k_vpQUH" colab_type="text"
# ##DEFINING LSTM MODEL
# + id="KllHFlM7VRkI" colab_type="code" colab={}
# Network has 3 layers of 700 neurons each with Dropout set to 20%
model = Sequential()
model.add(LSTM(700, input_shape=(X_modified.shape[1], X_modified.shape[2]), return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(700, return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(700))
model.add(Dropout(0.2))
model.add(Dense(Y_modified.shape[1], activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')
# + [markdown] id="NlY9CFYnp2_M" colab_type="text"
# ## TRAIN MODEL
# + id="eX-eY4mqVW1O" colab_type="code" colab={}
# training code -- use pre-trained weights instead as this may take upwards of 12 hours on a Tesla K80 equivalent GPU when epoch = 100
#model.fit(X_modified, Y_modified, epochs=100, batch_size=100)
#model.save_weights('frostweights.h5')
# + [markdown] id="krfjBbqpqBmo" colab_type="text"
# ##LOAD PRE-TRAINED WEIGHTS
# + id="i9R1K_MUWHbk" colab_type="code" colab={}
model.load_weights('weights.h5')
# + [markdown] id="2Q2HpETqqGVj" colab_type="text"
# ##GENERATE TEXT
# + id="mMBOxsqcXI-O" colab_type="code" colab={}
# select random row from X which is sequence of 100 characters
# predict next 100 characters following this row
string_mapped = X[99]
full_string = [n_to_char[value] for value in string_mapped]
# generating total 2000 characters
for i in range(2000):
#reshape input to previous shape and rescale to original scale
x = np.reshape(string_mapped,(1,len(string_mapped), 1))
x = x / float(len(characters))
# select character (number) with max probability
pred_index = np.argmax(model.predict(x, verbose=0))
# convert integers back to characters
seq = [n_to_char[value] for value in string_mapped]
# remove first character from 100 character string array and append newly predicted character
full_string.append(n_to_char[pred_index])
string_mapped.append(pred_index)
string_mapped = string_mapped[1:len(string_mapped)]
# + [markdown] id="oyWZPkE_ruMJ" colab_type="text"
# ## PRINT GENERATED RHYMES (2000 CHARACTERS)
# + id="BNTn8poTdk4q" colab_type="code" outputId="f7e710e3-2104-4ab3-b510-b416d6c0e5d4" executionInfo={"status": "ok", "timestamp": 1548441077698, "user_tz": -300, "elapsed": 457378, "user": {"displayName": "<NAME>", "photoUrl": "https://lh6.googleusercontent.com/-uYlieiRYDZk/AAAAAAAAAAI/AAAAAAAAADg/FSnrBXtYgYE/s64/photo.jpg", "userId": "02091847136436119698"}} colab={"base_uri": "https://localhost:8080/", "height": 1004}
#combining text
txt=""
for char in full_string:
txt = txt+char
#print generated text
print(txt)
# + [markdown] id="HZXRew2nrzER" colab_type="text"
# ###AS WE CAN SEE, THE ALGORITHM DID START GRASPING THE STRUCTURE OF ROBERT FROST'S POETRY AND MANAGED TO PRODUCE A SOMEWHAT DECENT/COHERENT COLLECTION OF RHYMES.
# ###THE REAL PROBLEM WAS THAT WE WERE GPU LIMITED TO ONLY 30 EPOCHS OVER THE TRAINING SET. THIS IS WHY WE SEE SEVERAL SPELLING MISTAKES IN THE GENERATED TEXT. THE MODEL WAS TRAINED ON A TESLA K80 GPU INSIDE GOOGLE COLAB AND I DID NOT HAVE ENOUGH TIME TO TRAIN IT FOR THE FULL 100 EPOCHS OVER THE TRAINING DATA.
# ###A MORE POWERFUL GPU WITH MORE TRAINING HOURS WILL RESULT IN VERY ACCURATE, BEAUTIFUL RHYMES.
| poetry_generator/poetry_generator.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Demand Learning Comparison
#
# This notebook runs several learning models on the market data created by the other notebooks of this repository.
#
# The aim of this notebook is to provide a reproducible environment for research purposes. The models used are not fully tuned, since our focus is not on comparing their accuracy down to the last decimal place.
# Hence, we do not claim that the results of this notebook allow conclusions to be drawn about the accuracy, superiority, or shortcomings of a particular learning technique.
#
# **Process:**
# * we read the data given as CSV files and parse them to create learning/evaluation data structures
# * Note: as the data generation requires some time, you can use a provided data set by using `USE_EXEMPLARY_DATA_SET = True`
# * we train several models (mostly using SciKit) on the given data and evaluate them accordingly
# * we analyse the results and display them in two tables:
#   * model evaluation: an overview of the runtimes, AIC, and McFadden results
# * market evaluation: an overview of the bias and SMRE for particular market situations
# * we create several visualizations to help the user interpret the results
# +
import math
import numpy as np
import pandas as pd
# little helper to compare against other implementations
# if True, the test sets used are not sampled randomly but taken from the end of the data
STATIC_TEST_SET = False
# we provide one exemplary data set
# set to True to use the provided exemplary data set
USE_EXEMPLARY_DATA_SET = True
def load_datasets(setting = 'S3'):
global learning_X, learning_Y, prediction_X, prices, price_probas
dataset_folder = ''
ds_del = '\t'
if USE_EXEMPLARY_DATA_SET:
ds_del = ','
size = 10
datasets = [10] # we only added the smallest data set to the repository
set_pos = datasets.index(size)
        dataset_folder = 'settings/' + str(datasets[set_pos]) + 'k/'
learning_data = np.loadtxt('{}demand_learning_data_{}.csv'.format(dataset_folder, setting), delimiter=ds_del)
prediction_data = np.loadtxt('{}demand_prediction_data_{}.csv'.format(dataset_folder, setting), delimiter=ds_del)
price_data = np.loadtxt('{}PEW_comparison_{}.csv'.format(dataset_folder, setting), delimiter=ds_del)
_, learning_X, learning_Y = np.hsplit(learning_data, [1, learning_data.shape[1]-1])
    # as of now, we compare approaches that predict whether
# an item is sold or not.
vpmin_1 = np.vectorize(lambda x: min(1,x))
learning_Y = vpmin_1(learning_Y.ravel())
prediction_X = np.hsplit(prediction_data, [2, prediction_data.shape[1]])[1]
prices, price_probas, price_probas_rest, _ = np.hsplit(price_data, [2, 3, price_data.shape[1]+1])
# +
def calculate_loglikelihood(probas, Y):
    # for the case of p = 0.0, we "cheat" a little bit
return sum([(Y[i]*math.log(max(float("10e-10"),probas[i])) + (1 - min(1, Y[i])) * math.log(max(float("10e-10"),(1 - probas[i])))) for i in range(len(Y))])
def calculate_AIC(probas, Y):
num_features = prediction_X.shape[1]
return -2 * calculate_loglikelihood(probas, Y) + 2 * num_features
def process_prediction(predictions, predictions_are_tuples = True):
if predictions_are_tuples:
ret = [item[1] for item in predictions]
else:
ret = predictions
    # Clipping to (0, 1) is only needed for linear (non-logistic) regressions.
    # It is a rather crude fix, but sufficient for our tests
return [min(1, max(0, item)) for item in ret]
# -
# The next cell calculates the AIC and Loglikelihood for the null model. We will require both values later to determine the McFadden values.
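# For reference, McFadden's pseudo-$R^2$ (the standard definition, stated here as an assumption about the intended metric) compares the log-likelihood of a fitted model with that of the null model,
#
# $$ R^2_{\text{McFadden}} = 1 - \frac{\log L_{\text{model}}}{\log L_{\text{null}}} $$
#
# which is what `fit_and_predict` below computes from `logl` and `logl_0`.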
def calc_null_model(learn_X, learn_Y):
model = linear_model.LogisticRegression()
    # fit a logistic regression on all-zero features to obtain the null model
test_size = 0.2
logl_factor = test_size / (1 - test_size) # scale factor for initial logl of null model
if STATIC_TEST_SET:
X_train = learning_X[:(round(len(learning_X)*(1-test_size))),:]
X_test = learning_X[(round(len(learning_X)*(1-test_size))):,:]
y_train = learning_Y[:(round(len(learning_Y)*(1-test_size)))]
y_test = learning_Y[(round(len(learning_Y)*(1-test_size))):]
else:
X_train, X_test, y_train, y_test = train_test_split(learn_X, learn_Y, test_size=test_size, random_state=17)
model.fit(np.zeros((np.shape(X_train)[0],9)), y_train)
# validate against training set for AIC calculation
probas = model.predict_proba(X_test)
probas = process_prediction(probas, True)
aic = calculate_AIC(probas, y_test)
logl = calculate_loglikelihood(probas, y_test)
return (aic, logl)
# The next cell contains the main learning method `fit_and_predict`.
# Whether we randomly sample the test set or simply take the last `n%` of the data as the test set depends on the variable `STATIC_TEST_SET`.
#
#
# +
import time
from sklearn.model_selection import train_test_split
def fit_and_predict(name, learn_X, learn_Y, test_Y, model, predict_method):
# regressors return scalar values
predictions_are_tuples = True
if predict_method == 'predict':
predictions_are_tuples = False
test_size = 0.2
logl_factor = test_size / (1 - test_size) # scale factor for initial logl of null model
if STATIC_TEST_SET:
X_train = learning_X[:(round(len(learning_X)*(1-test_size))),:]
X_test = learning_X[(round(len(learning_X)*(1-test_size))):,:]
y_train = learning_Y[:(round(len(learning_Y)*(1-test_size)))]
y_test = learning_Y[(round(len(learning_Y)*(1-test_size))):]
else:
X_train, X_test, y_train, y_test = train_test_split(learn_X, learn_Y, test_size=test_size, random_state=17)
start_fit = time.time()
model.fit(X_train, y_train)
runtime_fit = (time.time() - start_fit) * 1000 # ms
# validate against training set for AIC calculation
start_predict = time.time()
probas = getattr(model, predict_method)(X_test)
runtime_predict = (time.time() - start_predict) * 1000 # ms
runtime_predict = runtime_predict / len(X_test)
probas = process_prediction(probas, predictions_are_tuples)
aic = calculate_AIC(probas, y_test)
logl = calculate_loglikelihood(probas, y_test)
logl_factor = 1.0
mcf = 1 - (logl / (logl_0 * logl_factor))
# validate against evaluation set
model.fit(learn_X, learn_Y)
probas = getattr(model, predict_method)(test_Y)
probas = process_prediction(probas, predictions_are_tuples)
# mname = str(model.__class__)
# mname = mname[mname.rfind('.')+1:mname.find("'>")]
return [name, runtime_fit, runtime_predict, aic, logl, mcf, probas]
# +
import warnings
warnings.filterwarnings('ignore')
import matplotlib.pyplot as plt
import seaborn as sns
from ggplot import *
# %matplotlib inline
def plot_probability_graphs(p, setting_id):
# converting to "long form"
p_melted = pd.melt(p, id_vars=['Situation', 'Price'], var_name=['Method'])
# filtering for RFs (too bad estimates)
p_melted = p_melted[p_melted.Method != 'Extreme Gradient Boosting - Regressor']
p_melted = p_melted[p_melted.Method != 'Gradient Boosting Trees']
p_melted = p_melted[p_melted.Method != 'Random Forest - Regressor']
p_melted = p_melted[p_melted.Method != 'Random Forest - Classification']
p_melted = p_melted[p_melted.Method != 'Multi-Layer Perceptron - Regressor']
g = sns.FacetGrid(p_melted, col='Situation', col_wrap=4, hue='Method')
g = (g.map(plt.plot, "Price", "value").add_legend().set_ylabels('Probability'))
g.savefig("setting_{}.pdf".format(setting_id))
return g
# -
def calculate_price_point_table(df):
out = pd.DataFrame(columns=['Price', 'Method', 'dist__sum', 'smre', 'rel_dist__sum', 'dist__abs_sum', 'profit_dist__abs_sum'])
for price, v in df.groupby(['Price']):
cols = list(v) # get column names
cols.remove('Price')
cols.remove('Situation')
cols.remove('Actual Probabilities')
for method in cols:
row = [price, method]
row.append((v['Actual Probabilities']-v[method]).mean())
row.append(np.power((np.sqrt(np.absolute(v['Actual Probabilities']-v[method]))).mean(), 2))
row.append(((v['Actual Probabilities']-v[method]) / v['Actual Probabilities']).mean())
row.append((v['Actual Probabilities']-v[method]).abs().mean())
row.append((v['Actual Probabilities']*price-v[method]*price).abs().mean())
out.loc[len(out)]=row
return out
# +
from sklearn import neural_network
from sklearn import linear_model
from sklearn import ensemble
from sklearn import svm
import xgboost as xgb
def learn():
overview = pd.DataFrame(columns=['Model Name', 'Runtime Fitting (ms)', 'Runtime Prediction (ms per item)', 'AIC', 'LogLikelihood', 'McFadden Pseudo R^2'])
probas = pd.DataFrame(prices, columns=['Situation', 'Price'])
models = {'Logistic Regression':
{'short_name': 'LogR',
'model': linear_model.LogisticRegression(),
'predict_method': 'predict_proba'
},
'Linear Regression':
{'short_name': 'LinR',
'model': linear_model.LinearRegression(),
'predict_method': 'predict'
},
'Extreme Gradient Boosting - Classifier':
{'short_name': 'XGB',
'model': xgb.XGBClassifier(),
'predict_method': 'predict_proba'
},
'Extreme Gradient Boosting - Regressor':
{'short_name': 'XGB_Reg',
'model': xgb.XGBRegressor(),
'predict_method': 'predict'
},
'Gradient Boosting Trees':
{'short_name': 'GBT',
'model': ensemble.GradientBoostingRegressor(),
'predict_method': 'predict'
},
'Random Forest - Classification':
{'short_name': 'RFC',
'model': ensemble.RandomForestClassifier(),
'predict_method': 'predict_proba'
},
'Random Forest - Regressor':
{'short_name': 'RFR',
'model': ensemble.RandomForestRegressor(),
'predict_method': 'predict'
},
'Multi-Layer Perceptron - Classifier':
{'short_name': 'MLP',
'model': neural_network.MLPClassifier(),
'predict_method': 'predict_proba'
},
'Multi-Layer Perceptron - Regressor':
{'short_name': 'MLP_Reg',
'model': neural_network.MLPRegressor(),
'predict_method': 'predict'
},
'Support Vector Machine':
{'short_name': 'SVM',
'model': svm.SVC(probability=True),
'predict_method': 'predict_proba'
}
}
# all models are executed by default
for model in models:
models[model]['execute'] = True
# disable particular models for tests (e.g., SVM due to runtime issues)
# models['Gradient Boosting Trees']['execute'] = False
# models['Random Forest - Classification']['execute'] = False
# models['Random Forest - Regressor']['execute'] = False
# models['Multi-Layer Perceptron - Regressor']['execute'] = False
# models['Extreme Gradient Boosting - Regressor']['execute'] = False
# models['Support Vector Machine']['execute'] = False
for k, v in models.items():
if not v['execute']: continue
exec_meth = v['predict_method']
ret = fit_and_predict(k, learning_X, learning_Y, prediction_X, v['model'], exec_meth)
overview.loc[len(overview)]=ret[:-1]
probas[ret[0]] = ret[-1]
probas['Actual Probabilities'] = price_probas
return (overview, probas)
pricing_setting = 'S3'
load_datasets(pricing_setting)
# determine null model
aic_0, logl_0 = calc_null_model(learning_X, learning_Y)
print(f"Null model: \n\tAIC: {aic_0:,}\n\tLoglikelihood: {logl_0:,}")
res = learn()
overview = res[0]
probas = res[1]
display(overview)
# -
res_table = calculate_price_point_table(probas)
# +
# We'll filter for 3 exemplary prices: 4, 8, & 12
filtered = res_table[(res_table.Price % 4 == 0) & (res_table.Price > 0.0)]
display(filtered)
# -
g = plot_probability_graphs(probas, 'S1')
display(g)
# +
import matplotlib
# fixes problems with ACM's PreFlight compiler which complains about type 3 fonts
# see: http://phyletica.org/matplotlib-fonts/
matplotlib.rcParams['text.usetex'] = True
def plot_paper_graph__setting_comparison(situations):
f, axes = plt.subplots(1, len(situations), sharey=True)
i = 0
for k, p in situations.items():
axis = axes[i]
plt.rcParams["figure.figsize"] = (6,3)
cols = {'Price': 'Price',
'Logistic Regression': 'LR',
'Linear Regression': 'LS',
'Extreme Gradient Boosting - Classifier': 'XGB',
'Multi-Layer Perceptron - Classifier': 'MLP',
'Actual Probabilities': 'Monte Carlo'}
p = p.filter(items=cols.keys())
# renaming
p.rename(columns=cols, inplace=True)
for col in list(cols.values())[1:]:
# axx.plot(p.Price, p[[col]], label=col)
pass # disabling to ensure 'nice order of appearance'
# this is a manual fix to ensure that the order to drawing
# is optimized for the reader (i.e., models being far off
# are drawn first as they do less clutter the visual result)
axis.plot(p.Price, p[['LS']], label='LS')
axis.plot(p.Price, p[['XGB']], label='XGB')
axis.plot(p.Price, p[['MLP']], label='MLP')
axis.plot(p.Price, p[['LR']], label='LR')
axis.plot(p.Price, p[['Monte Carlo']], 'k-', label='Monte Carlo', linewidth=0.8)
axis.set_xlabel('Price')
if i == 0:
axis.set_ylabel('Probability') # set on left most graph only
plt.legend()
axis.set_title('Setting {}'.format(k))
i = i + 1
plt.show()
f.subplots_adjust(hspace=0.0)
f.savefig("dm__setting_comparison.pdf", bbox_inches='tight')
# we select two exemplary situations that nicely show the expected effects
# of the settings and where no model is particularly good or bad.
# For the paper, we selected situation 9 of setting (i) and 4 of (iii)
selected_situations = {}
load_datasets('S1')
res = learn()
probas_filt = res[1]
selected_situations['(i)'] = probas_filt[probas_filt.Situation == 9]
load_datasets('S3')
res = learn()
probas_filt = res[1]
selected_situations['(iii)'] = probas_filt[probas_filt.Situation == 4]
plot_paper_graph__setting_comparison(selected_situations)
| demand_learning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_tensorflow_p36
# language: python
# name: conda_tensorflow_p36
# ---
# # Forecasting
import pandas as pd
bucket = "sagemaker-course-di"
prefix = "datasets"
filename = "passangers.csv"
data_s3_location = "s3://{}/{}/{}".format(bucket, prefix, filename) # S3 URL
s3_tabular_data = pd.read_csv(data_s3_location, header=0, sep=",", index_col=0)
s3_tabular_data.head()
from matplotlib import pyplot
from statsmodels.tsa.seasonal import seasonal_decompose
result = seasonal_decompose(s3_tabular_data, model='multiplicative', period=4)
pyplot.rcParams.update({'figure.figsize': (20,20)})
result.plot().suptitle('Multiplicative Decompose', fontsize=22)
pyplot.show()
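# The individual components of the decomposition can also be inspected directly; `seasonal_decompose` returns a `DecomposeResult` with `trend`, `seasonal`, and `resid` attributes (shown here as a small additional illustration):
# trend component (rolling estimate; NaN at the edges)
print(result.trend.dropna().head())
# seasonal component
print(result.seasonal.head())
# residual component
print(result.resid.dropna().head())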
| labs/lab7/forecasting.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# <div class="contentcontainer med left" style="margin-left: -50px;">
# <dl class="dl-horizontal">
# <dt>Title</dt> <dd> Text Element</dd>
# <dt>Dependencies</dt> <dd>Matplotlib</dd>
# <dt>Backends</dt> <dd><a href='./Text.ipynb'>Matplotlib</a></dd> <dd><a href='../bokeh/Text.ipynb'>Bokeh</a></dd>
# </dl>
# </div>
import numpy as np
import holoviews as hv
hv.extension('matplotlib')
# The ``Text`` annotation is used to overlay text at a particular position on a plot:
# +
xs = np.linspace(-5,5,100)
curve = hv.Curve((xs,-(xs-2)**2))
text = hv.Text(0, -30, 'Quadratic Curve')
curve.opts(color='#D3D3D3') * text
# -
# For full documentation and the available style and plot options, use ``hv.help(hv.Text).``
| examples/reference/elements/matplotlib/Text.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <small><small><i>
# All the IPython Notebooks in this lecture series by Dr. <NAME> are available @ **[GitHub](https://github.com/milaan9/04_Python_Functions/tree/main/002_Python_Functions_Built_in)**
# </i></small></small>
# # Python `staticmethod()`
#
# The **`staticmethod()`** built-in function returns a static method for a given function.
#
# **Syntax**:
#
# ```python
# staticmethod(function)
# ```
#
# Using **`staticmethod()`** is considered an un-Pythonic way of creating a static function.
#
# Hence, in newer versions of Python, you can use the **`@staticmethod`** decorator.
#
# The syntax of **`@staticmethod`** is:
#
# **Syntax**:
#
# ```python
# @staticmethod
# def func(args, ...)
# ```
# ## `staticmethod()` Parameters
#
# The **`staticmethod()`** method takes a single parameter:
#
# * **function** - function that needs to be converted to a static method
# ## Return Value from `staticmethod()`
#
# The **`staticmethod()`** returns a static method for a function passed as the parameter.
# ## What is a static method?
#
# Static methods, much like **[class methods](https://github.com/milaan9/04_Python_Functions/blob/main/002_Python_Functions_Built_in/012_Python_classmethod%28%29.ipynb)**, are methods that are bound to a class rather than its object.
#
# They do not require a class instance creation. So, they are not dependent on the state of the object.
#
# The difference between a static method and a class method is:
#
# * Static method knows nothing about the class and just deals with the parameters.
# * Class method works with the class since its parameter is always the class itself.
#
# They can be called both by the class and its object.
#
# ```python
# Class.staticmethodFunc()
# or even
# Class().staticmethodFunc()
# ```
# +
# Example 1: Create a static method using staticmethod()
class Mathematics:
def addNumbers(x, y):
return x + y
# create addNumbers static method
Mathematics.addNumbers = staticmethod(Mathematics.addNumbers)
print('The sum is:', Mathematics.addNumbers(5, 10))
# -
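# As noted above, a static method can be called both through the class and through an instance; here is a brief extra illustration using the same class:

# +
print('Via class:   ', Mathematics.addNumbers(5, 10))
print('Via instance:', Mathematics().addNumbers(5, 10))
# -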
# ## When do you use static methods?
#
# ### 1. Grouping utility function to a class
#
# Static methods have a limited use case because, unlike class methods or instance methods, they cannot access the properties of the class or its instances.
#
# However, when you need a utility function that doesn't access any properties of a class but makes sense that it belongs to the class, we use static functions.
# +
# Example 2: Create a utility function as a static method
class Dates:
def __init__(self, date):
self.date = date
def getDate(self):
return self.date
@staticmethod
def toDashDate(date):
return date.replace("/", "-")
date = Dates("15-12-2016")
dateFromDB = "15/12/2016"
dateWithDash = Dates.toDashDate(dateFromDB)
if(date.getDate() == dateWithDash):
print("Equal")
else:
print("Unequal")
# -
# Here, we have a **`Dates`** class that only works with dates with dashes. However, in our previous database, all dates were present in slashes.
#
# In order to convert the slash-dates to dash-dates, we have created a utility function **`toDashDate`** within **`Dates`**.
#
# It is a static method because it doesn't need to access any properties of Dates itself and only requires the parameters.
#
# We can also create **`toDashDate`** outside the class, but since it works only for dates, it's logical to keep it inside the **`Dates`** class.
# ### 2. Having a single implementation
#
# Static methods are used when we don't want subclasses of a class to change/override a specific implementation of a method.
# +
# Example 3: How inheritance works with static method?
class Dates:
def __init__(self, date):
self.date = date
def getDate(self):
return self.date
@staticmethod
def toDashDate(date):
return date.replace("/", "-")
class DatesWithSlashes(Dates):
def getDate(self):
return Dates.toDashDate(self.date)
date = Dates("15-12-2016")
dateFromDB = DatesWithSlashes("15/12/2016")
if(date.getDate() == dateFromDB.getDate()):
print("Equal")
else:
print("Unequal")
# -
# **Explanation**:
#
# Here, we wouldn't want the subclass **`DatesWithSlashes`** to override the static utility method **`toDashDate`** because it only has a single use, i.e. change date to dash-dates.
#
# We could easily use the static method to our advantage by overriding **`getDate()`** method in the subclass so that it works well with the **`DatesWithSlashes`** class.
| 002_Python_Functions_Built_in/019_Python_staticmethod().ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.9 64-bit (''tensorflow'': conda)'
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
df = pd.read_csv('co2_emission.csv')
df.tail()
df.info()
df.describe()
df['Entity'].value_counts()
df[df['Code'].isnull()]['Entity'].value_counts()
macro_areas = 'Africa,Europe (other),EU-28,Americas (other),Asia and Pacific (other),Middle East,International transport,Statistical differences,World'.split(',')
df.columns = ['Entity', 'Code', 'Year', 'Annual CO2 emissions (tonnes)']
df_emissions_countries = df[df['Entity'].isin(macro_areas) == False]
df_emissions_areas = df[df['Entity'].isin(macro_areas[:7])]
df[df['Entity'] == 'World'].plot.line(x='Year')
sns.lineplot(x='Year', y='Annual CO2 emissions (tonnes)', hue='Entity', data=df_emissions_areas)
plt.tight_layout()
plt.title('Macro-region emissions')
df_Europe1950 = df_emissions_areas[
(df_emissions_areas['Entity'].isin('EU-28,Europe (other)'.split(',')))
& (df_emissions_areas['Year'] > 1950)]
sns.lineplot(x='Year', y='Annual CO2 emissions (tonnes)', hue='Entity', data=df_Europe1950)
plt.title('EU Emissions from 1950')
# +
df_EU27_7 = df_emissions_countries[
(df_emissions_countries['Entity'].isin('Italy Netherlands Poland France Germany Spain Sweden'.split(' ')))
& (df_emissions_countries['Year'] > 1989)]
sns.lineplot(x='Year', y='Annual CO2 emissions (tonnes)', hue='Entity', data=df_EU27_7)
plt.title('Top 7 EU-27 Economies (1990 - 2017)')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.figure(figsize=(10,8))
plt.tight_layout()
# -
def EmissionsReduction(listOfCountries, frame):
for i in listOfCountries:
E = df_EU27_7[(df_EU27_7['Entity'] == i) & (df_EU27_7['Year'] == 2017)]['Annual CO2 emissions (tonnes)'].iloc[0]
e = df_EU27_7[(df_EU27_7['Entity'] == i) & (df_EU27_7['Year'] == 1990)]['Annual CO2 emissions (tonnes)'].iloc[0]
frame[i] = (E - e)/e
frame = {}
EmissionsReduction('Italy Netherlands Poland France Germany Spain Sweden'.split(' '), frame)
frame
largestEconomies = ['China', 'United States', 'India', 'Japan', 'Germany', 'Russia', 'Indonesia', 'Brazil', 'United Kingdom', 'France']
df_emissions_topEconomies = df[
(df['Entity'].isin(largestEconomies))
& (df['Year'] > 1900)]
sns.lineplot(x='Year', y='Annual CO2 emissions (tonnes)', hue='Entity', data=df_emissions_topEconomies)
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.figure(figsize=(10,8))
plt.tight_layout()
df_emissions_topEconomies.groupby('Entity').sum()['Annual CO2 emissions (tonnes)'].sort_values(ascending=False).plot.bar(color='#C87807')
plt.title('Historical Total Emissions (from 1900)')
# +
def Emissions2017(entity):
return df[(df['Entity'] == entity) & (df['Year'] == 2017)]['Annual CO2 emissions (tonnes)'].iloc[0]
gdp_population2017 = {
1 : ['United States', 19485394000000, 325084756, 9525067, Emissions2017('United States')],
2 : ['China', 12237700479375, 1421021791, 9596961, Emissions2017('China')],
3 : ['Japan', 4872415104315, 127502725, 377975, Emissions2017('Japan')],
4 : ['Russia', 1578417211937, 145530082, 17098246, Emissions2017('Russia')],
5 : ['India',2650725335364, 1338676785, 3287263, Emissions2017('India')],
6 : ['Germany', 3693204332230, 82658409, 357114, Emissions2017('Germany')]
}
# -
df_gdp_population = pd.DataFrame.from_dict(gdp_population2017, orient='index', columns = ['Entity', 'GDP nominal 2017', 'Population', 'Total Area (km2)', 'CO2 Emissions 2017'])
df_gdp_population
df_gdp_population['CO2 / GDP'] = df_gdp_population['CO2 Emissions 2017'] /df_gdp_population['GDP nominal 2017']
df_gdp_population['CO2 / Population'] = df_gdp_population['CO2 Emissions 2017'] /df_gdp_population['Population']
df_gdp_population['CO2 / Area'] = df_gdp_population['CO2 Emissions 2017'] /df_gdp_population['Total Area (km2)']
sns.barplot(x='Entity', y='CO2 Emissions 2017', data=df_gdp_population, palette='autumn', order=df_gdp_population.sort_values('CO2 Emissions 2017', ascending=False)['Entity'])
plt.title('CO2 emissions 2017')
plt.tight_layout()
sns.barplot(x='Entity', y='CO2 / GDP', data=df_gdp_population, palette='summer', order=df_gdp_population.sort_values('CO2 / GDP', ascending=False)['Entity'])
plt.title('CO2 / GDP 2017')
plt.tight_layout()
sns.barplot(x='Entity', y='CO2 / Population', data=df_gdp_population, palette='bone', order=df_gdp_population.sort_values('CO2 / Population', ascending=False)['Entity'])
plt.title('CO2 / Population 2017 ')
plt.tight_layout()
sns.barplot(x='Entity', y='CO2 / Area', data=df_gdp_population, palette='copper', order=df_gdp_population.sort_values('CO2 / Area', ascending=False)['Entity'], alpha=.7)
plt.title('CO2 / Area 2017 ')
plt.tight_layout()
G = df_gdp_population['GDP nominal 2017']
P = df_gdp_population['Population']
A = df_gdp_population['Total Area (km2)']
CO2= df_gdp_population['CO2 Emissions 2017']
df_gdp_population['Absolute value'] = CO2 / (G+P+A)
df_gdp_population
sns.barplot(x='Entity', y='Absolute value', data=df_gdp_population, palette='copper', order=df_gdp_population.sort_values('Absolute value', ascending=False)['Entity'])
plt.title('Absolute value')
plt.tight_layout()
| Data_Analysis/C02_emisions/CO2_emission.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "slide"} tags=[] toc-hr-collapsed=false
# # Probabilistic Grammar Fuzzing
#
# Let us give grammars even more power by assigning _probabilities_ to individual expansions. This allows us to control how many of each element should be produced, and thus allows us to _target_ our generated tests towards specific functionality. We also show how to learn such probabilities from given sample inputs, and specifically direct our tests towards input features that are uncommon in these samples.
# + slideshow={"slide_type": "skip"}
from bookutils import YouTubeVideo
YouTubeVideo('9htOliNwopc')
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"}
# **Prerequisites**
#
# * You should have read the [chapter on grammars](Grammars.ipynb).
# * Our implementation hooks into the grammar-based fuzzer introduced in ["Efficient Grammar Fuzzing"](GrammarFuzzer.ipynb)
# * For learning probabilities from samples, we make use of [parsers](Parser.ipynb).
# + [markdown] slideshow={"slide_type": "skip"}
# ## Synopsis
# <!-- Automatically generated. Do not edit. -->
#
# To [use the code provided in this chapter](Importing.ipynb), write
#
# ```python
# >>> from fuzzingbook.ProbabilisticGrammarFuzzer import <identifier>
# ```
#
# and then make use of the following features.
#
#
# A _probabilistic_ grammar allows to attach individual _probabilities_ to production rules. To set the probability of an individual expansion `S` to the value `X` (between 0 and 1), replace it with a pair
#
# ```python
# (S, opts(prob=X))
# ```
#
# If we want to ensure that 90% of phone numbers generated have an area code starting with `9`, we can write:
#
# ```python
# >>> from Grammars import US_PHONE_GRAMMAR, extend_grammar, opts
# >>> PROBABILISTIC_US_PHONE_GRAMMAR: Grammar = extend_grammar(US_PHONE_GRAMMAR,
# >>> {
# >>> "<lead-digit>": [
# >>> "2", "3", "4", "5", "6", "7", "8",
# >>> ("9", opts(prob=0.9))
# >>> ],
# >>> })
# ```
# A `ProbabilisticGrammarFuzzer` will extract and interpret these options. Here is an example:
#
# ```python
# >>> probabilistic_us_phone_fuzzer = ProbabilisticGrammarFuzzer(PROBABILISTIC_US_PHONE_GRAMMAR)
# >>> [probabilistic_us_phone_fuzzer.fuzz() for i in range(5)]
# ['(918)925-2501',
# '(981)925-0792',
# '(934)995-5029',
# '(955)999-7801',
# '(964)927-0877']
# ```
# As you can see, the large majority of area codes now starts with `9`.
#
# 
#
#
# + [markdown] slideshow={"slide_type": "slide"}
# ## The Law of Leading Digits
# + [markdown] slideshow={"slide_type": "fragment"}
# In all our examples so far, you may have noted that inputs generated by a program differ quite a bit from "natural" inputs as they occur in real life. This is true even for innocuous elements such as numbers – yes, the numbers we have generated so far actually _differ_ from numbers in the real world. This is because in real-life sets of numerical data, the _leading significant digit_ is likely to be small: Actually, on average, the leading digit `1` occurs more than _six times_ as often as the leading digit `8` or `9`. It has been shown that this result applies to a wide variety of data sets, including electricity bills, street addresses, stock prices, house prices, population numbers, death rates, lengths of rivers, physical and mathematical constants (Wikipedia).
# + [markdown] slideshow={"slide_type": "subslide"}
# This law of leading digits was first observed by Newcomb \cite{Newcomb1881} and later formalized by Benford in \cite{Benford1938}. Let us take a look at the conditions that determine the first digit of a number. We can easily compute the first digit by converting the number into a string and take the first character:
# + slideshow={"slide_type": "fragment"}
def first_digit_via_string(x: int) -> int:
return ord(repr(x)[0]) - ord('0')
# + slideshow={"slide_type": "fragment"}
first_digit_via_string(2001)
# + [markdown] slideshow={"slide_type": "subslide"}
# To do this mathematically, though, we have to take the fractional part of their logarithm, or formally
#
# $$
# d = 10^{\{\log_{10}(x)\}}
# $$
#
# where $\{x\}$ is the fractional part of $x$ (i.e. $\{1.234\} = 0.234$).
# + slideshow={"slide_type": "skip"}
import math
# + slideshow={"slide_type": "fragment"}
def first_digit_via_log(x: int) -> int:
frac, whole = math.modf(math.log10(x))
return int(10 ** frac)
# + slideshow={"slide_type": "fragment"}
first_digit_via_log(2001)
# + [markdown] slideshow={"slide_type": "subslide"}
# Most sets of "naturally" occurring numbers should not have any bias in the fractional parts of their logarithms, and hence, the fractional part $\{\log_{10}(x)\}$ is typically uniformly distributed. However, the fractional parts for the individual digits are _not_ evenly distributed.
# + [markdown] slideshow={"slide_type": "fragment"}
# For a number to start with a digit $d$, the condition $d < 10^{\{\log_{10}(x)\}} < d + 1$ must hold. To start with the digit 1, the fractional part $\{\log_{10}(x)\}$ must thus be in the range
# + slideshow={"slide_type": "fragment"}
(math.log10(1), math.log10(2))
# + [markdown] slideshow={"slide_type": "fragment"}
# To start with the digit 2, though, it must be in the range
# + slideshow={"slide_type": "fragment"}
(math.log10(2), math.log10(3))
# + [markdown] slideshow={"slide_type": "subslide"}
# which is much smaller. Formally, the probability $P(d)$ for a leading digit $d$ (again, assuming uniformly distributed fractional parts) is known as Benford's law:
# $$
# P(d) = \log_{10}(d + 1) - \log_{10}(d)
# $$
# which gives us:
# + slideshow={"slide_type": "fragment"}
def prob_leading_digit(d: int) -> float:
return math.log10(d + 1) - math.log10(d)
# + [markdown] slideshow={"slide_type": "fragment"}
# Let us compute these probabilities for all digits:
# + slideshow={"slide_type": "subslide"}
digit_probs = [prob_leading_digit(d) for d in range(1, 10)]
[(d, "%.2f" % digit_probs[d - 1]) for d in range(1, 10)]
# + slideshow={"slide_type": "fragment"}
# ignore
import matplotlib.pyplot as plt # type: ignore
# + slideshow={"slide_type": "subslide"}
# ignore
labels = range(1, 10)
fig1, ax1 = plt.subplots()
ax1.pie(digit_probs, labels=labels, shadow=True, autopct='%1.1f%%',
counterclock=False, startangle=90)
ax1.axis('equal');
# + [markdown] slideshow={"slide_type": "fragment"}
# We see that a leading 1 is indeed six times as probable as a leading 9.
# + [markdown] slideshow={"slide_type": "subslide"}
# Benford's law has a number of applications. Most notably, it can be used to detect "non-natural" numbers, i.e. numbers that apparently were created randomly rather than coming from a "natural" source. If you write a scientific paper and fake data by putting in random numbers (for instance, [using our grammar fuzzer](GrammarFuzzer.ipynb) on integers), you will likely violate Benford's law, and this can indeed be spotted. On the other hand, how would we proceed if we _wanted_ to create numbers that adhere to Benford's law? To this end, we need to be able to _encode_ probabilities such as the above in our grammar, such that we can ensure that a leading digit is indeed a `1` in 30% of all cases.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Specifying Probabilities
#
# The goal of this chapter is to assign _probabilities_ to individual expansions in the grammar, such that we can express that some expansion alternatives should be favored over others. This is not only useful to generate "natural"-looking numbers, but even more so to _direct_ test generation towards a specific goal. If you recently have changed some code in your program, you would probably like to generate inputs that exercise precisely this code. By raising the probabilities on the input elements associated with the changed code, you will get more tests that exercise the changed code.
# + [markdown] slideshow={"slide_type": "subslide"}
# Our concept for expressing probabilities is to _annotate_ individual expansions with attributes such as probabilities, using the annotation mechanism introduced in [the chapter on grammars](Grammars.ipynb). To this end, we allow that an expansion cannot only be a string, but also a _pair_ of a string and a set of attributes, as in
#
# ```python
# "<expr>":
# [("<term> + <expr>", opts(prob=0.1)),
# ("<term> - <expr>", opts(prob=0.2)),
# "<term>"]
# ```
#
# Here, the `opts()` function would allow us to express probabilities for choosing the individual expansions. The addition would have a probability of 10%, the subtraction of 20%. The remaining probability (in this case 70%) is equally distributed over the non-attributed expansions (in this case the single last one).
# + [markdown] slideshow={"slide_type": "subslide"}
# We can now use pairs with `opts()` to assign probabilities to our expression grammar:
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "skip"}
import bookutils
# + slideshow={"slide_type": "skip"}
from Fuzzer import Fuzzer
# + button=false code_folding=[] new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"}
from GrammarFuzzer import GrammarFuzzer, all_terminals, display_tree, DerivationTree
# + button=false code_folding=[] new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"}
from Grammars import is_valid_grammar, EXPR_GRAMMAR, START_SYMBOL, crange
from Grammars import opts, exp_string, exp_opt, set_opts
from Grammars import Grammar, Expansion
# + slideshow={"slide_type": "skip"}
from typing import List, Dict, Set, Optional, cast, Any, Tuple
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "skip"}
PROBABILISTIC_EXPR_GRAMMAR: Grammar = {
"<start>":
["<expr>"],
"<expr>":
[("<term> + <expr>", opts(prob=0.1)),
("<term> - <expr>", opts(prob=0.2)),
"<term>"],
"<term>":
[("<factor> * <term>", opts(prob=0.1)),
("<factor> / <term>", opts(prob=0.1)),
"<factor>"
],
"<factor>":
["+<factor>", "-<factor>", "(<expr>)",
"<leadinteger>", "<leadinteger>.<integer>"],
"<leadinteger>":
["<leaddigit><integer>", "<leaddigit>"],
# Benford's law: frequency distribution of leading digits
"<leaddigit>":
[("1", opts(prob=0.301)),
("2", opts(prob=0.176)),
("3", opts(prob=0.125)),
("4", opts(prob=0.097)),
("5", opts(prob=0.079)),
("6", opts(prob=0.067)),
("7", opts(prob=0.058)),
("8", opts(prob=0.051)),
("9", opts(prob=0.046)),
],
# Remaining digits are equally distributed
"<integer>":
["<digit><integer>", "<digit>"],
"<digit>":
["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"],
}
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "skip"}
assert is_valid_grammar(PROBABILISTIC_EXPR_GRAMMAR, supported_opts={'prob'})
# + [markdown] slideshow={"slide_type": "fragment"}
# This is how the grammar expansions are represented internally:
# + slideshow={"slide_type": "fragment"}
leaddigits: List[Expansion] = PROBABILISTIC_EXPR_GRAMMAR["<leaddigit>"]
leaddigits
# + [markdown] slideshow={"slide_type": "subslide"}
# However, we typically access the expansion string and the associated probability via designated helper functions, `exp_string()` (from the [chapter on Grammars](Grammars.ipynb)) and `exp_prob()`:
# + slideshow={"slide_type": "fragment"}
leaddigit_expansion = leaddigits[0]
leaddigit_expansion
# + slideshow={"slide_type": "fragment"}
exp_string(leaddigit_expansion)
# + slideshow={"slide_type": "fragment"}
def exp_prob(expansion: Expansion) -> float:
"""Return the options of an expansion"""
return exp_opt(expansion, 'prob')
# + slideshow={"slide_type": "fragment"}
exp_prob(leaddigit_expansion)
# + [markdown] slideshow={"slide_type": "fragment"}
# Our existing fuzzers are all set up to work with grammars annotated this way. They simply ignore all annotations.
# + slideshow={"slide_type": "subslide"}
f = GrammarFuzzer(PROBABILISTIC_EXPR_GRAMMAR)
f.fuzz()
# + slideshow={"slide_type": "skip"}
from GrammarCoverageFuzzer import GrammarCoverageFuzzer # minor dependency
# + slideshow={"slide_type": "fragment"}
f = GrammarCoverageFuzzer(PROBABILISTIC_EXPR_GRAMMAR)
f.fuzz()
# + [markdown] slideshow={"slide_type": "slide"} toc-hr-collapsed=true
# ## Computing Probabilities
#
# Let us define functions that access probabilities for given expansions. While doing so, they also check for inconsistencies.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Distributing Probabilities
#
# Here is how we distribute probabilities for expansions without specified probabilities. Given an expansion rule
#
# $$S ::= a_1\:|\: a_2 \:|\: \dots \:|\: a_n \:|\: u_1 \:|\: u_2 \:|\: \dots u_m$$
#
# with $n \ge 0$ alternatives $a_i$ for which the probability $p(a_i)$ is _specified_ and
# $m \ge 0$ alternatives $u_j$ for which the probability $p(u_j)$ is _unspecified_,
# the "remaining" probability is distributed equally over all $u_j$; in other words,
#
# $$p(u_j) = \frac{1 - \sum_{i = 1}^{n}p(a_i)}{m}$$
#
# If no probabilities are specified ($n = 0$), then all expansions have the same probability.
#
# The overall sum of probabilities must be 1:
#
# $$\sum_{i = 1}^{n} p(a_i) + \sum_{j = 1}^{m} p(u_i) = 1$$
#
# We check these properties while distributing probabilities.
# + [markdown] slideshow={"slide_type": "subslide"}
# The function `exp_probabilities()` returns a mapping of all expansions in a rule to their respective probabilities.
# + slideshow={"slide_type": "fragment"}
def exp_probabilities(expansions: List[Expansion],
nonterminal: str ="<symbol>") \
-> Dict[Expansion, float]:
probabilities = [exp_prob(expansion) for expansion in expansions]
prob_dist = prob_distribution(probabilities, nonterminal) # type: ignore
prob_mapping: Dict[Expansion, float] = {}
for i in range(len(expansions)):
expansion = exp_string(expansions[i])
prob_mapping[expansion] = prob_dist[i]
return prob_mapping
# + [markdown] slideshow={"slide_type": "subslide"}
# The gist of `exp_probabilities()` is handled in `prob_distribution()`, which does the actual checking and computation.
# + slideshow={"slide_type": "subslide"}
def prob_distribution(probabilities: List[Optional[float]],
nonterminal: str = "<symbol>"):
epsilon = 0.00001
number_of_unspecified_probabilities = probabilities.count(None)
if number_of_unspecified_probabilities == 0:
sum_probabilities = cast(float, sum(probabilities))
assert abs(sum_probabilities - 1.0) < epsilon, \
nonterminal + ": sum of probabilities must be 1.0"
return probabilities
sum_of_specified_probabilities = 0.0
for p in probabilities:
if p is not None:
sum_of_specified_probabilities += p
assert 0 <= sum_of_specified_probabilities <= 1.0, \
nonterminal + ": sum of specified probabilities must be between 0.0 and 1.0"
default_probability = ((1.0 - sum_of_specified_probabilities)
/ number_of_unspecified_probabilities)
all_probabilities = []
for p in probabilities:
if p is None:
p = default_probability
all_probabilities.append(p)
assert abs(sum(all_probabilities) - 1.0) < epsilon
return all_probabilities
# + [markdown] slideshow={"slide_type": "subslide"}
# Here's the mapping `exp_probabilities()` returns for the annotated `<leaddigit>` element:
# + slideshow={"slide_type": "fragment"}
print(exp_probabilities(PROBABILISTIC_EXPR_GRAMMAR["<leaddigit>"]))
# + [markdown] slideshow={"slide_type": "fragment"}
# If no expansion is annotated, all expansions have the same likelihood of being selected, as in our previous grammar fuzzers.
# + slideshow={"slide_type": "fragment"}
print(exp_probabilities(PROBABILISTIC_EXPR_GRAMMAR["<digit>"]))
# + [markdown] slideshow={"slide_type": "fragment"}
# Here's how `exp_probabilities()` distributes any remaining probability across non-annotated expansions:
# + slideshow={"slide_type": "fragment"}
exp_probabilities(PROBABILISTIC_EXPR_GRAMMAR["<expr>"])
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Checking Probabilities
# + [markdown] slideshow={"slide_type": "fragment"}
# We can use the checking capabilities of `exp_probabilities()` to check a probabilistic grammar for consistency:
# + slideshow={"slide_type": "fragment"}
def is_valid_probabilistic_grammar(grammar: Grammar,
start_symbol: str = START_SYMBOL) -> bool:
if not is_valid_grammar(grammar, start_symbol):
return False
for nonterminal in grammar:
expansions = grammar[nonterminal]
_ = exp_probabilities(expansions, nonterminal)
return True
# + slideshow={"slide_type": "fragment"}
assert is_valid_probabilistic_grammar(PROBABILISTIC_EXPR_GRAMMAR)
# + slideshow={"slide_type": "fragment"}
assert is_valid_probabilistic_grammar(EXPR_GRAMMAR)
# + slideshow={"slide_type": "skip"}
from ExpectError import ExpectError
# + slideshow={"slide_type": "subslide"}
with ExpectError():
assert is_valid_probabilistic_grammar({"<start>": [("1", opts(prob=0.5))]})
# + slideshow={"slide_type": "subslide"}
with ExpectError():
assert is_valid_probabilistic_grammar(
{"<start>": [("1", opts(prob=1.5)), "2"]})
# + [markdown] slideshow={"slide_type": "slide"}
# ## Expanding by Probability
#
# Now that we have seen how to specify probabilities for a grammar, we can actually implement probabilistic expansion. In our `ProbabilisticGrammarFuzzer`, it suffices to overload one method, namely `choose_node_expansion()`. For each of the children we can choose from (typically all expansions of a symbol), we determine their probability (using `exp_probabilities()` defined above), and make a weighted choice using `random.choices()` with a `weight` argument.
# + slideshow={"slide_type": "skip"}
import random
# + slideshow={"slide_type": "subslide"}
class ProbabilisticGrammarFuzzer(GrammarFuzzer):
"""A grammar-based fuzzer respecting probabilities in grammars."""
def check_grammar(self) -> None:
super().check_grammar()
assert is_valid_probabilistic_grammar(self.grammar)
def supported_opts(self) -> Set[str]:
return super().supported_opts() | {'prob'}
# + slideshow={"slide_type": "subslide"}
class ProbabilisticGrammarFuzzer(ProbabilisticGrammarFuzzer):
def choose_node_expansion(self, node: DerivationTree,
children_alternatives: List[Any]) -> int:
(symbol, tree) = node
expansions = self.grammar[symbol]
probabilities = exp_probabilities(expansions)
weights: List[float] = []
for children in children_alternatives:
expansion = all_terminals((symbol, children))
children_weight = probabilities[expansion]
if self.log:
print(repr(expansion), "p =", children_weight)
weights.append(children_weight)
if sum(weights) == 0:
# No alternative (probably expanding at minimum cost)
return random.choices(
range(len(children_alternatives)))[0]
else:
return random.choices(
range(len(children_alternatives)), weights=weights)[0]
# + [markdown] slideshow={"slide_type": "subslide"}
# Our probabilistic grammar fuzzer works just like the non-probabilistic grammar fuzzer, except that it actually respects probability annotations. Let us generate a couple of "natural" numbers that respect Benford's law:
# + slideshow={"slide_type": "fragment"}
natural_fuzzer = ProbabilisticGrammarFuzzer(
PROBABILISTIC_EXPR_GRAMMAR, start_symbol="<leadinteger>")
print([natural_fuzzer.fuzz() for i in range(20)])
# + [markdown] slideshow={"slide_type": "fragment"}
# In contrast, these numbers are purely random:
# + slideshow={"slide_type": "fragment"}
integer_fuzzer = GrammarFuzzer(
PROBABILISTIC_EXPR_GRAMMAR, start_symbol="<leadinteger>")
print([integer_fuzzer.fuzz() for i in range(20)])
# + [markdown] slideshow={"slide_type": "subslide"}
# Are the "natural" numbers really more "natural" than the random ones? To show that `ProbabilisticGrammarFuzzer` indeed respects the probabilistic annotations, let us create a specific fuzzer for the lead digit:
# + slideshow={"slide_type": "fragment"}
leaddigit_fuzzer = ProbabilisticGrammarFuzzer(
PROBABILISTIC_EXPR_GRAMMAR, start_symbol="<leaddigit>")
leaddigit_fuzzer.fuzz()
# + [markdown] slideshow={"slide_type": "fragment"}
# If we generate thousands of lead digits, their distribution should again follow Benford's law:
# + slideshow={"slide_type": "subslide"}
trials = 10000
count = {}
for c in crange('0', '9'):
count[c] = 0
for i in range(trials):
count[leaddigit_fuzzer.fuzz()] += 1
print([(digit, count[digit] / trials) for digit in count])
# + [markdown] slideshow={"slide_type": "fragment"}
# Quod erat demonstrandum! The distribution is pretty much exactly as originally specified. We now have a fuzzer where we can exercise control by specifying probabilities.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Directed Fuzzing
#
# Assigning probabilities to individual expansions gives us great control over which inputs should be generated. By choosing probabilities wisely, we can _direct_ fuzzing towards specific functions and features – for instance, towards functions that are particularly critical, prone to failures, or that have been recently changed.
# + [markdown] slideshow={"slide_type": "fragment"}
# As an example, consider the URL grammar from the [chapter on grammars](Grammars.ipynb). Let us assume we have just made a change to our implementation of the secure FTP protocol. By assigning a higher probability to the `ftps` scheme, we can generate more URLs that will specifically test this functionality.
# + [markdown] slideshow={"slide_type": "fragment"}
# First, let us define a helper function that sets a particular option:
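# + [markdown] slideshow={"slide_type": "fragment"}
# A minimal sketch of such a `set_opts()` helper is shown below. It assumes that each expansion is either a plain string or a `(string, opts)` pair and merges new options into any existing ones; the original definition may differ in details.
# + slideshow={"slide_type": "subslide"}
def set_opts(grammar: Grammar, symbol: str, expansion: Expansion,
             opts_to_set: Dict[str, Any]) -> None:
    """Set the given options for the expansion `expansion` of grammar[symbol] (sketch)"""
    # Both the target expansion and the stored ones may be (string, opts) pairs
    target = expansion if isinstance(expansion, str) else expansion[0]
    for i, exp in enumerate(grammar[symbol]):
        exp_string = exp if isinstance(exp, str) else exp[0]
        if exp_string != target:
            continue
        new_opts = {} if isinstance(exp, str) else dict(exp[1])
        new_opts.update(opts_to_set)
        grammar[symbol][i] = (exp_string, new_opts)
        return
    raise KeyError(f"No expansion {target!r} for {symbol}")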
# + [markdown] slideshow={"slide_type": "fragment"}
# Here's a specialization just for probabilities:
# + slideshow={"slide_type": "subslide"}
def set_prob(grammar: Grammar, symbol: str,
expansion: Expansion, prob: Optional[float]) -> None:
"""Set the probability of the given expansion of grammar[symbol]"""
set_opts(grammar, symbol, expansion, opts(prob=prob))
# + [markdown] slideshow={"slide_type": "fragment"}
# Let us use `set_prob()` to give the `ftps` expansion a probability of 80%:
# + slideshow={"slide_type": "skip"}
from Grammars import URL_GRAMMAR, extend_grammar
# + slideshow={"slide_type": "fragment"}
probabilistic_url_grammar = extend_grammar(URL_GRAMMAR)
set_prob(probabilistic_url_grammar, "<scheme>", "ftps", 0.8)
assert is_valid_probabilistic_grammar(probabilistic_url_grammar)
# + slideshow={"slide_type": "fragment"}
probabilistic_url_grammar["<scheme>"]
# + [markdown] slideshow={"slide_type": "fragment"}
# If we use this grammar for fuzzing, we will get plenty of `ftps:` prefixes:
# + slideshow={"slide_type": "subslide"}
prob_url_fuzzer = ProbabilisticGrammarFuzzer(probabilistic_url_grammar)
for i in range(10):
print(prob_url_fuzzer.fuzz())
# + [markdown] slideshow={"slide_type": "subslide"}
# In a similar vein, we can direct URL generation towards specific hosts or ports; we can favor URLs with queries, fragments, or logins – or URLs without these. All it takes is to set appropriate probabilities.
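# + [markdown] slideshow={"slide_type": "fragment"}
# For instance, here is a sketch of how one could favor a particular host – assuming, as the sample URLs used later in this chapter suggest, that `cispa.saarland` is one of the `<host>` alternatives of `URL_GRAMMAR`:
# + slideshow={"slide_type": "fragment"}
host_focused_url_grammar = extend_grammar(URL_GRAMMAR)
# Make 90% of generated URLs point to this host (assumed to be a <host> expansion)
set_prob(host_focused_url_grammar, "<host>", "cispa.saarland", 0.9)
assert is_valid_probabilistic_grammar(host_focused_url_grammar)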
# + [markdown] slideshow={"slide_type": "fragment"}
# By setting the probability of an expansion to zero, we can effectively disable specific expansions:
# + slideshow={"slide_type": "fragment"}
set_prob(probabilistic_url_grammar, "<scheme>", "ftps", 0.0)
assert is_valid_probabilistic_grammar(probabilistic_url_grammar)
# + slideshow={"slide_type": "subslide"}
prob_url_fuzzer = ProbabilisticGrammarFuzzer(probabilistic_url_grammar)
for i in range(10):
print(prob_url_fuzzer.fuzz())
# + [markdown] slideshow={"slide_type": "subslide"}
# Note that even if we set the probability of an expansion to zero, we may still see the expansion taken. This can happen during the "closing" phase of [our grammar fuzzer](GrammarFuzzer.ipynb), when the expansion is closed at minimum cost. At this stage, even expansions with "zero" probability will be taken if this is necessary for closing the expansion.
# + [markdown] slideshow={"slide_type": "fragment"}
# Let us illustrate this feature using the `<expr>` rule from our expression grammar:
# + slideshow={"slide_type": "skip"}
from Grammars import EXPR_GRAMMAR
# + slideshow={"slide_type": "fragment"}
probabilistic_expr_grammar = extend_grammar(EXPR_GRAMMAR)
probabilistic_expr_grammar["<expr>"]
# + [markdown] slideshow={"slide_type": "fragment"}
# If we set the probability of the `<term>` expansion to zero, the string should expand again and again.
# + slideshow={"slide_type": "fragment"}
set_prob(probabilistic_expr_grammar, "<expr>", "<term>", 0.0)
assert is_valid_probabilistic_grammar(probabilistic_expr_grammar)
# + [markdown] slideshow={"slide_type": "subslide"}
# Still, in the "closing" phase, subexpressions will eventually expand into `<term>`, as it is the only way to close the expansion. Tracking `choose_node_expansion()` shows that it is invoked with only one possible expansion `<term>`, which has to be taken even though its specified probability is zero.
# + slideshow={"slide_type": "fragment"}
prob_expr_fuzzer = ProbabilisticGrammarFuzzer(probabilistic_expr_grammar)
prob_expr_fuzzer.fuzz()
# + [markdown] slideshow={"slide_type": "slide"}
# ## Probabilities in Context
#
# While specified probabilities give us a means to control which expansions are taken how often, this control by itself may not be enough. As an example, consider the following grammar for IPv4 addresses:
# + slideshow={"slide_type": "fragment"}
def decrange(start: int, end: int) -> List[Expansion]:
"""Return a list with string representations of numbers in the range [start, end)"""
return [repr(n) for n in range(start, end)]
# + slideshow={"slide_type": "fragment"}
IP_ADDRESS_GRAMMAR: Grammar = {
"<start>": ["<address>"],
"<address>": ["<octet>.<octet>.<octet>.<octet>"],
# ["0", "1", "2", ..., "255"]
"<octet>": decrange(0, 256)
}
# + slideshow={"slide_type": "subslide"}
print(IP_ADDRESS_GRAMMAR["<octet>"][:20])
# + slideshow={"slide_type": "fragment"}
assert is_valid_grammar(IP_ADDRESS_GRAMMAR)
# + [markdown] slideshow={"slide_type": "fragment"}
# We can easily use this grammar to create IP addresses:
# + slideshow={"slide_type": "fragment"}
ip_fuzzer = ProbabilisticGrammarFuzzer(IP_ADDRESS_GRAMMAR)
ip_fuzzer.fuzz()
# + [markdown] slideshow={"slide_type": "fragment"}
# However, if we want to assign a specific probability to one of the four octets, we are out of luck. All we can do is to assign the same probability distribution for all four octets:
# + slideshow={"slide_type": "fragment"}
probabilistic_ip_address_grammar = extend_grammar(IP_ADDRESS_GRAMMAR)
set_prob(probabilistic_ip_address_grammar, "<octet>", "127", 0.8)
# + slideshow={"slide_type": "subslide"}
probabilistic_ip_fuzzer = ProbabilisticGrammarFuzzer(
probabilistic_ip_address_grammar)
probabilistic_ip_fuzzer.fuzz()
# + [markdown] slideshow={"slide_type": "fragment"}
# If we want to assign _different_ probabilities to each of the four octets, what do we do?
# + [markdown] slideshow={"slide_type": "fragment"}
# The answer lies in the concept of _context_, which we already have seen [while discussing coverage-driven fuzzers](GrammarCoverageFuzzer.ipynb). As with coverage-driven fuzzing, the idea is to _duplicate_ the element whose probability we want to set dependent on its context. In our case, this means to duplicate the `<octet>` element to four individual ones, each of which can then get an individual probability distribution. We can do this programmatically, using the `duplicate_context()` method:
# + slideshow={"slide_type": "skip"}
from GrammarCoverageFuzzer import duplicate_context # minor dependency
# + slideshow={"slide_type": "subslide"}
probabilistic_ip_address_grammar = extend_grammar(IP_ADDRESS_GRAMMAR)
duplicate_context(probabilistic_ip_address_grammar, "<address>")
# + slideshow={"slide_type": "fragment"}
probabilistic_ip_address_grammar["<address>"]
# + [markdown] slideshow={"slide_type": "fragment"}
# We can now assign different probabilities to each of the `<octet>` symbols. For instance, we can force specific expansions by setting their probability to 100%:
# + slideshow={"slide_type": "fragment"}
set_prob(probabilistic_ip_address_grammar, "<octet-1>", "127", 1.0)
set_prob(probabilistic_ip_address_grammar, "<octet-2>", "0", 1.0)
# + slideshow={"slide_type": "fragment"}
assert is_valid_probabilistic_grammar(probabilistic_ip_address_grammar)
# + [markdown] slideshow={"slide_type": "fragment"}
# The remaining two octets `<octet-3>` and `<octet-4>` have no specific probabilities set. During fuzzing, all their expansions (all octets) are thus still available:
# + slideshow={"slide_type": "subslide"}
probabilistic_ip_fuzzer = ProbabilisticGrammarFuzzer(
probabilistic_ip_address_grammar)
[probabilistic_ip_fuzzer.fuzz() for i in range(5)]
# + [markdown] slideshow={"slide_type": "fragment"}
# Just as with coverage, we can duplicate grammar rules arbitrarily often to get ever finer-grained control over probabilities. However, this finer-grained control also comes at the cost of having to maintain these probabilities. In the next section, we will therefore discuss means to assign and tune such probabilities automatically.
# + [markdown] slideshow={"slide_type": "slide"} toc-hr-collapsed=true
# ## Learning Probabilities from Samples
#
# Probabilities need not be set manually all the time. They can also be _learned_ from other sources, notably by counting _how frequently individual expansions occur in a given set of inputs_. This is useful in a number of situations, including:
#
# 1. Test _common_ features. The idea is that during testing, one may want to focus on frequently occurring (or frequently used) features first, to ensure correct functionality for the most common usages.
# 2. Test _uncommon_ features. Here, the idea is to have test generation focus on features that are rarely seen (or not seen at all) in inputs. This is the same motivation as with [grammar coverage](GrammarCoverageFuzzer.ipynb), but from a probabilistic standpoint.
# 3. Focus on specific _slices_. One may have a set of inputs that is of particular interest (for instance, because they exercise a critical functionality, or recently have discovered bugs). Using this learned distribution for fuzzing allows us to _focus_ on precisely these functionalities of interest.
#
# Let us first introduce counting expansions and learning probabilities, and then detail these scenarios.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Counting Expansions
#
# We start with implementing a means to take a set of inputs and determine the number of expansions in that set. To this end, we need the _parsers_ introduced [in the previous chapter](Parser.ipynb) to transform a string input into a derivation tree. For our IP address grammar, this is how this works:
# + slideshow={"slide_type": "skip"}
from Parser import Parser, EarleyParser
# + slideshow={"slide_type": "fragment"}
IP_ADDRESS_TOKENS = {"<octet>"} # EarleyParser needs explicit tokens
# + slideshow={"slide_type": "fragment"}
parser = EarleyParser(IP_ADDRESS_GRAMMAR)
# + slideshow={"slide_type": "fragment"}
tree, *_ = parser.parse("127.0.0.1")
display_tree(tree)
# + [markdown] slideshow={"slide_type": "subslide"}
# In a tree such as this one, we can now _count_ individual expansions. In the above tree, for instance, we have two expansions of `<octet>` into `0`, one into `1`, and one into `127`. In other words, the expansion `<octet>` into `0` makes up 50% of all expansions seen; the expansions into `127` and `1` make up 25% each, and the other ones 0%. These are the probabilities we'd like to assign to our "learned" grammar.
# + [markdown] slideshow={"slide_type": "fragment"}
# We introduce a class `ExpansionCountMiner` which allows us to count how frequently individual expansions take place. Its initialization method takes a parser (say, an `EarleyParser`) that would be initialized with the appropriate grammar.
# + slideshow={"slide_type": "skip"}
from GrammarCoverageFuzzer import expansion_key # minor dependency
# + slideshow={"slide_type": "skip"}
from Grammars import is_nonterminal
# + slideshow={"slide_type": "subslide"}
class ExpansionCountMiner:
def __init__(self, parser: Parser, log: bool = False) -> None:
assert isinstance(parser, Parser)
self.grammar = extend_grammar(parser.grammar())
self.parser = parser
self.log = log
self.reset()
# + [markdown] slideshow={"slide_type": "fragment"}
# The attribute `expansion_counts` holds the expansions seen; adding a tree with `add_tree()` traverses the given tree and adds all expansions seen.
# + slideshow={"slide_type": "subslide"}
class ExpansionCountMiner(ExpansionCountMiner):
def reset(self) -> None:
self.expansion_counts: Dict[str, int] = {}
def add_coverage(self, symbol: str, children: List[DerivationTree]) -> None:
key = expansion_key(symbol, children)
if self.log:
print("Found", key)
if key not in self.expansion_counts:
self.expansion_counts[key] = 0
self.expansion_counts[key] += 1
def add_tree(self, tree: DerivationTree) -> None:
(symbol, children) = tree
if not is_nonterminal(symbol):
return
assert children is not None
direct_children: List[DerivationTree] = [
(symbol, None) if is_nonterminal(symbol)
else (symbol, []) for symbol, c in children]
self.add_coverage(symbol, direct_children)
for c in children:
self.add_tree(c)
# + [markdown] slideshow={"slide_type": "subslide"}
# The method `count_expansions()` is the public-facing one; it takes a list of inputs, parses them, and processes the resulting trees. The method `counts()` returns the counts found.
# + slideshow={"slide_type": "fragment"}
class ExpansionCountMiner(ExpansionCountMiner):
def count_expansions(self, inputs: List[str]) -> None:
for inp in inputs:
tree, *_ = self.parser.parse(inp)
self.add_tree(tree)
def counts(self) -> Dict[str, int]:
return self.expansion_counts
# + [markdown] slideshow={"slide_type": "fragment"}
# Let us try this out on our IP address grammar. We create an `ExpansionCountMiner` for our IP address grammar:
# + slideshow={"slide_type": "fragment"}
expansion_count_miner = ExpansionCountMiner(EarleyParser(IP_ADDRESS_GRAMMAR))
# + [markdown] slideshow={"slide_type": "subslide"}
# We parse a (small) set of IP addresses and count the expansions occurring:
# + slideshow={"slide_type": "fragment"}
expansion_count_miner.count_expansions(["127.0.0.1", "1.2.3.4"])
expansion_count_miner.counts()
# + [markdown] slideshow={"slide_type": "fragment"}
# You see that we have one expansion into `127`, and two into `0`. These are the counts we can use to assign probabilities.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Assigning Probabilities
#
# The distribution of counts, as determined by `ExpansionCountMiner` is what we can use to assign probabilities to our grammar. To this end, we introduce a subclass `ProbabilisticGrammarMiner` whose method `set_expansion_probabilities()` processes all expansions of a given symbol, checks whether it occurs in a given count distribution, and assigns probabilities using the following formula.
# + [markdown] slideshow={"slide_type": "subslide"}
# Given a set $T$ of derivation trees (as mined from samples), we determine the probabilities $p_i$ for each alternative $a_i$ of a symbol $S \rightarrow a_1 | \dots | a_n$ as
#
# $$p_i = \frac{\text{Expansions of $S \rightarrow a_i$ in $T$}}{\text{Expansions of $S$ in $T$}}$$
#
# Should $S$ not occur at all in $T$, then $p_i$ is _unspecified_.
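# + [markdown] slideshow={"slide_type": "fragment"}
# As a quick illustration of this formula, consider the two sample addresses `127.0.0.1` and `1.2.3.4` that we will parse below: `<octet>` is expanded $4 + 4 = 8$ times in total, twice into `0` and once into `127`. Hence $p_0 = 2/8 = 0.25$ and $p_{127} = 1/8 = 0.125$, while any octet not seen in the sample (say, `42`) gets a probability of zero.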
# + [markdown] slideshow={"slide_type": "fragment"}
# Here is the implementation of `set_expansion_probabilities()`, implementing the above formula:
# + slideshow={"slide_type": "subslide"}
class ProbabilisticGrammarMiner(ExpansionCountMiner):
def set_probabilities(self, counts: Dict[str, int]):
for symbol in self.grammar:
self.set_expansion_probabilities(symbol, counts)
def set_expansion_probabilities(self, symbol: str, counts: Dict[str, int]):
expansions = self.grammar[symbol]
if len(expansions) == 1:
set_prob(self.grammar, symbol, expansions[0], None)
return
expansion_counts = [
counts.get(
expansion_key(
symbol,
expansion),
0) for expansion in expansions]
total = sum(expansion_counts)
for i, expansion in enumerate(expansions):
p = expansion_counts[i] / total if total > 0 else None
# if self.log:
# print("Setting", expansion_key(symbol, expansion), p)
set_prob(self.grammar, symbol, expansion, p)
# + [markdown] slideshow={"slide_type": "subslide"}
# The typical use of `ProbabilisticGrammarMiner` is through `mine_probabilistic_grammar()`, which first determines a distribution from a set of inputs, and then sets the probabilities accordingly.
# + slideshow={"slide_type": "fragment"}
class ProbabilisticGrammarMiner(ProbabilisticGrammarMiner):
def mine_probabilistic_grammar(self, inputs: List[str]) -> Grammar:
self.count_expansions(inputs)
self.set_probabilities(self.counts())
return self.grammar
# + [markdown] slideshow={"slide_type": "fragment"}
# Let us put this to use. We create a grammar miner for IP addresses:
# + slideshow={"slide_type": "fragment"}
probabilistic_grammar_miner = ProbabilisticGrammarMiner(
EarleyParser(IP_ADDRESS_GRAMMAR))
# + [markdown] slideshow={"slide_type": "fragment"}
# We now use `mine_probabilistic_grammar()` to mine the grammar:
# + slideshow={"slide_type": "fragment"}
probabilistic_ip_address_grammar = probabilistic_grammar_miner.mine_probabilistic_grammar([
"127.0.0.1", "1.2.3.4"])
# + slideshow={"slide_type": "fragment"}
assert is_valid_probabilistic_grammar(probabilistic_ip_address_grammar)
# + [markdown] slideshow={"slide_type": "subslide"}
# Here's the resulting distribution of octets in our grammar:
# + slideshow={"slide_type": "fragment"}
[expansion for expansion in probabilistic_ip_address_grammar['<octet>']
if exp_prob(expansion) > 0]
# + [markdown] slideshow={"slide_type": "fragment"}
# If we use these probabilities for fuzzing, we will get the same distribution of octets as in our sample:
# + slideshow={"slide_type": "subslide"}
probabilistic_ip_fuzzer = ProbabilisticGrammarFuzzer(
probabilistic_ip_address_grammar)
[probabilistic_ip_fuzzer.fuzz() for i in range(10)]
# + [markdown] slideshow={"slide_type": "fragment"}
# By learning from a sample, we can thus adjust our fuzzing towards the (syntactic) properties of this very sample.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Testing Common Features
#
# Let us now get to our three usage scenarios. The first scenario is to create probability distributions right out of a sample, and to use these very distributions during test generation. This helps focus test generation on those features that are _most commonly used_, which thus minimizes the risk of customers encountering failures.
# + [markdown] slideshow={"slide_type": "fragment"}
# To illustrate testing of common features, we choose the URL domain. Let us assume that we are running some Web-related service, and this is a sample of the URLs our customers access most:
# + slideshow={"slide_type": "subslide"}
URL_SAMPLE: List[str] = [
"https://user:[email protected]:80/",
"https://fuzzingbook.com?def=56&x89=3&x46=48&def=def",
"https://cispa.saarland:80/def?def=7&x23=abc",
"https://fuzzingbook.com:80/",
"https://fuzzingbook.com:80/abc?def=abc&abc=x14&def=abc&abc=2&def=38",
"ftps://fuzzingbook.com/x87",
"https://user:[email protected]:6?def=54&x44=abc",
"http://fuzzingbook.com:80?x33=25&def=8",
"http://fuzzingbook.com:8080/def",
]
# + [markdown] slideshow={"slide_type": "fragment"}
# Using the Earley parser from the [chapter on parsers](Parser.ipynb), we can parse any of these inputs into a parse tree; we have to specify a token set, though.
# + slideshow={"slide_type": "fragment"}
URL_TOKENS: Set[str] = {"<scheme>", "<userinfo>", "<host>", "<port>", "<id>"}
# + slideshow={"slide_type": "subslide"}
url_parser = EarleyParser(URL_GRAMMAR, tokens=URL_TOKENS)
url_input = URL_SAMPLE[2]
print(url_input)
tree, *_ = url_parser.parse(url_input)
display_tree(tree)
# + [markdown] slideshow={"slide_type": "fragment"}
# Let us apply our `ProbabilisticGrammarMiner` class on these inputs, using the above `url_parser` parser, and obtain a probabilistic URL grammar:
# + slideshow={"slide_type": "fragment"}
probabilistic_grammar_miner = ProbabilisticGrammarMiner(url_parser)
probabilistic_url_grammar = probabilistic_grammar_miner.mine_probabilistic_grammar(
URL_SAMPLE)
# + [markdown] slideshow={"slide_type": "fragment"}
# These are the counts we obtained during parsing:
# + slideshow={"slide_type": "subslide"}
print(probabilistic_grammar_miner.counts())
# + [markdown] slideshow={"slide_type": "fragment"}
# These counts translate into individual probabilities. We see that in our sample, most URLs use the `https:` scheme, whereas there is no input using the `ftp:` scheme.
# + slideshow={"slide_type": "fragment"}
probabilistic_url_grammar['<scheme>']
# + [markdown] slideshow={"slide_type": "fragment"}
# Likewise, we see that most given URLs have multiple parameters:
# + slideshow={"slide_type": "fragment"}
probabilistic_url_grammar['<params>']
# + [markdown] slideshow={"slide_type": "subslide"}
# When we use this probabilistic grammar for fuzzing, these distributions are reflected in our generated inputs – no `ftp:` schemes either, and most inputs have multiple parameters.
# + slideshow={"slide_type": "fragment"}
g = ProbabilisticGrammarFuzzer(probabilistic_url_grammar)
[g.fuzz() for i in range(10)]
# + [markdown] slideshow={"slide_type": "subslide"}
# Being able to replicate a probability distribution learned from a sample is not only important for focusing on commonly used features. It can also help in achieving _valid inputs_, in particular if one learns probabilities _in context_, as discussed above: If within a given context, some elements are more likely than others (because they depend on each other), a learned probability distribution will reflect this; and hence, inputs generated from this learned probability distribution will have a higher chance to be valid, too. We will explore this further in the [exercises](#Exercises), below.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Testing Uncommon Features
#
# So far, we have focused on _common_ features; but from a testing perspective, one may just as well test _uncommon_ features – that is, features that rarely occur in our usage samples and therefore would be less exercised in practice. This is a common scenario in security testing, where one focuses on uncommon (and possibly lesser-known) features, as fewer users means fewer bugs reported, and thus more bugs left to be found and exploited.
# + [markdown] slideshow={"slide_type": "subslide"}
# To have our probabilistic grammar fuzzer focus on _uncommon_ features, we _change the learned probabilities_ such that commonly occurring features (i.e., those with a high learned probability) get a low probability, and vice versa: The last shall be first, and the first last. A particularly simple way to achieve such an _inversion_ of probabilities is to _swap_ them: The alternatives with the highest and lowest probabilities swap their probabilities, as do the alternatives with the second highest and second lowest, the alternatives with the third highest and third lowest, and so on.
# + [markdown] slideshow={"slide_type": "fragment"}
# The function `invert_expansion()` takes an expansion (a list of alternatives) from a grammar and returns a new inverted expansion in which the probabilities have been swapped according to the rule above. It creates a list of indexes, sorts it by increasing probability, and then for each $n$-th element, assigns it the probability of the $n$-th last element in the indexes.
# + slideshow={"slide_type": "skip"}
import copy
# + slideshow={"slide_type": "subslide"}
def invert_expansion(expansion: List[Expansion]) -> List[Expansion]:
def sort_by_prob(x: Tuple[int, float]) -> float:
index, prob = x
return prob if prob is not None else 0.0
inverted_expansion: List[Expansion] = copy.deepcopy(expansion)
indexes_and_probs = [(index, exp_prob(alternative))
for index, alternative in enumerate(expansion)]
indexes_and_probs.sort(key=sort_by_prob)
indexes = [i for (i, _) in indexes_and_probs]
for j in range(len(indexes)):
k = len(indexes) - 1 - j
# print(indexes[j], "gets", indexes[k])
inverted_expansion[indexes[j]][1]['prob'] = expansion[indexes[k]][1]['prob'] # type: ignore
return inverted_expansion
# + [markdown] slideshow={"slide_type": "subslide"}
# Here's `invert_expansion()` in action. This is our original probability distribution for URL schemes:
# + slideshow={"slide_type": "fragment"}
probabilistic_url_grammar['<scheme>']
# + [markdown] slideshow={"slide_type": "fragment"}
# And this is the "inverted" distribution. We see that the `ftp:` scheme, which previously had a probability of zero, now has the highest probability, whereas the most common scheme, `https:`, now has the previous zero probability of the `ftp:` scheme.
# + slideshow={"slide_type": "subslide"}
invert_expansion(probabilistic_url_grammar['<scheme>'])
# + [markdown] slideshow={"slide_type": "fragment"}
# One nice feature of this swapping of probabilities is that the sum of probabilities stays unchanged; no normalization is needed. Another nice feature is that the inversion of the inversion returns the original distribution:
# + slideshow={"slide_type": "fragment"}
invert_expansion(invert_expansion(probabilistic_url_grammar['<scheme>']))
# + [markdown] slideshow={"slide_type": "subslide"}
# Note that our implementation does not universally satisfy this property: If two alternatives $a_1$ and $a_2$ in the expansion share the same probability, then the second inversion may assign different probabilities to $a_1$ and $a_2$.
# + [markdown] slideshow={"slide_type": "fragment"}
# We can apply this inversion of expansions across the entire grammar:
# + slideshow={"slide_type": "fragment"}
def invert_probs(grammar: Grammar) -> Grammar:
inverted_grammar = extend_grammar(grammar)
for symbol in grammar:
inverted_grammar[symbol] = invert_expansion(grammar[symbol])
return inverted_grammar
# + [markdown] slideshow={"slide_type": "fragment"}
# This means that probabilities would be swapped for each and every expansion:
# + slideshow={"slide_type": "subslide"}
probabilistic_url_grammar["<digit>"]
# + slideshow={"slide_type": "subslide"}
inverted_probabilistic_url_grammar = invert_probs(probabilistic_url_grammar)
inverted_probabilistic_url_grammar["<digit>"]
# + [markdown] slideshow={"slide_type": "subslide"}
# If we now use this "inverted" grammar for fuzzing, the generated inputs will focus on the *complement of the input samples*. We will get plenty of tests of user/password features, as well as `ftp:` schemes – in essence, all the features present in our language, but rarely used (if at all) in our input samples.
# + slideshow={"slide_type": "subslide"}
g = ProbabilisticGrammarFuzzer(inverted_probabilistic_url_grammar)
[g.fuzz() for i in range(10)]
# + [markdown] slideshow={"slide_type": "subslide"}
# Besides having _only_ common or _only_ uncommon features, one can also create mixed forms – for instance, testing uncommon features in a common context. This can be helpful for security testing, where one may want an innocuous (common) "envelope" combined with an (uncommon) "payload". It all depends on where and how we tune the probabilities.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Learning Probabilities from Input Slices
#
# In our previous examples, we have learned from _all_ inputs to generate common or uncommon inputs. However, we can also learn from a _subset_ of inputs to focus on the features present in that subset (or, conversely, to _avoid_ its features). If we know, for instance, that there is some subset of inputs that covers a functionality of interest (say, because it is particularly critical or because it has been recently changed), we can learn from this very subset and focus our test generation on its features.
# + [markdown] slideshow={"slide_type": "subslide"}
# To illustrate this approach, let us use the CGI grammar introduced in the [chapter on coverage](Coverage.ipynb). We have a special interest in Line 25 in our CGI decoder – that is, the line that processes a `%` character followed by two valid hexadecimal digits:
#
# ```python
# ...
# elif c == '%':
# digit_high, digit_low = s[i + 1], s[i + 2]
# i += 2
# if digit_high in hex_values and digit_low in hex_values:
# v = hex_values[digit_high] * 16 + hex_values[digit_low] ### Line 25
# t += chr(v)
# ...
#
# ```
# Let us assume that we do not know precisely under which conditions Line 25 is executed – but still, we'd like to test it thoroughly. With our probability learning tools, we can learn these conditions, though. We start with a set of random inputs and consider the subset that covers Line 25.
# + slideshow={"slide_type": "skip"}
from Coverage import Coverage, cgi_decode
from Grammars import CGI_GRAMMAR
# + slideshow={"slide_type": "subslide"}
cgi_fuzzer = GrammarFuzzer(CGI_GRAMMAR)
# + slideshow={"slide_type": "fragment"}
trials = 100
coverage = {}
for i in range(trials):
cgi_input = cgi_fuzzer.fuzz()
with Coverage() as cov:
cgi_decode(cgi_input)
coverage[cgi_input] = cov.coverage()
# + [markdown] slideshow={"slide_type": "fragment"}
# These are all the random inputs that cover Line 25:
# + slideshow={"slide_type": "fragment"}
coverage_slice = [cgi_input for cgi_input in coverage
if ('cgi_decode', 25) in coverage[cgi_input]]
# + slideshow={"slide_type": "fragment"}
print(coverage_slice)
# + [markdown] slideshow={"slide_type": "subslide"}
# Actually, about half of the inputs cover Line 25:
# + slideshow={"slide_type": "fragment"}
len(coverage_slice) / trials
# + [markdown] slideshow={"slide_type": "fragment"}
# Let us now learn a probabilistic grammar from this slice of inputs:
# + slideshow={"slide_type": "fragment"}
probabilistic_grammar_miner = ProbabilisticGrammarMiner(
EarleyParser(CGI_GRAMMAR))
probabilistic_cgi_grammar = probabilistic_grammar_miner.mine_probabilistic_grammar(
coverage_slice)
# + slideshow={"slide_type": "fragment"}
assert is_valid_probabilistic_grammar(probabilistic_cgi_grammar)
# + [markdown] slideshow={"slide_type": "fragment"}
# We see that percentage signs are very likely to occur:
# + slideshow={"slide_type": "fragment"}
probabilistic_cgi_grammar['<letter>']
# + [markdown] slideshow={"slide_type": "subslide"}
# Using this grammar, we can now generate tests that specifically target Line 25:
# + slideshow={"slide_type": "fragment"}
probabilistic_cgi_fuzzer = ProbabilisticGrammarFuzzer(
probabilistic_cgi_grammar)
print([probabilistic_cgi_fuzzer.fuzz() for i in range(20)])
# + slideshow={"slide_type": "fragment"}
trials = 100
coverage = {}
for i in range(trials):
cgi_input = probabilistic_cgi_fuzzer.fuzz()
with Coverage() as cov:
cgi_decode(cgi_input)
coverage[cgi_input] = cov.coverage()
# + [markdown] slideshow={"slide_type": "subslide"}
# We see that the fraction of inputs that cover Line 25 is much higher already, showing that our focusing works:
# + slideshow={"slide_type": "fragment"}
coverage_slice: List[str] = [cgi_input for cgi_input in coverage
if ('cgi_decode', 25) in coverage[cgi_input]]
# + slideshow={"slide_type": "fragment"}
len(coverage_slice) / trials
# + [markdown] slideshow={"slide_type": "fragment"}
# Repeating this learning-and-fuzzing cycle a few more times yields an even stronger focus:
# + slideshow={"slide_type": "subslide"}
for run in range(3):
probabilistic_cgi_grammar = probabilistic_grammar_miner.mine_probabilistic_grammar(
coverage_slice)
probabilistic_cgi_fuzzer = ProbabilisticGrammarFuzzer(
probabilistic_cgi_grammar)
trials = 100
coverage = {}
for i in range(trials):
cgi_input = probabilistic_cgi_fuzzer.fuzz()
with Coverage() as cov:
cgi_decode(cgi_input)
coverage[cgi_input] = cov.coverage()
coverage_slice = [cgi_input for cgi_input in coverage
if ('cgi_decode', 25) in coverage[cgi_input]]
# + slideshow={"slide_type": "subslide"}
len(coverage_slice) / trials
# + [markdown] slideshow={"slide_type": "fragment"}
# By learning (and re-learning) probabilities from a subset of sample inputs, we can _specialize_ fuzzers towards the properties of that subset – in our case, inputs that contain percentage signs and valid hexadecimal letters. The degree to which we can specialize things is determined by the number of variables we can control – in our case, the probabilities for the individual rules. Adding more context to the grammar, as discussed above, will increase the number of variables, and thus the amount of specialization.
# + [markdown] slideshow={"slide_type": "subslide"}
# A high degree of specialization, however, limits our possibilities to explore combinations that fall _outside_ of the selected scope, and limit our possibilities to find bugs induced by these combinations. This tradeoff is known as *exploration vs. exploitation* in machine learning – shall one try to explore as many (possibly shallow) combinations as possible, or focus (exploit) specific areas? In the end, it all depends on where the bugs are, and where we are most likely to find them. Assigning and learning probabilities allows us to control the search strategies – from the common to the uncommon to specific subsets.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Detecting Unnatural Numbers
#
# Let us close this chapter by getting back to our introductory example. We said that Benford's law allows us not only to produce, but also to detect "unnatural" lead digit distributions such as the ones produced by simple random choices.
#
# If we use the regular `GrammarFuzzer` class (which ignores probabilities) to generate (random) lead digits, this is the distribution we get for each leading digit:
# + slideshow={"slide_type": "subslide"}
sample_size = 1000
random_integer_fuzzer = GrammarFuzzer(
PROBABILISTIC_EXPR_GRAMMAR,
start_symbol="<leaddigit>")
random_integers = [random_integer_fuzzer.fuzz() for i in range(sample_size)]
# + slideshow={"slide_type": "fragment"}
random_counts = [random_integers.count(str(c)) for c in crange('1', '9')]
random_counts
# + [markdown] slideshow={"slide_type": "fragment"}
# (For simplicity, we use the simple list `count()` method here rather than deploying the full-fledged `ProbabilisticGrammarMiner`.)
# + [markdown] slideshow={"slide_type": "fragment"}
# If we had a natural distribution of lead digits, this is what we would expect:
# + slideshow={"slide_type": "subslide"}
expected_prob_counts = [
exp_prob(
PROBABILISTIC_EXPR_GRAMMAR["<leaddigit>"][i]) *
sample_size for i in range(9)]
print(expected_prob_counts)
# + [markdown] slideshow={"slide_type": "fragment"}
# And if we had a random distribution, we would expect an equal distribution:
# + slideshow={"slide_type": "fragment"}
expected_random_counts = [sample_size / 9 for i in range(9)]
print(expected_random_counts)
# + [markdown] slideshow={"slide_type": "subslide"}
# Which distribution better matches our `random_counts` lead digits? To this end, we run a $\chi^2$-test to compare the distribution we found (`random_counts`) against the "natural" lead digit distribution `expected_prob_counts` and the random distribution `expected_random_counts`.
# + slideshow={"slide_type": "skip"}
from scipy.stats import chisquare # type: ignore
# + [markdown] slideshow={"slide_type": "fragment"}
# It turns out that there is a zero chance (`pvalue` = 0.0) that the observed distribution follows a "natural" distribution:
# + slideshow={"slide_type": "fragment"}
chisquare(random_counts, expected_prob_counts)
# + [markdown] slideshow={"slide_type": "fragment"}
# However, there is a 97% chance that the observed behavior follows a random distribution:
# + slideshow={"slide_type": "fragment"}
chisquare(random_counts, expected_random_counts)
# + [markdown] slideshow={"slide_type": "subslide"}
# Hence, if you find some numbers published and doubt their validity, you can run the above test to check whether they are likely to be natural. Better yet, insist that authors use Jupyter notebooks to produce their results, such that you can check every step of the calculation :-)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Synopsis
#
# A _probabilistic_ grammar allows attaching individual _probabilities_ to production rules. To set the probability of an individual expansion `S` to the value `X` (between 0 and 1), replace it with a pair
#
# ```python
# (S, opts(prob=X))
# ```
#
# If we want to ensure that 90% of phone numbers generated have an area code starting with `9`, we can write:
# + slideshow={"slide_type": "skip"}
from Grammars import US_PHONE_GRAMMAR, extend_grammar, opts
# + slideshow={"slide_type": "subslide"}
PROBABILISTIC_US_PHONE_GRAMMAR: Grammar = extend_grammar(US_PHONE_GRAMMAR,
{
"<lead-digit>": [
"2", "3", "4", "5", "6", "7", "8",
("9", opts(prob=0.9))
],
})
# + [markdown] slideshow={"slide_type": "fragment"}
# A `ProbabilisticGrammarFuzzer` will extract and interpret these options. Here is an example:
# + slideshow={"slide_type": "subslide"}
probabilistic_us_phone_fuzzer = ProbabilisticGrammarFuzzer(PROBABILISTIC_US_PHONE_GRAMMAR)
[probabilistic_us_phone_fuzzer.fuzz() for i in range(5)]
# + [markdown] slideshow={"slide_type": "fragment"}
# As you can see, the large majority of area codes now starts with `9`.
# + slideshow={"slide_type": "fragment"}
# ignore
from ClassDiagram import display_class_hierarchy
# + slideshow={"slide_type": "subslide"}
# ignore
display_class_hierarchy([ProbabilisticGrammarFuzzer],
public_methods=[
Fuzzer.run,
Fuzzer.runs,
GrammarFuzzer.__init__,
GrammarFuzzer.fuzz,
GrammarFuzzer.fuzz_tree,
ProbabilisticGrammarFuzzer.__init__,
],
types={
'DerivationTree': DerivationTree,
'Expansion': Expansion,
'Grammar': Grammar
},
project='fuzzingbook')
# + [markdown] button=false new_sheet=true run_control={"read_only": false} slideshow={"slide_type": "slide"}
# ## Lessons Learned
#
# * By specifying probabilities, one can steer fuzzing towards input features of interest.
# * Learning probabilities from samples allows one to focus on features that are common or uncommon in input samples.
# * Learning probabilities from a subset of samples allows one to produce inputs that are similar to that subset.
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "slide"}
# ## Next Steps
#
# Now that we have brought together probabilities and grammars (and revisited parsers and grammars), we have created a foundation for many applications. Our next chapters will focus on
#
# * how to [_reduce_ failing inputs to a minimum](Reducer.ipynb)
# * how to [carve](Carver.ipynb) and [produce](APIFuzzer.ipynb) tests at the function level
# * how to [automatically test (Web) user interfaces](WebFuzzer.ipynb)
#
# Enjoy!
# + [markdown] slideshow={"slide_type": "slide"}
# ## Background
#
# The idea of mining probabilities by parsing a corpus of data was first covered in "Learning to Fuzz: Application-Independent Fuzz Testing with Probabilistic, Generative Models of Input Data" \cite{Patra2016}, which also learns and applies probabilistic rules for derivation trees. Applying this idea to probabilistic grammars, as well as inverting probabilities and learning from slices, was first explored in "Inputs from Hell: Generating Uncommon Inputs from Common Samples" \cite{Pavese2018}.
#
# Our exposition of Benford's law follows [this article](https://brilliant.org/wiki/benfords-law/).
# + [markdown] button=false new_sheet=true run_control={"read_only": false} slideshow={"slide_type": "slide"} toc-hr-collapsed=true
# ## Exercises
#
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} solution2="hidden" solution2_first=true
# ### Exercise 1: Probabilistic Fuzzing with Coverage
#
# Create a class `ProbabilisticGrammarCoverageFuzzer` that extends `GrammarCoverageFuzzer` with probabilistic capabilities. The idea is to first cover all uncovered expansions (like `GrammarCoverageFuzzer`) and once all expansions are covered, to proceed by probabilities (like `ProbabilisticGrammarFuzzer`).
#
# To this end, define new instances of the `choose_covered_node_expansion()` and `choose_uncovered_node_expansion()` methods that choose an expansion based on the given weights.
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} solution2="hidden" solution2_first=true
# If you are an advanced programmer, realize the class via *multiple inheritance* from `GrammarCoverageFuzzer` and `ProbabilisticGrammarFuzzer` to achieve this.
#
# Multiple inheritance is a tricky thing. If you have two classes $A'$ and $A''$ which both inherit from $A$, the same method $m()$ of $A$ may be overridden in both $A'$ and $A''$. If one now inherits from _both_ $A'$ and $A''$ and calls $m()$, which of the $m()$ implementations should be called? Python resolves this conflict via its method resolution order: it invokes the $m()$ implementation of the base class that is listed first (see the small illustration below).
#
# To avoid such conflicts, one can check whether the order in which one inherits makes a difference. The method `inheritance_conflicts()` compares the attributes with each other; if they refer to different code, you have to resolve the conflict.
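# + [markdown] slideshow={"slide_type": "fragment"}
# As a minimal, self-contained illustration of this resolution order (the classes below are made up solely for this example):
# + slideshow={"slide_type": "fragment"}
class A:
    def m(self) -> str:
        return "A.m"
class A1(A):
    def m(self) -> str:
        return "A1.m"
class A2(A):
    def m(self) -> str:
        return "A2.m"
class B(A1, A2):  # A1 is listed first...
    pass
B().m()  # ...so this returns 'A1.m'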
# + slideshow={"slide_type": "skip"}
from bookutils import inheritance_conflicts
# + slideshow={"slide_type": "subslide"}
inheritance_conflicts(GrammarCoverageFuzzer, ProbabilisticGrammarFuzzer)
# + [markdown] slideshow={"slide_type": "fragment"} solution2="hidden" solution2_first=true
# This is a method you _have_ to implement for multiple inheritance besides `choose_covered_node_expansion()` and `choose_uncovered_node_expansion()`.
# + [markdown] slideshow={"slide_type": "skip"} solution2="hidden"
# **Solution**. With multiple inheritance, this is fairly easy; we just need to point the three methods to the right places:
# + cell_style="center" slideshow={"slide_type": "skip"} solution2="hidden"
class ProbabilisticGrammarCoverageFuzzer(
GrammarCoverageFuzzer, ProbabilisticGrammarFuzzer):
# Choose uncovered expansions first
def choose_node_expansion(self, node, children_alternatives):
return GrammarCoverageFuzzer.choose_node_expansion(
self, node, children_alternatives)
# Among uncovered expansions, pick by (relative) probability
def choose_uncovered_node_expansion(self, node, children_alternatives):
return ProbabilisticGrammarFuzzer.choose_node_expansion(
self, node, children_alternatives)
# For covered nodes, pick by probability, too
def choose_covered_node_expansion(self, node, children_alternatives):
return ProbabilisticGrammarFuzzer.choose_node_expansion(
self, node, children_alternatives)
# + [markdown] slideshow={"slide_type": "skip"} solution2="hidden"
# In the first nine invocations, our fuzzer covers one digit after another:
# + slideshow={"slide_type": "skip"} solution2="hidden"
cov_leaddigit_fuzzer = ProbabilisticGrammarCoverageFuzzer(
PROBABILISTIC_EXPR_GRAMMAR, start_symbol="<leaddigit>")
print([cov_leaddigit_fuzzer.fuzz() for i in range(9)])
# + [markdown] slideshow={"slide_type": "skip"} solution2="hidden"
# After these, we again proceed by probabilities:
# + slideshow={"slide_type": "skip"} solution2="hidden"
trials = 10000
count = {}
for c in crange('0', '9'):
count[c] = 0
for i in range(trials):
count[cov_leaddigit_fuzzer.fuzz()] += 1
print([(digit, count[digit] / trials) for digit in count])
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} solution="hidden" solution2="hidden" solution2_first=true solution_first=true
# ### Exercise 2: Learning from Past Bugs
#
# Learning from a set of inputs can be extremely valuable if one learns from _inputs that are known to have caused failures before._ In this exercise, you will go and learn distributions from past vulnerabilities.
#
# 1. Download [`js-vuln-db`](https://github.com/tunz/js-vuln-db), a set of JavaScript engine vulnerabilities. Each vulnerability comes with code that exercises it.
# 2. Extract all _number literals_ from the code, using `re.findall()` with appropriate regular expressions.
# 3. Convert these literals to (decimal) _numeric values_ and count their respective occurrences.
# 4. Create a grammar `RISKY_NUMBERS` that produces these numbers with probabilities reflecting the above counts.
#
# Of course, there is more to vulnerabilities than just specific numbers, but some numbers are more likely to induce errors than others. The next time you fuzz a system, do not generate numbers randomly; instead, pick one from `RISKY_NUMBERS` :-)
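# + [markdown] slideshow={"slide_type": "subslide"}
# If you want a starting point, here is a rough, hypothetical sketch of steps 2–4. It assumes the JavaScript sources from `js-vuln-db` have already been read into a list of strings `js_sources` (a name invented here), and it deliberately uses a simplified regular expression that only catches decimal and hexadecimal integer literals:
# + slideshow={"slide_type": "subslide"}
import re
from collections import Counter
def mine_risky_numbers(js_sources: List[str]) -> Grammar:
    """Build a probabilistic grammar from number literals found in `js_sources` (sketch)"""
    counts: Counter = Counter()
    number_pattern = re.compile(r'\b(?:0[xX][0-9a-fA-F]+|[0-9]+)\b')
    for source in js_sources:
        for literal in number_pattern.findall(source):
            # Step 3: convert hexadecimal and decimal literals to decimal values and count them
            value = int(literal, 16) if literal.lower().startswith("0x") else int(literal)
            counts[str(value)] += 1
    total = sum(counts.values())
    assert total > 0, "no number literals found"
    # Step 4: one expansion per distinct number, weighted by its relative frequency
    return {
        "<start>": ["<number>"],
        "<number>": [(number, opts(prob=count / total))
                     for number, count in counts.items()]
    }
# RISKY_NUMBERS = mine_risky_numbers(js_sources)  # js_sources: hypothetical list of file contents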
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "skip"} solution="hidden" solution2="hidden"
# **Solution.** _Solution for the exercise_
| docs/notebooks/ProbabilisticGrammarFuzzer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:cli_dev]
# language: python
# name: conda-env-cli_dev-py
# ---
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# # Use AML Pipelines to train multiple featurizers
# ### Compute multiple feature sets on the same dataset, concatenate them, and train
# Using the 20newsgroups dataset as an example, we first compute features on the same dataset with two different featurizers. For this demo everything runs on the same compute target; however, when some featurizers are much more expensive than others, or when the dataset is large, it can make sense to split the work across different machines.
# Finally, the features from both featurizers are concatenated and used to train a `sklearn` `Pipeline`.
# +
import os
from azureml.core import Workspace, Run, Experiment
ws = Workspace.from_config()
print('Workspace name: ' + ws.name,
'Azure region: ' + ws.location,
'Subscription id: ' + ws.subscription_id,
'Resource group: ' + ws.resource_group, sep = '\n')
# Also create a Project and attach to Workspace
project_folder = "scripts"
run_history_name = project_folder
if not os.path.isdir(project_folder):
os.mkdir(project_folder)
# -
from azureml.core.compute import BatchAiCompute, ComputeTarget
from azureml.pipeline.core import Pipeline, PipelineData
from azureml.core.datastore import Datastore
from azureml.pipeline.steps import PythonScriptStep
from azureml.core.compute import DsvmCompute
# Batch AI compute
cluster_name = "cpu-cluster"
try:
cluster = BatchAiCompute(ws, cluster_name)
print("found existing cluster.")
except:
print("creating new cluster")
provisioning_config = BatchAiCompute.provisioning_configuration(vm_size = "STANDARD_D2_v2",
autoscale_enabled = True,
cluster_min_nodes = 3,
cluster_max_nodes = 3)
# create the cluster
cluster = ComputeTarget.create(ws, cluster_name, provisioning_config)
cluster.wait_for_completion(show_output=True)
# We use the default blob datastore that comes with the workspace.
default_datastore = ws.get_default_datastore()
# # Python scripts
# - `fetch_newsgroups.py`: Fetch 20newsgroups data
# - `hashing_features.py`: Use feature hashing to generate features
# - `tfidf_features.py`: Compute tfidf features
# - `train_model.py`: Concatenate and train logistic regression model
# +
# %%writefile $project_folder/fetch_newsgroups.py
import argparse
import os
import pickle
from sklearn.datasets import fetch_20newsgroups
parser = argparse.ArgumentParser("download 20 newsgroups dataset")
parser.add_argument("--out_dir", type=str, help="output data dir")
args = parser.parse_args()
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
remove = ('headers', 'footers', 'quotes')
data_train = fetch_20newsgroups(subset='train', categories=categories,
shuffle=True, random_state=42,
remove=remove)
data_test = fetch_20newsgroups(subset='test', categories=categories,
shuffle=True, random_state=42,
remove=remove)
obj = {}
obj["data_train"] = data_train
obj["data_test"] = data_test
os.makedirs(args.out_dir)
with open(os.path.join(args.out_dir, "20news.pkl"), "wb") as fp:
pickle.dump(obj, fp)
# +
# %%writefile $project_folder/hashing_features.py
import argparse
import os
import pickle
from sklearn.feature_extraction.text import HashingVectorizer
parser = argparse.ArgumentParser("generate feature hashing features for 20 newsgroups")
parser.add_argument("--input_dir", type=str, help="data directory")
parser.add_argument("--out_dir", type=str, help="output feature hashing features directory")
args = parser.parse_args()
vectorizer = HashingVectorizer(stop_words='english', alternate_sign=False)
with open(os.path.join(args.input_dir, "20news.pkl"), "rb") as fp:
obj = pickle.load(fp)
data_train = obj["data_train"]
X_train = vectorizer.fit_transform(data_train.data)
obj = {}
obj["X_train"] = X_train
obj["vectorizer"] = vectorizer
os.makedirs(args.out_dir)
with open(os.path.join(args.out_dir, "feats.pkl"), "wb") as fp:
pickle.dump(obj, fp)
# +
# %%writefile $project_folder/tfidf_features.py
import argparse
import os
import pickle
from sklearn.feature_extraction.text import TfidfVectorizer
parser = argparse.ArgumentParser("generate feature hashing features for 20 newsgroups")
parser.add_argument("--input_dir", type=str, help="data directory")
parser.add_argument("--out_dir", type=str, help="output tfidf features directory")
parser.add_argument("--ngram", type=int, help="character ngram length")
args = parser.parse_args()
vectorizer = TfidfVectorizer(ngram_range=(args.ngram, args.ngram), analyzer="char")
with open(os.path.join(args.input_dir, "20news.pkl"), "rb") as fp:
obj = pickle.load(fp)
data_train = obj["data_train"]
X_train = vectorizer.fit_transform(data_train.data)
obj = {}
obj["X_train"] = X_train
obj["vectorizer"] = vectorizer
os.makedirs(args.out_dir)
with open(os.path.join(args.out_dir, "feats.pkl"), "wb") as fp:
pickle.dump(obj, fp)
# +
# %%writefile $project_folder/train_model.py
import argparse
import os
import pickle
from scipy import sparse
import sklearn
from sklearn.linear_model import LogisticRegression
import sklearn.pipeline
import sklearn.preprocessing
from sklearn.metrics import roc_auc_score
from azureml.core.run import Run
parser = argparse.ArgumentParser("train model for 20 newsgroups")
parser.add_argument("--hashing_dir", type=str, help="feature hashing directory")
parser.add_argument("--tfidf_dir", type=str, help="tfidf features directory")
parser.add_argument("--input_dir", type=str, help="data directory")
parser.add_argument("--output_dir", type=str, help="output model dir")
args = parser.parse_args()
vectorizers = []
X_train = []
with open(os.path.join(args.hashing_dir, "feats.pkl"), "rb") as fp:
obj = pickle.load(fp)
vectorizers.append(("feature_hashing", obj["vectorizer"]))
X_train.append(obj["X_train"])
with open(os.path.join(args.tfidf_dir, "feats.pkl"), "rb") as fp:
obj = pickle.load(fp)
vectorizers.append(("tfidf_features", obj["vectorizer"]))
X_train.append(obj["X_train"])
with open(os.path.join(args.input_dir, "20news.pkl"), "rb") as fp:
obj = pickle.load(fp)
y_train = obj["data_train"].target
y_test = obj["data_test"].target
raw_X_test = obj["data_test"].data
X_train = sparse.hstack(X_train)
lr_model = LogisticRegression()
lr_model.fit(X_train, y_train)
final_model = sklearn.pipeline.Pipeline([("transformer",
sklearn.pipeline.FeatureUnion(vectorizers)),
("model", lr_model)])
# check performance of final model
pred_probs = final_model.predict_proba(raw_X_test)
# binarize labels to compute average auc
binarizer = sklearn.preprocessing.LabelBinarizer()
binarizer.fit(y_train)
y_test_bin = binarizer.transform(y_test)
auc = roc_auc_score(y_test_bin, pred_probs)
print(f"Current AUC: {auc}")
run = Run.get_context()
run.log("auc", auc)
os.makedirs(args.output_dir, exist_ok=True)
out_file = os.path.join(args.output_dir, "model.pkl")
with open(out_file, "wb") as fp:
pickle.dump(final_model, fp)
# -
# # Define the run configuration environment for the compute target
from azureml.core.runconfig import CondaDependencies, RunConfiguration
cd = CondaDependencies.create(conda_packages=['scikit-learn'])
runconfig = RunConfiguration(conda_dependencies=cd)
runconfig.environment.docker.enabled = True
# # PipelineData objects where intermediate data is written to and read from
raw_data = PipelineData("rawdata", datastore=default_datastore)
hashing_features = PipelineData("hashing", datastore=default_datastore)
tfidf_features = PipelineData("tfidf", datastore=default_datastore)
output_dir = PipelineData("model_output", datastore=default_datastore)
# # Define steps and run
# +
data_step = PythonScriptStep(
name="fetch 20newsgroups dataset",
script_name="fetch_newsgroups.py",
arguments=["--out_dir", raw_data],
outputs=[raw_data],
source_directory=project_folder,
runconfig=runconfig,
target=cluster
)
feature_hashing_step = PythonScriptStep(
name="feature hashing",
script_name="hashing_features.py",
arguments=["--input_dir", raw_data, "--out_dir", hashing_features],
inputs=[raw_data],
outputs=[hashing_features],
source_directory=project_folder,
runconfig=runconfig,
target=cluster
)
tfidf_step = PythonScriptStep(
name="tfidf",
script_name="tfidf_features.py",
arguments=["--input_dir", raw_data, "--out_dir", tfidf_features, "--ngram", 3],
inputs=[raw_data],
outputs=[tfidf_features],
source_directory=project_folder,
runconfig=runconfig,
target=cluster
)
model_step = PythonScriptStep(
name="train the final model",
script_name="train_model.py",
arguments=["--input_dir", raw_data,
"--hashing_dir", hashing_features,
"--tfidf_dir", tfidf_features,
"--output_dir", output_dir
],
inputs=[raw_data, hashing_features, tfidf_features],
outputs=[output_dir],
source_directory=project_folder,
runconfig=runconfig,
target=cluster
)
# -
pipeline = Pipeline(workspace=ws, steps=[model_step])
pipeline.validate()
pipeline_run = Experiment(ws, "train_model_20newsgroups").submit(pipeline)
# # Monitor runs using widget
from azureml.train.widgets import RunDetails
RunDetails(pipeline_run).show()
# # Complete run and print metrics
pipeline_run.wait_for_completion()
for step_run in pipeline_run.get_children():
print("{}: {}".format(step_run.name, step_run.get_metrics()))
# # Optionally Clean compute resources
# +
#cluster.delete()
| alpha/use-aml-pipeline-to-train-multiple-featurizers-concatenate.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#requires Pytorch >= 0.4 for graph visualization. fastai currently uses 0.3 from conda install
#pip install tensorflow
#pip install git+https://github.com/lanpa/tensorboard-pytorch
#tensorboard --logdir="directory of logs file. default location is PATH/logs"
# +
# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline
from fastai.learner import *
from fastai.column_data import *
from tensorboard_cb import *
# -
# [http://files.grouplens.org/datasets/movielens/ml-latest-small.zip](http://files.grouplens.org/datasets/movielens/ml-latest-small.zip)
PATH = Path('../data/ml-latest-small')
# +
ratings = pd.read_csv(PATH/'ratings.csv')
movies = pd.read_csv(PATH/'movies.csv')
val_idxs = get_cv_idxs(len(ratings))
u_uniq = ratings.userId.unique()
user2idx = {o:i for i,o in enumerate(u_uniq)}
ratings.userId = ratings.userId.apply(lambda x: user2idx[x])
m_uniq = ratings.movieId.unique()
movie2idx = {o:i for i,o in enumerate(m_uniq)}
ratings.movieId = ratings.movieId.apply(lambda x: movie2idx[x])
n_users=int(ratings.userId.nunique())
n_movies=int(ratings.movieId.nunique())
x = ratings.drop(['rating', 'timestamp'],axis=1)
y = ratings['rating'].astype(np.float32)
min_rating,max_rating = ratings.rating.min(),ratings.rating.max()
md = ColumnarModelData.from_data_frame(PATH, val_idxs, x, y, ['userId', 'movieId'], 64)
# -
class EmbeddingNet(nn.Module):
def __init__(self, n_users, n_movies, n_factors=50, nh=10, p1=0.05, p2=0.5):
super().__init__()
self.u = nn.Embedding(n_users,n_factors)
self.m = nn.Embedding(n_movies, n_factors)
self.u.weight.data.uniform_(-0.01,0.01)
self.m.weight.data.uniform_(-0.01,0.01)
self.lin1 = nn.Linear(n_factors*2, nh)
self.lin2 = nn.Linear(nh,1)
self.drop1 = nn.Dropout(p1)
self.drop2 = nn.Dropout(p2)
def forward(self, cats, conts):
users,movies = cats[:,0],cats[:,1]
x = torch.cat([self.u(users),self.m(movies)], dim=1)
x = self.drop1(x)
x = self.lin1(x)
x = F.relu(x)
x = self.drop2(x)
x = self.lin2(x)
x = F.sigmoid(x)
return x * (max_rating-min_rating+1) + min_rating-0.5
learn = Learner(md, SingleModel(to_gpu(EmbeddingNet(n_users, n_movies))))
learn.opt_fn = optim.Adam
learn.crit = F.mse_loss
learn.clip = None
def rmsle(y_pred, targ):
    # Note: despite its name, this computes plain RMSE on the predictions (no log transform is applied)
    tmp = y_pred - targ
    return math.sqrt((tmp**2).mean())
tb_logger = TensorboardLogger(learn.model, md, "test", metrics_names=["rmsle"])
learn.fit(1e-3, 2, cycle_len=2, cycle_mult=2, wds=1e-5, metrics=[rmsle], callbacks=[tb_logger])
| fastai_tensorboard_callback/old/tensorboard-example-old.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 04. Implementing a Digit Classifier with a Convolutional Neural Network (CNN)
#
import numpy as np
import os
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
# %matplotlib inline
print ("PACKAGES LOADED")
mnist = input_data.read_data_sets('data/', one_hot=True)
trainimg = mnist.train.images
trainlabel = mnist.train.labels
testimg = mnist.test.images
testlabel = mnist.test.labels
print ("MNIST ready")
# # Define Model
# +
n_input = 784
n_channel = 64 # number of convolution filters
n_classes = 10
x = tf.placeholder('float', [None, n_input])
y = tf.placeholder('float', [None, n_classes])
stddev = 0.1
weights = {
    'c1' : tf.Variable(tf.random_normal([3, 3, 1, n_channel], stddev=stddev)), # filter shape: [height, width, in_channels, out_channels]
'c2' : tf.Variable(tf.random_normal([3, 3, n_channel, 128], stddev=stddev)),
'd1' : tf.Variable(tf.random_normal([7*7*128, n_classes], stddev=stddev))
}
biases = {
'c1' : tf.Variable(tf.random_normal([n_channel], stddev=stddev)),
'c2' : tf.Variable(tf.random_normal([128], stddev=stddev)),
'd1' : tf.Variable(tf.random_normal([n_classes], stddev=stddev))
}
print("NETWORK READY")
# +
def CNN(_x, _w, _b):
# RESHAPE
    _x_r = tf.reshape(_x, shape=[-1, 28, 28, 1]) # -1: batch size is inferred, 1: a single grayscale channel
# CONVOLUTION
_conv1 = tf.nn.conv2d(_x_r, _w['c1'], strides=[1, 1, 1, 1], padding='SAME')
# ADD BIAS
_conv2 = tf.nn.bias_add(_conv1, _b['c1'])
# RELU
_conv3 = tf.nn.relu(_conv2)
#MAX-POOl
_pool = tf.nn.max_pool(_conv3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    # Memorize the conv1 -> conv2 -> conv3 -> pool pattern above!
# SECOND CONV LAYER
_temp = tf.nn.conv2d(_pool, _w['c2'], strides=[1, 1, 1, 1], padding='SAME')
_temp = tf.nn.bias_add(_temp, _b['c2'])
_temp = tf.nn.relu(_temp)
_temp = tf.nn.max_pool(_temp, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# VECTORIZE
_dense = tf.reshape(_temp, [-1, _w['d1'].get_shape().as_list()[0]])
# DENSE
_logit = tf.add(tf.matmul(_dense, _w['d1']), _b['d1'])
_out = {
'x_r' : _x_r, 'conv1' : _conv1, 'conv2' : _conv2, 'conv3' : _conv3,
'pool' : _pool, 'dense' : _dense, 'logit' : _logit
}
return _out
# PREDICTION
cnnout = CNN(x, weights, biases)
# LOSS AND OPTIMZER
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=cnnout['logit']))
optm = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)
corr = tf.equal(tf.argmax(cnnout['logit'], 1), tf.argmax(y, 1))
accr = tf.reduce_mean(tf.cast(corr, "float"))
# INITIALIZER
init = tf.global_variables_initializer()
print ("FUNCTION READY")
# -
# - The code shown on edwith contains several typos
# - pool = tf.max_pool -> tf.nn.max_pool
# - in _dense = tf.reshape(_temp. [-1, _w['d1'].get_shape().as_list()[0]]), _temp. should be _temp,
# - 'conv2' = conv2 -> 'conv2' : _conv2
# - logit' : _logit' -> _logit (remove the stray quote)
# - the position of the return statement was wrong
# - In addition, the Saver was not defined, which caused an error
# # Saver
savedir = "nets/"
saver = tf.train.Saver(max_to_keep=3)
save_step = 4
if not os.path.exists(savedir):
os.makedirs(savedir)
print ("Saver Ready!")
# # RUN
# PARAMETERS
training_epochs = 20
batch_size = 100
display_step = 4
# LAUNCH THE GRAPH
sess = tf.Session()
sess.run(init)
# OPTIMIZER
for epoch in range(training_epochs):
avg_cost = 0.
total_batch = int(mnist.train.num_examples/batch_size)
# Loop over all batches
for i in range(total_batch):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
# Fit training using batch data
sess.run(optm, feed_dict={x: batch_xs, y: batch_ys})
# Compute average loss
avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys})/total_batch
# Display logs per epoch step
if epoch % display_step == 0:
print ("Epoch: %03d/%03d cost: %.9f" % (epoch, training_epochs, avg_cost))
feeds = {x: batch_xs, y: batch_ys}
train_acc = sess.run(accr, feed_dict=feeds)
print (" Training accuracy: %.3f" % (train_acc))
        feeds = {x: mnist.test.images[:100, :], y:mnist.test.labels[:100, :]} # test with only the first 100 samples
test_acc = sess.run(accr, feed_dict=feeds)
print (" Test accuracy: %.3f" % (test_acc))
# Save Net
if (epoch+1) % save_step == 0:
savename = savedir+"net-"+str(epoch+1)+".ckpt"
        saver.save(sess, savename)
print ("[%s] SAVED" % (savename))
print ("Optimization Finished.")
do_restore = 1
if do_restore ==1:
sess = tf.Session()
    epoch = 4  # must match a saved checkpoint epoch (a multiple of save_step)
    savename = savedir +"net-"+str(epoch)+".ckpt"
saver.restore(sess, savename)
print("Network RESTORED")
else:
print("Do Nothing")
# +
# We can inspect every intermediate stage of the network!
# -
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
input_r = sess.run(cnnout['x_r'], feed_dict={x: trainimg[0:1, :]})
conv1 = sess.run(cnnout['conv1'], feed_dict={x: trainimg[0:1, :]})
conv2 = sess.run(cnnout['conv2'], feed_dict={x: trainimg[0:1, :]})
conv3 = sess.run(cnnout['conv3'], feed_dict={x: trainimg[0:1, :]})
pool = sess.run(cnnout['pool'], feed_dict={x: trainimg[0:1, :]})
dense = sess.run(cnnout['dense'], feed_dict={x: trainimg[0:1, :]})
out = sess.run(cnnout['logit'], feed_dict={x: trainimg[0:1, :]})
# +
# Let's see 'input_r'
print ("Size of 'input_r' is %s" % (input_r.shape,))
label = np.argmax(trainlabel[0, :])
print ("Label is %d" % (label))
# Plot !
plt.matshow(input_r[0, :, :, 0], cmap=plt.get_cmap('gray'))
plt.title("Label of this image is " + str(label) + "")
plt.colorbar()
plt.show()
# -
# # conv1
# +
# Let's see 'conv1'
print ("Size of 'conv1' is %s" % (conv1.shape,))
# Plot !
for i in range(3):
plt.matshow(conv1[0, :, :, i], cmap=plt.get_cmap('gray'))
plt.title(str(i) + "th conv1")
plt.colorbar()
plt.show()
# -
# # conv2 + bias
# +
# Let's see 'conv2'
print ("Size of 'conv2' is %s" % (conv2.shape,))
# Plot !
for i in range(3):
plt.matshow(conv2[0, :, :, i], cmap=plt.get_cmap('gray'))
plt.title(str(i) + "th conv2")
plt.colorbar()
plt.show()
# -
# # Conv3 (ReLU)
# +
# Let's see 'conv3'
print ("Size of 'conv3' is %s" % (conv3.shape,))
# Plot !
for i in range(3):
plt.matshow(conv3[0, :, :, i], cmap=plt.get_cmap('gray'))
plt.title(str(i) + "th conv3")
plt.colorbar()
plt.show()
# If the bias is too small, every value shrinks towards 0 -> after the activation they become 0 and learning stops
# This is the dead node / dead ReLU phenomenon
# -
# # Pool (max_pool)
# +
# Let's see 'pool'
print ("Size of 'pool' is %s" % (pool.shape,))
# Plot !
for i in range(3):
plt.matshow(pool[0, :, :, i], cmap=plt.get_cmap('gray'))
plt.title(str(i) + "th pool")
plt.colorbar()
plt.show()
# -
# # Dense
# Let's see 'dense'
print ("Size of 'dense' is %s" % (dense.shape,))
# Let's see 'out'
print ("Size of 'out' is %s" % (out.shape,))
plt.matshow(out, cmap=plt.get_cmap('gray'))
plt.title("OUT")
plt.colorbar()
plt.show()
# # Convolution filters
# +
# Let's see weight!
wc1 = sess.run(weights['c1'])
print ("Size of 'wc1' is %s" % (wc1.shape,))
# Plot !
for i in range(3):
plt.matshow(wc1[:, :, 0, i], cmap=plt.get_cmap('gray'))
plt.title(str(i) + "th conv filter")
plt.colorbar()
plt.show()
# -
| Lecture_Note/03. CNN Application/04.cnn_mnist.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: pyNNQiu
# language: python
# name: pynnqiu
# ---
import numpy as np
import h5py
# +
fileD="/work/qiu/data4mt/data/2rdSubmissionData/trai/LCZ42_20985_Paris_spring_1.h5"
#load all the data in this file
hf = h5py.File(fileD, 'r')
x_thisFile=np.array(hf.get('x'))
y_thisFile_=np.array(hf.get('y_1'))
y_thisFile =np.array(hf.get('y_0'))
hf.close()
# +
from skimage import exposure
import matplotlib.pyplot as plt
def linearStretch(input, percent):
pLow, pHigh = np.percentile(input[~np.isnan(input)], (percent, 100 - percent))
img_rescale = exposure.rescale_intensity(input, in_range=(pLow, pHigh))
return img_rescale
def show_patch(image_rgb, one_channel=True):
"""Show image"""
if one_channel:
image_rgb = image_rgb.mean(axis=-1)
plt.imshow(image_rgb, cmap="Greys")
plt.show()
return
if image_rgb.shape[2] == 3:
img_rescaled = linearStretch(image_rgb, 1.25)
plt.imshow(img_rescaled)
plt.show()
plt.pause(0.001) # pause a bit so that plots are updated
# +
paletteLCZ = np.array([140, 0, 0,
133, 6, 10,
199, 14, 21,
183, 78, 18,
244, 104, 29,
247, 154, 90,
255, 255, 0,
192, 192, 192,
255, 204, 153,
77, 77, 77,
0, 102, 0,
21, 255, 21,
102, 153, 0,
185, 220, 126,
0, 102, 255,
255, 255, 204,
110, 107, 254])
paletteLCZ = paletteLCZ.reshape(17,3)
print(paletteLCZ)
def patch_2_lczColor(lcz):
img_rgb=np.zeros((lcz.shape[0],lcz.shape[1],3), dtype=np.uint8)
for i in np.arange(lcz.shape[0]):
for j in np.arange(lcz.shape[1]):
img_rgb[i,j,0]=paletteLCZ[lcz[i,j],0]
img_rgb[i,j,1]=paletteLCZ[lcz[i,j],1]
img_rgb[i,j,2]=paletteLCZ[lcz[i,j],2]
return img_rgb
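# A vectorized alternative (a sketch): since paletteLCZ has shape (17, 3) and lcz holds
# integer class indices in [0, 16], NumPy fancy indexing builds the same RGB image
# without the Python double loop.
def patch_2_lczColor_fast(lcz):
    return paletteLCZ[lcz].astype(np.uint8)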
# +
print(x_thisFile.shape, y_thisFile.shape, y_thisFile_.shape)
print(np.unique(y_thisFile_))
lcz = y_thisFile_.argmax(axis=-1)+1
print(lcz.shape, np.unique(lcz))
checkIdx = np.random.randint(0, high=x_thisFile.shape[0], size=(3))
print(checkIdx)
# +
from PIL import Image
#plt.subplot(3, checkIdx.shape[1])
for i in checkIdx:
x=x_thisFile[i,:,:,:]
print(x.shape)
show_patch(x[:,:,[2,1,0]], one_channel=False)
show_patch(y_thisFile[i,:,:,:])
lcz= y_thisFile_[i,:,:,:]
lcz = lcz.argmax(axis=-1)
#show_patch(lcz)
print(lcz.shape)
#print(lcz)
lcz = patch_2_lczColor(lcz)
#print(lcz)
show_patch(lcz, one_channel=False)
# -
| dataPrepare/check_created_Patches.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
my_list=[1,2,3]
my_list
my_list=['a',1,2.3]
my_list
len(my_list)
my_list[::-1]
my_list=['one','two','three']
print(f'these are my list items: {my_list[0]}, {my_list[1]} and {my_list[2]}')
my_list[0]
my_list[1]
my_list[2]
my_list[1:]
my_list[-1]
my_list[2:3]
anotherList=['three','four']
my_list+anotherList
newList=my_list+anotherList
newList
my_list
anotherList
newList[0]='ONE ALL CAPS'
newList
newList.append('newItem')
newList
newList.append('SEVEN')
newList
newList.pop()
newList
popped_item=newList.pop()
popped_item
newList
newList.pop(0)
newList
newList=['a','b','c']
numList=[4,2,90,21]
newList.sort()
newList
type(None)  # NoneType - e.g. what list.sort() returns, since it sorts in place
numList.sort()
numList
numList.reverse()
numList
| Python Lists.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Cyber Use Case Tutorial #2: Network Mapping using RAPIDS
#
# ### GTC SJ 2019 (18 March 2019)
# ### Authors:
# - <NAME> (NVIDIA)
# - <NAME> (NVIDIA)
# - <NAME> (NVIDIA)
# - <NAME> (NVIDIA)
# - <NAME> (NVIDIA)
#
# ### Goals:
# - Parse raw Windows Event Logs using cuDF
# - Load netflow data into a cuDF
# - Map parsed data to network graph edges using cuDF
# - Use cuGraph pagerank
# - Build a network graph
# ### Imports
# +
import os
import re
import time
import dask_cudf
import dask.delayed
import nvstrings
import nvcategory
import yaml
import pandas as pd
import numpy as np
import json
import cugraph
import dask.dataframe as dd
import dask
import cudf
from dask_cuda import LocalCUDACluster
from dask.distributed import Client, wait
from collections import OrderedDict
from cudf.core import DataFrame
# -
# ### Background
#
# With the overwhelming flow of data and connected devices today, it becomes critical to be able to map that data into a network graph for easy visual reference and analytics. We strive to recognize patterns and anomalies to combat cyber attacks.
#
# One of the common struggles today is the ability to parse data at speed. Here we will demonstrate how to parse raw Windows Event Logs.
#
# By the end of this tutorial, we'll be able to parse raw Windows Event Logs containing authorization data and combine them with netflow data to form a network mapping graph.
# ### Parsing Windows Event Logs
#
# In the cell below we'll be setting variables for the input columns and output columns.
#
# First, define the input columns and dtypes. These input columns are defined by the data source provided by [Los Alamos National Laboratory](https://csr.lanl.gov/data/2017.html). The additional column "Raw" integrates the values from those columns to form a raw Windows Event Log.
#
# Next, define the output columns and dtypes. These output columns are defined by the content of the Windows Event logs and more directly defined by the configuration of regex values `conf/lanl_regex_configs` used to parse each key value pair from the raw log.
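#
# For orientation, once loaded each regex config behaves like a plain dictionary that maps an output column name to a regex containing a single capture group. The snippet below is a purely hypothetical illustration of that structure (the real patterns live in `conf/lanl_regex_configs` and are not reproduced here):
#
# ```python
# example_regex_mapping = {
#     'logon_type': r'logon type:\s*(\d+)',                   # hypothetical pattern
#     'subject_account_name': r'account name:\s*([\w.$-]+)',  # hypothetical pattern
# }
# ```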
# +
INPUT_COLS_SET = ['Time',
'EventID',
'LogHost',
'LogonType',
'LogonTypeDescription',
'UserName',
'DomainName',
'LogonID',
'SubjectUserName',
'SubjectDomainName',
'SubjectLogonID',
'Status',
'Source',
'ServiceName',
'Destination',
'AuthenticationPackage',
'FailureReason',
'ProcessName',
'ProcessID',
'ParentProcessName',
'ParentProcessID',
'Raw']
INPUT_DTYPES = ['str' for x in INPUT_COLS_SET]
OUTPUT_COLS_SUPERSET = ['detailed_authentication_information_authentication_package',
'new_logon_logon_guid',
'failure_information_failure_reason',
'failure_information_status',
'computername',
'new_logon_logon_id',
'subject_security_id',
'detailed_authentication_information_package_name_ntlm_only',
'logon_type',
'account_for_which_logon_failed_security_id',
'detailed_authentication_information_key_length',
'subject_logon_id',
'process_information_caller_process_name',
'eventcode',
'process_information_caller_process_id',
'subject_account_name',
'process_information_process_name',
'new_logon_account_name',
'process_information_process_id',
'failure_information_sub_status',
'new_logon_security_id',
'network_information_source_network_address',
'detailed_authentication_information_transited_services',
'new_logon_account_domain',
'subject_account_domain',
'detailed_authentication_information_logon_process',
'account_for_which_logon_failed_account_domain',
'account_for_which_logon_failed_account_name',
'network_information_workstation_name',
'network_information_source_port']
OUTPUT_DTYPES = ['str' for x in OUTPUT_COLS_SUPERSET]
# -
# ### Parsing Data Pipeline
#
# We first preprocess (clean) the raw logs by removing non-printable characters, then parse the logs by event code type. Here we use regex mappings per event code to parse out each data element defined in OUTPUT_COLS_SUPERSET.
# +
def pipeline(df, event_codes, regex_mappings, clean=True):
    """Parse raw Windows Event Logs: optionally clean them, then extract fields for each event code.
    """
if clean:
df = preprocess_logs(df)
out_dfs = []
# separate by eventcode and process differently
for code in event_codes:
portion = filter_by_pattern(df, code)
temp = process_logtype(portion, regex_mappings, code)
temp['time'] = portion['Time'].astype('int')
temp['eventcode'] = portion['EventID']
out_dfs.append(temp)
# recombine the processed output
out_df = cudf.concat(out_dfs)
return out_df
def concat_wrapper(df_list):
return cudf.concat(df_list)
def preprocess_logs(logs_gdf):
"""Lowercasing and replacing characters
"""
logs_gdf['Raw'] = (
logs_gdf['Raw'].str.lower()
.str.replace('\\\\t', '')
.str.replace('\\\\r', '')
.str.replace('\\\\n', ' | ')
)
return logs_gdf
def process_logtype(df, regexes, eventcode):
"""Ongoing strings development/fixes will allow for cleaner log processing code in the future
"""
# The below line currently fails due to https://github.com/rapidsai/cudf/issues/1364. Once the issue is resolved
# you may use this line instead to initialize your dataframe.
#log_df_processed = cudf.read_csv('conf/LANL_OUTPUT_COLS_SUPERSET.csv', dtype=OUTPUT_DTYPES)
log_df_processed = cudf.DataFrame([(col, ['str']) for col in OUTPUT_COLS_SUPERSET])
log_df_processed = log_df_processed[:0]
columns = list(regexes[eventcode].keys())
for col in columns:
regex_pattern = regexes[eventcode].get(col)
extracted_nvstrings = df['Raw'].str.extract(regex_pattern)
if not extracted_nvstrings.empty:
log_df_processed[col] = extracted_nvstrings[0]
for col in log_df_processed.columns:
if not log_df_processed[col].empty:
if log_df_processed[col].dtype == 'float64':
log_df_processed[col] = log_df_processed[col].astype('int').astype('str')
elif log_df_processed[col].dtype == 'object':
pass
else:
log_df_processed[col] = log_df_processed[col].astype('str')
if log_df_processed[col].empty:
log_df_processed[col] = nvstrings.to_device([])
return log_df_processed
def filter_by_pattern(df, pattern):
    """Filter based on whether a string contains a regex pattern
"""
df['present'] = df['EventID'].str.contains(pattern)
return df[df.present == True]
def read_data(filename, **kwargs):
    """Read CSV data into a dask_cudf DataFrame.
    """
gdf = dask_cudf.read_csv(filename, **kwargs)
return gdf
def load_regex_yaml(yaml_file):
with open(yaml_file) as f:
regex_dict = yaml.safe_load(f)
regex_dict = {k: v[0] for k, v in regex_dict.items()}
return regex_dict
def create_regex_dictionaries(yaml_directory):
regex_dict = {}
for f in os.listdir(yaml_directory):
temp_regex = load_regex_yaml(yaml_directory + '/' + f)
regex_dict[f[:-5]] = temp_regex
return regex_dict
# -
# ### Run Parsing Data Pipeline
#
# In this instance, we'll be focusing on parsing raw Windows Event Logs that are of event code [4624](https://www.ultimatewindowssecurity.com/securitylog/encyclopedia/event.aspx?eventid=4624) and [4625](https://www.ultimatewindowssecurity.com/securitylog/encyclopedia/event.aspx?eventid=4625).
# !mkdir -p ../../../data/input/lanl
# !if [ ! -f ../../../data/input/lanl/wls.csv ]; then tar -xzvf ../../../data/lanl/wls.tar.gz -C ../../../data/input/lanl; fi
# +
#raw lanl data parsing.
AUTH_INPUT_PATH = '../../../data/input/lanl/wls.csv'
REGEX_CONF_PATH = 'conf/lanl_regex_configs'
EVENT_CODES_OF_INTEREST = ['4624','4625']
REQUIRED_COLS = ['Time','EventID','Raw']
DELIMITER = ','
logs_gddf = dask_cudf.read_csv(AUTH_INPUT_PATH,
names=INPUT_COLS_SET,
delimiter=DELIMITER,
usecols=REQUIRED_COLS,
dtype=INPUT_DTYPES,
skip_blank_lines=True,
)
logs_gddf.head()
# -
logs_gddf.dtypes
# +
REGEX_MAPPINGS = create_regex_dictionaries(REGEX_CONF_PATH)
parts = [dask.delayed(pipeline)(x, EVENT_CODES_OF_INTEREST, REGEX_MAPPINGS) for x in logs_gddf.to_delayed()]
temp_df = dask_cudf.from_delayed(parts)
# Bring data back to a single GPU, for downstream graph analytics
auth_gdf = temp_df.compute()
print(auth_gdf.shape)
# -
# ### Read edge definitions from JSON file
#
# Now that the parsing of the Windows Event Logs has concluded, we prepare for the network mapping portion. Within the edge definitions configuration file we define our edges by indicating the source and destination for each edge, referencing the column names of our input data.
#
# Below we also read in the netflow data.
filename = 'conf/edge-definitions.json'
with open(filename) as f:
edge_defs = json.load(f)
# ### Build network mapping edge list
#
# This function helps to determine the data types of the columns we read in via CSV, particularly the string columns.
def get_dtypes(fn, delim, floats, strings):
with open(fn, errors='replace') as fp:
header = fp.readline().strip()
types = []
for col in header.split(delim):
if 'date' in col:
types.append((col, 'date'))
elif col in floats:
types.append((col, 'float64'))
elif col in strings:
types.append((col, 'str'))
else:
types.append((col, 'int32'))
return OrderedDict(types)
# ### Read in Netflow Data
#
# The netflow data is also provided by [Los Alamos National Laboratory](https://csr.lanl.gov/data/2017.html)
# !mkdir -p ../../../data/input/lanl
# !if [ ! -f ../../../data/input/lanl/netflow.csv ]; then tar -xzvf ../../../data/lanl/netflow.tar.gz -C ../../../data/input/lanl; fi
flow_input_path = '../../../data/input/lanl/netflow.csv'
dtypes_data_processed = get_dtypes(flow_input_path, ',', floats=[], strings=["SrcDevice", "DstDevice"])
flow_gdf = cudf.io.csv.read_csv(flow_input_path, delimiter=',', names=list(dtypes_data_processed),
dtype=list(dtypes_data_processed.values()), skiprows=1)
# Create a dictionary to reference both the auth data (parsed Windows Event Logs) and netflow data
ds_gdfs = {
'lanl_auth': auth_gdf,
'lanl_flow': flow_gdf
}
# ### Build edges dataframe
#
# In the cell below, we reference each data source and its corresponding edge configuration to build a new dataframe containing edges. This dataframe will notably contain `srcCol` and `dstCol` along with other reference data.
# +
edges_gdf = None
for ds in edge_defs:
ds_gdf = ds_gdfs[ds['dataSource']]
for e in ds["edges"]:
evtCols = ds["stringCols"].copy()
evtCols.append(e["srcCol"])
evtCols.append(e["dstCol"])
evtCols.append(ds["timeCol"])
if 'filters' in e:
for f in e['filters']:
evtCols.append(f['key'])
evtCols = list(set(evtCols))
eventsDF = ds_gdf
eventsDF = eventsDF[evtCols]
# Apply filters indicated in the edge configuration file
if 'filters' in e:
for f in e['filters']:
eventsDF = eventsDF[eventsDF[f['key']].str.contains(f['value']) == True]
# Remove any None values
src_idx = eventsDF[e['srcCol']].str.contains("None")
if len(eventsDF[src_idx])>0:
eventsDF = eventsDF[src_idx==False]
dst_idx = eventsDF[e['dstCol']].str.contains("None")
if len(eventsDF[dst_idx])>0:
eventsDF = eventsDF[dst_idx==False]
evt_edges_gdf = cudf.dataframe.DataFrame()
evt_edges_gdf['src'] = eventsDF[e["srcCol"]]
evt_edges_gdf['dst'] = eventsDF[e["dstCol"]]
# Adjust time to recent date (LANL data source begins at 1 second)
evt_edges_gdf['time'] = eventsDF[ds["timeCol"]]+1442131200
evt_edges_gdf['src_node_type'] = e["srcNodeType"]
evt_edges_gdf['dst_node_type'] = e["dstNodeType"]
evt_edges_gdf['relationship'] = e["relationship"]
evt_edges_gdf['data_source'] = ds["dataSource"]
if edges_gdf is None:
edges_gdf = evt_edges_gdf
else:
edges_gdf = cudf.concat([edges_gdf, evt_edges_gdf])
# -
# Use pandas to drop duplicates as this is not yet available in cudf for strings
edges_pd = edges_gdf.to_pandas().drop_duplicates()
edges_gdf = cudf.DataFrame.from_pandas(edges_pd)
# ### Create node list and assign numeric ids
#
# Now that we have `edges_gdf` we can prepare the data for cuGraph by assigning contiguous IDs to the nodes and edges. cuGraph requires that all edges and nodes be identified using contiguous IDs.
src_nodes_pd = edges_pd[['src', 'src_node_type']].rename(columns={"src": "id", "src_node_type": "node_type"}).drop_duplicates()
dst_nodes = edges_pd[['dst', 'dst_node_type']].rename(columns={"dst": "id", "dst_node_type": "node_type"}).drop_duplicates()
all_nodes_pd = src_nodes_pd.append(dst_nodes).drop_duplicates()
all_nodes_gdf = cudf.DataFrame.from_pandas(all_nodes_pd)
# Assign contiguous id's to nodes for cugraph
idx = np.arange(len(all_nodes_gdf))
all_nodes_gdf['idx'] = idx
idmap_gdf = cudf.DataFrame([('id', all_nodes_gdf['id']), ('idx', idx)])
# ### Add numeric src and dst node ids to edge list
# Add contiguous src id's to edges
edges_gdf['id'] = edges_gdf['src']
edges_gdf = edges_gdf.merge(idmap_gdf, on=['id'])
edges_gdf['src_idx'] = edges_gdf['idx']
edges_gdf = edges_gdf.drop(['id', 'idx'])
# Add contiguous dst id's to edges
edges_gdf['id'] = edges_gdf['dst']
edges_gdf = edges_gdf.merge(idmap_gdf, on=['id'])
edges_gdf['dst_idx'] = edges_gdf['idx']
edges_gdf = edges_gdf.drop(['id', 'idx'])
# ### Create input edge list for cuGraph
cg_edges_gdf = edges_gdf[['src_idx', 'dst_idx']]
cg_edges_gdf['src_idx'] = cg_edges_gdf['src_idx'].astype('int32')
cg_edges_gdf['dst_idx'] = cg_edges_gdf['dst_idx'].astype('int32')
# ### Run cuGraph PageRank
#
# Next we create our graph and run pagerank.
G = cugraph.Graph()
G.add_edge_list(cg_edges_gdf['src_idx'], cg_edges_gdf['dst_idx'], None)
# %time pr_gdf = cugraph.pagerank(G, alpha=0.85, max_iter=500, tol=1.0e-05)
print(pr_gdf)
# ### Add PageRank scores to node list
pr_gdf['idx'] = pr_gdf['vertex'].astype('int64')
all_nodes_gdf = all_nodes_gdf.merge(pr_gdf, on=['idx'], how='left')
all_nodes_gdf = all_nodes_gdf.drop(['vertex'])
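# A quick optional check (a sketch): inspect the most "important" nodes by PageRank score
# (this assumes the score column is named 'pagerank', as returned by cugraph.pagerank)
all_nodes_gdf.to_pandas().nlargest(10, 'pagerank')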
# ### Graphistry Viz
#
# We use [Graphistry](https://www.graphistry.com/) to visualize the network mapping graph. Run the code below to construct the graph using Graphistry.
#
# A snapshot of the graph constructed from this notebook is provided below. To generate it yourself, you'll need to register an account with Graphistry and configure your key below.
#
# 
#
#
# Zoom in to search for interesting subgraphs.
#
# 
import graphistry
# Register Graphistry key
# A graphistry instance is required to proceed. Please enter your own graphistry key and server information in the line below.
# Please visit https://www.graphistry.com/ for more information on Graphistry.
graphistry.register(key='',
protocol='http', server='')
g_edges_pd = edges_gdf.to_pandas()
g_edges_pd = g_edges_pd.drop(columns=['dst_idx', 'dst_node_type', 'src_idx', 'src_node_type'])
g_nodes_pd = all_nodes_gdf.to_pandas()
acct_nodes_pd = g_nodes_pd[g_nodes_pd['node_type']=='account'].assign(color=228004, icon="user")
addr_nodes_pd = g_nodes_pd[g_nodes_pd['node_type']=='address'].assign(color=228010, icon="desktop")
g_nodes_pd = pd.concat([acct_nodes_pd, addr_nodes_pd])
g = graphistry.edges(g_edges_pd) \
.bind(source='src', destination='dst')
g.nodes(g_nodes_pd).bind(node='id', point_color='color').plot()
| blog_notebooks/cyber/network_mapping/lanl_network_mapping_using_rapids.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import pandas_profiling as pdp
import matplotlib.pyplot as plt
import seaborn as sns
from pathlib import Path
pd.set_option("display.max_columns", 100)
pd.set_option("display.max_colwidth", 1000)
ROOT_PATH = Path().absolute().parents[1]
display(ROOT_PATH)
train_df = pd.read_csv(ROOT_PATH / "input" / "train.csv", nrows=5656575, dtype={'acoustic_data': np.int16, 'time_to_failure': np.float64})
plt.plot(train_df.index.values, train_df["time_to_failure"].values, c="darkred")
plt.show()
train_df.tail()
train_df = pd.read_csv(ROOT_PATH / "input" / "train.csv", nrows=5656574, dtype={'acoustic_data': np.int16, 'time_to_failure': np.float64})
plt.plot(train_df.index.values, train_df["time_to_failure"].values, c="darkred")
plt.show()
test_path = ROOT_PATH / "input" / "test"
test_files = list(test_path.glob("*"))
test_lengths = set()
for test_file in test_files:
test_df = pd.read_csv(test_file, dtype={'acoustic_data': np.int16})
test_lengths.add(len(test_df))
del test_df
print(test_lengths)
| notebook/eda_moco/length_eda.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from __future__ import print_function
import numpy as np
import tensorflow as tf
import keras
from keras import backend
from mnist.loader import MNIST
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from PIL import Image
from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
# +
batch_size = 126
epochs = 10
num_classes = 26  # EMNIST letters has 26 classes
# Input image dimensions
img_rows, img_cols = 28, 28
# -
emnist = MNIST(path='Dataletters\\')
emnist.select_emnist('letters')
a, b = emnist.load_training()
a, b = np.asarray(a), np.asarray(b)
img = a[0].reshape(28, 28)
img.shape
plt.imshow(img,cmap='gray')
a.shape
# +
a = a.reshape(124800, 28, 28)
b = b.reshape(124800, 1)
b = b-1
# +
a_train, a_test, b_train, b_test = train_test_split(a, b, test_size=0.25, random_state=111)
a_train = a_train.reshape(a_train.shape[0], img_rows, img_cols, 1)
a_test = a_test.reshape(a_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
print(input_shape)
# +
a_train = a_train.astype('float32')
a_test = a_test.astype('float32')
a_train /= 255
a_test /= 255
b_train = keras.utils.to_categorical(b_train, num_classes)
b_test = keras.utils.to_categorical(b_test, num_classes)
# -
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
activation='relu',
input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# model.add(Conv2D(64, (3, 3), activation='relu'))
# model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(26, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
model.fit(a_train, b_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_split=0.3)
model.save('cnn_model.h5')
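# Optional check (a sketch): reload the saved model and evaluate it on the held-out test split
loaded_model = load_model('cnn_model.h5')
test_loss, test_acc = loaded_model.evaluate(a_test, b_test, verbose=0)
print('test loss:', test_loss, 'test accuracy:', test_acc)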
tf.test.is_gpu_available(
cuda_only=False, min_cuda_compute_capability=None
)
| CNNAirWriting.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Dataset Source : Kaggle Dataset
#
# Resource and Code : Computer Science youtube channel
#
# link: https://www.youtube.com/watch?v=FMKnvsKoQxE
import pandas as pd
import numpy as np
df=pd.read_csv('DOGE-USD.csv')
#set index as date
df=df.set_index(pd.DatetimeIndex(df['Date'].values))
df.head()
df.isnull().sum()
df["Open"].fillna( method ='ffill', inplace = True)
df["High"].fillna( method ='ffill', inplace = True)
df["Low"].fillna( method ='ffill', inplace = True)
df["Close"].fillna( method ='ffill', inplace = True)
df["Adj_Close"].fillna( method ='ffill', inplace = True)
df["Volume"].fillna( method ='ffill', inplace = True)
df.isnull().sum()
df=df[['Close']]
df.head()
pred_day=1
#collect the future price:
df['Prediction']=df[['Close']].shift(-pred_day)
df.tail()
x=np.array(df.drop(['Prediction'],1))
x=x[:len(df)-pred_day-1]
x
#create a dependent dataset
y=np.array(df['Prediction'])
y=y[:-pred_day-1]
y
#split the data into 80:20:
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test= train_test_split(x,y,test_size=0.2)
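# Note (an optional sketch): since this is time-series data, a chronological split avoids
# look-ahead leakage; the random split above is kept to mirror the original tutorial.
# x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, shuffle=False)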
from sklearn.ensemble import RandomForestRegressor
forest = RandomForestRegressor(n_estimators=4,random_state=587)
forest.fit(x_train,y_train)
print(forest.score(x_test,y_test))
#show the values vs actual
pred=forest.predict(x_test)
#print:
print(pred)
print()
print(y_test)
df2=df[:-pred_day]
x_val=df2.tail(1)['Close'][0]
print(x_val)
prediction=forest.predict([[x_val]])
print('The price of DogeCoin in',pred_day,'day(s) is to be',prediction)
print('The actual price was :',df2.tail(1)['Prediction'][0])
| DogeCoin/DogeCoin Price Prediction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib inline
import gym
import itertools
import matplotlib
import numpy as np
import pandas as pd
import sys
if "../" not in sys.path:
sys.path.append("../")
from collections import defaultdict
from lib.envs.windy_gridworld import WindyGridworldEnv
from lib import plotting
matplotlib.style.use('ggplot')
# -
env = WindyGridworldEnv()
def make_epsilon_greedy_policy(Q, epsilon, nA):
"""
Creates an epsilon-greedy policy based on a given Q-function and epsilon.
Args:
Q: A dictionary that maps from state -> action-values.
Each value is a numpy array of length nA (see below)
        epsilon: The probability to select a random action. Float between 0 and 1.
nA: Number of actions in the environment.
Returns:
A function that takes the observation as an argument and returns
the probabilities for each action in the form of a numpy array of length nA.
"""
def policy_fn(observation):
A = np.ones(nA, dtype=float) * epsilon / nA
best_action = np.argmax(Q[observation])
A[best_action] += (1.0 - epsilon)
return A
return policy_fn
def sarsa(env, num_episodes, discount_factor=1.0, alpha=0.3, epsilon=0.1):
"""
SARSA algorithm: On-policy TD control. Finds the optimal epsilon-greedy policy.
Args:
env: OpenAI environment.
num_episodes: Number of episodes to run for.
discount_factor: Gamma discount factor.
alpha: TD learning rate.
        epsilon: Chance to sample a random action. Float between 0 and 1.
Returns:
A tuple (Q, stats).
Q is the optimal action-value function, a dictionary mapping state -> action values.
stats is an EpisodeStats object with two numpy arrays for episode_lengths and episode_rewards.
"""
# The final action-value function.
# A nested dictionary that maps state -> (action -> action-value).
Q = defaultdict(lambda: np.zeros(env.action_space.n))
# Keeps track of useful statistics
stats = plotting.EpisodeStats(
episode_lengths=np.zeros(num_episodes),
episode_rewards=np.zeros(num_episodes))
# The policy we're following
policy = make_epsilon_greedy_policy(Q, epsilon, env.action_space.n)
for i_episode in range(num_episodes):
# Print out which episode we're on, useful for debugging.
if (i_episode + 1) % 100 == 0:
print("\rEpisode {}/{}.".format(i_episode + 1, num_episodes), end="")
sys.stdout.flush()
state = env.reset()
action_probs = policy(state)
action = np.random.choice(np.arange(len(action_probs)), p=action_probs)
for t in itertools.count():
# Take a step
next_state, reward, done, _ = env.step(action)
# Pick the next action
next_action_probs = policy(next_state)
next_action = np.random.choice(np.arange(len(next_action_probs)), p=next_action_probs)
# Update statistics
stats.episode_rewards[i_episode] += reward
stats.episode_lengths[i_episode] = t
# TD Update
td_target = reward + discount_factor * Q[next_state][next_action]
td_delta = td_target - Q[state][action]
            Q[state][action] += alpha * td_delta
if done:
break
action = next_action
state = next_state
return Q, stats
Q, stats = sarsa(env, 200)
plotting.plot_episode_stats(stats)
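# A small optional sketch: recover the greedy action for each visited state from the learned Q
# (Q maps each state to an array of action values, as returned by sarsa above).
greedy_policy = {state: int(np.argmax(action_values)) for state, action_values in Q.items()}
print(list(greedy_policy.items())[:5])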
| TD/SARSA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import pandas as pd
import time
df = pd.read_csv('labeled_and_scored_comments.csv')
# -
# Dataset of Wikipedia comments made available by Jigsaw, a subsidiary of Google that created the Perspective tool.
df.sort_values(['score'])
# function to make calls to the Perspective API for toxicity score testing
# +
from googleapiclient.discovery import build
import json
def get_toxicity_score(comment):
API_KEY = '' # Put your API key here
client = build(
"commentanalyzer",
"v1alpha1",
developerKey=API_KEY,
discoveryServiceUrl="https://commentanalyzer.googleapis.com/$discovery/rest?version=v1alpha1",
static_discovery=False,
)
analyze_request = {
'comment': { 'text': comment },
'requestedAttributes': {'TOXICITY': {}}
}
response = client.comments().analyze(body=analyze_request).execute()
toxicity_score = response["attributeScores"]["TOXICITY"]["summaryScore"]["value"]
return toxicity_score
# -
# Get the 50 smallest scores from the csv
df.nsmallest(n=50, columns=['score'])
# Get the 50 largest scores
df.nlargest(n=50, columns=['score'])
# Get some basic stats from the score column
df['score'].describe()
# Create a new column that will add the total tally for each row
df['Total_Score'] = df['toxic'] + df['severe_toxic'] + df['obscene'] + df['threat'] + df['insult'] + df['identity_hate']
# Get the first 10 rows to make sure that the new column 'Total_Score' was created and updated
df.head(10)
# create a variable that holds all the rows with a Total_Score of 0 (named pd0 so it does not shadow the pandas alias pd)
pd0 = df[df['Total_Score']==0]
# + tags=[]
pd0
# -
# get basic stats for the rows with a score of 0
pd0['score'].describe()
# show the rows with the smallest score with a total tally of 0
pd0.nsmallest(n=10, columns=['score'])
# show the rows with the largest score with a total tally of 0
pd0.nlargest(n=10, columns=['score'])
# create a variable that holds all the rows with a Total_Score of 1 / and get basic stats for rows with a total score of 1
pd1 = df[df['Total_Score']==1]
pd1['score'].describe()
# show the rows with the largest score with a total tally of 1
pd1.nlargest(n=10, columns=['score'])
# show the rows with the smallest score with a total tally of 1
pd1.nsmallest(n=10, columns=['score'])
# create a variable that holds all the rows with a Total_Score of 2 / and get basic stats for rows with a total score of 2
pd2 = df[df['Total_Score']==2]
pd2['score'].describe()
# show the rows with the smallest score with a total tally of 2
pd2.nsmallest(n=10, columns=['score'])
# show the rows with the largest score with a total tally of 2
pd2.nlargest(n=10, columns=['score'])
# create a variable that holds all the rows with a Total_Score of 3 / and get basic stats for rows with a total score of 3
pd3 = df[df['Total_Score']==3]
pd3['score'].describe()
# show the rows with the smallest score with a total tally of 3
pd3.nsmallest(n=10, columns=['score'])
# show the rows with the largest score with a total tally of 3
pd3.nlargest(n=10, columns=['score'])
# create a variable that holds all the rows with a Total_Score of 4 / and get basic stats for rows with a total score of 4
pd4 = df[df['Total_Score']==4]
pd4['score'].describe()
# show the rows with the smallest score with a total tally of 4
pd4.nsmallest(n=10, columns=['score'])
# show the rows with the largest score with a total tally of 4
pd4.nlargest(n=10, columns=['score'])
# create a variable that holds all the rows with a Total_Score of 5 / and get basic stats for rows with a total score of 5
pd5 = df[df['Total_Score']==5]
pd5['score'].describe()
# show the rows with the smallest score with a total tally of 5
pd5.nsmallest(n=10, columns=['score'])
# show the rows with the largest score with a total tally of 5
pd5.nlargest(n=8, columns=['score'])
# create a variable that holds all the rows with a Total_Score of 6 / and get basic stats for rows with a total score of 6
pd6 = df[df['Total_Score']==6]
pd6['score'].describe()
# show the rows with the smallest score with a total tally of 6
pd6.nsmallest(n=10, columns=['score'])
# show the rows with the largest score with a total tally of 6
pd6.nlargest(n=8, columns=['score'])
# Create a comment list, loop through the get_toxicity_score function and get each comment and its score
# +
comment_list = ['stupid', 'how are you', 'fine thanks']
for comment in comment_list:
score = get_toxicity_score(comment)
print(comment, score, )
time.sleep(1)
# -
# create a list of namjoon hate comments
hate_namjoon_comments = ['B*tch i told you all started it now stfu and worry about namjoon\'s ugly face', "How many times u keep using this edited pict? Here\'s the non edited pict of ugly namjoon", "Namjoon and Jhope are ugly jimin taehyung and jin can\’t sing and jin a bitch running from enlistment FUCK BTS",
'what’s botched ass hwasa doing? what’s botched ass namjoon doing? worry but the people you stan cause they ugly in the face and ain’t doing shit', 'namjoon ugly ass deserve to be enlisted', 'One ugly bitvh just called namjoon ugly like tfff... I wanna b3at her a5s']
# get a toxicity score for the hate namjoon comments
for comment in hate_namjoon_comments:
score = get_toxicity_score(comment)
print(comment, score )
time.sleep(1)
# Set toxicity levels to categorize the comment scores for namjoon hate comments
# +
max_toxic_threshold = 0.84
min_toxic_threshold = 0.000004
mid_toxic_threshold = 0.12
for comment in hate_namjoon_comments:
score = get_toxicity_score(comment)
if score >= min_toxic_threshold and score < mid_toxic_threshold:
toxic_level = 'Low'
elif score >= mid_toxic_threshold and score < max_toxic_threshold:
toxic_level = 'Mid'
    elif score >= max_toxic_threshold:
        toxic_level = 'Max'
    else:
        toxic_level = 'Low'  # scores below min_toxic_threshold are treated as low toxicity
print(comment,':', score, ':', toxic_level )
time.sleep(1)
# -
# create a list of namjoon nice comments
good_namjoon_comments = ['kim namjoon being perfect', 'kim namjoon, most beautiful.', 'My beautiful sexy man I love you This man is so beautiful and sexy to me and I love him', "Namjoon's lips, mouth insists on being kissed! Believe me! And I will say that the boys with beautiful mouths, do not know how to kiss!", 'quickly realizing that i love seeing namjoon eat well', 'Namjoon\'s dimples on his cheeks is the most precious thing']
# Get toxicity scores for the good namjoon comments
for comment in good_namjoon_comments:
score = get_toxicity_score(comment)
print(comment, score )
time.sleep(1)
# Set toxicity levels to categorize the comment scores for namjoon nice comments
# +
max_toxic_threshold = 0.84
min_toxic_threshold = 0.000004
mid_toxic_threshold = 0.12
for comment in good_namjoon_comments:
score = get_toxicity_score(comment)
if score >= min_toxic_threshold and score < mid_toxic_threshold:
toxic_level = 'Low'
elif score >= mid_toxic_threshold and score < max_toxic_threshold:
toxic_level = 'Mid'
    elif score >= max_toxic_threshold:
        toxic_level = 'Max'
    else:
        toxic_level = 'Low'  # scores below min_toxic_threshold are treated as low toxicity
print(comment,':', score, ':', toxic_level )
time.sleep(1)
# -
| Data_Bias_Dalia_Vazquez (2).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] papermill={"duration": 0.015866, "end_time": "2022-06-04T09:16:58.382671", "exception": false, "start_time": "2022-06-04T09:16:58.366805", "status": "completed"} tags=[]
# # Trend-following
#
# This notebook implements a few improvements that <NAME> Kosowski (2020) (BK2020, hereafter) propose for the time-series momentum strategy which originally appeared in Moskowitz et al. (2012).
#
# Note that, as in other notebooks, all backtests are performed using a private library called `vivace`.
# + papermill={"duration": 3.679193, "end_time": "2022-06-04T09:17:02.077159", "exception": false, "start_time": "2022-06-04T09:16:58.397966", "status": "completed"} tags=[]
# %matplotlib inline
from datetime import datetime
import logging
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
plt.style.use('bmh')
from vivace.backtest import signal
from vivace.backtest import processing
from vivace.backtest.contract import all_futures_baltas2020
from vivace.backtest.engine import BacktestEngine
from vivace.backtest.enums import Strategy, Weighting, RealisedVolatility
from vivace.backtest.stats import Performance
# + [markdown] papermill={"duration": 0.010174, "end_time": "2022-06-04T09:17:02.099529", "exception": false, "start_time": "2022-06-04T09:17:02.089355", "status": "completed"} tags=[]
# # Data
# + papermill={"duration": 0.021843, "end_time": "2022-06-04T09:17:02.131941", "exception": false, "start_time": "2022-06-04T09:17:02.110098", "status": "completed"} tags=[]
all_futures_baltas2020['name'].values
# + papermill={"duration": 0.020147, "end_time": "2022-06-04T09:17:02.167040", "exception": false, "start_time": "2022-06-04T09:17:02.146893", "status": "completed"} tags=[]
print(len(all_futures_baltas2020))
# + [markdown] papermill={"duration": 0.011589, "end_time": "2022-06-04T09:17:02.189304", "exception": false, "start_time": "2022-06-04T09:17:02.177715", "status": "completed"} tags=[]
# We try to use the same contracts as the original paper where possible, resulting in 54 futures contracts to analyse. The trading universe covers a wide range of asset classes and regions, and is made up of the following instruments:
# - Equity index futures
# - Government bond futures
# - FX futures
# - Commodity futures
#
# where the same trend detection method is applied.
# + [markdown] papermill={"duration": 0.01261, "end_time": "2022-06-04T09:17:02.214528", "exception": false, "start_time": "2022-06-04T09:17:02.201918", "status": "completed"} tags=[]
# # Performance
#
# + [markdown] papermill={"duration": 0.010207, "end_time": "2022-06-04T09:17:02.235625", "exception": false, "start_time": "2022-06-04T09:17:02.225418", "status": "completed"} tags=[]
# ## Moskowitz et al
#
# First we calculate the original trend-following portfolio with a more traditional close-to-close realised volatility estimator. Although Moskowitz et al. use the exponentially weighted standard deviation, BK2020 appears to compare against the rolling standard deviation.
# + papermill={"duration": 656.169885, "end_time": "2022-06-04T09:27:58.415855", "exception": false, "start_time": "2022-06-04T09:17:02.245970", "status": "completed"} tags=[]
engine_original = BacktestEngine(
strategy=Strategy.DELTA_ONE.value,
instrument=all_futures_baltas2020.index,
signal=signal.ParallelSignal([
signal.VolatilityScale(agg_method='rolling', window=21, ann_factor=261,
target_volatility=0.4, signal_cap=0.95),
signal.TSMOMMoskowitz2012(post_process=processing.AsFreq(freq='m', method='pad'))
], weighting='product'),
weighting=Weighting.EQUAL_WEIGHT.value,
log_level=logging.WARN,
)
engine_original.run()
# + papermill={"duration": 85.2614, "end_time": "2022-06-04T09:29:23.688775", "exception": false, "start_time": "2022-06-04T09:27:58.427375", "status": "completed"} tags=[]
engine_original_eq = (engine_original.calculate_equity_curve(calculate_net=False, rebalance_freq='m')
.rename('Trend-following portfolio (Moskowitz 2012)'))
# + papermill={"duration": 0.872625, "end_time": "2022-06-04T09:29:24.576544", "exception": false, "start_time": "2022-06-04T09:29:23.703919", "status": "completed"} tags=[]
fig, ax = plt.subplots(figsize=(8, 4.5))
engine_original_eq.plot(ax=ax, logy=True)
ax.set_title('Trend-following portfolio (Moskowitz 2012)')
ax.set_ylabel('Cumulative returns');
# + papermill={"duration": 0.142793, "end_time": "2022-06-04T09:29:24.731299", "exception": false, "start_time": "2022-06-04T09:29:24.588506", "status": "completed"} tags=[]
engine_original_eq.pipe(Performance).summary()
# + [markdown] papermill={"duration": 0.010597, "end_time": "2022-06-04T09:29:24.752565", "exception": false, "start_time": "2022-06-04T09:29:24.741968", "status": "completed"} tags=[]
#
# ## Volatility estimator
# BK2020 proposes a few changes to enhance the original trend-following strategy reported by Moskowitz et al. The first is to use a more sophisticated realised volatility estimator for volatility scaling. Moskowitz et al. used the exponentially weighted standard deviation of close-to-close returns. BK2020, on the other hand, suggests using the Yang-Zhang estimator as it is much more efficient.
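#
# For reference, below is a minimal pandas sketch of the Yang-Zhang estimator (not the `vivace` implementation); the OHLC column names, window length and annualisation factor are assumptions.

# +
import numpy as np

def yang_zhang_vol(ohlc, window: int = 21, ann_factor: int = 261):
    """Rolling annualised Yang-Zhang volatility from columns 'open', 'high', 'low', 'close'."""
    o, h, l, c = ohlc['open'], ohlc['high'], ohlc['low'], ohlc['close']
    overnight = np.log(o / c.shift(1))                                  # close-to-open return
    open_close = np.log(c / o)                                          # open-to-close return
    rs = np.log(h / c) * np.log(h / o) + np.log(l / c) * np.log(l / o)  # Rogers-Satchell term
    k = 0.34 / (1.34 + (window + 1) / (window - 1))
    var_yz = (overnight.rolling(window).var()
              + k * open_close.rolling(window).var()
              + (1 - k) * rs.rolling(window).mean())
    return np.sqrt(var_yz * ann_factor)
# -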
# + papermill={"duration": 1249.307062, "end_time": "2022-06-04T09:50:14.071174", "exception": false, "start_time": "2022-06-04T09:29:24.764112", "status": "completed"} tags=[]
engine_vol = BacktestEngine(
strategy=Strategy.DELTA_ONE.value,
instrument=all_futures_baltas2020.index,
signal=signal.ParallelSignal([
signal.VolatilityScale(volatility_type=RealisedVolatility.YZ.value,
agg_method='rolling', window=21, min_periods=10,
target_volatility=0.4, signal_cap=0.95, ann_factor=261),
signal.TSMOMMoskowitz2012(post_process=processing.AsFreq(freq='m', method='pad'))
], weighting='product'),
weighting=Weighting.EQUAL_WEIGHT.value,
log_level=logging.WARN,
)
engine_vol.run()
# + papermill={"duration": 85.780723, "end_time": "2022-06-04T09:51:39.863599", "exception": false, "start_time": "2022-06-04T09:50:14.082876", "status": "completed"} tags=[]
engine_vol_eq = (engine_vol.calculate_equity_curve(calculate_net=False, rebalance_freq='m')
.rename('Trend-following portfolio (YZ estimator)'))
# + papermill={"duration": 0.607836, "end_time": "2022-06-04T09:51:40.482437", "exception": false, "start_time": "2022-06-04T09:51:39.874601", "status": "completed"} tags=[]
fig, ax = plt.subplots(figsize=(8, 4.5))
engine_vol_eq.plot(ax=ax, logy=True);
ax.set_title('Trend-following portfolio (YZ estimator)')
ax.set_ylabel('Cumulative returns');
# + papermill={"duration": 0.094204, "end_time": "2022-06-04T09:51:40.587850", "exception": false, "start_time": "2022-06-04T09:51:40.493646", "status": "completed"} tags=[]
engine_vol_eq.pipe(Performance).summary()
# + [markdown] papermill={"duration": 0.012608, "end_time": "2022-06-04T09:51:40.613481", "exception": false, "start_time": "2022-06-04T09:51:40.600873", "status": "completed"} tags=[]
# ## TREND signal
#
# Another improvement BK2020 proposes is to use a continuous signal rather than a binary one in order to reduce the turnover when signals are weak. Specifically they used the t-values with the Newey-West standard error as the signal. They further capped and floored the signal between -1 and +1 to avoid extreme positions.
#
# `statsmodels`'s `OLS` class provides a method to compute the Newey-West standard error. https://www.statsmodels.org/stable/generated/statsmodels.regression.linear_model.OLS.html
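#
# As a rough illustration only (a sketch, not the `vivace`/`TSMOMBaltas2020` implementation), the capped t-statistic of a time-trend regression with Newey-West standard errors could be computed for a single price series as follows; the lookback window and lag length are assumptions.

# +
import numpy as np
import statsmodels.api as sm

def trend_signal(prices: pd.Series, window: int = 252, max_lag: int = 10) -> float:
    """Newey-West t-statistic of a linear time trend on log prices, capped to [-1, +1]."""
    y = np.log(prices.iloc[-window:].to_numpy())
    x = sm.add_constant(np.arange(len(y)))
    fit = sm.OLS(y, x).fit(cov_type='HAC', cov_kwds={'maxlags': max_lag})
    # Cap and floor the slope's t-value to avoid extreme positions, as proposed by BK2020
    return float(np.clip(fit.tvalues[1], -1.0, 1.0))
# -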
# + papermill={"duration": null, "end_time": null, "exception": false, "start_time": "2022-06-04T09:51:40.624656", "status": "running"} tags=[]
engine_trend = BacktestEngine(
strategy=Strategy.DELTA_ONE.value,
instrument=all_futures_baltas2020.index,
signal=signal.ParallelSignal([
signal.VolatilityScale(agg_method='rolling', window=21, ann_factor=261,
target_volatility=0.4, signal_cap=0.95),
signal.TSMOMBaltas2020()
], weighting='product'),
weighting=Weighting.EQUAL_WEIGHT.value,
log_level=logging.WARN,
)
engine_trend.run()
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
engine_trend_eq = (engine_trend.calculate_equity_curve(calculate_net=False, rebalance_freq='m')
.rename('Trend-following portfolio (TREND signal)'))
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
fig, ax = plt.subplots(figsize=(8, 4.5))
engine_trend_eq.plot(ax=ax, logy=True);
ax.set_title('Trend-following portfolio (TREND signal)')
ax.set_ylabel('Cumulative returns');
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
engine_trend_eq.pipe(Performance).summary()
# + [markdown] papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
# ## Combined
#
# Finally we run a backtest by using both the Yang-Zhang volatility estimator and the TREND signal.
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
engine_combined = BacktestEngine(
strategy=Strategy.DELTA_ONE.value,
instrument=all_futures_baltas2020.index,
signal=signal.ParallelSignal([
signal.VolatilityScale(volatility_type=RealisedVolatility.YZ.value,
agg_method='rolling', window=21, min_periods=10,
target_volatility=0.4, signal_cap=0.95, ann_factor=261),
signal.TSMOMBaltas2020()
], weighting='product'),
weighting=Weighting.EQUAL_WEIGHT.value,
log_level=logging.WARN,
)
engine_combined.run()
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
engine_combined_eq = (engine_combined.calculate_equity_curve(calculate_net=False, rebalance_freq='m')
.rename('Trend-following portfolio (YZ estimator + TREND signal)'))
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
fig, ax = plt.subplots(figsize=(8, 4.5))
engine_combined_eq.plot(ax=ax, logy=True)
ax.set_title('Trend-following portfolio (YZ estimator + TREND signal)')
ax.set_ylabel('Cumulative returns');
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
engine_combined_eq.pipe(Performance).summary()
# + [markdown] papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
# # Comparison
#
# In this section we compare the above 4 backtests.
# + [markdown] papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
# ## Performance
#
# As reported in BK2020, using the YZ estimator and TREND signal does not alter the performance characteristics. Rather, the TREND signal somewhat improves the Sharpe ratio. Also the TREND signal lowers the annualised volatility from 12.5% to 10.8% with the close-to-close realised volatility.
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
performance_comp = pd.concat((engine_original_eq, engine_vol_eq,
engine_trend_eq, engine_combined_eq), axis=1)
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
fig, ax = plt.subplots(figsize=(8, 4.5))
performance_comp.plot(ax=ax, logy=True);
ax.set_title('Trend-following portfolio')
ax.set_ylabel('Cumulative returns');
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
performance_comp.pipe(Performance).summary()
# + [markdown] papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
# ## Turnover reduction
#
# Again, as reported in BK2020, both enhancements reduce the turnover significantly while not affecting the performance. The chart below shows the reduction in turnover for each futures contract. By utilising the YZ realised volatility and the TREND signal, overall the turnover is reduced by more than 30%.
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
engine_original_pos = engine_original.get_position_by_instrument()
engine_vol_pos = engine_vol.get_position_by_instrument()
engine_trend_pos = engine_trend.get_position_by_instrument()
engine_combined_pos = engine_combined.get_position_by_instrument()
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
engine_original_turnover = (engine_original_pos.diff().abs().mean().mul(261)
.rename('Trend-following portfolio (Moskowitz 2012)'))
engine_vol_turnover = (engine_vol_pos.diff().abs().mean().mul(261)
.rename('Trend-following portfolio (YZ estimator)'))
engine_trend_turnover = (engine_trend_pos.diff().abs().mean().mul(261)
.rename('Trend-following portfolio (TREND signal)'))
engine_combined_turnover = (engine_combined_pos.diff().abs().mean().mul(261)
.rename('Trend-following portfolio (YZ estimator + TREND signal)'))
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
turnover_reduction = (
pd.concat((engine_vol_turnover, engine_trend_turnover, engine_combined_turnover), axis=1)
.div(engine_original_turnover, axis=0)
.sub(1)
.reindex(index=all_futures_baltas2020.index)
.rename(index=all_futures_baltas2020['name'])
)
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
turnover_reduction.mean()
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
ax = turnover_reduction.plot(kind='bar', figsize=(14, 4))
ax.yaxis.set_major_formatter(mticker.PercentFormatter(1))
ax.axhline(0, color='black', lw=1)
ax.axvline(24.5, color='black', lw=1)
ax.axvline(30.5, color='black', lw=1)
ax.axvline(42.5, color='black', lw=1)
ax.set_xlabel('Contract')
ax.set_ylabel('Turnover reduction');
# + [markdown] papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
# # Recent performance
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
fig, ax = plt.subplots(figsize=(8, 4.5))
performance_comp.tail(252 * 2).pct_change().fillna(0).add(1).cumprod().plot(ax=ax, logy=True);
ax.set_title('Trend-following portfolio')
ax.set_ylabel('Cumulative returns');
# + [markdown] papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
# # Reference
#
# - <NAME>. and <NAME>., 2020. Demystifying time-series momentum strategies: Volatility estimators, trading rules and pairwise correlations. In: Market Momentum: Theory and Practice, Wiley.
# - <NAME>., <NAME>. and <NAME>., 2012. Time series momentum. Journal of Financial Economics, 104(2), pp.228-250.
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
print(f'Updated: {datetime.utcnow():%d-%b-%Y %H:%M}')
| trend_following_baltas2020.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# + [markdown] origin_pos=0
# # Automatic Parallelism
# :label:`sec_auto_para`
#
# Deep learning frameworks (e.g., MXNet and PyTorch) automatically construct computational graphs at the backend. Using a computational graph, the system is aware of all the dependencies and can selectively execute multiple, mutually independent tasks in parallel to improve speed. For instance, :numref:`fig_asyncgraph` in :numref:`sec_async` initializes two variables independently, so the system can choose to execute them in parallel.
#
# Typically, a single operator will use all the computational resources on all CPUs or on a single GPU. For example, the `dot` operator will use all cores (and threads) on all CPUs, even if there are multiple CPU processors on a single machine; the same behaviour applies to a single GPU. Hence parallelization is not all that useful for single-device computers, whereas with multiple devices it matters a great deal. While parallelization is typically applied between multiple GPUs, adding the local CPU can also increase performance slightly. For example, :cite:`Hadjis.Zhang.Mitliagkas.ea.2016` applies training that combines GPUs and CPUs to computer vision models. With the convenience of an automatically parallelizing framework, we can achieve the same goal in a few lines of Python code. More broadly, our discussion of automatic parallel computation focuses on parallel computation using both CPUs and GPUs, as well as on parallelizing computation and communication.
#
# Note that we need at least two GPUs to run the experiments in this section.
#
# + origin_pos=1 tab=["mxnet"]
from mxnet import np, npx
from d2l import mxnet as d2l
npx.set_np()
# + [markdown] origin_pos=3
# ## Parallel Computation on GPUs
#
# Let's start by defining a reference workload to test with: the `run` function below performs 50 matrix-matrix multiplications, using data allocated into two variables (`x_gpu1` and `x_gpu2`), each of which is placed on a different one of the devices we selected.
#
# + origin_pos=4 tab=["mxnet"]
devices = d2l.try_all_gpus()
def run(x):
return [x.dot(x) for _ in range(50)]
x_gpu1 = np.random.uniform(size=(4000, 4000), ctx=devices[0])
x_gpu2 = np.random.uniform(size=(4000, 4000), ctx=devices[1])
# + [markdown] origin_pos=6 tab=["mxnet"]
# Now we apply the function to the data. To make sure that caching does not affect the final results, we warm up the devices by performing a single pass on each of them before measuring.
#
# + origin_pos=8 tab=["mxnet"]
run(x_gpu1)  # Warm up the devices
run(x_gpu2)
npx.waitall()
with d2l.Benchmark('GPU1 time'):
run(x_gpu1)
npx.waitall()
with d2l.Benchmark('GPU2 time'):
run(x_gpu2)
npx.waitall()
# + [markdown] origin_pos=10 tab=["mxnet"]
# If we remove the `waitall` statement between the two tasks, the system can automatically parallelize the computation across both devices.
#
# + origin_pos=12 tab=["mxnet"]
with d2l.Benchmark('GPU1 & GPU2'):
run(x_gpu1)
run(x_gpu2)
npx.waitall()
# + [markdown] origin_pos=14
# In the above case the total execution time is less than the sum of the execution times of its two parts, since the deep learning framework automatically schedules the computation on both GPU devices without requiring sophisticated code from the user.
#
# ## Parallel Computation and Communication
#
# In many cases we need to move data between different devices, say between the CPU and a GPU, or between different GPUs. For instance, when we want to perform distributed optimization, we need to move data in order to aggregate the gradients over multiple accelerator cards. Let's simulate this by computing on the GPU and then copying the result back to the CPU.
#
# + origin_pos=15 tab=["mxnet"]
def copy_to_cpu(x):
return [y.copyto(npx.cpu()) for y in x]
with d2l.Benchmark('Run on GPU1'):
y = run(x_gpu1)
npx.waitall()
with d2l.Benchmark('Copy to CPU'):
y_cpu = copy_to_cpu(y)
npx.waitall()
# + [markdown] origin_pos=17 tab=["mxnet"]
# This is somewhat inefficient. Note that we could already start copying parts of `y` to the CPU while the remainder of the list is still being computed. This happens, for example, when we compute the gradients on a minibatch: the gradients of some parameters become available earlier than those of others, so it works to our advantage to start using the PCI-Express bus bandwidth to move data while the GPU is still running. Removing `waitall` between the two parts lets us simulate this scenario.
#
# + origin_pos=19 tab=["mxnet"]
with d2l.Benchmark('Run on GPU1 and copy to CPU'):
y = run(x_gpu1)
y_cpu = copy_to_cpu(y)
npx.waitall()
# + [markdown] origin_pos=21
# The total time required for both operations is less than the sum of the times of their parts. Note that, unlike parallel computation, this task uses a different resource: the bus between the CPU and the GPUs. In fact, we can compute on both devices and communicate at the same time. As noted above, there is a dependency between computation and communication: `y[i]` must be computed before it can be copied to the CPU. Fortunately, the system can copy `y[i-1]` while computing `y[i]`, which reduces the total running time.
#
# We conclude with an example of the computational graph and its dependencies for a simple two-layer MLP trained on a CPU and two GPUs, as shown in :numref:`fig_twogpu`. Manually scheduling the parallel program resulting from this would be quite painful; this is where graph-based computing backends have the advantage when it comes to optimization.
#
# ![The computational graph and its dependencies of a two-layer MLP on a CPU and two GPUs.](../img/twogpu.svg)
# :label:`fig_twogpu`
#
# ## Summary
#
# * Modern systems have a variety of devices, such as multiple GPUs and multiple CPUs, and they can be used in parallel and asynchronously.
# * Modern systems also have a variety of communication resources, such as PCI Express, storage (typically solid-state drives or network storage) and network bandwidth; to reach peak efficiency they can be used in parallel.
# * The backend can improve performance through automatic parallelization of computation and communication.
#
# ## Exercises
#
# 1. The operations performed in the `run` function defined in this section are independent of each other. Design an experiment to see whether the deep learning framework will automatically execute them in parallel (a possible sketch follows this list).
# 1. When the workload of an individual operator is sufficiently small, parallelization can help even on a single CPU or a single GPU. Design an experiment to verify this.
# 1. Design an experiment that uses parallel computation and communication on both the CPU and the GPU.
# 1. Use a debugger such as NVIDIA's [Nsight](https://developer.nvidia.com/nsight-compute-2019_5) to verify that your code is efficient.
# 1. Design computation tasks with more complex data dependencies and run experiments to see whether you can obtain the correct results while improving performance.
#
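# + [markdown] tab=["mxnet"]
# A possible sketch for the first exercise (one way among many; the matrix size and the number of independent products below are arbitrary choices): enqueue several independent matrix products and compare synchronising after every product against letting the backend schedule all of them.

# + tab=["mxnet"]
xs = [np.random.uniform(size=(2000, 2000), ctx=devices[0]) for _ in range(8)]
npx.waitall()  # make sure allocation has finished before timing

with d2l.Benchmark('Synchronise after each operation'):
    for x in xs:
        x.dot(x)
        npx.waitall()

with d2l.Benchmark('Let the backend schedule all operations'):
    for x in xs:
        x.dot(x)
    npx.waitall()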
# + [markdown] origin_pos=22 tab=["mxnet"]
# [Discussions](https://discuss.d2l.ai/t/2795)
#
| submodules/resource/d2l-zh/mxnet/chapter_computational-performance/auto-parallelism.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Performance programming
# We've spent most of this course looking at how to make code readable and reliable. For research work, it is often also important that code is efficient: that it does what it needs to do *quickly*.
# It is very hard to work out beforehand whether code will be efficient or not: it is essential to *Profile* code, to measure its performance, to determine what aspects of it are slow.
# When we looked at Functional programming, we claimed that code which is conceptualised in terms of actions on whole data-sets rather than individual elements is more efficient. Let's measure the performance of a few different implementations of the same task and see how they compare.
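# As an aside, the same kind of measurement can be scripted outside a notebook with the standard-library `timeit` module; the `%%timeit` cell magic used below is the notebook equivalent. A minimal sketch (the snippet being timed here is arbitrary):

import timeit

setup = "xs = list(range(1000))"
# Time two ways of squaring every element of a list, 1000 runs each
print(timeit.timeit("[x * x for x in xs]", setup=setup, number=1000))
print(timeit.timeit("list(map(lambda x: x * x, xs))", setup=setup, number=1000))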
# ## Two Mandelbrots
# You're probably familiar with a famous fractal called the [Mandelbrot Set](https://www.youtube.com/watch?v=ZDU40eUcTj0).
# For a complex number $c$, $c$ is in the Mandelbrot set if the series $z_{i+1}=z_{i}^2+c$ (With $z_0=c$) stays close to $0$.
# Traditionally, we plot a color showing how many steps are needed for $\left|z_i\right|>2$, whereupon we are sure the series will diverge.
# Here's a trivial python implementation:
def mandel1(position, limit=50):
value = position
while abs(value) < 2:
limit -= 1
value = value ** 2 + position
if limit < 0:
return 0
return limit
xmin = -1.5
ymin = -1.0
xmax = 0.5
ymax = 1.0
resolution = 300
xstep = (xmax - xmin) / resolution
ystep = (ymax - ymin) / resolution
xs = [(xmin + (xmax - xmin) * i / resolution) for i in range(resolution)]
ys = [(ymin + (ymax - ymin) * i / resolution) for i in range(resolution)]
# %%timeit
data = [[mandel1(complex(x, y)) for x in xs] for y in ys]
data1 = [[mandel1(complex(x, y)) for x in xs] for y in ys]
# +
# %matplotlib inline
import matplotlib.pyplot as plt
plt.imshow(data1, interpolation="none")
# -
# In this lesson we will learn how to make a version of this code which runs ten times faster:
# +
import numpy as np
def mandel_numpy(position, limit=50):
    value = position
    # Records, for every grid point, the remaining iteration count when it first diverged
    # (0 means "has not diverged yet")
    diverged_at_count = np.zeros(position.shape)
    while limit > 0:
        limit -= 1
        value = value ** 2 + position
        # value * conj(value) is |value|**2, so this tests |value| > 2
        diverging = value * np.conj(value) > 4
        first_diverged_this_time = np.logical_and(diverging, diverged_at_count == 0)
        diverged_at_count[first_diverged_this_time] = limit
        # Clamp diverged values so they do not overflow on later iterations
        value[diverging] = 2
    return diverged_at_count
# -
ymatrix, xmatrix = np.mgrid[ymin:ymax:ystep, xmin:xmax:xstep]
values = xmatrix + 1j * ymatrix
data_numpy = mandel_numpy(values)
# +
# %matplotlib inline
import matplotlib.pyplot as plt
plt.imshow(data_numpy, interpolation="none")
# -
# %%timeit
data_numpy = mandel_numpy(values)
# Note we get the same answer:
(data_numpy == data1).all()
| module09_programming_for_speed/09_00_performance_programming.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from __future__ import print_function
import sisl
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# This example will setup the required electronic structures for usage in TBtrans.
# We will continue with the graphene nearest neighbour tight-binding model and perform simple transport calculations using TBtrans.
# Again we require the graphene unit-cell and the construction of the Hamiltonian object:
graphene = sisl.geom.graphene().tile(2, axis=0)
H = sisl.Hamiltonian(graphene)
H.construct([[0.1, 1.43], [0., -2.7]])
# Note that the above call of the graphene lattice is different from [TB 2](../TB_02/run.ipynb), and similar to [TB 1](../TB_01/run.ipynb). In this example we will create a *non-orthogonal* graphene lattice, i.e. the lattice vectors are the minimal lattice vectors of graphene.
# The minimal graphene lattice consists of 2 Carbon atoms.
# We `tile` the `Geometry` to make it slightly bigger.
# You are encouraged to draw the graphene lattice vectors, and draw an arrow in the direction of the transport (along the 2nd lattice vector). Note that one *can* calculate transport along non-orthogonal directions (also in TranSiesta).
#
# Assert that we have 16 non-zero elements:
print(H)
# The Hamiltonian we have thus far created will be our *electrode*. Lets write it to a TBtrans readable file:
H.write('ELEC.nc')
# Now a file `ELEC.nc` file exists in the folder and it contains all the information (and more) that TBtrans requires to construct the self-energies for the electrode.
# ### Creating the device, `Hamiltonian` $\to$ `Hamiltonian`
# The `Geometry.tile` function is an explicit method to create bigger lattices from a smaller reference lattice. However, the `tile` routine is also available to the `Hamiltonian` object. Not only is it much easier to use, it also presents these advantages:
#
# * It guarantees that the matrix elements are the same as the reference `Hamiltonian`, i.e. you need not specify the parameters to `construct` twice,
# * It is *much* faster when creating systems of $>500,000$ atoms/orbitals from smaller reference systems,
# * It also requires less code which increases readability and is less prone to errors.
H_device = H.tile(3, axis=1)
print(H_device)
# For more information you may execute the following lines to view the documentation:
#
# help(Geometry.tile)
# help(Hamiltonian.tile)
#
#
# Now we have created the device electronic structure. The final step is to store it in a TBtrans readable format:
H_device.write('DEVICE.nc')
# Now run tbtrans:
#
# tbtrans RUN.fdf
tbt = sisl.get_sile('siesta.TBT.nc')
# After calculating the transport properties you may also use `sisl` to interact with the TBtrans output (in the `*.TBT.nc` file). Please repeat the same convergence tests you performed in example 02.
# What **k**-point sampling is required, compared to example 02, to obtain a similar transmission function?
plt.plot(tbt.E, tbt.transmission(), label='k-averaged');
plt.plot(tbt.E, tbt.transmission(kavg=tbt.kindex([0, 0, 0])), label=r'$\Gamma$');
plt.xlabel('Energy [eV]'); plt.ylabel('Transmission'); plt.ylim([0, None]) ; plt.legend();
# ## Exercises
#
# - Extract the DOS for each sub-lattice and plot them, see the 1) `atom` or 2) `orbital` keywords in the `.DOS` and `.ADOS` routines
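# A possible starting point for this exercise (a sketch only: check the exact keyword names against your installed sisl version, and note that the even/odd sublattice split assumed here relies on the atomic ordering created above):

a_idx = list(range(0, len(tbt.geometry), 2))  # assumed sublattice A (even atom indices)
b_idx = list(range(1, len(tbt.geometry), 2))  # assumed sublattice B (odd atom indices)
plt.plot(tbt.E, tbt.DOS(atom=a_idx), label='sublattice A');
plt.plot(tbt.E, tbt.DOS(atom=b_idx), label='sublattice B');
plt.xlabel('Energy [eV]'); plt.ylabel('DOS [1/eV]'); plt.legend();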
# ### Learned objectives
#
#
# - Calculation of transport in non-orthogonal lattices are possible, this applies to both TBtrans *and* TranSiesta
# - Extraction of DOS for a subset of atoms
| ts-tbt-sisl-tutorial-master/TB_03/run.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from imutils import face_utils #for resizing
import numpy as np
import argparse
import imutils
import dlib
import cv2
import time
from scipy.spatial import distance as dist #euclidian distance
import pandas as pd
import csv
from pathlib import Path
def write_dict_to_csv(csv_file, csv_columns, dict_data):
try:
with open(csv_file, 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=csv_columns)
#writer.writeheader()
for key, value in dict_data.items():
writer.writerow({'name': key, 'face_data': value})
except IOError:
print("I/O error", csv_file)
return
def append_to_csv(csvfile, data):
with open(csvfile, 'a') as f:
writer = csv.writer(f)
for key, value in data.items():
writer.writerow([key,value])
return
def cvt_to_array(data, split_with=''):
if split_with == '':
return np.array(list(map(float, data)))
else:
return np.array(list(map(float, data.split(split_with))))
csv_columns = ['name', 'face_data']
csv_file = 'all_face_data.csv'
face_data = []
labels = []
data = {}
detector = dlib.get_frontal_face_detector() # detect the faces in the image. How many faces are there
predictor = dlib.shape_predictor('./shape_predictor_68_face_landmarks.dat') # predict the face landmarks such as mouth or eyes
#(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
#(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
facerec = dlib.face_recognition_model_v1('./dlib_face_recognition_resnet_model_v1.dat') #pretrained model.
#we send the data to this function and it returns a 128D vector that describes the face.
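
# A small helper that makes the matching rule explicit (illustrative only; the 0.55 cut-off is simply the value used later in this notebook, not a dlib constant):
def same_person(descriptor_a, descriptor_b, threshold=0.55):
    # two 128D face descriptors are treated as the same person when their euclidean distance is below the threshold
    return dist.euclidean(np.array(descriptor_a), np.array(descriptor_b)) < threshold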
# +
#capture the person and save as the 128D vector
# this part captures only once
cap = cv2.VideoCapture(0)
#while True:
face_number = 0
while face_number == 0:
    print('Please show your whole face to the camera. When a face is detected, you will be asked for a name.')
time.sleep(0.5)
ret, image = cap.read()
image = imutils.resize(image, width=500) #resizing
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # convert to grayscale in order to improve the detection result
rects = detector(gray, 0) # detect how many faces in the image
cv2.imshow('asd', gray)
for (i, rect) in enumerate(rects):
        # for every detected face
# determine the facial landmarks for the face region, then
# convert the landmark (x, y)-coordinates to a NumPy array
shape = predictor(gray, rect) # predict the face landmarks in image.
face_descriptor = facerec.compute_face_descriptor(image, shape) # send the shape data to resnet model. it returns a 128D vector
while face_descriptor == -1:
print('Face not found.')
else:
face_data.append(face_descriptor) # save the face data to array
shape = face_utils.shape_to_np(shape)
for (x, y) in shape:
cv2.circle(image, (x, y), 1, (0, 0, 255), -1)
            name = input('Who is this? ')
labels.append(name)
data[labels[0]] = face_data[0]
face_data=[]
labels=[]
my_file = Path("./" + csv_file)
if my_file.is_file():
append_to_csv(csv_file, data)
print('File already exist, data is appended to file')
else:
write_dict_to_csv(csv_file, csv_columns, data)
print('File has been created and data saved to file.')
face_number += 1
#print(face_descriptor)
'''
leftEye = shape[lStart:lEnd]
rightEye = shape[rStart:rEnd]
leftEAR = eye_aspect_ratio(leftEye)
rightEAR = eye_aspect_ratio(rightEye)
leftEyeHull = cv2.convexHull(leftEye)
rightEyeHull = cv2.convexHull(rightEye)
cv2.drawContours(image, [leftEyeHull], -1, (0, 255, 0), 1)
cv2.drawContours(image, [rightEyeHull], -1, (0, 255, 0), 1)
ear = (leftEAR + rightEAR) / 2.0
'''
cv2.imshow("Saved face", image)
cv2.waitKey(0)
#key = cv2.waitKey(1) & 0xFF
#break
    # if the `q` key was pressed, break from the loop
#if key == ord("q"):
# break
#time.sleep(0.5)
cap.release()
cv2.destroyAllWindows()
# + active=""
#
# +
# this part is the real-time recognition part. It computes the same descriptor data as the previous part,
# and it calculates the euclidean distance to every stored face. If any distance is less than 0.55, the face is recognised.
cap = cv2.VideoCapture(0)
while True:
ret, image = cap.read()
image = imutils.resize(image, width=200)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
rects = detector(gray, 0)
for (i, rect) in enumerate(rects):
# determine the facial landmarks for the face region, then
# convert the landmark (x, y)-coordinates to a NumPy array
shape = predictor(gray, rect)
trying = np.array(facerec.compute_face_descriptor(image, shape))
#distance_faces = dist.euclidean(face_data, trying)
with open('./'+csv_file, 'r') as f:
reader = csv.reader(f)
for row in reader:
if row == [] or row[1] == "face_data":
continue
else:
#row[1] = np.array(list(map(float, row[1].split('\n'))))
row[1] = cvt_to_array(row[1], '\n')
trying = cvt_to_array(trying)
distance_faces = dist.euclidean(row[1], trying)
if distance_faces < 0.55:
content = row[0]
break
else:
content = "unknown"
cv2.putText(image,content, (10,40), cv2.FONT_HERSHEY_PLAIN, 1, 255)
shape = face_utils.shape_to_np(shape)
for (x, y) in shape:
cv2.circle(image, (x, y), 1, (0, 0, 255), -1)
#print(distance_faces)
'''if distance_faces < 0.55:
cv2.putText(image,"furkan", (10,20), cv2.FONT_HERSHEY_PLAIN, 1, 255)
else:
cv2.putText(image,"unknown", (10,20), cv2.FONT_HERSHEY_PLAIN, 1, 255)
leftEye = shape[lStart:lEnd]
rightEye = shape[rStart:rEnd]
leftEAR = eye_aspect_ratio(leftEye)
rightEAR = eye_aspect_ratio(rightEye)
leftEyeHull = cv2.convexHull(leftEye)
rightEyeHull = cv2.convexHull(rightEye)
cv2.drawContours(image, [leftEyeHull], -1, (0, 255, 0), 1)
cv2.drawContours(image, [rightEyeHull], -1, (0, 255, 0), 1)
ear = (leftEAR + rightEAR) / 2.0
'''
cv2.imshow("Frame", image)
key = cv2.waitKey(1) & 0xFF
#break
# if the `q` key was pressed, break from the loop
if key == ord("q"):
break
time.sleep(0.1)
cap.release()
cv2.destroyAllWindows()
# -
cap.release()
cv2.destroyAllWindows()
| main.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# #### Installation of R packages
# +
#install.packages("ISwR")
# -
# #### Package loading
library(ISwR)
# #### Variable definition and assignment
weight <- 60
height = 1.75
subject <- "A"
healthy <- TRUE
# #### Variable evaluation
weight
# #### Functions for type checking
is.numeric(weight) # variable
is.double(weight)
is.integer(weight)
is.character(subject)
# #### Functions for variable conversion
weight <- as.integer(weight)
is.integer(weight)
# #### Computing the body mass index (BMI) from the weight and height
#Body mass index (BMI)
bmi <- weight/height^2
bmi
# #### Functions for string manipulation
message <- sprintf("%.1f", bmi)
print(message)
# #### Vector definition
weight <- c(60, 72, 57, 90, 95, 72)
height <- c(1.75, 1.80, 1.65, 1.90, 1.74, 1.91)
subject <- c("A", "B", "C", "D", "E", "F")
# #### Vector evaluation
weight
height
subject
# #### Creating a vector with a particular size
vec <- rep(0, 10)
vec
# #### Vector length
length(weight)
# #### Vector indexes: from one to the length of the vector
weight[1]
weight[length(weight)]
# #### Iteration: for loop
# from one to the length of weight
bmi <- 0
for (i in 1:length(weight)) {
bmi[i] <- weight[i]/height[i]^2
}
# evaluation of the bmi vector
bmi
# #### Iteration: while loop
# run while i is below or equal to the length of weight
bmi <- 0
i <- 1
while (i <= length(weight)) {
bmi[i] <- weight[i]/height[i]^2
i <- i + 1
}
bmi
# #### Remove a variable
rm(bmi)
exists("bmi")
# #### Right way of manipulating vectors: assigning at once
bmi <- weight/height^2
bmi
# #### Creating a function
# name <- function(parameters) { body }
compute_bmi <- function(weight, height) {
bmi <- weight/height^2
return(bmi)
}
# #### Using a function with scalars
# +
bmi <- compute_bmi(60, 1.75)
bmi
# -
# #### Using the same function with vectors
bmi <- compute_bmi(weight, height)
bmi
# #### Example of a function to compute the average
# (iterating over all elements of the vector)
average <- function(vec) {
s <- 0
n <- length(vec)
for (x in vec) {
s <- s + x
}
return(s/n)
}
# invoking the function
avg_bmi <- average(bmi)
avg_bmi
# #### Example of a function to compute the average
# (manipulating vectors at once)
average <- function(vec) {
s <- sum(vec)
n <- length(vec)
return(s/n)
}
# invoking the function
avg_bmi <- average(bmi)
avg_bmi
# #### Average function using mean function
# Major statistical functions are available in R
average <- function(vec) {
return(mean(vec))
}
# invoking the function
avg_bmi <- average(bmi)
avg_bmi
# #### Working with vectors with NA
# Operations with NA lead to NA.
x <- c(10, NA, 13)
y <- average(x)
y
# #### addressing NA with na.rm=TRUE
average <- function(vec) {
return(mean(vec, na.rm=TRUE))
}
x <- c(10, NA, 13)
y <- average(x)
y
# #### Plotting graphics
# scatter plots
plot(height, weight)
# #### Most functions contain many default parameters
plot(height, weight, pch=2)
# #### Default function arguments can be shown with args
args(plot.default)
# #### All functions in R that belongs to packages have help with examples
?base::plot
# #### Canvas for plotting is still active until a new plot
plot(height, weight)
hh = c(1.65, 1.70, 1.75, 1.80, 1.85, 1.90)
lines(hh, 22.5 * hh^2)
# #### Factors
# Factors are used to handle categorical data.
pain <- c(0,3,2,2,1)
fpain <- factor(pain,levels=0:3, ordered=TRUE)
fpain
# #### Levels provide correspondence between numerical values and categorical labels
levels(fpain) <- c("none","mild","medium","severe")
fpain
# #### Convert height to factor
# Levels: small, medium, high
# #### coding setting element by element
# +
lev <- rep("", length(height))
for (i in 1:length(height)) {
if (height[i] < 1.7)
lev[i] <- "short"
else if (height[i] < 1.9)
lev[i] <- "medium"
else
lev[i] <- "tall"
}
lev <- as.factor(lev)
lev
# -
# #### coding setting the vector at once
# It uses the cut function.
lev <- cut(height, breaks=c(0, 1.7, 1.9, .Machine$double.xmax), ordered=TRUE)
lev
levels(lev) <- c("short", "medium", "tall")
lev
# #### Matrix
# Matrices can be filled from vectors or data frames.
x <- 1:9
x
# #### Converting a vector to matrix
dim(x) <- c(3,3)
x
# #### accessing elements from a matrix
# +
for (i in 1:nrow(x))
for (j in 1:ncol(x))
print(x[i,j])
# -
# #### Iterating and assigning values to each element
# +
y <- x
for (i in 1:nrow(y))
for (j in 1:ncol(y))
y[i,j] <- 3 * y[i, j]
y
# -
# #### Assigning the values of a matrix at once
y <- 3*x
y
# #### Converting a vector to a matrix by row
x <- matrix(1:9,nrow=3,byrow=T)
x
# #### transposing a matrix
x <- t(x)
x
# #### computing the determinant of a matrix
det(x)
# #### Lists
# Lists are used to work with "objects"
# +
a <- c(5260,5470,5640,6180,6390,6515,6805,7515,7515,8230,8770)
b <- c(3910,4220,3885,5160,5645,4680,5265,5975,6790,6900,7335)
mybag <- list(a, b, 0, "a")
mybag
# -
# adding an element into a list
n <- length(mybag)
mybag[[n+1]] <- "b"
mybag
# #### List slicing
slice <- mybag[1]
slice
is.list(slice)
# #### Slicing is also a list
slice <- mybag[c(1,3)]
slice
is.list(slice)
# #### A list is also a vector
#list is also a vector
is.vector(slice)
# #### Member reference
# It accesses the element
h <- mybag[[1]]
h
# An element can be evaluated.
# In this case, it is a vector.
is.vector(h)
is.list(h)
# #### Naming variables
# They are properties on the list
mybag <- list(x=a, y=b, const=0, lit="a")
mybag
# #### Adding, accessing, and removing elements
mybag$c <- mybag$x - mybag$y
mybag$const <- NULL
mybag$lit <- NULL
mybag
# #### Data frames
# Data frames (tables) provide support for structured data.
d <- data.frame(A=a, B=b)
head(d)
# #### Adding a column in the data frame
d$c <- d$A + d$B
head(d)
d$A <- NULL
head(d)
# #### Reading csv file
# There are many functions for reading CSV, Excel, and RData formats.
wine = read.table(
"http://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data",
header = TRUE, sep = ",")
colnames(wine) <- c('Type', 'Alcohol', 'Malic', 'Ash',
'Alcalinity', 'Magnesium', 'Phenols',
'Flavanoids', 'Nonflavanoids',
'Proanthocyanins', 'Color', 'Hue',
'Dilution', 'Proline')
head(wine)
# #### saving in binary format
save(wine, file="wine.RData")
# #### removing data frame from memory
rm(wine)
# #### load binary format
load("wine.RData")
head(wine, 3)
# #### exporting data.frame into csv file
write.table(wine, file="wine.csv", row.names=FALSE, quote = FALSE, sep = ",")
# #### filtering vectors
# +
a <- c(5260,5470,5640,6180,6390,6515,6805,7515,7515,8230,8770)
b <- c(3910,4220,3885,5160,5645,4680,5265,5975,6790,6900,7335)
# logical vector
bool <- (a > 7000)
bool
# -
# selecting elements from positions that are true
a[bool]
# filtering with logical expressions
b[a < 6000 | a > 7000]
b[6000 <= a & a <= 7000]
# #### filtering data frames
data <- data.frame(a=a, b=b)
data$c <- data$a - data$b
head(data, nrow(data))
head(data[data$a > 7000,])
head(data[data$a > 7000,c(1,2)])
# #### performance with matrix and data frames
#install.packages("pryr")
library(pryr)
rheight <- rnorm(100000, 1.8, sd=0.2)
rweight <- rnorm(100000, 72, sd=15)
# #### computing a entire column at once
start_time <- Sys.time()
hw <- data.frame(height=rheight, weight=rweight)
hw$bmi <- hw$weight/hw$height^2
end_time <- Sys.time()
end_time - start_time
object_size(hw)
# #### processing cell by cell
start_time <- Sys.time()
hw <- data.frame(height=rheight, weight=rweight)
for (i in 1:nrow(hw)) {
hw$bmi[i] <- hw$weight[i]/hw$height[i]^2
}
end_time <- Sys.time()
end_time - start_time
# #### convert the entire column
start_time <- Sys.time()
hw <- data.frame(height=rheight, weight=rweight)
hw <- as.matrix(hw)
hw <- cbind(hw, 0)
for (i in 1:nrow(hw)) {
hw[i,3] <- hw[i,2]/hw[i,1]^2
}
end_time <- Sys.time()
end_time - start_time
# #### apply family
#
# apply functions can be applied for all rows or columns.
#
# The first character of the function name establishes the return type (s: simple, l: list).
library(ISwR)
data(thuesen)
head(thuesen)
#lapply returns a list
lapply(thuesen, mean, na.rm=T)
#sapply returns a vector
sapply(thuesen, mean, na.rm=T)
# apply - second parameter (1: by rows, 2: by columns)
m <- as.matrix(thuesen)
apply(m, 1, min, na.rm=TRUE)
apply(m, 2, min, na.rm=TRUE)
# ### sort and order
library(ISwR)
data(thuesen)
head(thuesen)
sort(thuesen$blood.glucose)
order(thuesen$blood.glucose)
o <- order(thuesen$blood.glucose)
sorted <- thuesen[o,]
head(sorted)
# #### Pipelines
# The operator `%>%` creates a pipeline.
#
# The first parameter of the next invoked function receives the data from the pipeline.
#
# Library $dplyr$ contains a set of functions that support relational algebra operations.
flight_data <- read.table(text = "Year Quarter Flights Delays
2016 1 11 6
2016 2 12 5
2016 3 13 3
2016 4 12 5
2017 1 10 4
2017 2 9 3
2017 3 11 4
2017 4 25 15
2018 1 14 3
2018 2 12 5
2018 3 13 3
2018 4 15 4",
header = TRUE,sep = "")
head(flight_data)
# +
#install.packages("dplyr")
# -
library(dplyr)
result <- flight_data %>%
filter(Delays > 5) %>%
select(Year, Quarter, Flights)
head(result)
library(dplyr)
result <- flight_data %>%
group_by(Year) %>%
summarize(mean = mean(Flights), sd = sd(Flights))
head(result)
nrow(flight_data)
head(flight_data)
#install.packages("reshape")
library(reshape)
result <- melt(flight_data[,c('Year', 'Quarter', 'Flights', 'Delays')],
id.vars = c(1,2))
nrow(result)
head(result[c(1:3,17:19), ])
# #### merge
#
# The function $merge$ can be used to join data frames. It can be used to produce inner, left, right, and outer joins.
stores <- data.frame(
city = c("Rio de Janeiro", "Sao Paulo", "Paris", "New York", "Tokyo"),
value = c(10, 12, 20, 25, 18))
head(stores)
divisions <- data.frame(
city = c("Rio de Janeiro", "Sao Paulo", "Paris", "New York", "Tokyo"),
country = c("Brazil", "Brazil", "France", "US", "Japan"))
head(divisions)
stdiv <- merge(stores, divisions, by.x="city", by.y="city")
head(stdiv)
result <- stdiv %>% group_by(country) %>%
summarize(count = n(), amount = sum(value))
head(result)
# #### statistical tests: t-test
# There are many statistical tests in R.
# One of the most widely used is the t-test. It checks whether the mean of the observations differs from a theoretical value.
weight <- c(60, 72, 57, 90, 95, 72)
height <- c(1.75, 1.80, 1.65, 1.90, 1.74, 1.91)
bmi <- weight/height^2
t.test(bmi, mu=22.5)
| Introduction.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.2.0
# language: julia
# name: julia-1.2
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Density Estimation
# + [markdown] slideshow={"slide_type": "slide"}
# ### Preliminaries
#
# - Goal
# - Simple maximum likelihood estimates for Gaussian and categorical distributions
# - Materials
# - Mandatory
# - These lecture notes
# - Optional
# - Bishop pp. 67-70, 74-76, 93-94
# + [markdown] slideshow={"slide_type": "slide"}
# ### Why Density Estimation?
#
# Density estimation relates to building a model $p(x|\theta)$ from observations $D=\{x_1,\dotsc,x_N\}$.
#
# Why is this interesting? Some examples:
#
# - **Outlier detection**. Suppose $D=\{x_n\}$ are benign mammogram images. Build $p(x | \theta)$ from $D$. Then low value for $p(x^\prime | \theta)$ indicates that $x^\prime$ is a risky mammogram.
# + [markdown] slideshow={"slide_type": "fragment"}
# - **Compression**. Code a new data item based on **entropy**, which is a functional of $p(x|\theta)$:
# $$
# H[p] = -\sum_x p(x | \theta)\log p(x |\theta)
# $$
# + [markdown] slideshow={"slide_type": "fragment"}
# - **Classification**. Let $p(x | \theta_1)$ be a model of attributes $x$ for credit-card holders that paid on time and $p(x | \theta_2)$ for clients that defaulted on payments. Then, assign a potential new client $x^\prime$ to either class based on the relative probability of $p(x^\prime | \theta_1)$ vs. $p(x^\prime|\theta_2)$.
# + [markdown] slideshow={"slide_type": "slide"}
#
# ### Example Problem
#
# <span class="exercise">
# Consider a set of observations $D=\{x_1,…,x_N\}$ in the 2-dimensional plane (see Figure). All observations were generated by the same process. We now draw an extra observation $x_\bullet = (a,b)$ from the same data generating process. What is the probability that $x_\bullet$ lies within the shaded rectangle $S$?
# </span>
#
#
#
#
# + slideshow={"slide_type": "subslide"}
using Distributions, PyPlot
N = 100
generative_dist = MvNormal([0,1.], [0.8 0.5; 0.5 1.0])
function plotObservations(obs::Matrix)
plot(obs[1,:], obs[2,:], "kx", zorder=3)
fill_between([0., 2.], 1., 2., color="k", alpha=0.4, zorder=2) # Shaded area
text(2.05, 1.8, "S", fontsize=12)
xlim([-3,3]); ylim([-2,4]); xlabel("a"); ylabel("b")
end
D = rand(generative_dist, N) # Generate observations from generative_dist
plotObservations(D)
x_dot = rand(generative_dist) # Generate x∙
plot(x_dot[1], x_dot[2], "ro");
# + [markdown] slideshow={"slide_type": "slide"}
# ### Log-Likelihood for a Multivariate Gaussian (MVG)
#
# - Assume we are given a set of IID data points $D=\{x_1,\ldots,x_N\}$, where $x_n \in \mathbb{R}^D$. We want to build a model for these data.
# + [markdown] slideshow={"slide_type": "fragment"}
# - **Model specification**: Let's assume a MVG model $x_n=\mu+\epsilon_n$ with $\epsilon_n \sim \mathcal{N}(0,\Sigma)$, or equivalently,
#
# $$\begin{align*}
# p(x_n|\mu,\Sigma) &= \mathcal{N}(x_n|\mu,\Sigma)
# = |2 \pi \Sigma|^{-1/2} \mathrm{exp} \left\{-\frac{1}{2}(x_n-\mu)^T
# \Sigma^{-1} (x_n-\mu) \right\}
# \end{align*}$$
# + [markdown] slideshow={"slide_type": "fragment"}
# - Since the data are IID, $p(D|\theta)$ factorizes as
#
# $$
# p(D|\theta) = p(x_1,\ldots,x_N|\theta) \stackrel{\text{IID}}{=} \prod_n p(x_n|\theta)
# $$
# + [markdown] slideshow={"slide_type": "fragment"}
# - This choice of model yields the following log-likelihood (use (B-C.9) and (B-C.4)),
#
# $$\begin{align*}
# \log &p(D|\theta) = \log \prod_n p(x_n|\theta) = \sum_n \log \mathcal{N}(x_n|\mu,\Sigma) \tag{1}\\
# &= N \cdot \log | 2\pi\Sigma |^{-1/2} - \frac{1}{2} \sum\nolimits_{n} (x_n-\mu)^T \Sigma^{-1} (x_n-\mu)
# \end{align*}$$
# + [markdown] slideshow={"slide_type": "slide"}
# ### Maximum Likelihood estimation of mean of MVG
#
# - We want to maximize $\log p(D|\theta)$ wrt the parameters $\theta=\{\mu,\Sigma\}$. Let's take derivatives; first to mean $\mu$, (making use of (B-C.25) and (B-C.27)),
#
# $$\begin{align*}
# \nabla_\mu \log p(D|\theta) &= -\frac{1}{2}\sum_n \nabla_\mu \left[ (x_n-\mu)^T \Sigma^{-1} (x_n-\mu) \right] \\
# &= -\frac{1}{2}\sum_n \nabla_\mu \mathrm{Tr} \left[ -2\mu^T\Sigma^{-1}x_n + \mu^T\Sigma^{-1}\mu \right] \\
# &= -\frac{1}{2}\sum_n \left( -2\Sigma^{-1}x_n + 2\Sigma^{-1}\mu \right) \\
# &= \Sigma^{-1}\,\sum_n \left( x_n-\mu \right)
# \end{align*}$$
# + [markdown] slideshow={"slide_type": "fragment"}
# - Setting the derivative to zero yields the **sample mean**
#
# $$\begin{equation*}
# \boxed{
# \hat \mu = \frac{1}{N} \sum_n x_n
# }
# \end{equation*}$$
# + [markdown] slideshow={"slide_type": "slide"}
# ### Maximum Likelihood estimation of variance of MVG
#
# - Now we take the gradient of the log-likelihood wrt the **precision matrix** $\Sigma^{-1}$ (making use of B-C.28 and B-C.24)
#
# $$\begin{align*}
# \nabla_{\Sigma^{-1}} &\log p(D|\theta) \\
# &= \nabla_{\Sigma^{-1}} \left[ \frac{N}{2} \log |2\pi\Sigma|^{-1} - \frac{1}{2} \sum_{n=1}^N (x_n-\mu)^T \Sigma^{-1} (x_n-\mu)\right] \\
# &= \nabla_{\Sigma^{-1}} \left[ \frac{N}{2} \log |\Sigma^{-1}| - \frac{1}{2} \sum_{n=1}^N \mathrm{Tr} \left[ (x_n-\mu) (x_n-\mu)^T \Sigma^{-1}\right] \right]\\
# &= \frac{N}{2}\Sigma -\frac{1}{2}\sum_n (x_n-\mu)(x_n-\mu)^T
# \end{align*}$$
# + [markdown] slideshow={"slide_type": "fragment"}
# Get optimum by setting the gradient to zero,
# $$\begin{equation*}
# \boxed{
# \hat \Sigma = \frac{1}{N} \sum_n (x_n-\hat\mu)(x_n - \hat\mu)^T}
# \end{equation*}$$
# which is also known as the **sample variance**.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Sufficient Statistics
#
# - Note that the ML estimates can also be written as
# $$\begin{equation*}
# \hat \Sigma = \frac{1}{N} \sum_n x_n x_n^T - \hat\mu \hat\mu^T, \quad \hat \mu = \frac{1}{N} \sum_n x_n
# \end{equation*}$$
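# + [markdown] slideshow={"slide_type": "fragment"}
# - To see why, expand the sample variance:
# $$\begin{equation*}
# \frac{1}{N} \sum_n (x_n-\hat\mu)(x_n - \hat\mu)^T = \frac{1}{N} \sum_n x_n x_n^T - \hat\mu \hat\mu^T
# \end{equation*}$$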
# + [markdown] slideshow={"slide_type": "fragment"}
# - I.o.w., the two statistics (a 'statistic' is a function of the data) $\sum_n x_n$ and $\sum_n x_n x_n^T$ are sufficient to estimate the parameters $\mu$ and $\Sigma$ from $N$ observations. In the literature, $\sum_n x_n$ and $\sum_n x_n x_n^T$ are called **sufficient statistics** for the Gaussian PDF.
# + [markdown] slideshow={"slide_type": "fragment"}
# - The actual parametrization of a PDF is always a re-parameteriation of the sufficient statistics.
# + [markdown] slideshow={"slide_type": "fragment"}
# - Sufficient statistics are useful because they summarize all there is to learn about the data set in a minimal set of variables.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Solution to Example Problem
#
# <span class="exercise">
# We apply maximum likelihood estimation to fit a 2-dimensional Gaussian model ($m$) to data set $D$. Next, we evaluate $p(x_\bullet \in S | m)$ by (numerical) integration of the Gaussian pdf over $S$: $p(x_\bullet \in S | m) = \int_S p(x|m) \mathrm{d}x$.</span>
# + slideshow={"slide_type": "subslide"}
using HCubature, LinearAlgebra# Numerical integration package
# Maximum likelihood estimation of 2D Gaussian
μ = 1/N * sum(D,dims=2)[:,1]
D_min_μ = D - repeat(μ, 1, N)
Σ = Hermitian(1/N * D_min_μ*D_min_μ')
m = MvNormal(μ, convert(Matrix, Σ));
# Contour plot of estimated Gaussian density
A = Matrix{Float64}(undef,100,100); B = Matrix{Float64}(undef,100,100)
density = Matrix{Float64}(undef,100,100)
for i=1:100
for j=1:100
A[i,j] = a = (i-1)*6/100 .- 2
B[i,j] = b = (j-1)*6/100 .- 3
density[i,j] = pdf(m, [a,b])
end
end
c = contour(A, B, density, 6, zorder=1)
PyPlot.set_cmap("cool")
clabel(c, inline=1, fontsize=10)
# Plot observations, x∙, and the countours of the estimated Gausian density
plotObservations(D)
plot(x_dot[1], x_dot[2], "ro")
# Numerical integration of p(x|m) over S:
(val,err) = hcubature((x)->pdf(m,x), [0., 1.], [2., 2.])
println("p(x⋅∈S|m) ≈ $(val)")
# + [markdown] slideshow={"slide_type": "slide"}
# ### Discrete Data: the 1-of-K Coding Scheme
#
# - Consider a coin-tossing experiment with outcomes $x \in\{0,1\}$ (tail and head) and let $0\leq \mu \leq 1$ represent the probability of heads. This model can written as a **Bernoulli distribution**:
# $$
# p(x|\mu) = \mu^{x}(1-\mu)^{1-x}
# $$
# - Note that the variable $x$ acts as a (binary) **selector** for the tail or head probabilities. Think of this as an 'if'-statement in programming.
# + [markdown] slideshow={"slide_type": "fragment"}
# - **1-of-K coding scheme**. Now consider a $K$-sided coin (a _die_ (pl.: dice)). It is convenient to code the outcomes by $x=(x_1,\ldots,x_K)^T$ with **binary selection variables**
# $$
# x_k = \begin{cases} 1 & \text{if die landed on $k$th face}\\
# 0 & \text{otherwise} \end{cases}
# $$
# - E.g., for $K=6$, if the die lands on the 3rd face $\,\Rightarrow x=(0,0,1,0,0,0)^T$.
# + [markdown] slideshow={"slide_type": "fragment"}
# - Assume the probabilities $p(x_k=1) = \mu_k$ with $\sum_k \mu_k = 1$. The data generating distribution is then (note the similarity to the Bernoulli distribution)
#
# $$
# p(x|\mu) = \mu_1^{x_1} \mu_2^{x_2} \cdots \mu_k^{x_k}=\prod_k \mu_k^{x_k}
# $$
# + [markdown] slideshow={"slide_type": "fragment"}
# - This generalized Bernoulli distribution is called the **categorical distribution** (or sometimes the 'multi-noulli' distribution).
#
# <!---
#
# - Note that $\sum_k x_k = 1$ and verify for yourself that $\mathrm{E}[x|\mu] = \mu$.
# - In these notes, we use the superscript to indicate that we are working with a **binary selection variable** in a 1-of-$K$ scheme.
# --->
# + [markdown] slideshow={"slide_type": "slide"}
# ### Categorical vs. Multinomial Distribution
#
# - Observe a data set $D=\{x_1,\ldots,x_N\}$ of $N$ IID rolls of a $K$-sided die, with generating PDF
# $$
# p(D|\mu) = \prod_n \prod_k \mu_k^{x_{nk}} = \prod_k \mu_k^{\sum_n x_{nk}} = \prod_k \mu_k^{m_k}
# $$
# where $m_k= \sum_n x_{nk}$ is the total number of occurrences that we 'threw' $k$ eyes.
# + [markdown] slideshow={"slide_type": "fragment"}
# - This distribution depends on the observations **only** through the quantities $\{m_k\}$, with generally $K \ll N$.
# + [markdown] slideshow={"slide_type": "fragment"}
# - A related distribution is the distribution over $D_m=\{m_1,\ldots,m_K\}$, which is called the **multinomial distribution**,
# $$
# p(D_m|\mu) =\frac{N!}{m_1! m_2!\ldots m_K!} \,\prod_k \mu_k^{m_k}\,.
# $$
# + [markdown] slideshow={"slide_type": "fragment"}
# - The catagorical distribution $p(D|\mu) = p(\,x_1,\ldots,x_N\,|\,\mu\,)$ is a distribution over the **observations** $\{x_1,\ldots,x_N\}$, whereas the multinomial distribution $p(D_m|\mu) = p(\,m_1,\ldots,m_K\,|\,\mu\,)$ is a distribution over the **data frequencies** $\{m_1,\ldots,m_K\}$.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Maximum Likelihood Estimation for the Multinomial
#
# - Now let's find the ML estimate for $\mu$, based on $N$ throws of a $K$-sided die. Again we use the shorthand $m_k \triangleq \sum_n x_{nk}$.
# + [markdown] slideshow={"slide_type": "fragment"}
# - The log-likelihood for the multinomial distribution is given by
#
# $$\begin{align*}
# \mathrm{L}(\mu) &\triangleq \log p(D_m|\mu) \propto \log \prod_k \mu_k^{m_k} = \sum_k m_k \log \mu_k \tag{2}
# \end{align*}$$
# + [markdown] slideshow={"slide_type": "fragment"}
# - When doing ML estimation, we must obey the constraint $\sum_k \mu_k = 1$, which can be accomplished by a <span style="color:red">Lagrange multiplier</span>. The **augmented log-likelihood** with Lagrange multiplier is then
#
# $$
# \mathrm{L}^\prime(\mu) = \sum_k m_k \log \mu_k + \lambda \cdot (1 - \sum_k \mu_k )
# $$
# + [markdown] slideshow={"slide_type": "fragment"}
# - Set derivative to zero yields the **sample proportion** for $\mu_k$
#
# $$\begin{equation*}
# \nabla_{\mu_k} \mathrm{L}^\prime = \frac{m_k }
# {\hat\mu_k } - \lambda \overset{!}{=} 0 \; \Rightarrow \; \boxed{\hat\mu_k = \frac{m_k }{N}}
# \end{equation*}$$
#
# where we get $\lambda$ from the constraint
#
# $$\begin{equation*}
# \sum_k \hat \mu_k = \sum_k \frac{m_k}
# {\lambda} = \frac{N}{\lambda} \overset{!}{=} 1
# \end{equation*}$$
#
# <!---
# - Interesting special case: **Binomial** (=$N$ coin tosses):
# $$p(x_n|\theta)= \theta^{[x_n=h]}(1-\theta)^{[x_n=t]}=\theta_h^{[x_n=h]} \theta_t^{[x_n=t]}
# $$
# yields $$\hat \theta = \frac{N_h}{N_h +N_t} $$
#
# - Compare this answer to Laplace's rule for predicting the next coin toss (in probability theory lesson) $$p(\,x_\bullet=h\,|\,\theta\,)=\frac{N_h+1}{N_h +N_t+2}\,.$$ What is the source of the difference?
# --->
# + [markdown] slideshow={"slide_type": "slide"}
# ### Recap ML for Density Estimation
#
# Given $N$ IID observations $D=\{x_1,\dotsc,x_N\}$.
#
# - For a **multivariate Gaussian** model $p(x_n|\theta) = \mathcal{N}(x_n|\mu,\Sigma)$, we obtain ML estimates
#
# $$\begin{align}
# \hat \mu &= \frac{1}{N} \sum_n x_n \tag{sample mean} \\
# \hat \Sigma &= \frac{1}{N} \sum_n (x_n-\hat\mu)(x_n - \hat \mu)^T \tag{sample variance}
# \end{align}$$
# + [markdown] slideshow={"slide_type": "fragment"}
# - For discrete outcomes modeled by a 1-of-K **categorical distribution** we find
#
# $$\begin{align}
# \hat\mu_k = \frac{1}{N} \sum_n x_{nk} \quad \left(= \frac{m_k}{N} \right) \tag{sample proportion}
# \end{align}$$
#
#
# + [markdown] slideshow={"slide_type": "fragment"}
#
# - Note the similarity for the means between discrete and continuous data.
# + [markdown] slideshow={"slide_type": "fragment"}
# - We didn't use a co-variance matrix for discrete data. Why?
# + slideshow={"slide_type": "skip"}
open("../../styles/aipstyle.html") do f
display("text/html", read(f, String))
end
# -
| lessons/notebooks/misc/Density-Estimation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/jadetan6/CPEN-20A-ECE-2-3/blob/main/Demo1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="DzXzNyFQuCkl"
# ##Python Indention
# + colab={"base_uri": "https://localhost:8080/"} id="JK7RtBdcjkCp" outputId="bf9825ad-8279-4c8d-911e-615bc86b774a"
if 5>2:
print ("Five is greater than two")
# + [markdown] id="SEdybkLvlw4r"
# ##Python Variable
# + colab={"base_uri": "https://localhost:8080/"} id="w8_fN_yml7AR" outputId="d9cbe57a-9b0c-47a1-846d-8a5f97616d2c"
a, b, c = 0,1,2
d= "Sally" #This is a type of string
s= 'Mark' #This is a type of string
A= "Raymond" #This is a type of string
print (a)
print (b)
print (c)
print (d)
print (s)
print (A)
# + colab={"base_uri": "https://localhost:8080/"} id="RTmvqLCLxh9-" outputId="761587cf-c20e-4809-ae1d-e6ea25bff722"
x = y = z = "four"
print (x)
print (y)
print (z)
# + colab={"base_uri": "https://localhost:8080/"} id="Sa2ScXzoxyRi" outputId="a6182a0d-5e50-4894-92ac-ff17c8da7e78"
x = "enjoying"
print("Python programming is" " " + x)
# + colab={"base_uri": "https://localhost:8080/"} id="TIGzopKwyKzF" outputId="2a50df4c-05c9-4e91-d027-7ae2ad05cd8d"
x= "Python is "
y= "enjoying"
z= x+y
print (z)
# + [markdown] id="jlVnU3WcjS2U"
# ##Casting
# + colab={"base_uri": "https://localhost:8080/"} id="wu_LSEBzwWJE" outputId="35a92c12-9680-47a8-bfea-37e9cca46092"
b="Sally" #This is a type of string
b=int(4)
print (b)
# + colab={"base_uri": "https://localhost:8080/"} id="gpsf-xoMxR59" outputId="269c73ef-913f-432e-c757-bc489040927b"
b= float (4)
print (b)
# + [markdown] id="iLXGP6Q0nzKc"
# ##Operations in Python
#
# + colab={"base_uri": "https://localhost:8080/"} id="QF4NE-2lpULJ" outputId="576ace94-9619-4fac-ff5a-ef6513b68e96"
k = 10
l = 5
print (k+l)
# + colab={"base_uri": "https://localhost:8080/"} id="V412XpPxn2jU" outputId="f4ff3ca6-649e-4c6e-801b-b7275f3f9267"
k+=l #Is the same as k = k+l
print (k)
# + colab={"base_uri": "https://localhost:8080/"} id="iKO0MDe4qJ3S" outputId="f1886766-4f3f-46da-93b3-2baa2a1bcc6a"
k>l and l==l
# + colab={"base_uri": "https://localhost:8080/"} id="0oz-naP1qRu6" outputId="a16cff0d-b28c-4787-f698-2e2341498d43"
k<l and k==k
# + colab={"base_uri": "https://localhost:8080/"} id="dH9_P0Ugqfhn" outputId="2ddb6b77-0d07-4779-a98e-5a4ba11d5619"
not(k<l) or k==k
# + colab={"base_uri": "https://localhost:8080/"} id="HYzcg-tcqpcf" outputId="e070e0dc-06cd-4e9f-ec12-043886176732"
k is l
# + colab={"base_uri": "https://localhost:8080/"} id="ZTPQiF7wq8eZ" outputId="456e4586-693c-4c96-abc2-53dff2968a77"
k%=5
k
# + colab={"base_uri": "https://localhost:8080/"} id="ThTayvrYzLWv" outputId="4aaef538-c4ff-46d4-bc27-05a66c6b4c76"
a,b,c = 0,-1,5
a is c
| Demo1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Predicting categories with Logistic Regression
#
# **Aim**: The aim of this notebook is to predict if a mobile transaction is fraudulent or not by using the logistic regression algorithm with scikit-learn.
#
# ## Table of contents
#
# 1. Implementing the logistic regression algorithm
# 2. Fine-tuning parameters using GridSearchCV
# 3. Scaling
# 4. Interpreting the results
# ## Package Requirements
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn import linear_model
from sklearn.model_selection import GridSearchCV
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
# +
# Reading in the dataset
df = pd.read_csv('fraud_prediction.csv')
# -
# ## Implementing the logistic regression algorithm
# **Splitting the data into training and test sets**
# +
#Creating the features
features = df.drop('isFraud', axis = 1).values
target = df['isFraud'].values
# -
X_train, X_test, y_train, y_test = train_test_split(features, target, test_size = 0.3, random_state = 42, stratify = target)
# **Creating and evaluating the base classifier**
# +
#Initializing a logistic regression object
logistic_regression = linear_model.LogisticRegression()
#Fitting the model to the training and test sets
logistic_regression.fit(X_train, y_train)
# +
#Accuracy score of the logistic regression model
logistic_regression.score(X_test, y_test)
# -
# ## Fine tuning parameters using GridSearchCV
# +
#Building the model with an L1 penalty
logistic_regression = linear_model.LogisticRegression(penalty='l1')
#Using GridSearchCV to search for the best parameter
grid = GridSearchCV(logistic_regression, {'C':[0.0001, 0.001, 0.01, 0.1, 10]})
grid.fit(X_train, y_train)
# Print out the best parameter
print("The most optimal inverse regularization strength is:", grid.best_params_)
# +
#Initializing a logistic regression object
logistic_regression = linear_model.LogisticRegression(C = 10, penalty = 'l1')
#Fitting the model to the training and test sets
logistic_regression.fit(X_train, y_train)
# +
#Accuracy score of the logistic regression model
logistic_regression.score(X_test, y_test)
# +
train_errors = []
test_errors = []
C_list = [0.0001, 0.001, 0.01, 0.1, 10, 100, 1000]
# Evaluate the training and test accuracy scores for each value of C
for value in C_list:
# Create LogisticRegression object and fit
logistic_regression = linear_model.LogisticRegression(C= value, penalty = 'l1')
logistic_regression.fit(X_train, y_train)
    # Record the accuracy scores on the training and test sets
train_errors.append(logistic_regression.score(X_train, y_train) )
test_errors.append(logistic_regression.score(X_test, y_test))
# Plot results
plt.semilogx(C_list, train_errors, C_list, test_errors)
plt.legend(("train", "test"))
plt.ylabel('Accuracy Score')
plt.xlabel('C (Inverse regularization strength)')
plt.show()
# -
# ## Scaling your data
# +
#Setting up the scaling pipeline
pipeline_order = [('scaler', StandardScaler()), ('logistic_reg', linear_model.LogisticRegression(C = 10, penalty = 'l1'))]
pipeline = Pipeline(pipeline_order)
#Fitting the classifier to the scaled dataset
logistic_regression_scaled = pipeline.fit(X_train, y_train)
#Extracting the score
logistic_regression_scaled.score(X_test, y_test)
# -
# ## Interpreting the results of the model
# +
#Printing out the coefficients of each variable
print(logistic_regression.coef_)
# +
#Printing out the intercept of the model
print(logistic_regression.intercept_)
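# +
# A follow-on sketch (illustrative; it assumes the feature columns of `df` are in the
# same order as the feature matrix built above): pair each coefficient with its
# feature name so the printed values are easier to read.
feature_names = df.drop('isFraud', axis = 1).columns
for feature, coefficient in zip(feature_names, logistic_regression.coef_[0]):
    print(feature, ':', coefficient)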
| Chapter_03.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Data is like oil
# People suggestions on LinkedIn
#
# Advertising on Facebook, Google
#
# CTR = click-through rate
#
# Decision trees are used to predict whether a person will click on an ad or buy a given product
#
# Logistic regression is used to make a binary decision (yes or no)
#
# Whether to grant credit to a person
#
# Predicting crimes (a method used in Spain)
#
# What a user does on their phone.
#
# Sports in which statistics are used to win
#
#
| Theory/Ejemplos de data science.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [Root]
# language: python
# name: Python [Root]
# ---
# HIDDEN
from datascience import *
from prob140 import *
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
# %matplotlib inline
import math
from scipy import stats
# # Random Counts #
# These form a class of random variables that are of fundamental importance in probability theory. You have seen some examples already: the number of matches (fixed points) in a random permutation of $n$ elements is an example of a "random count", as is the number of good elements in a simple random sample.
#
# The general setting is that there are a number of trials, each of which can be a success or a failure. The random count is the number of successes among all the trials.
#
# The distribution of the number of successes depends on the underlying assumptions of randomness. In this chapter we will study independent, identically distributed trials. Neither the matching problem nor simple random sampling fits this framework. However, we will see that both of these settings can be closely approximated by independent trials under some conditions on the parameters.
#
# Finally, we will discover some remarkable properties of random counts when the number of trials is itself random. Data science includes many powerful methods that are based on randomizing parameters.
#
# Let's start off with the simplest random count, that is a count that can only be either 0 or 1.
# ### Indicators and the Bernoulli $(p)$ Distribution ###
# Consider a trial that can only result in one success or one failure. The number of successes $X$ is thus a zero-one valued random variable and is said to have the *Bernoulli $(p)$ distribution* where $p = P(X = 1)$ is the probability of success.
#
# This very simple random count $X$ is called the *indicator of success* on the trial.
#
# Here is the probability histogram of a random variable $X$ that has the Bernoulli $(1/3)$ distribution.
bern_1_3 = Table().values([0,1]).probability([2/3, 1/3])
Plot(bern_1_3)
plt.xlabel('Value of $X$')
plt.title('Bernoulli (1/3)');
# ### Counting is the Same as Adding Zeros and Ones ###
#
# Consider a sequence of $n$ trials and for $1 \le i \le n$ let $X_i$ be the indicator of success on Trial $i$.
#
# The sum $S_n = X_1 + X_2 + \cdots + X_n$ is then the total number of successes in the $n$ trials. For example, if $n=3$ and $X_1 = 0$, $X_2 = 0$, and $X_3 = 1$, then there is one success in the three trials and $S_3 = 1$. As you increase the number of trials, the count stays level at every $i$ for which $X_i = 0$, and increases by 1 at each $i$ for which $X_i = 1$.
#
# We will start out by assuming that all the $X_i$'s are i.i.d. That is, trials are mutually independent and the chance of success in a fixed trial is the same for all trials.
#
# To fix such an example in your mind, think of the trials as being 7 rolls of a die, and let $X_i$ be the indicator of getting a six on roll $i$. Each $X_i$ has the Bernoulli $(1/6)$ distribution and all the $X_i$'s are independent. Their sum $S_7$ is the number of sixes in the 7 rolls.
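# A quick simulation sketch of this example (the number of repetitions below is an arbitrary choice): estimate the distribution of $S_7$, the number of sixes in 7 rolls, and compare the simulated mean with the expected count $7 \times \frac{1}{6}$.

rolls = np.random.randint(1, 7, size=(100000, 7))   # 100,000 repetitions of 7 die rolls
s_7 = np.sum(rolls == 6, axis=1)                    # number of sixes in each repetition
np.mean(s_7), 7 * (1/6)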
| notebooks/Chapter_06/00_Random_Counts.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7.6 64-bit
# name: python3
# ---
# from swaptest import cswaptest
import numpy as np
import random as rd
import base.knn
import itertools
from sklearn import datasets, neighbors
# hyperparameters
n_variables = 4
n_train_points = 16
n_test_points = int(n_train_points*0.3)
k = 7
# +
# use iris dataset
iris = datasets.load_iris()
labels = iris.target
data_raw = iris.data
# get training indices
randomIndices0 = rd.sample(range(0, 50), int(n_train_points/3))
randomIndices1 = rd.sample(range(55, 100), int(n_train_points/3))
randomIndices2 = rd.sample(range(105, 150), n_train_points-int(n_train_points/3)*2)
indicsTrain = list(itertools.chain(randomIndices0, randomIndices1, randomIndices2))
# get test indices
n_test = n_test_points
indicsTest = []
while n_test != 0:
random = (rd.sample(range(0, 150), 1))[0]
if random not in indicsTest and random not in indicsTrain:
indicsTest.append(random)
n_test = n_test - 1
# now pick these data with given indices
train_datas = np.asarray([data_raw[i] for i in indicsTrain])
train_labels = np.asarray([labels[i] for i in indicsTrain])
test_datas = np.asarray([data_raw[i] for i in indicsTest])
test_labels = np.asarray([labels[i] for i in indicsTest])
# -
# predict
clf = neighbors.KNeighborsClassifier(n_neighbors = k)
clf.fit(train_datas, train_labels)
y_pred = clf.predict(test_datas)
accuracy, precision, recall, matrix = base.knn.bench_mark(test_labels, y_pred)
print('accuracy: ', accuracy)
print('precision: ', precision)
print('recall: ', recall)
print('matrix: ', matrix)
| jupyter/knn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/joeusebio/Linear-Algebra-58020/blob/main/Final%20Exam.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="Vy0I0uNES14y" outputId="482d5d6e-edda-434c-d516-3af32b997385"
print("problem 1 ans.")
eq1 = np.array([[1,1,1],[1,0,4],[0,0,5]])
eq2 = np.array([[89],[89],[95]])
amt = np.linalg.inv(eq1).dot(eq2)
print(amt)
# + colab={"base_uri": "https://localhost:8080/"} id="zRuj9gVRS2l9" outputId="c012ee9c-10de-4fb3-9b13-85201ce79439"
print("di ko alam kung anong problema nya pero ginawa ko naman lahat para di siya mag loko pero huli na ang lahat")
print("p.s:Merry Christmas maam no time na po thank you")
# + colab={"base_uri": "https://localhost:8080/"} id="nmCPO58fStI-" outputId="82e8ff95-56fd-4ada-d942-588bbcd6dada"
import numpy as np
from numpy.linalg import eig
print("problem 3 ans.")
A = np.array([[8,5,-6],[-12,-9,12],[-3,-3,5]])
print(A,"\n")
w,v = np.linalg.eig(A)
print("The eigenvalue/s is/are:\n",w.round(),"\n")
print("The eigenvector/s is/are:\n",v.round())
| Final Exam.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Inflected verbs
#
# #### Set-up and initialization
# +
import os
import random
import re
import urllib.request
from itertools import combinations, permutations
import requests
from bs4 import BeautifulSoup
import pandas as pd
# +
try:
os.mkdir('temp')
except FileExistsError:
pass
if os.path.isfile('./realwords.txt'):
with open('./realwords.txt') as rw_file:
realwords = {_.strip() for _ in rw_file.readlines()}
else:
realwords = set()
# -
banned_pairs = set()
for fn in ['cognates', 'cognates_maxlen12', 'homophones', 'homophones_maxlen12',
'misspellings', 'misspellings_maxlen12', 'surnames', 'surnames_maxlen12',
'forenames', 'forenames_maxlen12', 'typos',
'fake_words', 'fake_words_maxlen12', 'random_words', 'random_words_maxlen12']:
with open(f'../{fn}.csv') as fh:
new_pairs = {tuple(pair.strip().split(',')) for pair in fh.readlines()[1:]}
banned_pairs |= new_pairs
banned_pairs |= {(_[1], _[0]) for _ in banned_pairs}
banned_pairs |= {(_[0].lower(), _[1].lower()) for _ in banned_pairs}
banned_pairs |= {(_[1].lower(), _[0].lower()) for _ in banned_pairs}
banned_pairs |= {(_[0][0].upper()+_[0][1:], _[1][0].upper()+_[1][1:]) for _ in banned_pairs}
banned_pairs |= {(_[1][0].upper()+_[1][1:], _[0][0].upper()+_[0][1:]) for _ in banned_pairs}
# #### Grab basis dataset
# +
def clean(s):
s = s.replace('†', '')
s = s.replace('*', '')
s = s.replace('/', ' ')
s = re.sub(r'\(.+?\)', '', s)
return s.strip()
url = "https://en.wiktionary.org/wiki/Appendix:English_irregular_verbs"
res = requests.get(url, headers={'Cache-Control': 'no-cache'})
soup = BeautifulSoup(res.content,'lxml')
conj = []
for row in soup.table.tbody.find_all('tr')[1:]:
cell = row.find('td').find_all('i')
conj += cell
conj = [clean(_.text).split() for _ in conj]
# +
url = "https://www.usingenglish.com/reference/irregular-verbs/"
res = requests.get(url, headers={'Cache-Control': 'no-cache'})
soup = BeautifulSoup(res.content,'lxml')
table = soup.find_all('table')
df = pd.read_html(str(table))
conj += list(df[0].dropna().apply(lambda x: clean(' '.join(str(_).lower() for _ in x)).replace(',', '').split(), axis=1))
# -
conj_dict = {}
for words in conj:
if words[0] not in conj_dict:
conj_dict[words[0]] = set()
conj_dict[words[0]] |= set(words)
# +
url = 'https://raw.githubusercontent.com/en-wl/wordlist/master/agid/infl.txt'
if not os.path.isfile(os.path.join('temp', 'infl.txt')):
urllib.request.urlretrieve(url, os.path.join('temp', url.split('/')[-1]))
with open(os.path.join('temp', 'infl.txt')) as fh:
lines = [_.strip() for _ in fh.readlines()]
# +
def clean(s):
for _ in '|:~<>!{}':
if _ in s:
return ''
if s[0].isdigit():
return ''
return s.replace(',', '')
new_lines = []
for line in lines:
if line[0] != line[0].upper():
if '?' not in line and 'V' in line:
line = [clean(_) for _ in line.split() if clean(_)]
if len(line) > 1:
new_lines.append(line)
# -
random.seed(377)
random.shuffle(new_lines)
n = 500
for line in new_lines:
if line[0] in conj_dict:
conj_dict[line[0]] |= set(line[1:])
elif n > 0:
n -= 1
conj_dict[line[0]] = set(line[1:])
conj_list = []
for infinitive in sorted(conj_dict.keys()):
for conjugate in sorted(conj_dict[infinitive]):
if infinitive != conjugate and infinitive and conjugate:
conj_list.append((conjugate, infinitive))
# #### Randomly select from the conjugated pairs and write output
# +
conj = conj_list[:]
random.seed(227)
random.shuffle(conj)
wc = 0
with open('../conjugated.csv', 'w') as file:
file.write('conjugated,infinitive\n')
for pair in conj:
if pair not in banned_pairs and wc < 2400:
file.write(','.join(pair)+'\n')
wc += 1
# +
conj = list(filter(lambda x: len(x[0]) < 13 and len(x[1]) < 13, conj_list))
random.seed(77271)
random.shuffle(conj)
wc = 0
with open('../conjugated_maxlen12.csv', 'w') as file:
file.write('conjugated,infinitive\n')
for pair in conj:
if pair not in banned_pairs and wc < 2400:
file.write(','.join(pair)+'\n')
wc += 1
# -
# #### Write out list of real words
# +
for line in new_lines:
if line[0] in conj_dict:
conj_dict[line[0]] |= set(line[1:])
else:
conj_dict[line[0]] = set(line[1:])
realwords = set(conj_dict.keys())
for wl in conj_dict.values():
realwords |= wl
realwords = sorted(realwords)
with open('./realwords_conj.txt', 'w') as rw_file:
for word in sorted(realwords):
rw_file.write(word+'\n')
| binder/11 Regenerate inflected verbs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# In a histogram, the x axis shows the values of a variable and the y axis shows the frequency of those values
# %matplotlib inline
import matplotlib.pyplot as plt
# We have sample blood sugar data for a number of patients. We will plot the number of patients per blood sugar range and figure out how many patients are normal, pre-diabetic and diabetic
blood_sugar = [113, 85, 90, 150, 149, 88, 93, 115, 135, 80, 77, 82, 129]
plt.hist(blood_sugar, rwidth=0.8) # by default number of bins is set to 10
# ## bins parameter
plt.hist(blood_sugar,rwidth=0.5,bins=4)
# ## Histogram showing normal, pre-diabetic and diabetic patients distribution
# <ul style="color:brown;font-weight: bold;">
# <li>80-100: Normal</li>
# <li>100-125: Pre-diabetic</li>
# <li>125-150: Diabetic</li>
# </ul>
# +
plt.xlabel("Sugar Level")
plt.ylabel("Number Of Patients")
plt.title("Blood Sugar Chart")
plt.hist(blood_sugar, bins=[80,100,125,150], rwidth=0.95, color='g')
# -
# ## Multiple data samples in a histogram
# +
plt.xlabel("Sugar Level")
plt.ylabel("Number Of Patients")
plt.title("Blood Sugar Chart")
blood_sugar_men = [113, 85, 90, 150, 149, 88, 93, 115, 135, 80, 77, 82, 129]
blood_sugar_women = [67, 98, 89, 120, 133, 150, 84, 69, 89, 79, 120, 112, 100]
plt.hist([blood_sugar_men,blood_sugar_women], bins=[80,100,125,150], rwidth=0.95, color=['green','orange'],label=['men','women'])
plt.legend()
# -
# ## histtype=step
# +
plt.xlabel("Sugar Level")
plt.ylabel("Number Of Patients")
plt.title("Blood Sugar Chart")
plt.hist(blood_sugar,bins=[80,100,125,150],rwidth=0.95,histtype='step')
# -
# ## Horizontal orientation
# +
plt.xlabel("Number Of Patients")
plt.ylabel("Sugar Level")
plt.title("Blood Sugar Chart")
plt.hist(blood_sugar, bins=[80,100,125,150], rwidth=0.95, orientation='horizontal')
# -
| 03_DataVisualization/05_HistogramPlots.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Dummy Variables Exercise
#
# In this exercise, you'll create dummy variables from the projects data set. The idea is to transform categorical data like this:
#
# | Project ID | Project Category |
# |------------|------------------|
# | 0 | Energy |
# | 1 | Transportation |
# | 2 | Health |
# | 3 | Employment |
#
# into new features that look like this:
#
# | Project ID | Energy | Transportation | Health | Employment |
# |------------|--------|----------------|--------|------------|
# | 0 | 1 | 0 | 0 | 0 |
# | 1 | 0 | 1 | 0 | 0 |
# | 2 | 0 | 0 | 1 | 0 |
# | 3 | 0 | 0 | 0 | 1 |
#
#
# (Note if you were going to use this data with a model influenced by multicollinearity, you would want to eliminate one of the columns to avoid redundant information.)
#
# The reasoning behind these transformations is that machine learning algorithms read in numbers, not text, so text needs to be converted into numbers. You could assign a number to each category like 1, 2, 3, and 4. But a categorical variable has no inherent order, so you want your features to reflect this.
#
# Pandas makes it very easy to create dummy variables with the [get_dummies](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.get_dummies.html) method. In this exercise, you'll create dummy variables from the World Bank projects data; however, there's a caveat. The World Bank data is not particularly clean, so you'll need to explore and wrangle the data first.
#
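# As a minimal sketch of what `get_dummies` does, here is the toy table above run through it (the toy dataframe and its column names are just for illustration):
# +
import pandas as pd
toy = pd.DataFrame({'Project ID': [0, 1, 2, 3],
                    'Project Category': ['Energy', 'Transportation', 'Health', 'Employment']})
# expands 'Project Category' into one indicator column per category
pd.get_dummies(toy, columns=['Project Category'])
# -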
# You'll focus on the text values in the sector variables.
#
# Run the code cells below to read in the World Bank projects data set and then to filter out the data for text variables.
# +
import pandas as pd
import numpy as np
# read in the projects data set and do basic wrangling
projects = pd.read_csv('../data/projects_data.csv', dtype=str)
projects.drop('Unnamed: 56', axis=1, inplace=True)
projects['totalamt'] = pd.to_numeric(projects['totalamt'].str.replace(',', ''))
projects['countryname'] = projects['countryname'].str.split(';', expand=True)[0]
projects['boardapprovaldate'] = pd.to_datetime(projects['boardapprovaldate'])
# keep the project name, lending, sector and theme data
sector = projects.copy()
sector = sector[['project_name', 'lendinginstr', 'sector1', 'sector2', 'sector3', 'sector4', 'sector5', 'sector',
'mjsector1', 'mjsector2', 'mjsector3', 'mjsector4', 'mjsector5',
'mjsector', 'theme1', 'theme2', 'theme3', 'theme4', 'theme5', 'theme ',
'goal', 'financier', 'mjtheme1name', 'mjtheme2name', 'mjtheme3name',
'mjtheme4name', 'mjtheme5name']]
# -
# Run the code cell below. This cell shows the percentage of each variable that is null. Notice the mjsector1 through mjsector5 variables are all null. The mjtheme1name through mjtheme5name are also all null as well as the theme variable.
#
# Because these variables contain so many null values, they're probably not very useful.
# output percentage of values that are missing
100 * sector.isnull().sum() / sector.shape[0]
# # Exercise 1
# The sector1 variable looks promising; it doesn't contain any null values at all. In the next cell, store the unique sector1 values in a list and output the results. Use the sort_values() and unique() methods.
# TODO: Create a list of the unique values in sector1. Use the sort_values() and unique() pandas methods.
# And then convert those results into a Python list
uniquesectors1 = list(sector['sector1'].sort_values().unique())
uniquesectors1
# run this code cell to see the number of unique values
print('Number of unique values in sector1:', len(uniquesectors1))
# 3060 different categories is quite a lot! Remember that with dummy variables, if you have n categorical values, you need n - 1 new variables! That means 3059 extra columns!
#
# # Exercise 2
#
# There are a few issues with this 'sector1' variable. First, there are values labeled '!$!0'. These should be substituted with NaN.
#
# Furthermore, each sector1 value ends with a ten or eleven character string like '!$!49!$!EP'. Some sectors show up twice in the list like:
# 'Other Industry; Trade and Services!$!70!$!YZ',
# 'Other Industry; Trade and Services!$!63!$!YZ',
#
# But it seems like those are actually the same sector. You'll need to remove everything past the exclamation point.
#
# Many values in the sector1 variable start with the term '(Historic)'. Try removing that phrase as well.
#
# ### replace() method
#
# With pandas, you can use the replace() method to search for text and replace parts of a string with another string. If you know the exact string you're looking for, the replace() method is straightforward. For example, say you wanted to remove the string '(Trial)' from this data:
#
# | data |
# |--------------------------|
# | '(Trial) Banking' |
# | 'Banking' |
# | 'Farming' |
# | '(Trial) Transportation' |
#
# You could use `df['data'].str.replace('(Trial) ', '', regex=False)` to strip the literal string '(Trial) ' from each value. (The element-wise `replace()` method only swaps whole cell values unless you pass `regex=True`, so `str.replace()` is the simpler tool for plain substrings.)
#
# What about this data?
#
# | data |
# |------------------------------------------------|
# | 'Other Industry; Trade and Services?$ab' |
# | 'Other Industry; Trade and Services?ceg' |
#
# This type of data is trickier. In this case, there's a pattern where you want to remove a string that starts with an exclamation point and then has an unknown number of characters after it. When you need to match patterns of character, you can use [regular expressions](https://en.wikipedia.org/wiki/Regular_expression).
#
# The replace method can also take a regular expression. So
# `df['data'].replace(r'\?.+', '', regex=True)` removes the question mark and everything after it: the pattern `\?.+` matches a question mark (escaped, since '?' is a regex metacharacter) followed by one or more characters. You can see a [regular expression cheat sheet](https://medium.com/factory-mind/regex-tutorial-a-simple-cheatsheet-by-examples-649dc1c3f285) here.
#
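# Here is a minimal sketch of both flavors on the toy data above (illustrative only; the dataframe is made up for this example):
# +
import pandas as pd
toy = pd.DataFrame({'data': ['(Trial) Banking', 'Banking', 'Farming',
                             'Other Industry; Trade and Services?$ab',
                             'Other Industry; Trade and Services?ceg']})
toy['data'] = toy['data'].str.replace('(Trial) ', '', regex=False)  # literal substring
toy['data'] = toy['data'].replace(r'\?.+', '', regex=True)          # regex: '?' plus everything after it
toy
# -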
# Fix these issues in the code cell below.
# +
# TODO: In the sector1 variable, replace the string '!$!0' with nan
# HINT: you can use the pandas replace() method and numpy.nan
sector['sector1'] = sector['sector1'].replace('!$!0', np.nan)
# TODO: In the sector1 variable, remove the last 10 or 11 characters from the sector1 variable.
# HINT: There is more than one way to do this including the replace method
# HINT: You can use a regex expression '!.+'
# That regex expression looks for a string with an exclamation
# point followed by one or more characters
sector['sector1'] = sector['sector1'].replace('!.+', '', regex=True)
# TODO: Remove the string '(Historic)' from the sector1 variable
# HINT: You can use the replace method
sector['sector1'] = sector['sector1'].replace('^(\(Historic\))', '', regex=True)
print('Number of unique sectors after cleaning:', len(list(sector['sector1'].unique())))
print('Percentage of null values after cleaning:', 100 * sector['sector1'].isnull().sum() / sector['sector1'].shape[0])
# -
# Now there are 156 unique categorical values. That's better than 3060. If you were going to use this data with a supervised learning machine model, you could try converting these 156 values to dummy variables. You'd still have to train and test a model to see if those are good features.
#
# You could try to consolidate similar categories together, which is what the challenge exercise in part 4 is about.
#
# There are also still many entries with NaN values. How could you fill these in?
#
# You might try to determine an appropriate category from the 'project_name' or 'lendinginstr' variables. If you make dummy variables including NaN values, then you could consider a feature with all zeros to represent NaN. Or you could delete these records from the data set. Pandas will ignore NaN values by default. That means, for a given row, all dummy variables will have a value of 0 if the sector1 value was NaN.
#
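# As a small illustration of the NaN behavior (a sketch, not part of the exercise itself): by default `get_dummies` turns a NaN entry into an all-zero row, while `dummy_na=True` adds an explicit NaN indicator column.
# +
import numpy as np
import pandas as pd
example = pd.Series(['Energy', np.nan, 'Health'])
print(pd.get_dummies(example))                  # the NaN row becomes all zeros
print(pd.get_dummies(example, dummy_na=True))   # adds an explicit NaN column
# -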
# Don't forget about the bigger context! This data is being prepared for a machine learning algorithm. Whatever techniques you use to engineer new features, you'll need to use those when running your model on new data. So if your new data does not contain a sector1 value, you'll have to run whatever feature engineering processes you did on your training set.
# # Exercise 3
#
# In this next exercise, use the pandas pd.get_dummies() method to create dummy variables. Then use the concat() method to concatenate the dummy variables to a dataframe that contains the project totalamt variable and the project year from the boardapprovaldate.
# +
# TODO: Create dummy variables from the sector1_aggregates data. Put the results into a dataframe called dummies
# Hint: Use the get_dummies method
dummies = pd.DataFrame(pd.get_dummies(sector['sector1']))
# TODO: Filter the projects data for the totalamt, the year from boardapprovaldate, and the dummy variables
projects['year'] = projects['boardapprovaldate'].dt.year
df = projects[['totalamt','year']]
df_final = pd.concat([df, dummies], axis=1)
df_final.head()
# -
# You could continue to consolidate sector values using other techniques. For example, in the next exercise, you'll find categories with similar terms and then combine them together.
#
# Keep in mind that how much to consolidate will depend on your machine learning model performance and your hardware's ability to handle the extra features in memory. If your hardware's memory can handle 3060 new features and your machine learning algorithm performs better, then go for it!
# # Exercise 4 (Challenge)
#
# But can you do anything else with the sector1 variable?
#
# The percentage of null values for 'sector1' is now 3.49%. That turns out to be the same number as the null values for the 'sector' column. You can see this if you scroll back up to where the code calculated the percentage of null values for each variable.
#
# Perhaps the 'sector1' and 'sector' variable have the same information. If you look at the 'sector' variable, however, it also needs cleaning. The values look like this:
#
# 'Urban Transport;Urban Transport;Public Administration - Transportation'
#
# It turns out the 'sector' variable combines information from the 'sector1' through 'sector5' variables and the 'mjsector' variable. Run the code cell below to look at the sector variable.
sector['sector']
# What else can you do? If you look at all of the different sector1 categories, it might be useful to combine a few of them together. For example, there are various categories with the term "Energy" in them. And then there are other categories that seem related to energy but don't have the word energy in them like "Thermal" and "Hydro". Some categories have the term "Renewable Energy", so perhaps you could make a separate "Renewable Energy" category.
#
# Similarly, there are categories with the term "Transportation" in them, and then there are related categories like "Highways".
#
# In the next cell, find all sector1 values with the term 'Energy' in them. For each of these rows, put the string 'energy' in a new column called 'sector1_aggregates'. Do the same for "Transportation".
# +
import re
# Create the sector1_aggregates variable
sector.loc[:,'sector1_aggregates'] = sector['sector1']
# TODO: The code above created a new variable called sector1_aggregates.
# Currently, sector1_aggregates has all of the same values as sector1
# For this task, find all the rows in sector1_aggregates with the term 'Energy' in them,
# For all of these rows, replace whatever the value is with the term 'Energy'.
# The idea is to simplify the category names by combining various categories together.
# Then, do the same for the term 'Transportation'.
# HINT: You can use the contains() methods. See the documentation for how to ignore case using the re library
# HINT: You might get an error saying "cannot index with vector containing NA / NaN values."
# Try converting NaN values to something else like False or a string
sector.loc[sector['sector1_aggregates'].str.contains('Energy', flags=re.IGNORECASE).replace(np.nan, False),'sector1_aggregates'] = 'Energy'
sector.loc[sector['sector1_aggregates'].str.contains('Transportation', flags=re.IGNORECASE).replace(np.nan, False),'sector1_aggregates'] = 'Transportation'
print('Number of unique sectors after cleaning:', len(list(sector['sector1_aggregates'].unique())))
# -
# # Conclusion
#
# Pandas makes it relatively easy to create dummy variables; however, oftentimes you'll need to clean the data first.
| ETL/Solutions/12_dummyvariables_exercise-solution.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_tensorflow_p36
# language: python
# name: conda_tensorflow_p36
# ---
# # Distributed Training of Mask-RCNN in Amazon SageMaker using EFS
#
# This notebook is a step-by-step tutorial on distributed training of [Mask R-CNN](https://arxiv.org/abs/1703.06870) implemented in the [TensorFlow](https://www.tensorflow.org/) framework. Mask R-CNN is also referred to as a heavyweight object detection model and is part of [MLPerf](https://www.mlperf.org/training-results-0-6/).
#
# Concretely, we will describe the steps for training [TensorPack Faster-RCNN/Mask-RCNN](https://github.com/tensorpack/tensorpack/tree/master/examples/FasterRCNN) and [AWS Samples Mask R-CNN](https://github.com/aws-samples/mask-rcnn-tensorflow) in [Amazon SageMaker](https://aws.amazon.com/sagemaker/) using an [Amazon EFS](https://aws.amazon.com/efs/) file-system as the data source.
#
# The outline of steps is as follows:
#
# 1. Stage COCO 2017 dataset in [Amazon S3](https://aws.amazon.com/s3/)
# 2. Copy COCO 2017 dataset from S3 to Amazon EFS file-system mounted on this notebook instance
# 3. Build Docker training image and push it to [Amazon ECR](https://aws.amazon.com/ecr/)
# 4. Configure data input channels
# 5. Configure hyper-parameters
# 6. Define training metrics
# 7. Define training job and start training
#
# Before we get started, let us initialize two python variables ```aws_region``` and ```s3_bucket``` that we will use throughout the notebook:
aws_region = # aws-region-code e.g. us-east-1
s3_bucket = # your-s3-bucket-name
# ## Stage COCO 2017 dataset in Amazon S3
#
# We use [COCO 2017 dataset](http://cocodataset.org/#home) for training. We download COCO 2017 training and validation dataset to this notebook instance, extract the files from the dataset archives, and upload the extracted files to your Amazon [S3 bucket](https://docs.aws.amazon.com/AmazonS3/latest/gsg/CreatingABucket.html). The ```prepare-s3-bucket.sh``` script executes this step.
# !cat ./prepare-s3-bucket.sh
# Using your *Amazon S3 bucket* as argument, run the cell below. If you have already uploaded COCO 2017 dataset to your Amazon S3 bucket, you may skip this step.
# %%time
# !./prepare-s3-bucket.sh {s3_bucket}
# ## Copy COCO 2017 dataset from S3 to Amazon EFS
#
# Next, we copy COCO 2017 dataset from S3 to EFS file-system. The ```prepare-efs.sh``` script executes this step.
# !cat ./prepare-efs.sh
# If you have already copied COCO 2017 dataset from S3 to your EFS file-system, skip this step.
# %%time
# !./prepare-efs.sh {s3_bucket}
# ## Build and push SageMaker training images
#
# For this step, the [IAM Role](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html) attached to this notebook instance needs full access to Amazon ECR service. If you created this notebook instance using the ```./stack-sm.sh``` script in this repository, the IAM Role attached to this notebook instance is already setup with full access to ECR service.
#
# Below, we have a choice of two different implementations:
#
# 1. [TensorPack Faster-RCNN/Mask-RCNN](https://github.com/tensorpack/tensorpack/tree/master/examples/FasterRCNN) implementation supports a maximum per-GPU batch size of 1, and does not support mixed precision. It can be used with mainstream TensorFlow releases.
#
# 2. [AWS Samples Mask R-CNN](https://github.com/aws-samples/mask-rcnn-tensorflow) is an optimized implementation that supports a maximum batch size of 4 and supports mixed precision. This implementation uses custom TensorFlow ops. The required custom TensorFlow ops are available in [AWS Deep Learning Container](https://github.com/aws/deep-learning-containers/blob/master/available_images.md) images in ```tensorflow-training``` repository with image tag ```1.15.2-gpu-py36-cu100-ubuntu18.04```, or later.
#
# It is recommended that you build and push both SageMaker training images and use either image for training later.
# ### TensorPack Faster-RCNN/Mask-RCNN
#
# Use ```./container/build_tools/build_and_push.sh``` script to build and push the TensorPack Faster-RCNN/Mask-RCNN training image to Amazon ECR.
# !cat ./container/build_tools/build_and_push.sh
# Using your *AWS region* as argument, run the cell below.
# %%time
# ! ./container/build_tools/build_and_push.sh {aws_region}
# Set ```tensorpack_image``` below to Amazon ECR URI of the image you pushed above.
tensorpack_image = # mask-rcnn-tensorpack-sagemaker ECR URI
# ### AWS Samples Mask R-CNN
# Use ```./container-optimized/build_tools/build_and_push.sh``` script to build and push the AWS Samples Mask R-CNN training image to Amazon ECR.
# !cat ./container-optimized/build_tools/build_and_push.sh
# Using your *AWS region* as argument, run the cell below.
# %%time
# ! ./container-optimized/build_tools/build_and_push.sh {aws_region}
# Set ```aws_samples_image``` below to Amazon ECR URI of the image you pushed above.
aws_samples_image = # mask-rcnn-tensorflow-sagemaker ECR URI
# ## SageMaker Initialization
#
# First we upgrade SageMaker to 2.3.0 API. If your notebook is already using latest Sagemaker 2.x API, you may skip the next cell.
# ! pip install --upgrade pip
# ! pip install sagemaker==2.3.0
# We have staged the data and we have built and pushed the training docker image to Amazon ECR. Now we are ready to start using Amazon SageMaker.
#
# +
# %%time
import os
import time
import boto3
import sagemaker
from sagemaker import get_execution_role
from sagemaker.estimator import Estimator
role = (
get_execution_role()
) # provide a pre-existing role ARN as an alternative to creating a new role
print(f"SageMaker Execution Role:{role}")
client = boto3.client("sts")
account = client.get_caller_identity()["Account"]
print(f"AWS account:{account}")
session = boto3.session.Session()
region = session.region_name
print(f"AWS region:{region}")
# -
# Next, we set the Amazon ECR image URI used for training. You saved this URI in a previous step.
training_image = # set to tensorpack_image or aws_samples_image
print(f'Training image: {training_image}')
# ## Define SageMaker Data Channels
#
# Next, we define the *train* and *log* data channels using EFS file-system. To do so, we need to specify the EFS file-system id, which is shown in the output of the command below.
# !df -kh | grep 'fs-' | sed 's/\(fs-[0-9a-z]*\).*/\1/'
# Set the EFS ```file_system_id``` below to the output of the command shown above. In the cell below, we define the `train` data input channel.
# +
from sagemaker.inputs import FileSystemInput
# Specify EFS file system id.
file_system_id = # 'fs-xxxxxxxx'
print(f"EFS file-system-id: {file_system_id}")
# Specify directory path for input data on the file system.
# You need to provide normalized and absolute path below.
file_system_directory_path = '/mask-rcnn/sagemaker/input/train'
print(f'EFS file-system data input path: {file_system_directory_path}')
# Specify the access mode of the mount of the directory associated with the file system.
# Directory must be mounted 'ro'(read-only).
file_system_access_mode = 'ro'
# Specify your file system type
file_system_type = 'EFS'
train = FileSystemInput(file_system_id=file_system_id,
file_system_type=file_system_type,
directory_path=file_system_directory_path,
file_system_access_mode=file_system_access_mode)
# -
# Below we create the log output directory and define the `log` data output channel.
# +
# Specify directory path for log output on the EFS file system.
# You need to provide normalized and absolute path below.
# For example, '/mask-rcnn/sagemaker/output/log'
# Log output directory must not exist
file_system_directory_path = f"/mask-rcnn/sagemaker/output/log-{int(time.time())}"
# Create the log output directory.
# EFS file-system is mounted on '$HOME/efs' mount point for this notebook.
home_dir = os.environ["HOME"]
local_efs_path = os.path.join(home_dir, "efs", file_system_directory_path[1:])
print(f"Creating log directory on EFS: {local_efs_path}")
assert not os.path.isdir(local_efs_path)
# ! sudo mkdir -p -m a=rw {local_efs_path}
assert os.path.isdir(local_efs_path)
# Specify the access mode of the mount of the directory associated with the file system.
# Directory must be mounted 'rw'(read-write).
file_system_access_mode = "rw"
log = FileSystemInput(
file_system_id=file_system_id,
file_system_type=file_system_type,
directory_path=file_system_directory_path,
file_system_access_mode=file_system_access_mode,
)
data_channels = {"train": train, "log": log}
# -
# Next, we define the model output location in S3. Set ```s3_bucket``` to your S3 bucket name prior to running the cell below.
#
# The model checkpoints, logs and Tensorboard events will be written to the log output directory on the EFS file system you created above. At the end of the model training, they will be copied from the log output directory to the `s3_output_location` defined below.
prefix = "mask-rcnn/sagemaker" # prefix in your bucket
s3_output_location = f"s3://{s3_bucket}/{prefix}/output"
print(f"S3 model output location: {s3_output_location}")
# ## Configure Hyper-parameters
# Next we define the hyper-parameters.
#
# Note, some hyper-parameters are different between the two implementations. The batch size per GPU in TensorPack Faster-RCNN/Mask-RCNN is fixed at 1, but is configurable in AWS Samples Mask-RCNN. The learning rate schedule is specified in units of steps in TensorPack Faster-RCNN/Mask-RCNN, but in epochs in AWS Samples Mask-RCNN.
#
# The default learning rate schedule values shown below correspond to training for a total of 24 epochs, at 120,000 images per epoch.
#
# <table align='left'>
# <caption>TensorPack Faster-RCNN/Mask-RCNN Hyper-parameters</caption>
# <tr>
# <th style="text-align:center">Hyper-parameter</th>
# <th style="text-align:center">Description</th>
# <th style="text-align:center">Default</th>
# </tr>
# <tr>
# <td style="text-align:center">mode_fpn</td>
# <td style="text-align:left">Flag to indicate use of Feature Pyramid Network (FPN) in the Mask R-CNN model backbone</td>
# <td style="text-align:center">"True"</td>
# </tr>
# <tr>
# <td style="text-align:center">mode_mask</td>
# <td style="text-align:left">A value of "False" means Faster-RCNN model, "True" means Mask R-CNN moodel</td>
# <td style="text-align:center">"True"</td>
# </tr>
# <tr>
# <td style="text-align:center">eval_period</td>
# <td style="text-align:left">Number of epochs period for evaluation during training</td>
# <td style="text-align:center">1</td>
# </tr>
# <tr>
# <td style="text-align:center">lr_schedule</td>
# <td style="text-align:left">Learning rate schedule in training steps</td>
# <td style="text-align:center">'[240000, 320000, 360000]'</td>
# </tr>
# <tr>
# <td style="text-align:center">batch_norm</td>
# <td style="text-align:left">Batch normalization option ('FreezeBN', 'SyncBN', 'GN', 'None') </td>
# <td style="text-align:center">'FreezeBN'</td>
# </tr>
# <tr>
# <td style="text-align:center">images_per_epoch</td>
# <td style="text-align:left">Images per epoch </td>
# <td style="text-align:center">120000</td>
# </tr>
# <tr>
# <td style="text-align:center">data_train</td>
# <td style="text-align:left">Training data under data directory</td>
# <td style="text-align:center">'coco_train2017'</td>
# </tr>
# <tr>
# <td style="text-align:center">data_val</td>
# <td style="text-align:left">Validation data under data directory</td>
# <td style="text-align:center">'coco_val2017'</td>
# </tr>
# <tr>
# <td style="text-align:center">resnet_arch</td>
# <td style="text-align:left">Must be 'resnet50' or 'resnet101'</td>
# <td style="text-align:center">'resnet50'</td>
# </tr>
# <tr>
# <td style="text-align:center">backbone_weights</td>
# <td style="text-align:left">ResNet backbone weights</td>
# <td style="text-align:center">'ImageNet-R50-AlignPadding.npz'</td>
# </tr>
# <tr>
# <td style="text-align:center">load_model</td>
# <td style="text-align:left">Pre-trained model to load</td>
# <td style="text-align:center"></td>
# </tr>
# <tr>
# <td style="text-align:center">config:</td>
# <td style="text-align:left">Any hyperparamter prefixed with <b>config:</b> is set as a model config parameter</td>
# <td style="text-align:center"></td>
# </tr>
# </table>
#
#
# <table align='left'>
# <caption>AWS Samples Mask-RCNN Hyper-parameters</caption>
# <tr>
# <th style="text-align:center">Hyper-parameter</th>
# <th style="text-align:center">Description</th>
# <th style="text-align:center">Default</th>
# </tr>
# <tr>
# <td style="text-align:center">mode_fpn</td>
# <td style="text-align:left">Flag to indicate use of Feature Pyramid Network (FPN) in the Mask R-CNN model backbone</td>
# <td style="text-align:center">"True"</td>
# </tr>
# <tr>
# <td style="text-align:center">mode_mask</td>
# <td style="text-align:left">A value of "False" means Faster-RCNN model, "True" means Mask R-CNN moodel</td>
# <td style="text-align:center">"True"</td>
# </tr>
# <tr>
# <td style="text-align:center">eval_period</td>
# <td style="text-align:left">Number of epochs period for evaluation during training</td>
# <td style="text-align:center">1</td>
# </tr>
# <tr>
# <td style="text-align:center">lr_epoch_schedule</td>
# <td style="text-align:left">Learning rate schedule in epochs</td>
# <td style="text-align:center">'[(16, 0.1), (20, 0.01), (24, None)]'</td>
# </tr>
# <tr>
# <td style="text-align:center">batch_size_per_gpu</td>
# <td style="text-align:left">Batch size per gpu ( Minimum 1, Maximum 4)</td>
# <td style="text-align:center">4</td>
# </tr>
# <tr>
# <td style="text-align:center">batch_norm</td>
# <td style="text-align:left">Batch normalization option ('FreezeBN', 'SyncBN', 'GN', 'None') </td>
# <td style="text-align:center">'FreezeBN'</td>
# </tr>
# <tr>
# <td style="text-align:center">images_per_epoch</td>
# <td style="text-align:left">Images per epoch </td>
# <td style="text-align:center">120000</td>
# </tr>
# <tr>
# <td style="text-align:center">data_train</td>
# <td style="text-align:left">Training data under data directory</td>
# <td style="text-align:center">'train2017'</td>
# </tr>
# <tr>
# <td style="text-align:center">backbone_weights</td>
# <td style="text-align:left">ResNet backbone weights</td>
# <td style="text-align:center">'ImageNet-R50-AlignPadding.npz'</td>
# </tr>
# <tr>
# <td style="text-align:center">load_model</td>
# <td style="text-align:left">Pre-trained model to load</td>
# <td style="text-align:center"></td>
# </tr>
# <tr>
# <td style="text-align:center">config:</td>
# <td style="text-align:left">Any hyperparamter prefixed with <b>config:</b> is set as a model config parameter</td>
# <td style="text-align:center"></td>
# </tr>
# </table>
hyperparameters = {
"mode_fpn": "True",
"mode_mask": "True",
"eval_period": 1,
"batch_norm": "FreezeBN",
}
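# The dictionary above relies on the documented defaults for every other setting. If you want to set the learning-rate schedule or epoch size explicitly, you could extend it as sketched below; the commented values simply repeat the defaults from the tables above, and which schedule key applies depends on the training image you chose.
# +
# Illustrative only -- these repeat the documented defaults from the tables above.
# hyperparameters["images_per_epoch"] = 120000
# hyperparameters["lr_schedule"] = "[240000, 320000, 360000]"                    # TensorPack image (steps)
# hyperparameters["lr_epoch_schedule"] = "[(16, 0.1), (20, 0.01), (24, None)]"   # AWS Samples image (epochs)
# -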
# ## Define Training Metrics
# Next, we define the regular expressions that SageMaker uses to extract algorithm metrics from training logs and send them to [AWS CloudWatch metrics](https://docs.aws.amazon.com/en_pv/AmazonCloudWatch/latest/monitoring/working_with_metrics.html). These algorithm metrics are visualized in SageMaker console.
metric_definitions = [
{"Name": "fastrcnn_losses/box_loss", "Regex": ".*fastrcnn_losses/box_loss:\\s*(\\S+).*"},
{"Name": "fastrcnn_losses/label_loss", "Regex": ".*fastrcnn_losses/label_loss:\\s*(\\S+).*"},
{
"Name": "fastrcnn_losses/label_metrics/accuracy",
"Regex": ".*fastrcnn_losses/label_metrics/accuracy:\\s*(\\S+).*",
},
{
"Name": "fastrcnn_losses/label_metrics/false_negative",
"Regex": ".*fastrcnn_losses/label_metrics/false_negative:\\s*(\\S+).*",
},
{
"Name": "fastrcnn_losses/label_metrics/fg_accuracy",
"Regex": ".*fastrcnn_losses/label_metrics/fg_accuracy:\\s*(\\S+).*",
},
{
"Name": "fastrcnn_losses/num_fg_label",
"Regex": ".*fastrcnn_losses/num_fg_label:\\s*(\\S+).*",
},
{"Name": "maskrcnn_loss/accuracy", "Regex": ".*maskrcnn_loss/accuracy:\\s*(\\S+).*"},
{
"Name": "maskrcnn_loss/fg_pixel_ratio",
"Regex": ".*maskrcnn_loss/fg_pixel_ratio:\\s*(\\S+).*",
},
{"Name": "maskrcnn_loss/maskrcnn_loss", "Regex": ".*maskrcnn_loss/maskrcnn_loss:\\s*(\\S+).*"},
{"Name": "maskrcnn_loss/pos_accuracy", "Regex": ".*maskrcnn_loss/pos_accuracy:\\s*(\\S+).*"},
{"Name": "mAP(bbox)/IoU=0.5", "Regex": ".*mAP\\(bbox\\)/IoU=0\\.5:\\s*(\\S+).*"},
{"Name": "mAP(bbox)/IoU=0.5:0.95", "Regex": ".*mAP\\(bbox\\)/IoU=0\\.5:0\\.95:\\s*(\\S+).*"},
{"Name": "mAP(bbox)/IoU=0.75", "Regex": ".*mAP\\(bbox\\)/IoU=0\\.75:\\s*(\\S+).*"},
{"Name": "mAP(bbox)/large", "Regex": ".*mAP\\(bbox\\)/large:\\s*(\\S+).*"},
{"Name": "mAP(bbox)/medium", "Regex": ".*mAP\\(bbox\\)/medium:\\s*(\\S+).*"},
{"Name": "mAP(bbox)/small", "Regex": ".*mAP\\(bbox\\)/small:\\s*(\\S+).*"},
{"Name": "mAP(segm)/IoU=0.5", "Regex": ".*mAP\\(segm\\)/IoU=0\\.5:\\s*(\\S+).*"},
{"Name": "mAP(segm)/IoU=0.5:0.95", "Regex": ".*mAP\\(segm\\)/IoU=0\\.5:0\\.95:\\s*(\\S+).*"},
{"Name": "mAP(segm)/IoU=0.75", "Regex": ".*mAP\\(segm\\)/IoU=0\\.75:\\s*(\\S+).*"},
{"Name": "mAP(segm)/large", "Regex": ".*mAP\\(segm\\)/large:\\s*(\\S+).*"},
{"Name": "mAP(segm)/medium", "Regex": ".*mAP\\(segm\\)/medium:\\s*(\\S+).*"},
{"Name": "mAP(segm)/small", "Regex": ".*mAP\\(segm\\)/small:\\s*(\\S+).*"},
]
# ## Define SageMaker Training Job
#
# Next, we use SageMaker [Estimator](https://sagemaker.readthedocs.io/en/stable/estimators.html) API to define a SageMaker Training Job.
#
# We recommend using 32 GPUs, so we set ```instance_count=4``` and ```instance_type='ml.p3.16xlarge'```, because there are 8 Tesla V100 GPUs per ```ml.p3.16xlarge``` instance. We recommend using a 100 GB [Amazon EBS](https://aws.amazon.com/ebs/) storage volume with each training instance, so we set ```volume_size = 100```.
#
# We run the training job in your private VPC, so we need to set the ```subnets``` and ```security_group_ids``` prior to running the cell below. You may specify multiple subnet ids in the ```subnets``` list. The subnets included in the ```subnets``` list must be part of the output of the ```./stack-sm.sh``` CloudFormation stack script used to create this notebook instance. Specify only one security group id in the ```security_group_ids``` list. The security group id must be part of the output of the ```./stack-sm.sh``` script.
#
# For ```instance_type``` below, you have the option to use ```ml.p3.16xlarge``` with 16 GB per-GPU memory and 25 Gbs network interconnectivity, or ```ml.p3dn.24xlarge``` with 32 GB per-GPU memory and 100 Gbs network interconnectivity. The ```ml.p3dn.24xlarge``` instance type offers significantly better performance than ```ml.p3.16xlarge``` for Mask R-CNN distributed TensorFlow training.
# +
# Give Amazon SageMaker Training Jobs Access to FileSystem Resources in Your Amazon VPC.
security_group_ids = # ['sg-xxxxxxxx']
subnets = # [ 'subnet-xxxxxxx', 'subnet-xxxxxxx', 'subnet-xxxxxxx' ]
sagemaker_session = sagemaker.session.Session(boto_session=session)
mask_rcnn_estimator = Estimator(image_uri=training_image,
role=role,
instance_count=4,
instance_type='ml.p3.16xlarge',
volume_size = 100,
max_run = 400000,
output_path=s3_output_location,
sagemaker_session=sagemaker_session,
hyperparameters = hyperparameters,
metric_definitions = metric_definitions,
subnets=subnets,
security_group_ids=security_group_ids)
# -
# Finally, we launch the SageMaker training job. See ```Training Jobs``` in SageMaker console to monitor the training job.
# +
import time
job_name = f"mask-rcnn-efs-{int(time.time())}"
print(f"Launching Training Job: {job_name}")
# set wait=True below if you want to print logs in cell output
mask_rcnn_estimator.fit(inputs=data_channels, job_name=job_name, logs="All", wait=False)
# -
| advanced_functionality/distributed_tensorflow_mask_rcnn/mask-rcnn-efs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import tensorflow as tf
import numpy as np
#dictionary to keep track of the predicted trash bucket in each iteration
dict_onepass={0:0,1:0,2:0,3:0,4:0,5:0,6:0,7:0,8:0,9:0}
#repeat the experiment 20 times
for i in range(20):
store_onepass=[]
#import mnist dataset
mnist=tf.keras.datasets.mnist
(x_train, y_train), (x_test,y_test) =mnist.load_data()
x_train = tf.keras.utils.normalize(x_train, axis=1)
x_test = tf.keras.utils.normalize(x_test, axis=1)
#create cnn model
model =tf.keras.models.Sequential()
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(10, activation=tf.nn.softmax))
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
seq_array1=[]
seq_array2=[]
for i in range(len(y_train)):
if y_train[i]==1:
seq_array1.append(i)
else:
seq_array2.append(i)
for i in seq_array1:
seq_array2.append(i)
new_x_train=[]
new_y_train=[]
for i in seq_array2:
new_x_train.append(x_train[i])
new_y_train.append(y_train[i])
import numpy as np
#change the array into numpy array
x_train=new_x_train
x_train=np.array(x_train)
y_train=new_y_train
y_train=np.array(y_train)
#fit model
model.fit(x_train,y_train,epochs=3,shuffle=False)
loss, acc = model.evaluate(x_test,y_test)
predict=model.predict([x_test])
import matplotlib.pyplot as plt
#generate 50000 random 28x28 arrays with values between 0 and 1, similar to the normalized mnist data
from numpy import random
new_test=[]
for i in range(50000):
x = random.rand(28, 28)
new_test.append(x)
#new numpy for testing random values
new_test=np.array(new_test)
new_predict=model.predict([new_test])
#placeholder for each number
one=0
three=0
two=0
four=0
five=0
six=0
seven=0
eight=0
nine=0
zero=0
#count how many random inputs are predicted as each digit
for i in range(50000):
if np.argmax(new_predict[i])==1:
one+=1
if np.argmax(new_predict[i])==3:
three+=1
if np.argmax(new_predict[i])==2:
two+=1
if np.argmax(new_predict[i])==4:
four+=1
if np.argmax(new_predict[i])==5:
five+=1
if np.argmax(new_predict[i])==6:
six+=1
if np.argmax(new_predict[i])==7:
seven+=1
if np.argmax(new_predict[i])==8:
eight+=1
if np.argmax(new_predict[i])==9:
nine+=1
if np.argmax(new_predict[i])==0:
zero+=1
print(new_predict[0])
print(1,one)
print(2,two)
print(3,three)
print(4,four)
print(5,five)
print(6,six)
print(7,seven)
print(8,eight)
print(9,nine)
print(0,zero)
#assign to the variable
store_onepass.append(zero)
store_onepass.append(one)
store_onepass.append(two)
store_onepass.append(three)
store_onepass.append(four)
store_onepass.append(five)
store_onepass.append(six)
store_onepass.append(seven)
store_onepass.append(eight)
store_onepass.append(nine)
store_onepass=np.array(store_onepass)
print(np.argmax(store_onepass))
i=int(np.argmax(store_onepass))
# increment the dictionary entry corresponding to the predicted bucket
dict_onepass[i]=dict_onepass[i]+1
#print the dictionary in each iteration
print(dict_onepass)
# -
| MNIST_Trash_Bucket.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Li-Li Cycle Test Plots
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import batt_cycle
# +
# folder holding data of interest
folder = '../Data/Li-Li/Data_2_7_19/'
# imports and concatenates the data for a cell
PP_A_0 = batt_cycle.import_data(folder + '_002_4.xls', 3)
# cleans and preps dataframes
PP_A, PP_A_break = batt_cycle.clean_prep_break(PP_A_0)
# reshapes the index data
PP_A_indeces = batt_cycle.reshape_cycle_indeces(PP_A, PP_A_break)
# +
# plotting a cycle range
start_cycle = 1
end_cycle = 96
range_data, label = batt_cycle.cycle_range_data(PP_A, PP_A_indeces, start_cycle, end_cycle)
plt.figure(figsize=(12,4))
plt.title('Li-Li Cell (PP_A) - '+label, fontsize=16, pad=10)
plt.xlabel('Time ($S$)')
plt.ylabel('Potential ($V$)')
plt.ylim(-abs(max(range_data['voltage'], key=abs)*1.1),
abs(max(range_data['voltage'], key=abs)*1.1))
plt.grid()
plt.plot(range_data['time_sec'], range_data['voltage'])
plt.show()
# +
# plotting a single cycle
start_cycle = 88
range_data, label = batt_cycle.cycle_range_data(PP_A, PP_A_indeces, start_cycle)
plt.figure(figsize=(12,4))
plt.title('Li-Li Cell (PP_A) - '+label, fontsize=16, pad=10)
plt.xlabel('Time ($S$)')
plt.ylabel('Potential ($V$)')
plt.ylim(-abs(max(range_data['voltage'], key=abs)*1.1),
abs(max(range_data['voltage'], key=abs)*1.1))
plt.grid()
plt.plot(range_data['time_sec'], range_data['voltage'])
plt.show()
# +
# plotting a stacked cycles
range_data1, label1 = batt_cycle.cycle_range_data(PP_A, PP_A_indeces, 1)
range_data2, label2 = batt_cycle.cycle_range_data(PP_A, PP_A_indeces, 30)
range_data3, label3 = batt_cycle.cycle_range_data(PP_A, PP_A_indeces, 96)
range_data1 = range_data1.reset_index()
range_data2 = range_data2.reset_index()
range_data3 = range_data3.reset_index()
plt.figure(figsize=(12,4))
plt.title('Li-Li Cell (PP_A)', fontsize=16, pad=10)
plt.xlabel('Data Point')
plt.ylabel('Potential ($V$)')
plt.ylim(-abs(max(range_data1['voltage'], key=abs)*1.1),
abs(max(range_data1['voltage'], key=abs)*1.1))
plt.grid()
plt.plot(range_data1.index, range_data1['voltage'], label='{}'.format(label1))
plt.plot(range_data2.index, range_data2['voltage'], label='{}'.format(label2))
plt.plot(range_data3.index, range_data3['voltage'], label='{}'.format(label3))
plt.legend()
plt.show()
# -
PP_A
| Dev_1/Li-Li_dev_4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# https://github.com/bokeh/bokeh/blob/master/examples/howto/layouts/dashboard.py
# +
import numpy as np
from bokeh.layouts import column, grid
from bokeh.models import ColumnDataSource, CustomJS, Slider
from bokeh.plotting import figure, output_file, show
# +
output_file('dashboard.html')
tools = 'pan'
# -
def bollinger():
# Define Bollinger Bands.
upperband = np.random.randint(100, 150+1, size=100)
lowerband = upperband - 100
x_data = np.arange(1, 101)
# Bollinger shading glyph:
band_x = np.append(x_data, x_data[::-1])
band_y = np.append(lowerband, upperband[::-1])
p = figure(x_axis_type='datetime', tools=tools)
p.patch(band_x, band_y, color='#7570B3', fill_alpha=0.2)
p.title.text = 'Bollinger Bands'
p.title_location = 'left'
p.title.align = 'left'
p.plot_height = 600
p.plot_width = 800
p.grid.grid_line_alpha = 0.4
return [p]
def slider():
x = np.linspace(0, 10, 100)
y = np.sin(x)
source = ColumnDataSource(data=dict(x=x, y=y))
plot = figure(
y_range=(-10, 10), tools='', toolbar_location=None,
title="Sliders example")
plot.line('x', 'y', source=source, line_width=3, line_alpha=0.6)
amp_slider = Slider(start=0.1, end=10, value=1, step=.1, title="Amplitude")
freq_slider = Slider(start=0.1, end=10, value=1, step=.1, title="Frequency")
phase_slider = Slider(start=0, end=6.4, value=0, step=.1, title="Phase")
offset_slider = Slider(start=-5, end=5, value=0, step=.1, title="Offset")
callback = CustomJS(args=dict(source=source, amp=amp_slider, freq=freq_slider, phase=phase_slider, offset=offset_slider),
code="""
const data = source.data;
const A = amp.value;
const k = freq.value;
const phi = phase.value;
const B = offset.value;
const x = data['x']
const y = data['y']
for (var i = 0; i < x.length; i++) {
y[i] = B + A*Math.sin(k*x[i]+phi);
}
source.change.emit();
""")
amp_slider.js_on_change('value', callback)
freq_slider.js_on_change('value', callback)
phase_slider.js_on_change('value', callback)
offset_slider.js_on_change('value', callback)
widgets = column(amp_slider, freq_slider, phase_slider, offset_slider)
return [widgets, plot]
def linked_panning():
N = 100
x = np.linspace(0, 4 * np.pi, N)
y1 = np.sin(x)
y2 = np.cos(x)
y3 = np.sin(x) + np.cos(x)
s1 = figure(tools=tools)
s1.circle(x, y1, color="navy", size=8, alpha=0.5)
s2 = figure(tools=tools, x_range=s1.x_range, y_range=s1.y_range)
s2.circle(x, y2, color="firebrick", size=8, alpha=0.5)
s3 = figure(tools='pan, box_select', x_range=s1.x_range)
s3.circle(x, y3, color="olive", size=8, alpha=0.5)
return [s1, s2, s3]
l = grid([
bollinger(),
slider(),
linked_panning(),
], sizing_mode='stretch_both')
show(l)
| notebooks/bokeh_dashboard_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Express sklearn pipeline as codeflare pipeline
#
# Reference: https://scikit-learn.org/stable/auto_examples/compose/plot_digits_pipe.html#sphx-glr-auto-examples-compose-plot-digits-pipe-py
#
# %matplotlib inline
#
# # Pipelining: chaining a PCA and a logistic regression
#
# The PCA does an unsupervised dimensionality reduction, while the logistic
# regression does the prediction.
#
# We use a GridSearchCV to set the dimensionality of the PCA
#
# +
print(__doc__)
# Code source: <NAME>
# Modified for documentation by <NAME>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
# Define a pipeline to search for the best combination of PCA truncation
# and classifier regularization.
pca = PCA()
# set the tolerance to a large value to make the example faster
logistic = LogisticRegression(max_iter=10000, tol=0.1)
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])
X_digits, y_digits = datasets.load_digits(return_X_y=True)
# Parameters of pipelines can be set using ‘__’ separated parameter names:
param_grid = {
'pca__n_components': [5, 15, 30, 45, 64],
'logistic__C': np.logspace(-4, 4, 4),
}
search = GridSearchCV(pipe, param_grid, n_jobs=-1)
search.fit(X_digits, y_digits)
print("Best parameter (CV score=%0.3f):" % search.best_score_)
print(search.best_params_)
# Plot the PCA spectrum
pca.fit(X_digits)
fig, (ax0, ax1) = plt.subplots(nrows=2, sharex=True, figsize=(6, 6))
ax0.plot(np.arange(1, pca.n_components_ + 1),
pca.explained_variance_ratio_, '+', linewidth=2)
ax0.set_ylabel('PCA explained variance ratio')
ax0.axvline(search.best_estimator_.named_steps['pca'].n_components,
linestyle=':', label='n_components chosen')
ax0.legend(prop=dict(size=12))
# For each number of components, find the best classifier results
results = pd.DataFrame(search.cv_results_)
components_col = 'param_pca__n_components'
best_clfs = results.groupby(components_col).apply(
lambda g: g.nlargest(1, 'mean_test_score'))
best_clfs.plot(x=components_col, y='mean_test_score', yerr='std_test_score',
legend=False, ax=ax1)
ax1.set_ylabel('Classification accuracy (val)')
ax1.set_xlabel('n_components')
plt.xlim(-1, 70)
plt.tight_layout()
plt.show()
# +
print(__doc__)
# Code source: <NAME>
# Modified for documentation by <NAME>
# Modified for codeflare pipeline by <NAME> & <NAME>
# License: Same as original code creator
import codeflare.pipelines.Datamodel as dm
import codeflare.pipelines.Runtime as rt
from codeflare.pipelines.Datamodel import Xy
from codeflare.pipelines.Datamodel import XYRef
from codeflare.pipelines.Runtime import ExecutionType
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
from sklearn.model_selection import KFold
import ray
ray.shutdown()
ray.init()
X_digits, y_digits = datasets.load_digits(return_X_y=True)
pca = PCA()
# set the tolerance to a large value to make the example faster
logistic = LogisticRegression(max_iter=10000, tol=0.1)
pipeline = dm.Pipeline()
node_pca = dm.EstimatorNode('pca', pca)
node_logistic = dm.EstimatorNode('logistic', logistic)
pipeline.add_edge(node_pca, node_logistic)
# input to pipeline
pipeline_input = dm.PipelineInput()
pipeline_input.add_xy_arg(node_pca, dm.Xy(X_digits, y_digits))
# param_grid
param_grid = {
'pca__n_components': [5, 15, 30, 45, 64],
'logistic__C': np.logspace(-4, 4, 4),
}
pipeline_param = dm.PipelineParam.from_param_grid(param_grid)
# default KFold for grid search
k = 5
kf = KFold(k)
# execute CF pipeplie grid_search_cv
result = rt.grid_search_cv(kf, pipeline, pipeline_input, pipeline_param)
import statistics
# pick the best mean score and the corresponding best pipeline
best_pipeline = None
best_mean_scores = 0.0
best_n_components = 0
df = pd.DataFrame(columns =('n_components', 'mean_test_score', 'std_test_score'))
for cv_pipeline, scores in result.items():
mean = statistics.mean(scores)
std = statistics.stdev(scores)
n_components = 0
params = {}
# get the 'n_components' value of the PCA in this cv_pipeline
for node_name, node in cv_pipeline.get_nodes().items():
params[node_name] = node.get_estimator().get_params()
if 'n_components' in params[node_name]:
n_components = params[node_name]['n_components']
assert(n_components > 0)
df = df.append({'n_components' : n_components, 'mean_test_score' : mean, 'std_test_score' : std}, ignore_index=True)
if mean > 0.92:
print(mean)
print(str(params))
if mean > best_mean_scores:
best_pipeline = cv_pipeline
best_mean_scores = mean
best_n_components = n_components
# Plot the PCA spectrum
pca.fit(X_digits)
fig, (ax0, ax1) = plt.subplots(nrows=2, sharex=True, figsize=(6, 6))
ax0.plot(np.arange(1, pca.n_components_ + 1),
pca.explained_variance_ratio_, '+', linewidth=2)
ax0.set_ylabel('PCA explained variance ratio')
# plot a vertical line with the n_components chosen from the best_pipeline
ax0.axvline(best_n_components,
linestyle=':', label='n_components chosen')
ax0.legend(prop=dict(size=12))
# For each number of components, find the best classifier results
components_col = 'n_components'
best_clfs = df.groupby(components_col).apply(
lambda g: g.nlargest(1, 'mean_test_score'))
best_clfs.plot(x=components_col, y='mean_test_score', yerr='std_test_score',
legend=False, ax=ax1)
ax1.set_ylabel('Classification accuracy (val)')
ax1.set_xlabel('n_components')
plt.xlim(-1, 70)
plt.tight_layout()
plt.show()
# Due to the differences in split, the codeflare execution produces the best mean
# test score with a different n_components for PCA than that from the original Sklearn
# execution. The 2nd best in codeflare, nevertheless, matches the original
# Sklearn execution.
ray.shutdown()
# -
| notebooks/plot_digits_pipe.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] nbsphinx="hidden"
# # Realization of Recursive Filters
#
# *This jupyter notebook is part of a [collection of notebooks](../index.ipynb) on various topics of Digital Signal Processing.*
# -
# ## Quantization of Filter Coefficients
#
# The finite numerical resolution of digital number representations has impact on the properties of filters, as already discussed for [non-recursive filters](../nonrecursive_filters/quantization_effects.ipynb#Quantization-Effects). The quantization of coefficients, state variables, algebraic operations and signals plays an important role in the design of recursive filters. Compared to non-recursive filters, the impact of quantization is often more prominent due to the feedback. Severe degradations from the desired characteristics and instability are potential consequences of a finite word length in practical implementations.
#
# A recursive filter of order $N \geq 2$ can be [decomposed into second-order sections (SOS)](../recursive_filters/cascaded_structures.ipynb). Due to the grouping of poles/zeros to filter coefficients with a limited amplitude range, a realization by cascaded SOS is favorable in practice. We therefore limit our investigation of quantization effects to SOS. The transfer function of a SOS is given as
#
# \begin{equation}
# H(z) = \frac{b_0 + b_1 z^{-1} + b_2 z^{-2}}{1 + a_1 z^{-1} + a_2 z^{-2}}
# \end{equation}
#
# This can be [split into a non-recursive part and a recursive part](../recursive_filters/introduction.ipynb#Recursive-Filters). The quantization effects of non-recursive filters have already been discussed. We therefore focus here on the recursive part given by the transfer function
#
# \begin{equation}
# H(z) = \frac{1}{1 + a_1 z^{-1} + a_2 z^{-2}}
# \end{equation}
#
# This section investigates the consequences of quantization in recursive filters. As for non-recursive filters, we first take a look at the quantization of filter coefficients. The structure used for the realization of the filter has an impact on the quantization effects. We begin with the direct form, followed by the coupled form as an example of an alternative structure.
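# As a brief aside (a sketch, not part of the original lesson), SciPy can perform such a decomposition of a higher-order design into second-order sections directly, e.g. for a fourth-order lowpass:
# +
import numpy as np
import scipy.signal as sig
b, a = sig.butter(4, 0.2)  # fourth-order Butterworth lowpass (normalized cutoff 0.2)
sos = sig.tf2sos(b, a)     # decompose into two second-order sections
print(np.round(sos, 4))    # each row: [b0, b1, b2, a0, a1, a2] with a0 = 1
# -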
# ### Direct Form
#
# Above transfer function of the recursive part of a SOS can be rewritten in terms of its complex conjugate poles $z_{\infty}$ and $z_{\infty}^*$ as
#
# \begin{equation}
# H(z) = \frac{1}{(z-z_{\infty}) (z-z_{\infty}^*)} = \frac{z^{-2}}{ 1 \underbrace{- 2 r \cos(\varphi)}_{a_1} \; z^{-1} + \underbrace{r^2}_{a_2} \; z^{-2} }
# \end{equation}
#
# where $r = |z_{\infty}|$ and $\varphi = \arg \{z_{\infty}\}$ denote the absolute value and phase of the pole $z_{\infty}$, respectively. Let's assume a [linear uniform quantization](../quantization/linear_uniform_quantization_error.ipynb#Quantization-Error-of-a-Linear-Uniform-Quantizer) of the coefficients $a_1$ and $a_2$ with quantization step $Q$. Discarding clipping, the following relations for the locations of the poles can be found
#
# \begin{align}
# r_n &= \sqrt{n \cdot Q} \\
# \varphi_{nm} &= \arccos \left( \sqrt{\frac{m^2 Q}{4 n}} \right)
# \end{align}
# for $n \in \mathbb{N}_0$ and $m \in \mathbb{Z}$. Quantization of the filter coefficients $a_1$ and $a_2$ into a finite number of amplitude values leads to a finite number of pole locations. In the $z$-plane the possible pole locations are given by the intersections of
#
# * circles whose radii $r_n$ are given by $r_n = \sqrt{n \cdot Q}$ with
# * equidistant vertical lines which intersect the horizontal axis at $\frac{1}{2} m \cdot Q$.
#
# The finite number of pole locations may lead to deviations from a desired filter characteristic, since a desired pole location is moved to the closest possible pole location. The filter may even become unstable when poles are moved outside the unit circle. For illustration, the resulting pole locations for a SOS realized in direct form are computed and plotted.
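# For completeness, these relations follow directly from restricting the coefficients to integer multiples of the quantization step, i.e. $a_2 = n \cdot Q$ and $a_1 = m \cdot Q$:
#
# \begin{align}
# r_n &= \sqrt{a_2} = \sqrt{n \cdot Q} \\
# \cos(\varphi_{nm}) &= -\frac{a_1}{2 r_n} = -\frac{m \cdot Q}{2 \sqrt{n \cdot Q}}
# \end{align}
#
# so that $|\cos(\varphi_{nm})| = \sqrt{\frac{m^2 Q}{4 n}}$ and $\Re\{z_\infty\} = r_n \cos(\varphi_{nm}) = -\frac{1}{2} m \cdot Q$. Since $m$ runs over all integers, the sign can be absorbed into $m$, which recovers the relations above and explains both the circles of radius $r_n$ and the equidistant vertical lines.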
# +
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
import scipy.signal as sig
import itertools
def compute_pole_locations(Q):
a1 = np.arange(-2, 2+Q, Q)
a2 = np.arange(0, 1+Q, Q)
p = np.asarray([np.roots([1, n, m]) for (n,m) in itertools.product(a1, a2)])
p = p[np.imag(p)!=0]
return p
def plot_pole_locations(p, Q):
ax = plt.gca()
for n in np.arange(np.ceil(2/Q)+1):
circle = Circle((0,0), radius=np.sqrt(n*Q), fill=False, color='black', ls='solid', alpha=0.05)
ax.add_patch(circle)
ax.axvline(.5*n*Q, color='0.95')
ax.axvline(-.5*n*Q, color='0.95')
unit_circle = Circle((0,0), radius=1, fill=False, color='red', ls='solid')
ax.add_patch(unit_circle)
plt.plot(np.real(p), np.imag(p), 'b.', ms = 4)
plt.xlabel(r'Re{$z$}')
plt.ylabel(r'Im{$z$}')
plt.axis([-1.1, 1.1, -1.1, 1.1])
# compute and plot pole locations
for w in [5,6]:
Q = 2/(2**(w-1)) # quantization stepsize
plt.figure(figsize=(5, 5))
p = compute_pole_locations(Q)
plot_pole_locations(p, Q)
plt.title(r'Direct form coefficient quantization to $w=%d$ bits'%w)
# -
# **Exercise**
#
# * What consequences does the distribution of pole locations have on the desired characteristics of a filter, e.g. for low/high frequencies?
#
# Solution: Quantization of the original filter coefficients leads to a limited number of possible pole and zero locations. These locations are not uniformly distributed over the $z$-plane, as can be observed from above illustrations. The density of potential locations is especially low for low frequencies and close to the Nyquist frequency. The properties of a designed filter having poles and/or zeros at low/high frequencies will potentially deviate more when quantizing its coefficients, as a consequence.
# ### Coupled Form
#
# Besides the quantization step $Q$, the pole distribution also depends on the topology of the filter. To obtain a different distribution of pole locations after quantization, one has to derive structures whose multiplier coefficients are given by values other than the direct form coefficients $a_1$ and $a_2$.
#
# One of these alternative structures is the coupled form (also known as Gold & Rader structure)
#
# 
#
# where $\Re\{z_\infty\} = r \cdot \cos \varphi$ and $\Im\{z_\infty\} = r \cdot \sin \varphi$ denote the real- and imaginary part of the complex pole $z_\infty$, respectively. Analysis of the structure reveals its difference equation as
#
# \begin{align}
# w[k] &= x[k] + \Re\{z_\infty\} \, w[k-1] - \Im\{z_\infty\} \, y[k-1] \\
# y[k] &= \Im\{z_\infty\} \, w[k-1] + \Re\{z_\infty\} \, y[k-1]
# \end{align}
#
# and its transfer function as
#
# \begin{equation}
# H(z) = \frac{\Im\{z_\infty\} \; z^{-1}}{ 1 - 2 \Re\{z_\infty\} \; z^{-1} + (\Re\{z_\infty\}^2 + \Im\{z_\infty\}^2) \; z^{-2} }
# \end{equation}
#
# Note that the numerator of the transfer function differs from the recursive-only SOS given above. However, this can be accounted for in the design of the transfer function of a general SOS.
#
# The real and imaginary parts of the pole $z_\infty$ appear directly as coefficients for the multipliers in the coupled form. Quantization of these coefficients therefore results in a Cartesian grid of possible pole locations in the $z$-plane. This is illustrated in the following.
# +
def compute_pole_locations(w):
Q = 1/(2**(w-1)) # quantization stepsize
a1 = np.arange(-1, 1+Q, Q)
a2 = np.arange(-1, 1+Q, Q)
p = np.asarray([n+1j*m for (n,m) in itertools.product(a1, a2) if n**2+m**2 <= 1])
return p
def plot_pole_locations(p):
ax = plt.gca()
unit_circle = Circle((0,0), radius=1, fill=False, color='red', ls='solid')
ax.add_patch(unit_circle)
plt.plot(np.real(p), np.imag(p), 'b.', ms = 4)
plt.xlabel(r'Re{$z$}')
plt.ylabel(r'Im{$z$}')
plt.axis([-1.1, 1.1, -1.1, 1.1])
# compute and plot pole locations
for w in [5,6]:
plt.figure(figsize=(5, 5))
p = compute_pole_locations(w)
plot_pole_locations(p)
plt.title(r'Coupled form coefficient quantization to $w=%d$ bits'%w)
# -
# **Exercise**
#
# * What is the benefit of this representation in comparison to the direct form discussed in the previous section?
#
# Solution: A benefit of the coupled form is a uniform distribution of potential pole and zero locations in the $z$-plane. This holds especially for low frequencies and close to the Nyquist frequency.
# ### Example - Influence of coefficient quantization
#
# The following example illustrates the effects of coefficient quantization for a recursive [Butterworth filter](https://en.wikipedia.org/wiki/Butterworth_filter) realized in cascaded SOSs in transposed direct form II.
# +
w = 16 # wordlength of filter coefficients
N = 7 # order of filter
def uniform_midtread_quantizer(x, w, xmin=1):
# quantization step
Q = xmin/(2**(w-1))
# limiter
x = np.copy(x)
idx = np.where(x <= -xmin)
x[idx] = -1
idx = np.where(x > xmin - Q)
x[idx] = 1 - Q
# linear uniform quantization
xQ = Q * np.floor(x/Q + 1/2)
return xQ
def zplane(z, p, title='Poles and Zeros'):
"Plots zero and pole locations in the complex z-plane"
ax = plt.gca()
ax.plot(np.real(z), np.imag(z), 'bo', fillstyle='none', ms = 10)
ax.plot(np.real(p), np.imag(p), 'rx', fillstyle='none', ms = 10)
unit_circle = Circle((0,0), radius=1, fill=False,
color='black', ls='solid', alpha=0.9)
ax.add_patch(unit_circle)
ax.axvline(0, color='0.7')
ax.axhline(0, color='0.7')
plt.title(title)
plt.xlabel(r'Re{$z$}')
plt.ylabel(r'Im{$z$}')
plt.axis('equal')
plt.xlim((-2, 2))
plt.ylim((-2, 2))
plt.grid()
# coefficients of recursive filter
b, a = sig.butter(N, 0.2, 'low')
# decomposition into SOS
sos = sig.tf2sos(b, a, pairing='nearest')
sos = sos/np.amax(np.abs(sos))
# quantization of SOS coefficients
sosq = uniform_midtread_quantizer(sos, w, xmin=1)
# compute overall transfer function of (quantized) filter
H = np.ones(512)
Hq = np.ones(512)
for n in range(sos.shape[0]):
Om, Hn = sig.freqz(sos[n, 0:3], sos[n, 3:6])
H = H * Hn
Om, Hn = sig.freqz(sosq[n, 0:3], sosq[n, 3:6])
Hq = Hq * Hn
# plot magnitude responses
plt.figure(figsize=(10, 3))
plt.plot(Om, 20 * np.log10(abs(H)), label='continuous')
plt.plot(Om, 20 * np.log10(abs(Hq)), label='quantized')
plt.title('Magnitude response')
plt.xlabel(r'$\Omega$')
plt.ylabel(r'$|H(e^{j \Omega})|$ in dB')
plt.legend(loc=3)
plt.grid()
# plot phase responses
plt.figure(figsize=(10, 3))
plt.plot(Om, np.unwrap(np.angle(H)), label='continuous')
plt.plot(Om, np.unwrap(np.angle(Hq)), label='quantized')
plt.title('Phase')
plt.xlabel(r'$\Omega$')
plt.ylabel(r'$\varphi (\Omega)$ in rad')
plt.legend(loc=3)
plt.grid()
# -
# **Exercise**
#
# * Decrease the word length `w` of the filter. What happens? At what word length does the filter become unstable?
# * Increase the order `N` of the filter for a fixed word length `w`. What happens?
#
# Solution: The deviations from the continuous (desired) realization of the filter increase with decreasing word length. The filter becomes unstable once the word length is too small; for instance, a filter of order `N=5` becomes unstable for `w < 10`. Increasing the order `N` of the filter for a fixed word length also results in instabilities. Consequently, a higher-order filter requires a larger word length.
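#
# As a quick programmatic check (a minimal sketch, assuming the `sos` and `sosq` arrays from above), stability can be verified by testing whether the poles of every second-order section lie inside the unit circle:
# +
def sections_stable(sos_coeffs):
    """Return True if all poles of all second-order sections lie strictly inside the unit circle."""
    return all(np.max(np.abs(np.roots(section[3:6]))) < 1 for section in sos_coeffs)

print('original SOS stable: ', sections_stable(sos))
print('quantized SOS stable:', sections_stable(sosq))
# -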
| Lectures_Advanced-DSP/recursive_filters/quantization_of_coefficients.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# +
import param
import panel as pn
pn.extension()
# -
# This example demonstrates how to use ``param.Action`` to trigger an update in a method that depends on that parameter. Actions can trigger any function, but if we simply want to trigger a method that depends on that action we can define a small ``lambda`` function that triggers the parameter explicitly.
# +
class ActionExample(param.Parameterized):
"""
Demonstrates how to use param.Action to trigger an update.
"""
number = param.Number(default=0)
action = param.Action(lambda x: x.param.trigger('action'), label='Click here!')
@param.depends('action')
def get_number(self):
return self.number
action_example = ActionExample()
pn.Column(
'# param.Action Example',
pn.Row(
pn.Column(pn.panel(action_example.param, show_labels=False, show_name=False, margin=0),
'Click the button to trigger an update in the output.'),
pn.WidgetBox(action_example.get_number, width=300))
).servable()
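# -
# The same update can also be triggered programmatically (a small sketch; `param.trigger` is exactly the call the button's callback makes above):
action_example.param.trigger('action')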
| examples/gallery/param/action_button.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="Euiq_EOntgQm" colab_type="text"
# # Tutorial Part 21: Introduction to Bioinformatics
#
# So far in this tutorial, we've primarily worked on the problems of cheminformatics. We've been interested in seeing how we can use the techniques of machine learning to make predictions about the properties of molecules. In this tutorial, we're going to shift a bit and see how we can use classical computer science techniques and machine learning to tackle problems in bioinformatics.
#
# For this, we're going to use the venerable [biopython](https://biopython.org/) library to do some basic bioinformatics. A lot of the material in this notebook is adapted from the extensive official [Biopython tutorial](http://biopython.org/DIST/docs/tutorial/Tutorial.html). We strongly recommend checking out the official tutorial after you work through this notebook!
#
# ## Colab
#
# This tutorial and the rest in this sequence are designed to be done in Google colab. If you'd like to open this notebook in colab, you can use the following link.
#
# [](https://colab.research.google.com/github/deepchem/deepchem/blob/master/examples/tutorials/21_Introduction_to_Bioinformatics.ipynb)
#
# ## Setup
#
# To run DeepChem within Colab, you'll need to run the following cell of installation commands. This will take about 5 minutes to run to completion and install your environment.
# + id="9k2qhejltgQo" colab_type="code" outputId="41f75690-8054-4d36-94ed-83e2f6b86b4d" colab={"base_uri": "https://localhost:8080/", "height": 462}
# %tensorflow_version 1.x
# !curl -Lo deepchem_installer.py https://raw.githubusercontent.com/deepchem/deepchem/master/scripts/colab_install.py
import deepchem_installer
# %time deepchem_installer.install(version='2.3.0')
# + [markdown] id="APnQxtIKtgQs" colab_type="text"
# We'll use `pip` to install `biopython`
# + id="HeYSJWSAtgQt" colab_type="code" outputId="f4aea39d-1bca-4cc4-c01f-4c04a440076d" colab={"base_uri": "https://localhost:8080/", "height": 139}
# !pip install biopython
# + id="4CxSQrxptgQx" colab_type="code" outputId="d3403ab5-0cc3-480a-ab99-4064ba4aa044" colab={"base_uri": "https://localhost:8080/", "height": 34}
import Bio
Bio.__version__
# + id="7eXZ-43CtgQ6" colab_type="code" outputId="8cc0c9f4-7ee5-447c-ab4b-caa4b0db2e4a" colab={"base_uri": "https://localhost:8080/", "height": 34}
from Bio.Seq import Seq
my_seq = Seq("AGTACACATTG")
my_seq
# + id="Fd-wViuTtgRB" colab_type="code" outputId="92b43663-3ceb-420f-f41f-a9b290f80858" colab={"base_uri": "https://localhost:8080/", "height": 34}
my_seq.complement()
# + id="GlO-43FNtgRF" colab_type="code" outputId="5adf1324-d675-4644-dd00-6670f72b0532" colab={"base_uri": "https://localhost:8080/", "height": 34}
my_seq.reverse_complement()
# + [markdown] id="W-LumeWptgRJ" colab_type="text"
# ## Parsing Sequence Records
#
# We're going to download a sample `fasta` file from the Biopython tutorial to use in some exercises. This file is a set of hits for a sequence (of lady slipper orchid genes).
# + id="U0A0B3-FtgRK" colab_type="code" outputId="c70346e5-19b9-4994-abd8-f7ecaee55fc7" colab={"base_uri": "https://localhost:8080/", "height": 204}
# !wget https://raw.githubusercontent.com/biopython/biopython/master/Doc/examples/ls_orchid.fasta
# + [markdown] id="iNwHJES1tgRP" colab_type="text"
# Let's take a look at what the contents of this file look like:
# + id="5ZudMHxttgRQ" colab_type="code" outputId="65c2458b-6a7b-47b0-be32-a8564a6f1cf7" colab={"base_uri": "https://localhost:8080/", "height": 1000}
from Bio import SeqIO
for seq_record in SeqIO.parse('ls_orchid.fasta', 'fasta'):
print(seq_record.id)
print(repr(seq_record.seq))
print(len(seq_record))
# + [markdown] id="UV-0Mvv-tgRV" colab_type="text"
# ## Sequence Objects
#
# A large part of the biopython infrastructure deals with tools for handling sequences. These could be DNA sequences, RNA sequences, amino acid sequences or even more exotic constructs. To tell biopython what type of sequence it's dealing with, you can specify the alphabet explicitly.
# + id="kdkqKHmgtgRW" colab_type="code" outputId="2cdece26-333d-4401-a6c2-8486d4721c83" colab={"base_uri": "https://localhost:8080/", "height": 34}
from Bio.Seq import Seq
from Bio.Alphabet import IUPAC
my_seq = Seq("ACAGTAGAC", IUPAC.unambiguous_dna)
my_seq
# + id="j5xDuf7DtgRb" colab_type="code" outputId="ca808df9-e5ed-409f-a0d9-fdb86fe8ce6e" colab={"base_uri": "https://localhost:8080/", "height": 34}
my_seq.alphabet
# + [markdown] id="dUw07rNrtgRr" colab_type="text"
# If we want to code a protein sequence, we can do that just as easily.
# + id="O6WUnJEftgRs" colab_type="code" outputId="c9d45805-3166-41ee-cf14-74dacb39c011" colab={"base_uri": "https://localhost:8080/", "height": 34}
my_prot = Seq("AAAAA", IUPAC.protein) # Alanine pentapeptide
my_prot
# + id="jdgRxL6qtgR0" colab_type="code" outputId="08119aad-7aa6-4346-b81b-fd23f636f531" colab={"base_uri": "https://localhost:8080/", "height": 34}
my_prot.alphabet
# + [markdown] id="pTPKw7cHtgR3" colab_type="text"
# We can take the length of sequences and index into them like strings.
# + id="OkY6Tx60tgR4" colab_type="code" outputId="302f7833-2068-428a-a7fc-5431ee7bfd2c" colab={"base_uri": "https://localhost:8080/", "height": 34}
print(len(my_prot))
# + id="YSOUpm8FtgR8" colab_type="code" outputId="eca74488-5978-425b-9df1-7a28e0a525bd" colab={"base_uri": "https://localhost:8080/", "height": 34}
my_prot[0]
# + [markdown] id="PdQ8weemtgR_" colab_type="text"
# You can also use slice notation on sequences to get subsequences.
# + id="U5v3swWFtgSA" colab_type="code" outputId="f1fdd7bf-c504-4177-c22c-28ffa64466b6" colab={"base_uri": "https://localhost:8080/", "height": 34}
my_prot[0:3]
# + [markdown] id="Ng_LjVQ6tgSI" colab_type="raw"
# You can concatenate sequences if they have the same type, so this works.
# + id="ZG77QUj2tgSJ" colab_type="code" outputId="d9242318-f133-44b7-c7cd-bd6e21ab3d54" colab={"base_uri": "https://localhost:8080/", "height": 34}
my_prot + my_prot
# + [markdown] id="y2XacTYttgSM" colab_type="text"
# But this fails
# + id="MZ53Yjr1tgSO" colab_type="code" outputId="ca95ef4f-cdf6-4c72-c632-926e5b6b572e" colab={"base_uri": "https://localhost:8080/", "height": 287}
my_prot + my_seq
# + [markdown] id="_Z-KdC2WtgSR" colab_type="text"
# ## Transcription
#
# Transcription is the process by which a DNA sequence is converted into messenger RNA. Remember that this is part of the "central dogma" of biology in which DNA engenders messenger RNA which engenders proteins. Here's a nice representation of this cycle borrowed from a Khan academy [lesson](https://cdn.kastatic.org/ka-perseus-images/20ce29384b2e7ff0cdea72acaa5b1dbd7287ab00.png).
#
# <img src="https://cdn.kastatic.org/ka-perseus-images/20ce29384b2e7ff0cdea72acaa5b1dbd7287ab00.png">
# + [markdown] id="1ZjlCDDmtgSU" colab_type="text"
# Note from the image above that DNA has two strands. The top strand is typically called the coding strand, and the bottom the template strand. The template strand is used for the actual transcription process of conversion into messenger RNA, but in bioinformatics, it's more common to work with the coding strand. Let's now see how we can execute a transcription computationally using Biopython.
# + id="TvPiRx_0tgSU" colab_type="code" outputId="5dad0985-4ba9-4509-d918-5166a852e241" colab={"base_uri": "https://localhost:8080/", "height": 34}
from Bio.Seq import Seq
from Bio.Alphabet import IUPAC
coding_dna = Seq("ATGATCTCGTAA", IUPAC.unambiguous_dna)
coding_dna
# + id="arGizrBztgSX" colab_type="code" outputId="998c3c72-7ac3-40c2-9075-80d3b1ce7b6a" colab={"base_uri": "https://localhost:8080/", "height": 34}
template_dna = coding_dna.reverse_complement()
template_dna
# + [markdown] id="x8FjupA9tgSa" colab_type="text"
# Note that these sequences match those in the image above. You might be confused about why the `template_dna` sequence is shown reversed. The reason is that by convention, the template strand is read in the reverse direction.
#
# Let's now see how we can transcribe our `coding_dna` strand into messenger RNA. This will only swap 'T' for 'U' and change the alphabet for our object.
# + id="oo8bBugUtgSa" colab_type="code" outputId="f3124064-c9a5-4c7b-a3a5-5f2660068df1" colab={"base_uri": "https://localhost:8080/", "height": 34}
messenger_rna = coding_dna.transcribe()
messenger_rna
# + [markdown] id="6UTMKVfAtgSe" colab_type="text"
# We can also perform a "back-transcription" to recover the original coding strand from the messenger RNA.
# + id="edClUMputgSf" colab_type="code" outputId="55b1fb1a-72dd-4754-c168-af2c67b61766" colab={"base_uri": "https://localhost:8080/", "height": 34}
messenger_rna.back_transcribe()
# + [markdown] id="0GqZSdFptgSk" colab_type="text"
# ## Translation
#
# Translation is the next step in the process, whereby a messenger RNA is transformed into a protein sequence. Here's a beautiful diagram [from Wikipedia](https://en.wikipedia.org/wiki/Translation_(biology)#/media/File:Ribosome_mRNA_translation_en.svg) that lays out the basics of this process.
#
# <img src="https://upload.wikimedia.org/wikipedia/commons/thumb/b/b1/Ribosome_mRNA_translation_en.svg/1000px-Ribosome_mRNA_translation_en.svg.png">
#
# Note how 3 nucleotides at a time correspond to one new amino acid added to the growing protein chain. A set of 3 nucleotides which codes for a given amino acid is called a "codon." We can use the `translate()` method on the messenger rna to perform this transformation in code.
# + [markdown] id="7K_pm48HtgSm" colab_type="text"
# messenger_rna.translate()
# + [markdown] id="IiUYHWmRtgSm" colab_type="text"
# The translation can also be performed directly from the coding sequence DNA
# + id="cy8y6y9CtgSn" colab_type="code" outputId="b1fbfcb2-dfd3-4ab9-d102-1d6bb110af7a" colab={"base_uri": "https://localhost:8080/", "height": 34}
coding_dna.translate()
# + [markdown] id="6hHsJnfQtgSq" colab_type="text"
# Let's now consider a longer genetic sequence that has some more interesting structure for us to look at.
# + id="iwpB4lYatgSs" colab_type="code" outputId="12cbe03d-14a5-4c51-cc22-b2b6f0398018" colab={"base_uri": "https://localhost:8080/", "height": 34}
coding_dna = Seq("ATGGCCATTGTAATGGGCCGCTGAAAGGGTGCCCGATAG", IUPAC.unambiguous_dna)
coding_dna.translate()
# + [markdown] id="Ulq6Gc06tgSv" colab_type="text"
# In both of the sequences above, '*' represents the [stop codon](https://en.wikipedia.org/wiki/Stop_codon). A stop codon is a sequence of 3 nucleotides that turns off the protein machinery. In DNA, the stop codons are 'TGA', 'TAA', 'TAG'. Note that this latest sequence has multiple stop codons. It's possible to run the machinery up to the first stop codon and pause too.
# + id="6uScm61FtgSw" colab_type="code" outputId="254f7d2f-17e6-496e-fccd-3986cd3f0631" colab={"base_uri": "https://localhost:8080/", "height": 34}
coding_dna.translate(to_stop=True)
# + [markdown] id="aVLT-471tgS2" colab_type="text"
# We're going to introduce a bit of terminology here. A complete coding sequence CDS is a nucleotide sequence of messenger RNA which is made of a whole number of codons (that is, the length of the sequence is a multiple of 3), starts with a "start codon" and ends with a "stop codon". A start codon is basically the opposite of a stop codon and is mostly commonly the sequence "AUG", but can be different (especially if you're dealing with something like bacterial DNA).
#
# Let's see how we can translate a complete CDS of bacterial messenger RNA.
# + id="iy9-Co_WtgS3" colab_type="code" outputId="447eea41-332a-45f3-e831-6cde61bbab86" colab={"base_uri": "https://localhost:8080/", "height": 34}
from Bio.Alphabet import generic_dna
gene = Seq("GTGAAAAAGATGCAATCTATCGTACTCGCACTTTCCCTGGTTCTGGTCGCTCCCATGGCA" + \
"GCACAGGCTGCGGAAATTACGTTAGTCCCGTCAGTAAAATTACAGATAGGCGATCGTGAT" + \
"AATCGTGGCTATTACTGGGATGGAGGTCACTGGCGCGACCACGGCTGGTGGAAACAACAT" + \
"TATGAATGGCGAGGCAATCGCTGGCACCTACACGGACCGCCGCCACCGCCGCGCCACCAT" + \
"AAGAAAGCTCCTCATGATCATCACGGCGGTCATGGTCCAGGCAAACATCACCGCTAA",
generic_dna)
# We specify a "table" to use a different translation table for bacterial proteins
gene.translate(table="Bacterial")
# + id="yWmqHt3GtgS6" colab_type="code" outputId="ee9dc0ee-bd5c-4ff0-a1e9-bdbf50840005" colab={"base_uri": "https://localhost:8080/", "height": 34}
gene.translate(table="Bacterial", to_stop=True)
# + [markdown] id="hqQlZA2dtgS8" colab_type="text"
# # Handling Annotated Sequences
#
# Sometimes it will be useful for us to be able to handle annotated sequences where there's richer annotations, as in GenBank or EMBL files. For these purposes, we'll want to use the `SeqRecord` class.
# + id="nnHQ_fObtgS9" colab_type="code" outputId="446e3606-18d9-434c-87cf-81483a3b146c" colab={"base_uri": "https://localhost:8080/", "height": 1000}
from Bio.SeqRecord import SeqRecord
help(SeqRecord)
# + [markdown] id="20ZEptbZtgTC" colab_type="text"
# Let's write a bit of code involving `SeqRecord` and see how it comes out looking.
# + id="yD3E6wrYtgTC" colab_type="code" colab={}
from Bio.SeqRecord import SeqRecord
simple_seq = Seq("GATC")
simple_seq_r = SeqRecord(simple_seq)
# + id="3FItR96PtgTG" colab_type="code" outputId="7be1b5fd-9029-48e7-915a-d8dd73fcb346" colab={"base_uri": "https://localhost:8080/", "height": 51}
simple_seq_r.id = "AC12345"
simple_seq_r.description = "Made up sequence"
print(simple_seq_r.id)
print(simple_seq_r.description)
# + [markdown] id="cxAH3YE0tgTK" colab_type="text"
# Let's now see how we can use `SeqRecord` to parse a large fasta file. We'll pull down a file hosted on the biopython site.
# + id="vNxAQJkqtgTL" colab_type="code" outputId="127a850c-6681-439b-eb71-e29030beff3e" colab={"base_uri": "https://localhost:8080/", "height": 204}
# !wget https://raw.githubusercontent.com/biopython/biopython/master/Tests/GenBank/NC_005816.fna
# + id="mvFt3fVqtgTP" colab_type="code" outputId="3b1c7a3f-9f60-4aac-ef8e-2667a80327d1" colab={"base_uri": "https://localhost:8080/", "height": 54}
from Bio import SeqIO
record = SeqIO.read("NC_005816.fna", "fasta")
record
# + [markdown] id="5_fCYXkttgTW" colab_type="text"
# Note how there's a number of annotations attached to the `SeqRecord` object!
#
# Let's take a closer look.
# + id="N7OdmewwtgTa" colab_type="code" outputId="9184860d-7abf-4db4-e6b7-8167fc7f4240" colab={"base_uri": "https://localhost:8080/", "height": 34}
record.id
# + id="156aQviwtgTd" colab_type="code" outputId="dff034ee-633e-473c-94cf-e85b7f6d38d7" colab={"base_uri": "https://localhost:8080/", "height": 34}
record.name
# + id="Ov2neH1XtgTk" colab_type="code" outputId="3f3e85a3-3d56-4e04-9fa0-1f1c3f897991" colab={"base_uri": "https://localhost:8080/", "height": 34}
record.description
# + [markdown] id="HTOwKix8tgTr" colab_type="text"
# Let's now look at the same sequence, but downloaded from GenBank. We'll download the hosted file from the biopython tutorial website as before.
# + id="LpqMN5Z_tgTs" colab_type="code" outputId="8f2d3366-4aba-4182-a922-105ded3c4bfb" colab={"base_uri": "https://localhost:8080/", "height": 204}
# !wget https://raw.githubusercontent.com/biopython/biopython/master/Tests/GenBank/NC_005816.gb
# + id="PhalU4PRtgTw" colab_type="code" outputId="dd19f359-9385-4a2c-89fb-61b2eec70081" colab={"base_uri": "https://localhost:8080/", "height": 54}
from Bio import SeqIO
record = SeqIO.read("NC_005816.gb", "genbank")
record
# + [markdown] id="inhAKrVItgT2" colab_type="text"
# ## SeqIO Objects
#
# TODO(rbharath): Continue filling this up in future PRs.
# + id="okON0bUHtgT6" colab_type="code" colab={}
| examples/tutorials/21_Introduction_to_Bioinformatics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Basic KB-NUFFT Example
#
# This notebook implements a basic KB-NUFFT example. It takes a Shepp-Logan phantom, applies the forward NUFFT, and then the adjoint NUFFT. An alternative processing path is to apply the density compensation function prior to adjoint NUFFT. Both of these examples are included here.
#
# ### Note
#
# This notebook uses the shepp_logan_phantom from scikit-image, which is included in scikit-image v0.16. The standard Anaconda scikit-image as of this writing is 0.15. To use this notebook, you'll need to upgrade scikit-image to v0.16, e.g. ```conda install -c conda-forge scikit-image=0.16```.
#
# ### References
#
# <NAME>., & <NAME>. (2003). Nonuniform fast Fourier transforms using min-max interpolation. IEEE transactions on signal processing, 51(2), 560-574.
#
# <NAME>., <NAME>., & <NAME>. (2005). Rapid gridding reconstruction with a minimal oversampling ratio. IEEE transactions on medical imaging, 24(6), 799-808.
# +
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from skimage.data import shepp_logan_phantom
from tfkbnufft import kbnufft_forward, kbnufft_adjoint
from tfkbnufft.kbnufft import KbNufftModule
from tfkbnufft.mri.dcomp_calc import calculate_radial_dcomp_tf, calculate_density_compensator
dtype = tf.float32
# -
# create a simple shepp logan phantom and plot it
image = shepp_logan_phantom().astype(complex)
im_size = image.shape
plt.imshow(np.absolute(image))
plt.gray()
plt.title('Shepp-Logan Phantom')
plt.show()
# convert the phantom to a tensor and unsqueeze coil and batch dimension
image = tf.convert_to_tensor(image)[None, None, ...]
print('image shape: {}'.format(image.shape))
# +
# create a k-space trajectory and plot it
spokelength = image.shape[-1] * 2
grid_size = (spokelength, spokelength)
nspokes = 405
ga = np.deg2rad(180 / ((1 + np.sqrt(5)) / 2))
kx = np.zeros(shape=(spokelength, nspokes))
ky = np.zeros(shape=(spokelength, nspokes))
ky[:, 0] = np.linspace(-np.pi, np.pi, spokelength)
for i in range(1, nspokes):
kx[:, i] = np.cos(ga) * kx[:, i - 1] - np.sin(ga) * ky[:, i - 1]
ky[:, i] = np.sin(ga) * kx[:, i - 1] + np.cos(ga) * ky[:, i - 1]
ky = np.transpose(ky)
kx = np.transpose(kx)
ktraj = np.stack((ky.flatten(), kx.flatten()), axis=0)
# plot the first 40 spokes
plt.plot(kx[:40, :].transpose(), ky[:40, :].transpose())
plt.axis('equal')
plt.title('k-space trajectory (first 40 spokes)')
plt.show()
# -
# convert k-space trajectory to a tensor and unsqueeze batch dimension
ktraj = tf.convert_to_tensor(ktraj)[None, ...]
print('ktraj shape: {}'.format(ktraj.shape))
# +
# create NUFFT objects, use 'ortho' for orthogonal FFTs
nufft_ob = KbNufftModule(im_size=im_size, grid_size=grid_size, norm='ortho')
print(nufft_ob)
# +
# plot the kernel
fig, axs = plt.subplots(1, 2)
axs.flat[0].plot(np.real(nufft_ob.table[0]))
axs.flat[1].plot(np.imag(nufft_ob.table[0]))
plt.show()
# -
# # %%debug
# calculate k-space data
kdata = kbnufft_forward(nufft_ob._extract_nufft_interpob())(image, ktraj)
# add some noise (robustness test)
siglevel = tf.reduce_mean(tf.math.abs(kdata))
kdata = kdata + tf.cast((siglevel/5) *tf.random.normal(kdata.shape, dtype=siglevel.dtype), kdata.dtype)
# plot the k-space data on log-scale
kdata_numpy = np.reshape(kdata.numpy(), (nspokes, spokelength))
plt.imshow(np.log10(np.absolute(kdata_numpy)))
plt.gray()
plt.title('k-space data, log10 scale')
plt.show()
# +
# adjnufft back
# method 1: no density compensation (blurry image)
interpob = nufft_ob._extract_nufft_interpob()
nufft_adj = kbnufft_adjoint(interpob)
image_blurry = nufft_adj(kdata, ktraj)
# method 2: use density compensation
dcomp = calculate_radial_dcomp_tf(interpob, kbnufft_forward(interpob), nufft_adj, ktraj[0])[None, :]
dcomp_new = calculate_density_compensator(interpob, kbnufft_forward(interpob), nufft_adj, ktraj[0])[None, :]
image_sharp = nufft_adj(kdata * tf.cast(dcomp, kdata.dtype), ktraj)
image_sharp_new = nufft_adj(kdata * tf.cast(dcomp_new, kdata.dtype), ktraj)
# +
# show the images
image_blurry_numpy = np.squeeze(image_blurry.numpy())
image_sharp_numpy = np.squeeze(image_sharp.numpy())
image_sharp_new_numpy = np.squeeze(image_sharp_new.numpy())
plt.figure(0)
plt.imshow(np.absolute(image_blurry_numpy))
plt.gray()
plt.title('blurry image')
plt.figure(1)
plt.imshow(np.absolute(image_sharp_numpy))
plt.gray()
plt.title('sharp image (with density compensation)')
plt.figure(2)
plt.imshow(np.absolute(image_sharp_new_numpy))
plt.gray()
plt.title('sharp image (with density compensation from Pipe et al)')
plt.show()
| notebooks/basic_examples_tf.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:generic_expression] *
# language: python
# name: conda-env-generic_expression-py
# ---
# # Visualization of template experiment
# This notebook plots the gene expression data of the template experiment in order to confirm the strength of the differential signal, since we will be performing a DE analysis downstream.
# +
# %load_ext autoreload
# %autoreload 2
import os
import sys
import pandas as pd
import numpy as np
import random
import umap
from plotnine import (ggplot,
labs,
geom_point,
aes,
ggsave,
theme_bw,
theme,
facet_wrap,
scale_color_manual,
guides,
guide_legend,
element_blank,
element_text,
element_rect,
element_line,
coords)
from ponyo import utils
np.random.seed(123)
# -
# Read in config variables
base_dir = os.path.abspath(os.path.join(os.getcwd(),"../"))
config_file = os.path.abspath(os.path.join(base_dir,
"config_pseudomonas.tsv"))
params = utils.read_config(config_file)
# Load parameters
local_dir = params["local_dir"]
dataset_name = params['dataset_name']
project_id = params['project_id']
template_data_file = params['template_data_file']
# Load metadata file with grouping assignments for samples
metadata_file = os.path.join(
base_dir,
dataset_name,
"data",
"metadata",
project_id+"_groups.tsv")
# +
# Read template data
data = pd.read_csv(template_data_file, sep="\t", header=0, index_col=0)
data.head()
# +
# Read metadata
metadata = pd.read_csv(metadata_file, sep="\t", header=0, index_col=0)
metadata.head()
# +
# Embed expression data into low dimensional space
model = umap.UMAP(random_state=123).fit(data)
data_encoded = model.transform(data)
data_encoded_df = pd.DataFrame(data=data_encoded,
index=data.index,
columns=['1','2'])
# +
# Label samples
group1_ids = list(metadata[metadata['group']==1].index)
#data_encoded_df['group'] = 'clinical multi-drug resistant'
#data_encoded_df.loc[group1_ids,'group'] = 'clinical'
#data_encoded_df.loc['GSM625982.CEL','group'] = 'control'
data_encoded_df['group'] = 'untreated'
data_encoded_df.loc[group1_ids,'group'] = 'treated with tobramycin'
# -
data_encoded_df.head()
# +
# Plot PAO1
fig = ggplot(data_encoded_df, aes(x='1', y='2'))
fig += geom_point(aes(color='group'), alpha=0.7)
fig += labs(x ='UMAP 1',
y = 'UMAP 2',
title = 'Gene expression of template experiment')
fig += theme_bw()
fig += theme(
legend_title_align = "center",
plot_background=element_rect(fill='white'),
legend_key=element_rect(fill='white', colour='white'),
legend_title=element_text(family='sans-serif', size=15),
legend_text=element_text(family='sans-serif', size=12),
plot_title=element_text(family='sans-serif', size=15),
axis_text=element_text(family='sans-serif', size=12),
axis_title=element_text(family='sans-serif', size=15)
)
fig += guides(colour=guide_legend(override_aes={'alpha': 1}))
print(fig)
# -
# **Observation:** We see a good separation between the treated and untreated samples. We expect this template experiment to provide a fairly strong differential signal in our DE analysis.
| pseudomonas_analysis/archive/Viz_template_experiment.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Werner-Holevo with Siddhu CV PPT Non-Multiplicativity
#
# This notebook generates the plot showing the non-multiplicativity of the PPT communication value of the
# 3-dimensional Werner-Holevo channel run in parallel with the Siddhu channel. That is, we consider $cv^{PPT}(\mathcal{N}_{WH,3,\lambda}\otimes \mathcal{N}_{s})$ where $\mathcal{N}_{WH,d,\lambda}$ is the $d$-dimensional Werner-Holevo channel and $\mathcal{N}_{s}$ is the Siddhu channel. Specifically, we see that for any $s$ the non-multiplicativity is the same. <br />
#
# All data is stored in `*/CVChannel.jl/notebook/plot_data`.
# +
#We use numpy rather than csv because csv reads in csv files as arrays of strings
import matplotlib.pyplot as plt
import numpy as np
import os
file_base = os.getcwd()+"/plot_data/"
file_to_read = os.path.abspath(file_base+"/werner-holevo-with-siddhu-data.csv")
data = np.genfromtxt(file_to_read,delimiter=',',usecols=np.arange(0,15)) #This gets rid of info column
x_axis = data[0,1:]
for i in range(0,6):
plt.plot(x_axis,data[i+1,1:], linestyle = (0, (1, 2*i+1)), lw=3, label ='$s$='+str(i/10))
plt.title('Non-Multiplicativity of $\mathcal{N}_{WH,3,\lambda} \otimes \mathcal{N}_{p,q}$ for $\lambda$='+str((5*i)/100))
plt.ylabel('Difference')
plt.xlabel('$q$ parameter')
plt.xlim(0, 0.35)
plt.ylim(0, 0.45)
plt.legend()
#plt.savefig("cv-versus-cvppt-of-WH-with-identity.jpg")
plt.figure(dpi=1200)
plt.show()
| notebook/non-multiplicativity_of_werner_holevo_with_siddhu.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF,WhiteKernel,DotProduct
import pandas as pd
import torch
# +
from snorkel import SnorkelSession
from snorkel.models import candidate_subclass
from snorkel.annotations import load_gold_labels
from snorkel.learning.pytorch import LSTM
from snorkel.annotations import load_marginals
from scipy import vstack
session = SnorkelSession()
# +
ChemicalDisease = candidate_subclass('ChemicalDisease', ['chemical', 'disease'])
train = session.query(ChemicalDisease).filter(ChemicalDisease.split == 0).all()
dev = session.query(ChemicalDisease).filter(ChemicalDisease.split == 1).all()
test = session.query(ChemicalDisease).filter(ChemicalDisease.split == 2).all()
print('Training set:\t{0} candidates'.format(len(train)))
print('Dev set:\t{0} candidates'.format(len(dev)))
print('Test set:\t{0} candidates'.format(len(test)))
# -
train_marginals = load_marginals(session, split=0)
from load_external_annotations import load_external_labels
load_external_labels(session, ChemicalDisease, split=2, annotator='gold')
L_gold_test = load_gold_labels(session, annotator_name='gold', split=2)
L_gold_dev = load_gold_labels(session,annotator_name='gold',split=1)
L_gold_dev.shape
dev_labels = L_gold_dev.toarray().reshape(920,)
dev_labels[dev_labels == -1] = 0
full_train_set = train.copy()
full_train_set.extend(dev)
full_train_labels = list(train_marginals).copy()
full_train_labels.extend(dev_labels)
full_train_labels = np.array(full_train_labels)
len(full_train_set)
# why is it dropping so many data points?
# +
train_kwargs = {
'lr': 0.01,
'embedding_dim': 100,
'hidden_dim': 100,
'n_epochs': 100,
'dropout': 0.5,
'rebalance': .25,
'print_freq': 5,
'seed': 1701,
'num_layers': 5,
}
lstm = LSTM(n_threads=None)
lstm.train(full_train_set, full_train_labels, X_dev=dev, Y_dev=L_gold_dev, **train_kwargs)
# -
lstm.save('trained_on_all_5_layers')
lstm.score(test, L_gold_test)
dev_features = lstm.feature_outputs(dev, 100)
train_features = lstm.feature_outputs(train, 100)
kernel_dev = RBF(1) + WhiteKernel(1)
kernel_train = RBF(1) + WhiteKernel(1)
gpc_dev = GaussianProcessClassifier(kernel = kernel_dev)
gpc_train = GaussianProcessClassifier(kernel = kernel_train)
train_labels = train_marginals.copy()
train_labels[train_labels > .5] = 1
train_labels[train_labels <= .5] = 0
gpc_dev.fit(dev_features.detach().numpy().reshape(920,10), dev_labels.reshape(920,))
gpc_train.fit(train_features.detach().numpy().reshape(8439,10), train_labels.reshape(8439,))
test_features = lstm.feature_outputs(test,100)
gpc_dev.kernel_
gpc_train.kernel_
ud = []
ut = []
preds_d = gpc_dev.predict_proba(test_features.detach().numpy().reshape(4687,10), uncertainty = ud)
preds_t = gpc_train.predict_proba(test_features.detach().numpy().reshape(4687,10), uncertainty = ut)
ud = ud[0]
ut = ut[0]
gpc_dev.classes_
# what is the order of the probabilities? I think I was probably doing it backwards actually.
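# scikit-learn orders the columns of predict_proba to match classes_; assuming this
# (possibly modified) classifier keeps that convention, with classes_ == [0, 1] the first
# column is P(y=0) and the second is P(y=1). A quick sanity check (sketch):
dict(zip(gpc_dev.classes_, preds_d[0]))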
pd, pt = [],[]
for x in preds_d:
if x[0] > .5:
pd.append(0)
else:
pd.append(1)
for x in preds_t:
if x[0] > .5:
pt.append(0)
else:
pt.append(1)
ud = np.array(ud)
ut = np.array(ut)
ud.mean()
ud.max()
ut.mean()
ut.max()
test_y = L_gold_test.toarray().reshape(4687,)
test_y[test_y == -1] = 0
buckets = np.linspace(0,.4,15)
f1scores = []
for i in range(14):
count = 0
tp,fp,tn,fn = 0,0,0,0
for j,p in enumerate(test_y):
if ud[j] >= buckets[i] and ud[j] < buckets[i+1]:
count += 1
if p == 0 and pd[j] == 0:
tn += 1
elif p == 0 and pd[j] == 1:
fp += 1
elif p == 1 and pd[j] == 0:
fn += 1
else: #p == 1 and preds[j] == 1:
tp += 1
try:
p = tp / (tp + fp)
except:
p = 0
try:
r = tp / (tp + fn)
except:
r = 0
try:
f1 = 2/(1/p + 1/r)
except:
f1 = 0
f1scores.append(f1)
print ("Bucket {}: \n Count: {}\n F1: {}\n{},{},{},{}".format(buckets[i+1], count, f1,tp,fp,tn,fn))
plt.bar(np.arange(len(f1scores)),f1scores)
f1scores = []
for i in range(14):
count = 0
tp,fp,tn,fn = 0,0,0,0
for j,p in enumerate(test_y):
if ut[j] >= buckets[i] and ut[j] < buckets[i+1]:
count += 1
if p == 0 and pt[j] == 0:
tn += 1
elif p == 0 and pt[j] == 1:
fp += 1
elif p == 1 and pt[j] == 0:
fn += 1
else: #p == 1 and preds[j] == 1:
tp += 1
try:
p = tp / (tp + fp)
except:
p = 0
try:
r = tp / (tp + fn)
except:
r = 0
try:
f1 = 2/(1/p + 1/r)
except:
f1 = 0
f1scores.append(f1)
print ("Bucket {}: \n Count: {}\n F1: {}\n{},{},{},{}".format(buckets[i+1], count, f1,tp,fp,tn,fn))
plt.bar(np.arange(len(f1scores)),f1scores)
tp,fp,tn,fn = 0,0,0,0
for j,p in enumerate (test_y):
if p == 0 and pt[j] == 0:
tn += 1
elif p == 0 and pt[j] == 1:
fp += 1
elif p == 1 and pt[j] == 0:
fn += 1
else: #p == 1 and preds[j] == 1:
tp += 1
try:
p = tp / (tp + fp)
except:
p = 0
try:
r = tp / (tp + fn)
except:
r = 0
try:
f1 = 2/(1/p + 1/r)
except:
f1 = 0
f1
p
dev_cov= np.matmul(dev_features.detach().numpy().reshape(920,10).transpose(), dev_features.detach().numpy().reshape(920,10))
np.linalg.svd(dev_cov)
gpc_dev.score(dev_features.detach().numpy().reshape(920,10), dev_labels)
gpc_train.score(train_features.detach().numpy().reshape(8439,10), train_labels)
gpc_dev.kernel_
gpc_train.kernel_
| tutorials/cdr/testing 1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Anchor explanations for fashion MNIST
import matplotlib
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR) # suppress deprecation messages
from tensorflow.keras.layers import Conv2D, Dense, Dropout, Flatten, MaxPooling2D, Input
from tensorflow.keras.models import Model
from tensorflow.keras.utils import to_categorical
from alibi.explainers import AnchorImage
# ### Load and prepare fashion MNIST data
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()
print('x_train shape:', x_train.shape, 'y_train shape:', y_train.shape)
idx = 0
plt.imshow(x_train[idx]);
# Scale, reshape and categorize data
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
x_train = np.reshape(x_train, x_train.shape + (1,))
x_test = np.reshape(x_test, x_test.shape + (1,))
print('x_train shape:', x_train.shape, 'x_test shape:', x_test.shape)
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
print('y_train shape:', y_train.shape, 'y_test shape:', y_test.shape)
# ### Define CNN model
def model():
x_in = Input(shape=(28, 28, 1))
x = Conv2D(filters=64, kernel_size=2, padding='same', activation='relu')(x_in)
x = MaxPooling2D(pool_size=2)(x)
x = Dropout(0.3)(x)
x = Conv2D(filters=32, kernel_size=2, padding='same', activation='relu')(x)
x = MaxPooling2D(pool_size=2)(x)
x = Dropout(0.3)(x)
x = Flatten()(x)
x = Dense(256, activation='relu')(x)
x = Dropout(0.5)(x)
x_out = Dense(10, activation='softmax')(x)
cnn = Model(inputs=x_in, outputs=x_out)
cnn.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
return cnn
cnn = model()
cnn.summary()
# ### Train model
cnn.fit(x_train, y_train, batch_size=64, epochs=3)
# Evaluate the model on test set
score = cnn.evaluate(x_test, y_test, verbose=0)
print('Test accuracy: ', score[1])
# ### Define superpixels
#
# Function to generate rectangular superpixels for a given image. Alternatively, use one of the built-in methods. It is important to have meaningful superpixels in order to generate a useful explanation. Please check scikit-image's [segmentation methods](http://scikit-image.org/docs/dev/api/skimage.segmentation.html) (*felzenszwalb*, *slic* and *quickshift* are built into the explainer) for more information on the built-in methods.
def superpixel(image, size=(4, 7)):
segments = np.zeros([image.shape[0], image.shape[1]])
row_idx, col_idx = np.where(segments == 0)
for i, j in zip(row_idx, col_idx):
segments[i, j] = int((image.shape[1]/size[1]) * (i//size[0]) + j//size[1])
return segments
segments = superpixel(x_train[idx])
plt.imshow(segments);
# ### Define prediction function
predict_fn = lambda x: cnn.predict(x)
# ### Initialize anchor image explainer
image_shape = x_train[idx].shape
explainer = AnchorImage(predict_fn, image_shape, segmentation_fn=superpixel)
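# As an alternative (a sketch using the built-in options mentioned above), the explainer can also be
# initialized with one of the built-in segmentation methods and its keyword arguments, e.g. slic:
explainer_slic = AnchorImage(predict_fn, image_shape, segmentation_fn='slic',
                             segmentation_kwargs={'n_segments': 15, 'compactness': 20, 'sigma': .5})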
# ### Explain a prediction
#
# The explanation returns a mask with the superpixels that constitute the anchor.
#
# Image to be explained:
i = 1
image = x_test[i]
plt.imshow(image[:,:,0]);
# Model prediction:
cnn.predict(image.reshape(1, 28, 28, 1)).argmax()
# The predicted category correctly corresponds to the class `Pullover`:
#
# | Label | Description |
# | --- | --- |
# | 0 | T-shirt/top |
# | 1 | Trouser |
# | 2 | Pullover |
# | 3 | Dress |
# | 4 | Coat |
# | 5 | Sandal |
# | 6 | Shirt |
# | 7 | Sneaker |
# | 8 | Bag |
# | 9 | Ankle boot |
# Generate explanation:
explanation = explainer.explain(image, threshold=.95, p_sample=.8, seed=0)
# Show anchor:
plt.imshow(explanation.anchor[:,:,0]);
# From the example, it looks like the end of the sleeve alone is sufficient to predict a pullover.
| examples/anchor_image_fashion_mnist.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Note
# * Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
# +
# Dependencies and Setup
import pandas as pd
# File to Load (Remember to Change These)
school_data_to_load = "Resources/schools_complete.csv"
student_data_to_load = "Resources/students_complete.csv"
# Read School and Student Data File and store into Pandas DataFrames
school_data = pd.read_csv(school_data_to_load)
student_data = pd.read_csv(student_data_to_load)
# Combine the data into a single dataset.
school_data_complete = pd.merge(student_data, school_data, how="left", on=["school_name", "school_name"])
school_data_complete.head(2)
# -
# ## District Summary
#
# * Calculate the total number of schools
#
# * Calculate the total number of students
#
# * Calculate the total budget
#
# * Calculate the average math score
#
# * Calculate the average reading score
#
# * Calculate the percentage of students with a passing math score (70 or greater)
#
# * Calculate the percentage of students with a passing reading score (70 or greater)
#
# * Calculate the percentage of students who passed math **and** reading (% Overall Passing)
#
# * Create a dataframe to hold the above results
#
# * Optional: give the displayed data cleaner formatting
#Calculate the total number of schools
Total_Schools = school_data_complete["school_name"].nunique()
Total_Schools
#Calculate the total number of students
Total_Students = school_data_complete["Student ID"].count()
Total_Students
#Calculate the total budget
Total_Budget = school_data["budget"].sum()
Total_Budget
#Calculate the average math score
#Calculate the average reading score
Average_Math_Score = school_data_complete["math_score"].mean()
Average_Reading_Score = school_data_complete["reading_score"].mean()
print(f"{Average_Math_Score} {Average_Reading_Score}")
# % Passing Math based on 70
math_pass = school_data_complete[(school_data_complete["math_score"]>=70)]
count_pass_math = math_pass["math_score"].count()
per_math_pass = (count_pass_math/Total_Students)*100
print(per_math_pass)
math_pass = school_data_complete[(school_data_complete["math_score"]>=70)]
math_pass.head(3)
# % Passing reading based on 70
reading_pass = school_data_complete[(school_data_complete["reading_score"]>=70)]
count_pass_reading = reading_pass["reading_score"].count()
per_reading_pass = (count_pass_reading/Total_Students)*100
print(per_reading_pass)
reading_pass = school_data_complete[(school_data_complete["reading_score"]>=70)]
reading_pass.head(3)
# Calculate the percentage of students who passed math and reading (% Overall Passing)
passed_both = school_data_complete[(school_data_complete["math_score"] >= 70) &
                                   (school_data_complete["reading_score"] >= 70)]
overall_pass = (passed_both["Student ID"].count() / Total_Students) * 100
print(overall_pass)
# +
District_Summary_df = pd.DataFrame({
"Total Schools": [Total_Schools],
"Total Students": [Total_Students],
"Total Budget": [Total_Budget],
"Average Math Score": [Average_Math_Score],
"Average Reading Score": [Average_Reading_Score],
"% Passing Math": [per_math_pass],
"% Passing Reading": [per_reading_pass],
"% Overall Passing": [overall_pass]
})
District_Summary_df["Total Students"] = District_Summary_df["Total Students"].map("{:,}".format)
District_Summary_df["Total Budget"] = District_Summary_df["Total Budget"].map("${:,.2f}".format)
District_Summary_df["Average Math Score"] = District_Summary_df["Average Math Score"].map("{:,.2f}".format)
District_Summary_df
# -
# ## School Summary
# Create groupby variable to group everything by school
group = school_data_complete.groupby("school_name")
group
#Group school data complete by school name
for school_name, school_name_df in group:
print(school_name)
print(school_name_df)
Total_Students_per_school = school_data_complete.groupby(["school_name"])["student_name"].count()
Total_Students_per_school
Sum_Budget = school_data_complete.groupby(["school_name"])["budget"].sum()
Sum_Budget
Student_Budget_per_school = Sum_Budget / Total_Students_per_school
Student_Budget_per_school
#Average Math Score
Mean_Math_Score_per_school = school_data_complete.groupby(["school_name"])["math_score"].mean()
Mean_Math_Score_per_school
#Average Reading Score
Mean_Reading_Score_per_school = school_data_complete.groupby(["school_name"])["reading_score"].mean()
Mean_Reading_Score_per_school
# % Percentage Passing Math
school_passing_math = school_data_complete[(school_data_complete["math_score"] >= 70)]
Passing_Math_per_school = school_passing_math.groupby(["school_name"])["student_name"].count()
Percentage_Passing_Math_per_school = (Passing_Math_per_school / Total_Students_per_school) * 100
Percentage_Passing_Math_per_school
# % Passing Reading
school_passing_reading = school_data_complete[(school_data_complete["reading_score"] >= 70)]
Passing_Reading_per_school = school_passing_reading.groupby(["school_name"])["student_name"].count()
Percentage_Passing_Reading_per_school = (Passing_Reading_per_school / Total_Students_per_school) * 100
Percentage_Passing_Reading_per_school
#% Overall Passing (The percentage of students that passed math **and** reading.)
school_passing_math_and_reading = school_data_complete[(school_data_complete["reading_score"] >= 70)
& (school_data_complete["math_score"] >= 70)]
Overall_Passing_per_school = school_passing_math_and_reading.groupby(["school_name"])["student_name"].count()
Percentage_Overall_Passing_per_school = (Overall_Passing_per_school / Total_Students_per_school) * 100
Percentage_Overall_Passing_per_school
#Create a dataframe to hold the above results
School_Summary_df = pd.DataFrame({
"School Name": school_name,
"Total Students": Total_Students_per_school,
"Total School Budget": Sum_Budget,
"Per Student Budget": Student_Budget_per_school,
"Average Math Score": Mean_Math_Score_per_school,
"Average Reading Score": Mean_Reading_Score_per_school,
"% Passing Math": Percentage_Passing_Math_per_school,
"% Passing Reading": Percentage_Passing_Reading_per_school,
"% Overall Passing": Percentage_Overall_Passing_per_school
})
School_Summary_df["Total School Budget"] = School_Summary_df["Total School Budget"].map("${:,.2f}".format)
School_Summary_df["Per Student Budget"] = School_Summary_df["Per Student Budget"].map("${:,.2f}".format)
School_Summary_df
# ## Top Performing Schools (By % Overall Passing)
# * Sort and display the top five performing schools by % overall passing.
Top_Schools_df = School_Summary_df.sort_values(["% Overall Passing"], ascending=False)
Top_Schools_df.head()
# ## Bottom Performing Schools (By % Overall Passing)
# * Sort and display the five worst-performing schools by % overall passing.
Bottom_Schools_df = School_Summary_df.sort_values(["% Overall Passing"], ascending=True)
Bottom_Schools_df.head()
# ## Math Scores by Grade
# * Create a table that lists the average Math Score for students of each grade level (9th, 10th, 11th, 12th) at each school.
#
# * Create a pandas series for each grade. Hint: use a conditional statement.
#
# * Group each series by school
#
# * Combine the series into a dataframe
#
# * Optional: give the displayed data cleaner formatting
# +
students_9th_df = school_data_complete[(school_data_complete["grade"] == "9th")]
students_10th_df = school_data_complete[(school_data_complete["grade"] == "10th")]
students_11th_df = school_data_complete[(school_data_complete["grade"] == "11th")]
students_12th_df = school_data_complete[(school_data_complete["grade"] == "12th")]
# print(f"{students_9th} {students_10th} {students_11th} {students_12th}")
avg_math_9th = students_9th_df.groupby(["school_name"])["math_score"].mean()
avg_math_10th = students_10th_df.groupby(["school_name"])["math_score"].mean()
avg_math_11th = students_11th_df.groupby(["school_name"])["math_score"].mean()
avg_math_12th = students_12th_df.groupby(["school_name"])["math_score"].mean()
# print(f"{avg_math_9th} {avg_math_10th} {avg_math_11th} {avg_math_12th}")
# +
Math_Scores_by_Grade_df = pd.DataFrame({
"9th": avg_math_9th,
"10th": avg_math_10th,
"11th": avg_math_11th,
"12th": avg_math_12th
})
Math_Scores_by_Grade_df.index.name = None
Math_Scores_by_Grade_df
# -
# ## Reading Score by Grade
# * Perform the same operations as above for reading scores
# +
students_9th_scores = students_9th_df.groupby(["school_name"])["reading_score"].mean()
students_10th_scores = students_10th_df.groupby(["school_name"])["reading_score"].mean()
students_11th_scores = students_11th_df.groupby(["school_name"])["reading_score"].mean()
students_12th_scores = students_12th_df.groupby(["school_name"])["reading_score"].mean()
Reading_Score_by_Grade_df = pd.DataFrame({
"9th": students_9th_scores,
"10th": students_10th_scores,
"11th": students_11th_scores,
"12th": students_12th_scores
})
Reading_Score_by_Grade_df.index.name = None
Reading_Score_by_Grade_df
# -
# ## Scores by School Spending
# * Create a table that breaks down school performances based on average Spending Ranges (Per Student). Use 4 reasonable bins to group school spending. Include in the table each of the following:
# * Average Math Score
# * Average Reading Score
# * % Passing Math
# * % Passing Reading
# * Overall Passing Rate (Average of the above two)
spending_bins = [0, 585, 630, 645, 675]
group_names = ["<$585", "$585-630", "$630-645", "$645-675"]
# +
school_spending_df = pd.DataFrame({
"School Name": school_name,
"Total Students": Total_Students_per_school,
"Total School Budget": Sum_Budget,
"Per Student Budget": Student_Budget_per_school,
"Average Math Score": Mean_Math_Score_per_school,
"Average Reading Score": Mean_Reading_Score_per_school,
"% Passing Math": Percentage_Passing_Math_per_school,
"% Passing Reading": Percentage_Passing_Reading_per_school,
"% Overall Passing": Percentage_Overall_Passing_per_school
})
school_spending_df
# -
# Categorize spending based on the bins.
school_spending_df["Spending Ranges (Per Student)"] = pd.cut(Student_Budget_per_school, spending_bins, labels=group_names)
school_spending_df
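# As a sketch of the requested summary table, group the categorized schools by spending range
# and average the score columns.
spending_summary_df = school_spending_df.groupby("Spending Ranges (Per Student)")[[
    "Average Math Score",
    "Average Reading Score",
    "% Passing Math",
    "% Passing Reading",
    "% Overall Passing"]].mean()
spending_summary_df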
# ## Scores by School Size
# Establish the bins.
size_bins = [0, 1000, 2000, 5000]
group_names = ["Small (<1000)", "Medium (1000-2000)", "Large (2000-5000)"]
# +
school_size_df = pd.DataFrame({
"School Name": school_name,
"Total Students": Total_Students_per_school,
"Total School Budget": Sum_Budget,
"Per Student Budget": Student_Budget_per_school,
"Average Math Score": Mean_Math_Score_per_school,
"Average Reading Score": Mean_Reading_Score_per_school,
"% Passing Math": Percentage_Passing_Math_per_school,
"% Passing Reading": Percentage_Passing_Reading_per_school,
"% Overall Passing": Percentage_Overall_Passing_per_school
})
# school_size_df
# -
school_size_df["school size"] = pd.cut(Total_Students_per_school, size_bins, labels=group_names)
school_size_df
# Calculate averages for the desired columns.
avg_math_scores = school_size_df.groupby(["school size"])["Average Math Score"].mean()
avg_reading_scores = school_size_df.groupby(["school size"])["Average Reading Score"].mean()
avg_passing_math = school_size_df.groupby(["school size"])["% Passing Math"].mean()
avg_passing_reading = school_size_df.groupby(["school size"])["% Passing Reading"].mean()
avg_overall_passing = school_size_df.groupby(["school size"])["% Overall Passing"].mean()
# +
Scores_by_School_Size_df = pd.DataFrame({
"Average Math Score" : Mean_Math_Score_per_school,
"Average Reading Score": Mean_Reading_Score_per_school,
"% Passing Math": Percentage_Passing_Math_per_school,
"% Passing Reading": Percentage_Passing_Reading_per_school,
"% Overall Passing": Percentage_Overall_Passing_per_school
})
Scores_by_School_Size_df
# -
# ## Scores by School Type
# * Perform the same operations as above, based on school type
# +
School_Name_df = pd.DataFrame({
"School Name": school_name,
"Total Students": Total_Students_per_school,
"Total School Budget": Sum_Budget,
"Per Student Budget": Student_Budget_per_school,
"Average Math Score": Mean_Math_Score_per_school,
"Average Reading Score": Mean_Reading_Score_per_school,
"% Passing Math": Percentage_Passing_Math_per_school,
"% Passing Reading": Percentage_Passing_Reading_per_school,
"% Overall Passing": Percentage_Overall_Passing_per_school
})
School_Name_df
# +
# Assemble into DataFrame.
size_summary_df = pd.DataFrame({
"Average Math Score" : Mean_Math_Score_per_school,
"Average Reading Score": Mean_Reading_Score_per_school,
"% Passing Math": Percentage_Passing_Math_per_school,
"% Passing Reading": Percentage_Passing_Reading_per_school,
"% Overall Passing": Percentage_Overall_Passing_per_school})
size_summary_df
# -
avg_math_scores
# +
# Group the per-school results by school type (taken from the school data) and average them.
school_types = school_data.set_index("school_name")["type"]
type_summary_df = School_Name_df.copy()
type_summary_df["School Type"] = school_types

Scores_by_School_Type_df = type_summary_df.groupby("School Type")[[
    "Average Math Score",
    "Average Reading Score",
    "% Passing Math",
    "% Passing Reading",
    "% Overall Passing"]].mean()
Scores_by_School_Type_df
| Resources/PyCitySchools_starter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
from astropy.table import Table, Column, unique
from astropy.time import Time
import os
from astropy.io import fits
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
from scipy.stats import binned_statistic
from xcor_tools_nicer import find_nearest as find_nearest
from xcor_tools_nicer import clock_to_mjd as clock_to_mjd
font_prop = font_manager.FontProperties(size=20)
# %matplotlib inline
from astropy.modeling import fitting, powerlaws
from fast_histogram import histogram1d
homedir = os.path.expanduser("~")
exe_dir = os.getcwd()
obj_name = "GX_339-4"
obj_prefix = "gx339-2021"
data_dir = homedir + "/Reduced_data/%s" % obj_name
# evt_list = "%s/in/%s_evtlists.txt" % (exe_dir, obj_prefix)
# data_files = [line.strip() for line in open(evt_list)]
evt_list = data_dir+"/gx3394-1.evt"
rsp_matrix_file = "%s/in/nicer_v1.02rbn-5.rsp" % exe_dir
rsp_hdu = fits.open(rsp_matrix_file)
detchans = int(rsp_hdu['EBOUNDS'].header['DETCHANS'])
print(detchans)
evts = Table.read(evt_list, format='fits', hdu=1)
print(evts)
# +
a = (evts['PI'] >= 500) & (evts['PI'] <= 550)
b = (evts['PI'] >= 750) & (evts['PI'] <= 800)
c = (evts['PI'] >= 620) & (evts['PI'] <= 670)
# d = (evts['PI'] >= 500) & (evts['PI'] <= 800)
a = histogram1d(evts['PI'][a], range=[500,560], bins=6)
b = histogram1d(evts['PI'][b], range=[750,810], bins=6)
c = histogram1d(evts['PI'][c], range=[620,680], bins=6)
bins_a = np.arange(500,560,10)
bins_b = np.arange(750,810,10)
bins_c = np.arange(620,680,10)
print(len(bins_a))
print(len(a))
print(bins_a)
cont = np.append(a[0:-1],b[0:-1])
x_cont = np.append(bins_a[0:-1], bins_b[0:-1])
fe = c[0:-1]
x_fe = bins_c[0:-1]
# -
plt.scatter(x_cont, cont)
# +
# myplfit=plfit.plfit(cont,usefortran=False)
# -
for i in range(0,2):
pl_init = powerlaws.PowerLaw1D(amplitude=50, x_0=1000., alpha=4.)
fit_pl = fitting.LevMarLSQFitter()
pl = fit_pl(pl_init, x_cont, cont)
# +
# print(fit_pl.fit_info['message'])
# -
print(pl)
plt.scatter(x_cont, cont)
plt.plot(x_cont, pl(x_cont))
plt.plot(x_fe, pl(x_fe))
plt.plot(x_fe, fe)
for i in range(len(x_fe)):
print(fe[i]/pl(x_fe[i]))
ratio = np.average(fe/pl(x_fe))
print(ratio)
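# A rough equivalent-width estimate (a sketch, assuming the usual definition
# EW = sum over the line band of (line - continuum)/continuum times the bin width,
# here in units of PI channels with 10 channels per bin):
bin_width = 10
eqw_est = np.sum((fe - pl(x_fe)) / pl(x_fe)) * bin_width
print(eqw_est)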
| testing_line-eqw.ipynb |